Datasets: AI4M
If for every $t \in T$ and $x \in X$, there exists a neighborhood $U$ of $t$ and a neighborhood $V$ of $x$ such that $f$ is Lipschitz on $V$ uniformly for all $t \in U$, then $f$ is locally Lipschitz.
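Spelled out (our reading of the statement, taking $T$, $X$ and the codomain to be metric spaces), the hypothesis says that for every $t \in T$ and $x \in X$ there are neighborhoods $U \ni t$, $V \ni x$ and a single constant $C \ge 0$ with
$$d\bigl(f(t', x_1), f(t', x_2)\bigr) \le C \, d(x_1, x_2) \qquad \text{for all } t' \in U,\ x_1, x_2 \in V,$$
i.e. one Lipschitz constant works simultaneously for every parameter $t' \in U$; the conclusion is that $f$ is locally Lipschitz.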
#ifdef WIN32 #define NOMINMAX #endif #define _USE_MATH_DEFINES #include <math.h> #include <app.h> #include <NewtonFunctionMinimizer.h> #include <RigidBodySimulation.h> #include <MechanismOptimizer.h> #include "mechanisms.h" #include <iostream> #include <chrono> #include <algorithm> #include <Eigen/Core> using Eigen::Vector2f; using Eigen::Vector2d; using Eigen::VectorXd; #define PLOT_N 100 // number of data points in plot class MoveRigidBodyObj : public ObjectiveFunction { public: MoveRigidBodyObj(Vector2d pTarget, Vector2d pLocal) : rb({0, 1.0}), pTarget(pTarget), pLocal(pLocal) { } double evaluate(const VectorXd& x) const override { return (pTarget - rb.pWorld(x, pLocal)).squaredNorm(); } public: RigidBody rb; Vector2d pTarget, pLocal; }; class RigidBodyApp : public App { public: RigidBodyApp(int width, int height, const char * title, float pixelRatio = 0.f) : App(width, height, title, pixelRatio), base(width) { clear_color = ImVec4(0.8f, 0.8f, 0.8f, 1.00f); lastFrame = std::chrono::high_resolution_clock::now(); font = nvgCreateFont(vg, "sans", DATA_FOLDER"/Roboto-Regular.ttf"); if (font == -1) { printf("Could not add font.\n"); } for (float & d : dataEnergy) d = 0; sim = make4barSim(); mechOpt = MechanismOptimizer(sim); } void process() override{ // move image if right mouse button is pressed if(mouseDown[GLFW_MOUSE_BUTTON_RIGHT]){ auto dw = (int)(cursorPos[0] - cursorPosDown[0]); auto dh = (int)(cursorPos[1] - cursorPosDown[1]); translation[0] += dw/(double)base; translation[1] -= dh/(double)base; cursorPosDown[0] = cursorPos[0]; cursorPosDown[1] = cursorPos[1]; } // run at 60fps, or in slow mo std::chrono::high_resolution_clock::time_point now = std::chrono::high_resolution_clock::now(); if(std::chrono::duration_cast<std::chrono::milliseconds>(now-lastFrame).count() > ((slowMo) ? 320 : 16)){ if(selectedRb != -1){ double scale = 1.0; if(keyDown[GLFW_KEY_UP]) scale = 1.01; else if(keyDown[GLFW_KEY_DOWN]) scale = 0.99; if(scale != 1.0){ sim.scaleRigidBody(selectedRb, scale); if(sim.motorIdx != -1) trackedTrajectory = sim.recordTrajectory(); cout << "trackedTrajectory: " << trackedTrajectory << endl; } } if(sim.motorIdx != -1){ if(keyDown[GLFW_KEY_LEFT]) sim.fixedAngle()[sim.motorIdx].angle += 4 * M_PI / 180; if(keyDown[GLFW_KEY_RIGHT]) sim.fixedAngle()[sim.motorIdx].angle -= 4 * M_PI / 180; if(sim.fixedAngle()[sim.motorIdx].angle > 2*M_PI) sim.fixedAngle()[sim.motorIdx].angle -= 2*M_PI; if(sim.fixedAngle()[sim.motorIdx].angle < 0) sim.fixedAngle()[sim.motorIdx].angle += 2*M_PI; } if(mouseDown[GLFW_MOUSE_BUTTON_LEFT] && selectedRb != -1){ Vector2d pos = fromScreen(cursorPos[0], cursorPos[1]); const auto &rb = sim.rigidbodies()[selectedRb]; MoveRigidBodyObj obj({pos, selectedRbLocal}); GradientDescentVariableStep gd; VectorXd xRb = sim.x.segment<3>(rb.dofIdx); gd.minimize(&obj, xRb, false); sim.x.segment<3>(rb.dofIdx) = xRb; } if(runSim){ sim.run(); } energy = sim.energy.evaluate(sim.x); dataCounter %= PLOT_N; dataEnergy.at(dataCounter++) = (float)energy; lastFrame = now; } } void drawScene() override { // this is the GUI { ImGui::Begin("Assignement 1"); ImGui::TextColored(ImVec4(1.f, 1.f, 1.f, 0.5f), "left mouse: Select rigid bodies, apply forces"); ImGui::TextColored(ImVec4(1.f, 1.f, 1.f, 0.5f), "right mouse: move around"); ImGui::TextColored(ImVec4(1.f, 1.f, 1.f, 0.5f), "mouse wheel: zoom"); ImGui::TextColored(ImVec4(1.f, 1.f, 1.f, 0.5f), "space bar: play/pause"); //Define the mechanisms to include in main app. 
const RigidBodySimulation sims[] = { makeJansenSim(), make4barSim(), makePrismaticSim1(), makePrismaticSim2(), makePrismaticSim3(), makePrismaticSim4_XY(), makePrismaticSim5_XY(), makePrismaticSim6_XYLoop(), makeTangentSim1(), makeTangentSim2(), }; const int numberOfSims = 10; //Construct the dropdown list based on the included function char* items[numberOfSims]; for (int i = 0; i < numberOfSims; i++) { items[i] = sims[i].name; } if(ImGui::Combo("Mechanism", &loadMech, items, numberOfSims)){ sim = sims[loadMech]; targetTrajectory.resize(0, 2); //if (loadMech == 0) { // targetTrajectory = makeJansenTargetPath(); // sim.energy.fixedAngleEnabled = true; // trackedTrajectory = sim.recordTrajectory(); //} mechOpt = MechanismOptimizer(sim); } if(ImGui::CollapsingHeader("Simulation")){ ImGui::Checkbox("run simulation", &runSim); ImGui::PlotLines("Energy", dataEnergy.data(), PLOT_N, dataCounter, "energy", 0, 1.0, ImVec2(0, 100)); ImGui::Separator(); if(sim.motorIdx != -1){ ImGui::Checkbox("Fixed Angles enabled", &sim.energy.fixedAngleEnabled); auto &fixedAngle = sim.fixedAngle()[sim.motorIdx]; float angle = (sim.energy.fixedAngleEnabled) ? fixedAngle.angle : sim.rigidbodies()[fixedAngle.rbIdx].theta(sim.x); if(ImGui::SliderAngle("angle", &angle, 0, 360)){ fixedAngle.angle = angle; sim.rigidbodies()[fixedAngle.rbIdx].theta(sim.x) = angle; } ImGui::Separator(); } } // if(sim.motorIdx != -1 && ImGui::CollapsingHeader("Design Optimization")){ // ImGui::Checkbox("Run optimization", &isMechOpt); // if(isMechOpt){ // mechOpt.targetPath = targetTrajectory; // mechOpt.optimizeTrajectory(); // VectorXd p = sim.getDesignParameters(); // if((p-mechOpt.p).norm() > 1e-10){ // sim.setDesignParameters(mechOpt.p); // trackedTrajectory = sim.recordTrajectory(); // } // } // if(ImGui::Button("print link lengths")) // std::cout << "Link lengths:" << std::endl << // sim.getDesignParameters() << std::endl; //} ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate); ImGui::End(); } for (const auto &f : sim.fixedAngle()) { nvgResetTransform(vg); nvgBeginPath(vg); const auto &rb = sim.rigidbodies()[f.rbIdx]; Vector2d p = rb.pos(sim.x); nvgCircle(vg, toScreen(p.x(), 0), toScreen(p.y(), 1), toScreen(0.5)); nvgFillColor(vg, (sim.motorIdx == -1) ? 
nvgRGBAf(0, 0, 1, 0.5) : nvgRGBAf(0, 1, 1, 0.5)); nvgFill(vg); } int i = 0; for (const auto &rb : sim.rigidbodies()) { nvgResetTransform(vg); Vector2d p = rb.pos(sim.x); nvgTranslate(vg, toScreen(p.x(), 0), toScreen(p.y(), 1)); nvgRotate(vg, -rb.theta(sim.x)); nvgBeginPath(vg); double r = rb.width/2; nvgRoundedRect(vg, toScreen(-rb.length*0.5 - r), toScreen(-rb.width*0.5), toScreen(rb.length + 2*r), toScreen(rb.width), toScreen(r)); if(i == selectedRb) nvgFillColor(vg, nvgRGBAf(1.0, 0.9, 0.7, 0.5)); else nvgFillColor(vg, nvgRGBAf(0.5, 0.5, 0.5, 0.5)); nvgFill(vg); nvgStrokeColor(vg, nvgRGBAf(0, 0, 0, 1)); nvgStrokeWidth(vg, 2*pixelRatio); nvgStroke(vg); nvgFontSize(vg, toScreen(0.3)); nvgFontFace(vg, "sans"); nvgFillColor(vg, nvgRGBAf(0,0,0,1)); nvgText(vg, 0, 100/zoom, rb.name.c_str(), nullptr); i++; } for (const auto &joint : sim.hingeJoints()) { nvgResetTransform(vg); nvgBeginPath(vg); const auto &rb0 = sim.rigidbodies()[joint.rbIdx[0]]; const auto &rb1 = sim.rigidbodies()[joint.rbIdx[1]]; Vector2d p0 = rb0.pWorld(sim.x, joint.local[0]); Vector2d p1 = rb1.pWorld(sim.x, joint.local[1]); nvgMoveTo(vg, toScreen(p0.x(), 0), toScreen(p0.y(), 1)); nvgLineTo(vg, toScreen(p1.x(), 0), toScreen(p1.y(), 1)); nvgStrokeColor(vg, nvgRGBAf(1, 0, 0, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); nvgBeginPath(vg); nvgCircle(vg, toScreen(p0.x(), 0), toScreen(p0.y(), 1), toScreen(rb0.width/3)); nvgCircle(vg, toScreen(p1.x(), 0), toScreen(p1.y(), 1), toScreen(rb1.width/3)); nvgStrokeColor(vg, nvgRGBAf(1, 0, 0, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); } //Added Draw Point On Line Joint Graphics for (const auto &joint : sim.pointOnLineJoints()) { nvgResetTransform(vg); nvgBeginPath(vg); const auto &rb0 = sim.rigidbodies()[joint.rbIdx[0]]; Vector2d p0_bgn = rb0.pWorld(sim.x, joint.local0Point); Vector2d p0_end = rb0.pWorld(sim.x, joint.local0Point + joint.local0Vector); nvgMoveTo(vg, toScreen(p0_bgn.x(), 0), toScreen(p0_bgn.y(), 1)); nvgLineTo(vg, toScreen(p0_end.x(), 0), toScreen(p0_end.y(), 1)); nvgStrokeColor(vg, nvgRGBAf(0, 0, 1, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); //Draw circle on rb1 point const auto &rb1 = sim.rigidbodies()[joint.rbIdx[1]]; Vector2d p1 = rb1.pWorld(sim.x, joint.local1Point); nvgBeginPath(vg); nvgCircle(vg, toScreen(p1.x(), 0), toScreen(p1.y(), 1), toScreen(rb1.width / 3)); nvgStrokeColor(vg, nvgRGBAf(0, 0, 1, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); } for (const auto &f : sim.fixed()) { nvgResetTransform(vg); nvgBeginPath(vg); const auto &rb = sim.rigidbodies()[f.rbIdx]; Vector2d p0 = rb.pWorld(sim.x, f.localPos); Vector2d p1 = f.pos; nvgMoveTo(vg, toScreen(p0.x(), 0), toScreen(p0.y(), 1)); nvgLineTo(vg, toScreen(p1.x(), 0), toScreen(p1.y(), 1)); nvgStrokeColor(vg, nvgRGBAf(0.2, 0.2, 0, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); nvgBeginPath(vg); nvgCircle(vg, toScreen(p0.x(), 0), toScreen(p0.y(), 1), toScreen(rb.width/4)); nvgCircle(vg, toScreen(p1.x(), 0), toScreen(p1.y(), 1), toScreen(rb.width/4)); nvgStrokeColor(vg, nvgRGBAf(0.2, 0.2, 0, 0.5)); nvgStrokeWidth(vg, 2.0*pixelRatio); nvgStroke(vg); } // draw tracked point if(sim.trackRBPoint.rbIdx >= 0){ nvgResetTransform(vg); nvgBeginPath(vg); const auto &rb = sim.rigidbodies()[sim.trackRBPoint.rbIdx]; Vector2d p = rb.pWorld(sim.x, sim.trackRBPoint.local); nvgCircle(vg, toScreen(p.x(), 0), toScreen(p.y(), 1), 2.0); nvgFillColor(vg, nvgRGBAf(0.2, 0.8, 0.2, 0.5)); nvgFill(vg); } auto draw_path = [=](const Matrix<double, -1, 2> &path, NVGcolor color){ 
if(path.rows() > 0){ nvgBeginPath(vg); nvgMoveTo(vg, toScreen(path(0, 0), 0), toScreen(path(0, 1), 1)); for (int i = 0; i < path.rows(); i++) { nvgLineTo(vg, toScreen(path(i, 0), 0), toScreen(path(i, 1), 1)); } nvgStrokeColor(vg, color); nvgStrokeWidth(vg, 2); nvgStroke(vg); nvgBeginPath(vg); for (int i = 0; i < path.rows(); i++) nvgCircle(vg, toScreen(path(i, 0), 0), toScreen(path(i, 1), 1), toScreen(0.07)); nvgFillColor(vg, color); nvgFill(vg); } }; draw_path(trackedTrajectory, nvgRGBAf(0.2, 0.8, 0.2, 0.5)); draw_path(targetTrajectory, nvgRGBAf(0.8, 0.5, 0.2, 0.5)); } protected: void keyPressed(int key, int /*mods*/) override { // play / pause with space bar if(key == GLFW_KEY_SPACE) runSim = !runSim; } void mousePressed(int button) override { cursorPosDown[0] = cursorPos[0]; cursorPosDown[1] = cursorPos[1]; if(button == GLFW_MOUSE_BUTTON_LEFT){ Vector2d cursor = fromScreen(cursorPos[0], cursorPos[1]); int i = 0; selectedRb = -1; for (const auto &rb : sim.rigidbodies()) { auto rot = rotationMatrix(-rb.theta(sim.x)); Vector2d d = rot * (cursor - rb.pos(sim.x)); if(std::abs(d.x()) <= rb.length/2 && std::abs(d.y()) <= rb.width/2){ selectedRb = i; selectedRbLocal = d; break; } i++; } } } void mouseReleased(int /*button*/) override { } void scrollWheel(double /*xoffset*/, double yoffset) override { double zoomOld = zoom; zoom *= std::pow(1.10, yoffset); for (int dim = 0; dim < 2; ++dim) { double c = cursorPos[dim]/(double) ((dim == 0) ? base : -base); translation[dim] = c - zoomOld/zoom * (c-translation[dim]); } } void windowResized(int /*w*/, int /*h*/) override { } private: VectorXd fromScreen(int i, int j, int w, int h) const { VectorXd x(2); x[0] = ((double)i/(double)w - translation[0])*zoom/pixelRatio; x[1] = (-(double)j/(double)h - translation[1])*zoom/pixelRatio; return x; } template<class S> VectorXd fromScreen(S i, S j) const { return fromScreen((double)i, (double)j, base, base); } double toScreen(double s, int dim) const { return (s/zoom*pixelRatio + translation[dim]) * (double)((dim == 0) ? base : -base); } double toScreen(double s) const { return s/zoom*pixelRatio * base; } private: int font = -1; int loadMech = 0; bool runSim = false; std::chrono::high_resolution_clock::time_point lastFrame; bool slowMo = false; double cursorPosDown[2]{}; double translation[2] = {0.75*pixelRatio, -0.25*pixelRatio}; double zoom = 24; int base; int selectedRb = -1; Vector2d selectedRbLocal; bool isMechOpt = false; public: // optimization double energy = 0; int dataCounter = 0; std::array<float, PLOT_N> dataEnergy{}; RigidBodySimulation sim; Matrix<double, -1, 2> trackedTrajectory, targetTrajectory; MechanismOptimizer mechOpt; }; int main(int, char**) { // If you have high DPI screen settings, you can change the pixel ratio // accordingly. E.g. for 200% scaling use `pixelRatio = 2.f` RigidBodyApp app(1080, 720, "Assignement 1"); app.run(); return 0; }
open import Prelude
open import core

module ground-decidable where
  -- every type is either ground or not
  ground-decidable : (τ : typ) → (τ ground) + ((τ ground) → ⊥)
  ground-decidable b = Inl GBase
  ground-decidable ⦇·⦈ = Inr (λ ())
  ground-decidable (b ==> b) = Inr (λ ())
  ground-decidable (b ==> ⦇·⦈) = Inr (λ ())
  ground-decidable (b ==> τ' ==> τ'') = Inr (λ ())
  ground-decidable (b ==> τ₁ ⊗ τ₂) = Inr (λ ())
  ground-decidable (⦇·⦈ ==> b) = Inr (λ ())
  ground-decidable (⦇·⦈ ==> ⦇·⦈) = Inl GHole
  ground-decidable (⦇·⦈ ==> τ' ==> τ'') = Inr (λ ())
  ground-decidable ((τ ==> τ₁) ==> b) = Inr (λ ())
  ground-decidable ((τ ==> τ₁) ==> ⦇·⦈) = Inr (λ ())
  ground-decidable ((τ ==> τ₁) ==> τ' ==> τ'') = Inr (λ ())
  ground-decidable ((τ ⊗ τ₂) ==> τ₁) = Inr (λ ())
  ground-decidable (τ ⊗ b) = Inr (λ ())
  ground-decidable (b ⊗ ⦇·⦈) = Inr (λ ())
  ground-decidable (⦇·⦈ ⊗ ⦇·⦈) = Inl GProd
  ground-decidable (⦇·⦈ ==> τ₁ ⊗ τ₂) = Inr (λ ())
  ground-decidable ((τ ⊗ τ₁) ⊗ ⦇·⦈) = Inr (λ ())
  ground-decidable ((τ ==> τ₁) ⊗ ⦇·⦈) = Inr (λ ())
  ground-decidable ((τ ==> τ₂) ==> τ₁ ⊗ τ₃) = Inr (λ ())
  ground-decidable (τ ⊗ τ₁ ==> τ₂) = Inr (λ ())
  ground-decidable (τ ⊗ τ₁ ⊗ τ₂) = Inr (λ ())
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl -/ import meta.univs import tactic.lint import tactic.ext /-! # Sigma types This file proves basic results about sigma types. A sigma type is a dependent pair type. Like `α × β` but where the type of the second component depends on the first component. This can be seen as a generalization of the sum type `α ⊕ β`: * `α ⊕ β` is made of stuff which is either of type `α` or `β`. * Given `α : ι → Type*`, `sigma α` is made of stuff which is of type `α i` for some `i : ι`. One effectively recovers a type isomorphic to `α ⊕ β` by taking a `ι` with exactly two elements. See `equiv.sum_equiv_sigma_bool`. `Σ x, A x` is notation for `sigma A` (note the difference with the big operator `∑`). `Σ x y z ..., A x y z ...` is notation for `Σ x, Σ y, Σ z, ..., A x y z ...`. Here we have `α : Type*`, `β : α → Type*`, `γ : Π a : α, β a → Type*`, ..., `A : Π (a : α) (b : β a) (c : γ a b) ..., Type*` with `x : α` `y : β x`, `z : γ x y`, ... ## Notes The definition of `sigma` takes values in `Type*`. This effectively forbids `Prop`- valued sigma types. To that effect, we have `psigma`, which takes value in `Sort*` and carries a more complicated universe signature in consequence. -/ section sigma variables {α α₁ α₂ : Type*} {β : α → Type*} {β₁ : α₁ → Type*} {β₂ : α₂ → Type*} namespace sigma instance [inhabited α] [inhabited (β default)] : inhabited (sigma β) := ⟨⟨default, default⟩⟩ instance [h₁ : decidable_eq α] [h₂ : ∀a, decidable_eq (β a)] : decidable_eq (sigma β) | ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ := match a₁, b₁, a₂, b₂, h₁ a₁ a₂ with | _, b₁, _, b₂, is_true (eq.refl a) := match b₁, b₂, h₂ a b₁ b₂ with | _, _, is_true (eq.refl b) := is_true rfl | b₁, b₂, is_false n := is_false (assume h, sigma.no_confusion h (λe₁ e₂, n $ eq_of_heq e₂)) end | a₁, _, a₂, _, is_false n := is_false (assume h, sigma.no_confusion h (λe₁ e₂, n e₁)) end @[simp, nolint simp_nf] -- sometimes the built-in injectivity support does not work theorem mk.inj_iff {a₁ a₂ : α} {b₁ : β a₁} {b₂ : β a₂} : sigma.mk a₁ b₁ = ⟨a₂, b₂⟩ ↔ (a₁ = a₂ ∧ b₁ == b₂) := by simp @[simp] theorem eta : ∀ x : Σ a, β a, sigma.mk x.1 x.2 = x | ⟨i, x⟩ := rfl @[ext] lemma ext {x₀ x₁ : sigma β} (h₀ : x₀.1 = x₁.1) (h₁ : x₀.2 == x₁.2) : x₀ = x₁ := by { cases x₀, cases x₁, cases h₀, cases h₁, refl } lemma ext_iff {x₀ x₁ : sigma β} : x₀ = x₁ ↔ x₀.1 = x₁.1 ∧ x₀.2 == x₁.2 := by { cases x₀, cases x₁, exact sigma.mk.inj_iff } /-- A specialized ext lemma for equality of sigma types over an indexed subtype. 
-/ @[ext] lemma subtype_ext {β : Type*} {p : α → β → Prop} : ∀ {x₀ x₁ : Σ a, subtype (p a)}, x₀.fst = x₁.fst → (x₀.snd : β) = x₁.snd → x₀ = x₁ | ⟨a₀, b₀, hb₀⟩ ⟨a₁, b₁, hb₁⟩ rfl rfl := rfl lemma subtype_ext_iff {β : Type*} {p : α → β → Prop} {x₀ x₁ : Σ a, subtype (p a)} : x₀ = x₁ ↔ x₀.fst = x₁.fst ∧ (x₀.snd : β) = x₁.snd := ⟨λ h, h ▸ ⟨rfl, rfl⟩, λ ⟨h₁, h₂⟩, subtype_ext h₁ h₂⟩ @[simp] theorem «forall» {p : (Σ a, β a) → Prop} : (∀ x, p x) ↔ (∀ a b, p ⟨a, b⟩) := ⟨assume h a b, h ⟨a, b⟩, assume h ⟨a, b⟩, h a b⟩ @[simp] theorem «exists» {p : (Σ a, β a) → Prop} : (∃ x, p x) ↔ (∃ a b, p ⟨a, b⟩) := ⟨assume ⟨⟨a, b⟩, h⟩, ⟨a, b, h⟩, assume ⟨a, b, h⟩, ⟨⟨a, b⟩, h⟩⟩ /-- Map the left and right components of a sigma -/ def map (f₁ : α₁ → α₂) (f₂ : Πa, β₁ a → β₂ (f₁ a)) (x : sigma β₁) : sigma β₂ := ⟨f₁ x.1, f₂ x.1 x.2⟩ end sigma lemma sigma_mk_injective {i : α} : function.injective (@sigma.mk α β i) | _ _ rfl := rfl lemma function.injective.sigma_map {f₁ : α₁ → α₂} {f₂ : Πa, β₁ a → β₂ (f₁ a)} (h₁ : function.injective f₁) (h₂ : ∀ a, function.injective (f₂ a)) : function.injective (sigma.map f₁ f₂) | ⟨i, x⟩ ⟨j, y⟩ h := begin obtain rfl : i = j, from h₁ (sigma.mk.inj_iff.mp h).1, obtain rfl : x = y, from h₂ i (eq_of_heq (sigma.mk.inj_iff.mp h).2), refl end lemma function.surjective.sigma_map {f₁ : α₁ → α₂} {f₂ : Πa, β₁ a → β₂ (f₁ a)} (h₁ : function.surjective f₁) (h₂ : ∀ a, function.surjective (f₂ a)) : function.surjective (sigma.map f₁ f₂) := begin intros y, cases y with j y, cases h₁ j with i hi, subst j, cases h₂ i y with x hx, subst y, exact ⟨⟨i, x⟩, rfl⟩ end /-- Interpret a function on `Σ x : α, β x` as a dependent function with two arguments. This also exists as an `equiv` as `equiv.Pi_curry γ`. -/ def sigma.curry {γ : Π a, β a → Type*} (f : Π x : sigma β, γ x.1 x.2) (x : α) (y : β x) : γ x y := f ⟨x,y⟩ /-- Interpret a dependent function with two arguments as a function on `Σ x : α, β x`. This also exists as an `equiv` as `(equiv.Pi_curry γ).symm`. -/ def sigma.uncurry {γ : Π a, β a → Type*} (f : Π x (y : β x), γ x y) (x : sigma β) : γ x.1 x.2 := f x.1 x.2 @[simp] lemma sigma.uncurry_curry {γ : Π a, β a → Type*} (f : Π x : sigma β, γ x.1 x.2) : sigma.uncurry (sigma.curry f) = f := funext $ λ ⟨i, j⟩, rfl @[simp] lemma sigma.curry_uncurry {γ : Π a, β a → Type*} (f : Π x (y : β x), γ x y) : sigma.curry (sigma.uncurry f) = f := rfl /-- Convert a product type to a Σ-type. -/ @[simp] def prod.to_sigma {α β} : α × β → Σ _ : α, β | ⟨x,y⟩ := ⟨x,y⟩ @[simp] lemma prod.fst_to_sigma {α β} (x : α × β) : (prod.to_sigma x).fst = x.fst := by cases x; refl @[simp] lemma prod.snd_to_sigma {α β} (x : α × β) : (prod.to_sigma x).snd = x.snd := by cases x; refl -- we generate this manually as `@[derive has_reflect]` fails @[instance] protected meta def {u v} sigma.reflect [reflected_univ.{u}] [reflected_univ.{v}] {α : Type u} (β : α → Type v) [reflected _ α] [reflected _ β] [hα : has_reflect α] [hβ : Π i, has_reflect (β i)] : has_reflect (Σ a, β a) := λ ⟨a, b⟩, (by reflect_name : reflected _ @sigma.mk.{u v}).subst₄ `(α) `(β) `(a) `(b) end sigma section psigma variables {α : Sort*} {β : α → Sort*} namespace psigma /-- Nondependent eliminator for `psigma`. 
-/ def elim {γ} (f : ∀ a, β a → γ) (a : psigma β) : γ := psigma.cases_on a f @[simp] theorem elim_val {γ} (f : ∀ a, β a → γ) (a b) : psigma.elim f ⟨a, b⟩ = f a b := rfl instance [inhabited α] [inhabited (β default)] : inhabited (psigma β) := ⟨⟨default, default⟩⟩ instance [h₁ : decidable_eq α] [h₂ : ∀a, decidable_eq (β a)] : decidable_eq (psigma β) | ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ := match a₁, b₁, a₂, b₂, h₁ a₁ a₂ with | _, b₁, _, b₂, is_true (eq.refl a) := match b₁, b₂, h₂ a b₁ b₂ with | _, _, is_true (eq.refl b) := is_true rfl | b₁, b₂, is_false n := is_false (assume h, psigma.no_confusion h (λe₁ e₂, n $ eq_of_heq e₂)) end | a₁, _, a₂, _, is_false n := is_false (assume h, psigma.no_confusion h (λe₁ e₂, n e₁)) end theorem mk.inj_iff {a₁ a₂ : α} {b₁ : β a₁} {b₂ : β a₂} : @psigma.mk α β a₁ b₁ = @psigma.mk α β a₂ b₂ ↔ (a₁ = a₂ ∧ b₁ == b₂) := iff.intro psigma.mk.inj $ assume ⟨h₁, h₂⟩, match a₁, a₂, b₁, b₂, h₁, h₂ with _, _, _, _, eq.refl a, heq.refl b := rfl end @[ext] lemma ext {x₀ x₁ : psigma β} (h₀ : x₀.1 = x₁.1) (h₁ : x₀.2 == x₁.2) : x₀ = x₁ := by { cases x₀, cases x₁, cases h₀, cases h₁, refl } lemma ext_iff {x₀ x₁ : psigma β} : x₀ = x₁ ↔ x₀.1 = x₁.1 ∧ x₀.2 == x₁.2 := by { cases x₀, cases x₁, exact psigma.mk.inj_iff } @[simp] theorem «forall» {p : (Σ' a, β a) → Prop} : (∀ x, p x) ↔ (∀ a b, p ⟨a, b⟩) := ⟨assume h a b, h ⟨a, b⟩, assume h ⟨a, b⟩, h a b⟩ @[simp] theorem «exists» {p : (Σ' a, β a) → Prop} : (∃ x, p x) ↔ (∃ a b, p ⟨a, b⟩) := ⟨assume ⟨⟨a, b⟩, h⟩, ⟨a, b, h⟩, assume ⟨a, b, h⟩, ⟨⟨a, b⟩, h⟩⟩ /-- A specialized ext lemma for equality of psigma types over an indexed subtype. -/ @[ext] lemma subtype_ext {β : Sort*} {p : α → β → Prop} : ∀ {x₀ x₁ : Σ' a, subtype (p a)}, x₀.fst = x₁.fst → (x₀.snd : β) = x₁.snd → x₀ = x₁ | ⟨a₀, b₀, hb₀⟩ ⟨a₁, b₁, hb₁⟩ rfl rfl := rfl lemma subtype_ext_iff {β : Sort*} {p : α → β → Prop} {x₀ x₁ : Σ' a, subtype (p a)} : x₀ = x₁ ↔ x₀.fst = x₁.fst ∧ (x₀.snd : β) = x₁.snd := ⟨λ h, h ▸ ⟨rfl, rfl⟩, λ ⟨h₁, h₂⟩, subtype_ext h₁ h₂⟩ variables {α₁ : Sort*} {α₂ : Sort*} {β₁ : α₁ → Sort*} {β₂ : α₂ → Sort*} /-- Map the left and right components of a sigma -/ def map (f₁ : α₁ → α₂) (f₂ : Πa, β₁ a → β₂ (f₁ a)) : psigma β₁ → psigma β₂ | ⟨a, b⟩ := ⟨f₁ a, f₂ a b⟩ end psigma end psigma
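/- A small usage sketch (not part of the mathlib file above; the examples and names
   below are ours), illustrating the `sigma` API in the same Lean 3 syntax: -/

-- a sigma type packages a first component with data whose type may depend on it;
-- with a constant family it behaves like a plain pair
example : Σ _ : ℕ, bool := ⟨3, tt⟩

-- the projections compute definitionally
example : (⟨3, tt⟩ : Σ _ : ℕ, bool).1 = 3 := rfl

-- `prod.to_sigma` (defined above) converts a product into such a sigma type
example : prod.to_sigma (3, tt) = ⟨3, tt⟩ := rfl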
# Illustration of the sampling theorem ## The Shannon sampling theorem A signal $f(t)$ with Fourier transform that is zero outside $[-\omega_1, \omega_1]$ is completely described by equidistant points $f(kh)$ if the sampling frequency is higher than $2\omega_1$. ### Reconstruction The reconstruction is given by \begin{equation} f(t) = \sum_{k=-\infty}^\infty f(kh) \frac{\sin (\omega_s(t-kh)/2)}{\omega_s (t-kh)/2} = \sum_{k=-\infty}^\infty f(kh) \mathrm{sinc} \frac{\omega_s(t-kh)}{2} \end{equation} ## Example from class, Problem 7.2 in Åström & Wittenmark A signal $y(t)$ that we want to sample for purpose of feedback control has frequency content within the range $(-\omega_0, \omega_2$. The signal is corrupted by a sinusoidal noise at the frequency $5\omega_0$, let's assume a cosine, since its spectrum is real: \begin{equation} y_m(t) = y(t) + a\cos 5\omega t \end{equation} What is the lowest sampling frequency we can use, and still separate the sampled sinusoid (possibly its alias frequency) from the frequency content of $y(t)$? 1. Solution in book: $\omega_s = 6\omega_0$. 2. Suggested in class: $\omega_s = 2\omega_0$. ```python %matplotlib inline import numpy as np import matplotlib.pyplot as plt # Generate signal of interest y(t). Let's use the sinc^2(t) function, which has a triangular fourier transform # within (-2\pi, 2\pi) def y_measurement(w0, t): y = np.sinc(w0*t/(2*np.pi))**2 n = 0.1*np.cos(5*w0*t) return (y+n, y, n) Nc = 4000 # Number of samples in "continuous" signal T = 10.0 # Seconds to simulate hc = T/Nc # Sampling frequency of "continuous" signal wNc = np.pi/hc t = np.linspace(0,10, Nc) w0 = 2*np.pi (ym, y, n) = y_measurement(w0, t) # Fourier transforms Yf = np.fft.fft(y) Nf = np.fft.fft(n) wpos = np.linspace(0,wNc, Nc/2) # Plot signals and discrete Fourier transform plt.figure(figsize=(16,4)) plt.plot(t,y) plt.plot(t,n) plt.plot(t, y+n) plt.xlabel(r'$t$ [s]') plt.xlim((-0.1,4)) plt.legend((r'$y(t)$', r'$n(t)$', r'$y(t)+n(t)$')) plt.title('Time series') plt.figure(figsize=(16,4)) plt.plot(np.hstack((-wpos[::-1]-wpos[1], wpos)), np.hstack((Yf[int(Nc/2):], Yf[:int(Nc/2)]))) plt.plot(np.hstack((-wpos[::-1]-wpos[1], wpos)), np.hstack((Nf[int(Nc/2):], Nf[:int(Nc/2)]))) plt.xlabel(r'$\omega$ [rad/s]') plt.xlim((-6*w0, 6*w0)) plt.xticks((-5*w0, -w0, w0, 5*w0)) plt.ylim((-20, 220)) lbls=plt.gca().set_xticklabels([r'$-5\omega_0$', r'$-\omega_0$', r'$\omega_0$', r'$5\omega_0$']) plt.title('Spectrum (real part)') ``` ```python # Now let's sample at ws=6w0 N = 600 # Number of samples to take ws1 = 6*w0 h1 = 2*np.pi/ws1 ts1 = np.arange(N)*h1 (ym1, y1, n1) = y_measurement(w0, ts1) Ym1f = np.fft.fft(ym1) wpos1 = np.linspace(0, ws1/2, N/2) # Plot the sampled signal and its spectrum plt.figure(figsize=(16,4)) plt.plot(t, ym, color=[0.7, 0.7, 1]) plt.stem(ts1,ym1, linefmt='r--', markerfmt='ro', basefmt = 'r-') plt.xlabel(r'$t$ [s]') plt.xlim((-0.1,4)) plt.title('Time series') plt.figure(figsize=(10,4)) plt.plot(np.hstack((-wpos1[::-1]-wpos1[1], wpos1)), np.hstack((Ym1f[int(N/2):], Ym1f[:int(N/2)]))) plt.xlabel(r'$\omega$ [rad/s]') plt.xlim((-6*w0, 6*w0)) plt.xticks((-5*w0, -w0, w0, 5*w0)) lbls=plt.gca().set_xticklabels([r'$-5\omega_0$', r'$-\omega_0$', r'$\omega_0$', r'$5\omega_0$']) plt.title('Spectrum (real part)') ``` ```python # And sampling at ws=2w0 N = 600 # Number of samples to take ws2 = 2*w0 h2 = 2*np.pi/ws2 ts2 = np.arange(N)*h2 (ym2, y2, n2) = y_measurement(w0, ts2) Ym2f = np.fft.fft(ym2) Ym2fpos = Ym2f[:int(N/2)] Ym2fpos[-1] = 0.5*Ym2f[int(N/2)] # Divide the energy at wN equally 
among the positive and negative part Ym2fneg = Ym2f[int(N/2):] Ym2fneg[0] /= 2.0 wpos2 = np.linspace(0, ws2/2, N/2) # Plot the sampled signal and its spectrum plt.figure(figsize=(16,4)) plt.plot(t, ym, color=[0.7, 0.7, 1]) plt.stem(ts2,ym2, linefmt='r--', markerfmt='ro', basefmt = 'r-') plt.xlabel(r'$t$ [s]') plt.xlim((-0.1,4)) plt.title('Time series') plt.figure(figsize=(16,4)) plt.plot(np.hstack((-wpos2[::-1]-wpos2[1], wpos2)), np.hstack((Ym2fneg, Ym2fpos))) plt.xlabel(r'$\omega$ [rad/s]') plt.xlim((-6*w0, 6*w0)) plt.xticks((-5*w0, -w0, w0, 5*w0)) lbls=plt.gca().set_xticklabels([r'$-5\omega_0$', r'$-\omega_0$', r'$\omega_0$', r'$5\omega_0$']) plt.title('Spectrum (real part)') ``` ## A digital notch filter to get rid of the alias of the sinusoid at $\omega_o$ A digital filter with two complex conjugated zeros at $\mathrm{e}^{\pm i \omega_n h}$ will filter out signals at the frequency $\omega_n$. In this case with $\omega_s = 2\omega_0$ we would want the zero at the Nyquist frequency, since for this case $\omega_N = \omega_0$. In order to not attenuate too much of the signal content near $\omega_0$, we combine the zero with a resonanse near the frequency, meaning two poles close to the unit circle at the frequency. How close the poles are is determined with a parameter $r < 1$. This gives the filter \begin{equation} H(z) = \frac{ z^2 -2\cos \omega_0 h z + 1}{z^2 - 2r\cos \omega_0 hz + r^2} \end{equation} With $r=0.9$, and $\omega_0 h = \pi$, this gives the filter \begin{equation} H(z) = \frac{(z+1)^2}{z^2 + 1.8z + 0.81} \end{equation} ```python # So, apply a digital notch filter at w0 import scipy.signal as ss r = 0.9 bf1 = [1, -2*np.cos(w0*h1), 1] af1 = [1, -2*np.cos(w0*h1), r**2] bf2 = [1, 2, 1] af2 = [1, 2*r, r**2] yf1 = ss.lfilter(bf1, af1, ym1) yf2 = ss.lfilter(bf2, af2, ym2) # Fourier transform Yf1f = np.fft.fft(yf1)*h1 Yf1fpos = Yf1f[:int(N/2)] Yf1fpos[-1] = 0.5*Yf1f[int(N/2)] # Divide the energy at wN equally among the positive and negative part Yf1fneg = Yf1f[int(N/2):] Yf1fneg[0] /= 2.0 Yf2f = np.fft.fft(yf2)*h2 Yf2fpos = Yf2f[:int(N/2)] Yf2fpos[-1] = 0.5*Yf2f[int(N/2)] # Divide the energy at wN equally among the positive and negative part Yf2fneg = Yf2f[int(N/2):] Yf2fneg[0] /= 2.0 wpos2 = np.linspace(0, ws2/2, N/2) wpos1 = np.linspace(0, ws1/2, N/2) # Plot the sampled signal and its spectrum plt.figure(figsize=(16,4)) plt.plot(t, ym) plt.plot(t, y) plt.stem(ts2,ym2, linefmt='r--', markerfmt='ro', basefmt = 'r-') plt.stem(ts2,yf2, linefmt='m--', markerfmt='mo', basefmt = 'm-') plt.stem(ts1[::3],yf1[::3], linefmt='y--', markerfmt='yo', basefmt = 'y-') plt.xlabel(r'$t$ [s]') plt.xlim(-0.1,4) plt.legend((r'$y(t)+n(t)$', r'$y(t)$', r'Sampled at $2\omega_0$', r'Sampled at $2\omega_0$ and filtered', r'Sampled at $6\omega_0$, filtered and resampled')) plt.title('Time series') plt.figure(figsize=(16,4)) plt.plot(np.hstack((-wpos2[::-1]-wpos2[1], wpos2)), np.hstack((Yf2fneg, Yf2fpos))) plt.plot(np.hstack((-wpos1[::-1]-wpos1[1], wpos1)), np.hstack((Yf1fneg, Yf1fpos))) plt.xlabel(r'$\omega$ [rad/s]') plt.xlim((-3*w0, 3*w0)) plt.xticks((-2*w0, -w0, w0, 2*w0)) lbls=plt.gca().set_xticklabels([r'$-2\omega_0$', r'$-\omega_0$', r'$\omega_0$', r'$2\omega_0$']) plt.legend((r'Sampled at $2\omega_0$', r'Sampled at $6\omega$')) plt.title('Spectrum (real part) of filtered signals') ``` ```python ```
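As a complement, here is a minimal sketch of the Shannon reconstruction formula from the top of this notebook. The function name `sinc_reconstruct` and the demo signal are our own and are not used in the cells above; the demo simply checks that a band-limited signal sampled well above its Nyquist rate is recovered.

```python
import numpy as np

def sinc_reconstruct(samples, h, t):
    """Shannon reconstruction: f(t) = sum_k f(kh) * sinc(ws*(t - kh)/2).
    With ws = 2*pi/h and np.sinc(x) = sin(pi*x)/(pi*x), each term is np.sinc((t - kh)/h)."""
    kh = np.arange(len(samples)) * h
    return np.array([np.dot(samples, np.sinc((ti - kh) / h)) for ti in t])

# Demo: a signal band-limited to |w| <= pi, sampled with ws = 8*pi >> 2*pi
h_demo = 0.25                       # sampling period
tk = np.arange(0, 10, h_demo)       # sample instants kh
f = lambda t: np.sinc(t - 5.0)      # band-limited test signal
t_dense = np.linspace(0, 10, 1000)
err = np.max(np.abs(sinc_reconstruct(f(tk), h_demo, t_dense) - f(t_dense)))
print(err)  # small, up to truncating the infinite sum to samples inside [0, 10)
```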
Mercedes Avenue is a residential street in North Davis. It connects to Grande Avenue near the Grande Avenue School Site. Intersecting streets: Grande Avenue, Hidalgo Place, Inca Place, Leon Place, Lindo Place, Norte Avenue.
function [B, elapse] = DSH_compress(A, model)
% This is a wrapper function of Density Sensitive Hashing testing.
%
%   [B, elapse] = DSH_compress(A, model)
%
%   A:      Rows of vectors of data points. Each row is a sample point.
%   model:  The model generated by DSH_learn.
%
%   B:      The binary code of the input data A. Each row is a sample point.
%   elapse: The coding time (testing time).
%
% Reference:
%
%   Zhongming Jin, Cheng Li, Yue Lin, Deng Cai: Density Sensitive Hashing.
%   IEEE Trans. Cybernetics 44(8): 1362-1371 (2014)
%
%   version 2.0 -- Nov/2016
%   version 1.0 -- Feb/2012
%
%   Written by  Yue Lin ([email protected])
%               Deng Cai (dengcai AT gmail DOT com)

res = repmat(model.intercept', size(A,1), 1);

tmp_T = tic;
Ym = A * model.U';
B = (Ym > res);
elapse = toc(tmp_T);

end
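For readers more comfortable with NumPy, the body of the wrapper amounts to the following sketch (the function and variable names are ours; U and intercept correspond to the fields model.U and model.intercept):

import numpy as np

def dsh_compress(A, U, intercept):
    # Binary-code the rows of A: project onto the learned directions in U
    # and threshold each bit against its per-bit intercept.
    return (A @ U.T > intercept).astype(np.uint8)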
[STATEMENT] lemma closed_Collect_imp: "open {x. P x} \<Longrightarrow> closed {x. Q x} \<Longrightarrow> closed {x. P x \<longrightarrow> Q x}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>open {x. P x}; closed {x. Q x}\<rbrakk> \<Longrightarrow> closed {x. P x \<longrightarrow> Q x} [PROOF STEP] unfolding imp_conv_disj [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>open {x. P x}; closed {x. Q x}\<rbrakk> \<Longrightarrow> closed {x. \<not> P x \<or> Q x} [PROOF STEP] by (intro closed_Collect_disj closed_Collect_neg)
%% Decision Procedure %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{The BLT Decision Procedure} \label{sec:dp} To describe our decision procedure, we assume that we are attempting to check whether the constraints below are satisfiable: % \begin{equation} \label{eq:prob-matrix} \v{l} \le \mat{A} \v{x} \le \v{u}. \tag{P1} \end{equation} % We let $n$ denote the number of free integer variables in $\v{x} \in \ZZ^n$, and let $m$ denote the number of constrained linear forms. The coefficients are rational, so $\mat{A} \in \QQ^{m \times n}$ and $\v{l},\v{u} \in \QQ^m$. Without loss of generality we assume that $n \le m$ and that $\mat{A}$ has rank $n$ (full rank). In case the problem at hand is such that $n > m$ and/or that $\mat{A}$ is less than full rank, one can compute a basis of the column space, say $\mat{A}'$, that meets the requirement (cf. \cite{Cohen}, \S~2.7.1). The new system $\v{l} \le \mat{A}' \v{y} \le \v{u}$ is equisatisfiable with the original and solutions of the new system determine one or more solutions of the original. Geometrically, we can think of $\v{l}$ and $\v{u}$ as opposite corners of an $m$-dimensional hyperrectangle defined by % \[ \Co := \{ \v{z} \in \RR^m \mid \v{l}_i \le \v{z}_i \le \v{u}_i \}. \] % We refer to this as the \emph{constraint set} of the problem. Without loss of generality we may scale the rows of \eqref{eq:prob-matrix} so that the width of $\Co$ is the same along every axis, i.e. % \begin{equation} \label{eq:cube} \v{u}_i - \v{l}_i = \v{u}_j - \v{l}_j \quad \forall \, i,j \in \{1,\ldots,m\}. \end{equation} % Note that this transformation makes $\Co$ a \emph{hypercube}. We let $d_\Co$ denote the common width, and let $r_\Co = d_\Co/2$ denote the corresponding radius of the hypercube. The problem given by \eqref{eq:prob-matrix} can also be characterized as trying to find a common point in both a hypercube and a lattice. The columns of $\mat{A}$, regarded as vectors, generate a lattice. % Let $\{\v{b}_1, \v{b}_2, \ldots, \v{b}_n\}$ denote the column vectors of $\mat{A}$ and define: % \begin{equation} \label{eq:lattice-def} \La := \left\{ a_1 \v{b}_1 + \cdots + a_n \v{b}_n \in \RR^m \mid a_i \in \ZZ \right\} \end{equation} % By our assumption that $\mat{A}$ has full rank, $\{\v{b}_1, \ldots, \v{b}_n\}$ are linearly independent, and there is a one-to-one correspondance between elements in $\La \cap \Co$ and satifying assignments to~\eqref{eq:prob-matrix}. % It follows that checking the satisfiability of~\eqref{eq:prob-matrix} is equivalent to deciding whether $\La \cap \Co$ is non-empty. The set $\La \cap \Co$ is guaranteed to be finite as a lattice must have a finite number of elements in any space with a bounded volume. % Due to the one-to-one correspondance, the number of solutions to~\eqref{eq:prob-matrix} must be finite as well, and hence a procedure capable of enumerating the elements in $\La \cap \Co$ can be used as a decision procedure for checking the satisfiability of~\eqref{eq:prob-matrix}. Before describing such a procedure, we note that a nice feature of the lattice and hypercube formulation is that it provides a simple way to estimate the number of satisfying assignments. The \emph{volume} of a lattice $\La$ can be defined in several ways (see \cite{Lenstra}, \S5), but the simplest computationally is $\vol(\La) = \abs{\det{\m{B}}}$ where the columns of $\m{B}$ generate $\La$. Then, the number of elements of $\La \cap \Co$ is approximately $\vol{\Co}\:{/}\:\vol{\La}$. 
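For a concrete feel, consider a small hypothetical instance (not one of the benchmarks discussed later) with $m = n = 2$:
%
\[ \mat{A} = \begin{pmatrix} 2 & 1 \\ 1 & 3 \end{pmatrix}, \qquad \v{l} = (-4, -4), \qquad \v{u} = (4, 4). \]
%
Here $\vol(\Co) = 8 \cdot 8 = 64$ and $\vol(\La) = \abs{\det \mat{A}} = 5$, so the estimate predicts roughly $64/5 \approx 13$ elements of $\La \cap \Co$, i.e.\ about thirteen satisfying assignments.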
We will use this to compute the number of expected solutions for the JPEG preimage problems described in the Section~\ref{sec:jpeg}. % We will lateFigure %\ref{fig:solution_count} shows a plot of this estimate for a specific family %of problems. %% Decision Procedure %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Enumerating Lattice Elements} \label{ssec:dp} % What I want to say... % Our algorithm is a search over the tree of partial assignments $x_1 = a_1, x_2 % = a_2, \ldots, x_j = a_j$. We use a \emph{best-first} search strategy, where % "best" at each step means making the assignment to the next variable % such that the resulting layer is the \emph{closest} one to center among the % remaining possibilities. The best-first search is also pruned by keeping track % of a best known upper bound on the distance of a closest lattice vector to % center. Layers whose distance to center exceeds the known upper bound are not % examined. % % In section \ref{ssec:cvp-inf}, we discuss a slightly more general family of % search strategies and show that they have good properties. After that we give % some details about our implementation. % We now turn our attention to the problem of enumerating the elements $\La \cap \Co$. Recall that, without loss of generality, we have taken $\Co$ to be a hypercube. Let $\v{p}$ denote the geometric center of $\Co$: % \begin{equation} \label{eq:center} \v{p} := (\frac{\v{l}_1 + \v{u}_1}{2}, \ldots, \frac{\v{l}_m + \v{u}_m}{2}). \end{equation} With respect to the $\linf$ metric, $\Co$ is a closed ball of radius $r_\Co$, centered at $\v{p}$, and hence the elements in $\La \cap \Co$ are precisely those that are at most a distance $r_\Co$ from $\v{p}$. Algorithms for finding lattice elements that are close to a given point have been extensively studied and many algorithmic approaches to it exist; see \cite{AgrellEtAl} for a good survey. % We have developed a complete search procedure by adapting the Schnorr-Euchner algorithm for computing the closest vector point to a lattice~\cite{Schnorr-Euchner}. %In adapting the algorithm, we have made three changes: (1) We use the $\linf$ %metric instead of the Euclidean $\ltwo$ %metric; (2) rather than compute the closest element, we immediately return %when we find an element within the hypercube; and (3) we prune search paths %when the $R_\Co$ %Our adaption %$\linf$, both because of its performance characteristics and the ease with %which we could change metrics. % In particular, $\v{z} \in \Co$ if and only if $\norminf{z - p} \le r_{\Co}$. %In the algorithm below we use \proc{Center} to denote the calculation %\eqref{eq:center}. %Now, suppose we have a procedure $\proc{Closest}_\infty$ which takes as input %a lattice $\La \subset \RR^m$, a point $\v{q} \in \RR^m$, and returns a %closest lattice vector to $\v{q}$, not in the usual $L^2$ metric, but in the %$\linf$ metric. Assuming this for the moment, we arrive at our decision %procedure for \eqref{eq:prob-matrix}. %\begin{algorithm}[H] %\SetLine % \KwIn{a lattice $\La \subset \RR^m$ and a hypercube $\Co$} % \KwOut{``SAT'' and a $\v{z} \in \La \cap \Co$, or ``UNSAT''} % $\v{p} \leftarrow \proc{Center}(\Co)$\; % $\v{r} \leftarrow \proc{Closest}_\infty(\La, \v{p})$\; % \eIf{$\v{r} \in \Co$}{ % \KwRet{$(\text{SAT}, \v{r})$}\; % }{ % \KwRet{UNSAT}\; % } % \caption{Decision procedure for lattice points in a hypercube} %\end{algorithm} %The first branch is obviously correct. 
For the alternative, simply observe %that if the closest lattice vector to $\v{p}$ in the $\linf$ metric is not %contained in $\Co$ then there can be no lattice vector in $\Co$ (any such %vector would be strictly closer to $\v{p}$). %The utility of this simple procedure obviously hinges on %$\proc{Closest}_\infty$. %\subsection{Closest Vector Search with Infinity Norm} %\label{ssec:cvp-inf} %The closest vector problem has been extensively studied and many algorithmic %approaches to it exist; see \cite{AgrellEtAl} for a good survey. In our %implementation BLT we have chosen to adapt the Schnorr-Euchner strategy for %$\linf$, both because of its performance characteristics and the ease with %which we could change metrics. \newcommand{\ruleref}{\textbf{Split}} We model our search procedure as a non-deterministic transition rule on partial assignments to the vectors $\v{x}$. The procedure begins with the empty assignment $\emptyset$, and incrementally assigns values to variables in $\v{x}$. If the transition rule terminates with a complete assignment $\v{u} \in \ZZ^n$, then $\v{u}$ is a solution to the constraint problem. % \begin{equation} %\label{eq:rules-split} %\tag{\textbf{Split}} \mathbf{(Split)} \hspace{20pt} \theta \ \Rightarrow \ \theta \cup \{\,j \mapsto s\,\} \ \textbf{where}\,\begin{cases} j \in \{\,1, \ldots, n\,\} \setminus \fn{dom}(\theta)\\ s \in \ZZ\ \textbf{s.t.} \ \La^\RR_{\theta \cup \{\,j \mapsto s\,\}} \cap \Co \neq \emptyset \end{cases} \end{equation} This rule takes a partial assignment $\theta$, and extends it with an additional binding $j \mapsto s$ such that the real-affine linear space $\La^\RR_{\theta \cup \{\,j \mapsto s\,\}}$ of the resulting assignment intersects with $\Co$. This rule models backtracking implicitly; at each step, we may find that there is no legal value $s$ to assign $j$. If this occurs, our procedure must backtrack to a previous step, and explore an alternative assignment. We can show that the set of lattice points in $\Co$ can be enumerated by applying~\ruleref{} transitively starting from $\emptyset$. % Before stating the theorem, we first observe that each point $\v{x} \in \La$ can be expressed as the weighted sum of the columns in $\mat{A}$, (i.e.,~$\v{x} = \mat{A}\v{u}$ for some unique $\v{u} \in \ZZ^n$). % \begin{thm} For each vector $\v{u} \in \ZZ^n$, $\mat{A} \v{u} \in \Co$ iff. there is a derivation $\emptyset \Rightarrow^{+} \v{u}$. \end{thm} \begin{proof} % To see that $\emptyset \Rightarrow^{+} \v{u}$ implies $\mat{A} \v{u} \in \Co$, observe that the proceeding step must have shown that $\La^\RR_{\v{u}} \cap \Co \neq \emptyset$. Since $\v{u}$ is a complete assignment, $\La^\RR_{\v{u}} = \{\,\mat{A} \v{u}\,\}$, and hence $\mat{A} \v{u} \in \Co$. To see that $\mat{A} \v{u} \in \Co$ implies $\emptyset \Rightarrow^{+} \v{u}$, observe that for all partial assignments $\theta$, $\La^\RR_\theta \cap \Co \neq \emptyset$ implies that $\emptyset \Rightarrow^{+} \theta$ by induction on the number of bindings in $\theta$. % For a complete assignment $\v{u}$, $\La^\RR_{\v{u}} = \{\,\mat{A} \v{u}\}$. Hence, if $\mat{A} \v{u}$ is in $\Co$, then $\La^\RR_{\v{u}}$ is a non-empty subset of $\Co$ and $\varnothing \Rightarrow^{+} \v{u}$. \end{proof} % We note that the above theorem holds regardless of the order in which we choose the values of $j$ in~\ruleref{}. We only need to consider all valid assignments to $s$ once we have chosen $j$. An implementation then has a choice in which it can use different heuristics to search for an assignment. 
% We will briefly describe the heuristics used by BLT in Section~\ref{ssec:blt-optimizations}. The previous theorem shows that the transition rule is sound and complete from a logical point of view, to show that it is computable we prove the following: \begin{thm} The set of partial assignments $\theta$ such that $\emptyset \Rightarrow^{+} \theta$ is finite and computable. \label{thm:computable} \end{thm} \begin{proof} As each application of~\ruleref{} adds an additional binding to the substitution, the number of applications along any path is bounded by $n$. To show that the set of $\theta$ is finite, we must show that the number of potential values of $s$ used to instantiate~\ruleref{} is both finite and computable. More precisely, we must prove that there are at most a finite number of integers $s \in \ZZ$ such that \begin{equation} \La^\RR_{\theta \cup \{\,j \mapsto s\,\}} \cap \Co \neq \emptyset. \label{eq:extend_theta} \end{equation} % Observe that for any $u,k \in \ZZ$ with $k \neq 0$, the affine set $\La^\RR_{\theta \cup \{\,j \mapsto u+k\,\}}$ can be obtained by shifting the set $\La^\RR_{\theta \cup \{\,j \mapsto u\,\}}$ by a multiple $k$ of the basis vector $\v{b}_j$. As $\v{b}_j$ is linearly independent from the other basis vectors, it follows that $\La^\RR_{\theta \cup \{\,j \mapsto u\,\}}$ and $\La^\RR_{\theta \cup \{\,j \mapsto u+k\,\}}$ are disjoint and separated by some positive distance $k \times d_{\theta,j}$ where $d_{\theta,j}$ is the distance between the adjacent hyperplanes $\La^\RR_{\theta \cup \{\,j \mapsto 0\,\}}$ and $\La^\RR_{\theta \cup \{\,j \mapsto 1\,\}}$ % As the distance between any two points $\v{x}, \v{y} \in \Co$ is at most $d_\Co$, it follows that the number of distinct $s$ satisfying~\eqref{eq:extend_theta} is at most $d_\Co / d_{\theta,j}$. Moreover, as both $\Co$ and $\La^\RR_{\theta \cup \{\,j \mapsto s\,\}}$ are convex, there must be a bounded interval $s \in \{\,l,l+1,\dots, u-1, u\,\}$ of values satisfying~\eqref{eq:extend_theta}. Rather than compute the bounds explicitly, we compute the $\linf$-distance between $\La^\RR_\theta$ and the point $\v{p}$ at the center of $\Co$ using the reduction to linear programming described at the end of the Section~\ref{sec:preliminaries} in equation~\eqref{eq:set_distance}. % This reduction to linear programming allows one to find an assignment $\v{y} \in \RR^{n}$ so that $\mat{A} \v{y}$ is one of the points in $\La^\RR_\theta$ with minimal distance to $\v{p}$. We can then start by considering for $s$ the points $\{\,\floor{\v{y}_j},\,\floor{\v{y}_j-1},\dots\,\}$ and $\{\,\ceil{\v{y}_j},\,\ceil{\v{y}_j+1},\dots\,\}$ until we have explored all the assignments in the set $\{\,l,l+1,\dots, u\,\}$. % \end{proof} %\proc{Split} says we can take a sublayer in $S$ and decompose it into its %component sub-sublayers by choosing an unassigned index $j$. The assignments %$s_i$ are taken to be precisely those for which $d_\infty(\La^\RR_{I \cup (j, % s_i)}, p) < r$. This is always a finite set of consecutive integers. % See figure \ref{fig:layers} for illustration. % %\proc{Prune} removes sublayers that we know have no points closer to $\v{p}$ %than $r$. % %Finally, \proc{Satisfiable} applies to 0-dimensional sublayers, i.e. lattice %points. If $\La_I = \{ \v{y} \}$ and the distance from $\v{y}$ to $\v{p}$ is %less than $r_\Co$, we have found a satisfying assignment $I$. 
% \begin{figure} % \centering % \vspace{0.1cm} % \includegraphics[width=0.48\textwidth]{lattice-layers} % \caption{\proc{Split} $\La_I$ into sublayers} % \label{fig:layers} % \end{figure} %Starting at some initial state $S_0$, a \emph{derivation} in this system is a %sequence of transitions $S_0 \Rightarrow S_1 \Rightarrow \cdots$ using any of %the three rules as long as their precondition applies. We call any state of %the form $(\emptyset, r, \v{z})$ a final state. % %\begin{lem} % \label{lem:search} % The transition system described above always terminates in a final state, i.e. % \begin{enumerate} % \item every derivation is finite, % \item the only states in which no rule applies are final states. % \end{enumerate} %\end{lem} % %\begin{proof} %Consider the function $\nu$ which maps states to $n+1$-tuples of natural %numbers: $\nu(S, r, \v{z})_i = \#\{\La_I \in S \mid \abs{I}=i \}$ for $i=0,\ldots,n$. %We claim that all three transition rules cause $\nu$ to strictly decrease in %the lexicographic order. For \proc{Prune} and \proc{Record} this is obvious. %Applying $\proc{Split}_{j,I}$ causes $\nu_{\abs{I}}$ to decrease by 1 and %$\nu_{\abs{I}+1}$ to increase (by an amount bounded above by a constant %multiple of $r$). Hence $\nu$ strictly decreases in lexicographic order and so %every derivation is finite. % %For (2), suppose to the contrary $(S, r, \v{z})$ is a state such that $S \ne %\emptyset$ but none of the transition rules apply. Choose a $\La_I \in S$. If %$\abs{I} < n$ then $\proc{Split}_{j,I}$ applies for some $j$. Otherwise, if %$\abs{I} = n$ then either $d_\infty(\La^\RR_I, \v{p}) \ge r$, in which case %$\proc{Prune}_I$ applies, or the opposite holds, in which case %$\proc{Record}_I$ applies. Thus we have a contradiction. %\end{proof} % %\begin{thm} % \label{thm:closest} % Let $\v{z}_0$ be an arbitrary lattice vector and $r_0 = d_\infty(\v{z}_0, % \v{p})$. Then any derivation starting with initial state % $(\{\La_\emptyset\}, r_0, \v{z}_0)$ terminates at a final state $(\{\}, % r_f, \v{z}_f)$ in which $\v{z}$ is a closest vector in $\La$ to $\v{p}$. %\end{thm} % %\begin{proof} %Assume to the contrary that there is a \emph{closer} lattice vector %$\tz$. Clearly $\tz \in \La_\emptyset = \La$. Further, if %we are at a state $(S, r, \v{z})$ in which $\proc{Split}_{j,I}$ applies and $\tz \in %\La_I$, then there is a new sublayer produced by $\proc{Split}$ that contains %$\tz$. This follows from the definition of \proc{Split}, since $\tz$ is in %\emph{some} sublayer of $\La_I$, say $\La_{I \cup (j,s)}$ and %$d_\infty(\La^\RR_{I \cup (j,s)}, \v{p}) \le d_\infty(\tz, \v{p}) \le r$, the %latter inequality following from our assumption that $\tz$ is closer than the %final vector $\v{z}_f$ and hence than $\v{z}$. Similarly it should be clear %that if at any state, $\tz \in \La_I$ and $\La_I \in S$, then $\proc{Prune}_I$ %does not apply. %The above argument implies that at some point in our derivation, the %$0$-dimensional sublayer $\La_J = \{ \tz \}$ must appear. The only rule that %removes it is $\proc{Record}_J$. Hence, $d_\infty(\tz, \v{p}) \le r_f = %d_\infty(\v{z}_f, \v{p})$, a contradiction. %\end{proof} % The Schnorr-Euchner strategy is best understood as a recursive % search operation. Recall the problem is to find a $\v{z} \in \La$ such that % $\norminf{\v{z}-\v{p}}$ is minimal. % % % The main idea in the Schnorr-Euchner search strategy is to recursively search % for a closet vector in a finite number of the layers, ordered by increasing % distance from the target point. 
First we discuss which layers are searched and % then how to compute the $\linf$ distance from the target point to a layer. % % Suppose for a moment that an upper bound $\rho$ is known on the distance from a % closest vector to $\v{p}$. In this case it suffices to search a finite number % of consecutive layers. % % \begin{lemma} % \label{lem:layers-to-search} % If $d(\La, p) \le \rho$ then a closest vector is contained in some layer % $\mathcal{Y}_a$ such that $\mathcal{Y}^\RR_a \cap H \ne \emptyset$ and % moreover $\{ a \in \ZZ \mid \mathcal{Y}^\RR_a \cap H \ne \emptyset \}$ is % bounded set of consecutive integers. % \end{lemma} % \begin{proof} % TODO % \end{proof} % The order in which layers are searched makes a significant difference in % practice. We discuss the choice our implementation makes at the end of the % section. \subsection{Implementation Decisions} \label{ssec:blt-optimizations} Turning the previous section into a working and efficient procedure involves many more choices and details than we have room to describe. We would like to indicate, however a couple choices we have made in implementing BLT. \textbf{Search Strategy.} % In implementing the transition system, we have chosen to adopt a strategy similar to Schnorr and Euchner in~\cite{Schnorr-Euchner}. % We use the LLL algorithm~\cite{Lenstra} to generate a reduced basis, and fix the basis vectors by sorting in order of decreasing $L^2$ magnitude. % We then proceed by applying \ruleref{} in a depth first order with the sequence of $j$'s chosen according to our basis order. % The variables with the largest magnitude are typically the most-constrained variables in our problems, as they have the largest distance between adjacent sublayers. % Choosing the most constrained variable is a common strategy in constraint satisfication, and we have found the strategy effective in this case as well. The other choice we have with split is to consider which values of $s$ to explore. To maximize the likelihood of finding a satisfying assignment, we would like to choose a value for $s$ that imposes the least constraints on subsequent assignments. This could be done by choosing an assignment to $s$ that maximizes the volume of the intersection between the hypercube $\Co$ and real-affine set $\La^\RR_{\theta \cup \{\,j \mapsto s\,\}}$. Unfortunately, we do not know of an efficient way to compute the $s$ with the maximal volume\footnote{In~\cite{dyer_freize88}, the authors show that the related problem of computing the volume of the intersection of the unit cube and a rational halfspace is \#P-hard.}, but we have developed a proxy that works well in practice. % As alluded to in the proof of Theorem~\ref{thm:computable}, we use linear programming to find an initial assignment to $s$ that minimizes the $\linf$-distance between the center of the hypercube $\v{p}$ and $\La^\RR_{\theta \cup \{\,j \mapsto s\,\}}$. % Since the distance between the sublayer and center point is minimal, we can expect that the volume of the sublayer within the hypercube should be maximal or near maximal. If this assignment is found infeasible, and we backtrack, then we explore adjacent assignments $s + \delta, s - \delta, s + 2\delta, \ldots{}$, where $\delta = \pm 1$ depending on orientation, in order of increasing distance. %In implementing the transition system, %we have chosen to adopt a strategy similar to Schnorr and Euchner in %\cite{Schnorr-Euchner}. 
After choosing a lattice basis, we fix an ordering of %the basis vectors and proceed by applying $\proc{Split}_{j,-}$ for the %sequence of $j$'s corresponding to our basis order. After splitting say %$\La_I$ into $\La_{I \cup (i,a_1)}, \ldots, \La_{I \cup (i,a_k)}$ we choose %the sublayer among these that is closest to $\v{p}$ and apply \proc{Split} %there. Thus we have a best-first search that always follows the closest %sublayer first. In this way we quickly reach a $0$-dimensional sublayer, %namely a lattice point, and apply \proc{Record}. This point is known as the %\emph{Babai point} in the literature and gives us a good starting $r$ and %$\v{z}$ value for our state. %After reaching the Babai point, we backtrack to the $1$-dimensional layer it %came from and decide to either \proc{Prune} or \proc{Record} at the other %sublayers there, doing so in order of increasing distance from $\v{p}$ %\footnote{If the closest sublayer was $\La_{I \cup (j,s)}$ and $\v{p}$ is %``above'' it with respect to the direction of $\v{v}_j$, then one can show %that the assignments $(s_i) = (s, s+1, s-1, s+2, \ldots)$ enumerate the %sublayers in order of increasing distance from $\v{p}$.}. The advantage %of making this choice is that when we hit a sublayer that \proc{Prune} applies %to, then we infer that all the other sublayers at this level can be pruned and %we jump up another level. %Our implementation deviates slightly from the transition system %described here in order to make some crucial optimizations. When solving a %bounded integer constraint problem, the goal is to find a lattice vector %satisfying the constraints. Accordingly, in our $\proc{Closest}_\infty$ %implementation we check at each application of \proc{Record} whether the new %point satisfies the constraints and if it does we return early as there is no %need to find the closest solution. In the problems we've studied, early exit %like this saves an enormous amount of time, in particular when the Babai point %mentioned above already satisfies the constraints. %Finally, note that we've described $\proc{Closest}_\infty$ as starting with %the state $(\La_\emptyset, r_0, \v{z}_0)$ for some $\v{z}_0 \in \La$ and $r_0 = %d_\infty(\v{z}_0, \v{p})$. However, if our constraint set $\Co$ is a hypercube %with radius $r_\Co$, it's obviously better to start with $r_0 = r_\Co$ as this %is likely to be a much tighter upper bound. Some of the arguments above need %to be modified to account for this, but the performance gain is significant. % \paragraph{Lattice Basis.} In section \ref{sec:bcp} we hinted % choosing a good lattice basis to work with is important. For solving the % problems presented in section \ref{sec:jpeg}, it is essential. As a % pre-processing step in BLT we compute a reduced lattice basis once and for all % using Algorithm 2.6.3 of \cite{Cohen} as implemented in \cite{NTL}. This is % done \emph{after} the hypercube scaling since, depending on the nature of the constraints % present, this step can make the lattice matrix quite non-orthogonal. %Joe \textbf{Layer-point distance.} % Due to efficiency concerns as well as implementation issues with linking GMP with other Haskell code that BLT is linked against, we compute the distance using a conventional linear programming solver, GLPK \cite{GLPK}, which uses IEEE double precision floating point for its calculations. 
If the distance calculation is inaccurate, a sublayer may be mistakenly judged to lie slightly too far away and be pruned, in which case BLT could incorrectly return UNSAT. When BLT returns SAT, the model is checked against the original problem, so SAT answers are certain. In principle the distance calculations could be done using exact arithmetic\footnote{GLPK supports this directly.} or arbitrary-precision floating-point arithmetic, but we have not attempted this yet.
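To make the enumeration of Section~\ref{ssec:dp} concrete, the following is a minimal Python sketch of a \ruleref{}-style depth-first search. It is illustrative only: it fixes the variable order, obtains the feasible interval for each new assignment from an LP relaxation via \texttt{scipy}, and omits BLT's LLL basis reduction, $\linf$ distance pruning, and exact arithmetic.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def enumerate_solutions(A, l, u):
    # Enumerate all integer x with l <= A x <= u (A full column rank, box bounded).
    n = A.shape[1]
    A_ub = np.vstack([A, -A])
    b_ub = np.concatenate([u, -l])

    def interval(j, fixed):
        # Feasible real interval for x_j once x_i (i in `fixed`) are pinned.
        bnds = [(fixed[i], fixed[i]) if i in fixed else (None, None)
                for i in range(n)]
        c = np.zeros(n); c[j] = 1.0
        lo = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method="highs")
        hi = linprog(-c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method="highs")
        if not (lo.success and hi.success):
            return None                        # prune: relaxation infeasible
        return int(np.ceil(lo.x[j] - 1e-9)), int(np.floor(hi.x[j] + 1e-9))

    def split(fixed):                          # the Split rule, depth first
        if len(fixed) == n:
            yield np.array([fixed[i] for i in range(n)])
            return
        j = len(fixed)
        iv = interval(j, fixed)
        if iv is not None:
            for s in range(iv[0], iv[1] + 1):
                yield from split({**fixed, j: s})

    return list(split({}))

# Example: the hypothetical 2x2 instance used for the volume estimate above.
A = np.array([[2.0, 1.0], [1.0, 3.0]])
print(len(enumerate_solutions(A, np.array([-4.0, -4.0]), np.array([4.0, 4.0]))))
\end{verbatim}
Running it on the $2 \times 2$ instance above reports the exact number of solutions, which can be compared against the $\vol{\Co}\:{/}\:\vol{\La}$ estimate.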
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef SDEXT_PRESENTER_PRESENTER_PANE_ANIMATOR_HXX #define SDEXT_PRESENTER_PRESENTER_PANE_ANIMATOR_HXX #include <com/sun/star/awt/Point.hpp> #include <com/sun/star/awt/Rectangle.hpp> #include <com/sun/star/awt/XWindow.hpp> #include <com/sun/star/drawing/framework/XResourceId.hpp> #include <com/sun/star/geometry/RealPoint2D.hpp> #include <com/sun/star/rendering/XBitmap.hpp> #include <com/sun/star/rendering/XSprite.hpp> #include <com/sun/star/rendering/XSpriteCanvas.hpp> #include <com/sun/star/uno/XComponentContext.hpp> #include <rtl/ref.hxx> #include <vector> #include <boost/function.hpp> #include <boost/noncopyable.hpp> #include <boost/shared_ptr.hpp> namespace css = ::com::sun::star; namespace sdext { namespace presenter { class PresenterController; class PresenterPaneContainer; class PresenterWindowManager; /** Base class for different types of pane animations. Each of these animations either shows or hides a single pane. */ class PresenterPaneAnimator : private ::boost::noncopyable { public: virtual void ShowPane (void) = 0; virtual void HidePane (void) = 0; protected: virtual ~PresenterPaneAnimator (void) {}; }; typedef ::std::vector< ::boost::function<void()> > EndActions; ::boost::shared_ptr<PresenterPaneAnimator> CreateUnfoldInCenterAnimator ( const css::uno::Reference<css::drawing::framework::XResourceId>& rxPaneId, const ::rtl::Reference<PresenterController>& rpPresenterController, const bool bAnimate, const EndActions& rShowEndActions, const EndActions& rEndEndActions); ::boost::shared_ptr<PresenterPaneAnimator> CreateMoveInFromBottomAnimator ( const css::uno::Reference<css::drawing::framework::XResourceId>& rxPaneId, const ::rtl::Reference<PresenterController>& rpPresenterController, const bool bAnimate, const EndActions& rShowEndActions, const EndActions& rEndEndActions); ::boost::shared_ptr<PresenterPaneAnimator> CreateTransparentOverlay ( const css::uno::Reference<css::drawing::framework::XResourceId>& rxPaneId, const ::rtl::Reference<PresenterController>& rpPresenterController, const bool bAnimate, const EndActions& rShowEndActions, const EndActions& rEndEndActions); } } #endif
{-# LANGUAGE FlexibleContexts #-} import Data.Function import Data.Fixed import Numeric.LinearAlgebra grid :: Integer -> Matrix Double grid n = build (300, 300) (builder `on` (+1) . round) where builder x y = let id = x + 10 in fromInteger . subtract 5 $ (id * y + n) * id `mod` 1000 `div` 100 main = getContents >>= putStrLn . (\(x, y) -> show (x+1) ++ "," ++ show (y+1)) . maxIndex . corr2 (konst 1 (3, 3)) . grid . read
State Before: α : Type u_1 β : Type ?u.84659 γ : Type ?u.84662 inst✝ : MeasurableSpace α p : Pmf α s t : Set α hs : MeasurableSet s hp : MeasurableSet (support p) ⊢ ↑↑(toMeasure p) (s ∩ support p) = ↑↑(toMeasure p) s State After: no goals Tactic: simp [p.toMeasure_apply_eq_toOuterMeasure_apply s hs, p.toMeasure_apply_eq_toOuterMeasure_apply _ (hs.inter hp)]
module UnSizedIO.Object where open import Data.Product record Interface : Set₁ where field Method : Set Result : (m : Method) → Set open Interface public -- A simple object just returns for a method the response -- and the object itself record Object (i : Interface) : Set where coinductive field objectMethod : (m : Method i) → Result i m × Object i open Object public
Formal statement is: lemma support_on_cong: "(\<And>x. x \<in> S \<Longrightarrow> f x = 0 \<longleftrightarrow> g x = 0) \<Longrightarrow> support_on S f = support_on S g" Informal statement is: If two functions vanish at exactly the same points of a set $S$, then they have the same support on $S$.
If $f$ is $C$-Lipschitz on $U$, then $a f$ is $(a C)$-Lipschitz on $U$ for any $a \geq 0$.
```python %matplotlib inline import matplotlib.pyplot as plt import numpy as np ``` ```python # Helper function to draw regression line def draw_result(x, y, xfit, yfit, cost=None, x_predict=None): plt.figure(figsize=(10,5)) ax = plt.gca() ax.grid(color='#b7b7b7', linestyle='-', linewidth=0.5, alpha=0.5) plt.scatter(x,y, color='#333333', alpha=0.7) plt.plot(xfit,yfit, color='#333333') if x_predict: ax.axvline(x_predict, color='#121212', linestyle='--', linewidth=1, alpha=0.9) plt.scatter(x_predict,yfit[x_predict*100], s=100, c='#212121', alpha=0.7) if cost: ax.text(0, 10, f'error = {cost:.2f}',fontsize=12,color='#000000') plt.show() ``` ```python # generate random data sample_size = 50 rng = np.random.RandomState(1) # generate input and output data with shape = (SAMPLE, FEATURE) x = np.array([10 * rng.rand(sample_size)]).T y = 2 * x - 5 + np.array([rng.rand(sample_size)]).T ``` To predict the value of the incoming points, the simple solution is to approximate $y$ as a continuous linear function of $x$: \begin{equation} \hat{y} = f(x, \mathbf{w}) = \omega_0 + \omega_1x \end{equation} Objective: find $\mathbf{w}$ which minimizes the error. \begin{equation} J(\mathbf{w}) = \frac{1}{2N}\sum_{i=1}^{N}(f(x_i, \mathbf{w}) - y_i)^2 \end{equation} Let's start with random values of $\omega_0$ and $\omega_1$. ```python # initialize weights with shape = (INPUT NODES, OUTPUT NODES) weights = np.array([10 * rng.rand(2)]).T xfit = np.linspace(0, 10, 1000) yfit = weights[0] + weights[1] * xfit draw_result(x, y, xfit, yfit) ``` ## Linear regression Optimize the fit line with linear regression. ### 1. Feedforward \begin{equation} \mathbf{\hat{y}} = XW \end{equation} ### 2. Compute cost function \begin{equation} J(\mathbf{y}, \mathbf{\hat{y}}) = \frac{1}{2m}(\mathbf{\hat{y}} - \mathbf{y})^T(\mathbf{\hat{y}} - \mathbf{y}) \end{equation} ### 3. Backpropagation \begin{equation} \frac{\partial J(\mathbf{y}, \mathbf{\hat{y}})}{\partial W} = \frac{\partial J(\mathbf{y}, \mathbf{\hat{y}})}{\partial \mathbf{\hat{y}}} \cdot \frac{\partial \mathbf{\hat{y}}}{\partial W} = \frac{1}{m}X^T(\mathbf{\hat{y}} - \mathbf{y}) \end{equation} ### 4. Gradient descent \begin{equation} W = W - \alpha \frac{\partial J(\mathbf{y}, \mathbf{\hat{y}})}{\partial W} \end{equation} ```python class LinearRegression: """ Simple linear regression """ def __init__(self): pass def _init_params(self, x, y, iterations, learning_rate, reg_factor): """ Initialize parameters. Attributes ---------- weights_ : ndarray, shape (n_features+1, 1) Coefficient vector (bias term first) """ self._X = np.hstack([np.ones((x.shape[0], 1)), x]) self._y = y self._learning_rate = learning_rate self._reg_factor = reg_factor self.weights_ = np.random.rand(self._X.shape[1],1) self.costs_ = np.zeros(iterations) def _feedforward(self): """ Computes np.dot(X, W). """ self._y_hat = self._X.dot(self.weights_) def _backprop(self): """ Update weights. """ m = len(self._y) # update weights with L2 regularization term (learning rate scales the full gradient) _weights = self.weights_.copy() # ignore bias term _weights[0, 0] = 0 self.weights_ -= self._learning_rate * (self._X.T.dot(self._y_hat - self._y)/m + self._reg_factor/m*_weights) def _get_cost(self): """ Compute loss. """ m = len(self._y) errors = self._y-self._y_hat # cost function with L2 regularization term _weights = self.weights_.copy() # ignore bias term _weights[0, 0] = 0 return 0.5/m * errors.T.dot(errors) + self._reg_factor/(2*m)*_weights.T.dot(_weights) def fit(self, x, y, iterations=1000, learning_rate=0.02, reg_factor=0.5): """ Fit model. Parameters
---------- x : ndarray, shape (n_samples, n_features) Training data y : ndarray, shape (n_samples,) Target data """ self._init_params(x, y, iterations, learning_rate, reg_factor) # train model for i in range(iterations): self._feedforward() self._backprop() self.costs_[i] = self._get_cost() ``` ```python # Linear function lr = LinearRegression() lr.fit(x, y) Xfit = np.vstack((np.ones((xfit.shape[0], )), xfit)).T yfit = Xfit.dot(lr.weights_) draw_result(x, y, xfit, yfit, cost=lr.costs_[-1], x_predict=6) ``` ```python # Polynomial function x_ = np.hstack([x, x**2]) y_ = 2 * x + 0.6 * x**2 - 0.08 * x**3 - 5 + np.array([rng.rand(sample_size)]).T lr_ = LinearRegression() lr_.fit(x_, y_, iterations=50000, learning_rate=0.001, reg_factor=0.005) Xfit_ = np.vstack([np.ones((xfit.shape[0], )), xfit, xfit**2]).T yfit_ = Xfit_.dot(lr_.weights_) draw_result(x, y_, xfit, yfit_, cost=lr_.costs_[-1], x_predict=6) ```
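As a quick sanity check on the gradient-descent fit above, the unregularized least-squares weights can also be computed in closed form from the normal equations. The cell below is not part of the original notebook; it assumes the `x`, `y`, and `lr` objects defined earlier and uses `np.linalg.lstsq`. Because the notebook's fit applies a small amount of L2 regularization, the two sets of weights will be close but not identical.

```python
import numpy as np

# Design matrix with a bias column, matching the notebook's convention
X = np.hstack([np.ones((x.shape[0], 1)), x])

# Closed-form least-squares solution of min_w ||X w - y||^2
w_closed, *_ = np.linalg.lstsq(X, y, rcond=None)

print("closed-form weights:     ", w_closed.ravel())
print("gradient-descent weights:", lr.weights_.ravel())
```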
Parents in Oregon can support their children with the help of the online tools and resources provided by the state. They can use the online platform to access their accounts and perform different actions as if they were doing it over the counter at the child support offices, which saves time and resources through the flexible online child support services. Step 2: Enter your case number and date of birth, and then click login. You have to begin somewhere if you want to pay for child support online. Assuming that you have already enrolled for this service, you can proceed to log in and make a payment. Step 2: Enter your username and password, and then click Login. Step 2: Enter your account details and click continue. Step 1: Click the create account space. Step 2: Enter your details as required and then continue. You can log in and perform different activities from your account. Since every account holder has their own account details, you can log in and log out after completing different activities. For example, you can sign in, check when child support is due, make a payment, and log out. With the sign-in account, you will be able to adhere to the child support guidelines and know when payment is due.
\subsection{Scope} \label{sec:intro_scope} This thesis aims to thoroughly describe a financial machine learning pipeline for strategy training and validation. It will be exercised over bitcoin price with a fundamental momentum strategy. Multiple source features will be examined and used: financial, bitcoin and Bitcoin features, social media and structural break features. Feature engineering for time series will be applied and discussed in favor of determining the implications of sample uniqueness and series stationarity. Ensemble tree models will be used, trained and verified via cross validation with sample adjustments in favor of reduced leakage. Strategy and hyperparameter optimization, and feature selection will be conducted prior to back testing. The final stage will assign bet sizes and run back tests with budget metrics to quantitatively determine whether staking, momentum alone or this full strategy is the best one. Figure \ref{fig:pipeline}, which is in section \ref{sec:methods_pipeline}, shows the aforementioned pipeline.
submodule (h5fortran:read) reader_lt implicit none (type, external) contains module procedure h5exist type(hdf5_file) :: h call h%open(filename, status='old', action='r') h5exist = h%exist(dname) call h%close() end procedure h5exist module procedure lt0read @reader_lt_template@ end procedure lt0read module procedure lt1read @reader_lt_template@ end procedure lt1read module procedure lt2read @reader_lt_template@ end procedure lt2read module procedure lt3read @reader_lt_template@ end procedure lt3read module procedure lt4read @reader_lt_template@ end procedure lt4read module procedure lt5read @reader_lt_template@ end procedure lt5read module procedure lt6read @reader_lt_template@ end procedure lt6read module procedure lt7read @reader_lt_template@ end procedure lt7read end submodule reader_lt
If $a \neq 0$, then the image of the ball of radius $r$ centered at $c$ under the map $x \mapsto a \cdot x$ is the ball of radius $\lvert a \rvert \cdot r$ centered at $a \cdot c$.
[STATEMENT] lemma walk_tl: "walk xs \<Longrightarrow> walk (tl xs)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. walk xs \<Longrightarrow> walk (tl xs) [PROOF STEP] by (induct rule: walk.induct) simp_all
#coding:utf-8 import cv2 import math import numpy as np import os import random import os,shutil import xml.etree.cElementTree as ET import sys # Used to filter out or delete specific object labels in VOC annotations counts=0 if __name__=='__main__': image_suff=".jpg" srcGT="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007/Annotations/" srcimage = "/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007/JPEGImages/" desGT="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007/new_gt/" desimage = "/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007/new_image/" tags=["xiaochicun"] pos=1 # 0: keep only the labels listed above; 1: delete the labels listed above for root, dirs, files in os.walk(srcGT): print(len(files)) count = 0 for path in files: fullpath=srcGT+path if os.path.isfile(fullpath) and ".xml" in path: tree = ET.parse(fullpath) root = tree.getroot() r_name=path.replace(".xml","") need_delete = [] count=0 for object in root.getchildren(): if object.tag=="object": count=count+1 name = object.find('name').text if pos==0: if name not in tags: need_delete.append(object) else: if name in tags: need_delete.append(object) if count!=len(need_delete) and count!=0: while len(need_delete) > 0: root.remove(need_delete[0]) need_delete.remove(need_delete[0]) tree2 = ET.ElementTree(root) new_full_path = desGT + r_name print(new_full_path) if os.path.isfile(new_full_path+".xml"): r_name=r_name+"_" new_full_path = desGT + r_name tree2.write(new_full_path+".xml", encoding='utf-8') shutil.copyfile(srcimage+path.replace(".xml",image_suff), desimage+r_name+image_suff) cv2.waitKey(0)
// // keyvi - A key value store. // // Copyright 2015 Hendrik Muhs<[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /* * dictionary_merger_test.cpp * * Created on: Feb 29, 2016 * Author: hendrik */ #include <unordered_set> #include <boost/test/unit_test.hpp> #include "keyvi/dictionary/dictionary.h" #include "keyvi/dictionary/dictionary_merger.h" #include "keyvi/dictionary/dictionary_types.h" #include "keyvi/dictionary/fsa/traverser_types.h" #include "keyvi/testing/temp_dictionary.h" #include "keyvi/util/configuration.h" namespace keyvi { namespace dictionary { BOOST_AUTO_TEST_SUITE(DictionaryMergerTests) BOOST_AUTO_TEST_CASE(MergeKeyOnlyDicts) { std::vector<std::string> test_data = {"aaaa", "aabb", "aabc", "aacd", "bbcd", "aaceh", "cdefgh"}; testing::TempDictionary dictionary(&test_data); std::vector<std::string> test_data2 = {"aaaaz", "aabbe", "cdddefgh"}; testing::TempDictionary dictionary2(&test_data2); keyvi::util::parameters_t merge_configurations[] = {{{"memory_limit_mb", "10"}}, {{"memory_limit_mb", "10"}, {"merge_mode", "append"}}}; for (const auto params : merge_configurations) { DictionaryMerger<> merger(params); std::string filename("merged-dict-key-only.kv"); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("aaaa")); BOOST_CHECK(d->Contains("aabb")); BOOST_CHECK(d->Contains("aabc")); BOOST_CHECK(d->Contains("aacd")); BOOST_CHECK(d->Contains("bbcd")); BOOST_CHECK(d->Contains("aaceh")); BOOST_CHECK(d->Contains("cdefgh")); BOOST_CHECK(d->Contains("aaaaz")); BOOST_CHECK(d->Contains("aabbe")); BOOST_CHECK(d->Contains("cdddefgh")); BOOST_CHECK(!d->Contains("aaab")); BOOST_CHECK(!d->Contains("a")); BOOST_CHECK(!d->Contains("cde")); BOOST_CHECK_EQUAL(0, merger.GetStats().deleted_keys_); BOOST_CHECK_EQUAL(0, merger.GetStats().updated_keys_); BOOST_CHECK_EQUAL(10, merger.GetStats().number_of_keys_); std::remove(filename.c_str()); } } BOOST_AUTO_TEST_CASE(MergeIntegerDicts) { std::vector<std::pair<std::string, uint32_t>> test_data = { {"abc", 22}, {"abbc", 24}, {"abbcd", 444}, {"abcde", 200}, {"abdd", 180}, {"bba", 10}, }; testing::TempDictionary dictionary(&test_data); std::vector<std::pair<std::string, uint32_t>> test_data2 = { {"abbe", 25}, {"abcd", 21}, {"bbacd", 30}, }; testing::TempDictionary dictionary2(&test_data2); std::string filename("merged-dict-int.kv"); DictionaryMerger<dictionary_type_t::INT_WITH_WEIGHTS> merger(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abbcd")); BOOST_CHECK(d->Contains("abcde")); BOOST_CHECK(d->Contains("abdd")); BOOST_CHECK(d->Contains("bba")); BOOST_CHECK_EQUAL("22", 
d->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("24", d->operator[]("abbc").GetValueAsString()); BOOST_CHECK_EQUAL("444", d->operator[]("abbcd").GetValueAsString()); BOOST_CHECK_EQUAL("200", d->operator[]("abcde").GetValueAsString()); BOOST_CHECK_EQUAL("180", d->operator[]("abdd").GetValueAsString()); BOOST_CHECK_EQUAL("10", d->operator[]("bba").GetValueAsString()); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK(d->Contains("bbacd")); BOOST_CHECK_EQUAL("21", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("25", d->operator[]("abbe").GetValueAsString()); BOOST_CHECK_EQUAL("30", d->operator[]("bbacd").GetValueAsString()); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeIntegerDictsValueMerge) { std::vector<std::pair<std::string, uint32_t>> test_data = {{"abc", 22}, {"abbc", 24}}; testing::TempDictionary dictionary(&test_data, false); std::vector<std::pair<std::string, uint32_t>> test_data2 = { {"abc", 25}, {"abbc", 42}, {"abcd", 21}, {"abbc", 30}, }; testing::TempDictionary dictionary2(&test_data2, false); std::string filename("merged-dict-int-v1.kv"); IntDictionaryMerger merger(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK_EQUAL("25", d->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("30", d->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); filename = "merged-dict-int-v2.kv"; testing::TempDictionary dictionary3(&test_data, false); IntDictionaryMerger merger2(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger2.Add(dictionary.GetFileName()); merger2.Add(dictionary2.GetFileName()); merger2.Add(dictionary3.GetFileName()); merger2.Merge(filename); fsa::automata_t fsa2(new fsa::Automata(filename.c_str())); dictionary_t d2(new Dictionary(fsa2)); BOOST_CHECK(d2->Contains("abc")); BOOST_CHECK(d2->Contains("abbc")); BOOST_CHECK(d2->Contains("abcd")); BOOST_CHECK_EQUAL("22", d2->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d2->operator[]("abcd").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("24", d2->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); filename = "merged-dict-int-v3.kv"; IntDictionaryMerger merger3(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger3.Add(dictionary2.GetFileName()); merger3.Add(dictionary.GetFileName()); merger3.Merge(filename); fsa::automata_t fsa3(new fsa::Automata(filename.c_str())); dictionary_t d3(new Dictionary(fsa3)); BOOST_CHECK(d3->Contains("abc")); BOOST_CHECK(d3->Contains("abbc")); BOOST_CHECK(d3->Contains("abcd")); BOOST_CHECK_EQUAL("22", d3->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d3->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("24", d3->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeIntegerDictsAppendMerge) { std::vector<std::pair<std::string, uint32_t>> test_data = {{"abc", 22}, {"abbc", 24}}; testing::TempDictionary dictionary(&test_data, false); std::vector<std::pair<std::string, uint32_t>> test_data2 = { {"abc", 25}, {"abbc", 42}, {"abcd", 21}, {"abbc", 30}, }; testing::TempDictionary dictionary2(&test_data2, 
false); std::string filename("merged-dict-int-v1.kv"); IntDictionaryMerger merger(keyvi::util::parameters_t({{"memory_limit_mb", "10"}, {"merge_mode", "append"}})); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK_EQUAL("25", d->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("30", d->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); filename = "merged-dict-int-v2.kv"; testing::TempDictionary dictionary3(&test_data, false); IntDictionaryMerger merger2(keyvi::util::parameters_t({{"memory_limit_mb", "10"}, {"merge_mode", "append"}})); merger2.Add(dictionary.GetFileName()); merger2.Add(dictionary2.GetFileName()); merger2.Add(dictionary3.GetFileName()); merger2.Merge(filename); fsa::automata_t fsa2(new fsa::Automata(filename.c_str())); dictionary_t d2(new Dictionary(fsa2)); BOOST_CHECK(d2->Contains("abc")); BOOST_CHECK(d2->Contains("abbc")); BOOST_CHECK(d2->Contains("abcd")); BOOST_CHECK_EQUAL("22", d2->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d2->operator[]("abcd").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("24", d2->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); filename = "merged-dict-int-v3.kv"; IntDictionaryMerger merger3(keyvi::util::parameters_t({{"memory_limit_mb", "10"}, {"merge_mode", "append"}})); merger3.Add(dictionary2.GetFileName()); merger3.Add(dictionary.GetFileName()); merger3.Merge(filename); fsa::automata_t fsa3(new fsa::Automata(filename.c_str())); dictionary_t d3(new Dictionary(fsa3)); BOOST_CHECK(d3->Contains("abc")); BOOST_CHECK(d3->Contains("abbc")); BOOST_CHECK(d3->Contains("abcd")); BOOST_CHECK_EQUAL("22", d3->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d3->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("24", d3->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeStringDicts) { keyvi::util::parameters_t merge_configurations[] = {{{"memory_limit_mb", "10"}}, {{"memory_limit_mb", "10"}, {"merge_mode", "append"}}}; for (const auto params : merge_configurations) { std::vector<std::pair<std::string, std::string>> test_data = { {"abc", "a"}, {"abbc", "b"}, {"abbcd", "c"}, {"abcde", "a"}, {"abdd", "b"}, {"bba", "c"}, }; testing::TempDictionary dictionary(&test_data); std::vector<std::pair<std::string, std::string>> test_data2 = { {"abbe", "d"}, {"abbc", "z"}, {"abcd", "a"}, {"bbacd", "f"}, }; testing::TempDictionary dictionary2(&test_data2); std::string filename("merged-dict-string.kv"); DictionaryMerger<dictionary_type_t::STRING> merger(params); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abbcd")); BOOST_CHECK(d->Contains("abcde")); BOOST_CHECK(d->Contains("abdd")); BOOST_CHECK(d->Contains("bba")); BOOST_CHECK_EQUAL("a", d->operator[]("abc").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("z", d->operator[]("abbc").GetValueAsString()); BOOST_CHECK_EQUAL("c", d->operator[]("abbcd").GetValueAsString()); BOOST_CHECK_EQUAL("a", 
d->operator[]("abcde").GetValueAsString()); BOOST_CHECK_EQUAL("b", d->operator[]("abdd").GetValueAsString()); BOOST_CHECK_EQUAL("c", d->operator[]("bba").GetValueAsString()); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK(d->Contains("bbacd")); BOOST_CHECK_EQUAL("a", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("d", d->operator[]("abbe").GetValueAsString()); BOOST_CHECK_EQUAL("f", d->operator[]("bbacd").GetValueAsString()); std::remove(filename.c_str()); } } BOOST_AUTO_TEST_CASE(MergeJsonDicts) { keyvi::util::parameters_t merge_configurations[] = {{{"memory_limit_mb", "10"}}, {{"memory_limit_mb", "10"}, {"merge_mode", "append"}}}; for (const auto params : merge_configurations) { std::vector<std::pair<std::string, std::string>> test_data = { {"abc", "{a:1}"}, {"abbc", "{b:2}"}, {"abbcd", "{c:3}"}, {"abcde", "{a:1}"}, {"abdd", "{b:2}"}, {"bba", "{c:3}"}, }; testing::TempDictionary dictionary = testing::TempDictionary::makeTempDictionaryFromJson(&test_data); std::vector<std::pair<std::string, std::string>> test_data2 = { {"abbe", "{d:4}"}, {"abbc", "{b:3}"}, {"abcd", "{a:1}"}, {"bbacd", "{f:5}"}, }; testing::TempDictionary dictionary2 = testing::TempDictionary::makeTempDictionaryFromJson(&test_data2); std::string filename("merged-dict-json.kv"); DictionaryMerger<dictionary_type_t::JSON> merger(params); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abbcd")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(d->Contains("abcde")); BOOST_CHECK(d->Contains("abdd")); BOOST_CHECK(d->Contains("bba")); BOOST_CHECK(d->Contains("bbacd")); BOOST_CHECK_EQUAL("\"{a:1}\"", d->operator[]("abc").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("\"{b:3}\"", d->operator[]("abbc").GetValueAsString()); BOOST_CHECK_EQUAL("\"{c:3}\"", d->operator[]("abbcd").GetValueAsString()); BOOST_CHECK_EQUAL("\"{a:1}\"", d->operator[]("abcde").GetValueAsString()); BOOST_CHECK_EQUAL("\"{b:2}\"", d->operator[]("abdd").GetValueAsString()); BOOST_CHECK_EQUAL("\"{c:3}\"", d->operator[]("bba").GetValueAsString()); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK(d->Contains("bbacd")); BOOST_CHECK_EQUAL("\"{a:1}\"", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("\"{d:4}\"", d->operator[]("abbe").GetValueAsString()); BOOST_CHECK_EQUAL("\"{f:5}\"", d->operator[]("bbacd").GetValueAsString()); BOOST_CHECK_EQUAL(0, merger.GetStats().deleted_keys_); BOOST_CHECK_EQUAL(1, merger.GetStats().updated_keys_); BOOST_CHECK_EQUAL(9, merger.GetStats().number_of_keys_); std::remove(filename.c_str()); } } BOOST_AUTO_TEST_CASE(MergeIncompatible) { std::vector<std::string> test_data = {"aaaa", "aabb", "aabc", "aacd", "bbcd", "aaceh", "cdefgh"}; testing::TempDictionary dictionary(&test_data); DictionaryMerger<dictionary_type_t::INT_WITH_WEIGHTS> merger(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); BOOST_CHECK_THROW(merger.Add(dictionary.GetFileName()), std::invalid_argument); } BOOST_AUTO_TEST_CASE(MergeIntegerWeightDictsValueMerge) { std::vector<std::pair<std::string, uint32_t>> test_data = {{"abc", 22}, {"abbc", 24}}; testing::TempDictionary dictionary(&test_data); std::vector<std::pair<std::string, uint32_t>> test_data2 = { {"abc", 25}, {"abbc", 42}, {"abcd", 21}, 
{"abbc", 30}, }; testing::TempDictionary dictionary2(&test_data2); std::string filename("merged-dict-int-weight-v1.kv"); DictionaryMerger<dictionary_type_t::INT_WITH_WEIGHTS> merger(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abc")); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK_EQUAL("25", d->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("30", d->operator[]("abbc").GetValueAsString()); fsa::WeightedStateTraverser s(fsa); BOOST_CHECK_EQUAL('a', s.GetStateLabel()); BOOST_CHECK_EQUAL(1, s.GetDepth()); BOOST_CHECK_EQUAL(30, s.GetInnerWeight()); s++; BOOST_CHECK_EQUAL('b', s.GetStateLabel()); BOOST_CHECK_EQUAL(2, s.GetDepth()); BOOST_CHECK_EQUAL(30, s.GetInnerWeight()); s++; BOOST_CHECK_EQUAL('b', s.GetStateLabel()); BOOST_CHECK_EQUAL(3, s.GetDepth()); BOOST_CHECK_EQUAL(30, s.GetInnerWeight()); s++; BOOST_CHECK_EQUAL('c', s.GetStateLabel()); BOOST_CHECK_EQUAL(4, s.GetDepth()); BOOST_CHECK_EQUAL(30, s.GetInnerWeight()); s++; BOOST_CHECK_EQUAL('c', s.GetStateLabel()); BOOST_CHECK_EQUAL(3, s.GetDepth()); BOOST_CHECK_EQUAL(25, s.GetInnerWeight()); s++; BOOST_CHECK_EQUAL('d', s.GetStateLabel()); BOOST_CHECK_EQUAL(4, s.GetDepth()); BOOST_CHECK_EQUAL(21, s.GetInnerWeight()); s++; // at end BOOST_CHECK_EQUAL(0, s.GetStateLabel()); std::remove(filename.c_str()); filename = "merged-dict-int-weight-v2.kv"; testing::TempDictionary dictionary3(&test_data); CompletionDictionaryMerger merger2(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger2.Add(dictionary.GetFileName()); merger2.Add(dictionary2.GetFileName()); merger2.Add(dictionary3.GetFileName()); merger2.Merge(filename); fsa::automata_t fsa2(new fsa::Automata(filename.c_str())); dictionary_t d2(new Dictionary(fsa2)); BOOST_CHECK(d2->Contains("abc")); BOOST_CHECK(d2->Contains("abbc")); BOOST_CHECK(d2->Contains("abcd")); BOOST_CHECK_EQUAL("22", d2->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d2->operator[]("abcd").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("24", d2->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); filename = "merged-dict-int-weight-v3.kv"; CompletionDictionaryMerger merger3(keyvi::util::parameters_t({{"memory_limit_mb", "10"}})); merger3.Add(dictionary2.GetFileName()); merger3.Add(dictionary.GetFileName()); merger3.Merge(filename); fsa::automata_t fsa3(new fsa::Automata(filename.c_str())); dictionary_t d3(new Dictionary(fsa3)); BOOST_CHECK(d3->Contains("abc")); BOOST_CHECK(d3->Contains("abbc")); BOOST_CHECK(d3->Contains("abcd")); BOOST_CHECK_EQUAL("22", d3->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d3->operator[]("abcd").GetValueAsString()); BOOST_CHECK_EQUAL("24", d3->operator[]("abbc").GetValueAsString()); fsa::WeightedStateTraverser s3(fsa3); BOOST_CHECK_EQUAL('a', s3.GetStateLabel()); BOOST_CHECK_EQUAL(1, s3.GetDepth()); BOOST_CHECK_EQUAL(24, s3.GetInnerWeight()); s3++; BOOST_CHECK_EQUAL('b', s3.GetStateLabel()); BOOST_CHECK_EQUAL(2, s3.GetDepth()); BOOST_CHECK_EQUAL(24, s3.GetInnerWeight()); s3++; BOOST_CHECK_EQUAL('b', s3.GetStateLabel()); BOOST_CHECK_EQUAL(3, s3.GetDepth()); BOOST_CHECK_EQUAL(24, s3.GetInnerWeight()); s3++; BOOST_CHECK_EQUAL('c', s3.GetStateLabel()); BOOST_CHECK_EQUAL(4, 
s3.GetDepth()); BOOST_CHECK_EQUAL(24, s3.GetInnerWeight()); s3++; BOOST_CHECK_EQUAL('c', s3.GetStateLabel()); BOOST_CHECK_EQUAL(3, s3.GetDepth()); BOOST_CHECK_EQUAL(22, s3.GetInnerWeight()); s3++; BOOST_CHECK_EQUAL('d', s3.GetStateLabel()); BOOST_CHECK_EQUAL(4, s3.GetDepth()); BOOST_CHECK_EQUAL(21, s3.GetInnerWeight()); s3++; // at end BOOST_CHECK_EQUAL(0, s3.GetStateLabel()); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeIntegerWeightDictsAppendMerge) { std::vector<std::pair<std::string, uint32_t>> test_data = {{"abc", 22}, {"abbc", 24}}; testing::TempDictionary dictionary(&test_data); std::vector<std::pair<std::string, uint32_t>> test_data2 = { {"abc", 25}, {"abbc", 42}, {"abcd", 21}, {"abbc", 30}, }; testing::TempDictionary dictionary2(&test_data2); std::string filename = "merged-dict-int-weight-v2.kv"; testing::TempDictionary dictionary3(&test_data); CompletionDictionaryMerger merger2(keyvi::util::parameters_t({{"memory_limit_mb", "10"}, {"merge_mode", "append"}})); merger2.Add(dictionary.GetFileName()); merger2.Add(dictionary2.GetFileName()); merger2.Add(dictionary3.GetFileName()); merger2.Merge(filename); fsa::automata_t fsa2(new fsa::Automata(filename.c_str())); dictionary_t d2(new Dictionary(fsa2)); BOOST_CHECK(d2->Contains("abc")); BOOST_CHECK(d2->Contains("abbc")); BOOST_CHECK(d2->Contains("abcd")); BOOST_CHECK_EQUAL("22", d2->operator[]("abc").GetValueAsString()); BOOST_CHECK_EQUAL("21", d2->operator[]("abcd").GetValueAsString()); // overwritten by 2nd BOOST_CHECK_EQUAL("24", d2->operator[]("abbc").GetValueAsString()); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeToEmptyDict) { std::vector<std::pair<std::string, std::string>> test_data = {}; testing::TempDictionary dictionary = testing::TempDictionary::makeTempDictionaryFromJson(&test_data); std::vector<std::pair<std::string, std::string>> test_data2 = { {"abbe", "{d:4}"}, {"abbc", "{b:3}"}, }; testing::TempDictionary dictionary2 = testing::TempDictionary::makeTempDictionaryFromJson(&test_data2); std::string filename("merge-to-empty-dict.kv"); JsonDictionaryMerger merger; merger.Add(dictionary.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Merge(filename); dictionary_t d(new Dictionary(filename)); BOOST_CHECK(d->Contains("abbc")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK_EQUAL("\"{b:3}\"", d->operator[]("abbc").GetValueAsString()); BOOST_CHECK_EQUAL("\"{d:4}\"", d->operator[]("abbe").GetValueAsString()); BOOST_CHECK_EQUAL(0, merger.GetStats().deleted_keys_); BOOST_CHECK_EQUAL(0, merger.GetStats().updated_keys_); BOOST_CHECK_EQUAL(2, merger.GetStats().number_of_keys_); std::remove(filename.c_str()); } BOOST_AUTO_TEST_CASE(MergeDuplicateAdd) { std::vector<std::pair<std::string, std::string>> test_data = { {"abbe", "{d:4}"}, {"abbc", "{b:3}"}, }; testing::TempDictionary dictionary = testing::TempDictionary::makeTempDictionaryFromJson(&test_data); JsonDictionaryMerger merger; merger.Add(dictionary.GetFileName()); BOOST_CHECK_THROW(merger.Add(dictionary.GetFileName()), std::invalid_argument); } BOOST_AUTO_TEST_CASE(Delete) { std::vector<std::pair<std::string, std::string>> test_data = { {"abcd", "{g:5}"}, {"xyz", "{t:4}"}, }; testing::TempDictionary dictionary = testing::TempDictionary::makeTempDictionaryFromJson(&test_data); boost::filesystem::path deleted_keys_file{dictionary.GetFileName()}; deleted_keys_file += ".dk"; JsonDictionaryMerger merger; { std::unordered_set<std::string> deleted_keys{"xyz"}; std::ofstream out_stream(deleted_keys_file.string(), std::ios::binary); 
msgpack::pack(out_stream, deleted_keys); } merger.Add(dictionary.GetFileName()); std::string filename("merge-delete-key-dict.kv"); std::ofstream out_stream(filename, std::ios::binary); merger.Merge(); merger.Write(out_stream); out_stream.close(); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(!d->Contains("xyz")); BOOST_CHECK_EQUAL(1, merger.GetStats().deleted_keys_); BOOST_CHECK_EQUAL(0, merger.GetStats().updated_keys_); BOOST_CHECK_EQUAL(1, merger.GetStats().number_of_keys_); std::remove(filename.c_str()); std::remove(deleted_keys_file.string().c_str()); } BOOST_AUTO_TEST_CASE(MultipleDeletes) { std::vector<std::pair<std::string, std::string>> test_data1 = { {"abcd", "{g:5}"}, {"abbc", "{t:4}"}, {"abbcd", "{u:3}"}, {"abbd", "{v:2}"}, {"abbdef", "{w:1}"}, {"abbe", "{x:0}"}, {"acdd", "{y:-1}"}, {"afgh", "{z:-2}"}, }; testing::TempDictionary dictionary1 = testing::TempDictionary::makeTempDictionaryFromJson(&test_data1); boost::filesystem::path deleted_keys_file1{dictionary1.GetFileName()}; deleted_keys_file1 += ".dk"; { std::unordered_set<std::string> deleted_keys1{"abbc", "afgh"}; std::ofstream out_stream(deleted_keys_file1.string(), std::ios::binary); msgpack::pack(out_stream, deleted_keys1); } std::vector<std::pair<std::string, std::string>> test_data2 = { {"abcd", "{g:15}"}, {"abbc", "{t:14}"}, {"abbcd", "{u:13}"}, {"abbd", "{v:12}"}, {"abbdef", "{w:11}"}, {"abbe", "{x:10}"}, {"acdd", "{y:9}"}, }; testing::TempDictionary dictionary2 = testing::TempDictionary::makeTempDictionaryFromJson(&test_data2); boost::filesystem::path deleted_keys_file2{dictionary2.GetFileName()}; deleted_keys_file2 += ".dk"; { std::unordered_set<std::string> deleted_keys2{"abbc", "abbcd", "abbd"}; std::ofstream out_stream(deleted_keys_file2.string(), std::ios::binary); msgpack::pack(out_stream, deleted_keys2); } std::vector<std::pair<std::string, std::string>> test_data3 = { {"abcd", "{g:25}"}, {"abbc", "{t:24}"}, {"abbcd", "{u:23}"}, {"abbdef", "{w:21}"}, {"abbe", "{x:20}"}, {"acdd", "{y:19}"}, }; testing::TempDictionary dictionary3 = testing::TempDictionary::makeTempDictionaryFromJson(&test_data3); boost::filesystem::path deleted_keys_file3{dictionary3.GetFileName()}; deleted_keys_file3 += ".dk"; { std::unordered_set<std::string> deleted_keys3{"abbc", "abbcd", "abbdef"}; std::ofstream out_stream(deleted_keys_file3.string(), std::ios::binary); msgpack::pack(out_stream, deleted_keys3); } JsonDictionaryMerger merger; merger.Add(dictionary1.GetFileName()); merger.Add(dictionary2.GetFileName()); merger.Add(dictionary3.GetFileName()); std::string filename("merge-multiple-deletes-dict.kv"); merger.Merge(); merger.WriteToFile(filename); fsa::automata_t fsa(new fsa::Automata(filename.c_str())); dictionary_t d(new Dictionary(fsa)); BOOST_CHECK(d->Contains("abcd")); BOOST_CHECK(!d->Contains("abbc")); BOOST_CHECK(!d->Contains("abbcd")); BOOST_CHECK(!d->Contains("abbd")); BOOST_CHECK(!d->Contains("abbdef")); BOOST_CHECK(d->Contains("abbe")); BOOST_CHECK(d->Contains("acdd")); BOOST_CHECK(!d->Contains("afgh")); BOOST_CHECK_EQUAL(8, merger.GetStats().deleted_keys_); BOOST_CHECK_EQUAL(13, merger.GetStats().updated_keys_); BOOST_CHECK_EQUAL(3, merger.GetStats().number_of_keys_); std::remove(filename.c_str()); std::remove(deleted_keys_file1.string().c_str()); std::remove(deleted_keys_file2.string().c_str()); std::remove(deleted_keys_file3.string().c_str()); } BOOST_AUTO_TEST_CASE(WriteWithoutMerge) { JsonDictionaryMerger merger; const 
std::string filename("write-without-merger.kv"); BOOST_CHECK_THROW(merger.WriteToFile(filename), merger_exception); { std::ofstream out_stream(filename, std::ios::binary); BOOST_CHECK_THROW(merger.Write(out_stream), merger_exception); } std::remove(filename.c_str()); } BOOST_AUTO_TEST_SUITE_END() } /* namespace dictionary */ } /* namespace keyvi */
# look at ITA to RCA vs SVG to RCA with veins to LCX library(easypackages) libraries(c("survival","rms","Hmisc","survminer","tidyverse", "tidylog", "Publish")) # get the whole dataset here. df <- read_csv("D:/lita_bita3/complete_dataset.csv") # some small changes before tables... df$diabetes[df$diabetes == 8]<- 0 df$left_main_disease[df$left_main_disease == 2]<- 0 df2 <- df %>% filter(distals_n != 1) dim(df) dim(df2) # save this dataset as the actual final complete dataset. write_csv(df2, "D:/lita_bita3/complete_dataset_final.csv") glimpse(df2) df2$surv_years = (1 + df2$survival_days)/365.24 df2$age10 = df2$age_at_surgery/10 df2$oldage <- with(df2, ifelse(age_at_surgery > 70, 1, 0)) df2 = data.frame(df2) df2$bima = factor(df2$bima, levels = c(0,1), labels = c("sita","bita")) df2$diabetes = factor(df2$diabetes, levels = c(0,1), labels = c("no_dm","dm")) df2$gender = factor(df2$gender, levels = c(1,2), labels = c("male","female")) df2$oldage = factor(df2$oldage, levels = c(0,1), labels = c("young","old")) m2 = coxph(Surv(surv_years, died) ~ bima + oldage + art_hypertension + smoker + copd + diabetes + pad + pre_dialysis + gender + left_main_disease + prior_pci + pre_stroke, data = df2) summary(m2) sub_cox = subgroupAnalysis(m2, df2, treatment = "bima", subgroups = c("diabetes","gender","oldage")) res <- summary(sub_cox) str(res) rest <- tibble(res) str(rest) write_csv(rest, "D:/lita_bita3/rest.csv") tb <- rest %>% filter(subgroups == 'oldage') tb2 <- (tb[, c(1,2,9:13)]) tb2 write_csv(tb2, "D:/lita_bita3/age_interact.csv") # now to compare groups:- glimpse(df2) ## lima + svg to lcx = svg to rca vs lima + rima to rca + svg to lcx lcx_vein = df2 %>% filter(lcx_graft == 1) lcx_vein %>% group_by(bima) %>% count(rca_graft) # some patients may not have an RCA graft at all. lcx_vein2 = lcx_vein %>% filter(rca_graft != 0) lcx_vein2 %>% group_by(bima) %>% count(rca_graft) # now lcx_vein2 has vein to lcx and either svg to rima to rca. bima_rca = coxph(Surv(surv_years, died) ~ bima + age10 + art_hypertension + smoker + copd + diabetes + pad + pre_dialysis + gender + left_main_disease + prior_pci + pre_stroke, data = lcx_vein2) summary(bima_rca) # lima + svg to rca + bima to lcx VS lima + svg to rca + svg to lcx rca_vein = df2 %>% filter(rca_graft == 1) rca_vein2 = rca_vein %>% filter(lcx_graft != 0) rca_vein2 %>% group_by(bima) %>% count(lcx_graft) rca_vein2$keep = with(rca_vein2, ifelse(bima == "sita" & lcx_graft == 1, 1, ifelse(bima == "bita" & lcx_graft %in% c(3,4), 1, 0))) rca_vein2 %>% count(keep) rca_vein3 = rca_vein2 %>% filter(keep == 1) rca_vein3 %>% count(bima) bima_lcx = coxph(Surv(surv_years, died) ~ bima + age10 + art_hypertension + smoker + copd + diabetes + pad + pre_dialysis + gender + left_main_disease + prior_pci + pre_stroke, data = rca_vein3) summary(bima_lcx)
[STATEMENT] lemma diagseq_seqseq: "diagseq \<circ> ((+) k) = (seqseq k \<circ> (\<lambda>x. fold_reduce k x (k + x)))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. diagseq \<circ> (+) k = seqseq k \<circ> (\<lambda>x. fold_reduce k x (k + x)) [PROOF STEP] by (auto simp: o_def diagseq_add)
% !TEX root = thesis.tex \section{Appendix}\label{ap:appendix} In the Appendix (or Appendices) you may give the details that did not fit in the main text. If necessary, you may use a one-column lay-out here. Start the first appendix on a new page.
\chapter{Validation}
labelWith : Stream labelType -> List a -> List (labelType, a) labelWith lbs [] = [] labelWith (lbl :: lbs) (val :: vals) = (lbl, val) :: labelWith lbs vals label : List a -> List (Integer, a) label = labelWith (iterate (+1) 0)
I was in a South Carolina accident when a car turned left in front of me while I was riding my motorcycle. Who is at fault? When you have been in a motorcycle accident in South Carolina where a car makes a left turn in front of you, the other driver is almost always liable for the accident and your injuries. Before making any assumptions on who is at fault for your motorcycle accident in South Carolina, you should consult with a Charleston motorcycle accident attorney. There are many circumstances to consider when you are involved in a motorcycle accident in South Carolina and trying to determine who is at fault. One of the few exceptions to the near-automatic rule that a left turning car is at fault in a motorcycle accident in South Carolina is when you are found to have violated a traffic law. If you were found to have been speeding or disobeying a traffic signal, you may be found to be at fault for part of the accident as well. When you need to prove who was at fault in a motorcycle accident in South Carolina, your claim will benefit greatly from the help of Charleston motorcycle accident attorney. This is because in most scenarios, insurance adjusters will determine fault for the accident. Depending on how much at fault insurance adjusters find you to be, your potential compensation could be impacted accordingly. This is not a situation in which you want to fend for yourself. A Charleston motorcycle accident attorney can speak up on your behalf after a left turn accident and can handle each and every stage of your claim while you focus on recovery. The Charleston personal injury attorney team at the Shelly Leeke Law Firm is available to help accident victims cope with the legal implications of a serious injury. Our firm’s focus on personal injury cases includes injury from auto accidents including pedestrian and bicycle injuries, work-related injuries and Workers’ Compensation claims, and injuries resulting from dangerous prescription drugs. Before you file a South Carolina personal injury claim, request a free copy of our South Carolina injury book. When you’re ready to get started on settling your accident claim, contact a Charleston motorcycle accident attorney for a free information packet and consultation – 1-888-690-0211.
# Computational Astrophysics ## Partial Differential Equations. 05 ## Non-Linear Hyperbolic PDE. Burgers' Equation --- ## Eduard Larrañaga Observatorio Astronómico Nacional\ Facultad de Ciencias\ Universidad Nacional de Colombia --- ### About this notebook In this notebook we present some of the techniques used to solve the linear advection equation. `A. Garcia. Numerical Methods for Physics. (1999). Chapter 6 - 7 ` --- ## The Linear 1-D Advection Equation The linear advection equation is \begin{equation} \label{eq:advect} \partial_t u + v \partial_x u = 0 \end{equation} where $u(t,x)$ is some scalar quantity and $v$ is the constant velocity at which it is advected ($v > 0$ advects to the right). The solution to this equation is to simply take the initial data, $u(t=0,x)$, and displace it to the right at a speed $v$. The shape of the initial data is preserved in the advection. Direct substitution shows that $u(x - vt)$ is a solution to the advection equation for any choice of $u$. This means that the solution is constant along the lines $x = v t + x_0$ (the curves along which the solution is constant are called the characteristics). Many hyperbolic systems of PDEs, e.g. the equations of hydrodynamics, can be written in a form that looks like a system of (nonlinear) advection equations, so the advection equation provides important insight into the methods used for these systems. --- ## Non-linear Hyperbolic PDE. Burgers' Equation Burgers' equation is the simplest **nonlinear hyperbolic equation**, \begin{equation} \partial_t u + u \partial_x u = 0. \end{equation} It is almost identical to the advection equation treated before, but this time the wave speed is **NOT a constant** $v$ but is given by the field $u$ itself. **Then, $u$ is both the quantity being advected and the speed at which it is moving.** --- ### Shocks For the linear advection equation, the solution was constant along lines $x = vt + x_0$, which are parallel (because $v$ is spatially constant). For Burgers' equation, this is no longer the case, and the characteristic lines are now given by $\frac{dx}{dt} = u$, with $x(0) = x_0$. Since $u = u(x,t)$ is itself part of the solution, we cannot integrate this directly. If we take $u_0 = u(t=0)$, then we can look at how the characteristics behave over a small time interval (before $u(x,t)$ changes significantly). We see that after a short period of time, the characteristics intersect. At the point $(x_s, t_s)$ where they intersect, there is no way to trace backwards along the characteristics to find a unique initial state. This merging of the characteristics in the $x$-$t$ plane is a **shock**, and represents just one way that nonlinear problems can differ from linear ones. --- ### Rarefaction Another type of wave not present in a linear system is a **rarefaction**. The next figure shows initial conditions with slower velocity to the left of faster velocity. We see that the characteristics diverge in this case, and we will be left with having to fill in the solution in between as some intermediate state.
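To illustrate how the steepening toward a shock shows up numerically, here is a small, self-contained Python sketch. It is not taken from the Garcia reference and is only a first-order illustration, not a production solver: it advances Burgers' equation with a simple upwind difference, which is valid here because the chosen initial data keeps $u > 0$ everywhere.

```python
import numpy as np

# Grid and initial condition: a smooth bump with faster fluid behind slower fluid
nx, nt = 200, 150
x = np.linspace(0.0, 2.0 * np.pi, nx)
dx = x[1] - x[0]
u = 1.0 + 0.5 * np.sin(x)      # u > 0 everywhere, so upwind = backward difference

cfl = 0.5
for n in range(nt):
    dt = cfl * dx / np.max(np.abs(u))      # time step from the CFL condition
    # First-order upwind update for u_t + u u_x = 0 with periodic boundaries
    u = u - dt / dx * u * (u - np.roll(u, 1))
    # The initially smooth profile steepens where fast fluid catches up with
    # slow fluid -- the discrete analogue of characteristics crossing (a shock).

print("min/max of u after stepping:", u.min(), u.max())
```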
From 2011 to 2015, an aeroplane belonging to Flybe bore an image of Stansfield, with other aeroplanes belonging to the company featuring such former footballers as George Best and Kevin Keegan. In 2015, Stansfield was featured on £5 Exeter Pound notes in the city.
[STATEMENT] lemma correctArray_update: assumes "correctArray rcs a h" assumes "correctClause rcs c" "sorted c" "distinct c" shows "correctArray rcs a (Array.update a i (Some c) h)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. correctArray rcs a (Array.update a i (Some c) h) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: correctArray rcs a h correctClause rcs c sorted c distinct c goal (1 subgoal): 1. correctArray rcs a (Array.update a i (Some c) h) [PROOF STEP] unfolding correctArray_def [PROOF STATE] proof (prove) using this: \<forall>cl\<in>array_ran a h. correctClause rcs cl \<and> sorted cl \<and> distinct cl correctClause rcs c sorted c distinct c goal (1 subgoal): 1. \<forall>cl\<in>array_ran a (Array.update a i (Some c) h). correctClause rcs cl \<and> sorted cl \<and> distinct cl [PROOF STEP] by (auto dest:array_ran_upd_array_Some)
function expInfo = exportInfo(guifile) %EXPORTINFO Extract useful info from a CHEBGUI object for exporting. % % Calling sequence % % EXPINFO = EXPORTINFO(GUIFILE) % % where % % GUIFILE: A CHEBGUI object % EXPINFO: A struct, containing fields with information for exporting to an % .m-file. % Copyright 2017 by The University of Oxford and The Chebfun Developers. % See http://www.chebfun.org/ for Chebfun information. % Extract information from the GUI fields dom = guifile.domain; deInput = guifile.DE; bcInput = guifile.BC; initInput = guifile.init; % Wrap all input strings in a cell (if they're not a cell already) if ( isa(deInput, 'char') ) deInput = cellstr(deInput); end if ( isa(bcInput, 'char') ) bcInput = cellstr(bcInput); end if ( isa(initInput, 'char') ) initInput = cellstr(initInput); end % Obtain useful strings describing the differential equation part: [deString, allVarString, indVarNameDE, dummy, dummy, dummy, allVarNames] = ... setupFields(guifile, deInput, 'DE'); % Do some error checking before we do further printing. Check that independent % variable name match. % Ensure that no EIG specific variables appear: eigNames = {'lambda'; 'lam'; 'l'}; eigMatch = zeros(size(allVarNames)); for eigNameCounter = 1:length(eigNames); eigMatch = eigMatch + strcmp(allVarNames, eigNames{eigNameCounter}); end assert(~any(eigMatch), 'CHEBFUN:CHEBGUIEXPORTER:exportInfo:eig', ... ['Problem appears to be an eigenvalue problem. \n Please make sure ' ... '''l'', ''lam'' or ''lambda'' do not appear in appear in input.']); % Obtain the independent variable name appearing in the initial condition: useLatest = strcmpi(initInput{1}, 'Using latest solution'); if ( ~isempty(initInput{1}) && ~useLatest ) [dummy, dummy, indVarNameInit] = ... setupFields(guifile, initInput, 'INIT', allVarString); else indVarNameInit = {''}; end % Make sure we don't have a discrepency in indVarNames if ( ~isempty(indVarNameInit{1}) && ~isempty(indVarNameDE{1}) ) if ( strcmp(indVarNameDE{1}, indVarNameInit{1}) ) indVarNameSpace = indVarNameDE{1}; else error('CHEBFUN:CHEBGUIEXPORTERBVP:exportInfo:SolveGUIbvp', 'Independent variable names do not agree') end elseif ( ~isempty(indVarNameInit{1}) && isempty(indVarNameDE{1}) ) indVarNameSpace = indVarNameInit{1}; elseif ( isempty(indVarNameInit{1}) && ~isempty(indVarNameDE{1}) ) indVarNameSpace = indVarNameDE{1}; else indVarNameSpace = 't'; % Default value for IVPs end % Replace the 'DUMMYSPACE' variable in the DE field deString = strrep(deString, 'DUMMYSPACE', indVarNameSpace); deString = chebguiExporter.prettyPrintFevalString(deString, allVarNames); % Do we want to solve the problem globally, or with timestepping? timeSteppingSolver = ~isempty(strfind(guifile.options.ivpSolver, 'ode')); % Add spaces to DOM and ALLVARSTRING so it looks nices once we export dom = strrep(dom, ',', ', '); allVarString = strrep(allVarString, ',', ', '); %% Fill up the expInfo struct expInfo.dom = dom; expInfo.deInput = deInput; expInfo.bcInput = bcInput; expInfo.initInput = initInput; expInfo.deString = deString; expInfo.allVarString = allVarString; expInfo.allVarNames = allVarNames; expInfo.numVars = length(allVarNames); expInfo.indVarNameSpace = indVarNameSpace; expInfo.useLatest = useLatest; expInfo.timeSteppingSolver = timeSteppingSolver; %Information related to options set-up expInfo.tol = guifile.tol; expInfo.dampingOn = guifile.options.damping; expInfo.discretization = guifile.options.discretization; expInfo.plotting = guifile.options.plotting; expInfo.ivpSolver = guifile.options.ivpSolver; end
module Main import Control.App import Control.App.Console data Counter : Type where --helloCount : (Console es, State Counter Int es) => App es () helloCount : Has [Console, State Counter Int] es => App es () helloCount = do c <- get Counter put Counter (c + 1) putStrLn "Hello, counting world!" c <- get Counter putStrLn $ "Counter: " ++ show c main : IO () main = run (new 93 helloCount)
(* Title: JinjaThreads/Framework/FWLocking.thy Author: Andreas Lochbihler *) section \<open>Semantics of the thread actions for locking\<close> theory FWLocking imports FWLock begin definition redT_updLs :: "('l,'t) locks \<Rightarrow> 't \<Rightarrow> 'l lock_actions \<Rightarrow> ('l,'t) locks" where "redT_updLs ls t las \<equiv> (\<lambda>(l, la). upd_locks l t la) \<circ>$ (($ls, las$))" lemma redT_updLs_iff [simp]: "redT_updLs ls t las $ l = upd_locks (ls $ l) t (las $ l)" by(simp add: redT_updLs_def) lemma upd_locks_empty_conv [simp]: "(\<lambda>(l, las). upd_locks l t las) \<circ>$ ($ls, K$ []$) = ls" by(auto intro: finfun_ext) lemma redT_updLs_Some_thread_idD: "\<lbrakk> has_lock (redT_updLs ls t las $ l) t'; t \<noteq> t' \<rbrakk> \<Longrightarrow> has_lock (ls $ l) t'" by(auto simp add: redT_updLs_def intro: has_lock_upd_locks_implies_has_lock) definition acquire_all :: "('l, 't) locks \<Rightarrow> 't \<Rightarrow> ('l \<Rightarrow>f nat) \<Rightarrow> ('l, 't) locks" where "\<And>ln. acquire_all ls t ln \<equiv> (\<lambda>(l, la). acquire_locks l t la) \<circ>$ (($ls, ln$))" lemma acquire_all_iff [simp]: "\<And>ln. acquire_all ls t ln $ l = acquire_locks (ls $ l) t (ln $ l)" by(simp add: acquire_all_def) definition lock_ok_las :: "('l,'t) locks \<Rightarrow> 't \<Rightarrow> 'l lock_actions \<Rightarrow> bool" where "lock_ok_las ls t las \<equiv> \<forall>l. lock_actions_ok (ls $ l) t (las $ l)" lemma lock_ok_lasI [intro]: "(\<And>l. lock_actions_ok (ls $ l) t (las $ l)) \<Longrightarrow> lock_ok_las ls t las" by(simp add: lock_ok_las_def) lemma lock_ok_lasE: "\<lbrakk> lock_ok_las ls t las; (\<And>l. lock_actions_ok (ls $ l) t (las $ l)) \<Longrightarrow> Q \<rbrakk> \<Longrightarrow> Q" by(simp add: lock_ok_las_def) lemma lock_ok_lasD: "lock_ok_las ls t las \<Longrightarrow> lock_actions_ok (ls $ l) t (las $ l)" by(simp add: lock_ok_las_def) lemma lock_ok_las_code [code]: "lock_ok_las ls t las = finfun_All ((\<lambda>(l, la). lock_actions_ok l t la) \<circ>$ ($ls, las$))" by(simp add: lock_ok_las_def finfun_All_All o_def) lemma lock_ok_las_may_lock: "\<lbrakk> lock_ok_las ls t las; Lock \<in> set (las $ l) \<rbrakk> \<Longrightarrow> may_lock (ls $ l) t" by(erule lock_ok_lasE)(rule lock_actions_ok_Lock_may_lock) lemma redT_updLs_may_lock [simp]: "lock_ok_las ls t las \<Longrightarrow> may_lock (redT_updLs ls t las $ l) t = may_lock (ls $ l) t" by(auto dest!: lock_ok_lasD[where l=l]) lemma redT_updLs_has_locks [simp]: "\<lbrakk> lock_ok_las ls t' las; t \<noteq> t' \<rbrakk> \<Longrightarrow> has_locks (redT_updLs ls t' las $ l) t = has_locks (ls $ l) t" by(auto dest!: lock_ok_lasD[where l=l]) definition may_acquire_all :: "('l, 't) locks \<Rightarrow> 't \<Rightarrow> ('l \<Rightarrow>f nat) \<Rightarrow> bool" where "\<And>ln. may_acquire_all ls t ln \<equiv> \<forall>l. ln $ l > 0 \<longrightarrow> may_lock (ls $ l) t" lemma may_acquire_allE: "\<And>ln. \<lbrakk> may_acquire_all ls t ln; \<forall>l. ln $ l > 0 \<longrightarrow> may_lock (ls $ l) t \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P" by(auto simp add: may_acquire_all_def) lemma may_acquire_allD [dest]: "\<And>ln. 
\<lbrakk> may_acquire_all ls t ln; ln $ l > 0 \<rbrakk> \<Longrightarrow> may_lock (ls $ l) t" by(auto simp add: may_acquire_all_def) lemma may_acquire_all_has_locks_acquire_locks [simp]: fixes ln shows "\<lbrakk> may_acquire_all ls t ln; t \<noteq> t' \<rbrakk> \<Longrightarrow> has_locks (acquire_locks (ls $ l) t (ln $ l)) t' = has_locks (ls $ l) t'" by(cases "ln $ l > 0")(auto dest: may_acquire_allD) lemma may_acquire_all_code [code]: "\<And>ln. may_acquire_all ls t ln \<longleftrightarrow> finfun_All ((\<lambda>(lock, n). n > 0 \<longrightarrow> may_lock lock t) \<circ>$ ($ls, ln$))" by(auto simp add: may_acquire_all_def finfun_All_All o_def) definition collect_locks :: "'l lock_actions \<Rightarrow> 'l set" where "collect_locks las = {l. Lock \<in> set (las $ l)}" lemma collect_locksI: "Lock \<in> set (las $ l) \<Longrightarrow> l \<in> collect_locks las" by(simp add: collect_locks_def) lemma collect_locksE: "\<lbrakk> l \<in> collect_locks las; Lock \<in> set (las $ l) \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P" by(simp add: collect_locks_def) lemma collect_locksD: "l \<in> collect_locks las \<Longrightarrow> Lock \<in> set (las $ l)" by(simp add: collect_locks_def) fun must_acquire_lock :: "lock_action list \<Rightarrow> bool" where "must_acquire_lock [] = False" | "must_acquire_lock (Lock # las) = True" | "must_acquire_lock (Unlock # las) = False" | "must_acquire_lock (_ # las) = must_acquire_lock las" lemma must_acquire_lock_append: "must_acquire_lock (xs @ ys) \<longleftrightarrow> (if Lock \<in> set xs \<or> Unlock \<in> set xs then must_acquire_lock xs else must_acquire_lock ys)" proof(induct xs) case Nil thus ?case by simp next case (Cons L Ls) thus ?case by (cases L, simp_all) qed lemma must_acquire_lock_contains_lock: "must_acquire_lock las \<Longrightarrow> Lock \<in> set las" proof(induct las) case (Cons l las) thus ?case by(cases l) auto qed simp lemma must_acquire_lock_conv: "must_acquire_lock las = (case (filter (\<lambda>L. L = Lock \<or> L = Unlock) las) of [] \<Rightarrow> False | L # Ls \<Rightarrow> L = Lock)" proof(induct las) case Nil thus ?case by simp next case (Cons LA LAS) thus ?case by(cases LA, auto split: list.split_asm) qed definition collect_locks' :: "'l lock_actions \<Rightarrow> 'l set" where "collect_locks' las \<equiv> {l. must_acquire_lock (las $ l)}" lemma collect_locks'I: "must_acquire_lock (las $ l) \<Longrightarrow> l \<in> collect_locks' las" by(simp add: collect_locks'_def) lemma collect_locks'E: "\<lbrakk> l \<in> collect_locks' las; must_acquire_lock (las $ l) \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P" by(simp add: collect_locks'_def) lemma collect_locks'_subset_collect_locks: "collect_locks' las \<subseteq> collect_locks las" by(auto simp add: collect_locks'_def collect_locks_def intro: must_acquire_lock_contains_lock) definition lock_ok_las' :: "('l,'t) locks \<Rightarrow> 't \<Rightarrow> 'l lock_actions \<Rightarrow> bool" where "lock_ok_las' ls t las \<equiv> \<forall>l. lock_actions_ok' (ls $ l) t (las $ l)" lemma lock_ok_las'I: "(\<And>l. lock_actions_ok' (ls $ l) t (las $ l)) \<Longrightarrow> lock_ok_las' ls t las" by(simp add: lock_ok_las'_def) lemma lock_ok_las'D: "lock_ok_las' ls t las \<Longrightarrow> lock_actions_ok' (ls $ l) t (las $ l)" by(simp add: lock_ok_las'_def) lemma not_lock_ok_las'_conv: "\<not> lock_ok_las' ls t las \<longleftrightarrow> (\<exists>l. 
\<not> lock_actions_ok' (ls $ l) t (las $ l))" by(simp add: lock_ok_las'_def) lemma lock_ok_las'_code: "lock_ok_las' ls t las = finfun_All ((\<lambda>(l, la). lock_actions_ok' l t la) \<circ>$ ($ls, las$))" by(simp add: lock_ok_las'_def finfun_All_All o_def) lemma lock_ok_las'_collect_locks'_may_lock: assumes lot': "lock_ok_las' ls t las" and mayl: "\<forall>l \<in> collect_locks' las. may_lock (ls $ l) t" and l: "l \<in> collect_locks las" shows "may_lock (ls $ l) t" proof(cases "l \<in> collect_locks' las") case True thus ?thesis using mayl by auto next case False hence nmal: "\<not> must_acquire_lock (las $ l)" by(auto intro: collect_locks'I) from l have locklasl: "Lock \<in> set (las $ l)" by(rule collect_locksD) then obtain ys zs where las: "las $ l = ys @ Lock # zs" and notin: "Lock \<notin> set ys" by(auto dest: split_list_first) from lot' have "lock_actions_ok' (ls $ l) t (las $ l)" by(auto simp add: lock_ok_las'_def) thus ?thesis proof(induct rule: lock_actions_ok'E) case ok with locklasl show ?thesis by -(rule lock_actions_ok_Lock_may_lock) next case (Lock YS ZS) note LAS = \<open>las $ l = YS @ Lock # ZS\<close> note lao = \<open>lock_actions_ok (ls $ l) t YS\<close> note nml = \<open>\<not> may_lock (upd_locks (ls $ l) t YS) t\<close> from LAS las nmal notin have "Unlock \<in> set YS" by -(erule contrapos_np, auto simp add: must_acquire_lock_append append_eq_append_conv2 append_eq_Cons_conv) then obtain ys' zs' where YS: "YS = ys' @ Unlock # zs'" and unlock: "Unlock \<notin> set ys'" by(auto dest: split_list_first) from YS las LAS lao have lao': "lock_actions_ok (ls $ l) t (ys' @ [Unlock])" by(auto) hence "has_lock (upd_locks (ls $ l) t ys') t" by simp hence "may_lock (upd_locks (ls $ l) t ys') t" by(rule has_lock_may_lock) moreover from lao' have "lock_actions_ok (ls $ l) t ys'" by simp ultimately show ?thesis by simp qed qed lemma lock_actions_ok'_must_acquire_lock_lock_actions_ok: "\<lbrakk> lock_actions_ok' l t Ls; must_acquire_lock Ls \<longrightarrow> may_lock l t\<rbrakk> \<Longrightarrow> lock_actions_ok l t Ls" proof(induct l t Ls rule: lock_actions_ok.induct) case 1 thus ?case by simp next case (2 l t L LS) thus ?case proof(cases "L = Lock \<or> L = Unlock") case True with 2 show ?thesis by(auto simp add: lock_actions_ok'_iff Cons_eq_append_conv intro: has_lock_may_lock) qed(cases L, auto) qed lemma lock_ok_las'_collect_locks_lock_ok_las: assumes lol': "lock_ok_las' ls t las" and clml: "\<And>l. l \<in> collect_locks las \<Longrightarrow> may_lock (ls $ l) t" shows "lock_ok_las ls t las" proof(rule lock_ok_lasI) fix l from lol' have "lock_actions_ok' (ls $ l) t (las $ l)" by(rule lock_ok_las'D) thus "lock_actions_ok (ls $ l) t (las $ l)" proof(rule lock_actions_ok'_must_acquire_lock_lock_actions_ok[OF _ impI]) assume mal: "must_acquire_lock (las $ l)" thus "may_lock (ls $ l) t" by(auto intro!: clml collect_locksI elim: must_acquire_lock_contains_lock ) qed qed lemma lock_ok_las'_into_lock_on_las: "\<lbrakk>lock_ok_las' ls t las; \<And>l. l \<in> collect_locks' las \<Longrightarrow> may_lock (ls $ l) t\<rbrakk> \<Longrightarrow> lock_ok_las ls t las" by (metis lock_ok_las'_collect_locks'_may_lock lock_ok_las'_collect_locks_lock_ok_las) end
using BinaryProvider using Pkg tarball_url = "https://github.com/kdheepak/FIGletFonts/archive/v0.5.0.tar.gz" hash = "39f46c840ba035ba3b52aebf46123e6eda7393a7a18c5e6e02fb68c8cb50a33d" prefix = Prefix(@__DIR__) !isdefined(Pkg, :Artifacts) && !isinstalled(tarball_url, hash, prefix=prefix) && install(tarball_url, hash, prefix=prefix)
using CMPUtils recodeaudio("wav/W") #recodeaudio("wav/W") #recodeaudio("wav/1") #recodeaudio("wav/2") #recodeaudio("wav/34") #recodeaudio("wav/R") # Do the above one at a time # and restart VSCode in between # And don't put anything else in the terminal, including the comments! # Only run the two lines "using CMPUtils" and "recodeaudio(...)" # Otherwise the terminal input gets mangled # Features: 5 = Recommended # 13 = Maximum (see CMPUtils) # TODO: try this with 13 and see how the classifiers like it
If $F$ is a trivial filter, then $f$ converges to $y$ in $F$.
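A hedged symbolic gloss of the claim above (reading "trivial filter" as the bottom filter $\bot$, which is an assumption about the intended meaning): $F = \bot \Longrightarrow (f \longrightarrow y)\,F$. This holds vacuously, because in the filter $\bot$ every property holds eventually.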
Formal statement is: lemma frontier_Int: "frontier(S \<inter> T) = closure(S \<inter> T) \<inter> (frontier S \<union> frontier T)" Informal statement is: The frontier of the intersection of two sets is the closure of the intersection of the two sets, intersected with the union of the frontiers of the two sets.
! This file is part of mctc-lib. ! ! Licensed under the Apache License, Version 2.0 (the "License"); ! you may not use this file except in compliance with the License. ! You may obtain a copy of the License at ! ! http://www.apache.org/licenses/LICENSE-2.0 ! ! Unless required by applicable law or agreed to in writing, software ! distributed under the License is distributed on an "AS IS" BASIS, ! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ! See the License for the specific language governing permissions and ! limitations under the License. !> Provides a light-weight testing framework for usage in projects depending on !> the tool chain library. !> !> Testsuites are defined by a [[collect_interface]] returning a set of !> [[unittest_type]] objects. To create a new test use the [[new_unittest]] !> constructor, which requires a test identifier and a procedure with a !> [[test_interface]] compatible signature. The error status is communicated !> by the allocation status of an [[error_type]]. !> !> The necessary boilerplate code to setup the test entry point is just !> !>```fortran !>program tester !> use, intrinsic :: iso_fortran_env, only : error_unit !> use mctc_env_testing, only : run_testsuite, new_testsuite, testsuite_type !> use test_suite1, only : collect_suite1 !> use test_suite2, only : collect_suite2 !> implicit none !> integer :: stat, ii !> type(testsuite_type), allocatable :: testsuites(:) !> character(len=*), parameter :: fmt = '("#", *(1x, a))' !> !> stat = 0 !> !> testsuites = [ & !> & new_testsuite("suite1", collect_suite1), & !> & new_testsuite("suite2", collect_suite2) & !> & ] !> !> do ii = 1, size(testsuites) !> write(error_unit, fmt) "Testing:", testsuites(ii)%name !> call run_testsuite(testsuites(ii)%collect, error_unit, stat) !> end do !> !> if (stat > 0) then !> write(error_unit, '(i0, 1x, a)') stat, "test(s) failed!" !> error stop !> end if !> !>end program tester !>``` !> !> Every test is defined in a separate module using a ``collect`` function, which !> is exported and added to the ``testsuites`` array in the test runner. !> All test have a simple interface with just an allocatable [[error_type]] as !> output to provide the test results. !> !>```fortran !>module test_suite1 !> use mctc_env_testing, only : new_unittest, unittest_type, error_type, check !> implicit none !> private !> !> public :: collect_suite1 !> !>contains !> !>!> Collect all exported unit tests !>subroutine collect_suite1(testsuite) !> !> Collection of tests !> type(unittest_type), allocatable, intent(out) :: testsuite(:) !> !> testsuite = [ & !> & new_unittest("valid", test_valid), & !> & new_unittest("invalid", test_invalid, should_fail=.true.) & !> & ] !> !>end subroutine collect_suite1 !> !>subroutine test_valid(error) !> type(error_type), allocatable, intent(out) :: error !> ! ... !>end subroutine test_valid !> !>subroutine test_invalid(error) !> type(error_type), allocatable, intent(out) :: error !> ! ... !>end subroutine test_invalid !> !>end module test_suite1 !>``` !> !> For an example setup checkout the ``test/`` directory in this project. 
module mctc_env_testing use mctc_env_error, only : error_type, mctc_stat use mctc_env_accuracy, only : sp, dp, i1, i2, i4, i8 implicit none private public :: run_testsuite, run_selected, new_unittest, new_testsuite public :: select_test, select_suite public :: unittest_type, testsuite_type, error_type public :: check, test_failed public :: test_interface, collect_interface interface check module procedure :: check_stat module procedure :: check_logical module procedure :: check_float_sp module procedure :: check_float_dp module procedure :: check_int_i1 module procedure :: check_int_i2 module procedure :: check_int_i4 module procedure :: check_int_i8 module procedure :: check_bool module procedure :: check_string end interface check abstract interface !> Entry point for tests subroutine test_interface(error) import :: error_type !> Error handling type(error_type), allocatable, intent(out) :: error end subroutine test_interface end interface !> Declaration of a unit test type :: unittest_type !> Name of the test character(len=:), allocatable :: name !> Entry point of the test procedure(test_interface), pointer, nopass :: test => null() !> Whether test is supposed to fail logical :: should_fail = .false. end type unittest_type abstract interface !> Collect all tests subroutine collect_interface(testsuite) import :: unittest_type !> Collection of tests type(unittest_type), allocatable, intent(out) :: testsuite(:) end subroutine collect_interface end interface !> Collection of unit tests type :: testsuite_type !> Name of the testsuite character(len=:), allocatable :: name !> Entry point of the test procedure(collect_interface), pointer, nopass :: collect => null() end type testsuite_type character(len=*), parameter :: fmt = '(1x, *(1x, a))' character(len=*), parameter :: indent = repeat(" ", 5) // repeat(".", 3) contains !> Driver for testsuite subroutine run_testsuite(collect, unit, stat) !> Collect tests procedure(collect_interface) :: collect !> Unit for IO integer, intent(in) :: unit !> Number of failed tests integer, intent(inout) :: stat type(unittest_type), allocatable :: testsuite(:) integer :: ii call collect(testsuite) !$omp parallel do shared(testsuite, unit) reduction(+:stat) do ii = 1, size(testsuite) !$omp critical(mctc_env_testsuite) write(unit, '(1x, 3(1x, a), 1x, "(", i0, "/", i0, ")")') & & "Starting", testsuite(ii)%name, "...", ii, size(testsuite) !$omp end critical(mctc_env_testsuite) call run_unittest(testsuite(ii), unit, stat) end do end subroutine run_testsuite !> Driver for selective testing subroutine run_selected(collect, name, unit, stat) !> Collect tests procedure(collect_interface) :: collect !> Name of the selected test character(len=*), intent(in) :: name !> Unit for IO integer, intent(in) :: unit !> Number of failed tests integer, intent(inout) :: stat type(unittest_type), allocatable :: testsuite(:) integer :: ii call collect(testsuite) ii = select_test(testsuite, name) if (ii > 0 .and. 
ii <= size(testsuite)) then call run_unittest(testsuite(ii), unit, stat) else write(unit, fmt) "Available tests:" do ii = 1, size(testsuite) write(unit, fmt) "-", testsuite(ii)%name end do stat = -huge(ii) end if end subroutine run_selected !> Run a selected unit test subroutine run_unittest(test, unit, stat) !> Unit test type(unittest_type), intent(in) :: test !> Unit for IO integer, intent(in) :: unit !> Number of failed tests integer, intent(inout) :: stat type(error_type), allocatable :: error call test%test(error) !$omp critical(mctc_env_testsuite) if (allocated(error) .neqv. test%should_fail) then if (test%should_fail) then write(unit, fmt) indent, test%name, "[UNEXPECTED PASS]" else write(unit, fmt) indent, test%name, "[FAILED]" end if stat = stat + 1 else if (test%should_fail) then write(unit, fmt) indent, test%name, "[EXPECTED FAIL]" else write(unit, fmt) indent, test%name, "[PASSED]" end if end if if (allocated(error)) then write(unit, fmt) "Message:", error%message end if !$omp end critical(mctc_env_testsuite) end subroutine run_unittest !> Select a unit test from all available tests function select_test(tests, name) result(pos) !> Name identifying the test suite character(len=*), intent(in) :: name !> Available unit tests type(unittest_type) :: tests(:) !> Selected test suite integer :: pos integer :: it pos = 0 do it = 1, size(tests) if (name == tests(it)%name) then pos = it exit end if end do end function select_test !> Select a test suite from all available suites function select_suite(suites, name) result(pos) !> Name identifying the test suite character(len=*), intent(in) :: name !> Available test suites type(testsuite_type) :: suites(:) !> Selected test suite integer :: pos integer :: it pos = 0 do it = 1, size(suites) if (name == suites(it)%name) then pos = it exit end if end do end function select_suite !> Register a new unit test function new_unittest(name, test, should_fail) result(self) !> Name of the test character(len=*), intent(in) :: name !> Entry point for the test procedure(test_interface) :: test !> Whether test is supposed to error or not logical, intent(in), optional :: should_fail !> Newly registered test type(unittest_type) :: self self%name = name self%test => test if (present(should_fail)) self%should_fail = should_fail end function new_unittest !> Register a new testsuite function new_testsuite(name, collect) result(self) !> Name of the testsuite character(len=*), intent(in) :: name !> Entry point to collect tests procedure(collect_interface) :: collect !> Newly registered testsuite type(testsuite_type) :: self self%name = name self%collect => collect end function new_testsuite subroutine check_stat(error, stat, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Status of operation integer, intent(in) :: stat !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (stat /= mctc_stat%success) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Non-zero exit code encountered", more) end if end if end subroutine check_stat subroutine check_logical(error, expression, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Result of logical operator logical, intent(in) :: expression !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), 
intent(in), optional :: more if (.not.expression) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Condition not fullfilled", more) end if end if end subroutine check_logical subroutine check_float_dp(error, actual, expected, message, more, thr, rel) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found floating point value real(dp), intent(in) :: actual !> Expected floating point value real(dp), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more !> Allowed threshold for matching floating point values real(dp), intent(in), optional :: thr !> Check for relative errors instead logical, intent(in), optional :: rel logical :: relative real(dp) :: diff, threshold if (present(thr)) then threshold = thr else threshold = epsilon(expected) end if if (present(rel)) then relative = rel else relative = .false. end if if (relative) then diff = abs(actual - expected) / expected else diff = abs(actual - expected) end if if (diff > threshold) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Floating point value missmatch", more) end if end if end subroutine check_float_dp subroutine check_float_sp(error, actual, expected, message, more, thr, rel) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found floating point value real(sp), intent(in) :: actual !> Expected floating point value real(sp), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more !> Allowed threshold for matching floating point values real(sp), intent(in), optional :: thr !> Check for relative errors instead logical, intent(in), optional :: rel logical :: relative real(sp) :: diff, threshold if (present(thr)) then threshold = thr else threshold = epsilon(expected) end if if (present(rel)) then relative = rel else relative = .false. 
end if if (relative) then diff = abs(actual - expected) / expected else diff = abs(actual - expected) end if if (diff > threshold) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Floating point value missmatch", more) end if end if end subroutine check_float_sp subroutine check_int_i1(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found integer value integer(i1), intent(in) :: actual !> Expected integer value integer(i1), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected /= actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Integer value missmatch", more) end if end if end subroutine check_int_i1 subroutine check_int_i2(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found integer value integer(i2), intent(in) :: actual !> Expected integer value integer(i2), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected /= actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Integer value missmatch", more) end if end if end subroutine check_int_i2 subroutine check_int_i4(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found integer value integer(i4), intent(in) :: actual !> Expected integer value integer(i4), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected /= actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Integer value missmatch", more) end if end if end subroutine check_int_i4 subroutine check_int_i8(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found integer value integer(i8), intent(in) :: actual !> Expected integer value integer(i8), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected /= actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Integer value missmatch", more) end if end if end subroutine check_int_i8 subroutine check_bool(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found boolean value logical, intent(in) :: actual !> Expected boolean value logical, intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected .neqv. 
actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Logical value missmatch", more) end if end if end subroutine check_bool subroutine check_string(error, actual, expected, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> Found boolean value character(len=*), intent(in) :: actual !> Expected boolean value character(len=*), intent(in) :: expected !> A detailed message describing the error character(len=*), intent(in), optional :: message !> Another line of error message character(len=*), intent(in), optional :: more if (expected /= actual) then if (present(message)) then call test_failed(error, message, more) else call test_failed(error, "Character value missmatch", more) end if end if end subroutine check_string subroutine test_failed(error, message, more) !> Error handling type(error_type), allocatable, intent(out) :: error !> A detailed message describing the error character(len=*), intent(in) :: message !> Another line of error message character(len=*), intent(in), optional :: more allocate(error) error%stat = mctc_stat%fatal if (present(more)) then error%message = message // new_line('a') // more else error%message = message end if end subroutine test_failed end module mctc_env_testing
-- ---------------------------------------------------------------- [ Date.idr ] -- Module : Data.Date -- Description : Simple data types and helper functions to represent dates. -- -- License : This code is distributed under the MIT license. See the -- file LICENSE in the root directory for its full text. -- --------------------------------------------------------------------- [ EOH ] module Data.Date %access public export -- -------------------------------------------------------------- [ Data Types ] ||| A day of the week. data Day = Monday | Tuesday | Wednesday | Thursday | Friday | Saturday | Sunday %name Day day, day1, day2 ||| A month of the year. data Month = January | February | March | April | May | June | July | August | September | October | November | December %name Month month, month1, month2 -- ------------------------------------------------------ [ Eq Implementations ] Eq Day where Monday == Monday = True Tuesday == Tuesday = True Wednesday == Wednesday = True Thursday == Thursday = True Friday == Friday = True Saturday == Saturday = True Sunday == Sunday = True _ == _ = False Eq Month where January == January = True February == February = True March == March = True April == April = True May == May = True June == June = True July == July = True August == August = True September == September = True October == October = True November == November = True December == December = True _ == _ = False -- ---------------------------------------------------- [ Show Implementations ] Show Day where show Monday = "Monday" show Tuesday = "Tuesday" show Wednesday = "Wednesday" show Thursday = "Thursday" show Friday = "Friday" show Saturday = "Saturday" show Sunday = "Sunday" Show Month where show January = "January" show February = "February" show March = "March" show April = "April" show May = "May" show June = "June" show July = "July" show August = "August" show September = "September" show October = "October" show November = "November" show December = "December" -- -------------------------------------------------------- [ Helper Functions ] namespace Days ||| Convert a string to a day, if possible, otherwise return `Nothing`. ||| @ str A string representing a day. fromString : (str : String) -> Maybe Day fromString "Monday" = Just Monday fromString "Tuesday" = Just Tuesday fromString "Wednesday" = Just Wednesday fromString "Thursday" = Just Thursday fromString "Friday" = Just Friday fromString "Saturday" = Just Saturday fromString "Sunday" = Just Sunday fromString _ = Nothing namespace Months ||| Convert a string to a month, if possible, otherwise return `Nothing`. ||| @ str A string representing a month. fromString : (str : String) -> Maybe Month fromString "January" = Just January fromString "February" = Just February fromString "March" = Just March fromString "April" = Just April fromString "May" = Just May fromString "June" = Just June fromString "July" = Just July fromString "August" = Just August fromString "September" = Just September fromString "October" = Just October fromString "November" = Just November fromString "December" = Just December fromString _ = Nothing -- -------------------------------------------------------------------- [ Date ] ||| A date consists of a day, month, date and year. record Date where ||| Make a date. constructor MkDate ||| A day. DDay : Day ||| A month. DMon : Month ||| A date. DDate : Integer -- Fin 32 ||| A year. DYear : Integer -- TODO: data Time where ... 
implementation Show Date where show (MkDate day month date year) = concat $ List.intersperse ", " [ show day , show month ++ " " ++ show date , show year ] implementation Eq Date where (MkDate dayA monthA dateA yearA) == (MkDate dayB monthB dateB yearB) = dayA == dayB && monthA == monthB && dateA == dateB && yearA == yearB -- --------------------------------------------------------------------- [ EOF ]
diy tile shower pan shower floor pans for tile a luxury best ideas about tile shower pan on diy tiled shower tray. diy tile shower pan shower pan shower floor base tile basin product details pan inside for inspirations 8 diy tiled shower base. diy tile shower pan save diy tile ready shower pan. diy tile shower pan how diy tile shower pan installation. diy tile shower pan build a tile shower how to build a tiled shower in your home with steps for build a tile shower form shower pans diy ceramic shower base. diy tile shower pan showers shower pan making a tile shower pan a inspire shower pan final steps diy ceramic tile shower floor. diy tile shower pan be on diy tile shower floor. diy tile shower pan how to a shower pan the tile shop diy shower pan installation. diy tile shower pan tile shower pan installation video floor pans lovely ceramic base p diy shower tile installation video. diy tile shower pan shower basin for tile a searching for best ideas about tile shower pan on diy tile shower installation. diy tile shower pan shower tile shower pan kit style the best home decor base shower pan for tile diy pebble tile shower floor. diy tile shower pan tile shower full size of shower corner shelf home depot tile shower curb designs tile tile shower diy shower tile installation video. diy tile shower pan mortar bed complete diy tile ready shower pan. diy tile shower pan how to build a shower base for tile home design ideas and pictures diy tile shower base. diy tile shower pan tile shower tile shower pan installation com pebble tile shower floor diy ceramic tile shower pan. diy tile shower pan shower pans for tile large spring rain pebble tile shower pan tile shower pan installation diy ceramic shower base. diy tile shower pan tile shower shower tutorial from musings 4 tile ready shower pan diy tile ready shower pan. diy tile shower pan master bath shower liner master bath shower floor diy tile shower floor. diy tile shower pan 5 tips for a champagne shower on a beer budget shower base tile showers and custom shower diy tiled shower tray. diy tile shower pan medium size of pan pans for tile showers buy panels with diy tile shower pan installation. diy tile shower pan custom tile shower pan kit a comfy bathroom install best cars reviews for diy tile ready shower pan. diy tile shower pan bathroom renovation how to build a custom tiled shower pan apartment therapy tutorials diy tile shower pan video. diy tile shower pan build shower pan full size of shower building a shower base shower stalls bathroom shower pans diy tile ready shower pan. diy tile shower pan showers custom shower pan shower pan vs tile shower base for tile custom shower pan diy ceramic tile shower floor. diy tile shower pan installing tile shower floor shower base for tile walls shower tile installation video tile shower floor diy tile ready shower pan. diy tile shower pan image titled make a shower pan step 1 diy ceramic tile shower floor. diy tile shower pan tile shower pan diy tile shower pan video. diy tile shower pan shower installing tile shower pan before and after love tile shower large size of tile diy tile shower base.
import .love02_backward_proofs_exercise_sheet /-! # LoVe Homework 3: Forward Proofs Homework must be done individually. -/ set_option pp.beta true set_option pp.generalized_field_notation false namespace LoVe /-! ## Question 1 (6 points): Connectives and Quantifiers 1.1 (2 points). We have proved or stated three of the six possible implications between `excluded_middle`, `peirce`, and `double_negation`. Prove the three missing implications using structured proofs, exploiting the three theorems we already have. -/ namespace backward_proofs #check peirce_of_em #check dn_of_peirce #check sorry_lemmas.em_of_dn lemma peirce_of_dn : double_negation → peirce := sorry lemma em_of_peirce : peirce → excluded_middle := sorry lemma dn_of_em : excluded_middle → double_negation := sorry end backward_proofs /-! 1.2 (4 points). Supply a structured proof of the commutativity of `∧` under an `∃` quantifier, using no other lemmas than the introduction and elimination rules for `∃`, `∧`, and `↔`. -/ lemma exists_and_commute {α : Type} (p q : α → Prop) : (∃x, p x ∧ q x) ↔ (∃x, q x ∧ p x) := sorry /-! ## Question 2 (3 points): Fokkink Logic Puzzles If you have studied Logic and Sets with Prof. Fokkink, you will know he is fond of logic puzzles. This question is a tribute. Recall the following tactical proof: -/ lemma weak_peirce : ∀a b : Prop, ((((a → b) → a) → a) → b) → b := begin intros a b habaab, apply habaab, intro habaa, apply habaa, intro ha, apply habaab, intro haba, apply ha end /-! 2.1 (1 point). Prove the same lemma again, this time by providing a proof term. Hint: There is an easy way. -/ lemma weak_peirce₂ : ∀a b : Prop, ((((a → b) → a) → a) → b) → b := sorry /-! 2.2 (2 points). Prove the same Fokkink lemma again, this time by providing a structured proof, with `assume`s and `show`s. -/ lemma weak_peirce₃ : ∀a b : Prop, ((((a → b) → a) → a) → b) → b := sorry end LoVe
lemma distr_cong: "M = K \<Longrightarrow> sets N = sets L \<Longrightarrow> (\<And>x. x \<in> space M \<Longrightarrow> f x = g x) \<Longrightarrow> distr M N f = distr K L g"
module Order data InclusiveEither : (typeLeft : Type) -> (typRight : Type) -> Type where OnlyLeft : typLeft -> Not typRight -> InclusiveEither typLeft typRight OnlyRight : Not typLeft -> typRight -> InclusiveEither typLeft typRight Both : typLeft -> typRight -> InclusiveEither typLeft typRight |||Type of proof that a relation is reflexive isReflexive : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isReflexive {typ} r = (a : typ) -> (r a a) |||Type of proof that a relation is symmetric isSymmetric : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isSymmetric {typ} r = (a : typ) -> (b : typ) -> (r a b) -> (r b a) |||Type of proof that a relation is anti-symmetric isAntiSymmetric : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isAntiSymmetric {typ} r = (a : typ) -> (b : typ) -> (r a b) -> (r b a) -> (a = b) |||Type of proof that a relation is transitive isTransitive : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isTransitive {typ} r = (a : typ) -> (b : typ) -> (c : typ) -> (r a b) -> (r b c) -> (r a c) |||Type of proof that a relation is an equivalence isEquivalence : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isEquivalence {typ} r = (isReflexive r, isSymmetric r, isTransitive r) |||Type of proof that a relation is a partial order isPartialOrder : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isPartialOrder {typ} r = (isReflexive r, isAntiSymmetric r, isTransitive r) |||Type of proof that a relation is a total order isTotalOrder : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isTotalOrder {typ} r = (isPartialOrder r, (a : typ) -> (b : typ) -> (InclusiveEither (r a b) (r b a)))
%+========================================================================+ %| | %| This script uses the GYPSILAB toolbox for Matlab | %| | %| COPYRIGHT : Matthieu Aussal, Marc Bakry (c) 2017-2019. | %| PROPERTY : Centre de Mathematiques Appliquees, Ecole polytechnique, | %| route de Saclay, 91128 Palaiseau, France. All rights reserved. | %| LICENCE : This program is free software, distributed in the hope that| %| it will be useful, but WITHOUT ANY WARRANTY. Natively, you can use, | %| redistribute and/or modify it under the terms of the GNU General Public| %| License, as published by the Free Software Foundation (version 3 or | %| later, http://www.gnu.org/licenses). For private use, dual licencing | %| is available, please contact us to activate a "pay for remove" option. | %| CONTACT : [email protected] | %| [email protected] | %| WEBSITE : www.cmap.polytechnique.fr/~aussal/gypsilab      | %| | %| Please acknowledge the gypsilab toolbox in programs or publications in | %| which you use it. | %|________________________________________________________________________| %| '&` | | %| # | FILE : nrtHmxVibroSlab2d.m | %| # | VERSION : 0.55 | %| _#_ | AUTHOR(S) : Matthieu Aussal & Marc Bakry | %| ( # ) | CREATION : 14.03.2019 | %| / 0 \ | LAST MODIF : | %| ( === ) | SYNOPSIS : | %| `---' | | %+========================================================================+ % Cleaning clear all close all clc % Gypsilab path run('../../addpathGypsilab.m') % Accuracy tol = 1e-3 % Width of the slab e = 0.5 % Frequency f = 900:500:4000 % Incident direction (from bottom) X0 = [0 1 0]; % Exterior domain (water) rho0 = 1000; % density (kg.m3) c0 = 1500; % celerity (m.s-1) k0 = 2*pi/c0.*f; % wave-number (m-1) lam0 = c0./f; % wave-length (m) % Interior domain (different from water) rhoS = 2*rho0; % density (kg.m3) cL = 2*c0; % celerity of longitudinal waves (m.s-1) kL = 2*pi.*f./cL; % wave-number of longitudinal waves (m-1) lamL = real(cL)./f; % wavelength of longitudinal waves (m) cT = 0; % celerity of transverse waves (m.s-1) kT = 2*pi.*f./cT; % wave-number of transverse waves (m-1) lamT = real(cT)./f; % wavelength of transverse waves (m) % Solution (pressure) sol = zeros(2,length(f)); % Loop for each frequency for i = 1:length(f) % Minimum wavelength tmp = [lam0(i),lamL(i),lamT(i)]; lmin = min(tmp(tmp>0)); % Slab mesh L = 60 * lmin; % 60 wavelength to simulate infinite slab nx = ceil(L/lmin * 6)+1; % 6 node per wavelength for L ny = ceil(e/lmin * 12)+1; % 12 node per wavelength for e N = nx * ny; % Total number of nodes mesh = mshSquare(N,[L e]) % Radiative mesh (fixed number of nodes) radiat = mshSquare(1e3,[L L]); % Boundary bound = swap(mesh.bnd) % Measurement points for trans and refl coeff (1 wavelenth from bound) Xmes = [0 -e/2-lmin 0 ; 0 e/2+lmin 0]; % Cut-off function (50% full, 10% decrease) cutoff = vibsCutoff(1,L/5,L/10); % Green kernel function Gxy = @(X,Y) femGreenKernel(X,Y,'[H0(kr)]',k0(i)); gradyGxy{1} = @(X,Y) femGreenKernel(X,Y,'grady[H0(kr)]1',k0(i)); gradyGxy{2} = @(X,Y) femGreenKernel(X,Y,'grady[H0(kr)]2',k0(i)); gradyGxy{3} = @(X,Y) femGreenKernel(X,Y,'grady[H0(kr)]3',k0(i)); % Plane wave function PW = @(X) exp(1i*k0(i)*X*X0') .* cutoff(X); gradxPW{1} = @(X) 1i*k0(i)*X0(1) .* PW(X); gradxPW{2} = @(X) 1i*k0(i)*X0(2) .* PW(X); gradxPW{3} = @(X) 1i*k0(i)*X0(3) .* PW(X); % Coupling coeff for Brackage-Werner simulation beta = 1i*k0(i); % Quadrature and finite elements (volumn) omega = dom(mesh,3); U = fem(mesh,'P1'); % Quadrature and finite elements (boundary) sigma = dom(bound,3); u = 
fem(bound,'P1'); % Left-hand side tic [A,B,C,D] = vibsHmxBlockOperator(omega,U,sigma,u,cL,cT,rhoS,c0,rho0,f(i),tol); toc % Add dirichlet condition to x unknows (penalization) A(sub2ind(size(A),1:length(U),1:length(U))) = 1e15; % Right-hand side V = cell(3,1); V{1} = - integral(sigma,ntimes(U,1),PW); V{2} = - integral(sigma,ntimes(U,2),PW); V{3} = integral(sigma,ntimes(u),gradxPW); % Resolution with Schur complement Fa = decomposition(A); LHS = @(V) D*V - C*(Fa \ (B.Ml*(B.Mr*V)) ); RHS = V{end} - C*(Fa \ cell2mat(V(1:end-1)) ); mu = mgcr(LHS,RHS,[],tol,100); lambda = beta*mu; % Measure of refexive and transmitted coeff tic Pmes = 1i/4 .* integral(Xmes,sigma,Gxy,u)*lambda - ... 1i/4 .* integral(Xmes,sigma,gradyGxy,ntimes(u))*mu; Pmes(2) = Pmes(2) + PW(Xmes(2,:)); toc % Save solution sol(:,i) = Pmes; end % Analytical solution tic c = ones(length(f),1) * [c0 cL c0]; rho = [rho0 rhoS rho0]; [R,T] = slabVibro(f,rho,c,e); toc % Comparison (db) ref = 20*log10(abs([R ; T])); sol = 20*log10(abs(sol)); norm(ref-sol)/norm(ref) % Graphical representation figure(100) subplot(1,2,1) plot(f,ref(1,:),'r',f,sol(1,:),'b+') grid on title('Reflexion coeffiscient') legend({'Analytical','Numerical'}) xlabel('Frequency (Hz)') ylabel('Amplitude (dB)') subplot(1,2,2) plot(f,ref(2,:),'r',f,sol(2,:),'b+') grid on title('Transmission coeffiscient') legend({'Analytical','Numerical'}) xlabel('Frequency (Hz)') ylabel('Amplitude (dB)') disp('~~> Michto gypsilab !')
The image of the path $z + r \exp(i \theta)$ for $\theta$ in the closed segment $[s, t]$ is the set of points $z + r \exp(i \theta)$ for $\theta$ in the closed segment $[s, t]$.
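In symbols, a hedged restatement (writing the path as $\gamma(\theta) = z + r \exp(i \theta)$ and $[s,t]$ for the closed segment between $s$ and $t$, regardless of order; these notations are not taken from the row itself): the image of $\gamma$ restricted to $[s,t]$ equals $\{\, z + r \exp(i \theta) \mid \theta \in [s,t] \,\}$.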
module LatticeBasisReduction export gram_schmidt, LLL, diophantine, gauss, sda include("misc.jl") include("gram_schmidt.jl") include("diophantine.jl") include("gauss.jl") include("sda.jl") include("lll.jl") end
K5 went on a visit to "Dolchezze" bakery, next to the Kinder. We taught some vocabulary in English to the employees and owner, and they taught us how they do their job. We learnt a lot and... We received some cookies at the end! Thank you very much "Dolchezze"!!! We listened to another version of the story.

A predicate $p$ holds eventually at infinity if and only if there exists a positive number $b$ such that $p$ holds for all $x$ with $\|x\| \geq b$.
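A hedged restatement as a worked equation (the notation is an assumption, not from the source): $(\text{eventually } p \text{ at infinity}) \iff \exists b > 0.\ \forall x.\ \lVert x \rVert \ge b \implies p(x)$.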
""" utility functions helping managing result list """ module ResultListUtils using CUDA,..MetaDataUtils, ..CUDAAtomicUtils export getResLinIndex,allocateResultLists,@addResult """ allocate memory on GPU for storing result list totalFpCount- total number of false positives TotalFNCount - total number of false negatives in the array first 3 entries will be x,y,z than isGold - 1 if it is related to gold pass dilatations , direction from which result was set and the iteration number in which it was covered in the second list we have the UInt32 Ids generated by the getResLinIndex function """ function allocateResultLists(totalFpCount,TotalFNCount) return CUDA.zeros(UInt16, (totalFpCount+ TotalFNCount+1),6 ) end#allocateResultList """ adding result to the result list at correct postion using data from metaData - so we get from the metadata offset and result counter metadata - 4 dimensional array holding metaData xMeta,yMeta,zMeta - x,y,z coordinates of block of intrest in meta Data resList - list of result (matrix to be more precise) where we will wrtie the results resListIndicies - list of indicies related to results x,y,z - coordinates where we found point of intrest dir - direction from which dilatation covering this voxel had happened queueNumber - what fp or fn queue we are intrested in modyfing now metaDataDims - dimensions of metadata array mainArrDims - dimensions of main array isGold - indicated is this a gold dilatation step (then it will evaluate to 1 otherwise 0 ) """ macro addResult(metaData ,xMeta,yMeta,zMeta, resList,x,y,z, dir,iterNumb,queueNumber,metaDataDims,mainArrDims ,isGold ) return esc(quote # linearIndex = resListPos = ($metaData[($xMeta+1),($yMeta+1),($zMeta+1), (getResOffsetsBeg() +$queueNumber) ]+atomicallyAddToSpot( metaData,(($xMeta+1) + ($yMeta)*$metaDataDims[1] + ($zMeta)*$metaDataDims[1]*$metaDataDims[2] + (getNewCountersBeg()+$queueNumber-1)*$metaDataDims[1]*$metaDataDims[2]*$metaDataDims[3]),UInt32(1) ))+1 # qn = $queueNumber # xm = $xMeta # ym = $yMeta # zm = $zMeta # xx = $x # yy= $y # zz=$z # dd= $dir # CUDA.@cuprint "\n resListPos $(resListPos) queueNumber $(qn) xMeta $(xm) yMeta $(ym) zMeta $(zm) x $(xx) y $(yy) z $(zz) dir $(Int64(dd)) \n " @inbounds $resList[ resListPos, 1]=$x @inbounds $resList[ resListPos, 2]=$y @inbounds $resList[ resListPos, 3]=$z @inbounds $resList[ resListPos, 4]= $isGold @inbounds $resList[ resListPos, 5]= $dir @inbounds $resList[ resListPos, 6]= $iterNumb # @inbounds $resListIndicies[resListPos]=getResLinIndex($x,$y,$z,$isGold,$mainArrDims) # CUDA.@cuprint "\n linIndex $(getResLinIndex(x,y,z,isGold,mainArrDims)) \n " #addResHelper(resListIndicies,resListPos,x) end)#quote end#addResult """ giver the result row that holds data about covered point and in what iteration, from what direction and in what pass it was covered resRow - array where first 3 entries are x,y,z positions then is gold, """ function getResLinIndex(x,y,z,isGold,mainArrDims)::UInt32 # last one is in order to differentiate between gold pass and other pass ... return x+ y*mainArrDims[1]+ z* mainArrDims[1]*mainArrDims[2]+ isGold*mainArrDims[1]*mainArrDims[2]*mainArrDims[3] end#getResLinIndex end#ResultListUtils
If two paths are homotopic, then they are homotopic in the opposite direction.
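One hedged symbolic reading (assuming "the opposite direction" means reversing both paths; the predicate and function names follow the usual Isabelle/HOL path library and are not taken from this row): $\mathrm{homotopic\_paths}\ s\ p\ q \Longrightarrow \mathrm{homotopic\_paths}\ s\ (\mathrm{reversepath}\ p)\ (\mathrm{reversepath}\ q)$.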
[STATEMENT] lemma open_Diff [continuous_intros, intro]: "open S \<Longrightarrow> closed T \<Longrightarrow> open (S - T)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>open S; closed T\<rbrakk> \<Longrightarrow> open (S - T) [PROOF STEP] by (simp add: closed_open Diff_eq open_Int)
State Before: G : Type u_1 inst✝² : Group G A : Type ?u.38880 inst✝¹ : AddGroup A N : Type ?u.38886 inst✝ : Group N s : Set G g✝ g : G ⊢ zpowers g = ⊥ ↔ g = 1 State After: no goals Tactic: rw [eq_bot_iff, zpowers_le, mem_bot]
Formal statement is: lemma content_dvd_contentI [intro]: "p dvd q \<Longrightarrow> content p dvd content q" Informal statement is: If $p$ divides $q$, then the content of $p$ divides the content of $q$.
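A small worked instance (taking the content of a polynomial to be the gcd of its coefficients, which is an assumption about the definition in use): with $p = 2x + 4$ and $q = 3p = 6x + 12$, we get $\mathrm{content}(p) = \gcd(2,4) = 2$ and $\mathrm{content}(q) = \gcd(6,12) = 6$, and indeed $2 \mid 6$.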
module IdrisJvm.System import public Data.So import IdrisJvm.IO import Java.Util import Java.Math import Java.Lang %access public export getArgs : JVM_IO (List String) getArgs = do argsList <- invokeStatic RuntimeClass "getProgramArgs" (JVM_IO JList) Iterator.toList !(JList.iterator argsList) time : JVM_IO Integer time = believe_me <$> invokeStatic RuntimeClass "time" (JVM_IO BigInteger) getEnv : String -> JVM_IO (Maybe String) getEnv = System.getenv system : String -> JVM_IO Int system = invokeStatic RuntimeClass "runCommand" (String -> JVM_IO Int) usleep : (i : Int) -> { auto prf : So (i >= 0 && i <= 1000000) } -> JVM_IO () usleep interval = invokeStatic RuntimeClass "usleep" (Int -> JVM_IO ()) interval exit : Int -> JVM_IO a exit = believe_me . Java.Lang.System.exit ||| Programs can either terminate successfully, or end in a caught ||| failure. data ExitCode : Type where ||| Terminate successfully. ExitSuccess : ExitCode ||| Program terminated for some prescribed reason. ||| ||| @errNo A non-zero numerical value indicating failure. ||| @prf Proof that the int value is non-zero. ExitFailure : (errNo : Int) -> {auto prf : So (not $ errNo == 0)} -> ExitCode ||| Terminate the program with an `ExitCode`. This code indicates the ||| success of the program's execution, and returns the success code ||| to the program's caller. ||| ||| @code The `ExitCode` for program. exitWith : (code : ExitCode) -> JVM_IO a exitWith ExitSuccess = exit 0 exitWith (ExitFailure errNo) = exit errNo ||| Exit the program indicating failure. exitFailure : JVM_IO a exitFailure = exitWith (ExitFailure 1) ||| Exit the program after a successful run. exitSuccess : JVM_IO a exitSuccess = exitWith ExitSuccess ||| Wall clock time record Clock where constructor MkClock seconds : Integer nanoseconds : Integer ||| Get the system's wall clock time. clockTime : JVM_IO Clock clockTime = believe_me <$> invokeStatic (Class "io/github/mmhelloworld/idrisjvm/runtime/IdrisSystem") "getIdrisClock" (JVM_IO IdrisObject)
{-# OPTIONS --without-K --rewriting #-} open import HoTT open import homotopy.PtdMapSequence open import groups.HomSequence open import cohomology.Theory module cohomology.PtdMapSequence {i} (CT : CohomologyTheory i) where open CohomologyTheory CT -- FIXME maybe this should be named [ap-C-seq], -- but I do not know how to name [C-seq-isemap]. -favonia C-seq : ∀ {X Y : Ptd i} (n : ℤ) → PtdMapSequence X Y → HomSequence (C n Y) (C n X) C-seq n (X ⊙⊣|) = C n X ⊣|ᴳ C-seq n (X ⊙→⟨ f ⟩ seq) = HomSeq-snoc (C-seq n seq) (C-fmap n f) C-comm-square : ∀ (n : ℤ) {X₀ X₁ Y₀ Y₁ : Ptd i} → {f₀ : X₀ ⊙→ Y₀} {f₁ : X₁ ⊙→ Y₁} {hX : X₀ ⊙→ X₁} {hY : Y₀ ⊙→ Y₁} → CommSquare (fst f₀) (fst f₁) (fst hX) (fst hY) → CommSquareᴳ (C-fmap n f₁) (C-fmap n f₀) (C-fmap n hY) (C-fmap n hX) C-comm-square n {f₀ = f₀} {f₁} {hX} {hY} (comm-sqr □) = comm-sqrᴳ λ y₁ → ∘-CEl-fmap n hX f₁ y₁ ∙ CEl-fmap-base-indep' n (λ x → ! (□ x)) y₁ ∙ CEl-fmap-∘ n hY f₀ y₁ C-seq-fmap : ∀ {X₀ X₁ Y₀ Y₁ : Ptd i} (n : ℤ) {seq₀ : PtdMapSequence X₀ Y₀} {seq₁ : PtdMapSequence X₁ Y₁} {hX : X₀ ⊙→ X₁} {hY : Y₀ ⊙→ Y₁} → PtdMapSeqMap seq₀ seq₁ hX hY → HomSeqMap (C-seq n seq₁) (C-seq n seq₀) (C-fmap n hY) (C-fmap n hX) C-seq-fmap n (hX ⊙↓|) = C-fmap n hX ↓|ᴳ C-seq-fmap n (hX ⊙↓⟨ □ ⟩ seq) = HomSeqMap-snoc (C-seq-fmap n seq) (C-comm-square n □) C-seq-isemap : ∀ {X₀ X₁ Y₀ Y₁ : Ptd i} (n : ℤ) {seq₀ : PtdMapSequence X₀ Y₀} {seq₁ : PtdMapSequence X₁ Y₁} {hX : X₀ ⊙→ X₁} {hY : Y₀ ⊙→ Y₁} {seq-map : PtdMapSeqMap seq₀ seq₁ hX hY} → is-⊙seq-equiv seq-map → is-seqᴳ-equiv (C-seq-fmap n seq-map) C-seq-isemap n {seq-map = h ⊙↓|} h-is-equiv = CEl-isemap n h h-is-equiv C-seq-isemap n {seq-map = h ⊙↓⟨ □ ⟩ seq} (h-is-equiv , seq-is-equiv) = is-seqᴳ-equiv-snoc (C-seq-isemap n seq-is-equiv) (CEl-isemap n h h-is-equiv) C-seq-emap : ∀ {X₀ X₁ Y₀ Y₁ : Ptd i} (n : ℤ) {seq₀ : PtdMapSequence X₀ Y₀} {seq₁ : PtdMapSequence X₁ Y₁} {hX : X₀ ⊙→ X₁} {hY : Y₀ ⊙→ Y₁} → PtdMapSeqEquiv seq₀ seq₁ hX hY → HomSeqEquiv (C-seq n seq₁) (C-seq n seq₀) (C-fmap n hY) (C-fmap n hX) C-seq-emap n (seq , seq-ise) = C-seq-fmap n seq , C-seq-isemap n seq-ise
theory StackFlattener imports Main begin primrec stack_to_mem :: "int list \<Rightarrow> (nat \<Rightarrow> int) \<Rightarrow> nat \<Rightarrow> int" where "stack_to_mem [] \<mu> k = (if k = 0 then 0 else \<mu> k)" | "stack_to_mem (i # is) \<mu> k = ( if k = 0 then 1 + int (length is) else if k = Suc (length is) then i else stack_to_mem is \<mu> k)" lemma stack_same: "k > 0 \<Longrightarrow> k \<le> length \<sigma> \<Longrightarrow> stack_to_mem \<sigma> \<mu> k = stack_to_mem \<sigma> \<mu>' k" by (induction \<sigma>) simp_all lemma [simp]: "k > length \<sigma> \<Longrightarrow> stack_to_mem \<sigma> \<mu> k = \<mu> k" by (induction \<sigma>) simp_all lemma [simp]: "(stack_to_mem (a # \<sigma>) \<mu>)(nat (1 + int (length \<sigma>)) := b) = stack_to_mem (b # \<sigma>) \<mu>" proof fix x show "((stack_to_mem (a # \<sigma>) \<mu>)(nat (1 + int (length \<sigma>)) := b)) x = stack_to_mem (b # \<sigma>) \<mu> x" by auto qed lemma [simp]: "(stack_to_mem (i1 # \<sigma>) \<mu>)(0 := int (length \<sigma>)) = stack_to_mem \<sigma> (stack_to_mem (i1 # \<sigma>) \<mu>)" proof fix x show "((stack_to_mem (i1 # \<sigma>) \<mu>)(0 := int (length \<sigma>))) x = stack_to_mem \<sigma> (stack_to_mem (i1 # \<sigma>) \<mu>) x" by (cases "x > length \<sigma>") (simp_all add: stack_same) qed lemma [simp]: "(stack_to_mem (i1 # i2 # \<sigma>) \<mu>)(0 := 1 + int (length \<sigma>)) = stack_to_mem (i2 # \<sigma>) (stack_to_mem (i1 # i2 # \<sigma>) \<mu>)" proof fix x show "((stack_to_mem (i1 # i2 # \<sigma>) \<mu>)(0 := 1 + int (length \<sigma>))) x = stack_to_mem (i2 # \<sigma>) (stack_to_mem (i1 # i2 # \<sigma>) \<mu>) x" by (cases "x > length \<sigma>") (simp_all add: stack_same) qed lemma [simp]: "(stack_to_mem \<sigma> \<mu>)(0 := int (length \<sigma>) + 1, nat (int (length \<sigma>) + 1) := d) = stack_to_mem (d # \<sigma>) \<mu>" proof fix x show "((stack_to_mem \<sigma> \<mu>)(0 := int (length \<sigma>) + 1, nat (int (length \<sigma>) + 1) := d)) x = stack_to_mem (d # \<sigma>) \<mu> x" by auto qed lemma [simp]: "n < length \<sigma> \<Longrightarrow> stack_to_mem \<sigma> \<mu> (nat (int (length \<sigma>) - int n)) = \<sigma> ! n" proof (induction \<sigma> arbitrary: n) case Nil thus ?case by simp next case Cons thus ?case by (induction n) fastforce+ qed lemma [simp]: "n < length \<sigma> \<Longrightarrow> (stack_to_mem \<sigma> \<mu>)(nat (int (length \<sigma>) - int n) := d) = stack_to_mem (\<sigma>[n := d]) \<mu>" proof fix x assume "n < length \<sigma>" thus "((stack_to_mem \<sigma> \<mu>)(nat (int (length \<sigma>) - int n) := d)) x = stack_to_mem (\<sigma>[n := d]) \<mu> x" proof (induction \<sigma> arbitrary: n) case Nil thus ?case by simp next case Cons thus ?case by (induction n) fastforce+ qed qed end
function e = vgg_rms_rrror(M) % e = vgg_rms_rrror(M) % % Get RMS diff from zero of matrix or vector M e = sqrt(sum(sum(M.*M)) / prod(size(M)));
clear.memory = function( to.remove = ls(envir = parent.frame()) )
{
  # clear objects from the caller's environment (a bare ls()/rm() here would only see this function's own locals)
  rm( list = to.remove, envir = parent.frame() )
  gc()
}
\documentclass[10pt]{article} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage[a4paper,includeheadfoot, left=.7in,right=.7in,top=.12in,bottom=0mm]{geometry} \usepackage[compact]{titlesec} \usepackage{xcolor} \usepackage[hidelinks]{hyperref} \usepackage{fontawesome} \usepackage[notmath]{sansmathfonts} \hypersetup{ colorlinks=false, } \titleformat{\section}{\color{accent}\titlerule[0.8pt]\Large\bfseries\scshape}{}{0pt}{}[{\titlerule[0.8pt]}] \renewcommand{\familydefault}{\sfdefault} \pagestyle{empty} \urlstyle{same} \setlength{\parskip}{1pt} \setlength{\parsep}{1pt} \setlength{\headsep}{1pt} % \setlength{\topskip}{0pt} % \setlength{\topmargin}{0pt} \setlength{\topsep}{1pt} \setlength{\partopsep}{1pt} \definecolor{vividred}{RGB}{194,41,65} \definecolor{anotherred}{HTML}{D32F2F} \definecolor{accent}{HTML}{3182c8} \definecolor{accent_light}{HTML}{63a1d8} \definecolor{accent_lighter}{HTML}{aad4f5} \definecolor{accent_lightest}{HTML}{eff8ff} \definecolor{accent_dark}{HTML}{2368a2} \definecolor{accent_darker}{HTML}{1a4971} \definecolor{accent_darkest}{HTML}{12283a} \definecolor{grey}{HTML}{b8c4ce} \definecolor{grey_light}{HTML}{cfd6dd} \definecolor{grey_lighter}{HTML}{e1e7ec} \definecolor{grey_lightest}{HTML}{f8f9fa} \definecolor{grey_dark}{HTML}{8895a7} \definecolor{grey_darker}{HTML}{5e6b7a} \definecolor{grey_darkest}{HTML}{212934} \renewcommand{\textbf}[1]{{\bfseries\color{accent_darkest}#1}} \begin{document} \thispagestyle{empty} \begin{center} \textbf{\textsc{\color{anotherred}\Huge Raghava Dhanya}}\\[10pt] %\rule{\textwidth}{.4pt} \end{center} %%%%%%%%%%%%%%%%%%%%% % social % %%%%%%%%%%%%%%%%%%%%% \begin{center} \href{mailto:[email protected]}{\faEnvelope\ [email protected]} \ | \ % \href{tel:9148995472}{\faPhoneSquare\ 9148995472} \ | \ % \href{https://github.com/RaghavaDhanya}{ \underline{\faGithubSquare\ GitHub}} \ | \ \href{https://in.linkedin.com/in/raghavadhanya}{\underline {\faLinkedinSquare\ LinkedIn}} \end{center} %%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%% % Experience % %%%%%%%%%%%%%%%%%%%%% \section*{Experience} \textbf{Trainee Decision Scientist,} \textit{Innovation \& Development, Mu Sigma Labs}, Bengaluru \hfill \textcolor{grey_darker}{[\textit{July 2018 - Present}]} \begin{itemize} \itemsep0em \item Currently working on High velocity trading with Statistical arbitrage techniques \item Complete ownership of an internal analytical automation tool based on BPMN -- [\textit{Spot Award}] \item R\&D work on developing an analytical application on Cloud(GCP, AWS, Azure) -- [\textit{Spot Award}] \item Developed a model for identifying customers who are likely to churn out of telecom operator using R. \item Built an application to scrape and classify the Reddit comments on any given subreddit based on specific Mu Sigma usecase, using python and Scikit-learn. \end{itemize} \textbf{Technology Developer,} \textit{notNULL}, Bengaluru \hfill \textcolor{grey_darker}{[\textit{February 2016 - July 2016}]} \begin{itemize} \itemsep0em \item Worked on developing a verification technology for events. \end{itemize} \medskip \textbf{Software Development Intern,} \textit{Supertext}, Bengaluru \hfill \textcolor{grey_darker}{[\textit{August 2015 - December 2015}]} \begin{itemize} \item Developed an android app for company communication with vendors using parse and firebase as backend. 
\end{itemize} %%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%% % Education % %%%%%%%%%%%%%%%%%%%%% \section*{Education} \textbf{Bachelor of Engineering,} Computer Science and Engineering \hfill \textcolor{grey_darker}{[\textit{2014-2018}]}\\ Aggregate: \textit{75.92\%}\\ Electives: \textbf{\textit{`Pattern Recognition', `Artificial Intelligence', `Clouds,Grids and Clusters'.}}\\ \textit{Sir M. Visvesvaraya Institute of Technology,} Bengaluru \medskip \\ \textbf{Machine Learning} by Stanford University on Coursera.\hfill \textcolor{grey_darker}{[\textit {March 2017 - July 2017}]}\\ Certificate earned on July 21, 2017 \medskip\\ \textbf{Neural Networks and Deep Learning} by Deeplearning.ai on Coursera.\hfill \textcolor{grey_darker}{[\textit {February 2018 - March 2018}]}\\ Certificate earned on March 4, 2018 %%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%% % skills % %%%%%%%%%%%%%%%%%%%%% \section*{Skills} \textbf{Programming languages}: C/C++, Python, Java, R, Shell , HTML \& CSS, JS.\\ \textbf{Frameworks}: Flask, Spring Boot, Keras, Tensorflow.\\ \textbf{Technologies}: Amazon Web Services, Continuous Integration/Delivery , Docker, Kubernetes, Kafka. %%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%% % Projects % %%%%%%%%%%%%%%%%%%%%% \section*{Projects} \textbf{\underline{\href{https://github.com/RaghavaDhanya/Rudolf} {Image Regeneration with Generative Models}}} | \textit{Keras, Tensorflow, python.}\hfill\textcolor{grey_darker}{[\textit{March 2018 - May 2018}]}\\ An approach to use newly introduced CapsNet as a discriminator in Generative Adversarial Network and demonstrate its application using semantic inpainting on MNIST and face images. \smallskip \\ \textbf{\underline{\href{https://github.com/RaghavaDhanya/ReMorse} {ReMorse}}} | \textit{C++, OpenGL, Box2D} \hfill\textcolor{grey_darker}{[\textit{April 2017}]} \\ A 2D side scrolling game which tries to subconsciously teach Morse Code. \smallskip \\ \textbf{\underline{\href{https://github.com/abhijith0505/Tonite} {Tonite}}} | \textit{Android, Java.}\hfill\textcolor{grey_darker}{[\textit{April 2016}]}\\ Wardrobe assistant app to keep track of the clothes and gives purchase suggestions based on events in user's calendar. Built at Hackerramp 2016 (16th and 17th April ), Myntra office Bangalore. \smallskip \\ % \textbf{\underline{\href{https://github.com/RaghavaDhanya/Codesnap} % {Codesnap}}} | \textit{Chrome developer tools, JavaScript, CSS,HTML. } % \hfill\textcolor{grey_darker}{[\textit{August 2016}]} % \\ % A chrome extension which adds copy button to code segments in most % websites, also provides a clipboard history. % \smallskip % \\ \textbf{\underline{\href{https://github.com/RaghavaDhanya/FileHide} {FileHide}}} | \textit{Python, Tkinter.}\hfill\textcolor{grey_darker}{[\textit{May 2016}]} \\ A cross platform app which hides one file in another, the host will still work normally. \smallskip % \\ % \textbf{\underline{\href{https://github.com/RaghavaDhanya/Snake} % {Snake}}} | \textit{Python, Pygame.} \hfill\textcolor{grey_darker}{[\textit{December 2015}]} % \\ % Simple, classic and cross platform snake game. %%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%% % Activities & Interests % %%%%%%%%%%%%%%%%%%%%%%%%%% \section*{Activities and Interests} \begin{itemize} \itemsep0em \item Won two "Spot Award" within 6 months for my work in Mu Sigma Labs \item Won First prize in annual departmental project exhibition 2018 for the project "Image Regeneration with Generative Models" out of 40\texttt{+} teams. 
\item Twice second placed in coding and debugging competitions conducted during college fests. \item Presented technical seminar on "Quantum Machine Learning" \item Active participant in Competitive coding, Hackathons and Conferences. \hfill\textcolor{grey_darker}{[\underline{\href{https://www.codechef.com/users/raghavadhanya}{Codechef profile}}]} \item Presented a technical paper on "Process Scheduling optimization in OS using Machine Learning" at Papyrus 8 (Intra-college event) \item Volunteered for android app development for college fest Kalanjali-2015. \end{itemize} %%%%%%%%%%%%%%%%%%%%%%%%%% \end{document}
variables (X Y Z : Type) (f : X → Y) (g : Y → Z) open function example (hf : injective f) (hg : injective g) : injective (g ∘ f) := begin intros x₁ x₂ h, apply hf, apply hg, exact h, end
#include <ros/ros.h> #include <image_transport/image_transport.h> #include <opencv2/highgui/highgui.hpp> #include <cv_bridge/cv_bridge.h> #include <sstream> #include <signal.h> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <boost/make_shared.hpp> #include <sys/time.h> #include <iomanip> #include <fcntl.h> #include <unistd.h> #include <iostream> #include <fstream> #include <math.h> #include "opencv2/core.hpp" #include <opencv2/core/utility.hpp> #include "opencv2/highgui.hpp" #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/opencv.hpp> #include "PAL.h" #include <std_msgs/Int32.h> #include <sys/time.h> using namespace std; using namespace cv; using namespace PAL; static const float Pi = 3.1415926535898f; /* Specify the absolute file path from which the settings are to be read. If the specified file can't be opened, default properties from the API are used. See PAL Documentation for more information. */ #define PROPERTIES_FILE_PATH "../catkin_ws/src/dreamvu_pal_tracking/src/SavedPalProperties.txt" static int camera_index = -1; int width = -1; int height = -1; bool g_bRosOK = true; PAL::CameraProperties g_CameraProperties; image_transport::Publisher leftpub1; void publishimage(cv::Mat imgmat, image_transport::Publisher &pub, string encoding, timeval timestamp) { int type; if (encoding == "mono8") type = CV_8UC1; else if (encoding == "mono16") type = CV_16SC1; else type = CV_8UC3; std_msgs::Header header; header.stamp.sec = timestamp.tv_sec; header.stamp.nsec = timestamp.tv_usec*1000; sensor_msgs::ImagePtr imgmsg = cv_bridge::CvImage(header, encoding, imgmat).toImageMsg(); pub.publish(imgmsg); } void set_id(const std_msgs::Int32& msg) { int id = msg.data; PAL::SetTrackID(id); } int main(int argc, char **argv) { ros::init(argc, argv, "following_node"); ros::NodeHandle nh; image_transport::ImageTransport it(nh); //Creating all the publishers leftpub1 = it.advertise("/dreamvu/pal/tracking/get/left", 1); ros::Subscriber id_sub = nh.subscribe("/dreamvu/pal/set/id", 1, set_id); int width, height, camera_index = -1, model_id = 1; bool EnableDepth = true; PAL::Mode mode = PAL::Mode::OBJECT_FOLLOWING; if(PAL::Init(width, height, camera_index, EnableDepth, model_id, &mode) != PAL::SUCCESS) //Connect to the PAL camera { printf("Init failed\n"); return 1; } //Loading properties from the file PAL::Acknowledgement ack_load1 = PAL::LoadProperties(PROPERTIES_FILE_PATH, &g_CameraProperties); if (ack_load1 != PAL::SUCCESS) { ROS_WARN("Not able to load PAL settings from properties file at default location.\n\n" "Please update the file location by setting the Macro: PROPERTIES_FILE_PATH in tracking_node.cpp and run catkin_make to build the package again."); ROS_INFO("Setting default properties to PAL."); } ros::Rate loop_rate(30); g_bRosOK = ros::ok(); for(int i=0; i<10; i++) { PAL::Data::TrackingResults discard; discard = GrabTrackingData(); } while (g_bRosOK) { //Getting no of subscribers for each publisher int left1Subnumber = leftpub1.getNumSubscribers(); int subnumber = left1Subnumber; PAL::Data::TrackingResults data1; if (subnumber > 0) { data1 = GrabTrackingData(); } if (left1Subnumber > 0) { publishimage(data1.left, leftpub1, "bgr8", data1.timestamp); } ros::spinOnce(); loop_rate.sleep(); g_bRosOK = ros::ok(); } PAL::Destroy(); }
using Documenter using EzXMLPatched makedocs( sitename="EzXMLPatched.jl", modules=[EzXMLPatched], pages=["index.md", "manual.md", "reference.md", "devnotes.md"]) deploydocs( repo="github.com/bicycle1885/EzXMLPatched.jl.git", target="build")
-- Intuitionistic propositional logic, de Bruijn approach, final encoding module Bf.Ip where open import Lib using (List; _,_; LMem; lzero; lsuc) -- Types infixl 2 _&&_ infixl 1 _||_ infixr 0 _=>_ data Ty : Set where UNIT : Ty _=>_ : Ty -> Ty -> Ty _&&_ : Ty -> Ty -> Ty _||_ : Ty -> Ty -> Ty FALSE : Ty infixr 0 _<=>_ _<=>_ : Ty -> Ty -> Ty a <=> b = (a => b) && (b => a) NOT : Ty -> Ty NOT a = a => FALSE TRUE : Ty TRUE = FALSE => FALSE -- Context and truth judgement Cx : Set Cx = List Ty isTrue : Ty -> Cx -> Set isTrue a tc = LMem a tc -- Terms TmRepr : Set1 TmRepr = Cx -> Ty -> Set module ArrMp where record Tm (tr : TmRepr) : Set1 where infixl 1 _$_ infixr 0 lam=>_ field var : forall {tc a} -> isTrue a tc -> tr tc a lam=>_ : forall {tc a b} -> tr (tc , a) b -> tr tc (a => b) _$_ : forall {tc a b} -> tr tc (a => b) -> tr tc a -> tr tc b v0 : forall {tc a} -> tr (tc , a) a v0 = var lzero v1 : forall {tc a b} -> tr (tc , a , b) a v1 = var (lsuc lzero) v2 : forall {tc a b c} -> tr (tc , a , b , c) a v2 = var (lsuc (lsuc lzero)) open Tm {{...}} public module Mp where record Tm (tr : TmRepr) : Set1 where field pair' : forall {tc a b} -> tr tc a -> tr tc b -> tr tc (a && b) fst : forall {tc a b} -> tr tc (a && b) -> tr tc a snd : forall {tc a b} -> tr tc (a && b) -> tr tc b left : forall {tc a b} -> tr tc a -> tr tc (a || b) right : forall {tc a b} -> tr tc b -> tr tc (a || b) case' : forall {tc a b c} -> tr tc (a || b) -> tr (tc , a) c -> tr (tc , b) c -> tr tc c isArrMp : ArrMp.Tm tr open ArrMp.Tm isArrMp public syntax pair' x y = [ x , y ] syntax case' xy x y = case xy => x => y open Tm {{...}} public module Ip where record Tm (tr : TmRepr) : Set1 where field abort : forall {tc a} -> tr tc FALSE -> tr tc a isMp : Mp.Tm tr open Mp.Tm isMp public open Tm {{...}} public Thm : Ty -> Set1 Thm a = forall {tr tc} {{_ : Tm tr}} -> tr tc a open Ip public -- Example theorems t1 : forall {a b} -> Thm (a => NOT a => b) t1 = lam=> lam=> abort (v0 $ v1) t2 : forall {a b} -> Thm (NOT a => a => b) t2 = lam=> lam=> abort (v1 $ v0) t3 : forall {a} -> Thm (a => NOT (NOT a)) t3 = lam=> lam=> v0 $ v1 t4 : forall {a} -> Thm (NOT a <=> NOT (NOT (NOT a))) t4 = [ lam=> lam=> v0 $ v1 , lam=> lam=> v1 $ (lam=> v0 $ v1) ]
"""Solving Darcy Flow using ConvNet with mixed residual loss Flow through Porous Media, 2D div (K(s) grad u(s)) = 0, s = (s1, s2) in (0, 1) x (0, 1) Boundary: u = 1, s1 = 0; u = 0, s1 = 1 u_s2 = 0, s2 in {0, 1} Optimizer: L-BFGS Considered nonlinear PDE. (nonlinear corrections to Darcy) """ import torch import torch.nn as nn import torch.autograd as ag import torch.nn.functional as F import torch.optim as optim from models.codec import Decoder from utils.image_gradient import SobelFilter from models.darcy import conv_continuity_constraint as continuity_constraint from models.darcy import conv_boundary_condition as boundary_condition from utils.plot import save_stats, plot_prediction_det, plot_prediction_det_animate2 from utils.misc import mkdirs, to_numpy import numpy as np import argparse import h5py import sys import time import os from pprint import pprint import matplotlib.pyplot as plt plt.switch_backend('agg') def main(): parser = argparse.ArgumentParser(description='CNN to solve PDE') parser.add_argument('--exp-dir', type=str, default='./experiments/solver', help='color map') parser.add_argument('--nonlinear', action='store_true', default=False, help='set True for nonlinear PDE') # data parser.add_argument('--data-dir', type=str, default="./datasets", help='directory to dataset') parser.add_argument('--data', type=str, default='grf', choices=['grf', 'channelized', 'warped_grf'], help='data type') parser.add_argument('--kle', type=int, default=512, help='# kle terms') parser.add_argument('--imsize', type=int, default=64, help='image size') parser.add_argument('--idx', type=int, default=8, help='idx of input, please use 0 ~ 999') parser.add_argument('--alpha1', type=float, default=1.0, help='coefficient for the squared term') parser.add_argument('--alpha2', type=float, default=1.0, help='coefficient for the cubic term') # latent size: (nz, sz, sz) parser.add_argument('--nz', type=int, default=1, help='# feature maps of latent z') # parser.add_argument('--sz', type=int, default=16, help='feature map size of latent z') parser.add_argument('--blocks', type=list, default=[4, 2], help='# layers in each dense block of the decoder') parser.add_argument('--weight-bound', type=float, default=10, help='weight for boundary condition loss') parser.add_argument('--lr', type=float, default=0.5, help='learning rate') parser.add_argument('--epochs', type=int, default=1, help='# epochs to train') parser.add_argument('--test-freq', type=int, default=1, help='every # epoch to test') parser.add_argument('--ckpt-freq', type=int, default=1, help='every # epoch to save model') parser.add_argument('--cmap', type=str, default='jet', help='color map') parser.add_argument('--same-scale', action='store_true', help='true for setting noise to be same scale as output') parser.add_argument('--animate', action='store_true', help='true to plot animate figures') parser.add_argument('--cuda', type=int, default=1, help='cuda number') parser.add_argument('-v', '--verbose', action='store_true', help='True for versbose output') args = parser.parse_args() pprint(vars(args)) device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu") dataset = f'{args.data}_kle{args.kle}' if args.data == 'grf' else args.data hyparams = f'{dataset}_idx{args.idx}_dz{args.nz}_blocks{args.blocks}_'\ f'lr{args.lr}_wb{args.weight_bound}_epochs{args.epochs}' if args.nonlinear: from utils.fenics import solve_nonlinear_poisson exp_name = 'conv_mixed_residual_nonlinear' from models.darcy import 
conv_constitutive_constraint_nonlinear as constitutive_constraint hyparams = hyparams + f'_alpha1_{args.alpha1}_alpha2_{args.alpha2}' else: exp_name = 'conv_mixed_residual' from models.darcy import conv_constitutive_constraint as constitutive_constraint run_dir = args.exp_dir + '/' + exp_name + '/' + hyparams mkdirs(run_dir) # load data assert args.idx < 1000 if args.data == 'grf': assert args.kle in [512, 128, 1024, 2048] ntest = 1000 if args.kle == 512 else 1024 hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/kle{args.kle}_lhs{ntest}_test.hdf5' elif args.data == 'warped_grf': hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/warped_gp_ng64_n1000.hdf5' elif args.data == 'channelized': hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/channel_ng64_n512_test.hdf5' else: raise ValueError('No dataset are found for the speficied parameters') print(f'dataset: {hdf5_file}') with h5py.File(hdf5_file, 'r') as f: input_data = f['input'][()] output_data = f['output'][()] print("Output values") print(f'input: {input_data.shape}') print(f'output: {output_data.shape}') print("Input field: ", input_data) # permeability, (1, 1, 64, 64) perm_arr = input_data[[args.idx]] # pressure, flux_hor, flux_ver, (3, 64, 64) if args.nonlinear: # solve nonlinear Darcy for perm_arr with FEniCS output_file = run_dir + '/output_fenics.npy' if os.path.isfile(output_file): output_arr = np.load(output_file) print('Loaded solved output field') else: print('Solve nonlinear poisson with FEniCS...') output_arr = solve_nonlinear_poisson(perm_arr[0, 0], args.alpha1, args.alpha2, run_dir) np.save(output_file, output_arr) else: output_arr = output_data[args.idx] print('output shape: ', output_arr.shape) # model model = Decoder(args.nz, out_channels=3, blocks=args.blocks).to(device) print(f'model size: {model.model_size}') fixed_latent = torch.randn(1, args.nz, 16, 16).to(device) * 0.5 perm_tensor = torch.FloatTensor(perm_arr).to(device) sobel_filter = SobelFilter(args.imsize, correct=True, device=device) optimizer = optim.LBFGS(model.parameters(), lr=args.lr, max_iter=20, history_size=50) logger = {} logger['loss'] = [] def train(epoch): model.train() def closure(): optimizer.zero_grad() output = model(fixed_latent) if args.nonlinear: energy = constitutive_constraint(perm_tensor, output, sobel_filter, args.alpha1, args.alpha2) \ + continuity_constraint(output, sobel_filter) else: energy = constitutive_constraint(perm_tensor, output, sobel_filter) + continuity_constraint(output, sobel_filter) loss_dirichlet, loss_neumann = boundary_condition(output) loss_boundary = loss_dirichlet + loss_neumann loss = energy + loss_boundary * args.weight_bound loss.backward() if args.verbose: print(f'epoch {epoch}: loss {loss.item():6f}, '\ f'energy {energy.item():.6f}, diri {loss_dirichlet.item():.6f}, '\ f'neum {loss_neumann.item():.6f}') return loss loss = optimizer.step(closure) loss_value = loss.item() if not isinstance(loss, float) else loss logger['loss'].append(loss_value) print(f'epoch {epoch}: loss {loss_value:.6f}') if epoch % args.ckpt_freq == 0: torch.save(model.state_dict(), run_dir + "/model_epoch{}.pth".format(epoch)) def test(epoch): if epoch % args.epochs == 0 or epoch % args.test_freq == 0: output = model(fixed_latent) output = to_numpy(output) if args.animate: i_plot = epoch // args.test_freq plot_prediction_det_animate2(run_dir, output_arr, output[0], epoch, args.idx, i_plot, plot_fn='imshow', cmap=args.cmap, same_scale=args.same_scale) else: plot_prediction_det(run_dir, output_arr, output[0], epoch, 
args.idx, plot_fn='imshow', cmap=args.cmap, same_scale=args.same_scale) np.save(run_dir + f'/epoch{epoch}.npy', output[0]) print('start training...') dryrun = False tic = time.time() for epoch in range(1, args.epochs + 1): if not dryrun: train(epoch) test(epoch) print(f'Finished optimization for {args.epochs} epochs using {(time.time()-tic)/60:.3f} minutes') save_stats(run_dir, logger, 'loss') # save input plt.imshow(perm_arr[0, 0]) plt.colorbar() plt.savefig(run_dir + '/input.png') plt.close() if __name__ == '__main__': main()
import numpy as np def chessboard(n, display = True): """ Creates a chessboard-matrix with dimensions n times n To account for uneven matrices, first a 2n times 2n matrix is created, then only the upper left part of the matrix (n times n) is sliced out """ even_row = np.array([1,0]*n) uneven_row = np.array([0,1]*n) stacked = np.row_stack((even_row, uneven_row)*n) matrix = stacked[:n,:n] if display: print(matrix) return matrix if __name__ == "__main__": chessboard(7)
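# A hedged aside (not part of the original script): the same checkerboard
# pattern can be produced directly from index parity, avoiding the oversized
# 2n x 2n intermediate described in the docstring above. The helper name
# chessboard_fast is ours, purely for illustration.
import numpy as np

def chessboard_fast(n):
    """Equivalent n x n checkerboard built from (row + column) parity."""
    i, j = np.indices((n, n))
    # (i + j + 1) % 2 puts a 1 in the top-left corner, matching chessboard(n)
    return (i + j + 1) % 2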
$\newcommand{\bkt}[1]{\left(#1\right)}$ $\newcommand{\dsum}[1]{\displaystyle\sum}$ $\newcommand{\spade}{\bkt{\spadesuit}}$ $\newcommand{\club}{\bkt{\clubsuit}}$ Polynomial Interpolation == 1.1 **Introduction** Given the values of a function $f(x)$ at $n+1$ distinct locations of $x$, say $\{x_i\}_{i=0}^n$, we could approximate $f$ by a polynomial function $p_n(x)$ of degree $n$ that satisfies $$p_n\bkt{x_i} = f\bkt{x_i}$$ We can construct the polynomial $p_n(x)$ as $p_n(x) = a_0 + a_1 x + a_2 x^2 + \cdots + a_n x^n$. The $n+1$ coefficients are determined by forcing $p_n(x)$ to pass through the data points. This leads to $n+1$ equations in $n+1$ unknowns, $a_0,a_1,\ldots,a_n$, i.e., $$y_i = a_0 + a_1 x_i + a_2 x_i^2 + \cdots + a_n x_i^n$$ for $i \in \{0,1,2,\ldots,n\}$. This procedure for finding the coefficients of the polynomial is not very attractive. It involves solving a linear system, whose matrix is extremely ill-conditioned. See below. ```python import scipy as sp; import numpy as np; from scipy.stats import binom; import matplotlib.pylab as pl; from scipy import linalg from numpy import linalg from ipywidgets import interact; ``` ```python Nmax = 41; N = np.arange(2,Nmax); c = np.zeros(Nmax-2); for n in N: x = np.linspace(-1,1,n); V = np.vander(x,increasing="True"); c[n-2] = np.linalg.cond(V); pl.semilogy(N,c,'k-+'); ``` A better way to go about interpolating with polynomials is via Lagrange interpolation. Define the Lagrange polynomial $L_i(x)$ to be $1$ when $x=x_i$ and is zero at all the other nodes, i.e., $$L_i\bkt{x_j} = \delta_{ij}$$ We then have $$p_n(x) = \sum_{i=0}^n f_i L_i(x)$$ Since we want $L_i(x)$ to vanish at all $x_j$, where $j \neq i$, we have $L_i(x) = c_i \prod_{j \neq i}\bkt{x-x_j}$. Further, since $L_i\bkt{x_i} = 1$, we get that $c = \dfrac1{\prod_{j \neq i} \bkt{x_i-x_j}}$. Hence, we see that $$L_i\bkt{x} = \prod_{j \neq i} \bkt{\dfrac{x-x_j}{x_i-x_j}}$$ If we call $l_i(x) = \prod_{j \neq i} \bkt{x-x_j}$ and $w_i = \prod_{j \neq i} \bkt{\dfrac1{x_i-x_j}}$, we see that $L_i(x) = w_i l_i(x)$. Further, if we set $l(x) = \prod_{j=0}^n \bkt{x-x_j}$, we see that $l_i(x) = \dfrac{l(x)}{x-x_i}$, and hence $L_i(x) = \dfrac{w_il(x)}{x-x_i}$ and hence we see that \begin{align} p_n(x) = l(x) \bkt{\sum_{i=0}^n \dfrac{w_i f_i}{x-x_i}} \,\,\, \spade \end{align} Note that $\spade$ is an attractive way to compute the Lagrange interpolant. It requires $\mathcal{O}\bkt{n^2}$ work to calculate $w_i$'s, followed by $\mathcal{O}(n)$ work to compute the interpolant for each $x$. This is called as the ***first form of Barycentric interpolation***. What about updating when a new interpolation node $x_{n+1}$ is added? There are only two steps involved. - For $i \in\{0,1,\ldots,n\}$, divide each $w_i$ by $\bkt{x_i-x_{n+1}}$. Cost is $n+1$ flops. - Compute $w_{n+1}$ for another $n+1$ flops. Hence, we see that the Lagrange interpolant can also be updated at $\mathcal{O}(n)$ flops. The above barycentric formula can be made even more elegant in practice. Note that the function $1$ gets interpolated by any polynomial exactly. 
Hence, we see that $$1 = l(x) \bkt{\sum_{i=0}^n \dfrac{w_i}{x-x_i}}$$ which gives us that $$l(x) = \dfrac1{\bkt{\displaystyle\sum_{i=0}^n \dfrac{w_i}{x-x_i}}}$$ Hence, we obtain the ***second form of Barycentric interpolation*** \begin{align} p_n(x) = \dfrac{\bkt{\displaystyle\sum_{i=0}^n \dfrac{w_i f_i}{x-x_i}}}{\bkt{\displaystyle\sum_{i=0}^n \dfrac{w_i}{x-x_i}}} \,\,\, \club \end{align} Again note that it is fairly easy to incorporate a new intepolation node at a cost of $\mathcal{O}(n)$. Below is the Lagrange interpolation using the original form. ```python def function(x): # f = np.abs(x)+x/2-x**2; f = 1.0/(1+25*x*x); # f = np.abs(x+0.3) + np.abs(x-0.2) + + np.abs(x*x*x*x-0.8); return f; def Lagrange(xnodes,x,i): f = 1; nnodes = np.size(xnodes); for j in range(0,i): f = f*(x-xnodes[j])/(xnodes[i]-xnodes[j]); for j in range(i+1,nnodes): f = f*(x-xnodes[j])/(xnodes[i]-xnodes[j]); return f; def Chebyshev(nnodes,xplot): # Chebyshev node interpolation xnodes = np.cos(np.arange(0,nnodes)*np.pi/(nnodes-1)); fnodes = function(xnodes); fplot = 0; for i in range(0,nnodes): fplot = fplot + fnodes[i]*Lagrange(xnodes,xplot,i); return xnodes, fnodes, fplot; def Uniform(nnodes,xplot): # Uniform node interpolation xnodes = np.linspace(-1,1,nnodes); fnodes = function(xnodes); fplot = 0; for i in range(0,nnodes): fplot = fplot + fnodes[i]*Lagrange(xnodes,xplot,i); return xnodes, fnodes, fplot; def Bernstein(n,xplot): xnodes = np.linspace(-1,1,nnodes); fnodes = function(xnodes); fplot = 0; for i in range(0,nnodes): fplot = fplot + sp.stats.binom.pmf(i,nnodes-1,0.5+0.5*xplot)*function(xnodes[i]); return fplot; nplot = 1001; xplot = np.linspace(-1,1,nplot); f_actual = function(xplot); @interact def inter(nnodes=(5,45,2)): xnodes, fnodes, fplot = Chebyshev(nnodes,xplot); # xnodes, fnodes, fplot = Uniform(nnodes,xplot); # fplot = Bernstein(nnodes,xplot); error = f_actual-fplot; print(np.amax(np.abs(error))) pl.plot(xplot,f_actual,'-'); pl.plot(xplot,fplot,'r'); pl.rcParams["figure.figsize"] = [16,4]; ``` interactive(children=(IntSlider(value=25, description='nnodes', max=45, min=5, step=2), Output()), _dom_classe… ```python ```
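The interpolation code above evaluates $p_n$ through the original Lagrange form. As a complement, here is a minimal sketch of the second (barycentric) form derived above; it reuses `function`, `xplot` and `f_actual` from the cells above, and the helper names `barycentric_weights` and `barycentric_eval` are ours. Exact hits on a node are returned directly to avoid division by zero.

```python
def barycentric_weights(xnodes):
    # w_i = prod_{j != i} 1/(x_i - x_j): the O(n^2) precomputation described above
    n = np.size(xnodes)
    w = np.ones(n)
    for i in range(n):
        for j in range(n):
            if j != i:
                w[i] /= (xnodes[i] - xnodes[j])
    return w

def barycentric_eval(xnodes, fnodes, w, xplot):
    # second form: p(x) = sum_i w_i f_i/(x-x_i) / sum_i w_i/(x-x_i)
    p = np.zeros(np.size(xplot))
    for k, x in enumerate(xplot):
        diff = x - xnodes
        hit = np.where(diff == 0)[0]
        if hit.size > 0:
            p[k] = fnodes[hit[0]]      # x coincides with a node
        else:
            tmp = w / diff
            p[k] = np.sum(tmp * fnodes) / np.sum(tmp)
    return p

# Same Chebyshev nodes as in the Chebyshev() routine above
nnodes = 25
xnodes = np.cos(np.arange(0, nnodes) * np.pi / (nnodes - 1))
w = barycentric_weights(xnodes)
fbary = barycentric_eval(xnodes, function(xnodes), w, xplot)
print(np.amax(np.abs(f_actual - fbary)))
```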
If $X$ is a finite subset of $A$, then the measure of $X$ is the number of elements in $X$.
[STATEMENT] lemma is_root_merge: "\<lbrakk> is_root h1; is_root h2\<rbrakk> \<Longrightarrow> is_root (merge h1 h2)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>is_root h1; is_root h2\<rbrakk> \<Longrightarrow> is_root (merge h1 h2) [PROOF STEP] by (cases "(h1,h2)" rule: merge.cases) auto
module PostProcess include("../Types.jl") export run function run(input:: Channel, time_threshold) current = nothing return function (output:: Channel) for dp in input if current === nothing current = dp else # If the time difference exceeds the threshold, we have a confirmed step if (dp.time - current.time) > time_threshold current = dp put!(output, dp) else # Keep the point with the largest magnitude. if dp.magnitude > current.magnitude current = dp end end end end end end end
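# A hedged usage sketch (not part of the module above): the concrete data-point
# type lives in ../Types.jl and is not shown here, so plain NamedTuples with
# `time` and `magnitude` fields stand in for it, and the 0.5 s threshold is an
# arbitrary illustrative value.
input  = Channel(32)
output = Channel(32)

for (t, m) in [(0.0, 1.2), (0.1, 1.9), (0.8, 1.1), (1.6, 2.3)]
    put!(input, (time = t, magnitude = m))
end
close(input)

# Build the processing closure with the threshold, then drain it into `output`.
PostProcess.run(input, 0.5)(output)
close(output)

for step in output
    println("confirmed step at t = ", step.time)
end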
/** * Various binning and weighting routines for aperture pixel tables. */ #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include "aXe_grism.h" #include "aper_conf.h" #include "spc_driz.h" #include "spce_output.h" #include "spc_optimum.h" #include "spce_binning.h" #define SQR(x) ((x)*(x)) #define MIN(x,y) (((x)<(y))?(x):(y)) #define MAX(x,y) (((x)>(y))?(x):(y)) #define DEBUG_ME 0x200 #define PIXWEIGHT(x1,y1,x2,y2,pix) ((pix)->weight_function((x1), (y1), (x2),\ (y2),(pix))) #define NAIVE_VAL_TO_BININD(x) ((int)floor((x)+1e-6)) /** * Function: add_to_spec_table * adds some count to an entry in the spectrum table * * Parameters: * @param spec the spectrum table to work on * @param bin index of spectrum table entry to add cur_p to * @param cur_p the ap_pixel to add * @param weight weight of the count */ static void add_to_spec_table (spectrum * const spec, const int bin, const ap_pixel * const cur_p, const int quant_cont, const double weight) { spc_entry *const sp_e = spec->spec + bin; // some conditions which should not be violated if ((bin < 0) || (bin > spec->spec_len)) { aXe_message (aXe_M_WARN4, __FILE__, __LINE__, "Assignment out of spectrum: %d", bin); return; } if (weight < 0) { aXe_message (aXe_M_FATAL, __FILE__, __LINE__, "Weight cannot be negative " "but is %f", weight); } // check whether the spectral element is new // and without values up to now if (isnan (sp_e->lambda_mean)) { // initialize the spectral element sp_e->lambda_mean = cur_p->lambda; sp_e->dlambda = cur_p->dlambda; sp_e->lambda_max = cur_p->lambda; sp_e->lambda_min = cur_p->lambda; sp_e->weight = weight; sp_e->count = cur_p->count * weight; sp_e->error = fabs(cur_p->error) * weight; sp_e->dq = cur_p->dq; // initialize the contamination // this is different for quantitative and // geometrical contamination if (quant_cont) { if ((int)(sp_e->contam==-1)&&((int)cur_p->contam!=-1)) { sp_e->contam = cur_p->contam * weight; } } else { if ((int)(sp_e->contam==-1)&&((int)cur_p->contam!=-1)) { sp_e->contam = cur_p->contam; } } } else { // update an existing spectral bin // find new maxima and minima sp_e->lambda_max = MAX (cur_p->lambda, sp_e->lambda_max); sp_e->lambda_min = MIN (cur_p->lambda, sp_e->lambda_min); // find new mean lambda via weighted summation sp_e->lambda_mean = (sp_e->lambda_mean * sp_e->weight + cur_p->lambda * weight) / (weight + sp_e->weight); // NEWNEWNEW:: // find dlambda via weighted summation sp_e->dlambda = (sp_e->dlambda * sp_e->weight + cur_p->dlambda * weight) / (weight + sp_e->weight); // add the weight sp_e->weight += weight; // add the counts sp_e->count += cur_p->count * weight; // process the error sp_e->error = sqrt (SQR (sp_e->error) + SQR (fabs(cur_p->error) * weight)); // logically XOR the dq sp_e->dq = (sp_e->dq | cur_p->dq); // update the contamination, // take into account the quantitative // contamination if (quant_cont) { if (((int)sp_e->contam==-1)&&((int)cur_p->contam!=-1)) { sp_e->contam = cur_p->contam * weight; } if (((int)sp_e->contam!=-1)&&((int)cur_p->contam!=-1)) { sp_e->contam += cur_p->contam * weight; } } else { if (((int)sp_e->contam==-1)&&((int)cur_p->contam!=-1)) { sp_e->contam = cur_p->contam; } if (((int)sp_e->contam!=-1)&&((int)cur_p->contam!=-1)) { sp_e->contam += cur_p->contam; } } } } /** * Function: pixweight_x * computes a weight for the bin between coordinates (x1,y1) and (x2,y2) * if the pi/4<=angle<=3 pi/4. 
* * Parameters: * @param x1 - x coordinate for the start point of the bin on the trace * @param y1 - y coordinate for the start point of the bin on the trace * @param x2 - x coordinate for the end point of the bin on the trace * @param y2 - y coordinate for the end point of the bin on the trace * @param pix the pixel to compute the weight for in the form of a * w_pixel structure filled out by fill_w_pixel * * Returns: * @return sum - the pixel weight */ double pixweight_x (const double x1, const double y1, const double x2, const double y2, const struct w_pixel_s *const pix) { double a, b; double sum = 0; a = x1 - pix->cota * (y1 - pix->y0); b = x2 - pix->cota * (y2 - pix->y0); if ((b >= pix->p0) && (a <= pix->p1)) { sum += pix->slope / 2 * (MIN (b, pix->p1) - MAX (a, pix->p0)) * (MAX (a, pix->p0) - 2 * pix->p0 + MIN (b, pix->p1)) * sin (pix->angle); } if ((b >= pix->p1) && (a <= pix->p2)) { sum += pix->fmax * (MIN (b, pix->p2) - MAX (a, pix->p1)) * sin (pix->angle); } if ((b >= pix->p2) && (a <= pix->p3)) { sum += pix->slope / 2 * (MAX (a, pix->p2) - MIN (b, pix->p3)) * (MAX (a, pix->p2) - 2 * pix->p3 + MIN (b, pix->p3)) * sin (pix->angle); } return sum; } /** * Funtion: pixweight_y * Computes a weight for the bin between coordinates (x1,y1) and (x2,y2) * if the angle<=pi/4 or 3 pi/4<=angle. * * @param x1 - x coordinate for the start point of the bin on the trace * @param y1 - y coordinate for the start point of the bin on the trace * @param x2 - x coordinate for the end point of the bin on the trace * @param y2 - y coordinate for the end point of the bin on the trace * @param pix - the pixel to compute the weight for in the form of a * w_pixel structure filled out by fill_w_pixel * * Returns: * @return sum - the pixel weight */ double pixweight_y (const double x1, const double y1, const double x2, const double y2, const struct w_pixel_s *const pix) { double a, b; double sum = 0; a = y1 + pix->tana * (pix->x0 - x1); b = y2 + pix->tana * (pix->x0 - x2); if (a > b) { double tmp; tmp = a; a = b; b = tmp; } if ((b >= pix->p0) && (a <= pix->p1)) { sum += pix->slope / 2 * (MIN (b, pix->p1) - MAX (a, pix->p0)) * (MAX (a, pix->p0) - 2 * pix->p0 + MIN (b, pix->p1)) * cos (pix->angle); } if ((b >= pix->p1) && (a <= pix->p2)) { sum += pix->fmax * (MIN (b, pix->p2) - MAX (a, pix->p1)) * cos (pix->angle); } if ((b >= pix->p2) && (a <= pix->p3)) { sum += pix->slope / 2 * (MAX (a, pix->p2) - MIN (b, pix->p3)) * (MAX (a, pix->p2) - 2 * pix->p3 + MIN (b, pix->p3)) * cos (pix->angle); } return sum; } /** * Function: fill_w_pixel * precomputes some properties of a given pixel for purposes * of computing the weights it contributes to a given xi bin. * * Parameters: * @param pix a pointer to the w_pix structure to fill * @param x0 the x coordinate of the pixel's lower left corner * @param y0 the y coordinate of the pixel's lower left corner * @param size the size of the pixel * @param angle the orientation of the object that has generated the * spectrum */ /* since we're only interested in weights here, the size of the square * doesn't matter. Where I left explicit ones, you could write size * and get the true area instead of the fraction of the total area. * Dunno why you'd want to do this, though. 
*/ static void fill_w_pixel (w_pixel * const pix, const double x0, const double y0, const double angle) { pix->tana = tan (angle); pix->cota = 1 / pix->tana; pix->angle = angle; pix->x0 = x0; pix->y0 = y0; if ((angle >= M_PI / 4) && (angle <= 3 * M_PI / 4)) { pix->p0 = MIN (x0, x0 - 1 * pix->cota); pix->p1 = MAX (x0, x0 - 1 * pix->cota); pix->p2 = MIN (x0 + 1, x0 + 1 - 1 * pix->cota); pix->p3 = MAX (x0 + 1, x0 + 1 - 1 * pix->cota); pix->fmax = 1 / sin (angle); if (fabs (pix->p1 - pix->p0) < 1e-7) pix->slope = 0; else pix->slope = pix->fmax / (pix->p1 - pix->p0); pix->weight_function = &pixweight_x; } else { /* angle between 0 and pi/4 or 3*pi/4 and pi */ pix->p0 = MIN (y0, y0 - 1 * pix->tana); pix->p1 = MAX (y0, y0 - 1 * pix->tana); pix->p2 = MIN (y0 + 1, y0 + 1 - 1 * pix->tana); pix->p3 = MAX (y0 + 1, y0 + 1 - 1 * pix->tana); pix->fmax = 1 / cos (angle); if (fabs (pix->p1 - pix->p0) < 1e-7) pix->slope = 0; else pix->slope = pix->fmax / (pix->p1 - pix->p0); pix->weight_function = &pixweight_y; } } /** * Function: bin_naive * computes a spectrum from a table of aperture pixels generated from * spc_extract. This is adds up the pixel values, distributing them * over the trace, taking into account the fracton of the pixel that * projects onto the given [xi,xi+1] interval. * Return NULL if ap_p is NULL * * Parameters: * @param ap_p the table of aperture pixels * @param px_width width of a pixel (=1 if not subsampled) * @param ob_width the width of the object that has generated the spectrum * @param ob_orientation the orientation of the object that has * generated the spectrum * @param flags problems that were accumulated in generating ap_p; * possible flags are defined for the warning member of the spectrum struct * * Returns: * @return spec - the 1D spectrum */ spectrum * bin_naive (const ap_pixel * const ap_p, const double ob_width, const double ob_orient, const int quant_cont) { const ap_pixel *cur_p; int upper, lower; int bin; spectrum *spec, *tspec; double phi_trace; spc_entry *spec_table; double d, frac; // immediately return empty PET's if (ap_p==NULL) return NULL; // go through the PET, // find the minimum and // maximum in trace distance cur_p = ap_p; upper = NAIVE_VAL_TO_BININD (cur_p->xi); lower = NAIVE_VAL_TO_BININD (cur_p->xi); while (cur_p->p_x != -1) { bin = NAIVE_VAL_TO_BININD (cur_p->xi); upper = MAX (bin, upper); lower = MIN (bin, lower); cur_p++; } // check whether the spectrum // will ahve a finite length, // exit if not if (upper == lower) { aXe_message (aXe_M_WARN4, __FILE__, __LINE__, "Pixel table empty.\n"); return NULL; } // Thresh in some headway so we don't need to worry too much about // accessing invalid elements now and then lower -= 10; upper += 10; spec = allocate_spectrum (upper - lower); spec_table = spec->spec; cur_p = ap_p; while (cur_p->p_x != -1) { // Compute any fractional pixel that might fall within the //desired extraction width d = ob_width; frac = 1.; // continue if the pixel is outside // of the extraction region if (fabs (cur_p->dist) > d + .5) { cur_p++; continue; } if ((fabs (cur_p->dist) >= d - .5) && (fabs (cur_p->dist) <= d + .5)) { frac = fabs (d - (fabs (cur_p->dist) - 0.5)); } // store the local trace angle phi_trace = cur_p->dxs; if (1) { double xi; w_pixel pix; double sinp = sin (phi_trace); double cosp = cos (phi_trace); double xc, yc; double w; double sum = 0; fill_w_pixel (&pix, cur_p->x, cur_p->y, ob_orient); xc = cur_p->xs; yc = cur_p->ys; // at cur_p->xi, there has to be some contribution. 
We go back // collecting, until w is zero for (xi = cur_p->xi;; xi -= 1) { bin = NAIVE_VAL_TO_BININD (xi); w = PIXWEIGHT (xc + (bin - cur_p->xi) * cosp, yc + (bin - cur_p->xi) * sinp, xc + (bin + 1 - cur_p->xi) * cosp, yc + (bin + 1 - cur_p->xi) * sinp, &pix); if (w < 1e-10) break; add_to_spec_table (spec, bin - lower, cur_p, quant_cont, w * frac * cur_p->weight); // if (cur_p->weight > 10.0){ // fprintf(stdout,"weight: %f, distance: %f, ewidth: %f\n", // cur_p->weight, cur_p->dist, d+0.5); // } sum += w; } /* Now collect contributions upward of cur_p->xi */ for (xi = cur_p->xi + 1;; xi += 1) { bin = NAIVE_VAL_TO_BININD (xi); w = PIXWEIGHT (xc + (bin - cur_p->xi) * cosp, yc + (bin - cur_p->xi) * sinp, xc + (bin + 1 - cur_p->xi) * cosp, yc + (bin + 1 - cur_p->xi) * sinp, &pix); if (w < 1e-10) break; add_to_spec_table (spec, bin - lower, cur_p, quant_cont, w * frac * cur_p->weight); sum += w; } if (fabs (sum - 1) > 1e-6) { fprintf(stdout, "Weights added up to only %f for pixel from %4d,%4d\n", sum, cur_p->p_x, cur_p->p_y); } } cur_p++; } /* Trimming the INDEF beginning and ending values in spectrum */ tspec = trim_spectrum (spec); free_spectrum (spec); spec = NULL; // return the spectrum return tspec; } /** * Function: bin_optimal * computes a spectrum from a table of aperture pixels generated from * spc_extract. This is adds up the pixel values, distributing them * over the trace, taking into account the fracton of the pixel that * projects onto the given [xi,xi+1] interval. * Return NULL if ap_p is NULL * * Parameters: * @param ap_p the table of aperture pixels * @param px_width width of a pixel (=1 if not subsampled) * @param ob_width the width of the object that has generated the spectrum * @param ob_orientation the orientation of the object that has * generated the spectrum * @param flags problems that were accumulated in generating ap_p; * possible flags are defined for the warning member of the spectrum struct * * Returns: * @return spec - the 1D spectrum */ spectrum * bin_optimal (const ap_pixel * const ap_p, const beam curbeam, const int quant_cont, const gsl_matrix *weights, const drzstamp_dim dimension,gsl_matrix *coverage) { const ap_pixel *cur_p; ap_pixel *tmp_p; spectrum *spec; spectrum *tspec; spc_entry *spec_table; quadrangle quad; // drzstamp_dim dimension; double jacob, arr; double frac, totweight; int jcen, icen; int jupp, iupp; int jlow, ilow; int ii, jj; int stpi, stpj; // return NULL if // empty PET if (ap_p==NULL) return NULL; // allocate memory tmp_p = (ap_pixel *) malloc(sizeof(ap_pixel)); // allocate memory for the spectrum spec = allocate_spectrum (weights->size1); spec_table = spec->spec; // go over each PET pixel cur_p = ap_p; for (cur_p = ap_p; cur_p->p_x != -1; cur_p++) { // continue if the pixel is outside // of the extraction region if (fabs (cur_p->dist) > curbeam.width + .5) continue; // determine which fraction // of the pixel is inside of the extraction area if ((fabs (cur_p->dist) >= curbeam.width - .5) && (fabs (cur_p->dist) <= curbeam.width + .5)) frac = fabs (curbeam.width - (fabs (cur_p->dist) - 0.5)); else frac = 1.; // transfer values to the temporary pixel tmp_p->lambda = cur_p->xi; tmp_p->dist = cur_p->dist; tmp_p->dxs = cur_p->dxs; tmp_p->dlambda = 1.0; // create the quadrangle for the current pixel quad = get_quad_from_pixel(tmp_p, curbeam.orient, dimension); // get the jacobian (well, easy here) // the term "cos(cur_p->dxs)" must be there // to correct the enlargement necessary // to cover the whole lambda-crossdispersion area! 
// NOT COMPLETELY understood jacob = cos(cur_p->dxs); // get the central pixel (icen, jcen) of the current PET-pixel icen = (int) floor(cur_p->xi - dimension.xstart+.5); jcen = (int) floor(cur_p->dist - dimension.ystart+.5); // get the uper and lower extend of the quadrangle in x iupp = (int)floor(quad.xmax - (double)icen + 0.5)+1; ilow = (int)floor(quad.xmin - (double)icen + 0.5); // get the uper and lower extend of the quadrangle in x jupp = (int)floor(quad.ymax - (double)jcen + 0.5)+1; jlow = (int)floor(quad.ymin - (double)jcen + 0.5); // go over the extend in x for (ii=ilow;ii<iupp;ii++) { // go over the extend in x for (jj=jlow;jj<jupp;jj++) { // get the coordinates of the current output pixel stpi = icen+ii; stpj = jcen+jj; // check whether the current output pixel is within // the stamp image; continue if not if ( (stpi>=dimension.xsize)||(stpi<0)||(stpj>=dimension.ysize)||(stpj<0) ) continue; // get the area which falls onto the current output pixel arr = boxer(stpi,stpj,quad.x,quad.y); // compute the pixel weight from // the various inputs totweight = arr*frac*jacob*gsl_matrix_get(weights, stpi, stpj); //totweight = arr*frac*jacob;//*gsl_matrix_get(weights, stpi, stpj); //gsl_matrix_set(coverage, stpi, stpj, gsl_matrix_get(coverage, stpi, stpj) + arr*frac*jacob); // add the pixel to the spectrum if (totweight > 0.0) add_to_spec_table (spec, stpi, cur_p, quant_cont,totweight); // gsl_matrix_set(coverage, stpi, stpj, sqrt (SQR (gsl_matrix_get(coverage, stpi, stpj)) + SQR (fabs(cur_p->error) * totweight))); } } } /* Trimming the INDEF beginning and ending values in spectrum */ tspec = trim_spectrum (spec); free_spectrum (spec); spec = NULL; free(tmp_p); // return the spectrum return tspec; } /** does a straight forward summation/binning of an aperture pixel table with appropriate weights (cf. 
Hornes 1986) @param ap_p the table of aperture pixels @param ob_orient the orientation of the object that has generated the spectrum @param flags problems that were accumulated in generating ap_p; possible flags are defined for the warning member of the spectrum struct @param n_sub subsampling factor @return an allocated spectrum @see spectrum */ spectrum * bin_weighted (const ap_pixel * const ap_p, const double ob_orient, const trace_func * const trace, const int n_sub, const int flags) { gsl_vector *binned_table; gsl_vector *wei_table; gsl_vector *wei2_table; double xi, d, w, wei, wei2; int xii, num_bin, bin; const ap_pixel *cur_p = ap_p; double upper = cur_p->xi, lower = cur_p->xi; spectrum *spec; while (cur_p->p_x != -1) { upper = MAX (cur_p->xi, upper); lower = MIN (cur_p->xi, lower); cur_p++; } lower -= 10; upper += 10; lower = floor (lower); upper = floor (upper + 1); num_bin = floor ((upper - lower) / n_sub); spec = allocate_spectrum (num_bin); binned_table = gsl_vector_alloc (num_bin); wei_table = gsl_vector_alloc (num_bin); wei2_table = gsl_vector_alloc (num_bin); gsl_vector_set_all (binned_table, 0); gsl_vector_set_all (wei_table, 0); gsl_vector_set_all (wei2_table, 0); cur_p = ap_p; while (cur_p->p_x != -1) { xi = (cur_p->xi - lower) / n_sub; xii = floor (xi); d = fabs(cur_p->dist); w = exp (-d * d / (2 * 6.66)); w = 1.; add_to_spec_table (spec, xii - 1,cur_p, 0, w * (1 - (xi - xii))); add_to_spec_table (spec, xii , cur_p, 0, w * (xi - xii)); gsl_vector_set (wei_table, xii - 1, gsl_vector_get (wei_table, xii - 1) + w); gsl_vector_set (wei_table, xii, gsl_vector_get (wei_table, xii) + w); gsl_vector_set (wei2_table, xii - 1, gsl_vector_get (wei2_table, xii - 1) + w * w); gsl_vector_set (wei2_table, xii, gsl_vector_get (wei2_table, xii) + w * w); //fprintf(stderr,"%f,%d,%d\n",xi,xii,xii+1); cur_p++; } cur_p = ap_p; for (bin = 0; bin < num_bin; bin++) { wei = gsl_vector_get (wei_table, bin); wei2 = gsl_vector_get (wei2_table, bin); if (wei2 != 0) { spec->spec[bin].count = spec->spec[bin].count * wei / wei2; } } spec->warning = 0; return spec; }
Brink’s U.S., a division of Brink’s, Incorporated, is the premier provider of armored car transportation, ATM servicing, currency and coin processing, document destruction and other value-added services to financial institutions, retailers and other commercial and government entities. The company has a proud history of providing growth and advancement opportunities for its employees. We have a challenging opportunity for a Manager – Cash Logistics. The Manager – Cash Logistics is responsible for managing the vault and Currency and Coin operations inside the facility and typically supervises up to 20 employees. This position is responsible for assisting the City Manager or Senior Manager Operations in the secure, safe and efficient functioning of an armored car operation. The Cash Logistics Team Lead drives the execution of the business and motivates team members to achieve the best results and to drive continuous process improvement. This position will lead a dedicated team of Cash Logistics employees and will require extensive coordination with other on-site operations. This position functions in an armed environment. This position requires the enforcement of rules to protect the premises and property of Brink's and its customers, as well as the safety of persons on the premises of Brink's and its customers.
In the weeks leading up to the coup, Saprang openly mobilised soldiers and northern residents to rebel against the government. Saprang played a key role on the evening of 19 September 2006, securing Thaksin's home town and power base of Chiang Mai. That same night, he was appointed assistant Secretary-General of the CNS. The coup was executed just a week before the announcement of the Army's annual reshuffle.
Teachers and scholars at the Vassal Lane Upper School celebrated summer reading as a community on Friday, September 7th. Last spring, every faculty member at Vassal Lane selected one book to read during the summer, and the staff created a Faculty Book List to share with incoming scholars. Each scholar was required to read one book from the Vassal Lane Faculty Book List and to return to school in September ready to discuss the book in a small book group. On the first Friday of the school year, teachers and scholars met and discussed their books in more than forty small book groups. Book groups met outside on the field behind the school on a beautiful, sunny afternoon. One Vassal Lane scholar noted that students made their book selections based both on interest in the book and on their connections with particular teachers. Book groups had a mix of sixth, seventh, and eighth grade scholars, giving everyone a chance to get to know each other a little better outside of their regular classroom schedules and groupings. The most popular books on the Faculty Book List were The Fault in Our Stars by John Green, The Absolutely True Diary of a Part-Time Indian by Sherman Alexie, Every Day by David Levithan, and Wonder by R.J. Palacio. In addition, a large number of scholars enjoyed two of the nonfiction titles on the Faculty Book List: A Black Hole is NOT a Hole by Carolyn DeCristofano and Race: A History Beyond Black and White by Marc Aronson. Faculty members and scholars are looking forward to the next opportunity to share books together as Vassal Lane continues to strengthen and celebrate its growing community of readers.
__precompile__() module MiniPETSc using Reexport @reexport using MPI const library = string(ENV["PETSC_DIR"], "/", ENV["PETSC_ARCH"], "/lib/libpetsc") include("PetscTypes.jl") function finalize() ccall((:PetscFinalize, library), PetscErrorCode, ()) # Somehow doesn't work here # MPI.Finalize() end function __init__() args = vcat("julia", ARGS) nargs = length(args) MPI.Init() ccall((:PetscInitializeNoPointers, library), PetscErrorCode, (Cint, Ptr{Ptr{UInt8}}, Cstring, Cstring), nargs, args, C_NULL, C_NULL) # Cleanup at the end atexit(finalize) end export PetscMat export setSize! export setPreallocation! export assemble! export viewMat export zero! export zeroRows! export PetscVec export GhostedPetscVec export setSize! export assemble! export viewVec export plusEquals! export zero! export copy! export serializeToZero export PetscKSP export setOperators export solve! include("Mat.jl") include("Vec.jl") include("KSP.jl") end
from dowhy.utils.dgp import DataGeneratingProcess from sklearn.neural_network import MLPRegressor import numpy as np import pandas as pd class RandomNeuralNetwork(DataGeneratingProcess): TRAINING_SAMPLE_SIZE = 10 RANDOM_STATE = 0 DEFAULT_ARCH = (50, 50, 50) DEFAULT_ARCH_DICT = { 'confounder=>treatment': DEFAULT_ARCH, 'confounder=>outcome': DEFAULT_ARCH, 'effect_modifier=>outcome': DEFAULT_ARCH, 'treatment=>outcome': DEFAULT_ARCH } NAME = "Random Neural Network" def __init__(self, **kwargs): ''' Understanding Architectures in MLP Regressor https://stackoverflow.com/questions/35363530/python-scikit-learn-mlpclassifier-hidden-layer-sizes More Information about Random State https://stackoverflow.com/questions/42191717/python-random-state-in-splitting-dataset/42197534 https://stats.stackexchange.com/questions/80407/am-i-creating-bias-by-using-the-same-random-seed-over-and-over ''' super().__init__(**kwargs) self.arch = kwargs.pop('arch', RandomNeuralNetwork.DEFAULT_ARCH_DICT) self.random_state = kwargs.pop('random_state', RandomNeuralNetwork.RANDOM_STATE) self.nn = {} self.nn['confounder=>treatment'] = MLPRegressor(random_state=self.random_state, hidden_layer_sizes=self.arch['confounder=>treatment']) self.nn['confounder=>outcome'] = MLPRegressor(random_state=self.random_state, hidden_layer_sizes=self.arch['confounder=>outcome']) self.nn['effect_modifier=>outcome'] = MLPRegressor(random_state=self.random_state, hidden_layer_sizes=self.arch['effect_modifier=>outcome']) self.nn['treatment=>outcome'] = MLPRegressor(random_state=self.random_state, hidden_layer_sizes=self.arch['treatment=>outcome']) def generate_data(self, sample_size): self.generation_process() confounder = np.random.randn(sample_size, len(self.confounder)) effect_modifier = np.random.randn(sample_size, len(self.effect_modifier)) control_value = np.zeros( (sample_size, len(self.treatment) ) ) treatment_value = np.ones( (sample_size, len(self.treatment) ) ) treatment = self.nn['confounder=>treatment'].predict(confounder) treatment = treatment[:, np.newaxis] if self.treatment_is_binary: treatment = self.convert_to_binary(treatment) if treatment.ndim == 1: treatment = np.reshape(treatment, (-1, 1)) outcome = self.nn['confounder=>outcome'].predict( np.hstack( (confounder, effect_modifier) ) ) + \ self.nn['effect_modifier=>outcome'].predict(effect_modifier) + \ self.nn['treatment=>outcome'].predict( np.hstack( (confounder, effect_modifier, treatment) ) ) y_control = self.nn['confounder=>outcome'].predict( np.hstack( (confounder, effect_modifier) ) ) + \ self.nn['effect_modifier=>outcome'].predict(effect_modifier) + \ self.nn['treatment=>outcome'].predict( np.hstack( (confounder, effect_modifier, control_value) ) ) y_treatment = self.nn['confounder=>outcome'].predict( np.hstack( (confounder, effect_modifier) ) ) + \ self.nn['effect_modifier=>outcome'].predict(effect_modifier) + \ self.nn['treatment=>outcome'].predict( np.hstack( (confounder, effect_modifier, treatment_value) ) ) # Understanding Neural Network weights # Refer to this link:https://stackoverflow.com/questions/50937628/mlp-classifier-neurons-weights self.weights = {key:self.nn[key].coefs_ for key in self.nn.keys()} self.bias = {key:self.nn[key].intercepts_ for key in self.nn.keys()} if outcome.ndim == 1: outcome = np.reshape(outcome, (-1, 1)) if y_control.ndim == 1: y_control = np.reshape(y_control, (-1, 1)) if y_treatment.ndim == 1: y_treatment = np.reshape(y_treatment, (-1, 1)) self.true_value = np.mean(y_treatment - y_control, axis=0) return pd.DataFrame(np.hstack( 
(effect_modifier, confounder, treatment, outcome) ), columns=self.effect_modifier + self.confounder + self.treatment + self.outcome) def generation_process(self): X = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.confounder) ) y = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.treatment) ) self.nn['confounder=>treatment'].fit(X, y) X = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.confounder) + len(self.effect_modifier)) y = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.outcome) ) self.nn['confounder=>outcome'].fit(X, y) X = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.effect_modifier) ) y = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.outcome) ) self.nn['effect_modifier=>outcome'].fit(X, y) X = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.confounder) + len(self.effect_modifier) + len(self.treatment) ) y = np.random.randn( RandomNeuralNetwork.TRAINING_SAMPLE_SIZE, len(self.outcome) ) self.nn['treatment=>outcome'].fit(X, y) def __str__(self): rep = super().__str__() header = """ Random Neural Network Data Generating Process --------------------------------------------- """ rep += """ arch:{} nn:{} random_state:{} """.format(self.arch, self.nn, self.random_state) rep = header + rep return rep
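# A hedged usage sketch (not part of the class above): the keyword arguments
# accepted by the DataGeneratingProcess base class are not shown in this file,
# so the variable-name lists and the treatment_is_binary flag below are
# assumptions inferred from the attributes this class reads (self.confounder,
# self.effect_modifier, self.treatment, self.outcome, self.treatment_is_binary).
if __name__ == "__main__":
    dgp = RandomNeuralNetwork(
        confounder=["W0", "W1"],
        effect_modifier=["X0"],
        treatment=["v0"],
        outcome=["y"],
        treatment_is_binary=True,
    )
    df = dgp.generate_data(sample_size=1000)
    print(df.head())
    print("true (sample-average) effect:", dgp.true_value)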
(* Author: Tobias Nipkow, 2007 A simple certificate checker for q-free linear arithmetic: is linear combination of atoms and certificate contradictory? *) theory CertLin imports LinArith begin declare list_add_assoc [simp] instantiation atom :: monoid_add begin fun plus_atom :: "atom \<Rightarrow> atom \<Rightarrow> atom" where "(Eq r\<^sub>1 cs\<^sub>1) + (Eq r\<^sub>2 cs\<^sub>2) = Eq (r\<^sub>1+r\<^sub>2) (cs\<^sub>1+cs\<^sub>2)" | "(Eq r\<^sub>1 cs\<^sub>1) + (Less r\<^sub>2 cs\<^sub>2) = Less (r\<^sub>1+r\<^sub>2) (cs\<^sub>1+cs\<^sub>2)" | "(Less r\<^sub>1 cs\<^sub>1) + (Eq r\<^sub>2 cs\<^sub>2) = Less (r\<^sub>1+r\<^sub>2) (cs\<^sub>1+cs\<^sub>2)" | "(Less r\<^sub>1 cs\<^sub>1) + (Less r\<^sub>2 cs\<^sub>2) = Less (r\<^sub>1+r\<^sub>2) (cs\<^sub>1+cs\<^sub>2)" definition "0 = Eq 0 []" instance apply intro_classes apply(simp_all add: zero_atom_def) apply(case_tac a) apply(case_tac b) apply(case_tac c) apply simp_all apply(case_tac c) apply simp_all apply(case_tac b) apply(case_tac c) apply simp_all apply(case_tac c) apply simp_all apply(case_tac a) apply simp_all apply(case_tac a) apply simp_all done end lemma I_R_additive: "I\<^sub>R a xs \<Longrightarrow> I\<^sub>R b xs \<Longrightarrow> I\<^sub>R(a+b) xs" apply(case_tac a) apply(case_tac b) apply (simp_all add:iprod_left_add_distrib) apply(case_tac b) apply (simp_all add:iprod_left_add_distrib) done fun mult_atom :: "real \<Rightarrow> atom \<Rightarrow> atom" (infix "*\<^sub>a" 70) where "c *\<^sub>a Eq r cs = Eq (c*r) (c *\<^sub>s cs)" | "c *\<^sub>a Less r cs = (if c=0 then Eq 0 [] else Less (c*r) (c *\<^sub>s cs))" definition iprod_a :: "real list \<Rightarrow> atom list \<Rightarrow> atom" (infix "\<odot>\<^sub>a" 70) where "cs \<odot>\<^sub>a as = (\<Sum>(c,a) \<leftarrow> zip cs as. c *\<^sub>a a)" lemma iprod_a_Nil2[simp]: "cs \<odot>\<^sub>a [] = 0" by(simp add:iprod_a_def) definition contradict :: "atom list \<Rightarrow> real list \<Rightarrow> bool" where "contradict as cs \<longleftrightarrow> size cs = size as \<and> (\<forall>c\<in>set cs. c\<ge>0) \<and> (case cs \<odot>\<^sub>a as of Eq r cs \<Rightarrow> r \<noteq> 0 \<and> (\<forall>c\<in>set cs. c=0) | Less r cs \<Rightarrow> r \<ge> 0 \<and> (\<forall>c\<in>set cs. c=0))" definition "contradict_dnf ass = (\<exists>css. list_all2 contradict ass css)" lemma refute_I: "\<not> Logic.interpret h (Neg f) e \<Longrightarrow> Logic.interpret h f e" by simp lemma I_R_mult_atom: "c \<ge> 0 \<Longrightarrow> I\<^sub>R a xs \<Longrightarrow> I\<^sub>R (c *\<^sub>a a) xs" apply(cases a) apply(clarsimp) apply(simp) done lemma I_R_iprod_a: "size cs = size as \<Longrightarrow> \<forall>(c,a) \<in> set(zip cs as). I\<^sub>R (c *\<^sub>a a) xs \<Longrightarrow> I\<^sub>R (cs \<odot>\<^sub>a as) xs" apply(induct cs as rule:list_induct2) apply (simp add:zero_atom_def) apply(simp add:I_R_additive) done lemma contradictD: "contradict as cs \<Longrightarrow> \<exists>a\<in>set as. 
\<not> I\<^sub>R a xs" proof - assume "contradict as cs" have "\<not> I\<^sub>R (cs \<odot>\<^sub>a as) xs" proof (cases "cs \<odot>\<^sub>a as") case Less thus ?thesis using \<open>contradict as cs\<close> by(simp add:contradict_def iprod0_if_coeffs0) next case Eq thus ?thesis using \<open>contradict as cs\<close> by(simp add:contradict_def iprod0_if_coeffs0) qed thus ?thesis using \<open>contradict as cs\<close> by(force simp add:contradict_def intro: I_R_iprod_a I_R_mult_atom elim:in_set_zipE) qed lemma cyclic_dnfD: "qfree f \<Longrightarrow> contradict_dnf (dnf(R.nnf f)) \<Longrightarrow> \<not>R.I f xs" apply(subst R.I_nnf[symmetric]) apply(subst R.I_dnf[symmetric]) apply(erule R.nqfree_nnf) apply(auto simp add:contradict_dnf_def list_all2_iff in_set_conv_nth) apply(drule_tac x="(dnf(R.nnf f) ! i, css!i)" in bspec) apply (auto simp:set_zip) apply(drule_tac xs=xs in contradictD) apply auto done end
/- Copyright (c) 2018 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Bhavik Mehta -/ import category_theory.functor.const import category_theory.discrete_category /-! # The category `discrete punit` > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. We define `star : C ⥤ discrete punit` sending everything to `punit.star`, show that any two functors to `discrete punit` are naturally isomorphic, and construct the equivalence `(discrete punit ⥤ C) ≌ C`. -/ universes v u -- morphism levels before object levels. See note [category_theory universes]. namespace category_theory variables (C : Type u) [category.{v} C] namespace functor /-- The constant functor sending everything to `punit.star`. -/ @[simps] def star : C ⥤ discrete punit := (functor.const _).obj ⟨⟨⟩⟩ variable {C} /-- Any two functors to `discrete punit` are isomorphic. -/ @[simps] def punit_ext (F G : C ⥤ discrete punit) : F ≅ G := nat_iso.of_components (λ _, eq_to_iso dec_trivial) (λ _ _ _, dec_trivial) /-- Any two functors to `discrete punit` are *equal*. You probably want to use `punit_ext` instead of this. -/ lemma punit_ext' (F G : C ⥤ discrete punit) : F = G := functor.ext (λ _, dec_trivial) (λ _ _ _, dec_trivial) /-- The functor from `discrete punit` sending everything to the given object. -/ abbreviation from_punit (X : C) : discrete punit.{v+1} ⥤ C := (functor.const _).obj X /-- Functors from `discrete punit` are equivalent to the category itself. -/ @[simps] def equiv : (discrete punit ⥤ C) ≌ C := { functor := { obj := λ F, F.obj ⟨⟨⟩⟩, map := λ F G θ, θ.app ⟨⟨⟩⟩ }, inverse := functor.const _, unit_iso := begin apply nat_iso.of_components _ _, intro X, apply discrete.nat_iso, rintro ⟨⟨⟩⟩, apply iso.refl _, intros, ext ⟨⟨⟩⟩, simp, end, counit_iso := begin refine nat_iso.of_components iso.refl _, intros X Y f, dsimp, simp, -- See note [dsimp, simp]. end } end functor /-- A category being equivalent to `punit` is equivalent to it having a unique morphism between any two objects. (In fact, such a category is also a groupoid; see `groupoid.of_hom_unique`) -/ theorem equiv_punit_iff_unique : nonempty (C ≌ discrete punit) ↔ (nonempty C) ∧ (∀ x y : C, nonempty $ unique (x ⟶ y)) := begin split, { rintro ⟨h⟩, refine ⟨⟨h.inverse.obj ⟨⟨⟩⟩⟩, λ x y, nonempty.intro _⟩, apply (unique_of_subsingleton _), swap, { have hx : x ⟶ h.inverse.obj ⟨⟨⟩⟩ := by convert h.unit.app x, have hy : h.inverse.obj ⟨⟨⟩⟩ ⟶ y := by convert h.unit_inv.app y, exact hx ≫ hy, }, have : ∀ z, z = h.unit.app x ≫ (h.functor ⋙ h.inverse).map z ≫ h.unit_inv.app y, { intro z, simpa using congr_arg (≫ (h.unit_inv.app y)) (h.unit.naturality z), }, apply subsingleton.intro, intros a b, rw [this a, this b], simp only [functor.comp_map], congr, }, { rintro ⟨⟨p⟩, h⟩, haveI := λ x y, (h x y).some, refine nonempty.intro (category_theory.equivalence.mk ((functor.const _).obj ⟨⟨⟩⟩) ((functor.const _).obj p) _ (by apply functor.punit_ext)), exact nat_iso.of_components (λ _, { hom := default, inv := default }) (λ _ _ _, by tidy), }, end end category_theory
module SemVar.Sat import Extra.Debug as Debug import Data.List import Data.Maybe import Extra.Op import Fmt import SemVar.Data public export VersionNode : Type VersionNode = (List String, Version, List (List String, Requirement)) compareBy : Ord b => (a -> b) -> (a -> a -> Ordering) compareBy f = (\x, y => compare (f x) (f y)) versOf : VersionNode -> Version versOf (_, v, _) = v pkgVersOf : VersionNode -> (List String, Version) pkgVersOf (p, v, _) = (p, v) matchPkgVers : VersionNode -> VersionNode -> Bool matchPkgVers x y = pkgVersOf x == pkgVersOf y ||| I think the idea is that we'll walk the list of requirements and ||| narrow down from the list of choices until either we've satisfied ||| all requirements or we've found one we can't solve. ||| Two caveats: ||| a. We'll need to pick up new reqs as we go (for sub deps) ||| i. Note: we can't let ourselves get into an infinite loop here and may have to track "completed" ||| b. If we end up with multiple acceptable packages, we'll just choose the latest of all available options satisfyAll_ : List VersionNode -> List (List String, Version) -> List (List String, Requirement) -> Either String (List (List String, Version)) satisfyAll_ versions pinned [] = Right pinned satisfyAll_ versions pinned ((pkg, req) :: reqs) = -- let _ = Debug.log "Resolving req" (pkg, req, pinned) in case step pinned versions (pkg, req) of Left Nothing => -- This current req is incompatible with current reqs Left (fmt "Package %s req %s is incompatible" (show pkg) (show req)) Left (Just _) => -- This current req is already satisfied -- let _ = Debug.log "Using existing package for" pkg in satisfyAll_ versions pinned reqs Right matches => -- Let's see if any of these matches work let -- Try the highest versions first sorted = sortBy (compareBy versOf) matches -- This new req can't be satisfied, quit -- TODO: Show possible versions? baseErr = fmt "Cannot match %s req %s" (show pkg) (show req) in foldl tryVersion (Left baseErr) sorted where tryVersion : Either String (List (List String, Version)) -> VersionNode -> Either String (List (List String, Version)) tryVersion (Right r) _ = Right r tryVersion (Left _) hd = case find (matchPkgVers hd) versions of Nothing => -- TODO: Should we collect errors Left (fmt "Cannot find deps for %s" (show hd)) Just (_, _, depReqs) => -- Good, we found at least one potential match, let's start by just recursing on head -- TODO: We need to accumulate here, maybe? -- let _ = Debug.log " " hd in -- Non-TCO and non-total satisfyAll_ versions ((pkgVersOf hd) :: pinned) (reqs ++ depReqs) matchReq : List VersionNode -> (List String, Requirement) -> List VersionNode matchReq versions (pkg, req) = filter (\(name, version, _) => name == pkg && satisfy req version) versions step : List (List String, Version) -> List VersionNode -> (List String, Requirement) -> Either (Maybe Version) (List VersionNode) step pinned versions (pkg, req) = case find ((== pkg) . fst) pinned of Just (_, pin) => if satisfy req pin then Left (Just pin) else Left Nothing Nothing => Right $ matchReq versions (pkg, req) export satisfyAll : List VersionNode -> List (List String, Requirement) -> Either String (List (List String, Version)) satisfyAll versions reqs = satisfyAll_ versions [] reqs
function wrapperColorCues(inName, outName) I = imread(inName); dt = load('/work3/sgupta/eccv14-code/piotr-structured-edges-pami/models/forest/modelBsds.mat'); model = dt.model; %% set detection parameters (can set after training) model.opts.multiscale=1; % for top accuracy set multiscale=1 model.opts.sharpen=0; % for top speed set sharpen=0 model.opts.nTreesEval=4; % for top speed set nTreesEval=1 model.opts.nThreads=4; % max number threads for evaluation model.opts.nms=0; % set to true to enable nms [Es] = edgesDetect(I, model, zeros([size(I(:,:,1)), 0])); E = Es; save(outName, 'E', 'Es'); end
[STATEMENT] lemma face_of_convex_hull_aux: assumes eq: "x *\<^sub>R p = u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c" and x: "u + v + w = x" "x \<noteq> 0" and S: "affine S" "a \<in> S" "b \<in> S" "c \<in> S" shows "p \<in> S" [PROOF STATE] proof (prove) goal (1 subgoal): 1. p \<in> S [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. p \<in> S [PROOF STEP] have "p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x [PROOF STEP] by (metis \<open>x \<noteq> 0\<close> eq mult.commute right_inverse scaleR_one scaleR_scaleR) [PROOF STATE] proof (state) this: p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x goal (1 subgoal): 1. p \<in> S [PROOF STEP] moreover [PROOF STATE] proof (state) this: p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x goal (1 subgoal): 1. p \<in> S [PROOF STEP] have "affine hull {a,b,c} \<subseteq> S" [PROOF STATE] proof (prove) goal (1 subgoal): 1. affine hull {a, b, c} \<subseteq> S [PROOF STEP] by (simp add: S hull_minimal) [PROOF STATE] proof (state) this: affine hull {a, b, c} \<subseteq> S goal (1 subgoal): 1. p \<in> S [PROOF STEP] moreover [PROOF STATE] proof (state) this: affine hull {a, b, c} \<subseteq> S goal (1 subgoal): 1. p \<in> S [PROOF STEP] have "(u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x \<in> affine hull {a,b,c}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x \<in> affine hull {a, b, c} [PROOF STEP] apply (simp add: affine_hull_3) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>ua va wa. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x = ua *\<^sub>R a + va *\<^sub>R b + wa *\<^sub>R c \<and> ua + va + wa = 1 [PROOF STEP] apply (rule_tac x="u/x" in exI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>va wa. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x = (u / x) *\<^sub>R a + va *\<^sub>R b + wa *\<^sub>R c \<and> u / x + va + wa = 1 [PROOF STEP] apply (rule_tac x="v/x" in exI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>wa. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x = (u / x) *\<^sub>R a + (v / x) *\<^sub>R b + wa *\<^sub>R c \<and> u / x + v / x + wa = 1 [PROOF STEP] apply (rule_tac x="w/x" in exI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x = (u / x) *\<^sub>R a + (v / x) *\<^sub>R b + (w / x) *\<^sub>R c \<and> u / x + v / x + w / x = 1 [PROOF STEP] using x [PROOF STATE] proof (prove) using this: u + v + w = x x \<noteq> 0 goal (1 subgoal): 1. (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x = (u / x) *\<^sub>R a + (v / x) *\<^sub>R b + (w / x) *\<^sub>R c \<and> u / x + v / x + w / x = 1 [PROOF STEP] apply (auto simp: field_split_simps) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done [PROOF STATE] proof (state) this: (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x \<in> affine hull {a, b, c} goal (1 subgoal): 1. 
p \<in> S [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x affine hull {a, b, c} \<subseteq> S (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x \<in> affine hull {a, b, c} [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: p = (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x affine hull {a, b, c} \<subseteq> S (u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c) /\<^sub>R x \<in> affine hull {a, b, c} goal (1 subgoal): 1. p \<in> S [PROOF STEP] by force [PROOF STATE] proof (state) this: p \<in> S goal: No subgoals! [PROOF STEP] qed
%% Test of different Fourier boundaries detection strategies clear all %% User setup % Choose the signal you want to analyze % (sig1,sig2,sig3,sig4=ECG,sig5=seismic,lena,textures) signal = 'sig4'; % Choose the wanted preprocessing (none,plaw,poly,morpho) params.preproc = 'none'; params.degree=5; % degree for the polynomial interpolation % Choose the wanted detection method (locmax,locmaxmin,ftc) params.method = 'locmaxmin'; params.N = 6; % maximum number of band for the locmaxmin method params.completion = 0; % Perform the detection on the log spectrum instead the spectrum params.log=0; %% Load signals switch lower(signal) case 'sig1' load('sig1.mat'); t=0:1/length(f):1-1/length(f); case 'sig2' load('sig2.mat'); t=0:1/length(f):1-1/length(f); case 'sig3' load('sig3.mat'); t=0:1/length(f):1-1/length(f); case 'sig4' load('sig4.mat'); t=0:length(f)-1; case 'sig5' load('seismic.mat'); f=f(10000:20000); %sub portion of the signal used in the paper t=0:length(f)-1; case 'lena' load lena l=round(size(f,2)/2); imR=[f(:,(l-1:-1:1)) f f(:,(end:-1:end-l+1))]; fftim=fft(imR'); ff=abs(sum(abs(fftim),2)/size(fftim,2)); case 'textures' load('texture.mat'); l=round(size(f,2)/2); imR=[f(:,(l-1:-1:1)) f f(:,(end:-1:end-l+1))]; fftim=fft(imR'); ff=abs(sum(abs(fftim),2)/size(fftim,2)); end if (~strcmp(signal,'lena')) && (~strcmp(signal,'textures')) % We extend the signal by miroring to deal with the boundaries l=round(length(f)/2); f=[f(l-1:-1:1);f;f(end:-1:end-l+1)]; % We compute the Fourier transform of f ff=abs(fft(f)); end %% Perform the detection and plot the detected boundaries boundaries = EWT_Boundaries_Detect(ff,params); boundaries = boundaries*pi/round(length(ff)/2); Show_EWT_Boundaries(abs(fft(f)),boundaries,10);
#ifndef setupdm_h #define setupdm_h #include <ceed.h> #include <petsc.h> #include <petscdmplex.h> #include <petscfe.h> #include "../include/structs.h" // ----------------------------------------------------------------------------- // Setup DM // ----------------------------------------------------------------------------- PetscErrorCode CreateBCLabel(DM dm, const char name[]); // Create FE by degree PetscErrorCode PetscFECreateByDegree(DM dm, PetscInt dim, PetscInt Nc, PetscBool is_simplex, const char prefix[], PetscInt order, PetscFE *fem); // Read mesh and distribute DM in parallel PetscErrorCode CreateDistributedDM(MPI_Comm comm, AppCtx app_ctx, DM *dm); // Setup DM with FE space of appropriate degree PetscErrorCode SetupDMByDegree(DM dm, AppCtx app_ctx, PetscInt order, PetscBool boundary, PetscInt num_comp_u); #endif // setupdm_h
\section*{Operations} \label{sec:op} \subsection*{Flight preparations} \label{subsec:op-prep} There are many software and hardware pre- and post-flight checks in place. However, manual verifications are still required to ensure performance and safety during flights, since elements such as mechanical integrity cannot be automatically verified. \subsubsection*{Pre-flight checklist} The following is a checklist of elements and statuses that must be verified, at the very least, once before each extended flight session. \vspace{-0.75cm} \begin{enumerate} \itemsep -5pt \item Verify that the overall mechanical structure is undamaged and that the payload is securely mounted \item Verify that the propellers spin in the right directions and are properly tightened \item Verify that the Li-Po batteries are sufficiently charged ($ \approx 16.8 $~V) and well fastened \item Verify that the battery alarm is connected and functional \item Turn on and enable the kill switch transmitter \item Turn on the manual control transmitter \item Connect the batteries and power on the onboard computer, flight controller, and peripherals \item Verify that radio calibration and presets are correct \item Verify that the optical flow and computer vision camera lenses are focused (and lens covers are removed) \item Calibrate inertial sensors (magnetometers, gyroscopes, accelerometers) \item Verify that telemetry and network communications are functional \end{enumerate} \subsubsection*{Post-flight checklist} After each flight session or attempt, the following checks must be made. \vspace{-0.75cm} \begin{enumerate} \itemsep -5pt \item Properly terminate every process and data acquisition/logging \item Shut down the onboard computer and other peripherals \item Disconnect the batteries \item Turn off transmitters \item Verify that the battery levels are above the safety threshold and that the batteries are physically intact \item Verify that all component temperatures are within their normal operating ranges \item Verify that every mount, propeller and screw is properly tightened (especially after a hard landing or a crash) \end{enumerate} \subsection*{Man/machine interface} \label{subsec:op-interface} Prior to a flight, as mentioned in the \textit{Target Detection} section, a remote calibration tool for the cameras is used to adjust to the environment’s lighting. In-flight, an SSH connection or a ROS remote connection allows us to monitor flight data over our Wi-Fi network. In case of emergency, the pilot’s transmitter offboard switch permits manual control at all times, and the kill switch’s dedicated transmitter cuts the power of all motors.
<a href="https://colab.research.google.com/github/nickwotton/MQP2019/blob/master/Nick/BSM_NN_v01.ipynb" target="_parent"></a> # Attempt to Replicate the Black Scholes Model Using a Neural Network ```python import torch import torch.nn as nn import numpy as np import torch.nn.functional as F import matplotlib.pyplot as plt import scipy.stats as ss ``` ## Define the Function Here we define our function, the Black Scholes Model (BSM). First, we must initialize the option class, then the Geometric Brownian Motion Class, and finally the BSM class. Then we test the equation with a test value of 2. ```python '''========= option class init ==========''' class VanillaOption: def __init__( self, otype = 1, # 1: 'call' # -1: 'put' strike = 110., maturity = 1., market_price = 10.): self.otype = otype # Put or Call self.strike = strike # Strike K self.maturity = maturity # Maturity T self.market_price = market_price #this will be used for calibration def payoff(self, s): #s: excercise price otype = self.otype k = self.strike maturity = self.maturity return np.max([0, (s - k)*otype]) ``` ```python '''============ Gbm class =============''' class Gbm: def __init__(self, init_state = 100., drift_ratio = .0475, vol_ratio = .2 ): self.init_state = init_state self.drift_ratio = drift_ratio self.vol_ratio = vol_ratio ``` ```python '''======== Black-Scholes-Merton formula. ==========''' def bsm_price(self, vanilla_option): s0 = self.init_state sigma = self.vol_ratio r = self.drift_ratio otype = vanilla_option.otype k = vanilla_option.strike maturity = vanilla_option.maturity d1 = 1/(sigma*np.sqrt(maturity))*(np.log(s0/k) + (r + np.power(sigma,2)/2)*(maturity)) d2 = 1/(sigma*np.sqrt(maturity))*(np.log(s0/k) + (r - np.power(sigma,2)/2)*(maturity)) return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis - otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2)) Gbm.bsm_price = bsm_price ``` ```python '''======= Get BSM prices given an option and a vector =======''' def vector_bsm(self, vanilla_option, data): outputData = [] for i in data: gbm1.init_state = i callPrice = gbm1.bsm_price(vanilla_option) outputData.append(callPrice) return outputData Gbm.vector_bsm = vector_bsm ``` ## Create Model Next, we create the neural network model. This is done first by setting the inner and outer dimensions with variables. Next we code the model and vary the internal dimensions to attempt to improve the model. At this level, this is essentially a simple linear algebra exercise: If we have input $x$, internal parameters $a,b$, and solution $f(x)$ then in the one-dimensional case we have: \begin{equation} \left( a_{1}x+b_{2} \right) a_{2} + b_{2} = f(x) \end{equation} However, we want to get a better estimate for the true equation. So we increase the interior dimension which corresponds to the number of neurons inside the network. For example, we raised the inner dimension to 3. In matrix form we have: \begin{equation} \left( \begin{bmatrix} x \end{bmatrix} \begin{bmatrix} a_{1} & a_{2} & a_{3} \end{bmatrix} + \begin{bmatrix} b_{1} & b_{2} & b_{3} \end{bmatrix} \right) \begin{bmatrix} a_{4} \\ a_{5} \\ a_{6} \end{bmatrix} + \begin{bmatrix} b_{4} \\ \end{bmatrix} = \begin{bmatrix} f(x) \end{bmatrix} \end{equation} Graphically, we can render this second neural network as: What we discovered here is that ReLU was slowing down the process, so since our function is Linear, we can just remove it. 
Additionally, we discerned that the higher the inner dimension, that is, the more nodes in each layer, the smaller the error and the better the performance. ```python #model #nn.Linear in_dim = 1 out_dim = 1 int_dim = 10 model = nn.Sequential( nn.Linear(in_dim, int_dim), nn.ReLU(), #nn.Linear(int_dim, int_dim), #nn.ReLU(), nn.Linear(int_dim, out_dim) ) ``` Here we define the loss function as the Mean Squared Error (MSE). Note that by doing so, we are essentially 'cheating' the system. In most applications, we would not know the function $f$ so we would be unable to find the MSE. ```python #loss function criterion = nn.MSELoss() ``` Next we choose a learning rate and a method for learning. The learning rate controls how large a step the optimizer takes along the negative gradient at each iteration. The methods we tried were SGD and Adam. ```python #optimizer learning_rate = 0.001 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) ``` ## Train the Model First we create the training data. This is a batch of random points that we pass through the BSM. ```python #training data batch_size = 1000 x_trainPy = torch.randn(batch_size, 1) #print(x_trainPy) x_trainFlat = torch.flatten(x_trainPy) #print(x_trainFlat) x_train = np.abs(x_trainFlat.tolist()) # Convert to a list for ease of use, Take absolute value since we need stock prices > 0 #print(x_train) x_trainScaled = [x * 100 for x in x_train] # Scale list to make valid stock prices #print(x_trainScaled) gbm1 = Gbm() option1 = VanillaOption() y_train = gbm1.vector_bsm(option1, x_trainScaled) #print(y_train) #for i in x_trainScaled: # gbm1.init_state = i #callPrice = gbm1.bsm_price(option1) #y_train.append(callPrice) #print(x_train) #print(x_trainScaled) #print(y_train) x_trainTensor = torch.FloatTensor(x_trainScaled) # Convert the data to a Tensor again for use in training the model x_trainRotTensor = x_trainTensor.unsqueeze(1) # Transpose the tensor to make it into the form we need for training y_trainTensor = torch.FloatTensor(y_train) y_trainRotTensor = y_trainTensor.unsqueeze(1) #print(x_trainTensor) #print(x_trainRotTensor) #print(y_trainTensor) #print(y_trainRotTensor) ``` Once we have the training data, we pass this collection of inputs and solutions into the model. With each iteration we calculate the loss and attempt to optimize the model to further reduce the loss.
```python # Train the model num_epochs = 1000 for epoch in range(num_epochs): # Forward pass outputs = model(x_trainRotTensor) loss = criterion(outputs, y_trainRotTensor) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() if (epoch+1) % 50 == 0: print ('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item())) #print(x_trainRotTensor[0:10]) #print(outputs[0:10]) #print(y_trainRotTensor[0:10]) ``` Epoch [50/1000], Loss: 1208.3547 Epoch [100/1000], Loss: 1207.5942 Epoch [150/1000], Loss: 1206.9716 Epoch [200/1000], Loss: 1206.4617 Epoch [250/1000], Loss: 1206.0442 Epoch [300/1000], Loss: 1205.7023 Epoch [350/1000], Loss: 1205.4222 Epoch [400/1000], Loss: 1205.1929 Epoch [450/1000], Loss: 1205.0050 Epoch [500/1000], Loss: 1204.8511 Epoch [550/1000], Loss: 1204.7250 Epoch [600/1000], Loss: 1204.6216 Epoch [650/1000], Loss: 1204.5369 Epoch [700/1000], Loss: 1204.4674 Epoch [750/1000], Loss: 1204.4105 Epoch [800/1000], Loss: 1204.3638 Epoch [850/1000], Loss: 1204.3254 Epoch [900/1000], Loss: 1204.2939 Epoch [950/1000], Loss: 1204.2679 Epoch [1000/1000], Loss: 1204.2467 ## Testing the Model ```python #test x_Py = torch.randn(50,1) x_Flat = torch.flatten(x_Py) x_list = np.abs(x_Flat.tolist()) # Convert to a list for ease of use, Take absolute value since we need stock prices > 0 x_ = [x * 100 for x in x_list] y_ = gbm1.vector_bsm(option1, x_) x_Tensor = torch.FloatTensor(x_) # convert back to tensor x_RotTensor = x_Tensor.unsqueeze(1) y_Tensor = torch.FloatTensor(y_) # convert back to tensor y_RotTensor = y_Tensor.unsqueeze(1) #print(y_RotTensor) plt.scatter(x_RotTensor.detach().numpy(), y_RotTensor.detach().numpy(), label='true') y_pred = model(x_RotTensor) #print(y_pred) plt.scatter(x_RotTensor.detach().numpy(), y_pred.detach().numpy(), label='pred') plt.legend() ```
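As a small follow-up (not in the original notebook), we can also quantify the fit on the test points instead of only plotting it. This sketch assumes `model`, `criterion`, `x_RotTensor`, and `y_RotTensor` from the cells above are still in scope.

```python
# Sketch: numeric test error for the trained model.
with torch.no_grad():                        # no gradients needed for evaluation
    y_pred = model(x_RotTensor)
    test_mse = criterion(y_pred, y_RotTensor).item()

print('Test MSE :', round(test_mse, 4))
print('Test RMSE:', round(test_mse ** 0.5, 4))  # same units as the option price
```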
FUNCTION Twersky( Option, omega, HS, kx, rho0, c0 ) ! Dummy version IMPLICIT NONE REAL (KIND=8) :: omega, rho0, c0 COMPLEX (KIND=8) :: Twersky, kx CHARACTER (LEN=1) :: Option ! Halfspace properties TYPE HSInfo CHARACTER (LEN=1) :: BC ! Boundary condition type COMPLEX (KIND=8) :: cP, cS ! P-wave, S-wave speeds REAL (KIND=8) :: rho, BumpDensity, eta, xi ! density, boss parameters END TYPE HSInfo TYPE( HSInfo ) :: HS Twersky = 0.0 END FUNCTION Twersky
In an attempt to lure out and destroy a portion of the Grand Fleet, the German High Seas Fleet, with 16 dreadnoughts, six pre-dreadnoughts, six light cruisers and 31 torpedo boats commanded by Vice Admiral Reinhard Scheer, departed the Jade early on the morning of 31 May. The fleet sailed in concert with Rear Admiral Franz von Hipper's five battlecruisers and supporting cruisers and torpedo boats. The Royal Navy's Room 40 had intercepted and decrypted German radio traffic containing plans of the operation. The Admiralty ordered the Grand Fleet, of 28 dreadnoughts and 9 battlecruisers, to sortie the night before to cut off and destroy the High Seas Fleet. On the day of the battle, Marlborough was stationed toward the rear of the British line in the 6th Division of the 1st Battle Squadron.
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : Group α inst✝¹ : MulAction α β inst✝ : One γ c : α f : β → γ ⊢ (mulSupport fun x => f (c⁻¹ • x)) = c • mulSupport f [PROOFSTEP] ext x [GOAL] case h α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : Group α inst✝¹ : MulAction α β inst✝ : One γ c : α f : β → γ x : β ⊢ (x ∈ mulSupport fun x => f (c⁻¹ • x)) ↔ x ∈ c • mulSupport f [PROOFSTEP] simp only [mem_smul_set_iff_inv_smul_mem, mem_mulSupport] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : Group α inst✝¹ : MulAction α β inst✝ : Zero γ c : α f : β → γ ⊢ (support fun x => f (c⁻¹ • x)) = c • support f [PROOFSTEP] ext x [GOAL] case h α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : Group α inst✝¹ : MulAction α β inst✝ : Zero γ c : α f : β → γ x : β ⊢ (x ∈ support fun x => f (c⁻¹ • x)) ↔ x ∈ c • support f [PROOFSTEP] simp only [mem_smul_set_iff_inv_smul_mem, mem_support] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : GroupWithZero α inst✝¹ : MulAction α β inst✝ : One γ c : α hc : c ≠ 0 f : β → γ ⊢ (mulSupport fun x => f (c⁻¹ • x)) = c • mulSupport f [PROOFSTEP] ext x [GOAL] case h α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : GroupWithZero α inst✝¹ : MulAction α β inst✝ : One γ c : α hc : c ≠ 0 f : β → γ x : β ⊢ (x ∈ mulSupport fun x => f (c⁻¹ • x)) ↔ x ∈ c • mulSupport f [PROOFSTEP] simp only [mem_smul_set_iff_inv_smul_mem₀ hc, mem_mulSupport] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : GroupWithZero α inst✝¹ : MulAction α β inst✝ : Zero γ c : α hc : c ≠ 0 f : β → γ ⊢ (support fun x => f (c⁻¹ • x)) = c • support f [PROOFSTEP] ext x [GOAL] case h α : Type u_1 β : Type u_2 γ : Type u_3 inst✝² : GroupWithZero α inst✝¹ : MulAction α β inst✝ : Zero γ c : α hc : c ≠ 0 f : β → γ x : β ⊢ (x ∈ support fun x => f (c⁻¹ • x)) ↔ x ∈ c • support f [PROOFSTEP] simp only [mem_smul_set_iff_inv_smul_mem₀ hc, mem_support]
/- Copyright (c) 2017 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro, Neil Strickland -/ import data.nat.prime import data.pnat.basic /-! # Primality and GCD on pnat > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file extends the theory of `ℕ+` with `gcd`, `lcm` and `prime` functions, analogous to those on `nat`. -/ namespace nat.primes instance coe_pnat : has_coe nat.primes ℕ+ := ⟨λ p, ⟨(p : ℕ), p.property.pos⟩⟩ @[norm_cast] theorem coe_pnat_nat (p : nat.primes) : ((p : ℕ+) : ℕ) = p := rfl theorem coe_pnat_injective : function.injective (coe : nat.primes → ℕ+) := λ p q h, subtype.ext (congr_arg subtype.val h : _) @[norm_cast] theorem coe_pnat_inj (p q : nat.primes) : (p : ℕ+) = (q : ℕ+) ↔ p = q := coe_pnat_injective.eq_iff end nat.primes namespace pnat open _root_.nat /-- The greatest common divisor (gcd) of two positive natural numbers, viewed as positive natural number. -/ def gcd (n m : ℕ+) : ℕ+ := ⟨nat.gcd (n : ℕ) (m : ℕ), nat.gcd_pos_of_pos_left (m : ℕ) n.pos⟩ /-- The least common multiple (lcm) of two positive natural numbers, viewed as positive natural number. -/ def lcm (n m : ℕ+) : ℕ+ := ⟨nat.lcm (n : ℕ) (m : ℕ), by { let h := mul_pos n.pos m.pos, rw [← gcd_mul_lcm (n : ℕ) (m : ℕ), mul_comm] at h, exact pos_of_dvd_of_pos (dvd.intro (nat.gcd (n : ℕ) (m : ℕ)) rfl) h }⟩ @[simp, norm_cast] theorem gcd_coe (n m : ℕ+) : ((gcd n m) : ℕ) = nat.gcd n m := rfl @[simp, norm_cast] theorem lcm_coe (n m : ℕ+) : ((lcm n m) : ℕ) = nat.lcm n m := rfl theorem gcd_dvd_left (n m : ℕ+) : (gcd n m) ∣ n := dvd_iff.2 (nat.gcd_dvd_left (n : ℕ) (m : ℕ)) theorem gcd_dvd_right (n m : ℕ+) : (gcd n m) ∣ m := dvd_iff.2 (nat.gcd_dvd_right (n : ℕ) (m : ℕ)) theorem dvd_gcd {m n k : ℕ+} (hm : k ∣ m) (hn : k ∣ n) : k ∣ gcd m n := dvd_iff.2 (@nat.dvd_gcd (m : ℕ) (n : ℕ) (k : ℕ) (dvd_iff.1 hm) (dvd_iff.1 hn)) theorem dvd_lcm_left (n m : ℕ+) : n ∣ lcm n m := dvd_iff.2 (nat.dvd_lcm_left (n : ℕ) (m : ℕ)) theorem dvd_lcm_right (n m : ℕ+) : m ∣ lcm n m := dvd_iff.2 (nat.dvd_lcm_right (n : ℕ) (m : ℕ)) theorem lcm_dvd {m n k : ℕ+} (hm : m ∣ k) (hn : n ∣ k) : lcm m n ∣ k := dvd_iff.2 (@nat.lcm_dvd (m : ℕ) (n : ℕ) (k : ℕ) (dvd_iff.1 hm) (dvd_iff.1 hn)) theorem gcd_mul_lcm (n m : ℕ+) : (gcd n m) * (lcm n m) = n * m := subtype.eq (nat.gcd_mul_lcm (n : ℕ) (m : ℕ)) lemma eq_one_of_lt_two {n : ℕ+} : n < 2 → n = 1 := begin intro h, apply le_antisymm, swap, apply pnat.one_le, change n < 1 + 1 at h, rw pnat.lt_add_one_iff at h, apply h end section prime /-! ### Prime numbers -/ /-- Primality predicate for `ℕ+`, defined in terms of `nat.prime`. 
-/ def prime (p : ℕ+) : Prop := (p : ℕ).prime lemma prime.one_lt {p : ℕ+} : p.prime → 1 < p := nat.prime.one_lt lemma prime_two : (2 : ℕ+).prime := nat.prime_two lemma dvd_prime {p m : ℕ+} (pp : p.prime) : (m ∣ p ↔ m = 1 ∨ m = p) := by { rw pnat.dvd_iff, rw nat.dvd_prime pp, simp } lemma prime.ne_one {p : ℕ+} : p.prime → p ≠ 1 := by { intro pp, intro contra, apply nat.prime.ne_one pp, rw pnat.coe_eq_one_iff, apply contra } @[simp] lemma not_prime_one : ¬ (1: ℕ+).prime := nat.not_prime_one lemma prime.not_dvd_one {p : ℕ+} : p.prime → ¬ p ∣ 1 := λ pp : p.prime, by {rw dvd_iff, apply nat.prime.not_dvd_one pp} lemma exists_prime_and_dvd {n : ℕ+} (hn : n ≠ 1) : (∃ (p : ℕ+), p.prime ∧ p ∣ n) := begin obtain ⟨p, hp⟩ := nat.exists_prime_and_dvd (mt coe_eq_one_iff.mp hn), existsi (⟨p, nat.prime.pos hp.left⟩ : ℕ+), rw dvd_iff, apply hp end end prime section coprime /-! ### Coprime numbers and gcd -/ /-- Two pnats are coprime if their gcd is 1. -/ def coprime (m n : ℕ+) : Prop := m.gcd n = 1 @[simp, norm_cast] lemma coprime_coe {m n : ℕ+} : nat.coprime ↑m ↑n ↔ m.coprime n := by { unfold coprime, unfold nat.coprime, rw ← coe_inj, simp } lemma coprime.mul {k m n : ℕ+} : m.coprime k → n.coprime k → (m * n).coprime k := by { repeat {rw ← coprime_coe}, rw mul_coe, apply nat.coprime.mul } lemma coprime.mul_right {k m n : ℕ+} : k.coprime m → k.coprime n → k.coprime (m * n) := by { repeat {rw ← coprime_coe}, rw mul_coe, apply nat.coprime.mul_right } lemma gcd_comm {m n : ℕ+} : m.gcd n = n.gcd m := by { apply eq, simp only [gcd_coe], apply nat.gcd_comm } lemma gcd_eq_left_iff_dvd {m n : ℕ+} : m ∣ n ↔ m.gcd n = m := by { rw dvd_iff, rw nat.gcd_eq_left_iff_dvd, rw ← coe_inj, simp } lemma gcd_eq_right_iff_dvd {m n : ℕ+} : m ∣ n ↔ n.gcd m = m := by { rw gcd_comm, apply gcd_eq_left_iff_dvd, } lemma coprime.gcd_mul_left_cancel (m : ℕ+) {n k : ℕ+} : k.coprime n → (k * m).gcd n = m.gcd n := begin intro h, apply eq, simp only [gcd_coe, mul_coe], apply nat.coprime.gcd_mul_left_cancel, simpa end lemma coprime.gcd_mul_right_cancel (m : ℕ+) {n k : ℕ+} : k.coprime n → (m * k).gcd n = m.gcd n := begin rw mul_comm, apply coprime.gcd_mul_left_cancel, end lemma coprime.gcd_mul_left_cancel_right (m : ℕ+) {n k : ℕ+} : k.coprime m → m.gcd (k * n) = m.gcd n := begin intro h, iterate 2 {rw gcd_comm, symmetry}, apply coprime.gcd_mul_left_cancel _ h, end lemma coprime.gcd_mul_right_cancel_right (m : ℕ+) {n k : ℕ+} : k.coprime m → m.gcd (n * k) = m.gcd n := begin rw mul_comm, apply coprime.gcd_mul_left_cancel_right, end @[simp] lemma one_gcd {n : ℕ+} : gcd 1 n = 1 := by { rw ← gcd_eq_left_iff_dvd, apply one_dvd } @[simp] @[symm] lemma coprime.symm {m n : ℕ+} : m.coprime n → n.coprime m := by { unfold coprime, rw gcd_comm, simp } @[simp] lemma one_coprime {n : ℕ+} : (1 : ℕ+).coprime n := one_gcd @[simp] lemma coprime_one {n : ℕ+} : n.coprime 1 := coprime.symm one_coprime lemma coprime.coprime_dvd_left {m k n : ℕ+} : m ∣ k → k.coprime n → m.coprime n := by { rw dvd_iff, repeat {rw ← coprime_coe}, apply nat.coprime.coprime_dvd_left } lemma coprime.factor_eq_gcd_left {a b m n : ℕ+} (cop : m.coprime n) (am : a ∣ m) (bn : b ∣ n) : a = (a * b).gcd m := begin rw gcd_eq_left_iff_dvd at am, conv_lhs {rw ← am}, symmetry, apply coprime.gcd_mul_right_cancel a, apply coprime.coprime_dvd_left bn cop.symm, end lemma coprime.factor_eq_gcd_right {a b m n : ℕ+} (cop : m.coprime n) (am : a ∣ m) (bn : b ∣ n) : a = (b * a).gcd m := begin rw mul_comm, apply coprime.factor_eq_gcd_left cop am bn, end lemma coprime.factor_eq_gcd_left_right {a b m n : ℕ+} (cop 
: m.coprime n) (am : a ∣ m) (bn : b ∣ n) : a = m.gcd (a * b) := begin rw gcd_comm, apply coprime.factor_eq_gcd_left cop am bn, end lemma coprime.factor_eq_gcd_right_right {a b m n : ℕ+} (cop : m.coprime n) (am : a ∣ m) (bn : b ∣ n) : a = m.gcd (b * a) := begin rw gcd_comm, apply coprime.factor_eq_gcd_right cop am bn, end lemma coprime.gcd_mul (k : ℕ+) {m n : ℕ+} (h: m.coprime n) : k.gcd (m * n) = k.gcd m * k.gcd n := begin rw ← coprime_coe at h, apply eq, simp only [gcd_coe, mul_coe], apply nat.coprime.gcd_mul k h end lemma gcd_eq_left {m n : ℕ+} : m ∣ n → m.gcd n = m := by { rw dvd_iff, intro h, apply eq, simp only [gcd_coe], apply nat.gcd_eq_left h } lemma coprime.pow {m n : ℕ+} (k l : ℕ) (h : m.coprime n) : (m ^ k).coprime (n ^ l) := begin rw ← coprime_coe at *, simp only [pow_coe], apply nat.coprime.pow, apply h end end coprime end pnat
Formal statement is: lemma frontier_subset_compact: fixes S :: "'a::heine_borel set" shows "compact S \<Longrightarrow> frontier S \<subseteq> S" Informal statement is: If $S$ is a compact set, then the frontier of $S$ is a subset of $S$.
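A short justification of the informal statement (standard point-set topology, added here for context rather than taken from the source): in a heine_borel space every compact set is closed, so $\mathrm{frontier}\, S = \overline{S} \setminus \mathrm{int}\, S \subseteq \overline{S} = S$.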
Require Export basic. Require Import Logics. Reserved Notation "# T" (at level 200). Set Implicit Arguments. (***************************************************************************) (** * 1. Sublogics, wrapped in modules and functors *) Module Type Sublogic. Parameter Inline Tr : Prop -> Prop. Parameter TrI : forall P:Prop, P -> Tr P. Parameter TrP : forall P:Prop, Tr (Tr P) -> Tr P. Parameter TrMono : forall (P Q:Prop), (P->Q)->Tr P->Tr Q. Notation "# T" := (Tr T). End Sublogic. (** Family of sublogic *) Module Type SublogicFamily. Parameter T : Type. Parameter Inline Tr : T -> Prop -> Prop. Parameter TrI : forall x (P:Prop), P -> Tr x P. Parameter TrP : forall x (P:Prop), Tr x (Tr x P) -> Tr x P. Parameter TrMono : forall x (P Q:Prop), (P->Q)->Tr x P->Tr x Q. End SublogicFamily. Module Type Type_sig. Parameter T : Type. End Type_sig. Module Type Elt_sig (T:Type_sig). Parameter x : T.T. End Elt_sig. Module InstSublogicFamily (L:SublogicFamily) (X:Elt_sig L) <: Sublogic. Import X. Definition Tr := L.Tr x. Definition TrI := L.TrI x. Definition TrP := @L.TrP x. Definition TrMono := @L.TrMono x. End InstSublogicFamily. Module Type ConsistentSublogic. Include Sublogic. Parameter TrCons : ~ Tr False. End ConsistentSublogic. (** Sublogics are monads *) Module AlternativeFormulations. Module Type LogicMonad. Parameter Inline M : Prop -> Prop. Parameter ret : forall P:Prop, P -> M P. Parameter bind : forall P Q:Prop, M P -> (P -> M Q) -> M Q. End LogicMonad. Module SublogicToMonad (M:LogicMonad) : Sublogic with Definition Tr := M.M. Import M. Definition Tr := M. Definition TrI (P:Prop) (p:P) : Tr P := ret p. Definition TrP (P:Prop) (p:Tr(Tr P)) : Tr P := bind p (fun x => x). Definition TrMono (P Q:Prop) (f:P->Q) (p:Tr P) : Tr Q := bind p (fun x => ret (f x)). End SublogicToMonad. Module MonadToSublogic (L:Sublogic) : LogicMonad with Definition M := L.Tr. Import L. Definition M := Tr. Definition ret (P:Prop) (p:P) : M P := TrI p. Definition bind (P Q:Prop) (p:Tr P) (f:P->Tr Q) : Tr Q := TrP (TrMono f p). End MonadToSublogic. End AlternativeFormulations. (** Sublogic equipped with tools useful for doing logics *) Module Type SublogicTheory. Include Sublogic. Definition isL (P:Prop) := Tr P -> P. Global Instance Tr_morph : Proper (iff==>iff) Tr. Admitted. Global Instance isL_morph : Proper (iff==>iff) isL. Admitted. (* monad bind *) Parameter TrB : forall (P Q:Prop), Tr P -> (P -> Tr Q) -> Tr Q. Parameter Tr_ind : forall (P Q:Prop) {i:isL Q}, (P -> Q) -> Tr P -> Q. (** The set of L-propositions: introduction rules *) Parameter Tr_isL : forall P, isL (Tr P). Parameter T_isL : forall P:Prop, P -> isL P. Parameter and_isL : forall P Q, isL P -> isL Q -> isL (P/\Q). Parameter fa_isL : forall A (P:A->Prop), (forall x, isL (P x)) -> isL(forall x, P x). Parameter imp_isL : forall P Q, isL Q -> isL (P -> Q). Parameter iff_isL : forall P Q, isL P -> isL Q -> isL (P <-> Q). Global Hint Resolve Tr_isL T_isL and_isL fa_isL imp_isL iff_isL. Parameter rFF : forall (Q:Prop), Tr False -> Tr Q. Parameter rFF': forall (Q:Prop), Tr False -> isL Q -> Q. (** Introduction tactics *) Ltac Tin := apply TrI. Ltac Texists t := Tin; exists t. Ltac Tleft := Tin; left. Ltac Tright := Tin; right. (** Elimination tactics: - Tabsurd replaces the current goal with Tr False (ex-falso) - Telim H implements rules H:Tr P |- G --> |- P->G when G is a L-prop - Tdestruct H is the equivalent of destruct on a hypothesis Tr(Ind x). 
The goal shall be an L-prop *) Ltac prove_isL := intros; lazymatch goal with | |- isL(Tr _) => apply Tr_isL | |- isL(_ /\ _) => apply and_isL; prove_isL | |- isL True => apply T_isL; exact I | |- isL(impl _ _) => apply imp_isL; prove_isL | |- isL(iff _ _) => apply iff_isL; prove_isL | |- isL(_ -> _) => apply imp_isL; prove_isL | |- isL(forall x, _) => apply fa_isL; intro; prove_isL | |- isL _ => auto 10; fail "Cannot prove isL side-condition" | |- _ => fail "Tactic prove_isL does not apply to this goal" end. Ltac Tabsurd := lazymatch goal with | |- Tr _ => apply rFF | |- _ => apply rFF';[|auto 10;fail"Cannot prove isL side-condition"] end. Ltac Telim H := lazymatch goal with | |- Tr _ => apply TrB with (1:=H); try clear H | |- _ => apply Tr_ind with (3:=H);[auto 10;fail"Cannot prove isL side-condition"|]; try clear H end. Tactic Notation "Tdestruct" constr(H) := Telim H; destruct 1. Tactic Notation "Tdestruct" constr(H) "as" simple_intropattern(p) := Telim H; intros p. End SublogicTheory. Module BuildLogic (L:Sublogic) <: SublogicTheory. Include L. (** Derived sublogic concepts: - more elimination rules (bind of the monad) - the set of L-propositions *) Global Instance Tr_morph : Proper (iff==>iff) Tr. split; apply TrMono; apply H. Qed. Definition isL (P:Prop) := Tr P -> P. Global Instance isL_morph : Proper (iff==>iff) isL. do 2 red; intros. unfold isL; rewrite H; reflexivity. Qed. (* bind *) Lemma TrB : forall (P Q:Prop), Tr P -> (P -> Tr Q) -> Tr Q. intros. apply TrP; revert H; apply TrMono; auto. Qed. Lemma Tr_ind : forall (P Q:Prop) {i:isL Q}, (P -> Q) -> Tr P -> Q. intros. apply i; revert H0; apply TrMono; trivial. Qed. (** The set of L-propositions: introduction rules. L-props are closed under all connectives of negative polarity. *) Lemma Tr_isL : forall P, isL (Tr P). Proof TrP. Lemma T_isL : forall P:Prop, P -> isL P. Proof (fun _ p _ => p). Lemma and_isL : forall P Q, isL P -> isL Q -> isL (P/\Q). compute; intros. split; revert H1; apply Tr_ind; firstorder. Qed. Lemma fa_isL : forall A (P:A->Prop), (forall x, isL (P x)) -> isL(forall x, P x). compute; intros. revert H0; apply Tr_ind; firstorder. Qed. Lemma imp_isL : forall P Q, isL Q -> isL (P -> Q). intros. apply fa_isL; trivial. Qed. Lemma iff_isL : forall P Q, isL P -> isL Q -> isL (P <-> Q). intros; apply and_isL; apply imp_isL; trivial. Qed. Global Hint Resolve Tr_isL T_isL and_isL fa_isL imp_isL iff_isL. (** Elimination rules for falsity *) Lemma rFF (Q:Prop) : Tr False -> Tr Q. apply TrMono; intros; contradiction. Qed. Lemma rFF' (Q:Prop) : Tr False -> isL Q -> Q. intros. apply H0; apply rFF; trivial. Qed. Ltac Tin := apply TrI. Ltac Texists t := Tin; exists t. Ltac Tleft := Tin; left. Ltac Tright := Tin; right. (** Elimination tactics: - Tabsurd replaces the current goal with Tr False (ex-falso) - Telim H implements rules H:Tr P |- G --> |- P->G when G is a L-prop - Tdestruct H is the equivalent of destruct on a hypothesis Tr(Ind x). The goal shall be an L-prop *) Ltac prove_isL := intros; lazymatch goal with | |- isL(Tr _) => apply Tr_isL | |- isL(_ /\ _) => apply and_isL; prove_isL | |- isL True => apply T_isL; exact I | |- isL(impl _ _) => apply imp_isL; prove_isL | |- isL(iff _ _) => apply iff_isL; prove_isL | |- isL(forall x, _) => (apply imp_isL || (apply fa_isL; intro)); prove_isL | |- isL _ => auto 10; fail "Cannot prove isL side-condition" | |- _ => fail "Tactic prove_isL does not apply to this goal" end. 
Ltac Tabsurd := lazymatch goal with | |- Tr _ => apply rFF | |- _ => apply rFF';[|auto 10;fail"Cannot prove isL side-condition"] end. Ltac Telim H := lazymatch goal with | |- Tr _ => apply TrB with (1:=H); try clear H | |- _ => apply Tr_ind with (3:=H);[auto 10;fail"Cannot prove isL side-condition"|]; try clear H end. Tactic Notation "Tdestruct" constr(H) := Telim H; destruct 1. Tactic Notation "Tdestruct" constr(H) "as" simple_intropattern(p) := Telim H; intros p. End BuildLogic. (** The same for consistent logics: False is now an L-prop *) Module BuildConsistentSublogic (L:ConsistentSublogic). Module tmp <: SublogicTheory := BuildLogic L. Include tmp. Lemma FF_isL : isL False. Proof L.TrCons. Global Hint Resolve FF_isL. End BuildConsistentSublogic. (***************************************************************************) (** * 2.Examples of sublogic modules *) (** ** Coq's intuitionistic logic *) Module CoqSublogic <: ConsistentSublogic. Definition Tr P:Prop := P. Definition TrI (P:Prop) (p:P) : Tr P := p. Definition TrP (P:Prop) (p:Tr (Tr P)) : Tr P := p. Definition TrMono (P Q:Prop) (f:P->Q) (p:Tr P) : Tr Q := f p. Definition TrCons : ~ Tr False := fun h => h. End CoqSublogic. Module CoqSublogicThms := BuildConsistentSublogic CoqSublogic. (** ** Classical logic through negated translation *) Module ClassicSublogic <: ConsistentSublogic. Definition Tr (P:Prop) := ~~P. Definition TrI (P:Prop) (p:P) : Tr P := fun np => np p. Definition TrP (P:Prop) (nnnnp:Tr (Tr P)) : Tr P := fun np => nnnnp (fun nnp => nnp np). Definition TrMono (P Q:Prop) (f:P->Q) (nnp:Tr P) : Tr Q := fun nq => nnp (fun p => nq (f p)). Definition TrCons : ~ Tr False := fun h => h (fun x => x). End ClassicSublogic. Module ClassicSublogicThms. Include BuildConsistentSublogic ClassicSublogic. Lemma nnpp (P:Prop) : ((P->False)->False) -> Tr P. Proof (fun h => h). (** excluded-middle: note that P need not be classical, which makes the positive case stronger. *) Lemma classic : forall P, Tr(P \/ (Tr P -> False)). intros P nem. apply nem; right; intro tp. apply Tr_ind with (3:=tp); intros; trivial. apply nem; left; assumption. Qed. End ClassicSublogicThms. (** ** Friedman's A-translation *) Module ASublogic <: SublogicFamily. Definition T:=Prop. Definition Tr A P := P \/ A. Definition TrI (A P:Prop) (p:P) : Tr A P := or_introl p. Definition TrP (A P:Prop) (p:(P\/A)\/A) := match p with | or_introl p => p | or_intror a => or_intror a end. Definition TrMono (A P Q:Prop) (f:P->Q) (p:P\/A) := match p with | or_introl p => or_introl (f p) | or_intror a => or_intror a end. End ASublogic. Module Type Aprop. Parameter x:Prop. End Aprop. Module ASublogicThms (A:Aprop) <: SublogicTheory. Module Asl := InstSublogicFamily ASublogic A. Import Asl. Notation A := A.x. Include BuildLogic Asl. Lemma Aconsistency : isL False <-> ~A. firstorder. Qed. Lemma atom_isL (P:Prop) : (A->P) -> isL P. firstorder. Qed. (** or does not need to be modified *) Lemma or_isL P Q : isL P \/ isL Q -> isL (P\/Q). firstorder. Qed. Global Hint Resolve or_isL. (** existential does need to be modified when one of the instances is an L-prop. *) Lemma ex_isL_raw T (P:T->Prop): (exists x, isL (P x)) -> isL(ex P). firstorder. Qed. (** A more usable rule (we can expect the forall x, isL (P x) assumption to be provable automatically), but which requires T to be inhabited. *) Lemma ex_isL T (P:T->Prop) : T -> (forall x, isL (P x)) -> isL (ex P). compute; intros. destruct H0; trivial. exists X; auto. Qed. Global Hint Resolve ex_isL. Lemma FF_a : Tr False <-> A. split. 
destruct 1;[contradiction|trivial]. right; trivial. Qed. End ASublogicThms. (** Example: if ~~exists x. P(x) is derivable, then so is exists x. P(x) *) Module AtransExample. Parameter (T:Type) (P : T->Prop). Module nnex. Definition x:=exists x, P x. End nnex. Module Atr := ASublogicThms nnex. Import nnex Atr. Lemma markov_rule : ((Tr(exists x, P x) -> Tr False) -> Tr False) -> exists x, P x. intro. apply FF_a. apply H; intro. apply Tr_ind with (3:=H0); intros; trivial. apply Atr.Tr_isL. apply FF_a. assumption. Qed. End AtransExample. (** ** Peirce translation *) Module PeirceTrans <: SublogicFamily. Definition T := Prop. Definition Tr (R A:Prop) := (A->R)->A. Definition TrI (R A:Prop) (a:A) : Tr R A := fun ar => a. Definition TrP (R A:Prop) (tta:Tr R (Tr R A)) : Tr R A := fun ar => tta (fun ara => ar (ara ar)) ar. Definition TrMono (R A B:Prop) (f:A->B) (ta:Tr R A) : Tr R B := fun br => f (ta (fun a => br (f a))). Definition TrCons (R:Prop) : ~ Tr R False := fun frf => frf (False_ind R). End PeirceTrans. Module PeirceSublogicThms (A:Aprop) <: SublogicTheory. Module Psl := InstSublogicFamily PeirceTrans A. Import Psl. Notation A := A.x. Include BuildLogic Psl. Lemma Pconsistency : isL False. firstorder. Qed. End PeirceSublogicThms. (** ** Intersection and cartesian product *) Module Inter (L:SublogicFamily) <: Sublogic. Definition Tr P := forall x, L.Tr x P. Definition TrI (P:Prop) (p:P) : Tr P := fun x => L.TrI x p. Definition TrP (P:Prop) (ttp:Tr(Tr P)) : Tr P := fun x => L.TrP (L.TrMono (fun p => p x) (ttp x)). Definition TrMono (P Q:Prop) (f:P->Q) (p:Tr P) : Tr Q := fun x => L.TrMono f (p x). (* If a member of the family is consistent, then so is the intersection. *) Lemma equiCons : Tr False <-> forall x, L.Tr x False. reflexivity. Qed. End Inter. Module Inter2 (L1 L2:Sublogic) <: Sublogic. Definition Tr P := L1.Tr P /\ L2.Tr P. Definition TrI (P:Prop) (p:P) : Tr P := conj (L1.TrI p) (L2.TrI p). Definition TrP (P:Prop) (ttp:Tr(Tr P)) : Tr P := conj (L1.TrP (L1.TrMono (fun p => proj1 p) (proj1 ttp))) (L2.TrP (L2.TrMono (fun p => proj2 p) (proj2 ttp))). Definition TrMono (P Q:Prop) (f:P->Q) (p:Tr P) : Tr Q := conj (L1.TrMono f (proj1 p)) (L2.TrMono f (proj2 p)). (* If L1 or L2 is consistent, then so is L1/\L2. *) Lemma equiCons : Tr False <-> L1.Tr False /\ L2.Tr False. reflexivity. Qed. Lemma isL_intro P : (L1.Tr P -> P) \/ (L2.Tr P -> P) -> (Tr P -> P). destruct 1; destruct 1; auto. Qed. End Inter2. (***************************************************************************) (** * 3. Building a higher-order logic with L-props. *) Module SublogicToHOLogic (L:SublogicTheory) <: HOLogic. Import L. Record prop_ := mkP { holds : Prop; isprop : isL holds }. Definition prop := prop_. Definition TT : prop. (*begin show*) exists True; auto. (*end show*) Defined. Definition FF : prop. (*begin show*) exists (Tr False); trivial. (*end show*) Defined. Definition Imp (P Q:prop) : prop. (*begin show*) exists (holds P->holds Q). (*end show*) apply imp_isL; apply isprop. Defined. Definition Not p := Imp p FF. Definition And (P Q:prop) : prop. (*begin show*) exists (holds P /\ holds Q). (*end show*) apply and_isL; apply isprop. Defined. Definition Or (P Q:prop) : prop. (*begin show*) exists (Tr(holds P \/ holds Q)). (*end show*) trivial. Defined. Definition Forall {A} (P:A->prop) : prop. (*begin show*) exists (forall x, holds (P x)). (*end show*) apply fa_isL; intros x; apply isprop. Defined. Definition Exist {A} (P:A->prop) : prop. (*begin show*) exists (Tr(exists x, holds (P x))). 
(*end show*) trivial. Defined. Definition Ex2 {A} (P Q:A->prop) : prop. (*begin show*) exists (Tr(exists2 x, holds (P x) & holds (Q x))). (*end show*) trivial. Defined. (** Inference rules *) Lemma rTT : holds TT. exact I. Qed. Lemma rFF P : holds FF -> holds P. simpl. apply Tr_ind; [apply isprop|contradiction]. Qed. Lemma rAnd P Q : holds (And P Q) <-> holds P /\ holds Q. reflexivity. Qed. Lemma rImp P Q : holds (Imp P Q) <-> (holds P -> holds Q). reflexivity. Qed. Lemma rForall A P : holds (Forall P) <-> forall x:A, holds (P x). reflexivity. Qed. Lemma rNot P : holds (Not P) <-> (holds P -> holds FF). reflexivity. Qed. Lemma rOrI P Q : holds P \/ holds Q -> holds (Or P Q). simpl. apply TrI. Qed. Lemma rOrE P Q C : (holds P \/ holds Q -> holds C) -> holds (Or P Q) -> holds C. intro; apply Tr_ind; [apply isprop|trivial]. Qed. Lemma rExI A P : (exists (x:A), holds (P x)) -> holds (Exist P). destruct 1; simpl; apply TrI; eauto. Qed. Lemma rExE A P C : (forall x:A, holds (P x) -> holds C) -> holds (Exist P) -> holds C. intro; apply Tr_ind; [apply isprop|]. destruct 1; eauto. Qed. Lemma rEx2I A (P Q:A->prop) : (exists2 x, holds (P x) & holds (Q x)) -> holds (Ex2 P Q). destruct 1; simpl; apply TrI; eauto. Qed. Lemma rEx2E A P Q C : (forall x:A, holds (P x) -> holds (Q x) -> holds C) -> holds (Ex2 P Q) -> holds C. intro; apply Tr_ind; [apply isprop|]. destruct 1; eauto. Qed. Lemma equiCons : Tr False <-> holds FF. reflexivity. Qed. End SublogicToHOLogic. (* begin hide *) Module TypeClasses. (***************************************************************************) (** * 4. The same ideas but using records and typeclasses *) Class sub_logic0 := mkSubLogic0 { Tr : Prop -> Prop; TrI : forall P:Prop, P -> Tr P; TrB : forall P Q:Prop, Tr P -> (P -> Tr Q) -> Tr Q; Teq1 (P Q:Prop) (m:Tr P) (f:P->Tr Q) (x:P): TrB (TrI x) f = f x; Teq2 (P:Prop) (m:Tr P) : TrB m (@TrI _) = m }. Parameter M0 : sub_logic0. Existing Instance M0. Definition mono (P Q:Prop) (f:P->Q) (m:Tr P) : Tr Q := TrB _ m (fun x => TrI (f x)). Definition proj (P:Prop) (m:Tr(Tr P)) : Tr P := TrB _ m (fun x => x). Class sub_logic := mkSubLogic { P2p : Prop -> Prop; P2p_mono : Proper (impl ==> impl) P2p; P2p_proj : forall P, P2p (P2p P) -> P2p P; P2pI : forall P:Prop, P -> P2p P (* eq1 (P:Prop) (m:P2p P) : P2p_proj (P2pI m) = m; eq2 (P Q:Prop) (f:P->Q) (x:P) : P2p_mono f (P2pI x) = P2pI (f x)*) }. Parameter M : sub_logic. Existing Instance M. Definition ret (P:Prop) (x:P) : P2p P := P2pI x. Definition bind (P Q:Prop) (a : P2p P) (b: P -> P2p Q) : P2p Q := P2p_proj _ (P2p_mono b a). Definition p1 := forall (P Q:Prop) (x:P) (f:P->P2p Q), bind _ (ret x) f = f x. Definition p2 := forall (P:Prop) (x:P), let m := ret x in bind _ m (fun y => ret y) = m. Definition p3 := forall (P Q R:Prop) (x:P) (f:P->P2p Q) (g:Q->P2p R), let m := ret x in bind R (bind Q m f) g = bind _ m (fun x => bind _ (f x) g). (* Lemma L1 : p1. unfold p1, ret, bind; intros. rewrite eq2. rewrite eq1. reflexivity. Qed. Lemma L2 : p2. unfold p2,ret,bind; intros. rewrite eq2. rewrite eq1. reflexivity. Qed. Lemma L3 : p3. unfold p3,ret,bind; intros. repeat rewrite eq2. do 2 rewrite eq1. reflexivity. Qed. *) Section SubLogicFacts. Hypothesis L : sub_logic. Instance P2p_morph : Proper (iff ==> iff) P2p. apply morph_impl_iff1; auto with *. intros P Q e. apply P2p_mono; destruct e; trivial. Qed. Class isL P : Prop := isFormula : P2p P -> P. Instance isL_morph : Proper (iff ==> iff) isL. unfold isL; do 2 red; intros. rewrite H; reflexivity. Qed. Lemma P2p_isL P : isL (P2p P). 
red; apply P2p_proj. Qed. Lemma P2pE : forall (P Q:Prop), isL Q -> (P -> Q) -> (P2p P -> Q). intros. apply H. revert H1; apply P2p_mono; trivial. Qed. (* Building the logic: *) Lemma T_isL (P:Prop) : P -> isL P. red; trivial. Qed. Lemma and_isL P Q : isL P -> isL Q -> isL (P/\Q). unfold isL; intros. split. apply H; revert H1; apply P2p_mono. red; destruct 1; trivial. apply H0; revert H1; apply P2p_mono. red; destruct 1; trivial. Qed. Lemma forall_isL : forall A (P:A->Prop), (forall x:A, isL (P x)) -> isL (forall x:A, P x). unfold isL; intros. apply H. revert H0; apply P2p_mono; red; auto. Qed. Lemma impl_isL : forall P Q, isL Q -> isL (P -> Q). red; intros. apply H. revert H0; apply P2p_mono; red; auto. Qed. (* Nothing about or, ex, False *) Definition FF := P2p False. Definition consistent := ~ FF. Hypothesis cons : consistent. Lemma False_isL : isL False. Proof cons. Lemma not_isL (P:Prop) : isL (~P). apply impl_isL; trivial. Qed. Definition Or P Q := P2p(P\/Q). Instance Or_morph : Proper (iff==>iff==>iff) Or. do 3 red; intros. apply P2p_morph; apply or_iff_morphism; trivial. Qed. Lemma orI P Q : P \/ Q -> Or P Q. red; intros; apply P2pI; trivial. Qed. Lemma orE (P Q C:Prop) : (P -> C) -> (Q -> C) -> Or P Q -> isL C -> C. intros. apply P2pE with (P \/ Q); trivial. destruct 1; auto. Qed. Definition Ex {A} (P:A->Prop) := P2p(ex P). Lemma exI A (P:A->Prop) x : P x -> Ex P. red; intros; apply P2pI. exists x; trivial. Qed. Lemma exE A (P:A->Prop) (C:Prop) : (forall x, P x -> C) -> Ex P -> isL C -> C. intros. apply P2pE with (3:=H0); trivial. destruct 1; eauto. Qed. Instance Ex_morph : forall A, Proper ((pointwise_relation A iff) ==> iff) (@Ex A). do 3 red; intros. apply P2p_morph; apply ex_morph; trivial. Qed. Definition Ex2 {A} (P Q:A->Prop) := P2p(ex2 P Q). Lemma ex2I A (P Q:A->Prop) x : P x -> Q x -> Ex2 P Q. red; intros; apply P2pI. exists x; trivial. Qed. Lemma ex2E A (P Q:A->Prop) (C:Prop) : (forall x, P x -> Q x -> C) -> Ex2 P Q -> isL C -> C. intros. apply P2pE with (3:=H0); trivial. destruct 1; eauto. Qed. Instance Ex2_morph : forall A, Proper (pointwise_relation A iff ==> pointwise_relation A iff ==> iff) (@Ex2 A). do 3 red; intros. apply P2p_morph; apply ex2_morph; trivial. Qed. (* Packaging the logic *) Record prop := mkP { tr : Prop; isprop : isL tr }. Definition TT : prop. exists True. apply T_isL; trivial. Defined. Definition FF' : prop. exists FF. unfold isL,FF; apply P2p_proj. Defined. Definition Imp (P Q:prop) : prop. exists (tr P->tr Q). apply impl_isL; apply isprop. Defined. Definition And (P Q:prop) : prop. exists (tr P /\ tr Q). apply and_isL; apply isprop. Defined. Definition Or' (P Q:prop) : prop. exists (Or (tr P) (tr Q)). unfold isL,Or; apply P2p_proj. Defined. Definition Forall {A} (P:A->prop) : prop. exists (forall x, tr (P x)). apply forall_isL; intros x; apply isprop. Defined. Definition Ex' {A} (P:A->prop) : prop. exists (Ex (fun x => tr (P x))). unfold isL,Ex; apply P2p_proj. Defined. Definition Not p := Imp p FF'. (** Inference rules *) Notation holds := tr. Lemma rTT : holds TT. exact I. Qed. Lemma rFF P : holds FF' -> holds P. simpl. apply P2pE. apply isprop. intros; contradiction. Qed. Lemma rAnd P Q : holds (And P Q) <-> holds P /\ holds Q. reflexivity. Qed. Lemma rImp P Q : holds (Imp P Q) <-> (holds P -> holds Q). reflexivity. Qed. Lemma rForall A P : holds (Forall P) <-> forall x:A, holds (P x). reflexivity. Qed. Lemma rNot P : holds (Not P) <-> (holds P -> holds FF'). reflexivity. Qed. Lemma rOrI P Q : holds P \/ holds Q -> holds (Or' P Q). simpl. 
apply orI. Qed. Lemma rOrE P Q C : (holds P \/ holds Q -> holds C) -> holds (Or' P Q) -> holds C. intros; apply orE with (3:=H0); auto. apply isprop. Qed. Lemma rExI A P : (exists (x:A), holds (P x)) -> holds (Ex' P). destruct 1. apply exI with x; trivial. Qed. Lemma rExE A P C : (forall x:A, holds (P x) -> holds C) -> holds (Ex' P) -> holds C. intros. apply exE with (2:=H0); auto. apply isprop; trivial. Qed. End SubLogicFacts. (** Coq logic *) Section Coq. Definition coq := fun (P:Prop) => P. Instance coq_logic : sub_logic := { P2p := coq }. firstorder. firstorder. firstorder. Defined. Lemma coq_isL (P:Prop) : isL coq_logic P. Proof (fun h=>h). Lemma coq_cons : consistent coq_logic. Proof (fun h => h). End Coq. (** Classical logic *) Section Classic. Definition nnt (P:Prop) := ~~P. Instance classic_logic : sub_logic := { P2p := nnt }. exact (fun P Q (f:P->Q) (nnp:~~P) (nq:~Q) => nnp(fun p => nq(f p))). exact (fun P nnnnp np => nnnnp(fun nnp => nnp np)). exact (fun P p => fun np => np p). Defined. Lemma em P : Or classic_logic P (~P). firstorder. Qed. Lemma cl_cons : consistent classic_logic. Proof (fun h => h(fun x => x)). Lemma cl_isL P : (~~P->P) -> isL classic_logic P. Proof (fun h => h). End Classic. (** Friedman's A-translation *) Section Atrans. Definition Atr A P := P \/ A. Instance Atrans A : sub_logic := { P2p := Atr A }. exact (fun P Q (f:P->Q) (p:P\/A) => match p with | or_introl p => or_introl (f p) | or_intror a => or_intror a end). exact (fun P (p:(P\/A)\/A) => match p with | or_introl p => p | or_intror a => or_intror a end). exact (fun P (p:P) => or_introl p). Defined. Lemma atr_atom (A P:Prop) : (A->P) -> isL (Atrans A) P. firstorder. Qed. (* or does not need to be modified *) Lemma atr_or_isL A P Q : isL (Atrans A) P \/ isL (Atrans A) Q -> isL (Atrans A) (P\/Q). firstorder. Qed. Lemma atr_a A : FF(Atrans A) <-> A. split. destruct 1;[contradiction|trivial]. right; trivial. Qed. Lemma atr_nnex T (P:T->Prop) : (forall A, ((Ex(Atrans A) P) -> FF(Atrans A))->FF(Atrans A)) -> exists x:T, P x. intros. set (A:=exists x, P x). apply atr_a. apply H; intro. apply (fun P Q => @P2pE (Atrans A) P (P2p Q)) with (3:=H0). apply P2p_isL. apply atr_a. Qed. End Atrans. (** Peirce's translation *) Section PeirceTrans. Definition Ptr (R A:Prop) := (A->R)->A. Instance Ptrans R : sub_logic := { P2p := Ptr R }. firstorder. firstorder. firstorder. Defined. Lemma Pcons R : consistent (Ptrans R). do 2 red. unfold FF, Ptrans. unfold P2p. unfold Ptr. intros. apply H; intros. contradiction. Qed. End PeirceTrans. End TypeClasses. (* end hide *)
\newacronym{jvm}{JVM}{Java Virtual Machine} \newacronym{rest}{ReST}{Representational State Transfer} \newacronym{dx}{DX}{Developer Experience} \newacronym{vcs}{VCS}{Version Control System} \newacronym{scm}{SCM}{Source Code Management} \newacronym{fs}{FS}{File System} \newacronym{ci}{CI}{Continuous Integration} \newacronym{saas}{SaaS}{Software as a Service} \newacronym{rdd}{RDD}{Resilient Distributed Dataset} \newacronym{soc}{SoC}{Separation of Concerns} \newacronym{dom}{DOM}{Document Object Model} \newacronym{spa}{SPA}{Single Page Application} \newacronym{seo}{SEO}{Search Engine Optimization} \newacronym{hmr}{HMR}{Hot Module Replacement} \newacronym{svg}{SVG}{Scalable Vector Graphics} \newacronym{tf-idf}{TF-IDF}{Term Frequency -- Inverse Document Frequency} \newacronym{utc}{UTC}{Coordinated Universal Time} \newacronym{gmt}{GMT}{Greenwich Mean Time} \section{Requirements Analysis} This is a vital part and first step of our methodology leading to a proposed solution. \subsection{UX Personas} In order to derive a meaningful list of design\index{design} requirements we make use of an instrument from designing\index{design} products called \gls{ux} personas. It has been pioneered for usage with software development by Cooper~\cite{Cooper2004}. The basic idea is to come up with some stereotypical ``personalities'' described by certain characteristics which represent our target user groups. An important aspect is the potential creation of empathy with our future users. Each of these personas is, typically, illustrated with a profile picture and at least a firstname. The notion is to create some degree of familiarity and identifiability for the \gls{ux} designer\index{design} and other parties involved in the design\index{design} process. Furthermore, usually, a persona is equipped with some demographical coordinates, some sort of tagline which serves as an executive summary, enhanced with background info, and motivations. All of this information is normally pointed and rather skimped. It should support in easily creating a vivid idea and image of the different users of the product in design\index{design}. Finally, scenarios or user stories briefly describe ways in which these particular user types would use the imaginary product. In the end, it is important the resulting personas can also be physically tangible -- for instance, printed out on cards and pinned onto a whiteboard. Personas are a valuable tool for subsequent design\index{design} of \gls{ui} and interactions. Table~\ref{tab:ux-personas} provides a high-level overview of the personas we came up with for our prototypical\index{prototype} software, focusing on skill set distribution. As one can see, persona skills range from overall highly to overall lowly skilled as well as individuals with focus on certain different skills. This makes for an interesting foundation as, all in all, quite a disperse set of potential users has to be catered for. The following pages contain our personas themselves. The profile pictures were created with an online avatar tool\footnote{\textcolor{blue}{\href{http://avachara.com/avatar/}{avachara.com/avatar/}}}. 
\begin{table} \centering \begin{tabular}{ccccccc} \toprule \emph{Skills} & Hugo & Alice & Bob & John & Jane & Walter \\ \midrule Technical & \textbf{low} & \textbf{high} & low & low & high & low \\ Scientific & medium & \textbf{high} & \textbf{low} & medium & high & high \\ Data-Related & medium & high & \textbf{medium} & medium & \textbf{high} & medium \\ Temporal Interest & medium & high & \textbf{low} & high & \textbf{high} & high \\ \bottomrule \end{tabular} \caption{UX personas skill summary and comparison. Edge entries in \textbf{bold}.} \label{tab:ux-personas} \end{table} \subsection{Requirements List} \label{sec:requirements-list} Through the creation of our personas we were able to properly visualize and dissect corresponding requirements for our prototype\index{prototype}. Consequently, we have derived these: \begin{itemize} \item \textbf{R1:} The prototype\index{prototype} must be capable of loading and working with diverse datasets \item \textbf{R2:} Moreover, it must be intuitive for casual users (i.e., less technically expertized) \item \textbf{R3:} Yet, some shortcuts for rather power users should be supported as well \item \textbf{R4:} Focus of our approach\index{approach} has to be on visual-interactive\index{visual-interactive} charting aid \item \textbf{R5:} These charts must be centered on applying time-oriented\index{time-oriented} data transformations\index{transformation} \item \textbf{R6:} Plus, they should provide extraordinary visual overview of the dataset at hand \item \textbf{R7:} Thus, focus has to be put on choosing most effective and efficient visualizations \item \textbf{R8:} Furthermore, interactively exploring data must be conveniently possible \item \textbf{R9:} A more traditional tabular editor should be available with direct manipulation \item \textbf{R10:} Editing time-oriented\index{time-oriented} data should be supported by specific \gls{ui} controls \item \textbf{R11:} Data quality issues need to be easily identifiable and effectually addressable \item \textbf{R12:} Conveniently spotting data anomalies respectively outliers should be possible \item \textbf{R13:} Concrete time-oriented\index{time-oriented} data transformation\index{transformation} operations to be supported: \begin{itemize} \item Data cleaning regarding missing and erroneous values \item Normalization concerning points in time and intervals \item Merging columns in an intuitive visual-interactive\index{visual-interactive} way \item Formatting cleanup, e.g., inconsistencies or conversion \end{itemize} \end{itemize} \subsection{Hugo} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-hugo} \subsubsection{Demographics} \begin{itemize} \item Age: 35 \item Location: Vienna, Austria \item Job: Business Analyst \item Expertise: Marketing \& Statistics\index{statistics} \end{itemize} \subsubsection{Tagline} \textit{``I need to quickly filter out erroneous data from market survey results.''} \subsubsection{Background} \begin{itemize} \item Studied business administration focusing on marketing and specializing in statistics\index{statistics} \item Some years of working experience in the industry \item Responsible for pointing out business opportunities through analyses\index{analysis} \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Wants to see the ``big picture'' \item Doesn't want to ``lose'' any time \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Got huge amounts of messy real-world data from various market surveys \item Wants to 
``scan'' this data quickly for using it in market/business analyses\index{analysis} \item Often data is time-oriented\index{time-oriented}, as it denotes market-related developments over time \end{itemize} \subsection{Alice} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-alice} \subsubsection{Demographics} \begin{itemize} \item Age: 31 \item Location: Vienna, Austria \item Job: Academic Researcher \item Expertise: Mathematics \& Statistics\index{statistics} \end{itemize} \subsubsection{Tagline} \textit{``I'm interested in spending less time wrangling\index{wrangle} datasets suitable for analysis\index{analysis}.''} \subsubsection{Background} \begin{itemize} \item Studied mathematics with a focus on statistics\index{statistics} resulting in a research position in the field (post-doctoral) \item Special focus of the research group is time-oriented\index{time-oriented} data, being involved in various international projects \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Wants to analyze huge datasets, often containing flawed data \item She would rather spend time on analysis\index{analysis} than preparation \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Got various sample time-oriented\index{time-oriented} datasets and wants to analyze the data \item Furthermore, wrangling\index{wrangle} should take less effort to apply Occam's razor \end{itemize} \subsection{Bob} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-bob} \subsubsection{Demographics} \begin{itemize} \item Age: 34 \item Location: Graz, Austria \item Job: Journalist \item Expertise: Journalism \& Politics \end{itemize} \subsubsection{Tagline} \textit{``I would like to be able to handle messy data for analysis\index{analysis} to be used in my articles.''} \subsubsection{Background} \begin{itemize} \item Graduate of communication studies with a specialization in politics \item Worked for several online news agencies \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Is held back from doing real ``data journalism'' due to lack of technical skills \item Would get into this kind of journalism if tools were better suited to his needs \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Got an idea for a current news story based on some quite untidy political/economic data which is often of time-oriented\index{time-oriented} nature \item Is able to conveniently verify justification of story based on respective analysis\index{analysis} of wrangled\index{wrangle} data \end{itemize} \subsection{John} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-john} \subsubsection{Demographics} \begin{itemize} \item Age: 40 \item Location: Salzburg, Austria \item Job: Political Analyst \item Expertise: Politics \& Statistics\index{statistics} \end{itemize} \subsubsection{Tagline} \textit{``I want to conveniently and visually prepare vast amounts of public poll data for analysis\index{analysis}.''} \subsubsection{Background} \begin{itemize} \item Studied political sciences with a focus on statistics\index{statistics} (Ph.D.) 
\item Works for news agencies, especially analyzing electoral situations \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Strong need to be able to get lots of data from various polls into unified schema with as little hassle as possible \item Is not particularly technically skilled or interested, just wants to get the data to be able analyzing it \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Electoral poll data, consequently, mainly temporal natured, from various sources needs to get prepared respectively unified for analyzing \item Uses the visual-interactive\index{visual-interactive} tool being able to get the job done in a convenient way \end{itemize} \subsection{Jane} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-jane} \subsubsection{Demographics} \begin{itemize} \item Age: 37 \item Location: Munich, Germany \item Job: Industrial Researcher \item Expertise: Biology \& Statistics\index{statistics} \end{itemize} \subsubsection{Tagline} \textit{``I need a quick(er) and more reliable way to experiment with biological test data.''} \subsubsection{Background} \begin{itemize} \item Graduated in bio engineering \item Works at a pharmaceutical company testing new ways of synthesizing cosmetics \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Currently, the whole roundtrip of setting up test labs and analyzing results is cumbersome and takes much time \item Wants to be able to iterate in a quicker mode of operation by improving on wrangling\index{wrangle} test data applicable for actual analysis\index{analysis} \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Is able to reduce testing roundtrips by using the visual-interactive\index{visual-interactive} tool for making time series test data useful for analysis\index{analysis} \item Based on experience and results from previous iterations she is able to decrease overall throughput time even more \end{itemize} \subsection{Walter} \includegraphics[scale=0.5]{figures/requirements/persona-avatar-walter} \subsubsection{Demographics} \begin{itemize} \item Age: 46 \item Location: Vienna, Austria \item Job: Medical Doctor \item Expertise: Diabetes \end{itemize} \subsubsection{Tagline} \textit{``I want to be able to conveniently visualize temporal therapy data provided by patients.''} \subsubsection{Background} \begin{itemize} \item Studied medicine, graduating cum laude \item Works at special center focusing on diabetics treatment \end{itemize} \subsubsection{Motivations} \begin{itemize} \item Often, therapy data provided by patients is rather messy, that is, concerning missing respectively erroneous values, formatting, ... \item Wants to be able to visualize data to get to see the ``real'' picture \end{itemize} \subsubsection{Scenarios (User Stories)} \begin{itemize} \item Using the tool he is able to reduce time spent on getting time-series-based therapy data provided by his patients ready for analysis\index{analysis} and can focus on actually analyzing \item Might even encourage (at least some of) his patients to use the tool themselves to further reduce overhead \end{itemize} \section{Design of UI and Interactions} For designing\index{design} the \gls{ui} and interactions we have created mockups a.k.a. wireframes~\cite{Garrett2011}. \subsection{UI Mockups} The design\index{design} of our prototype\index{prototype} should meet our list of requirements. 
To this end, we created a number of mockups to be able to easily refine our designs\index{design}. We have created our \gls{ui} mockups using \emph{Balsamiq\footnote{\textcolor{blue}{\href{https://balsamiq.com/}{balsamiq.com}}}} as productive tool. An important aspect of wireframing is that it should be convenient creating the mockups. One needs to be able to quickly iterate on ideas and throw away things which did not lead into the right direction. Often, simply pencil and paper are being used which is already a good way to get to some first scribbles. A quite common mistake is to skip proper wireframing and jump to design\index{design} screens immediately. Most of the power within the creative design\index{design} process and flow is lost this way as design\index{design} screens take considerably more effort in producing them. Consequently, iterating on these is usually more sluggish and throwing results away rather avoided. The following pages contain our resulting wireframed mockups including some descriptions and further explanations regarding their functionality and respective underlying reasoning. Mockups were created iteratively and evaluated in qualitative feedback loops until satisfying results have been achieved, that went into prototypical\index{prototype} implementation. \subsubsection{Design Process} While iteratively designing\index{design} with the help of mockups we constantly refined our ideas, adapting our approach\index{approach}, and trashing things that did not work out as expected.\\ Some material thereby discovered and/or changed: \begin{itemize} \item Foremost, pie charts are, mostly, not useful in our context of transforming time-oriented\index{time-oriented} data \item On the other hand, bar charts are well suited to communicate quantities (e.g., of different table entries) \item Line charts, as commonly used for time series data, are not being emphasized on in our approach\index{approach}, mainly since we have found calendar heatmap visualizations to be superior for our use case, as our approach is generally not constrained to time series data but should be suitable for any time-oriented data \item Normalization functionality consciously left rather vague, to be fleshed out when actually developing the prototype\index{prototype} \item Modal dialogs containing interactive charts make sense for certain actions \item Visualizing the context of two different columns next to each other for exploratory comparison is beneficial \item A browser-based application is sufficient and a dedicated desktop one not needed in this case \end{itemize} \subsubsection{Upload Dialog} \begin{figure}[h] \centering \includegraphics[width=1.125\textwidth]{figures/design/mockup-0} \caption{UI mockup of the upload dialog.} \label{fig:mockup-0} \end{figure} Naturally, the first \gls{ui} component we have designed\index{design} is the one which feeds the application with data to operate on: the upload dialog (see Figure~\ref{fig:mockup-0}). 
\begin{itemize} \item \textbf{Description} \begin{itemize} \item The main goal here was simplicity \item Consequently, truly simple dialog \item An area for dropping off file \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item As it is central to the application, uploading has to be really simple \item Thus, with as little effort as possible \item That is, affordance has to be intuitive \end{itemize} \end{itemize} The idea regarding interaction is that as soon as a file is selected, the upload commences automatically, giving visual feedback of its progress to the user via according animation effects, disappearing as soon as it is finished. \subsubsection{Table Editor} \begin{figure}[h] \centering \includegraphics[width=1.2\textwidth]{figures/design/mockup-1} \caption{UI mockup of the table editor.} \label{fig:mockup-1} \end{figure} The table editor page (see Figure~\ref{fig:mockup-1}) has been designed\index{design} to be one of the two main pages of the application, the charts page being the second one, intuitively accessible via tabbed navigation in the upper right of the screen. \begin{itemize} \item \textbf{Description} \begin{itemize} \item Enables direct manipulation editing of data \item Supports multi-row actions via check box selection \item Various search, sorting, and filtering options are available \item Provides menu access to further and more specialized dialogs \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item The table editor is a well-known \gls{ui} metaphor for this use case \item Many users are familiar with editing tabular data from MS Excel \& co. \item It supplies the user with a straightforward and efficient mode of interaction \end{itemize} \end{itemize} \subsubsection{Missing Values Dialog} \begin{figure}[h] \centering \includegraphics[width=1.12\textwidth]{figures/design/mockup-2} \caption{UI mockup of the missing values dialog.} \label{fig:mockup-2} \end{figure} The missing values dialog (see Figure~\ref{fig:mockup-2}) has been designed\index{design} to be opened from the table editor page via according menu access. \begin{itemize} \item \textbf{Description} \begin{itemize} \item Concrete shape of chart not 100\% clear at this point \item Most probably, bar chart -- possibly, a horizontal one \item User is able to fill missing value entries or delete them \item Options provided are filling them with estimates or defaults \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item Bar charts are capable of communicating distributions well \item Another possibility would be pie charts, but they are proven to be misleading \item To quote Tufte~\cite{Tufte2001}, p. 178: \emph{``Given their low density and failure to order numbers along a visual dimension, \textbf{pie charts should never be used}.''}\footnote{Cf. \textcolor{blue}{\href{https://www.edwardtufte.com/bboard/q-and-a-fetch-msg?msg\_id=00018S}{www.edwardtufte.com/bboard/q-and-a-fetch-msg?msg\_id=00018S}}} \end{itemize} \end{itemize} \subsubsection{Normalization Dialog} \begin{figure}[h] \centering \includegraphics[width=1.2\textwidth]{figures/design/mockup-3} \caption{UI mockup of the normalization dialog.} \label{fig:mockup-3} \end{figure} The interval-focused normalization dialog (see Figure~\ref{fig:mockup-3}) is meant to be accessible via menu from the table editor page, too. 
\begin{itemize} \item \textbf{Description} \begin{itemize} \item Its purpose is supposed to be enabling a user to normalize temporal intervals \item For batch-wise transforming\index{transformation} values of entries within a certain timespan \item Chart visualization is rather unclear at this stage \item Probably, bar chart as well -- but rather vertical one \item Interaction via point and click, including with chart \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item The use case being worth covering became evident while gathering requirements \item Consequently, we will experiment with supporting it via meaningful interactive chart visualization \item Concrete characteristics will be shaped while iterative development itself \item Most probably, our color scheme of the chart visualization will be within neutral, plain gray to black range \item Since explicit coloring should only be used when it can communicate and, hence, convey meaning to the user, being intention-revealing, that is \item So, in the case of Figure~\ref{fig:mockup-2} it may make sense to use a noticeable color \end{itemize} \end{itemize} \subsubsection{Outlier Detection Alerting} \begin{figure}[h] \centering \includegraphics[width=1.1\textwidth]{figures/design/mockup-4} \caption{UI mockup of the outlier detection info alert.} \label{fig:mockup-4} \end{figure} When outliers are detected we need to show that to the user (see Figure~\ref{fig:mockup-4}). As we intend to apply \glslink{ml}{machine learning} for this purpose, we need some way to do so without sacrificing good \gls{ux}. \begin{itemize} \item \textbf{Description} \begin{itemize} \item Therefore, a simple modal dialog overlay is chosen \item It offers the user to display detected potential outliers \item When the user accepts, views are filtered accordingly \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item Corresponding \gls{ml} processing has to happen in the background \item This is due to its potentially longer lasting computation time \item Consequently, informing the user about results has to be as unobtrusive as possible without interruption \item So, it should definitely not interfere with the current workflow, goals, and tasks of the user \item Thus, we intend to make use of an overlay which does not block the \gls{ui} and stays around for later use, more like an interactive notification-style message \end{itemize} \end{itemize} \subsubsection{Table Column Merging} \begin{figure}[h] \centering \includegraphics[width=1.175\textwidth]{figures/design/mockup-5} \caption{UI mockup of merging table columns via drag \& drop.} \label{fig:mockup-5} \end{figure} An interesting idea is to enable merging time-oriented\index{time-oriented} data columns via drag \& drop interaction metaphor (see Figure~\ref{fig:mockup-5}). 
\begin{itemize} \item \textbf{Description} \begin{itemize} \item So when dragging a temporal column unto another, a related merging operation should be initiated \item The initial idea is to offer optional choice regarding the merge via a menu then \item Options like what to do with missing values and how to merge values in general \item Another idea is enabling column extraction via drag \& drop as well, still somewhat vague, though \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item Many users are familiar with the basic kind of this interaction from spreadsheet applications like Excel \item Consequently, when indicating via according cursor on hover it is likely users will give it a spin \item Corresponding coloring of drop targets while dragging would be helpful to support the user with the interaction \end{itemize} \end{itemize} \subsubsection{Charts Page} \begin{figure}[h] \centering \includegraphics[width=1.1\textwidth]{figures/design/mockup-6} \caption{UI mockup of the charts page including calendar heatmap visualization.} \label{fig:mockup-6} \end{figure} The charts page is the second of the two main pages of the application, next to the initial table editor one. \begin{itemize} \item \textbf{Description} \begin{itemize} \item It is headed by an interactive calendar heatmap visualization \item Below, two distribution bar charts are next to each other \item Controls allow interacting with the charts, plus their items should be interactive \item Table editor filtering is intended to be interconnected with charts page views \end{itemize} \item \textbf{Reasoning} \begin{itemize} \item Calendar heatmap visualizations are particularly useful for displaying time-oriented\index{time-oriented} data distributions \item Densities of data therein are usually visualized via appropriate color scheming, popularly ranging in the green spectrum \item Histogram-like bar charts are useful for viewing data distributions in general \item Having two of the latter next to each other is great for comparisons, interactive exploration, and discovery \end{itemize} \end{itemize} \section{Iterative Prototyping} Following the creation of our \gls{ui} mockups and agreeing that a satisfiable state had been reached, we started with implementing the corresponding prototypical\index{prototype} software. So, we developed in an agile manner, meaning close contact and collaboration with the ``client'', in this case the assisting thesis advisor. Plus, developing respective parts of the application iteratively, chunk by chunk, preferably with short iteration cycles. While developing new features there were also regular short phases in between, where focus was laid on bug fixing, cleanup, and polishing of existing things. Therefore, we have set up a live testing environment, easily accessible for the client, regularly shipping changes, and gathering feedback to be incorporated as promptly as possible. Technical details regarding the setup are described in Appendix~\ref{ch:appendix-a}. As workflows in this project were particularly lean and lightweight, no special issue management software was used. It generally sufficed to make use of simple tools like \emph{Wunderlist}\footnote{\textcolor{blue}{\href{https://www.wunderlist.com/}{www.wunderlist.com}}}, \emph{Simplenote}\footnote{\textcolor{blue}{\href{https://simplenote.com/}{simplenote.com}}}, and email communication for tracking, planning as well as discussing todos, tasks, and issues. 
Additionally, from time to time when felt necessary and considered potentially fruitful, personal meetings were held. Mainly for hands-on demoing and reviewing purposes, plus, talking about direction-giving decisions. This process was followed until the prototype\index{prototype} eventually reached feature-completeness. \section{TempMunger} This section goes into some details regarding the implemented prototype\index{prototype} itself. Extensive documentation of related software design\index{design} and architecture\index{architecture} can be found in Appendix~\ref{ch:appendix-a}. \subsection{Implementation Details} As \gls{ide}, \emph{IntelliJ IDEA\footnote{\textcolor{blue}{\href{https://www.jetbrains.com/idea/}{www.jetbrains.com/idea/}}}} was used. \\ For conveniently reloading compiled code on the backend without requiring server restarts, \emph{JRebel\footnote{\textcolor{blue}{\href{https://zeroturnaround.com/software/jrebel/}{zeroturnaround.com/software/jrebel/}}}} was employed. On the frontend, a technique called \emph{\gls{hmr}} is fulfilling similar tasks. \emph{Redux DevTools\footnote{\textcolor{blue}{\href{http://extension.remotedev.io/}{extension.remotedev.io}}}} is a useful Google Chrome browser extension when developing Redux/React apps, and \emph{PageSpeed\footnote{\textcolor{blue}{\href{https://developers.google.com/speed/pagespeed/}{developers.google.com/speed/pagespeed/}}}} for adhering to website performance best practices. Cross-browser development as well as responsiveness for mobile devices were not part of the thesis prototype\index{prototype}. Though, at least basic support may be present due to libraries used. So the application is primarily optimized and tested to run in a Google Chrome \textbf{desktop} browser. The source code might be made public as open source software at some point in time, most likely on GitHub. \subsubsection{Elasticsearch Aggregations} Foundational background regarding \gls{ir} and the search engine technology used for our prototype\index{prototype} can be found in Appendix~\ref{ch:appendix-b}. Its software architecture is covered in Appendix~\ref{ch:appendix-a}. Aggregations are a powerful way in which \emph{Elasticsearch} supports real-time analytics. They are used extensively throughout our application. The general idea is to aggregate occurrences of certain values in buckets with corresponding counts. Most of the charts implemented in our solution rely heavily on these. Moreover, Elasticsearch aggregations can be nested which renders lots of analytical variety possible. Thus, we are storing field values non-analyzed for aggregation as well as analyzed for full-text search purposes. \subsubsection{Our Data Model/Storage} The basic data model is a rather schema-less one. So, Elasticsearch is enabled to figure out data types automatically on first indexing of respective data when uploading a dataset to the application. Uploading data issues wiping the index before storing it. Generally, there are two data types made available to our solution, either text or temporal. For recognizing temporal data, various related formats are specified for parsing attempts. Our data model is also quite lenient when it comes to values which fail parsing, simply ignoring the failure and storing the value at hand anyway. This way missing or erroneous values can be treated separately. See Appendix~\ref{ch:appendix-c} for a list of supported formats. 
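To make this lenient parsing behaviour more tangible, the following minimal Python sketch mimics the general idea: a raw cell value is tried against a list of candidate patterns and, if none of them matches, the value is kept as-is and simply handled as missing respectively erroneous later on. The function name and the shortened format list below are purely illustrative and not part of the prototype; the actual set of supported formats is the one listed in Appendix~\ref{ch:appendix-c}.
\begin{verbatim}
from datetime import datetime

# Illustrative subset only; the complete list is given in Appendix C.
CANDIDATE_FORMATS = ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S",
                     "%Y-%m-%d", "%d.%m.%Y"]

def try_parse_temporal(raw_value):
    """Return a datetime if any candidate format matches, otherwise None.
    On None, the raw value is stored anyway and treated as
    missing/erroneous during later cleanup steps."""
    for fmt in CANDIDATE_FORMATS:
        try:
            return datetime.strptime(raw_value, fmt)
        except (TypeError, ValueError):
            continue
    return None
\end{verbatim}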
Temporal values are uniformly stored in our Elasticsearch index in the \emph{\gls{utc}} respectively \emph{\gls{gmt}} timezone. When loaded as local date/time values on the frontend, these are converted using timezone offset calculations.
\subsubsection{On Spark RDDs} As explained in Appendix~\ref{ch:appendix-a}, \emph{Apache Spark} operates on \gls{rdd}s. In our prototype\index{prototype}, these are filled with data by querying Elasticsearch. Extensive caching and pre-loading of data is applied to boost performance. More concretely, for instance, the use case can be to transform\index{transformation} all dataset entries within a certain timespan for a specific temporal field to a specified other temporal value.
\subsubsection{Via Elasticsearch Bridge} This is accomplished via an Elasticsearch/Spark bridge, as described in Section~\ref{sec:es-hadoop}. Thus, a Spark context can be configured to connect to an Elasticsearch cluster. In the end, one can transparently operate on \gls{rdd}s with Spark's functional programming model\footnote{\textcolor{blue}{\href{https://spark.apache.org/docs/latest/programming-guide.html\#transformations}{spark.apache.org/docs/latest/programming-guide.html\#transformations}}}.
\subsubsection{With Seamless Interop} The interop is, all in all, quite seamless, especially concerning Kotlin code calling the ES-Hadoop connector Java \textsc{API} as well as Spark's underlying Scala one when necessary. Loading data from and writing it back to Elasticsearch is mostly transparent.
\begin{figure}[h] \centering \includegraphics[width=1.05\textwidth]{figures/implementation/transformation-sequence} \caption{Sequence diagram showing the general data transformation flow.} \label{fig:transformation-sequence} \end{figure}
\subsection{Features of TempMunger} Our prototype\index{prototype} possesses the following main, high-level features regarding time-oriented\index{time-oriented} data, primarily focusing on visual-interactive\index{visual-interactive} and, particularly, charting support: \begin{itemize} \item \textbf{Transformations}\index{transformation} \begin{itemize} \item \textbf{Direct manipulation} via \gls{ui} controls \item \textbf{Cleaning} of missing and erroneous values \item \textbf{Normalization} concerning: \begin{itemize} \item Points in time \item Intervals \end{itemize} \item \textbf{Deletion} of rows \item \textbf{Merging} of columns \item \textbf{Formatting} cleanup \end{itemize} \item \textbf{Outlier detection} \item \textbf{Visual overview} \end{itemize} Furthermore, a more traditional \textbf{tabular editor} is available, as known from spreadsheet applications like, most prominently, Microsoft Excel. Users are familiar with the underlying interaction metaphor and, consequently, it makes sense as a foundation to build upon.
\subsection{Transformations} A central part of the approach\index{approach} is the transformation\index{transformation} of time-oriented\index{time-oriented} data. Generally, this is achieved via Apache Spark processing of Elasticsearch data. As mentioned above, transformation\index{transformation} operations include \textbf{cleaning}, \textbf{normalization}, and \textbf{merging}. Figure~\ref{fig:transformation-sequence} presents the general underlying flow via a sequence diagram.
\subsection{Outlier Detection} Our prototype\index{prototype} applies some \gls{ml} techniques for its temporal outlier detection component.
\subsubsection{K-Means Clustering} This is a popular algorithm of \emph{unsupervised learning}, i.e., \gls{ml} which does not rely on manual classification input, but rather classifies recognized patterns autonomously. Formula~\ref{eq:k-means} represents its core principle, partitioning real vectorized observations $x$ into $k$ class cluster sets $S$ by calculating mean distances to respective centers ($\mu$ being the mean of points in $S_i$), generally computationally applying statistics\index{statistics} to pattern recognition: \begin{equation} \argmin_{S} \sum_{i=1}^{k} \sum_{x \in S_{i}} \|x - \mu_{i}\|^{2} \label{eq:k-means} \end{equation} The following explains how this can be used for anomaly respectively outlier detection. \subsubsection{Outlier Detection Usage} The algorithm applied for our outlier detection component, basically, works as depicted in Algorithm~\ref{alg:temp-outlier-detection}. A peculiar detail of our approach\index{approach} is that there is no dedicated test set of ``new'' data. This is due to the fact there is only one dataset available at a time with no additional data coming in to extend it. Thus, after training on a randomly split set, the whole dataset is used as test set, in the end, leading to overall satisfactory results. Moreover, we are limiting the number of classes to be clustered to two. Hence, our simple heuristic for determining an outlier class is to take the one of the two with fewer members. When there is only one class, it is assumed no outliers could be detected. \subsubsection{The Temporal Dimension} Our use case revolves around finding outliers in time-oriented\index{time-oriented} data. Figure~\ref{fig:outlier-detection-sequence} shows the basic, related flow with a sequence diagram. In principle, we are using all time-oriented\index{time-oriented} data values available in the dataset at hand for vectorizing the observations to be input to clustering. Therefore, the corresponding epoch millisecond values are used and if a certain value cannot be parsed it is substituted with a max. large number. Consequently, missing and erroneous values are likely to be subsequently tagged as outliers as well. \newpage \begin{algorithm} \KwIn{A set of temporal field names $\varphi$ and a corresponding RDD (dataset) $\delta$} \KwOut{An RDD $\pi$ consisting of pairs of document ID to cluster class value} Vectorize dataset $\delta$ using field values via $\varphi$, see conditional (ll. 2-6)\; \eIf{a field value can be parsed as temporal}{ Use its epoch milli value\; }{ Use max. large number\; } Get training set $\tau$ from dataset $\delta$ via random split of $0.9 : 0.1$\; Create predictive model $\mu$ from vectors $\vec{x}$ of training set $\tau$\; $\to$ $k$-$means$ clustering yielding $2$ classes in $20$ iterations and $3$ parallel runs\; Predict points of dataset $\delta$ via model $\mu$, resulting in RDD $\pi$, see loop (ll. 11-14)\; \For{each point $\in$ dataset $\delta$} { Predict cluster class via model $\mu$\; Add result to RDD $\pi$\ via map op; } \Return{RDD $\pi$;} \caption{Temporal Outlier Detection} \label{alg:temp-outlier-detection} \end{algorithm} To further describe key points of the algorithm:\\ First, the dataset at hand is vectorized in order to enable applying it for clustering. Then, a training set is generated from it via random split. After that, a predictive model is created from the training set. Afterwards, the dataset at hand is predictively clustered via model. 
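To illustrate these steps in compact form, the sketch below re-creates the same procedure with NumPy and scikit-learn instead of the Spark MLlib implementation the prototype actually uses; the function and parameter names are ours and merely illustrative, and only the numeric settings (two classes, 20 iterations, three runs, $0.9:0.1$ split, sentinel substitution) mirror Algorithm~\ref{alg:temp-outlier-detection}.
\begin{verbatim}
import numpy as np
from sklearn.cluster import KMeans

SENTINEL = 1.0e18  # stands in for the "max. large number" used for unparseable values

def detect_temporal_outliers(epoch_millis, seed=42):
    """epoch_millis: 2-D array with one row per dataset entry and one column
    per temporal field, holding epoch milliseconds or np.nan if unparseable."""
    X = np.where(np.isnan(epoch_millis), SENTINEL, epoch_millis)
    rng = np.random.default_rng(seed)
    train = X[rng.random(len(X)) < 0.9]              # ~0.9 : 0.1 random split
    model = KMeans(n_clusters=2, n_init=3, max_iter=20,
                   random_state=seed).fit(train)     # 2 classes, 20 iterations, 3 runs
    labels = model.predict(X)                        # cluster the whole dataset
    counts = np.bincount(labels, minlength=2)
    if counts.min() == 0:                            # only one populated class:
        return np.array([], dtype=int)               #   assume no outliers were found
    outlier_class = int(np.argmin(counts))           # heuristic: the smaller cluster
    return np.flatnonzero(labels == outlier_class)
\end{verbatim}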
Finally, potential outliers are determined via aforementioned, simple heuristic. \begin{figure}[h] \centering \includegraphics[width=1.025\textwidth]{figures/implementation/outlier-detection-sequence} \caption{Sequence diagram showing the general outlier detection on upload flow.} \label{fig:outlier-detection-sequence} \end{figure} \newpage \subsection{Workflows and Screens} In this section, workflows plus related screens of the implemented prototype\index{prototype} are described and explained. Moreover, special emphasis is laid upon the reasoning behind the chosen path of the solution. \subsubsection{Upload and Outlier Detection} Initially, the user will want to upload some dataset to the application. Therefore, a modal upload dialog is pretty conveniently reachable in the upper right corner of the screen. This button is also visually especially noticeable via its peculiar coloring. The upload dialog itself is designed\index{design} as simple as possible (see Figure~\ref{fig:screenshot-upload}). One can either simply drag \& drop a file to it or select one via \gls{fs} dialog. As soon as a file is selected, the upload begins and is indicating its progress via related animations. In general, whenever data is being fetched respectively backend requests are issued, a spinning wheel effect is shown in the upper left corner of the screen. When uploading is finished the dialog disappears and the user is free to interact with the data. \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{figures/implementation/screenshot-upload} \caption{Screenshot showing upload with corresponding modal dialog and animated effects regarding progress indication.} \label{fig:screenshot-upload} \end{figure} As presented more from its technical side before, when uploading, an outlier detection mechanism is triggered. On finished processing, the user is shown an according notification. This message is sent as desktop browser respectively system notification\footnote{\textcolor{blue}{\href{https://developer.mozilla.org/en-US/docs/Web/API/Notifications_API}{developer.mozilla.org/en-US/docs/Web/API/Notifications\_API}}} (see Figure~\ref{fig:screenshot-notification}) as well as within the application itself as some sort of flashing notification from the bottom of the screen (see Figure~\ref{fig:screenshot-table-editor}). The former stay around while the latter disappear. When clicking a notification, an action is triggered filtering all dataset entries down to the potentially outlying ones. The user may then proceed to act upon accordingly, having the potential outliers ready in sight and at her/his fingertips. Presence of this filter is indicated via a chip-like control at the top of the screen (see Figure~\ref{fig:screenshot-search+filtering}). It can be removed simply by clicking. \begin{figure}[h] \centering \includegraphics[width=0.66\textwidth]{figures/implementation/screenshot-notification} \caption{Screenshot showing desktop browser respectively system notification for interactive suggestive outlier detection indication.} \label{fig:screenshot-notification} \end{figure} \subsubsection{Table Editor} A central component of the \gls{ui} is the table editor view page (see Figure~\ref{fig:screenshot-table-editor}). There the user can directly manipulate the data at hand via editing corresponding cells. Pagination controls at the bottom of the page allow for convenient paging through the data. Page size can be adjusted as well as a particular page selected via related dropdowns. 
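Under the hood, such paging presumably maps to plain offset-based Elasticsearch queries. The following small Python sketch is our own illustration rather than the prototype's actual code; it shows what a request body for one table page could look like, assuming standard \texttt{from}/\texttt{size} paging against the \texttt{\_search} endpoint.
\begin{verbatim}
def page_query(page_number, page_size, query_string=None):
    """Request body for one table page; would be POSTed to /<index>/_search."""
    body = {
        "from": page_number * page_size,   # offset of the first row on the page
        "size": page_size,                 # rows per page, as chosen in the dropdown
        "query": {"match_all": {}},
    }
    if query_string:                       # text entered in the search box, if any
        body["query"] = {"query_string": {"query": query_string}}
    return body
\end{verbatim}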
Multi-row deletion is supported via selection checkboxes at the left side of the table. It is possible to select rows one-by-one or (de)select all at once. The connected deletion button is located at the bottom left of the table. \subsubsection{Date/Time Picker} Date and time picker \gls{ui} controls are used whenever an editable input field contains time-oriented\index{time-oriented} data. The date picker allows the user to choose a date in an interactive way with the metaphor of a more traditional calendar (see Figure~\ref{fig:screenshot-date-picker}). Whereas the time picker uses the metaphor of an analog clock for choosing a time value (see Figure~\ref{fig:screenshot-time-picker}). \begin{figure}[h] \centering \includegraphics[width=0.525\textwidth]{figures/implementation/screenshot-date-picker} \caption{Screenshot showing exemplary modal date picker control with its calendar interaction metaphor.} \label{fig:screenshot-date-picker} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.525\textwidth]{figures/implementation/screenshot-time-picker} \caption{Screenshot showing exemplary modal time picker control with its clock interaction metaphor.} \label{fig:screenshot-time-picker} \end{figure} Both of these controls are commonly used throughout the application and normally in cooperation. For instance, the table editor employs the controls for editing temporal column row values. \subsubsection{Search and Filtering} There are various ways in which filtering, slicing, and dicing the dataset is supported: \begin{itemize} \item First of all, a search box is placed quite prominently at the top of the screen. This enables the user to filter data down via full-text search. \item Additionally, as mentioned above, multiple rows can be selected. Filter presence is indicated via aforementioned chip-like control. The filter affects displayed data in the charts view page as well. \item Rows can be sorted by column values in ascending or descending order. For this a simple click on the respective column header suffices. \item Furthermore, on the table view page there is a pagination available, see above. \end{itemize} All of these filtering options are working together correspondingly (see Figure~\ref{fig:screenshot-search+filtering}). \subsubsection{Missing Values Cleanup} It is possible to clean up missing and/or erroneous values via a modal dialog overlay (see Figure~\ref{fig:screenshot-missing-values}). Missing respectively erroneous in this context means that the values were not able to be parsed as temporal. The dialog can be accessed from the table editor view page at the bottom left via menu. Its menu option is only available when the currently loaded dataset contains at least one time-oriented\index{time-oriented} data field. On the modal dialog there is a dropdown to select one of the available temporal fields. Below it there is a horizontal multi-bar chart consisting of two bars. One of them represents all rows, and the other, missing values. The former are colored in a neutral gray, while the latter are colored orange. Consequently, a visual emphasis on the missing values is established. The bars can be displayed grouped or stacked to get a better feel of the quantities at hand. Below the chart there is a date and a time picker control for choosing a target value to fill the missing values with. It is originally set to the average value of all values of the respective field (excluding missing ones). 
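A minimal sketch of how such a default could be derived is given below: the parseable values of the selected field are averaged on the epoch-millisecond scale and converted back to a date/time. The helper names are hypothetical and only illustrate the idea; \texttt{parse} stands for whatever routine maps a raw cell value to a temporal one or \texttt{None}.
\begin{verbatim}
from datetime import datetime, timezone
from statistics import mean

def default_fill_value(column_values, parse):
    """Average of all parseable temporal values of a column, as a UTC datetime."""
    millis = [dt.timestamp() * 1000.0
              for v in column_values
              if (dt := parse(v)) is not None]   # skip missing/erroneous values
    if not millis:
        return None  # nothing parseable; the user has to pick a value manually
    return datetime.fromtimestamp(mean(millis) / 1000.0, tz=timezone.utc)
\end{verbatim}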
At the bottom of the dialog there are action buttons to either apply the fill operation as described or to delete all rows with missing values. \begin{figure}[h] \centering \includegraphics[width=0.7\textwidth]{figures/implementation/screenshot-missing-values} \caption{Screenshot showing missing values cleanup modal dialog overlay with charts.} \label{fig:screenshot-missing-values} \end{figure} \subsubsection{Temporal Normalization} There are, basically, two types of temporal normalization operations supported by the prototype\index{prototype} -- this is not representing normalization in the strictest mathematical sense: \begin{enumerate} \item Transform\index{transformation} all values within a certain timespan or at a certain point in time to a specified date/time \item Transform\index{transformation} all values within a certain interval, effectively ``moving'' them in time \end{enumerate} The former can be accessed either by clicking on a calendar heatmap or a distribution chart bar item on the charts view page (see Figure~\ref{fig:screenshot-charts-dialog}). It is generally showing the selected timespan or point in time and enabling the user to set a target value via date and time picker controls for transforming\index{transformation} all affected values to. Alternatively, the user can choose to delete all rows within the temporal selection. \begin{figure}[h] \centering \includegraphics[width=0.66\textwidth]{figures/implementation/screenshot-charts-dialog} \caption{Screenshot showing charts page modal dialog on bar or heatmap item click.} \label{fig:screenshot-charts-dialog} \end{figure} The latter is accessible via a dedicated menu item at the bottom of the table editor view page, next to the one for missing values cleanup (see Figure~\ref{fig:screenshot-normalization}). There the user can select a temporal field and interval, offering year or month. When month is chosen at max. 10 bars in a chart are displayed each representing a month. When year is chosen the bars represent years. This is an aggregated view visualizing distribution of values with respective amounts sorted in descending order. Again, bars are colored in a neutral gray. When selecting a bar its color changes to black, signalizing the selection. Plus, temporal input field controls show up. In the case of year interval being selected, there is a numeric text input for a target year. In the case of month interval selection, there is additionally a dropdown with the 12 months of a year. In addition to transforming\index{transformation} values accordingly, the user can also choose to delete selected rows instead. \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{figures/implementation/screenshot-normalization} \caption{Screenshot showing interval normalization modal dialog overlay with interactive bar charts and controls.} \label{fig:screenshot-normalization} \end{figure} \subsubsection{Table Column Merging} It is possible to merge time-oriented\index{time-oriented} data columns via drag \& drop of table editor headers (see Figure~\ref{fig:screenshot-column-merge}). Only columns containing temporal data are able to be drag \& dropped. 
Interaction flow, generally, works as follows: \begin{itemize} \item When starting to drag a header, possible drop targets (i.e., other temporal column headers) are highlighted in light yellow background color \item When dragging over a possible drop target, the hovered column header is highlighted in light green to signalize the drop possibility to the user \item When dragging over a non-temporal column header, it is highlighted in light orange color indicating that it is not a possible drop target \item When the user drops the dragged header on a possible target, a corresponding modal dialog overlay opens \end{itemize} This dialog asks the user to confirm merging columns as specified or cancel otherwise. Merging, basically, works in the following way: \begin{itemize} \item If both column values contain a temporal one, an average of these is used for the merged value \item If only one column value contains a temporal one, the missing one is substituted with the existing \item If both column values contain missing ones, an overall average of all values of the two columns is used \end{itemize} An alternative implementation could have given the user options to choose in a more fine-grained way. Yet, we have found that these sensible defaults should make sense in many cases and the user can still apply further refinements of the column data after merge, if desired. Naturally, on finished operation, the merge respectively drag source column is removed. \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{figures/implementation/screenshot-column-merge} \caption{Screenshot showing table column merging via drag \& drop interaction.} \label{fig:screenshot-column-merge} \end{figure} \subsubsection{Charts Page} The central \gls{ui} component regarding charts is kind of a dashboard page (see Figure~\ref{fig:screenshot-charts-page}). When there are time-oriented\index{time-oriented} fields present in the respective dataset, it is headed by an interactive calendar heatmap visualization. It gives the user the opportunity to understand the temporal dimension of the data as well as transforming\index{transformation} it. Temporal fields can be selected via dropdown. In any case and below there are two distribution bar charts next to each other. These charts enable the user to get a grasp of the present data, plus transforming\index{transformation} it, too. Again, fields can be selected via dropdown. \subsubsection{Calendar Heatmap} The calendar heatmap visualization, generally, allows four temporal scales \\(see Figure~\ref{fig:screenshot-calendar-heatmap}): \begin{enumerate} \item Year \item Month (default) \item Week \item Day \end{enumerate} \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{figures/implementation/screenshot-calendar-heatmap} \caption{Screenshot showing calendar heatmap visualization with interactive controls.} \label{fig:screenshot-calendar-heatmap} \end{figure} Depending on the chosen scale the calendar view adjusts accordingly. So, for instance, in the case of month scale, it is showing days of each month as boxes. Furthermore, a color scale from gray via light to dark green indicates the amount of dataset entries associated with a certain temporal value represented by such a calendar item box. On click of an item box, a modal dialog is shown, giving the user transform\index{transformation} options. \subsubsection{Distribution Charts} The two distribution bar charts can be seen as some sort of histogram visualizations, showing top aggregations. 
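Such a chart is presumably backed by a single terms aggregation on the non-analyzed variant of the selected field, returning the most frequent values together with their document counts. The sketch below is our own illustration of such a request body; the field and bucket names are placeholders.
\begin{verbatim}
def top_terms_aggregation(field, top_n=10):
    """Request body for one distribution bar chart: the top_n most frequent
    values of `field`, largest bucket first; one bucket becomes one bar."""
    return {
        "size": 0,  # only the aggregation buckets are needed, not the rows
        "aggs": {
            "distribution": {
                "terms": {"field": field, "size": top_n}
            }
        }
    }
\end{verbatim}
The returned buckets then translate directly into the bars of the respective chart.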
They are mainly pointed at enabling the user to understand general data distribution qualities of the dataset at hand. Plus, making it possible to easily compare these (see Figure~\ref{fig:screenshot-bar-charts}). Again, clicking a bar opens a modal dialog for further interactive transformation\index{transformation} operations, like deletion or time-oriented\index{time-oriented} normalization. \subsubsection{Export} At the end of the day, the user wants to export the wrangled\index{wrangle} data. Therefore, a button is quite prominently placed in the upper right corner of the application. When there is no time-oriented\index{time-oriented} data included, a button click simply initiates a \textsc{CSV} file export download. Otherwise, a modal dialog overlay is presented first. This dialog lets the user choose a format to apply to all time-oriented\index{time-oriented} data of the to be exported dataset (see Figure~\ref{fig:screenshot-export-dialog}). \begin{figure}[h] \centering \includegraphics[width=0.66\textwidth]{figures/implementation/screenshot-export-dialog} \caption{Screenshot showing modal export dialog with temporal format dropdown.} \label{fig:screenshot-export-dialog} \end{figure} Three options are implemented and, consequently, available: \begin{enumerate} \item \textsc{ISO}\footnote{\textcolor{blue}{\href{http://www.iso.org/iso/iso8601}{www.iso.org/iso/iso8601}}} date/time (e.g., \emph{2017-12-31T12:00:00}) \item \textsc{ISO} date (e.g., \emph{2017-12-31}) \item Epoch millis (e.g., \emph{1485037113334}) \end{enumerate} Since all time-oriented\index{time-oriented} data is formatted on export in a unified way as well as uniformly stored in \gls{utc}/\gls{gmt} timezone on upload, there is no need to support formatting during previous editing and transformation\index{transformation} interactions. \begin{sidewaysfigure}[h] \centering \includegraphics[width=0.9\textwidth]{figures/implementation/screenshot-table-editor} \caption{Screenshot showing table editor respectively main page including outlier detection info alert.} \label{fig:screenshot-table-editor} \end{sidewaysfigure} \begin{sidewaysfigure}[h] \centering \includegraphics[width=0.9\textwidth]{figures/implementation/screenshot-search+filtering} \caption{Screenshot showing various navigational, search, and filtering options with table editor view.} \label{fig:screenshot-search+filtering} \end{sidewaysfigure} \begin{figure}[h] \centering \includegraphics[width=0.9\textwidth]{figures/implementation/screenshot-charts-page} \caption{Screenshot of charts page with calendar heatmap, offering visual overview.} \label{fig:screenshot-charts-page} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.777\textwidth]{figures/implementation/screenshot-bar-charts} \caption{Screenshot showing distribution bar charts with their dropdown controls.} \label{fig:screenshot-bar-charts} \end{figure} \section{Qualitative Evaluation} \label{sec:qualitative-evaluation} Eventually, the results had to be evaluated. For that matter, we have employed two well-known approaches\index{approach} and tools from the domain of usability engineering. Both of which are introduced and extensively explained in \cite{Nielsen1993}: \begin{enumerate} \item \textbf{Heuristic evaluation} \item \textbf{User/usability tests} \end{enumerate} The former basically means that an application and especially its \gls{ui} is being evaluated by a group of usability experts, step by step examining the to be evaluated system. 
According to Holzinger~\cite{Holzinger2005} three to five usability experts are sufficient for this type of evaluation. Therefore, we have conducted an heuristic evaluation with three experts. Each of the experts should, further, evaluate independently from the others. The evaluation is generally based on heuristics related to usability. The classic ones as described by Nielsen~\cite{Nielsen1993} are: \begin{itemize} \item \emph{Visibility of system status} \item \emph{Match between system and the real world} \item \emph{User control and freedom} \item \emph{Consistency and standards} \item \emph{Error prevention} \item \emph{Recognition rather than recall} \item \emph{Flexibility and efficiency of use} \item \emph{Aesthetic and minimalist design\index{design}} \item \emph{Help users recognize, diagnose, and recover from errors} \item \emph{Help and documentation} \end{itemize} Forsell and Johansson~\cite{Forsell2010} identified heuristic sets which are especially useful when dealing with applications in the realm of \gls{infovis}. Therefore, we are adding the following: \begin{itemize} \item \emph{Information coding} \item \emph{Spatial organization} \item \emph{Remove the extraneous} \end{itemize} For the evaluation itself each expert is asked to perform a given set of tasks. Before that, a session going through the application and \gls{ui} in general is conducted. An observer is present at the evaluation, who is familiar with the application and can be asked related questions. The respective evaluator should note all issues. At the end, found results are gathered and summarized. These are the tasks we have established for our evaluation: \begin{enumerate} \item Upload a dataset using a given \textsc{CSV} file \item Edit time-oriented\index{time-oriented} data via table editor \item Find potential outliers in the given dataset \item Identify missing respectively erroneous values \item Fill these with actual temporal values and/or delete their entries \item Normalize all entries in a certain month, moving them to another one \item Move all entries on a specific day to another point in time \item Delete all entries within a certain timespan or on a certain date \item Merge time-oriented\index{time-oriented} data columns on the table editor view page \item Export data as \textsc{CSV}, choosing a format for time-oriented\index{time-oriented} values \end{enumerate} User or usability tests, on the other hand, are usually performed in some sort of lab environment. That is, users are given tasks to execute for reaching certain goals with the application and are being observed while doing so. Typically, the test participants should be actually potential users. We have performed such tests with two users, thus, in addition to the previous heuristic evaluation which we have conducted with three different usability and visualization experts. The users of the user/usability tests were given the same tasks to perform as the experts from heuristic evaluation before. Meanwhile testing and particularly afterwards they were interviewed regarding their experience, impressions, and opinions. Finally, we have analyzed and abstracted connected findings. The main questions all such user test participants as well as heuristic evaluation ones were asked: \begin{enumerate} \item What is your overall impression? \item What are the strengths of TempMunger? \item What are the shortcomings of TempMunger? \item Do you believe TempMunger can be useful for you? \item If not, what do you think is needed to make it so? 
\end{enumerate} Combined results of the heuristic evaluation and user tests are as follows, listing found issues and linking them to their respective related, violated heuristics: \begin{enumerate} \item Insufficient immediate and intuitive visual feedback regarding performed actions, e.g., on missing values cleanup ($\rightarrow$ \emph{visibility of system status}) \item Findability of modal dialogs for missing values cleanup and interval-based normalization is suboptimal ($\rightarrow$ \emph{spatial organization}) \item Separation of the two normalization dialogs as well as connected semantics are not intuitive and could be refined ($\rightarrow$ \emph{consistency and standards}) \item Granularity of temporal scale is partially incomplete ($\rightarrow$ \emph{user control and freedom}) \item Temporal scale coloring in calendar heatmap visualization is sometimes misleading ($\rightarrow$ \emph{information coding}) \item No dedicated highlighting of concrete outlier values, for instance, via corresponding coloring ($\rightarrow$ \emph{recognition rather than recall}) \item Outlier detection action could be made repeatable via button ($\rightarrow$ \emph{flexibility and efficiency of use}) \item Merging operation could be made more useful by offering options and information regarding algorithm ($\rightarrow$ \emph{user control and freedom}) \item Distribution charts for exploratory comparison on charts page could be enhanced, e.g., by adding drilling functionality ($\rightarrow$ \emph{information coding}) \item In some places labels could be added to make controls and respective intention clearer ($\rightarrow$ \emph{remove the extraneous}) \item Missing values cleanup dialog visualization could be refined, for instance, by making stacked charts view the default ($\rightarrow$ \emph{information coding}) \item Calendar heatmap visualization could be enabled to make use of drag \& drop interaction ($\rightarrow$ \emph{flexibility and efficiency of use}) \item Time-oriented\index{time-oriented} data could be displayed localized in controls and related input fields ($\rightarrow$ \emph{information coding}) \item Multi-delete action button could be moved to the top of screen or made contextual ($\rightarrow$ \emph{user control and freedom}) \item Large number of columns could lead to displaying glitches on the table editor view ($\rightarrow$ \emph{spatial organization}) \end{enumerate} The heuristic category which was noted most often is \emph{information coding}, followed by \emph{user control and freedom}. After that, \emph{spatial organization} as well as \emph{flexibility and efficiency of use} both got an equal amount of mentions. Other categories scored only once each. Hence, one can deduct a relative order concerning areas for possible improvements accordingly. Generally, the approach\index{approach} and prototype\index{prototype} was perceived positively and as moving into the right direction. In its current state it was rather seen as a nice proof of concept. To make it a real-world applicable tool it would mainly need to be completed regarding coverage of data types, apart from its focus on time-oriented\index{time-oriented} one now. Additionally, the currently provided set of transformation operations should be completed. Furthermore, the present chart visualizations could be refined and including additional ones was encouraged. Most frequently, possibly dotted, line charts were referred to in the context of time series data visualization as a potentially useful addition. 
A central issue to address is the scalability of the visualizations, also in regard to data granularity. The aspects of our prototype praised most were the general visual design, the good visual-interactive\index{visual-interactive} overview offered with the aid of charts, and the smoothness regarding \gls{ux}. On the other hand, usability was also one of the most controversially discussed topics, as it is by nature a highly subjective and opinion-driven one. Moreover, a desirable area for future work would be to provide more transformation\index{transformation} suggestions interactively, in a proactive way. Additional focus could also be laid on further increasing the interactive data filtering and drilling capabilities. An interesting idea mentioned was that some users might also find it useful to export visualizations in addition to the raw \textsc{CSV} data which is currently downloadable. Finally, one piece of feedback from a test participant, which we especially appreciate, was \emph{``it does what it's supposed to do''}. Regarding our interview questions in more detail: answers to the first question, concerning the overall impression, can be summarized as TempMunger being seen as a nice tool for working with time-oriented data visual-interactively\index{visual-interactive}. Concerning strengths, the pleasantness of the general design, the \gls{ux}, and the quality of the present visualizations were mentioned most commonly. The interactive calendar heatmap as well as the histogram-like bar chart visualizations were mostly seen as fitting their purpose of conveniently visualizing data distributions with a focus on time-oriented aspects and, therefore, as useful. Weaknesses were mainly identified to relate to the lack of completeness of the supported transformation operations and visualizations. Thus, mainly the coverage of more varied and specific cases would have to be completed in order to make TempMunger a really versatile tool that outgrows being merely a research prototype. Further discussion of open issues and future work can be found in the conclusion sections following this one.
# Write new PDB files, one per chain, named as --> <pdbid>_<chainid>.pdb
ptm <- proc.time()  # start timer (elapsed time can be checked later via proc.time() - ptm)
library(bio3d)

# Read the list of PDB IDs (5 characters each: 4-char PDB ID + 1-char chain ID)
file <- file("C:\\Users\\exam\\Desktop\\Halwa\\pdb_id_list.txt", open = "r")
x <- readLines(file)
close(file)

corrupted_id <- NULL   # intended to collect IDs with CA-only atoms (currently unused)
chain_break_id <- NULL # intended to collect IDs with chain breaks (currently unused)

# Iterate over all listed IDs
for (y in x) {
  pdb_file_name <- substr(y, 1, 5)                    # PDB ID with chain (currently unused)
  pdb_id <- paste(substr(y, 1, 4), ".pdb", sep = "")  # original file name with extension
  pdb_id_4 <- substr(y, 1, 4)                         # original file name (4-char PDB ID)
  chain_id <- substr(y, 5, 5)                         # chain ID only

  folder_id <- "C:\\pdbfiles\\"
  full_pdb_path <- paste(folder_id, pdb_id, sep = "")

  # Read the structure, select the requested chain, and trim the structure to it
  pdb <- read.pdb(full_pdb_path, maxlines = 1000000, verbose = FALSE)
  inds <- atom.select(pdb, chain = chain_id, verbose = FALSE)
  pdb <- trim.pdb(pdb, inds)

  # Write the single-chain structure as <pdbid>_<chainid>.pdb
  out_path <- paste("C:\\Users\\exam\\Desktop\\pdb_chain_veer\\", pdb_id_4, "_", chain_id, ".pdb", sep = "")
  write.pdb(pdb, xyz = pdb$xyz, file = out_path)
}
# end of writing loop
# Build script for the SWIG-wrapped 'ext_gridloop' extension module.
# Typical usage: python setup.py build_ext --inplace
import os
import numpy
from numpy.distutils.core import setup, Extension

name = 'ext_gridloop'
sources = ['ext_gridloop.i', 'gridloop.cpp', 'convert.cpp']

setup(name=name,
      include_dirs=[os.curdir, numpy.get_include()],
      ext_modules=[Extension('_' + name,   # SWIG requires the leading underscore
                             sources=sources)])
```python from sympy import * ``` ```python x,y,z = symbols('x y z') init_printing(use_unicode=True) ``` ### Basic *Calculus* ```python print('------') Limit((cos(x)-1),x,0) Limit((cos(x)-1),x,0).doit() print('------') Derivative(5*x**2,x) Derivative(5*x**2,x).doit() print('------') Integral(log(x)**2,x) Integral(log(x)**2,x).doit() ``` ```python limit(sin(x)/x,x,0) limit(sin(x)/x,x,oo) ``` ```python # use limit instead of subs (if there's a singularity) (x/x**x).subs(x,oo) limit((x/x**x),x,oo) # 'limit' one side only limit(1/x,x,0,'+') limit(1/x,x,0,'-') ``` ```python diff(cos(x),x) diff(5*x**3) diff(5*x**3,x) diff(5*x**3,x,1) diff(x**2*y**2,y) ``` ```python diff(5*x**3,x,0) diff(5*x**3*y**2,x,0,y,0) diff(5*x**3*y**2,x,x,y,y) diff(5*x**3*y**2,x,2,y,2) ``` ```python integrate(cos(x),x) # use 'oo' to indicates 'infinity' integrate(exp(-x),(x,0,oo)) integrate(exp(-x**2-y**2),(x,-oo,oo),(y,-oo,oo)) # oops! integrate(x**x,x) ``` ```python exp(sin(x)) exp(sin(x)).series(x,0,5) # hell yeah! exp(x-5).series(x) exp(x-5).series(x,x0=5) ``` ### *Solver* ```python # In Sympy, any expression not in an 'Eq' # is automatically assumed to equal 0 by solving funcs. Eq(x,y) # equals to 0? u can omit it! solveset(Eq(x**2-5,0),x) solveset(x**2-5,x) # use Eq or not is FINE solveset(Eq(x**2,x),x) solveset(x**2-x,x) ``` ```python solveset(Eq(x**2,x),x,domain=S.Reals) solveset(sin(x)-1,x,domain=S.Reals) ``` ```python # no solution exists solveset(exp(x),x) # not able to find solution # ( C代表虚数, 反V代表"与" ) solveset(cos(x)-x,x) ``` ```python linsolve([x+y+z-1,x+y+2*z-3],(x,y,z)) linsolve(Matrix(([1,1,1,1],[1,1,2,3])), (x,y,z)) ``` ```python # nonlinear shit a,b,c,d = symbols('a b c d',real=True) nonlinsolve([a**2+a,a-b],[a,b]) nonlinsolve([x*y-1,x-2],x,y) nonlinsolve([x**2+1,y**2+1],[x,y]) nonlinsolve([x**2-2*y**2-2,x*y-2],[x,y]) ``` ```python # differential equations f,g = symbols('f g',cls=Function) f(x) f(g(x)) ``` ```python eq = Eq(f(x).diff(x,2) - 2*f(x).diff(x) + f(x),sin(x)) eq dsolve(eq,f(x)) dsolve(Eq(f(x).diff(x))) dsolve(f(x).diff(x)*(1-sin(f(x))),f(x)) ```
# Check fits of 16S Bahram taxonomic and functional group models.
rm(list = ls())
source('paths.r')
library(runjags)
library(betareg)
library(coda)
library(ddpcr) # for quiet()
source('NEFI_functions/crib_fun.r')

# Load JAGS model fits. Pick the object that matches the groups to check:
# allfits <- readRDS(bahram_16S_prior_dmulti.ddirch_all.group_JAGSfits)  # all-group fits (path from paths.r)
allfits <- readRDS("/fs/data3/caverill/NEFI_data/16S/scc_gen/JAGS_output/bahram_16S_prior_dmulti.ddirch_fg_JAGSfits")

# Has everything converged? Print any high psrf scores for all groups.
for (i in 1:length(allfits)) {
  print(names(allfits)[i])
  fit <- allfits[[i]]
  quiet(s <- summary(fit$jags_model))
  print(s[which(s[, 11] > 1.1), ])  # column 11 holds the psrf values
}
fit$species_parameter_output  # inspect species-level parameters of the last fit

# Save predicted-vs-observed plots to PDF and check them.
pdf("/fs/data3/caverill/NEFI_data/16S/pecan_gen/figures/prior_fit_dmulti_ddirch_all_groups.pdf")
par(mfrow = c(2, 2))
for (p in 1:length(allfits)) {
  fit <- allfits[[p]]
  for (i in 1:ncol(fit$predicted)) {
    if (colnames(fit$predicted)[i] == "other") next
    obs <- fit$observed[, i] / rowSums(fit$observed)  # observed relative abundance
    plot(obs ~ fit$predicted[, i], pch = 16,
         xlab = "predicted abundance", ylab = "observed abundance")
    Axis(x = "predicted", side = 2)
    abline(0, 1, lwd = 2)                                         # 1:1 line
    abline(lm(obs ~ fit$predicted[, i]), lty = 2, col = 'purple') # best-fit line
    mod <- betareg::betareg(crib_fun(obs) ~ crib_fun(fit$predicted[, i]))
    rsq <- round(summary(mod)$pseudo.r.squared, 3)
    mtext(colnames(fit$predicted)[i], side = 3)
    mtext(paste0('R2=', rsq), side = 3, line = -1.5, adj = 0.05)
  }
}
dev.off()
[GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝⁶ : CommSemiring R inst✝⁵ : StarRing R inst✝⁴ : TopologicalSpace A inst✝³ : Semiring A inst✝² : Algebra R A inst✝¹ : StarRing A inst✝ : StarModule R A S₁ S₂ : StarSubalgebra R A h : S₁ ≤ S₂ hS₁ : IsClosed ↑S₁ src✝ : Embedding ↑(inclusion h) := embedding_inclusion h ⊢ Subtype.val ⁻¹' ↑S₁ = range ↑(inclusion h) [PROOFSTEP] convert (Set.range_subtype_map id _).symm [GOAL] case h.e'_2.h.e'_4 R : Type u_1 A : Type u_2 B : Type u_3 inst✝⁶ : CommSemiring R inst✝⁵ : StarRing R inst✝⁴ : TopologicalSpace A inst✝³ : Semiring A inst✝² : Algebra R A inst✝¹ : StarRing A inst✝ : StarModule R A S₁ S₂ : StarSubalgebra R A h : S₁ ≤ S₂ hS₁ : IsClosed ↑S₁ src✝ : Embedding ↑(inclusion h) := embedding_inclusion h ⊢ ↑S₁ = id '' {x | x ∈ S₁} [PROOFSTEP] rw [Set.image_id] [GOAL] case h.e'_2.h.e'_4 R : Type u_1 A : Type u_2 B : Type u_3 inst✝⁶ : CommSemiring R inst✝⁵ : StarRing R inst✝⁴ : TopologicalSpace A inst✝³ : Semiring A inst✝² : Algebra R A inst✝¹ : StarRing A inst✝ : StarModule R A S₁ S₂ : StarSubalgebra R A h : S₁ ≤ S₂ hS₁ : IsClosed ↑S₁ src✝ : Embedding ↑(inclusion h) := embedding_inclusion h ⊢ ↑S₁ = {x | x ∈ S₁} [PROOFSTEP] rfl [GOAL] case convert_4 R : Type u_1 A : Type u_2 B : Type u_3 inst✝⁶ : CommSemiring R inst✝⁵ : StarRing R inst✝⁴ : TopologicalSpace A inst✝³ : Semiring A inst✝² : Algebra R A inst✝¹ : StarRing A inst✝ : StarModule R A S₁ S₂ : StarSubalgebra R A h : S₁ ≤ S₂ hS₁ : IsClosed ↑S₁ src✝ : Embedding ↑(inclusion h) := embedding_inclusion h ⊢ ∀ (x : A), x ∈ S₁ → id x ∈ S₂ [PROOFSTEP] intro _ h' [GOAL] case convert_4 R : Type u_1 A : Type u_2 B : Type u_3 inst✝⁶ : CommSemiring R inst✝⁵ : StarRing R inst✝⁴ : TopologicalSpace A inst✝³ : Semiring A inst✝² : Algebra R A inst✝¹ : StarRing A inst✝ : StarModule R A S₁ S₂ : StarSubalgebra R A h : S₁ ≤ S₂ hS₁ : IsClosed ↑S₁ src✝ : Embedding ↑(inclusion h) := embedding_inclusion h x✝ : A h' : x✝ ∈ S₁ ⊢ id x✝ ∈ S₂ [PROOFSTEP] apply h h' [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : Algebra R A inst✝⁷ : StarRing A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSemiring A inst✝⁴ : ContinuousStar A inst✝³ : TopologicalSpace B inst✝² : Semiring B inst✝¹ : Algebra R B inst✝ : StarRing B s : Subalgebra R A ⊢ Subalgebra.topologicalClosure (star s) = star (Subalgebra.topologicalClosure s) [PROOFSTEP] suffices ∀ t : Subalgebra R A, (star t).topologicalClosure ≤ star t.topologicalClosure from le_antisymm (this s) (by simpa only [star_star] using Subalgebra.star_mono (this (star s))) [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : Algebra R A inst✝⁷ : StarRing A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSemiring A inst✝⁴ : ContinuousStar A inst✝³ : TopologicalSpace B inst✝² : Semiring B inst✝¹ : Algebra R B inst✝ : StarRing B s : Subalgebra R A this : ∀ (t : Subalgebra R A), Subalgebra.topologicalClosure (star t) ≤ star (Subalgebra.topologicalClosure t) ⊢ star (Subalgebra.topologicalClosure s) ≤ Subalgebra.topologicalClosure (star s) [PROOFSTEP] simpa only [star_star] using Subalgebra.star_mono (this (star s)) [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : Algebra R A inst✝⁷ : StarRing A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSemiring A inst✝⁴ : ContinuousStar A inst✝³ : TopologicalSpace B inst✝² : Semiring 
B inst✝¹ : Algebra R B inst✝ : StarRing B s : Subalgebra R A ⊢ ∀ (t : Subalgebra R A), Subalgebra.topologicalClosure (star t) ≤ star (Subalgebra.topologicalClosure t) [PROOFSTEP] exact fun t => (star t).topologicalClosure_minimal (Subalgebra.star_mono subset_closure) (isClosed_closure.preimage continuous_star) [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) ⊢ φ = ψ [PROOFSTEP] rw [FunLike.ext'_iff] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) ⊢ ↑φ = ↑ψ [PROOFSTEP] have : Dense (Set.range <| inclusion (le_topologicalClosure S)) := by refine' embedding_subtype_val.toInducing.dense_iff.2 fun x => _ convert show ↑x ∈ closure (S : Set A) from x.prop rw [← Set.range_comp] exact Set.ext fun y => ⟨by rintro ⟨y, rfl⟩ exact y.prop, fun hy => ⟨⟨y, hy⟩, rfl⟩⟩ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) ⊢ Dense (range ↑(inclusion (_ : S ≤ topologicalClosure S))) [PROOFSTEP] refine' embedding_subtype_val.toInducing.dense_iff.2 fun x => _ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) x : { x // x ∈ topologicalClosure S } ⊢ ↑x ∈ closure (Subtype.val '' range ↑(inclusion (_ : S ≤ topologicalClosure S))) [PROOFSTEP] convert show ↑x ∈ closure (S : Set A) from x.prop [GOAL] case h.e'_5.h.e'_3 R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : 
TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) x : { x // x ∈ topologicalClosure S } ⊢ Subtype.val '' range ↑(inclusion (_ : S ≤ topologicalClosure S)) = ↑S [PROOFSTEP] rw [← Set.range_comp] [GOAL] case h.e'_5.h.e'_3 R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) x : { x // x ∈ topologicalClosure S } ⊢ range (Subtype.val ∘ ↑(inclusion (_ : S ≤ topologicalClosure S))) = ↑S [PROOFSTEP] exact Set.ext fun y => ⟨by rintro ⟨y, rfl⟩ exact y.prop, fun hy => ⟨⟨y, hy⟩, rfl⟩⟩ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) x : { x // x ∈ topologicalClosure S } y : A ⊢ y ∈ range (Subtype.val ∘ ↑(inclusion (_ : S ≤ topologicalClosure S))) → y ∈ ↑S [PROOFSTEP] rintro ⟨y, rfl⟩ [GOAL] case intro R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) x : { x // x ∈ topologicalClosure S } y : { x // x ∈ S } ⊢ (Subtype.val ∘ ↑(inclusion (_ : S ≤ topologicalClosure S))) y ∈ ↑S [PROOFSTEP] exact y.prop [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ 
topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) this : Dense (range ↑(inclusion (_ : S ≤ topologicalClosure S))) ⊢ ↑φ = ↑ψ [PROOFSTEP] refine' Continuous.ext_on this hφ hψ _ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) this : Dense (range ↑(inclusion (_ : S ≤ topologicalClosure S))) ⊢ EqOn (↑φ) (↑ψ) (range ↑(inclusion (_ : S ≤ topologicalClosure S))) [PROOFSTEP] rintro _ ⟨x, rfl⟩ [GOAL] case intro R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹³ : CommSemiring R inst✝¹² : StarRing R inst✝¹¹ : TopologicalSpace A inst✝¹⁰ : Semiring A inst✝⁹ : Algebra R A inst✝⁸ : StarRing A inst✝⁷ : StarModule R A inst✝⁶ : TopologicalSemiring A inst✝⁵ : ContinuousStar A inst✝⁴ : TopologicalSpace B inst✝³ : Semiring B inst✝² : Algebra R B inst✝¹ : StarRing B inst✝ : T2Space B S : StarSubalgebra R A φ ψ : { x // x ∈ topologicalClosure S } →⋆ₐ[R] B hφ : Continuous ↑φ hψ : Continuous ↑ψ h : StarAlgHom.comp φ (inclusion (_ : S ≤ topologicalClosure S)) = StarAlgHom.comp ψ (inclusion (_ : S ≤ topologicalClosure S)) this : Dense (range ↑(inclusion (_ : S ≤ topologicalClosure S))) x : { x // x ∈ S } ⊢ ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) [PROOFSTEP] simpa only using FunLike.congr_fun h x [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : Algebra R A inst✝⁹ : StarRing A inst✝⁸ : StarModule R A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : Algebra R B inst✝² : StarRing B inst✝¹ : T2Space B F : Type u_4 S : StarSubalgebra R A inst✝ : StarAlgHomClass F R { x // x ∈ topologicalClosure S } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ∀ (x : { x // x ∈ S }), ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) ⊢ φ = ψ [PROOFSTEP] have : (φ : S.topologicalClosure →⋆ₐ[R] B) = (ψ : S.topologicalClosure →⋆ₐ[R] B) := by refine StarAlgHom.ext_topologicalClosure (R := R) (A := A) (B := B) hφ hψ (StarAlgHom.ext ?_) simpa only [StarAlgHom.coe_comp, StarAlgHom.coe_coe] using h [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : Algebra R A inst✝⁹ : StarRing A inst✝⁸ : StarModule R A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : Algebra R B inst✝² : StarRing B inst✝¹ : T2Space B F : Type u_4 S : StarSubalgebra R A inst✝ : StarAlgHomClass F R { x // x ∈ topologicalClosure S } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ∀ (x : { x // x ∈ S }), ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) ⊢ ↑φ = ↑ψ [PROOFSTEP] refine StarAlgHom.ext_topologicalClosure (R := R) (A := A) (B := B) hφ hψ (StarAlgHom.ext ?_) [GOAL] R : Type u_1 A : Type 
u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : Algebra R A inst✝⁹ : StarRing A inst✝⁸ : StarModule R A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : Algebra R B inst✝² : StarRing B inst✝¹ : T2Space B F : Type u_4 S : StarSubalgebra R A inst✝ : StarAlgHomClass F R { x // x ∈ topologicalClosure S } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ∀ (x : { x // x ∈ S }), ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) ⊢ ∀ (x : { x // x ∈ S }), ↑(StarAlgHom.comp (↑φ) (inclusion (_ : S ≤ topologicalClosure S))) x = ↑(StarAlgHom.comp (↑ψ) (inclusion (_ : S ≤ topologicalClosure S))) x [PROOFSTEP] simpa only [StarAlgHom.coe_comp, StarAlgHom.coe_coe] using h [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : Algebra R A inst✝⁹ : StarRing A inst✝⁸ : StarModule R A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : Algebra R B inst✝² : StarRing B inst✝¹ : T2Space B F : Type u_4 S : StarSubalgebra R A inst✝ : StarAlgHomClass F R { x // x ∈ topologicalClosure S } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ∀ (x : { x // x ∈ S }), ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) this : ↑φ = ↑ψ ⊢ φ = ψ [PROOFSTEP] rw [FunLike.ext'_iff, ← StarAlgHom.coe_coe] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : Algebra R A inst✝⁹ : StarRing A inst✝⁸ : StarModule R A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : Algebra R B inst✝² : StarRing B inst✝¹ : T2Space B F : Type u_4 S : StarSubalgebra R A inst✝ : StarAlgHomClass F R { x // x ∈ topologicalClosure S } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ∀ (x : { x // x ∈ S }), ↑φ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) = ↑ψ (↑(inclusion (_ : S ≤ topologicalClosure S)) x) this : ↑φ = ↑ψ ⊢ ↑↑φ = ↑ψ [PROOFSTEP] apply congrArg _ this [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : StarRing A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : Algebra R A inst✝⁴ : StarModule R A inst✝³ : TopologicalSpace B inst✝² : Semiring B inst✝¹ : StarRing B inst✝ : Algebra R B x : A ⊢ IsClosed (range Subtype.val) [PROOFSTEP] convert elementalStarAlgebra.isClosed R x [GOAL] case h.e'_3 R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : StarRing A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : Algebra R A inst✝⁴ : StarModule R A inst✝³ : TopologicalSpace B inst✝² : Semiring B inst✝¹ : StarRing B inst✝ : Algebra R B x : A ⊢ range Subtype.val = ↑(elementalStarAlgebra R x) [PROOFSTEP] exact Set.ext fun y => ⟨by rintro ⟨y, rfl⟩ exact y.prop, fun hy => ⟨⟨y, hy⟩, rfl⟩⟩ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : StarRing A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : Algebra R A inst✝⁴ : StarModule R A inst✝³ : TopologicalSpace B 
inst✝² : Semiring B inst✝¹ : StarRing B inst✝ : Algebra R B x y : A ⊢ y ∈ range Subtype.val → y ∈ ↑(elementalStarAlgebra R x) [PROOFSTEP] rintro ⟨y, rfl⟩ [GOAL] case intro R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹² : CommSemiring R inst✝¹¹ : StarRing R inst✝¹⁰ : TopologicalSpace A inst✝⁹ : Semiring A inst✝⁸ : StarRing A inst✝⁷ : TopologicalSemiring A inst✝⁶ : ContinuousStar A inst✝⁵ : Algebra R A inst✝⁴ : StarModule R A inst✝³ : TopologicalSpace B inst✝² : Semiring B inst✝¹ : StarRing B inst✝ : Algebra R B x : A y : { x_1 // x_1 ∈ ↑(elementalStarAlgebra R x) } ⊢ ↑y ∈ ↑(elementalStarAlgebra R x) [PROOFSTEP] exact y.prop [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } ⊢ φ = ψ [PROOFSTEP] refine StarAlgHomClass.ext_topologicalClosure hφ hψ fun x => ?_ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) [PROOFSTEP] apply adjoin_induction' x ?_ ?_ ?_ ?_ ?_ [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ∀ (x : A) (h : x ∈ {a}), ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) { val := x, property := (_ : x ∈ ↑(adjoin R {a})) }) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) { val := x, property := (_ : x ∈ ↑(adjoin R {a})) }) R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ 
: StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ∀ (r : R), ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (↑(algebraMap R { x // x ∈ adjoin R {a} }) r)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (↑(algebraMap R { x // x ∈ adjoin R {a} }) r)) R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ∀ (x y : { x // x ∈ adjoin R {a} }), ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) → ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) → ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x + y)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x + y)) R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ∀ (x y : { x // x ∈ adjoin R {a} }), ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) → ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) → ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x * y)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x * y)) R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : 
StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } ⊢ ∀ (x : { x // x ∈ adjoin R {a} }), ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) → ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (star x)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (star x)) [PROOFSTEP] exacts [fun y hy => by simpa only [Set.mem_singleton_iff.mp hy] using h, fun r => by simp only [AlgHomClass.commutes], fun x y hx hy => by simp only [map_add, hx, hy], fun x y hx hy => by simp only [map_mul, hx, hy], fun x hx => by simp only [map_star, hx]] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } y : A hy : y ∈ {a} ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) { val := y, property := (_ : y ∈ ↑(adjoin R {a})) }) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) { val := y, property := (_ : y ∈ ↑(adjoin R {a})) }) [PROOFSTEP] simpa only [Set.mem_singleton_iff.mp hy] using h [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x : { x // x ∈ adjoin R {a} } r : R ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (↑(algebraMap R { x // x ∈ adjoin R {a} }) r)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (↑(algebraMap R { x // x ∈ adjoin R {a} }) r)) [PROOFSTEP] simp only [AlgHomClass.commutes] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R 
a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x✝ x y : { x // x ∈ adjoin R {a} } hx : ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) hy : ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x + y)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x + y)) [PROOFSTEP] simp only [map_add, hx, hy] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x✝ x y : { x // x ∈ adjoin R {a} } hx : ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) hy : ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) y) ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x * y)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (x * y)) [PROOFSTEP] simp only [map_mul, hx, hy] [GOAL] R : Type u_1 A : Type u_2 B : Type u_3 inst✝¹⁴ : CommSemiring R inst✝¹³ : StarRing R inst✝¹² : TopologicalSpace A inst✝¹¹ : Semiring A inst✝¹⁰ : StarRing A inst✝⁹ : TopologicalSemiring A inst✝⁸ : ContinuousStar A inst✝⁷ : Algebra R A inst✝⁶ : StarModule R A inst✝⁵ : TopologicalSpace B inst✝⁴ : Semiring B inst✝³ : StarRing B inst✝² : Algebra R B inst✝¹ : T2Space B F : Type u_4 a : A inst✝ : StarAlgHomClass F R { x // x ∈ elementalStarAlgebra R a } B φ ψ : F hφ : Continuous ↑φ hψ : Continuous ↑ψ h : ↑φ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } = ↑ψ { val := a, property := (_ : a ∈ elementalStarAlgebra R a) } x✝ x : { x // x ∈ adjoin R {a} } hx : ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) x) ⊢ ↑φ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (star x)) = ↑ψ (↑(StarSubalgebra.inclusion (_ : adjoin R {a} ≤ topologicalClosure (adjoin R {a}))) (star x)) [PROOFSTEP] simp only [map_star, hx]
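The long tactic trace above interleaves several goals; the central one is that taking the topological closure of a subalgebra commutes with the star operation. Below is a hedged, self-contained restatement of that goal as a Lean snippet. The statement and instance assumptions are copied from the tactic state itself; `sorry` stands in for the antisymmetry argument (`le_antisymm`, `topologicalClosure_minimal`, `Subalgebra.star_mono`) carried out in the trace, `import Mathlib` is used as a catch-all, and the exact spellings may need adjustment for other Mathlib versions.

import Mathlib

-- Restatement of the closure/star goal from the trace above (illustrative only);
-- `sorry` replaces the antisymmetry argument performed in the trace.
example {R A : Type _} [CommSemiring R] [StarRing R] [TopologicalSpace A] [Semiring A]
    [Algebra R A] [StarRing A] [StarModule R A] [TopologicalSemiring A] [ContinuousStar A]
    (s : Subalgebra R A) :
    Subalgebra.topologicalClosure (star s) = star (Subalgebra.topologicalClosure s) := by
  sorry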
# The vector product of two vectors with 3 elements.
BindGlobal("VectorProduct", function(u, v)
    return [u[2]*v[3]-u[3]*v[2], u[3]*v[1]-u[1]*v[3], u[1]*v[2]-u[2]*v[1]];
end);

# Multiplication in Hall algebras
BindGlobal("HallMultiplication", function(p)
    local r;
    r := CoefficientsOfUnivariatePolynomial(p)[2];
    return function(x, y)
        if IsZero(y[2]) then
            return x*y[1];
        else
            return [x[1]*y[1] - x[2]/y[2]*Value(p, y[1]), x[1]*y[2] - x[2]*(y[1] + r)];
        fi;
    end;
end);

# Multiplication in Dickson near-fields
BindGlobal("DicksonMultiplication", q -> function(x, y)
    if IsZero(y) then
        return 0*Z(q);
    else
        return x^(q^LogFFE(y, Z(q^2))) * y;
    fi;
end);

# Right division in Dickson near-fields
BindGlobal("DicksonRightDivision", q -> function(x, y)
    if IsZero(x) then
        return 0*Z(q);
    else
        return (x / y)^(q^LogFFE(y, Z(q^2)));
    fi;
end);

# Multiplication in exceptional near-fields
BindGlobal("ExceptionalMultiplication", function(q, F, B)
    local mat;
    mat := ToExceptionalMatrix(q, F, B);
    return function(x, y)
        local M;
        M := mat(x) * mat(y);
        return M[1]*B;
    end;
end);

# Right division in exceptional near-fields
BindGlobal("ExceptionalRightDivision", function(q, F, B)
    local mat;
    mat := ToExceptionalMatrix(q, F, B);
    return function(x, y)
        local M;
        M := mat(x) * mat(y)^-1;
        return M[1]*B;
    end;
end);

# Normalize a vector over a semifield given the semifield right division.
BindGlobal("NormalizeSemifieldVector", div -> function(v)
    local n;
    n := First(v, x -> not IsZero(x));
    return List(v, x -> div(x, n));
end);
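A brief usage sketch of two of the helpers above, intended for an interactive GAP session after the definitions have been read in; the sample arguments are illustrative choices only, not values taken from the original source.

# Usage sketch (assumes the definitions above are loaded; sample values are illustrative).
# Vector product of two standard basis vectors: yields the third basis vector.
VectorProduct([1, 0, 0], [0, 1, 0]);   # [0, 0, 1]

# Multiplication in the Dickson near-field of order 9 (q = 3): the left factor is
# raised to q^LogFFE(y, Z(q^2)), i.e. a power of the Frobenius depending on the
# discrete log of the right factor, before multiplying.
mul := DicksonMultiplication(3);;
mul(Z(9), Z(9)^3);   # here x^(3^3) * y = Z(9)^3 * Z(9)^3 = Z(9)^6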