module Control.Monad.Algebra
import Control.Algebra
import Control.Monad.Identity
%default total
public export
SemigroupV ty => SemigroupV (Identity ty) where
semigroupOpIsAssociative (Id l) (Id c) (Id r) =
rewrite semigroupOpIsAssociative l c r in Refl
public export
MonoidV ty => MonoidV (Identity ty) where
monoidNeutralIsNeutralL (Id l) =
rewrite monoidNeutralIsNeutralL l in Refl
monoidNeutralIsNeutralR (Id r) =
rewrite monoidNeutralIsNeutralR r in Refl
|
PERUVIAN SACRED & ADVENTURE S.R.LTDA is a Cusco-based company with extensive experience in tourism at the national level. For more than 12 years it has reliably provided tourist services in the different modalities of tourism, especially adventure tourism through hiking and trekking within the Cusco region and on the Red de Caminos Inka (Inka Trail Network) of the Historic Sanctuary of Machu Picchu, from 2005 to the present.
PERUVIAN SACRED & ADVENTURE S.R.LTDA holds the corresponding legal authorizations, in compliance with Supreme Decree N° 004-2016-MINCETUR, which regulates the provision of travel agency services, as stated in Certificate N° 346-2017-GR-CUSCO/DIRCETUR-DT issued by the Cusco Department of Foreign Trade and Tourism; Operating License N° 005956 for our premises, issued by the Municipality of Cusco; and Authorization N° 0145 to operate on the Inka Trail Network, issued by the Headquarters of the Historic Sanctuary of Machu Picchu of SERNANP in January 2016, effective until this year and temporarily extended under the new tourist use regulation of the SHM. We hold the Certificate of the ESNNA Code of Conduct issued by MINCETUR, and we are active members of the Association of Tourism Agencies Cusco (AATC).
Our travel and tourism agency has extensive experience in conducting and operating adventure tourism activities at the different tourist attractions of our country, the most requested being the Inka Trail – Machupicchu in its different routes, as well as Salkantay, Lares, Choquequirao, Huchuyqosqo, Wininkunka, Ausangate, Vilcabamba, Pumamarca, Ancascocha, among others in and outside the Cusco region. |
#include <blitzml/sparse_linear/sparse_linear_solver.h>
#include <blitzml/base/math_util.h>
#include <blitzml/base/vector_util.h>
#include <blitzml/base/timer.h>
#include <blitzml/base/subproblem_controller.h>
#include <blitzml/smooth_loss/squared_loss.h>
#include <blitzml/smooth_loss/logistic_loss.h>
#include <blitzml/smooth_loss/squared_hinge_loss.h>
#include <blitzml/smooth_loss/smoothed_hinge_loss.h>
#include <blitzml/smooth_loss/huber_loss.h>
using std::vector;
namespace BlitzML {
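// Dual objective of the l1-regularized problem: the negated value of
// (total loss at Aomega + l1_penalty * ||omega||_1).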
value_t SparseLinearSolver::compute_dual_obj() const {
value_t loss = 0.;
for (index_t j = 0; j < num_examples; ++j) {
loss += loss_function->compute_loss(Aomega[j], data->b_value(j));
}
return -(loss + l1_penalty * l1_norm(omega));
}
value_t SparseLinearSolver::compute_primal_obj_x() const {
value_t obj = 0.;
for (index_t j = 0; j < num_examples; ++j) {
obj += loss_function->compute_conjugate(kappa_x * x[j], data->b_value(j));
}
return obj;
}
value_t SparseLinearSolver::compute_primal_obj_y() const {
value_t obj = 0.;
for (index_t j = 0; j < num_examples; ++j) {
obj += loss_function->compute_conjugate(y[j], data->b_value(j));
}
return obj;
}
void SparseLinearSolver::update_subproblem_obj_vals() {
value_t primal_obj_x = compute_primal_obj_x();
value_t dual_obj = compute_dual_obj();
obj_vals.set_primal_obj_x(primal_obj_x);
obj_vals.set_dual_obj(dual_obj);
}
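// Outer proximal-Newton loop: each iteration builds a quadratic model of the
// smooth loss, approximately minimizes it by coordinate descent over the
// working set, then backtracks and updates the dual iterates x and z.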
void SparseLinearSolver::solve_subproblem() {
SubproblemController controller(subproblem_params);
SubproblemState initial_state;
set_initial_subproblem_state(initial_state);
set_initial_subproblem_z();
for (int newton_itr = 0; newton_itr < subproblem_params.max_iterations; ++newton_itr) {
setup_proximal_newton_problem();
value_t cd_threshold = compute_cd_threshold();
scale_Delta_Aomega_by_2nd_derivatives();
int cd_itr, max_num_cd_itr, min_num_cd_itr;
set_max_and_min_num_cd_itr(max_num_cd_itr, min_num_cd_itr);
Timer t1;
for (cd_itr = 1; cd_itr <= max_num_cd_itr; ++cd_itr) {
value_t est_grad_sq = update_coordinates_in_working_set();
if (cd_itr >= min_num_cd_itr) {
if (controller.should_compute_duality_gap()) {
break;
}
if (est_grad_sq < cd_threshold) {
break;
}
}
}
unscale_Delta_Aomega_by_2nd_derivatives();
perform_backtracking();
update_bias_subproblem();
update_kappa_x();
update_subproblem_obj_vals();
update_z();
bool sufficient_dual_progress =
check_sufficient_dual_progress(initial_state, subproblem_params.epsilon);
if (controller.should_terminate(obj_vals, sufficient_dual_progress)
|| (newton_itr + 1 == subproblem_params.max_iterations)) {
return;
}
}
}
value_t SparseLinearSolver::update_coordinates_in_working_set() {
value_t ret = 0.;
for (const_index_itr ind = ws.begin_indices(); ind != ws.end_indices(); ++ind) {
value_t subgrad = update_feature(*ind);
ret += sq(subgrad);
}
ws.shuffle_indices();
return ret;
}
void SparseLinearSolver::
set_max_and_min_num_cd_itr(int &max_num_cd_itr, int &min_num_cd_itr) const {
if (iteration_number == 1) {
min_num_cd_itr = 1;
max_num_cd_itr = 1;
} else {
min_num_cd_itr = 3;
max_num_cd_itr = 64;
}
}
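// Stopping threshold for the inner coordinate descent loop: one quarter of the
// squared norm of an approximate minimum-norm subgradient over the working set.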
value_t SparseLinearSolver::compute_cd_threshold() const {
value_t ret = 0.;
for (const_index_itr i = ws.begin(); i != ws.end(); ++i) {
if (omega[*i] != 0.) {
ret += sq(fabs(ATx[*i]) - l1_penalty);
} else {
ret += sq(soft_threshold(ATx[*i], l1_penalty));
}
}
return 0.25 * ret;
}
value_t SparseLinearSolver::norm_diff_sq_z_initial_x(const SubproblemState& initial_state) const {
value_t norm_diff_sq = 0.;
for (index_t j = 0; j < num_examples; ++j) {
norm_diff_sq += sq(initial_state.x[j] - kappa_z * z[j]);
}
return norm_diff_sq;
}
void SparseLinearSolver::setup_subproblem_no_working_sets() {
Solver::setup_subproblem_no_working_sets();
subproblem_params.max_iterations = 1;
}
void SparseLinearSolver::set_initial_subproblem_z() {
z = y;
for (const_index_itr i = ws.begin_sorted(); i != ws.end_sorted(); ++i) {
ATz[*i] = ATy[*i];
}
kappa_z = 1.0;
obj_vals.set_primal_obj_z(obj_vals.primal_obj_y());
z_match_y = true;
z_match_x = false;
}
void SparseLinearSolver::setup_proximal_newton_problem() {
Delta_omega.assign(ws.size(), 0.);
Delta_Aomega.assign(num_examples, 0.);
value_t hessian_extra = 1e-12;
update_newton_2nd_derivatives(hessian_extra);
Delta_bias = (use_bias) ? -sum_x / sum_newton_2nd_derivatives : 0.;
inv_newton_2nd_derivative_cache.resize(ws.size());
col_ip_newton_2nd_derivative_cache.resize(ws.size());
bool s_d_is_const = is_vector_const(newton_2nd_derivatives, 1e-12);
for (size_t ind = 0; ind != ws.size(); ++ind) {
index_t i = ws.ith_member(ind);
const Column& col = *A_cols[i];
value_t weighted_norm_sq = 0;
if (s_d_is_const) {
value_t const_val = newton_2nd_derivatives[0];
weighted_norm_sq = const_val / (inv_lipschitz_cache[i] * loss_function->lipschitz_constant());
} else {
weighted_norm_sq = col.weighted_norm_sq(newton_2nd_derivatives);
}
if (use_bias) {
if (s_d_is_const) {
value_t const_val = newton_2nd_derivatives[0];
col_ip_newton_2nd_derivative_cache[ind] = col_means_cache[i] * const_val * num_examples;
} else {
col_ip_newton_2nd_derivative_cache[ind] = col.inner_product(newton_2nd_derivatives);
weighted_norm_sq += col_means_cache[i] * (col_means_cache[i] * sum_newton_2nd_derivatives - 2 * col_ip_newton_2nd_derivative_cache[ind]);
}
}
if (weighted_norm_sq > 0.) {
inv_newton_2nd_derivative_cache[ind] = 1. / weighted_norm_sq;
} else {
inv_newton_2nd_derivative_cache[ind] = -1.;
}
}
}
void SparseLinearSolver::update_z() {
bool is_best = obj_vals.primal_obj_x() <= obj_vals.primal_obj_z();
if (is_best) {
obj_vals.set_primal_obj_z(obj_vals.primal_obj_x());
z = x;
kappa_z = kappa_x;
for (const_index_itr i = ws.begin_sorted(); i != ws.end_sorted(); ++i) {
ATz[*i] = ATx[*i];
}
z_match_x = true;
z_match_y = false;
} else {
z_match_x = false;
}
}
void SparseLinearSolver::scale_Delta_Aomega_by_2nd_derivatives() {
for (index_t i = 0; i < num_examples; ++i) {
Delta_Aomega[i] *= newton_2nd_derivatives[i];
}
}
void SparseLinearSolver::unscale_Delta_Aomega_by_2nd_derivatives() {
for (index_t i = 0; i < num_examples; ++i) {
Delta_Aomega[i] /= newton_2nd_derivatives[i];
}
}
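// Proximal-Newton update of a single working-set feature: take a gradient step
// scaled by the cached inverse curvature, then apply the usual soft-thresholding
// operator sign(v) * max(|v| - t, 0). Returns |grad| - l1_penalty, which the
// caller squares to estimate remaining progress.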
value_t SparseLinearSolver::update_feature(index_t working_set_ind) {
// note: Delta_Aomega is scaled by newton 2nd derivatives
value_t inv_L = inv_newton_2nd_derivative_cache[working_set_ind];
if (inv_L < 0) {
return 0.;
}
index_t feature_ind = ws.ith_member(working_set_ind);
const Column& col = *A_cols[feature_ind];
value_t grad = ATx[feature_ind] +
col.inner_product(Delta_Aomega) +
col_ip_newton_2nd_derivative_cache[working_set_ind] * Delta_bias;
value_t current_value = omega[feature_ind] + Delta_omega[working_set_ind];
value_t shrunk_grad = fabs(grad) - l1_penalty;
if (current_value == 0. && shrunk_grad < 0) {
return 0.;
}
value_t pre_shrink = current_value - grad * inv_L;
value_t new_value = soft_threshold(pre_shrink, l1_penalty * inv_L);
value_t delta = new_value - current_value;
col.weighted_add_multiple(Delta_Aomega, newton_2nd_derivatives, delta);
Delta_omega[working_set_ind] += delta;
if (use_bias) {
value_t grad_bias =
col_ip_newton_2nd_derivative_cache[working_set_ind] * delta;
Delta_bias -= grad_bias * (1 / sum_newton_2nd_derivatives);
}
return shrunk_grad;
}
void SparseLinearSolver::update_bias_subproblem() {
update_bias();
}
void SparseLinearSolver::update_bias(int max_newton_itr) {
if (!use_bias) {
return;
}
Delta_Aomega.assign(num_examples, 0.);
for (int newton_itr = 0; newton_itr < max_newton_itr; ++newton_itr) {
value_t change = perform_newton_update_on_bias();
if (fabs(change) == 0.0) {
return;
}
}
}
value_t SparseLinearSolver::perform_newton_update_on_bias() {
update_newton_2nd_derivatives();
Delta_bias = (sum_newton_2nd_derivatives > 0) ?
-sum_x / sum_newton_2nd_derivatives : -100 * sign(sum_x);
Delta_bias = (fabs(Delta_bias) < 100) ? Delta_bias : 100 * sign(Delta_bias);
value_t step_size = 1.;
unsigned backtrack_itr = 0;
while (++backtrack_itr <= 6) {
update_x(step_size);
if (sum_x * Delta_bias <= 0.) {
value_t change = step_size * Delta_bias;
bias += change;
add_scalar_to_vector(Aomega, change);
return change;
}
if (backtrack_itr <= 2) {
step_size *= 0.5;
} else {
step_size *= 0.1;
}
if (step_size * Delta_bias == 0.) {
break;
}
}
update_x(0.);
return 0;
}
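// Backtracking line search: halve the step size until the directional
// derivative of the objective along (Delta_omega, Delta_bias) is nonpositive,
// then commit the accepted step to omega, bias, and Aomega.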
void SparseLinearSolver::perform_backtracking() {
value_t step_size = 1.;
unsigned backtrack_itr = 0;
while (++backtrack_itr) {
update_x(step_size);
value_t derivative = compute_backtracking_step_size_derivative(step_size);
if (derivative <= 1e-12) {
break;
} else if (backtrack_itr > MAX_BACKTRACK_STEPS) {
compute_Aomega();
update_x(0.);
return;
}
step_size *= 0.5;
}
for (const_index_itr ind = ws.begin_indices(); ind != ws.end_indices(); ++ind) {
omega[ws.ith_member(*ind)] += step_size * Delta_omega[*ind];
}
bias += step_size * Delta_bias;
for (index_t i = 0; i < num_examples; ++i) {
Aomega[i] += step_size * (Delta_Aomega[i] + Delta_bias);
}
}
value_t SparseLinearSolver::
compute_backtracking_step_size_derivative(value_t step_size) const {
value_t derivative_loss = inner_product(Delta_Aomega, x) + Delta_bias * sum_x;
value_t derivative_l1 = 0.;
for (const_index_itr ind = ws.begin_indices(); ind != ws.end_indices(); ++ind) {
index_t i = ws.ith_member(*ind);
value_t omega_i = omega[i] + step_size * Delta_omega[*ind];
if (omega_i == 0.) {
derivative_l1 -= l1_penalty * fabs(Delta_omega[*ind]);
} else {
derivative_l1 += l1_penalty * Delta_omega[*ind] * sign(omega_i);
}
}
return derivative_loss + derivative_l1;
}
void SparseLinearSolver::update_x(value_t step_size) {
const value_t* labels = data->b_values();
for (index_t i = 0; i < num_examples; ++i) {
value_t diff_aiTomega = step_size * (Delta_Aomega[i] + Delta_bias);
x[i] = loss_function->compute_deriative(Aomega[i] + diff_aiTomega, labels[i]);
}
sum_x = sum_vector(x);
z_match_x = false;
z_match_y = false;
}
void SparseLinearSolver::update_newton_2nd_derivatives(value_t epsilon_to_add) {
for (index_t i = 0; i < num_examples; ++i) {
value_t a_dot_omega = Aomega[i];
value_t label = data->b_value(i);
newton_2nd_derivatives[i] =
loss_function->compute_2nd_derivative(a_dot_omega, label) + epsilon_to_add;
}
sum_newton_2nd_derivatives = sum_vector(newton_2nd_derivatives);
}
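// Dual rescaling: kappa_x shrinks x just enough that the rescaled gradients
// A^T(kappa_x * x) satisfy the l1 dual constraint |A_i^T x| <= l1_penalty.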
void SparseLinearSolver::update_kappa_x() {
value_t max_abs_grad = l1_penalty;
for (const_index_itr i = ws.begin_sorted(); i != ws.end_sorted(); ++i) {
value_t grad = A_cols[*i]->inner_product(x);
ATx[*i] = grad;
if (fabs(grad) > max_abs_grad) {
max_abs_grad = fabs(grad);
}
}
kappa_x = (l1_penalty < max_abs_grad) ? l1_penalty / max_abs_grad : 1.;
}
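// Largest mixing coefficient alpha in [0, 1] such that updating y toward
// kappa_z * z keeps every non-working-set gradient inside the l1 dual constraint.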
value_t SparseLinearSolver::compute_alpha() {
update_non_working_set_gradients();
value_t alpha = 1.;
for (index_t j = 0; j < num_components; ++j) {
value_t alpha_j = compute_alpha_for_feature(j);
if (alpha_j < alpha) {
alpha = alpha_j;
}
}
return alpha;
}
void SparseLinearSolver::update_non_working_set_gradients() {
for (index_t i = 0; i < num_components; ++i) {
if (!ws.is_in_working_set(i)) {
ATx[i] = A_cols[i]->inner_product(x);
}
}
if (z_match_y) {
ATz = ATy;
} else if (z_match_x) {
ATz = ATx;
} else {
for (index_t i = 0; i < num_components; ++i) {
if (!ws.is_in_working_set(i)) {
ATz[i] = A_cols[i]->inner_product(z);
}
}
}
}
value_t SparseLinearSolver::compute_alpha_for_feature(index_t i) const {
if (ws.is_in_working_set(i)) {
return 1.;
}
value_t kappa_x_AiTz = kappa_z * ATz[i];
if (fabs(kappa_x_AiTz) <= l1_penalty) {
return 1.;
}
value_t AiTy = ATy[i];
if (kappa_x_AiTz == AiTy) {
return 1.;
}
value_t value = (kappa_x_AiTz < 0) ? -l1_penalty : l1_penalty;
return (value - AiTy) / (kappa_x_AiTz - AiTy);
}
void SparseLinearSolver::update_y() {
value_t alpha_kappa_z = alpha * kappa_z;
for (index_t j = 0; j < num_examples; ++j) {
y[j] = alpha_kappa_z * z[j] + (1 - alpha) * y[j];
}
for (index_t i = 0; i < num_components; ++i) {
ATy[i] = alpha_kappa_z * ATz[i] + (1 - alpha) * ATy[i];
}
}
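// Binary search over the nested capsule candidates: returns the index of the
// smallest capsule guaranteed to contain feature i (0 if omega[i] is nonzero,
// so active features always get top priority).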
unsigned short SparseLinearSolver::compute_priority_value(index_t i) const {
if (omega[i] != 0.) {
return 0;
}
value_t ATyi = ATy[i];
value_t ATxi = ATx[i];
value_t diffi = ATxi - ATyi;
value_t inv_norm_sq = inv_lipschitz_cache[i] * loss_function->lipschitz_constant();
unsigned short top = capsule_candidates.size();
unsigned short bottom = 0;
bool in_working_set = false;
do {
unsigned short ind = (top + bottom) / 2;
const CapsuleParams& cap = capsule_candidates[ind];
value_t beta_l = cap.left_beta;
value_t val_l = ATyi + beta_l * diffi;
value_t beta_r = cap.right_beta;
value_t val_r = ATyi + beta_r * diffi;
value_t val = std::max(std::fabs(val_l), std::fabs(val_r));
value_t dist_sq = (val >= l1_penalty) ? 0.
: sq(l1_penalty - val) * inv_norm_sq;
in_working_set = (dist_sq <= sq(cap.radius));
if (in_working_set) {
top = ind;
} else {
bottom = ind;
}
} while (top > bottom + 1);
return top;
}
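// Safe screening: discard features whose weights are guaranteed to be zero at
// the optimum, based on the current duality gap.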
void SparseLinearSolver::screen_components() {
vector<bool> should_screen;
bool any_screened = mark_components_to_screen(should_screen);
if (any_screened) {
apply_screening_result(should_screen);
}
}
bool SparseLinearSolver::mark_components_to_screen(vector<bool> &should_screen) const {
value_t d_sq = compute_d_sq();
value_t thresh_sq = obj_vals.duality_gap() - d_sq / 4;
if (thresh_sq <= 0.) {
return false;
}
bool impossible_thresh = sq(l1_penalty) * max_inv_lipschitz_cache < thresh_sq;
if (impossible_thresh && iteration_number != 1) {
return false;
}
should_screen.assign(num_components, false);
bool any_screened = false;
for (index_t i = 0; i < num_components; ++i) {
if (inv_lipschitz_cache[i] < 0) {
should_screen[i] = true;
any_screened = true;
}
if (omega[i] != 0.) {
continue;
}
value_t val = (ATx[i] + ATy[i]) / 2;
value_t dist = l1_penalty - fabs(val);
if (dist < 0.) {
continue;
}
bool screen_i = (sq(dist) * inv_lipschitz_cache[i] >= thresh_sq);
if (screen_i) {
should_screen[i] = true;
any_screened = true;
}
}
return any_screened;
}
void SparseLinearSolver::
apply_screening_result(const vector<bool> &should_screen) {
index_t i = 0;
for (index_t ind = 0; ind < num_components; ++ind) {
if (!should_screen[ind]) {
ATy[i] = ATy[ind];
ATx[i] = ATx[ind];
omega[i] = omega[ind];
inv_lipschitz_cache[i] = inv_lipschitz_cache[ind];
col_means_cache[i] = col_means_cache[ind];
A_cols[i] = A_cols[ind];
screen_indices_map[i] = screen_indices_map[ind];
++i;
}
}
num_components = i;
omega.resize(num_components);
ws.reduce_max_size(num_components);
if (!use_working_sets()) {
ws.clear();
for (index_t i = 0; i < num_components; ++i) {
ws.add_index(i);
}
}
}
size_t SparseLinearSolver::size_of_component(index_t i) const {
return A_cols[i]->nnz();
}
index_t SparseLinearSolver::initial_problem_size() const {
return data->num_cols();
}
void SparseLinearSolver::initialize(value_t *initial_conditions) {
deserialize_parameters();
set_data_dimensions();
initialize_proximal_newton_variables();
cache_feature_info();
initialize_blitz_variables(initial_conditions);
}
value_t SparseLinearSolver::strong_convexity_constant() const {
return 1 / loss_function->lipschitz_constant();
}
void SparseLinearSolver::deserialize_parameters() {
l1_penalty = (*params)[0];
if ((*params)[2] != 0.) {
use_bias = true;
} else {
use_bias = false;
}
int loss_type = static_cast<int>((*params)[3]);
delete_loss_function();
switch (loss_type) {
case 0:
loss_function = new SquaredLoss();
break;
case 1:
loss_function = new HuberLoss();
break;
case 2:
loss_function = new LogisticLoss();
break;
case 3:
loss_function = new SquaredHingeLoss();
break;
case 4:
loss_function = new SmoothedHingeLoss();
break;
default:
loss_function = new SquaredLoss();
break;
}
}
void SparseLinearSolver::set_data_dimensions() {
num_examples = data->num_rows();
num_components = data->num_cols();
}
void SparseLinearSolver::initialize_blitz_variables(
value_t* initial_conditions) {
initialize_model(initial_conditions);
compute_Aomega();
x.resize(num_examples);
ATx.resize(num_components);
kappa_x = 1.;
y.assign(num_examples, 0.);
ATy.assign(num_components, 0.);
update_x(0.0);
update_bias(15);
z = x;
ATz.assign(num_components, 0.);
kappa_z = 1.0;
z_match_x = true;
z_match_y = false;
}
void SparseLinearSolver::initialize_model(value_t* initial_conditions) {
if (initial_conditions != NULL) {
omega.assign(initial_conditions, initial_conditions + num_components);
} else {
omega.assign(num_components, 0.);
}
bias = 0.;
}
void SparseLinearSolver::compute_Aomega() {
Aomega.assign(num_examples, bias);
for (index_t i = 0; i < num_components; ++i) {
if (omega[i] != 0.) {
A_cols[i]->add_multiple(Aomega, omega[i]);
}
}
}
void SparseLinearSolver::cache_feature_info() {
A_cols.resize(num_components);
inv_lipschitz_cache.resize(num_components);
col_means_cache.resize(num_components);
value_t lipschitz_constant = loss_function->lipschitz_constant();
for (index_t i = 0; i < num_components; ++i) {
A_cols[i] = data->column(i);
const Column& col = *A_cols[i];
value_t norm_sq = col.l2_norm_sq();
value_t mean = col.mean();
if (use_bias) {
norm_sq -= num_examples * sq(mean);
}
if (norm_sq <= 1e-12) {
norm_sq = -1.;
}
inv_lipschitz_cache[i] = 1 / (norm_sq * lipschitz_constant);
col_means_cache[i] = mean;
}
max_inv_lipschitz_cache = max_vector(inv_lipschitz_cache);
screen_indices_map.resize(num_components);
for (index_t i = 0; i < num_components; ++i) {
screen_indices_map[i] = i;
}
}
void SparseLinearSolver::initialize_proximal_newton_variables() {
newton_2nd_derivatives.resize(num_examples);
Delta_Aomega.assign(num_examples, 0);
Delta_bias = 0.;
}
void SparseLinearSolver::log_variables(Logger &logger) const {
std::vector<index_t> indices;
std::vector<value_t> values;
size_t nnz_weights = l0_norm(omega);
indices.reserve(nnz_weights);
values.reserve(nnz_weights);
for (index_t k = 0; k < num_components; ++k) {
if (omega[k] != 0.) {
indices.push_back(screen_indices_map[k]);
values.push_back(omega[k]);
}
}
if (log_vectors()) {
logger.log_vector<index_t>("weight_indices", indices);
logger.log_vector<value_t>("weight_values", values);
logger.log_vector<value_t>("z", z);
}
logger.log_value<value_t>("bias", bias);
logger.log_value<value_t>("l1_penalty", l1_penalty);
logger.log_value<size_t>("number_nonzero_weights", nnz_weights);
}
void SparseLinearSolver::fill_result(value_t *result) const {
size_t ind = 0;
// fill in weights
for (index_t i = 0; i < data->num_cols(); ++i) {
result[ind++] = 0.;
}
for (size_t k = 0; k < omega.size(); ++k) {
result[screen_indices_map[k]] = omega[k];
}
result[ind++] = bias;
// fill in dual solution
for (index_t i = 0; i < num_examples; ++i) {
result[ind++] = y[i];
}
result[ind++] = obj_vals.duality_gap();
result[ind++] = obj_vals.dual_obj();
}
value_t SparseLinearSolver::compute_max_l1_penalty(
const Dataset *data, const Parameters *params) {
this->data = data;
this->params = params;
initialize(NULL);
for (index_t j = 0; j < num_components; ++j) {
ATx[j] = A_cols[j]->inner_product(x);
}
return max_abs(ATx);
}
void SparseLinearSolver::delete_loss_function() {
if (loss_function != NULL) {
delete loss_function;
loss_function = NULL;
}
}
} // namespace BlitzML
|
open import Prelude hiding (subst)
module Implicits.Substitutions.Term where
open import Implicits.Syntax.Type
open import Implicits.Syntax.Term
open import Data.Fin.Substitution
open import Data.Star as Star hiding (map)
open import Data.Star.Properties
open import Data.Vec hiding ([_])
open import Implicits.Substitutions.Type as TypeSubst using ()
module TermTypeSubst where
module TermTypeApp {T} (l : Lift T Type) where
open Lift l hiding (var)
open TypeSubst.TypeApp l renaming (_/_ to _/tp_)
infixl 8 _/_
-- Apply a type substitution to a term
_/_ : ∀ {ν μ n} → Term ν n → Sub T ν μ → Term μ n
var x / σ = var x
Λ t / σ = Λ (t / σ ↑)
λ' a t / σ = λ' (a /tp σ) (t / σ)
t [ a ] / σ = (t / σ) [ a /tp σ ]
s · t / σ = (s / σ) · (t / σ)
ρ a t / σ = ρ (a /tp σ) (t / σ)
r ⟨⟩ / σ = (r / σ) ⟨⟩
r with' e / σ = (r / σ) with' (e / σ)
open TypeSubst using (varLift; termLift; sub)
module Lifted {T} (lift : Lift T Type) {n} where
application : Application (λ ν → Term ν n) T
application = record { _/_ = TermTypeApp._/_ lift {n = n} }
open Application application public
open Lifted termLift public
-- apply a type variable substitution (renaming) to a term
_/Var_ : ∀ {ν μ n} → Term ν n → Sub Fin ν μ → Term μ n
_/Var_ = TermTypeApp._/_ varLift
-- weaken a term with an additional type variable
weaken : ∀ {ν n} → Term ν n → Term (suc ν) n
weaken t = t /Var VarSubst.wk
infix 8 _[/_]
-- shorthand for single-variable type substitutions in terms
_[/_] : ∀ {ν n} → Term (suc ν) n → Type ν → Term ν n
t [/ b ] = t / sub b
module TermTermSubst where
-- Substitutions of terms in terms
TermSub : (ℕ → ℕ → Set) → ℕ → ℕ → ℕ → Set
TermSub T ν m n = Sub (T ν) m n
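-- TermSub T ν m n: substitutions of m term variables by Ts mentioning
-- n term variables, all in a context of ν type variables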
record TermLift (T : ℕ → ℕ → Set) : Set where
infix 10 _↑tm _↑tp
field
lift : ∀ {ν n} → T ν n → Term ν n
_↑tm : ∀ {ν m n} → TermSub T ν m n → TermSub T ν (suc m) (suc n)
_↑tp : ∀ {ν m n} → TermSub T ν m n → TermSub T (suc ν) m n
module TermTermApp {T} (l : TermLift T) where
open TermLift l
infixl 8 _/_
-- Apply a term substitution to a term
_/_ : ∀ {ν m n} → Term ν m → TermSub T ν m n → Term ν n
var x / σ = lift $ lookup x σ
Λ t / σ = Λ (t / σ ↑tp)
λ' a t / σ = λ' a (t / σ ↑tm)
t [ a ] / σ = (t / σ) [ a ]
s · t / σ = (s / σ) · (t / σ)
ρ a t / σ = ρ a (t / σ ↑tm)
r ⟨⟩ / σ = (r / σ) ⟨⟩
r with' e / σ = (r / σ) with' (e / σ)
Fin′ : ℕ → ℕ → Set
Fin′ _ m = Fin m
varLift : TermLift Fin′
varLift = record { lift = var; _↑tm = VarSubst._↑; _↑tp = Prelude.id }
infixl 8 _/Var_
_/Var_ : ∀ {ν m n} → Term ν m → Sub Fin m n → Term ν n
_/Var_ = TermTermApp._/_ varLift
private
module ExpandSimple {n : ℕ} where
simple : Simple (Term n)
simple = record { var = var; weaken = λ t → t /Var VarSubst.wk }
open Simple simple public
open ExpandSimple using (_↑; simple)
open TermTypeSubst using () renaming (weaken to weakenTp)
termLift : TermLift Term
termLift = record
{ lift = Prelude.id; _↑tm = _↑ ; _↑tp = λ ρ → map weakenTp ρ }
private
module ExpandSubst {ν : ℕ} where
app : Application (Term ν) (Term ν)
app = record { _/_ = TermTermApp._/_ termLift {ν = ν} }
subst : Subst (Term ν)
subst = record
{ simple = simple
; application = app
}
open Subst subst public
open ExpandSubst public hiding (var; simple)
infix 8 _[/_]
-- Shorthand for single-variable term substitutions in terms
_[/_] : ∀ {ν n} → Term ν (suc n) → Term ν n → Term ν n
s [/ t ] = s / sub t
|
/-
Copyright (c) 2023 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import number_theory.number_field.class_number
import tactic
/-
# Class groups
In Lean we don't talk about the class group of a number field, we talk about
the class group of its integer ring.
-/
variables (R : Type) [comm_ring R] [is_domain R]
example : Type := class_group R
noncomputable example : comm_group (class_group R) := infer_instance
/-
So what is this group?
A *fractional ideal* of `R` is an `R`-submodule `J` of the field of fractions of `R`
such that there exists a nonzero element `a` of `R` such that `a * J ⊆ R`.
-/
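-- For example, the ℤ-submodule (1/2)ℤ of ℚ is a fractional ideal of ℤ,
-- since 2 * ((1/2)ℤ) = ℤ ⊆ ℤ.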
open_locale non_zero_divisors -- to get notation R⁰ for the submonoid of nonzero divisors of R
-- (of course in this case it's just R \ {0}).
-- the fractional ideals of R
example : Type := fractional_ideal (R⁰) (fraction_ring R)
-- Note that (0) is a fractional ideal with this definition. So fractional ideals aren't
-- a group under multiplication, only a monoid.
example : comm_monoid (fractional_ideal (R⁰) (fraction_ring R)) := infer_instance
-- However the invertible fractional ideals (for a number field this is the same as the
-- nonzero fractional ideals) are a group:
example : comm_group (fractional_ideal (R⁰) (fraction_ring R))ˣ := infer_instance
-- There's a group homomorphism from the units of `fraction_ring R` to the invertible
-- fractional ideals
noncomputable example :
(fraction_ring R)ˣ →* (fractional_ideal (R⁰) (fraction_ring R))ˣ :=
to_principal_ideal R _
-- And the class group of `R` is defined to be the quotient of the invertible fractional
-- ideals by the image of this map.
example : class_group R =
((fractional_ideal R⁰ (fraction_ring R))ˣ ⧸ (to_principal_ideal R (fraction_ring R)).range) := rfl
-- For a general integral domain, the class group may be infinite. But the class group
-- of the integers of a number field is known by Lean to be finite.
open number_field
noncomputable example (K : Type) [field K] [number_field K] :
fintype (class_group (ring_of_integers K)) :=
infer_instance
-- Proved in 2021 in Lean. See https://arxiv.org/abs/2102.02600 to see how it was done.
-- My PhD student Ashvni Narayanan was one of the people involved in the proof.
|
[STATEMENT]
lemma (in intruder_model) wf\<^sub>t\<^sub>r\<^sub>m_code[code_unfold]:
"wf\<^sub>t\<^sub>r\<^sub>m t = wf\<^sub>t\<^sub>r\<^sub>m' arity t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf\<^sub>t\<^sub>r\<^sub>m t = wf\<^sub>t\<^sub>r\<^sub>m' arity t
[PROOF STEP]
unfolding wf\<^sub>t\<^sub>r\<^sub>m_def wf\<^sub>t\<^sub>r\<^sub>m'_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>f T. Fun f T \<sqsubseteq> t \<longrightarrow> length T = arity f) = (\<forall>s\<in>subterms t. is_Fun s \<longrightarrow> arity (the_Fun s) = length (args s))
[PROOF STEP]
by auto |
-- -------------------------------------------------------------- [ Lens.idr ]
-- Description : Idris port of Control.Lens
-- Copyright : (c) Huw Campbell
-- --------------------------------------------------------------------- [ EOH ]
module Data.Curried
import Data.Morphisms
%access public export
data Curried : ( g : Type -> Type ) -> ( h : Type -> Type ) -> ( a : Type ) -> Type where
MkCurried : ({r : Type} -> g (a -> r) -> h r) -> Curried g h a
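-- A `Curried g h a` is a polymorphic function sending a `g`-structure of
-- continuations `g (a -> r)` to an `h`-structure of results `h r`.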
Functor g => Functor (Curried g h) where
map f (MkCurried g) = MkCurried (g . map (.f))
Functor g => Applicative (Curried g g) where
pure a = MkCurried (map (\f => f a))
(MkCurried mf) <*> (MkCurried ma) = MkCurried (ma . mf . map (.))
liftCurried : Applicative f => f a -> Curried f f a
liftCurried fa = MkCurried (<*> fa)
lowerCurried : Applicative f => Curried f g a -> g a
lowerCurried (MkCurried f) = f (pure id)
|
using Turkie
using Test
@testset "Turkie.jl" begin
# Write your tests here.
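# Minimal placeholder smoke test (assumes nothing about the Turkie API):
@test Turkie isa Module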
end
|
#include "algorithm.hpp"
#include "smpp_protocol_impl.hpp"
#include <boost/thread.hpp>
#include <boost/format.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include "smpp_pdu_context.hpp"
#include "md5.h"
#include "smpp_datacoding_toolkit.h"
#include "smppserver_runtime.h"
smpp_impl::smpp_impl()
{
boost::mutex::scoped_lock lock(process_operation_mutex_);
SMPP_HEADER_LENGTH = 16;
SMPP_SERVER_SYSID = std::string("SMPP"); //read from config file later
OTHER_ERROR_TOLERANCE = 3;//read from config file later
BIND_ERROR_TOLERANCE = 3;//read from config file later
current_session_state_ = SESSION_OPEN;
bind_error_count_ = 0;
other_error_count_ = 0;
total_qurey_ = 0;
total_submit_ = 0;
error_occured_ = false;
}
smpp_impl::~smpp_impl()
{
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s(%s:%d) leaves from me", sysid_.c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_);
clear_resource();
}
bool smpp_impl::sequencenumber_valid_no_lock(memory_buffer_ptr& bp)
{
Smpp::Uint32 sequencenumber = Smpp::get_sequence_number(bp.get()->data());
if(!Smpp::SequenceNumber::valid(sequencenumber))
{
logger_log(__FUNCTION__, LEVEL_WARNING, "%u is not a valid sequence number. ", sequencenumber);
error_occured_ = true;
return false;
}
return true;
}
void smpp_impl::other_error_occur()
{
other_error_count_++;
if(other_error_count_ >= OTHER_ERROR_TOLERANCE)
{
error_occured_ = true;
logger_log(__FUNCTION__, LEVEL_WARNING, "too many time errors, so we set the smpp connecting status to error. ");
}
}
void smpp_impl::bind_error_occur()
{
bind_error_count_++;
if(bind_error_count_ >= BIND_ERROR_TOLERANCE)
{
error_occured_ = true;
logger_log(__FUNCTION__, LEVEL_WARNING, "too many time bind errors, so we set the smpp connecting status to error. ");
}
}
Smpp::SequenceNumber smpp_impl::generate_sequence_number()
{
return Smpp::SequenceNumber(generate_sn());
}
bool smpp_impl::check_pdu_context_valid(Smpp::CommandId& commandid)
{
return smpp_pdu_context::valid(current_session_state_, commandid);
}
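// Main PDU dispatcher: validates the sequence number and session state, then
// decodes the PDU and routes it to the matching handler by command id.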
void smpp_impl::process(memory_buffer_ptr& bp)
{
boost::mutex::scoped_lock lock(process_operation_mutex_);
if(!sequencenumber_valid_no_lock(bp))
{
return;
}
Smpp::Uint32 commandid = Smpp::get_command_id(bp.get()->data());
Smpp::Uint32 sequencenumber = Smpp::get_sequence_number(bp.get()->data());
Smpp::CommandId cid(commandid);
Smpp::SequenceNumber sn(sequencenumber);
bool context_valid = this->check_pdu_context_valid(cid);
if(!context_valid)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "context invalid: current state: %d, received command id: %u ",
(int)current_session_state_, commandid);
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
if(commandid == Smpp::CommandId::BindTransceiver)
{
Smpp::BindTransceiver transceiver;
try
{
transceiver.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid BindTransceiver pdu(%s).", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
timer_manager_.bind_transceiver_received();
bind_time_ = boost::posix_time::second_clock::local_time();
process_bind_transceiver(transceiver);
}
else if(commandid == Smpp::CommandId::BindTransmitter)
{
Smpp::BindTransmitter transmitter;
try
{
transmitter.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid BindTransmitter pdu(%s).", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
timer_manager_.bind_transmitter_received();
bind_time_ = boost::posix_time::second_clock::local_time();
process_bind_transmitter(transmitter);
}
else if(commandid == Smpp::CommandId::BindReceiver)
{
Smpp::BindReceiver receiver;
try
{
receiver.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid BindReceiver pdu(%s).", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
timer_manager_.bind_receiver_received();
bind_time_ = boost::posix_time::second_clock::local_time();
process_bind_receiver(receiver);
}
else if(commandid == Smpp::CommandId::SubmitSm)
{
Smpp::SubmitSm submit_sm;
try
{
submit_sm.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid SubmitSm pdu(%s).", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
if(current_session_state_ == SESSION_BOUND_RX)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "bind type is recieve only.");
generate_and_send_nack(Smpp::CommandStatus::ESME_RUNKNOWN, sn);
other_error_occur();
return;
}
timer_manager_.submit_sm_received();
//#define TEST_SUBMIT_FAILED
#ifdef TEST_SUBMIT_FAILED
static int count = 0;
count++;
if(count % 5 == 0)
{
Smpp::SubmitSmResp pduResp(Smpp::CommandStatus::ESME_RINVNUMDESTS, sn, Smpp::MessageId(""));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
add_to_out_queue(bpresp);
return;
}
#endif
process_submitsm(submit_sm);
}
else if(commandid == Smpp::CommandId::QuerySm)
{
}
else if(commandid == Smpp::CommandId::Unbind)
{
Smpp::Unbind unbind;
try
{
unbind.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid Unbind pdu(%s).", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
timer_manager_.unbind_received();
bind_time_ = boost::posix_time::second_clock::local_time();
process_unbind(unbind);
}
else if(commandid == Smpp::CommandId::EnquireLink)
{
timer_manager_.enquire_link_received();
process_enquirelink(sn);
}
else if(commandid == Smpp::CommandId::DeliverSmResp)
{
Smpp::DeliverSmResp dlr_resp;
try
{
dlr_resp.decode(bp.get()->data());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "decode failed, receive an invalid DeliverSmResp pdu(%s).", e.what());
other_error_occur();
return ;
}
logger_log(__FUNCTION__, LEVEL_DEBUG, "receive DeliverSmResp sn(%u).", dlr_resp.sequence_number());
Smpp::Uint32 sn = dlr_resp.sequence_number();
smppserver_datasource::get_instance()->dlrresp_received((unsigned int)sn);
}
else
{
logger_log(__FUNCTION__, LEVEL_WARNING, "receive an invalid operation pdu, command id: 0x%x.",
commandid);
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return ;
}
}
void smpp_impl::generate_and_send_nack(const Smpp::CommandStatus& status, const Smpp::SequenceNumber& sn)
{
try
{
Smpp::GenericNack pduNack(status, sn);
memory_buffer_ptr bpresp(new memory_buffer(pduNack.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduNack.encode(), pduNack.command_length());
if(data_source_flag_.length() == 0)
{
private_data_source_.add_to_out_queue(bpresp);
}
else
{
protocol_data_source_manager::get_instance()->add_to_out_queue_A(data_source_flag_, bpresp);
}
} catch (Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
}
}
void smpp_impl::process_bind_transceiver(const Smpp::BindTransceiver& transceiver)
{
Smpp::SystemId sysid = transceiver.system_id();
Smpp::Password pwd = transceiver.password();
Smpp::CommandStatus resp_status = Smpp::CommandStatus::ESME_ROK;
Smpp::SequenceNumber sn = transceiver.sequence_number();
if(smppserver_runtime::get_instance()->need_authorize())
{
resp_status = vendor_runtime_->check_sysid_pw_bindip((Smpp::String)sysid, (Smpp::String)pwd, tcp_client_ip_);
}
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s wants to bind transceiver to me, from %s:%d", ((Smpp::String)sysid).c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_);
if(resp_status != Smpp::CommandStatus::ESME_ROK)
{
try
{
Smpp::BindTransceiverResp pduResp(resp_status, sn, Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
private_data_source_.add_to_out_queue(bpresp);
bind_error_occur();
logger_log(__FUNCTION__, LEVEL_WARNING, "%s binds to me failed, resons is: 0x%x", ((std::string)sysid).c_str(), (unsigned int)resp_status);
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
}
else
{
try
{
Smpp::BindTransceiverResp pduResp(Smpp::CommandStatus::ESME_ROK, transceiver.sequence_number(),
Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
data_source_flag_ = (std::string)sysid;
private_data_source_.add_to_out_queue(bpresp);
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s binds to me successfully", ((std::string)sysid).c_str());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
sysid_ = (Smpp::String)sysid;
current_session_state_ = SESSION_BOUND_TRX;
}
}
void smpp_impl::process_bind_transmitter(const Smpp::BindTransmitter& transmitter)
{
Smpp::SystemId sysid = transmitter.system_id();
Smpp::Password pwd = transmitter.password();
Smpp::CommandStatus resp_status = Smpp::CommandStatus::ESME_ROK;
Smpp::SequenceNumber sn = transmitter.sequence_number();
resp_status = vendor_runtime_->check_sysid_pw_bindip((Smpp::String)sysid, (Smpp::String)pwd,tcp_client_ip_);
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s wants to bind transmitter to me, from %s:%d", ((Smpp::String)sysid).c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_);
if(resp_status != Smpp::CommandStatus::ESME_ROK)
{
try
{
Smpp::BindTransmitterResp pduResp(resp_status, sn, Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
private_data_source_.add_to_out_queue(bpresp);
bind_error_occur();
logger_log(__FUNCTION__, LEVEL_WARNING, "%s binds to me failed, resons is: 0x%x", ((std::string)sysid).c_str(), (unsigned int)resp_status);
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
}
else
{
try
{
Smpp::BindTransmitterResp pduResp(Smpp::CommandStatus::ESME_ROK, sn,
Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
data_source_flag_ = (std::string)sysid;
private_data_source_.add_to_out_queue(bpresp);
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s binds to me successfully", ((std::string)sysid).c_str());
}
catch (Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
sysid_ = (Smpp::String)sysid;
current_session_state_ = SESSION_BOUND_TX;
}
}
void smpp_impl::process_bind_receiver(const Smpp::BindReceiver& receiver)
{
Smpp::SystemId sysid = receiver.system_id();
Smpp::Password pwd = receiver.password();
Smpp::CommandStatus resp_status = Smpp::CommandStatus::ESME_ROK;
Smpp::SequenceNumber sn = receiver.sequence_number();
resp_status = vendor_runtime_->check_sysid_pw_bindip((Smpp::String)sysid, (Smpp::String)pwd, tcp_client_ip_);
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s wants to bind receiver to me, from %s:%d", ((Smpp::String)sysid).c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_);
if(resp_status != Smpp::CommandStatus::ESME_ROK)
{
try
{
Smpp::BindReceiverResp pduResp(resp_status, sn, Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
private_data_source_.add_to_out_queue(bpresp);
bind_error_occur();
logger_log(__FUNCTION__, LEVEL_WARNING, "%s binds to me failed, resons is: 0x%x", ((std::string)sysid).c_str(), (unsigned int)resp_status);
return;
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
}
else
{
try
{
Smpp::BindReceiverResp pduResp(Smpp::CommandStatus::ESME_ROK, sn,
Smpp::SystemId(SMPP_SERVER_SYSID));
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
data_source_flag_ = (std::string)sysid;
private_data_source_.add_to_out_queue(bpresp);
logger_log(__FUNCTION__, LEVEL_DEBUG, "%s bind to me successfully", ((std::string)sysid).c_str());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is: %s.",
e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVCMDID, sn);
other_error_occur();
return;
}
sysid_ = (Smpp::String)sysid;
current_session_state_ = SESSION_BOUND_RX;
}
}
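// Converts the short message payload (starting at `offset`) to UTF-8;
// UCS2-encoded payloads are transcoded, anything else is copied verbatim.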
std::string smpp_impl::convert_to_utf8(int offset, const Smpp::ShortMessage& sm, Smpp::DataCoding::DataCodingType dc)
{
std::vector<Smpp::Uint8>::const_iterator it = sm.begin();
unsigned char* p = (unsigned char*)&*it;
std::string content;
if(dc == Smpp::DataCoding::UCS2)
{
unsigned short* ucs2_content = (unsigned short*)(p + offset);
int ucs2_content_length = (int)((sm.length() - offset) >> 1);
content = smpp_datacoding_convert_ucs2_to_utf8(ucs2_content, ucs2_content_length);
}
else
{
it += offset;
content = std::string(it, sm.end());
}
return content;
}
void smpp_impl::process_submitsm(const Smpp::SubmitSm& submit_sm)
{
Smpp::MessageId message_id(generate_message_id());
Smpp::SequenceNumber sn = submit_sm.sequence_number();
try
{
Smpp::SubmitSmResp pduResp(Smpp::CommandStatus::ESME_ROK, sn, message_id);
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
// logger_log(__FUNCTION__, LEVEL_DEBUG, "submit from: %s(%s:%d), sn: %d, response message id: %s",sysid_.c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_, (unsigned int)sn, ((std::string)message_id).c_str());
if(data_source_flag_.length() == 0)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "data_source_flag_ should not empty here");
}
else
{
protocol_data_source_manager::get_instance()->add_to_out_queue_A(data_source_flag_, bpresp);
smppserver_statistic::get_instance()->ack_sent(sysid_);
}
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is %s.", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
total_submit_++;
cb_sm_information smi;
smi.ip = tcp_client_ip_;
smi.systemid = sysid_;
smi.message_id_of_protocol_server = message_id;
Smpp::ShortMessage sm = submit_sm.short_message();
const Smpp::DataCoding::DataCodingType& dc_type = submit_sm.data_coding().get_type();
smi.datacoding_type = (unsigned char)dc_type;
smi.udhi_reference = 0;
smi.udhi_total_parts = 1;
smi.udhi_part_index = 1;
smi.submit_time_of_client = (boost::uint64_t)time(NULL);
int offset = 0;
if(sm.length() > UDHI_HEADER_TOTAL_BYTES)
{
std::vector<Smpp::Uint8>::const_iterator it = sm.begin();
unsigned char c0 = *(it++);
unsigned char c1 = *(it++);
unsigned char c2 = *(it++);
if((c0 == UDHI_0_BYTES_VALUE) && (c1 == UDHI_1_BYTES_VALUE) && (c2 == UDHI_2_BYTES_VALUE))
{
smi.udhi_reference = *(it++);
smi.udhi_total_parts = *(it++);
smi.udhi_part_index = *(it++);
offset = UDHI_HEADER_TOTAL_BYTES;
}
smi.content = convert_to_utf8(offset, sm, dc_type);
}
else
{
smi.content = convert_to_utf8(offset, sm, dc_type);
}
generate_hash_md5((unsigned char*)smi.content.c_str(), (unsigned int)smi.content.length(), smi.content_hash);
smi.dst_addr = submit_sm.destination_addr().address();
smi.src_addr = submit_sm.source_addr().address();
smi.sn_by_client = (unsigned int)submit_sm.sequence_number();
smppserver_datasource::get_instance()->smi_received(smi);
logger_log(__FUNCTION__, LEVEL_DEBUG, "submit from: %s(%s:%d), sn: %d, response message id: %s, content: %s",sysid_.c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_, (unsigned int)sn, ((std::string)message_id).c_str(), smi.content.c_str());
}
std::string smpp_impl::format_to_AppendixB_time(time_t t)
{
struct tm* tt = localtime(&t);
int year = tt->tm_year;
year += 1900;
std::string syear = datatype_convert<std::string>(year);
syear = syear.substr(syear.length() - 2, 2);
std::stringstream ss;
ss<<syear<<format_uint32_to_lenght(tt->tm_mon, 2)<<format_uint32_to_lenght(tt->tm_mday, 2)
<<format_uint32_to_lenght(tt->tm_hour, 2)<<format_uint32_to_lenght(tt->tm_min, 2);
std::string s = ss.str();
return s;
}
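// Pops one pending delivery receipt for this system id (if any) and sends it
// as a DeliverSm whose text follows the SMPP v3.4 Appendix B receipt format.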
bool smpp_impl::send_delivery_sm()
{
cb_sm_information smi;
bool peek_dlr = smppserver_datasource::get_instance()->peek_dlr(sysid_, smi);
if(peek_dlr)
{
try
{
Smpp::DeliverSm pdu;
pdu.destination_addr(
Smpp::SmeAddress(Smpp::Ton(Smpp::Ton::International),
Smpp::Npi(Smpp::Npi::E164),
Smpp::Address(smi.src_addr)));
pdu.source_addr(
Smpp::SmeAddress(Smpp::Ton(Smpp::Ton::International),
Smpp::Npi(Smpp::Npi::E164),
Smpp::Address(smi.dst_addr)));
pdu.sequence_number(smi.sn_by_protocol_server);
pdu.esm_class(0x4);
pdu.protocol_id(0);
pdu.priority_flag(0);
pdu.schedule_delivery_time("");
pdu.validity_period("");
pdu.registered_delivery(0);
pdu.data_coding(Smpp::DataCoding::Alphabet);
pdu.sm_default_msg_id(0);
std::stringstream ss;
std::string message_id = smi.message_id_of_protocol_server;
if(smppserver_runtime::get_instance()->use_hex_decimal_message_id())
{
message_id = format_hex_string_to_decimal_string(message_id, 10);
//logger_log(__FUNCTION__, LEVEL_DEBUG, "prepare to send dlr to: %s(%s:%d), hex message id: %s, decimal message id: %s", sysid_.c_str(), tcp_client_ip_.c_str(), (int)tcp_client_port_, smi.message_id.c_str(), message_id.c_str());
}
smi.delivery_time_of_client = (boost::uint64_t)time(NULL);
ss<<"id:"<<message_id<<" "
<<"sub:001"<<" "<<"dlvrd:001"<<" "
<<"submit date:"<<format_to_AppendixB_time(smi.submit_time_of_client)<<" "
<<"done date:"<<format_to_AppendixB_time(smi.delivery_time_of_client)<<" "
<<"stat:"<<smi.error_status<<" "<<"err:"<<format_uint32_to_lenght(smi.error_code, 3)<<" "<<"text:000";
pdu.short_message(reinterpret_cast<const Smpp::Uint8*>(ss.str().data()),
ss.str().length());
Smpp::Uint8* d = (Smpp::Uint8*)pdu.encode();
memory_buffer_ptr bp;
bp.reset(new memory_buffer(pdu.command_length()));
memcpy((void*)bp.get()->data(), d, pdu.command_length());
if(data_source_flag_.length() == 0)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "data_source_flag_ should not empty here");
}
else
{
protocol_data_source_manager::get_instance()->add_to_out_queue_B(data_source_flag_, bp);
}
logger_log(__FUNCTION__, LEVEL_DEBUG, "send dlr to: %s, sn: %d, dst: %s, dlr content: %s", sysid_.c_str(), smi.sn_by_client, smi.dst_addr.c_str(), ss.str().c_str());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is %s.", e.what());
}
return true;
}
return false;
}
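// Pops one pending mobile-originated message for this system id (if any) and
// sends it as a DeliverSm, transcoding non-ASCII content to UCS2.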
bool smpp_impl::send_mo()
{
cb_sm_information smi;
bool peek_mo = smppserver_datasource::get_instance()->peek_mo(sysid_, smi);
if(peek_mo)
{
try
{
Smpp::DeliverSm pdu;
pdu.destination_addr(
Smpp::SmeAddress(Smpp::Ton(Smpp::Ton::International),
Smpp::Npi(Smpp::Npi::E164),
Smpp::Address(smi.dst_addr)));
pdu.source_addr(
Smpp::SmeAddress(Smpp::Ton(Smpp::Ton::International),
Smpp::Npi(Smpp::Npi::E164),
Smpp::Address(smi.src_addr)));
pdu.sequence_number(smi.sn_by_protocol_server);
pdu.esm_class(0);
pdu.protocol_id(0);
pdu.priority_flag(0);
pdu.schedule_delivery_time("");
pdu.validity_period("");
pdu.registered_delivery(0);
pdu.sm_default_msg_id(0);
Smpp::DataCoding dc = Smpp::DataCoding::Alphabet;
memory_buffer_ptr content;
int content_utf8_len = (int)smi.content.length();
content.reset(new memory_buffer(content_utf8_len * 4 + 2 + UDHI_HEADER_TOTAL_BYTES));
int ucs2_content_length = 0;
unsigned char* ucs2_content = (unsigned char*)content->data();
if(smpp_datacoding_is_pure_ascii(smi.content.c_str()))
{
dc = Smpp::DataCoding::Alphabet;
memcpy(ucs2_content, smi.content.c_str(), smi.content.length());
ucs2_content_length = (int)smi.content.length();
}
else
{
dc = Smpp::DataCoding::UCS2;
smpp_datacoding_convert_utf8_to_ucs2((unsigned char*)smi.content.c_str(), ucs2_content, ucs2_content_length);
}
pdu.data_coding(dc);
pdu.short_message(reinterpret_cast<const Smpp::Uint8*>(content->data()), ucs2_content_length);
Smpp::Uint8* d = (Smpp::Uint8*)pdu.encode();
memory_buffer_ptr bp;
bp.reset(new memory_buffer(pdu.command_length()));
memcpy((void*)bp.get()->data(), d, pdu.command_length());
if(data_source_flag_.length() == 0)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "data_source_flag_ should not empty here");
}
else
{
protocol_data_source_manager::get_instance()->add_to_out_queue_B(data_source_flag_, bp);
}
logger_log(__FUNCTION__, LEVEL_DEBUG, "send mo to: %s, sn: %d, dst: %s, mo content: %s", smi.systemid.c_str(), smi.sn_by_protocol_server, smi.dst_addr.c_str(), smi.content.c_str());
}
catch(Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is %s.", e.what());
}
return true;
}
return false;
}
void smpp_impl::process_enquirelink(const Smpp::SequenceNumber& sn)
{
try
{
Smpp::EnquireLinkResp pduResp(Smpp::CommandStatus::ESME_ROK, sn);
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
private_data_source_.add_to_out_queue(bpresp);
}
catch (Smpp::Error& e)
{
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is %s.", e.what());
}
}
void smpp_impl::process_unbind(const Smpp::Unbind& unbind)
{
current_session_state_ = SESSION_CLOSED;
Smpp::SequenceNumber sn = unbind.sequence_number();
try
{
Smpp::UnbindResp pduResp(Smpp::CommandStatus::ESME_ROK,sn);
memory_buffer_ptr bpresp(new memory_buffer(pduResp.command_length()));
memcpy((void*)bpresp.get()->data(), (void*)pduResp.encode(), pduResp.command_length());
private_data_source_.add_to_out_queue(bpresp);
}
catch (Smpp::Error& e)
{
logger_log(__FUNCTION__, LEVEL_WARNING, "generate pdu failed, reason is %s.", e.what());
generate_and_send_nack(Smpp::CommandStatus::ESME_RINVMSGLEN, sn);
other_error_occur();
return ;
}
}
bool smpp_impl::timer_timeout()
{
SMPP_TIMER_EXPIRED_ACTION action;
return timer_manager_.expired(action);
}
bool smpp_impl::need_send()
{
if(is_bound())
{
send_delivery_sm();
send_mo();
}
bool need = private_data_source_.need_send();
if(data_source_flag_.length() > 0)
{
if(smpp_pdu_context::is_state_bound_txable(current_session_state_))
{
need |= protocol_data_source_manager::get_instance()->need_send_A(get_data_source_flag());
}
if(smpp_pdu_context::is_state_bound_rxable(current_session_state_))
{
need |= protocol_data_source_manager::get_instance()->need_send_B(get_data_source_flag());
}
}
return need;
}
bool smpp_impl::need_process()
{
bool need = private_data_source_.need_process();
if(data_source_flag_.length() > 0)
{
need |= protocol_data_source_manager::get_instance()->need_process(get_data_source_flag());
}
return need;
}
int smpp_impl::clear_resource()
{
private_data_source_.clear_packets();
if(data_source_flag_.length() > 0)
{
//do not clear the packets in protocol_data_source_manager
//protocol_data_source_manager::get_instance()->clear_packets(data_source_flag_);
}
return 0;
}
bool smpp_impl::get_from_out_queue(memory_buffer_ptr& bp)
{
bool has = private_data_source_.get_from_out_queue(bp);
if(data_source_flag_.length() == 0)
{
return has;
}
if(!has)
{
if(smpp_pdu_context::is_state_bound_txable(current_session_state_))
{
has = protocol_data_source_manager::get_instance()->get_from_out_queue_A(data_source_flag_, bp);
}
if(has)
{
return has;
}
if(smpp_pdu_context::is_state_bound_rxable(current_session_state_))
{
has = protocol_data_source_manager::get_instance()->get_from_out_queue_B(data_source_flag_, bp);
}
}
return has;
}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* The B97 function g *)
b97_g := (gamma, cc, x) -> add(cc[i]*(gamma*x^2/(1 + gamma*x^2))^(i-1), i=1..5):
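(* Explicitly, g(gamma, c, x) = sum_{i=1..5} c[i]*u^(i-1) with u = gamma*x^2/(1 + gamma*x^2),
   so g equals c[1] at x = 0 and tends to c[1] + ... + c[5] as x -> infinity *)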
(* The parallel and perpendicular components of the energy *)
b97_fpar := (lda_func, mgamma, cc, rs, z, xs0, xs1) ->
+ lda_stoll_par(lda_func, rs, z) * b97_g(mgamma, cc, xs0)
+ lda_stoll_par(lda_func, rs, -z) * b97_g(mgamma, cc, xs1):
b97_fperp := (lda_func, mgamma, cc, rs, z, xs0, xs1) ->
lda_stoll_perp(lda_func, rs, z) * b97_g(mgamma, cc, sqrt(xs0^2 + xs1^2)/sqrt(2)):
b97_f := (lda_func, gamma_ss, cc_ss, gamma_ab, cc_ab, rs, z, xs0, xs1) ->
+ b97_fpar (lda_func, gamma_ss, cc_ss, rs, z, xs0, xs1)
+ b97_fperp(lda_func, gamma_ab, cc_ab, rs, z, xs0, xs1):
|
(* Title: HOL/Auth/n_flash_lemma_on_inv__101.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash Protocol Case Study*}
theory n_flash_lemma_on_inv__101 imports n_flash_base
begin
section{*All lemmas on causal relation between inv__101 and some rule r*}
lemma n_PI_Remote_GetVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__101:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_ShWb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__101:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__101:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') src) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__101:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__101:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__101:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__101:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__101:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__101:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__101:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__101:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_FAckVsinv__101:
assumes a1: "(r=n_NI_FAck )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_ShWbVsinv__101:
assumes a1: "(r=n_NI_ShWb N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__101 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__0Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__101:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__101:
assumes a1: "\<exists> src data. src\<le>N\<and>data\<le>N\<and>r=n_Store src data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__1Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__0Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_Store_HomeVsinv__101:
assumes a1: "\<exists> data. data\<le>N\<and>r=n_Store_Home data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__101:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__101:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Put_HomeVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__101:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__101:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_GetX_Nak_HomeVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutXAcksDoneVsinv__101:
assumes a1: "r=n_NI_Local_PutXAcksDone " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Nak_HomeVsinv__101:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__101:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutVsinv__101:
assumes a1: "r=n_NI_Local_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__101:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_GetVsinv__101:
assumes a1: "r=n_PI_Local_Get_Get " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_HomeVsinv__101:
assumes a1: "r=n_NI_Nak_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__101:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__101:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__101 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Formal statement is: lemma contour_integral_local_primitive: fixes f :: "complex \<Rightarrow> complex" assumes g: "valid_path g" "path_image g \<subseteq> S" and dh: "\<And>x. x \<in> S \<Longrightarrow> \<exists>d h. 0 < d \<and> (\<forall>y. norm(y - x) < d \<longrightarrow> (h has_field_derivative f y) (at y within S))" shows "f contour_integrable_on g" Informal statement is: If $f$ has a local primitive near every point of a set $S$ (i.e., around each point there is an $h$ with $h' = f$) and $g$ is a valid path whose image lies in $S$, then $f$ is contour-integrable along $g$. |
[STATEMENT]
lemma N_col_list_map: "j < \<b> \<Longrightarrow> col N j = map_vec (\<lambda> x . if (x \<in> (\<B>s ! j)) then 1 else 0) (vec_of_list \<V>s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. j < \<b> \<Longrightarrow> col N j = map_vec (\<lambda>x. if x \<in> \<B>s ! j then 1 else 0) (vec_of_list \<V>s)
[PROOF STEP]
by (metis inc_mat_col_list_map blocks_list_length) |
main : IO ()
-- printLn shows its argument, so this prints the quoted literal "one\ntwo";
-- use putStrLn to emit an actual newline instead.
main = printLn "one\ntwo"
|
% Set the default paper to a4paper and the fontsize to 10pt
\documentclass[a4paper, 10pt]{article}
% Margins of the cheatsheet
\usepackage[landscape, margin=0pt]{geometry}
\advance\topmargin 5mm
% Remove page numbering
\pagenumbering{gobble}
% For importing images
\usepackage{graphicx}
% For using custom font compile with xelatex
\usepackage{fontspec}
\setmainfont{Hack Nerd Font}
% Section formatting
% \colorbox{shadecolor}
\usepackage{titlesec}
\titlespacing{\section}{0pt}{1mm}{1mm}
% Background color for sections
\usepackage{xcolor}
\definecolor{shadecolor}{RGB}{200,200,200}
\titleformat{\section} % command
[block] % shape
{\large\bfseries} % format
{} % label
{} % sep
{} % before-code
[] % after-code
\newcommand{\mySection}[1]{
\noindent\colorbox{shadecolor}{
\parbox{\textwidth}{\section*{#1}}
}
}
% vertical spacing of the paragraphs
\setlength{\parskip}{2mm}
% indentation inside minipage not working
\newcommand{\tab}{\hspace*{4mm}}
% package for syntax
\usepackage{listings}
% space between sections: \hspace{5mm}
% new line: \par
\begin{document}
\begin{center}
{\huge\textbf{Python 3 Cheatsheet}}\\
{\large By Fernando Chavez Riquelme}
\end{center}
\begin{minipage}[t]{0.2\textwidth}
\mySection{Basics}
\begin{lstlisting}[language=Python]
# Comments, no multiline
print('')
varTrue = True
varFalse = False
\end{lstlisting}
\end{minipage}
\hspace{5mm}
\begin{minipage}[t]{0.13\textwidth}
\mySection{Numbers}
float division /\\
floor division //
\begin{lstlisting}[language=Python]
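7 / 2 # 3.5 (float division)
7 // 2 # 3 (floor division)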
sq = 2**2 # 4 (avoid shadowing built-in pow)
int("9") # 9
a, b = 1, 2 # multiple assignment
a, b = b, a # swap
\end{lstlisting}
\end{minipage}
\hspace{5mm}
\begin{minipage}[t]{0.26\textwidth}
\mySection{Strings}
Strings are immutable
\begin{lstlisting}[language=Python]
# Single and double quotes are equivalent
s = 'py'+'thon str' # concatenation
len(s) # 10
nine = str(9)
sLower = s.lower()
s.count('p') # 1 occurrence of substring
s.split(' ') # ['python', 'str']
':'.join(['A','B','C']) # 'A:B:C'
s.replace('str', 'int') # 'python int'
\end{lstlisting}
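Since strings cannot be edited in place, build a new one instead:
\begin{lstlisting}[language=Python]
s[0] = 'P' # TypeError
s = 'P' + s[1:] # ok: new string
\end{lstlisting}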
\end{minipage}
\par
\begin{minipage}[t]{0.33\textwidth}
\mySection{Sets}
Like lists but no duplicates
\begin{lstlisting}[language=Python]
emptySet = set() # {} makes a dict
a = {'a', 'o', 'p'} # don't shadow set()
b = {'b', 'o'}
'a' in a # True
a - b # letters in a but not in b
a | b # letters in a or b or both
a & b # letters in both a and b
a ^ b # letters in a or b but not both
\end{lstlisting}
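Building a set from any iterable drops duplicates:
\begin{lstlisting}[language=Python]
set('banana') # {'b', 'a', 'n'}
\end{lstlisting}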
Comprehension is also supported
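For example:
\begin{lstlisting}[language=Python]
{x**2 for x in range(4)} # {0, 1, 4, 9}
\end{lstlisting}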
\end{minipage}
\end{document}
|
function cmap = classpal
% CLASSPAL  Palette of visually distinguishable class colors.
%   Chooses the background the colors must stand out against (the axes
%   background if the axes are visible, otherwise the figure background)
%   and delegates to distinguishable_colors, which must be on the path
%   (it is not a MATLAB built-in).
nClasses = 51; % number of class colors to generate
hFig = gcf;
hAxes = gca;
if strcmpi(hAxes.Visible, 'on')
bgColor = hAxes.Color; % visible axes: contrast against its background
else
bgColor = hFig.Color; % hidden axes: contrast against the figure instead
end
cmap = distinguishable_colors(nClasses, bgColor);
end |
module Web.Internal.StreamsPrim
import JS
import Web.Internal.Types
--------------------------------------------------------------------------------
-- Interfaces
--------------------------------------------------------------------------------
namespace ByteLengthQueuingStrategy
export
%foreign "browser:lambda:(a)=> new ByteLengthQueuingStrategy(a)"
prim__new : QueuingStrategyInit -> PrimIO ByteLengthQueuingStrategy
export
%foreign "browser:lambda:x=>x.highWaterMark"
prim__highWaterMark : ByteLengthQueuingStrategy -> PrimIO Double
export
%foreign "browser:lambda:x=>x.size"
prim__size : ByteLengthQueuingStrategy -> PrimIO Function
namespace CountQueuingStrategy
export
%foreign "browser:lambda:(a)=> new CountQueuingStrategy(a)"
prim__new : QueuingStrategyInit -> PrimIO CountQueuingStrategy
export
%foreign "browser:lambda:x=>x.highWaterMark"
prim__highWaterMark : CountQueuingStrategy -> PrimIO Double
export
%foreign "browser:lambda:x=>x.size"
prim__size : CountQueuingStrategy -> PrimIO Function
namespace ReadableByteStreamController
export
%foreign "browser:lambda:x=>x.byobRequest"
prim__byobRequest : ReadableByteStreamController
-> PrimIO (Nullable ReadableStreamBYOBRequest)
export
%foreign "browser:lambda:x=>x.desiredSize"
prim__desiredSize : ReadableByteStreamController -> PrimIO (Nullable Double)
export
%foreign "browser:lambda:x=>x.close()"
prim__close : ReadableByteStreamController -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.enqueue(a)"
prim__enqueue : ReadableByteStreamController
-> Union10 Int8Array
Int16Array
Int32Array
UInt8Array
UInt16Array
UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView
-> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.error(a)"
prim__error : ReadableByteStreamController -> UndefOr AnyPtr -> PrimIO ()
namespace ReadableStream
export
%foreign "browser:lambda:(a,b)=> new ReadableStream(a,b)"
prim__new : UndefOr Object -> UndefOr QueuingStrategy -> PrimIO ReadableStream
export
%foreign "browser:lambda:x=>x.locked"
prim__locked : ReadableStream -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.cancel(a)"
prim__cancel : ReadableStream -> UndefOr AnyPtr -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:(x,a)=>x.getReader(a)"
prim__getReader : ReadableStream
-> UndefOr ReadableStreamGetReaderOptions
-> PrimIO (Union2 ReadableStreamDefaultReader
ReadableStreamBYOBReader)
export
%foreign "browser:lambda:(x,a,b)=>x.pipeThrough(a,b)"
prim__pipeThrough : ReadableStream
-> ReadableWritablePair
-> UndefOr StreamPipeOptions
-> PrimIO ReadableStream
export
%foreign "browser:lambda:(x,a,b)=>x.pipeTo(a,b)"
prim__pipeTo : ReadableStream
-> WritableStream
-> UndefOr StreamPipeOptions
-> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.tee()"
prim__tee : ReadableStream -> PrimIO (Array ReadableStream)
namespace ReadableStreamBYOBReader
export
%foreign "browser:lambda:(a)=> new ReadableStreamBYOBReader(a)"
prim__new : ReadableStream -> PrimIO ReadableStreamBYOBReader
export
%foreign "browser:lambda:(x,a)=>x.read(a)"
prim__read : ReadableStreamBYOBReader
-> Union10 Int8Array
Int16Array
Int32Array
UInt8Array
UInt16Array
UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView
-> PrimIO (Promise ReadableStreamBYOBReadResult)
export
%foreign "browser:lambda:x=>x.releaseLock()"
prim__releaseLock : ReadableStreamBYOBReader -> PrimIO ()
namespace ReadableStreamBYOBRequest
export
%foreign "browser:lambda:x=>x.view"
prim__view : ReadableStreamBYOBRequest
-> PrimIO (Nullable (Union10 Int8Array
Int16Array
Int32Array
UInt8Array
UInt16Array
UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView))
export
%foreign "browser:lambda:(x,a)=>x.respond(a)"
prim__respond : ReadableStreamBYOBRequest -> JSBits64 -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.respondWithNewView(a)"
prim__respondWithNewView : ReadableStreamBYOBRequest
-> Union10 Int8Array
Int16Array
Int32Array
UInt8Array
UInt16Array
UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView
-> PrimIO ()
namespace ReadableStreamDefaultController
export
%foreign "browser:lambda:x=>x.desiredSize"
prim__desiredSize : ReadableStreamDefaultController
-> PrimIO (Nullable Double)
export
%foreign "browser:lambda:x=>x.close()"
prim__close : ReadableStreamDefaultController -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.enqueue(a)"
prim__enqueue : ReadableStreamDefaultController -> UndefOr AnyPtr -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.error(a)"
prim__error : ReadableStreamDefaultController -> UndefOr AnyPtr -> PrimIO ()
namespace ReadableStreamDefaultReader
export
%foreign "browser:lambda:(a)=> new ReadableStreamDefaultReader(a)"
prim__new : ReadableStream -> PrimIO ReadableStreamDefaultReader
export
%foreign "browser:lambda:x=>x.read()"
prim__read : ReadableStreamDefaultReader
-> PrimIO (Promise ReadableStreamDefaultReadResult)
export
%foreign "browser:lambda:x=>x.releaseLock()"
prim__releaseLock : ReadableStreamDefaultReader -> PrimIO ()
namespace TransformStream
export
%foreign "browser:lambda:(a,b,c)=> new TransformStream(a,b,c)"
prim__new : UndefOr Object
-> UndefOr QueuingStrategy
-> UndefOr QueuingStrategy
-> PrimIO TransformStream
export
%foreign "browser:lambda:x=>x.readable"
prim__readable : TransformStream -> PrimIO ReadableStream
export
%foreign "browser:lambda:x=>x.writable"
prim__writable : TransformStream -> PrimIO WritableStream
namespace TransformStreamDefaultController
export
%foreign "browser:lambda:x=>x.desiredSize"
prim__desiredSize : TransformStreamDefaultController
-> PrimIO (Nullable Double)
export
%foreign "browser:lambda:(x,a)=>x.enqueue(a)"
prim__enqueue : TransformStreamDefaultController
-> UndefOr AnyPtr
-> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.error(a)"
prim__error : TransformStreamDefaultController -> UndefOr AnyPtr -> PrimIO ()
export
%foreign "browser:lambda:x=>x.terminate()"
prim__terminate : TransformStreamDefaultController -> PrimIO ()
namespace WritableStream
export
%foreign "browser:lambda:(a,b)=> new WritableStream(a,b)"
prim__new : UndefOr Object -> UndefOr QueuingStrategy -> PrimIO WritableStream
export
%foreign "browser:lambda:x=>x.locked"
prim__locked : WritableStream -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.abort(a)"
prim__abort : WritableStream -> UndefOr AnyPtr -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.close()"
prim__close : WritableStream -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.getWriter()"
prim__getWriter : WritableStream -> PrimIO WritableStreamDefaultWriter
namespace WritableStreamDefaultController
export
%foreign "browser:lambda:(x,a)=>x.error(a)"
prim__error : WritableStreamDefaultController -> UndefOr AnyPtr -> PrimIO ()
namespace WritableStreamDefaultWriter
export
%foreign "browser:lambda:(a)=> new WritableStreamDefaultWriter(a)"
prim__new : WritableStream -> PrimIO WritableStreamDefaultWriter
export
%foreign "browser:lambda:x=>x.closed"
prim__closed : WritableStreamDefaultWriter -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.desiredSize"
prim__desiredSize : WritableStreamDefaultWriter -> PrimIO (Nullable Double)
export
%foreign "browser:lambda:x=>x.ready"
prim__ready : WritableStreamDefaultWriter -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:(x,a)=>x.abort(a)"
prim__abort : WritableStreamDefaultWriter
-> UndefOr AnyPtr
-> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.close()"
prim__close : WritableStreamDefaultWriter -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:x=>x.releaseLock()"
prim__releaseLock : WritableStreamDefaultWriter -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.write(a)"
prim__write : WritableStreamDefaultWriter
-> UndefOr AnyPtr
-> PrimIO (Promise Undefined)
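-- A small usage sketch, not part of the generated bindings: the raw
-- primitives above are intended to be run through `primIO`. This acquires
-- a writer for a stream already in scope and releases it again; handling
-- of the `UndefOr` argument of `prim__write` is deliberately left out.
withWriter : WritableStream -> IO ()
withWriter ws = do
  w <- primIO $ WritableStreamDefaultWriter.prim__new ws
  primIO $ WritableStreamDefaultWriter.prim__releaseLock w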
--------------------------------------------------------------------------------
-- Mixins
--------------------------------------------------------------------------------
namespace GenericTransformStream
export
%foreign "browser:lambda:x=>x.readable"
prim__readable : GenericTransformStream -> PrimIO ReadableStream
export
%foreign "browser:lambda:x=>x.writable"
prim__writable : GenericTransformStream -> PrimIO WritableStream
namespace ReadableStreamGenericReader
export
%foreign "browser:lambda:x=>x.closed"
prim__closed : ReadableStreamGenericReader -> PrimIO (Promise Undefined)
export
%foreign "browser:lambda:(x,a)=>x.cancel(a)"
prim__cancel : ReadableStreamGenericReader
-> UndefOr AnyPtr
-> PrimIO (Promise Undefined)
--------------------------------------------------------------------------------
-- Dictionaries
--------------------------------------------------------------------------------
namespace QueuingStrategy
export
%foreign "browser:lambda:(a,b)=> {highWaterMark: a,size: b}"
prim__new : UndefOr Double
-> UndefOr QueuingStrategySize
-> PrimIO QueuingStrategy
export
%foreign "browser:lambda:x=>x.highWaterMark"
prim__highWaterMark : QueuingStrategy -> PrimIO (UndefOr Double)
export
%foreign "browser:lambda:(x,v)=>{x.highWaterMark = v}"
prim__setHighWaterMark : QueuingStrategy -> UndefOr Double -> PrimIO ()
export
%foreign "browser:lambda:x=>x.size"
prim__size : QueuingStrategy -> PrimIO (UndefOr QueuingStrategySize)
export
%foreign "browser:lambda:(x,v)=>{x.size = v}"
prim__setSize : QueuingStrategy -> UndefOr QueuingStrategySize -> PrimIO ()
namespace QueuingStrategyInit
export
%foreign "browser:lambda:(a)=> {highWaterMark: a}"
prim__new : Double -> PrimIO QueuingStrategyInit
export
%foreign "browser:lambda:x=>x.highWaterMark"
prim__highWaterMark : QueuingStrategyInit -> PrimIO Double
export
%foreign "browser:lambda:(x,v)=>{x.highWaterMark = v}"
prim__setHighWaterMark : QueuingStrategyInit -> Double -> PrimIO ()
namespace ReadableStreamBYOBReadResult
export
%foreign "browser:lambda:(a,b)=> {value: a,done: b}"
prim__new : UndefOr (Union10 Int8Array
Int16Array
Int32Array
UInt8Array
                               UInt16Array
                               UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView)
-> UndefOr Boolean
-> PrimIO ReadableStreamBYOBReadResult
export
%foreign "browser:lambda:x=>x.done"
prim__done : ReadableStreamBYOBReadResult -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.done = v}"
prim__setDone : ReadableStreamBYOBReadResult -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.value"
prim__value : ReadableStreamBYOBReadResult
-> PrimIO (UndefOr (Union10 Int8Array
Int16Array
Int32Array
UInt8Array
                                       UInt16Array
                                       UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView))
export
%foreign "browser:lambda:(x,v)=>{x.value = v}"
prim__setValue : ReadableStreamBYOBReadResult
-> UndefOr (Union10 Int8Array
Int16Array
Int32Array
UInt8Array
                              UInt16Array
                              UInt32Array
UInt8ClampedArray
Float32Array
Float64Array
DataView)
-> PrimIO ()
namespace ReadableStreamDefaultReadResult
export
%foreign "browser:lambda:(a,b)=> {value: a,done: b}"
prim__new : UndefOr AnyPtr
-> UndefOr Boolean
-> PrimIO ReadableStreamDefaultReadResult
export
%foreign "browser:lambda:x=>x.done"
prim__done : ReadableStreamDefaultReadResult -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.done = v}"
prim__setDone : ReadableStreamDefaultReadResult
-> UndefOr Boolean
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.value"
prim__value : ReadableStreamDefaultReadResult -> PrimIO (UndefOr AnyPtr)
export
%foreign "browser:lambda:(x,v)=>{x.value = v}"
prim__setValue : ReadableStreamDefaultReadResult
-> UndefOr AnyPtr
-> PrimIO ()
namespace ReadableStreamGetReaderOptions
export
%foreign "browser:lambda:(a)=> {mode: a}"
prim__new : UndefOr String -> PrimIO ReadableStreamGetReaderOptions
export
%foreign "browser:lambda:x=>x.mode"
prim__mode : ReadableStreamGetReaderOptions -> PrimIO (UndefOr String)
export
%foreign "browser:lambda:(x,v)=>{x.mode = v}"
prim__setMode : ReadableStreamGetReaderOptions -> UndefOr String -> PrimIO ()
namespace ReadableStreamIteratorOptions
export
%foreign "browser:lambda:(a)=> {preventCancel: a}"
prim__new : UndefOr Boolean -> PrimIO ReadableStreamIteratorOptions
export
%foreign "browser:lambda:x=>x.preventCancel"
prim__preventCancel : ReadableStreamIteratorOptions
-> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.preventCancel = v}"
prim__setPreventCancel : ReadableStreamIteratorOptions
-> UndefOr Boolean
-> PrimIO ()
namespace ReadableWritablePair
export
%foreign "browser:lambda:(a,b)=> {readable: a,writable: b}"
prim__new : ReadableStream -> WritableStream -> PrimIO ReadableWritablePair
export
%foreign "browser:lambda:x=>x.readable"
prim__readable : ReadableWritablePair -> PrimIO ReadableStream
export
%foreign "browser:lambda:(x,v)=>{x.readable = v}"
prim__setReadable : ReadableWritablePair -> ReadableStream -> PrimIO ()
export
%foreign "browser:lambda:x=>x.writable"
prim__writable : ReadableWritablePair -> PrimIO WritableStream
export
%foreign "browser:lambda:(x,v)=>{x.writable = v}"
prim__setWritable : ReadableWritablePair -> WritableStream -> PrimIO ()
namespace StreamPipeOptions
export
%foreign "browser:lambda:(a,b,c,d)=> {preventClose: a,preventAbort: b,preventCancel: c,signal: d}"
prim__new : UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr AbortSignal
-> PrimIO StreamPipeOptions
export
%foreign "browser:lambda:x=>x.preventAbort"
prim__preventAbort : StreamPipeOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.preventAbort = v}"
prim__setPreventAbort : StreamPipeOptions -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.preventCancel"
prim__preventCancel : StreamPipeOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.preventCancel = v}"
prim__setPreventCancel : StreamPipeOptions -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.preventClose"
prim__preventClose : StreamPipeOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.preventClose = v}"
prim__setPreventClose : StreamPipeOptions -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.signal"
prim__signal : StreamPipeOptions -> PrimIO (UndefOr AbortSignal)
export
%foreign "browser:lambda:(x,v)=>{x.signal = v}"
prim__setSignal : StreamPipeOptions -> UndefOr AbortSignal -> PrimIO ()
namespace Transformer
export
%foreign "browser:lambda:(a,b,c,d,e)=> {start: a,transform: b,flush: c,readableType: d,writableType: e}"
prim__new : UndefOr TransformerStartCallback
-> UndefOr TransformerTransformCallback
-> UndefOr TransformerFlushCallback
-> UndefOr AnyPtr
-> UndefOr AnyPtr
-> PrimIO Transformer
export
%foreign "browser:lambda:x=>x.flush"
prim__flush : Transformer -> PrimIO (UndefOr TransformerFlushCallback)
export
%foreign "browser:lambda:(x,v)=>{x.flush = v}"
prim__setFlush : Transformer -> UndefOr TransformerFlushCallback -> PrimIO ()
export
%foreign "browser:lambda:x=>x.readableType"
prim__readableType : Transformer -> PrimIO (UndefOr AnyPtr)
export
%foreign "browser:lambda:(x,v)=>{x.readableType = v}"
prim__setReadableType : Transformer -> UndefOr AnyPtr -> PrimIO ()
export
%foreign "browser:lambda:x=>x.start"
prim__start : Transformer -> PrimIO (UndefOr TransformerStartCallback)
export
%foreign "browser:lambda:(x,v)=>{x.start = v}"
prim__setStart : Transformer -> UndefOr TransformerStartCallback -> PrimIO ()
export
%foreign "browser:lambda:x=>x.transform"
prim__transform : Transformer -> PrimIO (UndefOr TransformerTransformCallback)
export
%foreign "browser:lambda:(x,v)=>{x.transform = v}"
prim__setTransform : Transformer
-> UndefOr TransformerTransformCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.writableType"
prim__writableType : Transformer -> PrimIO (UndefOr AnyPtr)
export
%foreign "browser:lambda:(x,v)=>{x.writableType = v}"
prim__setWritableType : Transformer -> UndefOr AnyPtr -> PrimIO ()
namespace UnderlyingSink
export
%foreign "browser:lambda:(a,b,c,d,e)=> {start: a,write: b,close: c,abort: d,type: e}"
prim__new : UndefOr UnderlyingSinkStartCallback
-> UndefOr UnderlyingSinkWriteCallback
-> UndefOr UnderlyingSinkCloseCallback
-> UndefOr UnderlyingSinkAbortCallback
-> UndefOr AnyPtr
-> PrimIO UnderlyingSink
export
%foreign "browser:lambda:x=>x.abort"
prim__abort : UnderlyingSink -> PrimIO (UndefOr UnderlyingSinkAbortCallback)
export
%foreign "browser:lambda:(x,v)=>{x.abort = v}"
prim__setAbort : UnderlyingSink
-> UndefOr UnderlyingSinkAbortCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.close"
prim__close : UnderlyingSink -> PrimIO (UndefOr UnderlyingSinkCloseCallback)
export
%foreign "browser:lambda:(x,v)=>{x.close = v}"
prim__setClose : UnderlyingSink
-> UndefOr UnderlyingSinkCloseCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.start"
prim__start : UnderlyingSink -> PrimIO (UndefOr UnderlyingSinkStartCallback)
export
%foreign "browser:lambda:(x,v)=>{x.start = v}"
prim__setStart : UnderlyingSink
-> UndefOr UnderlyingSinkStartCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.type"
prim__type : UnderlyingSink -> PrimIO (UndefOr AnyPtr)
export
%foreign "browser:lambda:(x,v)=>{x.type = v}"
prim__setType : UnderlyingSink -> UndefOr AnyPtr -> PrimIO ()
export
%foreign "browser:lambda:x=>x.write"
prim__write : UnderlyingSink -> PrimIO (UndefOr UnderlyingSinkWriteCallback)
export
%foreign "browser:lambda:(x,v)=>{x.write = v}"
prim__setWrite : UnderlyingSink
-> UndefOr UnderlyingSinkWriteCallback
-> PrimIO ()
namespace UnderlyingSource
export
%foreign "browser:lambda:(a,b,c,d,e)=> {start: a,pull: b,cancel: c,type: d,autoAllocateChunkSize: e}"
prim__new : UndefOr UnderlyingSourceStartCallback
-> UndefOr UnderlyingSourcePullCallback
-> UndefOr UnderlyingSourceCancelCallback
-> UndefOr String
-> UndefOr JSBits64
-> PrimIO UnderlyingSource
export
%foreign "browser:lambda:x=>x.autoAllocateChunkSize"
prim__autoAllocateChunkSize : UnderlyingSource -> PrimIO (UndefOr JSBits64)
export
%foreign "browser:lambda:(x,v)=>{x.autoAllocateChunkSize = v}"
prim__setAutoAllocateChunkSize : UnderlyingSource
-> UndefOr JSBits64
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.cancel"
prim__cancel : UnderlyingSource
-> PrimIO (UndefOr UnderlyingSourceCancelCallback)
export
%foreign "browser:lambda:(x,v)=>{x.cancel = v}"
prim__setCancel : UnderlyingSource
-> UndefOr UnderlyingSourceCancelCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.pull"
prim__pull : UnderlyingSource -> PrimIO (UndefOr UnderlyingSourcePullCallback)
export
%foreign "browser:lambda:(x,v)=>{x.pull = v}"
prim__setPull : UnderlyingSource
-> UndefOr UnderlyingSourcePullCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.start"
prim__start : UnderlyingSource
-> PrimIO (UndefOr UnderlyingSourceStartCallback)
export
%foreign "browser:lambda:(x,v)=>{x.start = v}"
prim__setStart : UnderlyingSource
-> UndefOr UnderlyingSourceStartCallback
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.type"
prim__type : UnderlyingSource -> PrimIO (UndefOr String)
export
%foreign "browser:lambda:(x,v)=>{x.type = v}"
prim__setType : UnderlyingSource -> UndefOr String -> PrimIO ()
--------------------------------------------------------------------------------
-- Callbacks
--------------------------------------------------------------------------------
namespace QueuingStrategySize
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toQueuingStrategySize : ( UndefOr AnyPtr -> IO Double )
-> PrimIO QueuingStrategySize
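-- Sketch of how the converter above can be used: lifting a plain Idris
-- function into a JS `size` callback. Every chunk counts as 1.0 here and
-- the (possibly undefined) chunk argument is ignored.
constantSize : IO QueuingStrategySize
constantSize = primIO $ prim__toQueuingStrategySize (\_ => pure 1.0)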
namespace TransformerFlushCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toTransformerFlushCallback : ( TransformStreamDefaultController
-> IO (Promise Undefined)
)
-> PrimIO TransformerFlushCallback
namespace TransformerStartCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toTransformerStartCallback : ( TransformStreamDefaultController
-> IO AnyPtr
)
-> PrimIO TransformerStartCallback
namespace TransformerTransformCallback
export
%foreign "browser:lambda:x=>(a,b)=>x(a,b)()"
prim__toTransformerTransformCallback : ( AnyPtr
-> TransformStreamDefaultController
-> IO (Promise Undefined)
)
-> PrimIO TransformerTransformCallback
namespace UnderlyingSinkAbortCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toUnderlyingSinkAbortCallback : ( UndefOr AnyPtr
-> IO (Promise Undefined)
)
-> PrimIO UnderlyingSinkAbortCallback
namespace UnderlyingSinkCloseCallback
export
%foreign "browser:lambda:x=>()=>x()()"
prim__toUnderlyingSinkCloseCallback : (() -> IO (Promise Undefined))
-> PrimIO UnderlyingSinkCloseCallback
namespace UnderlyingSinkStartCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toUnderlyingSinkStartCallback : ( WritableStreamDefaultController
-> IO AnyPtr
)
-> PrimIO UnderlyingSinkStartCallback
namespace UnderlyingSinkWriteCallback
export
%foreign "browser:lambda:x=>(a,b)=>x(a,b)()"
prim__toUnderlyingSinkWriteCallback : ( AnyPtr
-> WritableStreamDefaultController
-> IO (Promise Undefined)
)
-> PrimIO UnderlyingSinkWriteCallback
namespace UnderlyingSourceCancelCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toUnderlyingSourceCancelCallback : ( UndefOr AnyPtr
-> IO (Promise Undefined)
)
-> PrimIO UnderlyingSourceCancelCallback
namespace UnderlyingSourcePullCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toUnderlyingSourcePullCallback : ( Union2 ReadableStreamDefaultController
ReadableByteStreamController
-> IO (Promise Undefined)
)
-> PrimIO UnderlyingSourcePullCallback
namespace UnderlyingSourceStartCallback
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toUnderlyingSourceStartCallback : ( Union2 ReadableStreamDefaultController
ReadableByteStreamController
-> IO AnyPtr
)
-> PrimIO UnderlyingSourceStartCallback
|
MODULE LogBeta
! Log of the beta function
! Includes log of the gamma function
IMPLICIT NONE
CONTAINS
FUNCTION betaln(a0, b0) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF THE LOGARITHM OF THE BETA FUNCTION
!-----------------------------------------------------------------------
! E = 0.5*LN(2*PI)
!--------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a0, b0
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: e = .918938533204673_dp
REAL (dp) :: a, b, c, h, u, v, w, z
INTEGER :: i, n
REAL (dp), PARAMETER :: half = 0.5_dp, one = 1.0_dp, two = 2.0_dp, &
eight = 8.0_dp
!--------------------------
a = MIN(a0, b0)
b = MAX(a0, b0)
IF (a < eight) THEN
IF (a < one) THEN
!-----------------------------------------------------------------------
! PROCEDURE WHEN A < 1
!-----------------------------------------------------------------------
IF (b < eight) THEN
fn_val = gamln(a) + (gamln(b) - gamln(a+b))
RETURN
END IF
fn_val = gamln(a) + algdiv(a,b)
RETURN
END IF
!-----------------------------------------------------------------------
! PROCEDURE WHEN 1 <= A < 8
!-----------------------------------------------------------------------
IF (a <= two) THEN
IF (b <= two) THEN
fn_val = gamln(a) + gamln(b) - gsumln(a,b)
RETURN
END IF
w = 0.0
IF (b < eight) GO TO 20
fn_val = gamln(a) + algdiv(a,b)
RETURN
END IF
! REDUCTION OF A WHEN B <= 1000
IF (b > 1000.0_dp) GO TO 40
n = a - one
w = one
DO i = 1, n
a = a - one
h = a / b
w = w * (h/(one + h))
END DO
w = LOG(w)
IF (b >= eight) THEN
fn_val = w + gamln(a) + algdiv(a,b)
RETURN
END IF
! REDUCTION OF B WHEN B < 8
20 n = b - one
z = one
DO i = 1, n
b = b - one
z = z * (b/(a+b))
END DO
fn_val = w + LOG(z) + (gamln(a) + (gamln(b) - gsumln(a,b)))
RETURN
! REDUCTION OF A WHEN B > 1000
40 n = a - one
w = one
DO i = 1, n
a = a - one
w = w * (a/(one+a/b))
END DO
fn_val = (LOG(w) - n*LOG(b)) + (gamln(a) + algdiv(a,b))
RETURN
END IF
!-----------------------------------------------------------------------
! PROCEDURE WHEN A >= 8
!-----------------------------------------------------------------------
w = bcorr(a,b)
h = a / b
c = h / (one + h)
u = -(a-half) * LOG(c)
v = b * alnrel(h)
IF (u > v) THEN
fn_val = (((-half*LOG(b) + e) + w) - v) - u
RETURN
END IF
fn_val = (((-half*LOG(b) + e) + w) - u) - v
RETURN
END FUNCTION betaln
FUNCTION gamln(a) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF LN(GAMMA(A)) FOR POSITIVE A
!-----------------------------------------------------------------------
! WRITTEN BY ALFRED H. MORRIS
! NAVAL SURFACE WARFARE CENTER
! DAHLGREN, VIRGINIA
!--------------------------
! D = 0.5*(LN(2*PI) - 1)
!--------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: d = 0.418938533204673_dp, c0 = 0.833333333333333D-01, &
c1 = -0.277777777760991D-02, c2 = 0.793650666825390D-03, &
c3 = -0.595202931351870D-03, c4 = 0.837308034031215D-03, &
c5 = -0.165322962780713D-02
REAL (dp) :: t, w
INTEGER :: i, n
!-----------------------------------------------------------------------
IF (a <= 0.8_dp) THEN
fn_val = gamln1(a) - LOG(a)
RETURN
END IF
IF (a <= 2.25_dp) THEN
t = (a-0.5_dp) - 0.5_dp
fn_val = gamln1(t)
RETURN
END IF
IF (a < 10.0_dp) THEN
n = a - 1.25_dp
t = a
w = 1.0_dp
DO i = 1, n
t = t - 1.0_dp
w = t * w
END DO
fn_val = gamln1(t-1.0) + LOG(w)
RETURN
END IF
t = (1.0/a) ** 2
w = (((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0) / a
fn_val = (d+w) + (a-0.5) * (LOG(a)-1.0)
RETURN
END FUNCTION gamln
FUNCTION algdiv(a, b) RESULT(fn_val)
!-----------------------------------------------------------------------
! COMPUTATION OF LN(GAMMA(B)/GAMMA(A+B)) WHEN B >= 8
! --------
! IN THIS ALGORITHM, DEL(X) IS THE FUNCTION DEFINED BY
! LN(GAMMA(X)) = (X - 0.5)*LN(X) - X + 0.5*LN(2*PI) + DEL(X).
!-----------------------------------------------------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a, b
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: c0 = .833333333333333D-01, c1 = -.277777777760991D-02, &
c2 = .793650666825390D-03, c3 = -.595202931351870D-03, &
c4 = .837308034031215D-03, c5 = -.165322962780713D-02
REAL (dp) :: c, d, h, s3, s5, s7, s9, s11, t, u, v, w, x, x2
!------------------------
IF (a > b) THEN
h = b / a
c = 1.0 / (1.0_dp + h)
x = h / (1.0_dp + h)
d = a + (b - 0.5_dp)
ELSE
h = a / b
c = h / (1.0_dp + h)
x = 1.0 / (1.0_dp + h)
d = b + (a - 0.5_dp)
END IF
! SET SN = (1 - X**N)/(1 - X)
x2 = x * x
s3 = 1.0 + (x + x2)
s5 = 1.0 + (x + x2*s3)
s7 = 1.0 + (x + x2*s5)
s9 = 1.0 + (x + x2*s7)
s11 = 1.0 + (x + x2*s9)
! SET W = DEL(B) - DEL(A + B)
t = (1.0_dp/b) ** 2
w = ((((c5*s11*t + c4*s9)*t + c3*s7)*t + c2*s5)*t + c1*s3) * t + c0
w = w * (c/b)
! COMBINE THE RESULTS
u = d * alnrel(a/b)
v = a * (LOG(b) - 1.0_dp)
IF (u > v) THEN
fn_val = (w-v) - u
RETURN
END IF
fn_val = (w-u) - v
RETURN
END FUNCTION algdiv
FUNCTION gsumln(a, b) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF THE FUNCTION LN(GAMMA(A + B))
! FOR 1 <= A <= 2 AND 1 <= B <= 2
!-----------------------------------------------------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a, b
REAL (dp) :: fn_val
! Local variables
REAL (dp) :: x
x = a + b - 2.0_dp
IF (x <= 0.25_dp) THEN
fn_val = gamln1(1.0_dp + x)
RETURN
END IF
IF (x <= 1.25_dp) THEN
fn_val = gamln1(x) + alnrel(x)
RETURN
END IF
fn_val = gamln1(x - 1.0_dp) + LOG(x*(1.0_dp + x))
RETURN
END FUNCTION gsumln
FUNCTION bcorr(a0, b0) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF DEL(A0) + DEL(B0) - DEL(A0 + B0) WHERE
! LN(GAMMA(A)) = (A - 0.5)*LN(A) - A + 0.5*LN(2*PI) + DEL(A).
! IT IS ASSUMED THAT A0 >= 8 AND B0 >= 8.
!-----------------------------------------------------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a0, b0
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: c0 = .833333333333333D-01, c1 = -.277777777760991D-02, &
c2 = .793650666825390D-03, c3 = -.595202931351870D-03, &
c4 = .837308034031215D-03, c5 = -.165322962780713D-02
REAL (dp) :: a, b, c, h, s3, s5, s7, s9, s11, t, w, x, x2
!------------------------
a = MIN(a0,b0)
b = MAX(a0,b0)
h = a / b
c = h / (1.0_dp + h)
x = 1.0 / (1.0_dp + h)
x2 = x * x
! SET SN = (1 - X**N)/(1 - X)
s3 = 1.0 + (x + x2)
s5 = 1.0 + (x + x2*s3)
s7 = 1.0 + (x + x2*s5)
s9 = 1.0 + (x + x2*s7)
s11 = 1.0 + (x + x2*s9)
! SET W = DEL(B) - DEL(A + B)
t = (1.0_dp/b) ** 2
w = ((((c5*s11*t + c4*s9)*t + c3*s7)*t + c2*s5)*t + c1*s3) * t + c0
w = w * (c/b)
! COMPUTE DEL(A) + W
t = (1.0_dp/a) ** 2
fn_val = (((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0) / a + w
RETURN
END FUNCTION bcorr
FUNCTION alnrel(a) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF THE FUNCTION LN(1 + A)
!-----------------------------------------------------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: p1 = -.129418923021993D+01, p2 = .405303492862024D+00, &
p3 = -.178874546012214D-01, q1 = -.162752256355323D+01, &
q2 = .747811014037616D+00, q3 = -.845104217945565D-01, &
zero = 0.0_dp, half = 0.5_dp, one = 1.0_dp, two = 2.0_dp
REAL (dp) :: t, t2, w, x
!--------------------------
IF (ABS(a) <= 0.375_dp) THEN
t = a / (a + two)
t2 = t * t
w = (((p3*t2 + p2)*t2 + p1)*t2 + one) / (((q3*t2 + q2)*t2 + q1)*t2 + one)
fn_val = two * t * w
RETURN
END IF
x = one + a
IF (a < zero) x = (a + half) + half
fn_val = LOG(x)
RETURN
END FUNCTION alnrel
FUNCTION gamln1(a) RESULT(fn_val)
!-----------------------------------------------------------------------
! EVALUATION OF LN(GAMMA(1 + A)) FOR -0.2 <= A <= 1.25
!-----------------------------------------------------------------------
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: a
REAL (dp) :: fn_val
! Local variables
REAL (dp), PARAMETER :: p0 = .577215664901533D+00, p1 = .844203922187225D+00, &
p2 = -.168860593646662D+00, p3 = -.780427615533591D+00, &
p4 = -.402055799310489D+00, p5 = -.673562214325671D-01, &
p6 = -.271935708322958D-02, q1 = .288743195473681D+01, &
q2 = .312755088914843D+01, q3 = .156875193295039D+01, &
q4 = .361951990101499D+00, q5 = .325038868253937D-01, &
q6 = .667465618796164D-03, r0 = .422784335098467D+00, &
r1 = .848044614534529D+00, r2 = .565221050691933D+00, &
r3 = .156513060486551D+00, r4 = .170502484022650D-01, &
r5 = .497958207639485D-03, s1 = .124313399877507D+01, &
s2 = .548042109832463D+00, s3 = .101552187439830D+00, &
s4 = .713309612391000D-02, s5 = .116165475989616D-03
REAL (dp) :: w, x
!----------------------
IF (a < 0.6_dp) THEN
w = ((((((p6*a + p5)*a + p4)*a + p3)*a + p2)*a + p1)*a + p0) / &
((((((q6*a + q5)*a + q4)*a + q3)*a + q2)*a + q1)*a + 1.0)
fn_val = -a * w
RETURN
END IF
x = (a - 0.5_dp) - 0.5_dp
w = (((((r5*x + r4)*x + r3)*x + r2)*x + r1)*x + r0) / (((((s5*x + s4)*x + &
s3)*x + s2)*x + s1)*x + 1.0_dp)
fn_val = x * w
RETURN
END FUNCTION gamln1
END MODULE LogBeta
FUNCTION betain(x, p, q, beta) RESULT(fn_val)
! Algorithm AS 63 Appl. Statist. (1973), vol.22, no.3
! Computes incomplete beta function ratio for arguments
! x between zero and one, p and q positive.
! Log of complete beta function, beta, is assumed to be known
! ELF90-compatible version by Alan Miller
! N.B. Argument IFAULT has been removed
! Latest revision - 5 July 2003
USE LogBeta
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
REAL (dp), INTENT(IN) :: x, p, q, beta
REAL (dp) :: fn_val
! Local variables
LOGICAL :: indx
INTEGER :: ns
REAL (dp) :: psq, cx, xx, pp, qq, term, ai, rx, temp
! Define accuracy and initialise
REAL (dp), PARAMETER :: zero = 0.0_dp, one = 1.0_dp, acu = 1.0E-14_dp
fn_val = x
! Test for admissibility of arguments
IF(p <= zero .OR. q <= zero) THEN
WRITE(*, *) 'AS63: Either p or q <= 0'
RETURN
END IF
IF(x < zero .OR. x > one) THEN
WRITE(*, *) 'AS63: Argument x outside range (0, 1)'
RETURN
END IF
IF(x == zero .OR. x == one) RETURN
! Change tail if necessary and determine s
psq = p + q
cx = one - x
IF(p < psq*x) THEN
xx = cx
cx = x
pp = q
qq = p
indx = .true.
ELSE
xx = x
pp = p
qq = q
indx = .false.
END IF
term = one
ai = one
fn_val = one
ns = qq + cx*psq
! Use Soper's reduction formulae.
rx = xx/cx
3 temp = qq - ai
IF(ns == 0) rx = xx
4 term = term*temp*rx/(pp+ai)
fn_val = fn_val + term
temp = ABS(term)
IF(temp <= acu .AND. temp <= acu*fn_val) GO TO 5
ai = ai + one
ns = ns - 1
IF(ns >= 0) GO TO 3
temp = psq
psq = psq+one
GO TO 4
! Calculate result
5 fn_val = fn_val*EXP(pp*LOG(xx) + (qq-one)*LOG(cx) - beta)/pp
IF(indx) fn_val = one - fn_val
RETURN
END FUNCTION betain
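! ---------------------------------------------------------------------
! A minimal driver sketch (not part of AS 63): betain expects the LOG of
! the complete beta function, which betaln from module LogBeta supplies.
! For integer parameters the result can be checked against the binomial
! identity I_x(a,b) = P(Bin(a+b-1,x) >= a); with a=2, b=5, x=0.5 the
! exact value is 57/64 = 0.890625.
! ---------------------------------------------------------------------
PROGRAM betain_check
USE LogBeta
IMPLICIT NONE
INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
INTERFACE
  FUNCTION betain(x, p, q, beta) RESULT(fn_val)
    INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(15, 100)
    REAL (dp), INTENT(IN) :: x, p, q, beta
    REAL (dp) :: fn_val
  END FUNCTION betain
END INTERFACE
REAL (dp) :: p, q, x
p = 2.0_dp
q = 5.0_dp
x = 0.5_dp
WRITE(*, *) 'betain =', betain(x, p, q, betaln(p, q)), ' (expect 0.890625)'
END PROGRAM betain_check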
|
|||
module Toolkit.Data.Graph.EdgeBounded.HasExactDegree.All
import Decidable.Equality
import Data.String
import Data.List.Elem
import Data.List.Quantifiers
import Toolkit.Decidable.Do
import Toolkit.Decidable.Informative
import Toolkit.Data.Nat
import Toolkit.Data.Pair
import Toolkit.Data.List.Size
import Toolkit.Data.List.Occurs.Does
import Toolkit.Data.Graph.EdgeBounded
import public Toolkit.Data.Graph.EdgeBounded.DegreeCommon
import public Toolkit.Data.Graph.EdgeBounded.HasExactDegree
%default total
public export
HasExactDegrees : Vertices type -> Edges -> Type
HasExactDegrees vs es = All (\v => HasExactDegree v es) vs
errorHead : {x : type}
-> {p : type -> Type}
-> (p x -> Void)
-> All p (x :: xs) -> Void
errorHead contra (y :: z) = contra y
errorTail : (All p xs -> Void) -> All p (x :: xs) -> Void
errorTail f (y :: z) = f z
all : {type : Type}
-> {p : type -> Type}
-> (f : (x : type) -> DecInfo e (p x))
-> (xs : List type)
-> DecInfo e (All p xs)
all f [] = Yes []
all f (x :: xs) with (f x)
all f (x :: xs) | (Yes prfWhy) with (all f xs)
all f (x :: xs) | (Yes prfWhy) | (Yes y)
= Yes (prfWhy :: y)
all f (x :: xs) | (Yes prfWhy) | (No msgWhyNot prfWhyNot)
= No msgWhyNot (errorTail prfWhyNot)
all f (x :: xs) | (No msgWhyNot prfWhyNot) = No msgWhyNot (errorHead prfWhyNot)
export
hasExactDegrees : {type : Type}
-> (vs : Vertices type)
-> (es : Edges)
-> DecInfo (HasExactDegree.Error type)
(HasExactDegrees vs es)
hasExactDegrees vs es = all (\v => hasExactDegree v es) vs
-- [ EOF ]
|
# ---
# title: 376. Wiggle Subsequence
# id: problem376
# author: Indigo
# date: 2020-12-12
# difficulty: Medium
# categories: Dynamic Programming, Greedy
# link: <https://leetcode.com/problems/wiggle-subsequence/description/>
# hidden: true
# ---
#
# A sequence of numbers is called a **wiggle sequence** if the differences
# between successive numbers strictly alternate between positive and negative.
# The first difference (if one exists) may be either positive or negative. A
# sequence with fewer than two elements is trivially a wiggle sequence.
#
# For example, `[1,7,4,9,2,5]` is a wiggle sequence because the differences
# `(6,-3,5,-7,3)` are alternately positive and negative. In contrast,
# `[1,4,7,2,5]` and `[1,7,4,5,5]` are not wiggle sequences, the first because
# its first two differences are positive and the second because its last
# difference is zero.
#
# Given a sequence of integers, return the length of the longest subsequence
# that is a wiggle sequence. A subsequence is obtained by deleting some number
# of elements (eventually, also zero) from the original sequence, leaving the
# remaining elements in their original order.
#
# **Example 1:**
#
#
#
# Input: [1,7,4,9,2,5]
# Output: 6
# Explanation: The entire sequence is a wiggle sequence.
#
# **Example 2:**
#
#
#
# Input: [1,17,5,10,13,15,10,5,16,8]
# Output: 7
# Explanation: There are several subsequences that achieve this length. One is [1,17,10,13,10,16,8].
#
# **Example 3:**
#
#
#
# Input: [1,2,3,4,5,6,7,8,9]
# Output: 2
#
# **Follow up:**
# Can you do it in O( _n_ ) time?
#
#
## @lc code=start
using LeetCode
function wiggle_max_length(nums::Vector{Int})::Int
len = length(nums)
if len < 2
return len
end
pre_diff = nums[2] - nums[1]
res = (pre_diff == 0 ? 1 : 2)
for i in 3:len
diff = nums[i] - nums[i - 1]
if diff > 0 && pre_diff <= 0 || diff < 0 && pre_diff > 0
res += 1
pre_diff = diff
end
end
return res
end
## @lc code=end
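## A quick check of the greedy pass against the three worked examples in the
## problem statement above (plain asserts, outside the LeetCode.jl harness):
@assert wiggle_max_length([1, 7, 4, 9, 2, 5]) == 6
@assert wiggle_max_length([1, 17, 5, 10, 13, 15, 10, 5, 16, 8]) == 7
@assert wiggle_max_length([1, 2, 3, 4, 5, 6, 7, 8, 9]) == 2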
|
Formal statement is: lemma ring_of_finite_sets: "ring_of_sets (space M) {A\<in>sets M. emeasure M A \<noteq> top}" Informal statement is: The collection of measurable sets of finite measure is a ring.
|
(* Author: Tobias Nipkow *)
header "Completeness of Archive Test"
theory ArchCompProps
imports TameEnumProps ArchCompAux
begin
lemma mgp_pre_iso_test: "minGraphProps g \<Longrightarrow> pre_iso_test(fgraph g)"
apply(simp add:pre_iso_test_def fgraph_def image_def)
apply(rule conjI) apply(blast dest: mgp_vertices_nonempty[symmetric])
apply(rule conjI) apply(blast intro:minGraphProps)
apply(drule minGraphProps11)
apply(simp add:normFaces_def normFace_def verticesFrom_def minVertex_def
rotate_min_def o_def)
done
corollary iso_test_correct:
"\<lbrakk> pre_iso_test Fs\<^sub>1; pre_iso_test Fs\<^sub>2 \<rbrakk> \<Longrightarrow>
iso_test Fs\<^sub>1 Fs\<^sub>2 = (Fs\<^sub>1 \<simeq> Fs\<^sub>2)"
by(simp add:pre_iso_test_def iso_correct inj_on_rotate_min_iff[symmetric]
distinct_map nof_vertices_def length_remdups_concat)
lemma trie_all_eq_set_of_trie:
"Tries.inv t \<Longrightarrow> Tries.all P t = (\<forall>v \<in> Tries.set_of t. P v)"
apply(induct t rule:Tries.inv.induct)
apply (auto simp: Tries.set_of_def)
apply(case_tac a)
apply simp
apply auto
apply blast
apply(erule allE)
apply(erule impE)
apply(rule_tac x = "[]" in exI)
apply(rule HOL.refl)
apply simp
apply(erule meta_allE)+
apply(erule meta_impE)
apply assumption
apply(erule meta_impE)
apply fast
apply(erule meta_impE)
apply fast
apply clarsimp
apply(erule allE)
apply(erule impE)
apply(rule_tac x = "a#aa" in exI)
apply(rule HOL.refl)
apply auto
done
lemma samet_imp_iso_seteq:
assumes pre1: "\<And>gs g. gsopt = Some gs \<Longrightarrow> g \<in> Tries.set_of gs \<Longrightarrow> pre_iso_test g"
and pre2: "\<And>g. g \<in> set arch \<Longrightarrow> pre_iso_test g"
and inv: "!!gs. gsopt = Some gs \<Longrightarrow> Tries.inv gs"
and same: "samet gsopt arch"
shows "\<exists>gs. gsopt = Some gs \<and> Tries.set_of gs =\<^sub>\<simeq> set arch"
proof -
obtain gs where [simp]: "gsopt = Some gs" and test1: "\<And>g. g \<in> Tries.set_of gs \<Longrightarrow>
\<exists>h \<in> set arch. iso_test g h" and test2: "\<And>g. g \<in> set arch \<Longrightarrow>
\<exists>h \<in> Tries.set_of gs. iso_test g h"
using same inv
by(force simp: samet_def trie_all_eq_set_of_trie inv_of_list
split:option.splits
dest: in_set_lookup_of_listD in_set_lookup_set_ofD)
have "Tries.set_of gs \<subseteq>\<^sub>\<simeq> set arch"
proof (auto simp:qle_gr.defs)
fix g assume g: "g \<in> Tries.set_of gs"
obtain h where h: "h \<in> set arch" and test: "iso_test g h"
using test1[OF g] by blast
thus "\<exists>h\<in>set arch. g \<simeq> h"
using h pre1[OF _ g] pre2[OF h] by (auto simp:iso_test_correct)
qed
moreover
have "set arch \<subseteq>\<^sub>\<simeq> Tries.set_of gs"
proof (auto simp:qle_gr.defs)
fix g assume g: "g \<in> set arch"
obtain h where h: "h \<in> Tries.set_of gs" and test: "iso_test g h"
using test2[OF g] by blast
thus "\<exists>h \<in> Tries.set_of gs. g \<simeq> h"
using h pre1[OF _ h] pre2[OF g] by (auto simp:iso_test_correct)
qed
ultimately show ?thesis by (auto simp: qle_gr.seteq_qle_def)
qed
lemma samet_imp_iso_subseteq:
assumes pre1: "\<And>gs g. gsopt = Some gs \<Longrightarrow> g \<in> Tries.set_of gs \<Longrightarrow> pre_iso_test g"
and pre2: "\<And>g. g \<in> set arch \<Longrightarrow> pre_iso_test g"
and inv: "!!gs. gsopt = Some gs \<Longrightarrow> Tries.inv gs"
and same: "samet gsopt arch"
shows "\<exists>gs. gsopt = Some gs \<and> Tries.set_of gs \<subseteq>\<^sub>\<simeq> set arch"
using qle_gr.seteq_qle_def assms samet_imp_iso_seteq by metis
definition [code del]:
"insert_mod_trie = set_mod_maps.insert_mod Tries.update Tries.lookup iso_test hash"
definition [code del]:
"worklist_tree_coll_trie = set_modulo.worklist_tree_coll (Tries [] []) insert_mod_trie"
definition [code del]:
"worklist_tree_coll_aux_trie = set_modulo.worklist_tree_coll_aux insert_mod_trie"
definition [code del]:
"insert_mod2_trie = set_modulo.insert_mod2 insert_mod_trie"
interpretation set_mod_trie:
set_mod_maps "Tries [] []" Tries.update Tries.lookup Tries.inv "op \<simeq>" iso_test pre_iso_test hash
where "set_modulo.worklist_tree_coll (Tries [] []) insert_mod_trie = worklist_tree_coll_trie"
and "set_modulo.worklist_tree_coll_aux insert_mod_trie = worklist_tree_coll_aux_trie"
and "set_mod_maps.insert_mod Tries.update Tries.lookup iso_test hash = insert_mod_trie"
and "set_modulo.insert_mod2 insert_mod_trie = insert_mod2_trie"
proof unfold_locales
qed (auto simp:iso_test_correct worklist_tree_coll_trie_def worklist_tree_coll_aux_trie_def insert_mod_trie_def insert_mod2_trie_def)
definition enum_filter_finals ::
"(graph \<Rightarrow> graph list) \<Rightarrow> graph list
\<Rightarrow> (nat,nat fgraph) tries option" where
"enum_filter_finals succs = set_mod_trie.worklist_tree_coll succs final fgraph"
definition tameEnumFilter :: "nat \<Rightarrow> (nat,nat fgraph)tries option" where
"tameEnumFilter p = enum_filter_finals (next_tame p) [Seed p]"
lemma TameEnum_tameEnumFilter:
"tameEnumFilter p = Some t \<Longrightarrow> Tries.set_of t =\<^sub>\<simeq> fgraph ` TameEnum\<^bsub>p\<^esub>"
apply(auto simp: tameEnumFilter_def TameEnumP_def enum_filter_finals_def)
apply(drule set_mod_trie.worklist_tree_coll_equiv[OF _ inv_inv_next_tame])
apply (auto simp: Tries.set_of_conv inv_Seed mgp_pre_iso_test RTranCl_conv)
done
lemma tameEnumFilter_subseteq_TameEnum:
"tameEnumFilter p = Some t \<Longrightarrow> Tries.set_of t <= fgraph ` TameEnum\<^bsub>p\<^esub>"
by(auto simp add:tameEnumFilter_def TameEnumP_def enum_filter_finals_def
Tries.set_of_conv inv_Seed mgp_pre_iso_test RTranCl_conv
dest!: set_mod_trie.worklist_tree_coll_subseteq[OF _ inv_inv_next_tame])
lemma inv_tries_tameEnumFilter:
"tameEnumFilter p = Some t \<Longrightarrow> Tries.inv t"
unfolding tameEnumFilter_def enum_filter_finals_def
by(erule set_mod_trie.worklist_tree_coll_inv)
theorem combine_evals_filter:
"\<forall>g \<in> set arch. pre_iso_test g \<Longrightarrow> samet (tameEnumFilter p) arch
\<Longrightarrow> fgraph ` TameEnum\<^bsub>p\<^esub> \<subseteq>\<^sub>\<simeq> set arch"
apply(subgoal_tac "\<exists>t. tameEnumFilter p = Some t \<and> Tries.set_of t \<subseteq>\<^sub>\<simeq> set arch")
apply(metis TameEnum_tameEnumFilter qle_gr.seteq_qle_def qle_gr.subseteq_qle_trans)
apply(fastforce intro!: samet_imp_iso_subseteq
dest: inv_tries_tameEnumFilter tameEnumFilter_subseteq_TameEnum mgp_TameEnum mgp_pre_iso_test)
done
end
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import algebra.hom.equiv
/-!
# `ulift` instances for groups and monoids
This file defines instances for group, monoid, semigroup and related structures on `ulift` types.
(Recall `ulift α` is just a "copy" of a type `α` in a higher universe.)
We use `tactic.pi_instance_derive_field`, even though it wasn't intended for this purpose,
which seems to work fine.
We also provide `ulift.mul_equiv : ulift R ≃* R` (and its additive analogue).
-/
universes u v
variables {α : Type u} {x y : ulift.{v} α}
namespace ulift
@[to_additive] instance has_one [has_one α] : has_one (ulift α) := ⟨⟨1⟩⟩
@[simp, to_additive] lemma one_down [has_one α] : (1 : ulift α).down = 1 := rfl
@[to_additive] instance has_mul [has_mul α] : has_mul (ulift α) := ⟨λ f g, ⟨f.down * g.down⟩⟩
@[simp, to_additive] lemma mul_down [has_mul α] : (x * y).down = x.down * y.down := rfl
@[to_additive] instance has_div [has_div α] : has_div (ulift α) := ⟨λ f g, ⟨f.down / g.down⟩⟩
@[simp, to_additive] lemma div_down [has_div α] : (x / y).down = x.down / y.down := rfl
@[to_additive] instance has_inv [has_inv α] : has_inv (ulift α) := ⟨λ f, ⟨f.down⁻¹⟩⟩
@[simp, to_additive] lemma inv_down [has_inv α] : x⁻¹.down = (x.down)⁻¹ := rfl
/--
The multiplicative equivalence between `ulift α` and `α`.
-/
@[to_additive "The additive equivalence between `ulift α` and `α`."]
def _root_.mul_equiv.ulift [has_mul α] : ulift α ≃* α :=
{ map_mul' := λ x y, rfl,
.. equiv.ulift }
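/- A quick sanity check, assuming the core `ℕ` arithmetic instances pulled in by
the imports above: multiplication on `ulift ℕ` is computed componentwise, so the
equation below holds by `rfl`. -/
example : (ulift.up 2 * ulift.up 3 : ulift ℕ) = ulift.up 6 := rfl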
@[to_additive]
instance semigroup [semigroup α] : semigroup (ulift α) :=
mul_equiv.ulift.injective.semigroup _ $ λ x y, rfl
@[to_additive]
instance comm_semigroup [comm_semigroup α] : comm_semigroup (ulift α) :=
equiv.ulift.injective.comm_semigroup _ $ λ x y, rfl
@[to_additive]
instance mul_one_class [mul_one_class α] : mul_one_class (ulift α) :=
equiv.ulift.injective.mul_one_class _ rfl $ λ x y, rfl
instance mul_zero_one_class [mul_zero_one_class α] : mul_zero_one_class (ulift α) :=
equiv.ulift.injective.mul_zero_one_class _ rfl rfl $ λ x y, rfl
@[to_additive has_vadd]
instance has_scalar {β : Type*} [has_scalar α β] : has_scalar α (ulift β) :=
⟨λ n x, up (n • x.down)⟩
@[to_additive has_scalar, to_additive_reorder 1]
instance has_pow {β : Type*} [has_pow α β] : has_pow (ulift α) β :=
⟨λ x n, up (x.down ^ n)⟩
@[to_additive]
instance monoid [monoid α] : monoid (ulift α) :=
equiv.ulift.injective.monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
@[to_additive]
instance comm_monoid [comm_monoid α] : comm_monoid (ulift α) :=
equiv.ulift.injective.comm_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
instance monoid_with_zero [monoid_with_zero α] : monoid_with_zero (ulift α) :=
equiv.ulift.injective.monoid_with_zero _ rfl rfl (λ _ _, rfl) (λ _ _, rfl)
instance comm_monoid_with_zero [comm_monoid_with_zero α] : comm_monoid_with_zero (ulift α) :=
equiv.ulift.injective.comm_monoid_with_zero _ rfl rfl (λ _ _, rfl) (λ _ _, rfl)
@[to_additive]
instance div_inv_monoid [div_inv_monoid α] : div_inv_monoid (ulift α) :=
equiv.ulift.injective.div_inv_monoid _ rfl (λ _ _, rfl) (λ _, rfl)
(λ _ _, rfl) (λ _ _, rfl) (λ _ _, rfl)
@[to_additive]
instance group [group α] : group (ulift α) :=
equiv.ulift.injective.group _ rfl (λ _ _, rfl) (λ _, rfl)
(λ _ _, rfl) (λ _ _, rfl) (λ _ _, rfl)
@[to_additive]
instance comm_group [comm_group α] : comm_group (ulift α) :=
equiv.ulift.injective.comm_group _ rfl (λ _ _, rfl) (λ _, rfl)
(λ _ _, rfl) (λ _ _, rfl) (λ _ _, rfl)
instance group_with_zero [group_with_zero α] : group_with_zero (ulift α) :=
equiv.ulift.injective.group_with_zero _ rfl rfl (λ _ _, rfl) (λ _, rfl) (λ _ _, rfl) (λ _ _, rfl)
(λ _ _, rfl)
instance comm_group_with_zero [comm_group_with_zero α] : comm_group_with_zero (ulift α) :=
equiv.ulift.injective.comm_group_with_zero _ rfl rfl (λ _ _, rfl) (λ _, rfl) (λ _ _, rfl)
(λ _ _, rfl) (λ _ _, rfl)
@[to_additive add_left_cancel_semigroup]
instance left_cancel_semigroup [left_cancel_semigroup α] :
left_cancel_semigroup (ulift α) :=
equiv.ulift.injective.left_cancel_semigroup _ (λ _ _, rfl)
@[to_additive add_right_cancel_semigroup]
instance right_cancel_semigroup [right_cancel_semigroup α] :
right_cancel_semigroup (ulift α) :=
equiv.ulift.injective.right_cancel_semigroup _ (λ _ _, rfl)
@[to_additive add_left_cancel_monoid]
instance left_cancel_monoid [left_cancel_monoid α] :
left_cancel_monoid (ulift α) :=
equiv.ulift.injective.left_cancel_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
@[to_additive add_right_cancel_monoid]
instance right_cancel_monoid [right_cancel_monoid α] :
right_cancel_monoid (ulift α) :=
equiv.ulift.injective.right_cancel_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
@[to_additive add_cancel_monoid]
instance cancel_monoid [cancel_monoid α] :
cancel_monoid (ulift α) :=
equiv.ulift.injective.cancel_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
@[to_additive add_cancel_monoid]
instance cancel_comm_monoid [cancel_comm_monoid α] :
cancel_comm_monoid (ulift α) :=
equiv.ulift.injective.cancel_comm_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
instance nontrivial [nontrivial α] : nontrivial (ulift α) :=
equiv.ulift.symm.injective.nontrivial
-- TODO we don't do `ordered_cancel_comm_monoid` or `ordered_comm_group`
-- We'd need to add instances for `ulift` in `order.basic`.
end ulift
|
SUBROUTINE radial_ext (ncoil, theta, dr_ext)
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE stel_constants
USE modular_coils
IMPLICIT NONE
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: ncoil
REAL(rprec) :: theta, dr_ext
dr_ext = 0
IF ((theta.le.pi/2).or.(theta.ge.(3*pi)/2))
1 dr_ext = r_ext(ncoil)*COS(theta)
END SUBROUTINE radial_ext
|
-- Idris2
import System.Concurrency
||| Test basic lock/acquire and unlock/release functionality
main : IO ()
main =
do m <- makeMutex
mutexAcquire m
putStrLn "Mutex acquired"
mutexRelease m
putStrLn "Mutex released"
|
A new month brings a new bookish prompt in Six for Sunday world.
For those who don’t already know, Six for Sunday is a weekly list-based meme created by Steph @ALittleButALot and has a different weekly prompt based on a monthly theme. April is all about children’s literature and as a primary school teacher I am definitely ready for ‘Kids Lit Represent’!
This Sunday, we are discussing children’s books we love. This was quite a difficult topic in many ways; not because I can’t think of any books I loved…but because as a child, I always had my nose in a book. So many of them have a place in my heart, for so many reasons, that I could probably write a Sixty for Sunday instead. I also feel that the meanings of books, or the reasons why you fell in love with them, change as you get older, and the morals and messages within them take on new life as society changes and adapts to our modern world. To me, this is why children’s literature is so fascinating; you could read it at different times or stages of your life and still take away something new.
Now… yes, I talk about this series a lot; yes, it features in nearly, but not every, list I write about books; yes, I am positive this comes under the umbrella of children’s books which is why it’s here!
Not only did this book keep my love for reading alive, even through those teenage years when “reading wasn’t cool unless you were reading Cosmo or one of your Nan’s Mills & Boon novels” (you can’t see me air-quoting and eye-rolling but believe me, I am…), it has also inspired so many children I have taught to actually pick up a book by choice rather than their Xbox or Nintendo and start to enjoy reading. That’s just one reason why I love it and why it’s here.
When I was at primary school, my Mum was doing a course which involved going to the library a lot to use the computers; this was the 90s after all and we didn’t have one at home yet. When I went with her after-school, I would have the entire run of the Kids’ section, which was huge to a 7 year old. It was filled with squashy beanbags in reading nooks and was decorated with brightly painted animals on the walls. It really was a special place. Whilst there I read so many books, but one set that stood out was a set all about Wizziwig the Witch by Geraldine McCaughrean. I’ve mentioned them in a previous post but I never see these books anymore so they’re probably out of print. I’d hire them out of the library multiple times just to reread them. There was one with a crazy cooker, a singing car, a sweet machine and I’m sure there was another one with either a washing machine…or it could have been a time machine! Either way, I loved these books so much that I wanted to grow up and BE Wizziwig!
This was my first ever Roald Dahl book and it was a prize won from cereal tokens. I remember collecting the tokens, sending them off, then sitting on the stairs every morning waiting for my book to come in the post. When it did…😍 Roald Dahl really is a staple in any children’s literature list; his storytelling is amazing and his books just seem to have an edge that others don’t. Perhaps it was because he made up words such as ‘snozzcumber’, or perhaps it was because he created a plethora of amazing characters who were either talking animals, dream-eating giants or witches who hated children. I could have included any of his books here, but the whole experience of waiting for a book to arrive in the post – let’s face it, this was my first ever book mail – and then finding out it’s all about a boy who inherits a chocolate factory…what kid wouldn’t love that!
As a child this book scared me slightly, however I used to know the words off by heart as it had lines which were repeated at certain points throughout the story and the plot was so cleverly constructed. Heckedy Peg is a witch who lures away a Mother’s children whilst she is out at the market. She turns them into items of food and their Mother, after she has tracked down Heckedy Peg, has to guess which of her children is which to break the spell and get them back. For children it’s a pretty frightening story which shows you what can happen if you disobey your parents and let strangers into your house. The illustrations were fantastic and highly detailed too.
Part story, part activity book I loved trying to solve the mystery of who stole the ruby red herring. You had to use the text and the pictures to solve the clues and work out who the thief was. In true crime novel style, all of the characters had a hidden motive and backstory which attempted to catch you out. All of the pictures in the book also had a number of hidden fish and you had to try and find them all. It kept me entertained for ages and is still on my bookshelves today.
Although this is a picture book it is one of my all-time favourites. It was the winner of the Caldecott Medal in 2007. I only discovered it a few years ago when I went on some English training for school. The two ladies who lead the course showed us how you could plan an entire curriculum topic just from that one book. It’s really changed my attitude to teaching through texts in the classroom. The illustrations are stunning and the story takes a few twists and turns that you don’t expect. I’d strongly recommend any teacher, or anyone who loves picture books to pick Flotsam up.
Where’s Wally – can you tell I like finding things in pictures…?
Care of Henry by Anne Fine – a cute dog story where the cover had Henry’s name fitting really snugly onto his collar.
Scribbleboy by Philip Ridley – I read this in secondary school and bought a copy for myself a few years back.
The Queen’s Knickers by Nicholas Allan – another great one for the classroom!
What would make your top list of children’s books you love? Have you read any of the ones on my list? As always, drop me a comment to chat!
One of my favorite children’s books is “Miss Rumphius” by Barbara Cooney. I read it to my class every year at least twice. To be fair, I didn’t actually read it as a child; I was in college when I discovered it. I also really like Charlie and the Chocolate Factory; it was definitely a fav as a kid.
|
[STATEMENT]
lemma SA_funs_to_SA_decomp_in_algebra:
assumes "finite Fs"
shows "SA_funs_to_SA_decomp n Fs S \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SA_funs_to_SA_decomp n Fs S \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)
[PROOF STEP]
unfolding SA_funs_to_SA_decomp_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atoms_of ((\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)) \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)
[PROOF STEP]
apply(rule atoms_of_gen_boolean_algebra)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. (\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs) \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)
2. finite ((\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs))
[PROOF STEP]
using pre_SA_funs_to_SA_decomp_in_algebra[of S n Fs]
[PROOF STATE]
proof (prove)
using this:
(\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs) \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)
goal (2 subgoals):
1. (\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs) \<subseteq> gen_boolean_algebra S (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs)
2. finite ((\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs))
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite ((\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
finite Fs
goal (1 subgoal):
1. finite ((\<inter>) S ` (SA_zero_set n ` Fs \<union> SA_nonzero_set n ` Fs))
[PROOF STEP]
by blast
|
module _ where
open import Common.Prelude hiding (_>>=_)
open import Common.Reflection
open import Common.Equality
open import Imports.StaleMetaLiteral
macro
unquoteMeta : Meta → Tactic
unquoteMeta m = give (meta m [])
thm : unquoteMeta staleMeta ≡ 42
thm = refl
|
"""
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <[email protected]>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
import fitter
# `units` and `hyperfine` are assumed to be sibling modules, in the same
# implicit-relative style as `fitter`; they supply speedoflight_ms and
# hyperfinemodel used below.
import units
import hyperfine
import matplotlib.cbook as mpcb
import copy
freq_dict = {
'oneone': 23.694506e9,
'twotwo': 23.722633335e9,
'threethree': 23.8701296e9,
'fourfour': 24.1394169e9,
}
aval_dict = {
'oneone': 1.712e-7, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 2.291e-7, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 2.625e-7, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
'fourfour': 3.167e-7, #64*!pi**4/(3*h*c**3)*nu44**3*mu0**2*(4/5.)
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': True,
'fourfour': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [19.8513, 19.3159, 7.88669, 7.46967, 7.35132, 0.460409, 0.322042,
-0.0751680, -0.213003, 0.311034, 0.192266, -0.132382, -0.250923, -7.23349,
-7.37280, -7.81526, -19.4117, -19.5500],
'twotwo':[26.5263, 26.0111, 25.9505, 16.3917, 16.3793, 15.8642, 0.562503,
0.528408, 0.523745, 0.0132820, -0.00379100, -0.0132820, -0.501831,
-0.531340, -0.589080, -15.8547, -16.3698, -16.3822, -25.9505, -26.0111,
-26.5263],
'threethree':[29.195098, 29.044147, 28.941877, 28.911408, 21.234827,
21.214619, 21.136387, 21.087456, 1.005122, 0.806082, 0.778062,
0.628569, 0.016754, -0.005589, -0.013401, -0.639734, -0.744554,
-1.031924, -21.125222, -21.203441, -21.223649, -21.076291, -28.908067,
-28.938523, -29.040794, -29.191744],
'fourfour':[ 0. , -30.49783692, 30.49783692, 0., 24.25907811,
-24.25907811, 0. ]
}
tau_wts_dict = {
'oneone': [0.0740740, 0.148148, 0.0925930, 0.166667, 0.0185190, 0.0370370,
0.0185190, 0.0185190, 0.0925930, 0.0333330, 0.300000, 0.466667,
0.0333330, 0.0925930, 0.0185190, 0.166667, 0.0740740, 0.148148],
'twotwo': [0.00418600, 0.0376740, 0.0209300, 0.0372090, 0.0260470,
0.00186000, 0.0209300, 0.0116280, 0.0106310, 0.267442, 0.499668,
0.146512, 0.0116280, 0.0106310, 0.0209300, 0.00186000, 0.0260470,
0.0372090, 0.0209300, 0.0376740, 0.00418600],
'threethree': [0.012263, 0.008409, 0.003434, 0.005494, 0.006652, 0.008852,
0.004967, 0.011589, 0.019228, 0.010387, 0.010820, 0.009482, 0.293302,
0.459109, 0.177372, 0.009482, 0.010820, 0.019228, 0.004967, 0.008852,
0.006652, 0.011589, 0.005494, 0.003434, 0.008409, 0.012263],
'fourfour': [0.2431, 0.0162, 0.0162, 0.3008, 0.0163, 0.0163, 0.3911]}
line_names = freq_dict.keys()
# line_names = ['oneone','twotwo','threethree','fourfour']
ckms = units.speedoflight_ms / 1e3 #2.99792458e5
# doesn't work for nh3voff_lines_dict = dict([(k,(v-93.176261e9)/93.176261e9*ckms) for k,v in freq_dict.iteritems()])
# line_strength_dict and relative_strength_total_degeneracy are not reproduced
# in this excerpt; they are assumed to be defined alongside the dictionaries above.
nh3_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict, line_strength_dict, relative_strength_total_degeneracy)
nh3_vtau_fitter = nh3_vtau.fitter
nh3_vtau_vheight_fitter = nh3_vtau.vheight_fitter
# radex_modelgrid and xarr are likewise assumed to come from the surrounding
# package in the full source; only the grid construction is reproduced here.
nh3_radex = radex_modelgrid(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
modelfunc=nh3_vtau,
debug=False,
verbose=False)
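# A small self-contained sketch (not from the original module): converting the
# 'oneone' velocity offsets (km/s) into absolute hyperfine frequencies with the
# radio convention nu = nu0*(1 - v/c), using only the dictionaries defined above.
if __name__ == "__main__":
    nu0 = freq_dict['oneone']
    hf_freqs = [nu0 * (1.0 - v / ckms) for v in voff_lines_dict['oneone']]
    print("%i hyperfine components spanning %.2f MHz"
          % (len(hf_freqs), (max(hf_freqs) - min(hf_freqs)) / 1e6))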
|
lemma LIM_less_bound: fixes f :: "real \<Rightarrow> real" assumes ev: "b < x" "\<forall> x' \<in> { b <..< x}. 0 \<le> f x'" and "isCont f x" shows "0 \<le> f x"
|
theory Object2
imports Main
begin
section \<open>Datatypes\<close>
text \<open>We begin by defining a datatype as an initial value plus a transition function, which takes a
version and argument to a (value', ret-val) tuple. We define a datatype to encapsulate this tuple:\<close>
datatype ('v, 'r) write_out = WriteOut 'v 'r
primrec write_out_value :: "('v, 'r) write_out \<Rightarrow> 'v" where
"write_out_value (WriteOut v r) = v"
primrec write_out_ret :: "('v, 'r) write_out \<Rightarrow> 'r" where
"write_out_ret (WriteOut v r) = r"
text \<open>And a locale which fixes the initial value and transition function w.\<close>
locale data_type =
fixes init :: "'v"
and w :: "'v \<Rightarrow> 'a \<Rightarrow> ('v, 'r) write_out"
begin
text \<open>Given a sequence of write arguments, we can construct a function which applies those arguments
in sequence.\<close>
primrec apply_args :: "'v \<Rightarrow> 'a list \<Rightarrow> 'v" where
"apply_args v [] = v" |
"apply_args v (a # as) = apply_args (write_out_value (w v a)) as"
text \<open>Some lemmata around apply_args\<close>
lemma apply_args_Cons: "apply_args v (a#as) = apply_args (write_out_value (w v a)) as"
by auto
text \<open>A database is traceable if, for any version, there exists exactly one sequence of args that
leads to that version.\<close>
definition is_traceable :: "bool" where
"is_traceable \<equiv> \<forall> args1 args2 . apply_args init args1 = apply_args init args2 \<longrightarrow> args1 = args2"
end
locale traceable_data_type = data_type init w for init w +
assumes traceable:"is_traceable"
begin
text \<open>Here, we prove facts about traceable data types.\<close>
end
section \<open>Append-only lists\<close>
text \<open>We begin by showing that list append over lists of naturals can form a data type.\<close>
definition list_append_w :: "'x list \<Rightarrow> 'x \<Rightarrow> ('x list, bool) write_out" where
"list_append_w xs x \<equiv> WriteOut (xs @ [x]) True"
value "list_append_w [a, b] c"
interpretation list_append: data_type "[]" list_append_w .
text \<open>We want to show this datatype is traceable. First, we prove that applying a sequence of args
produces that list of args itself.\<close>
value "list_append.apply_args x [a,b]"
text \<open>The induction has to generalise over the starting version, since each write
extends the accumulator.\<close>
lemma list_append_apply_args: "list_append.apply_args ys xs = ys @ xs"
proof (induct xs arbitrary: ys)
  case Nil
  then show ?case by simp
next
  case (Cons x xs)
  then show ?case by (simp add: list_append_w_def)
qed
lemma list_append_args_are_value: "list_append.apply_args [] xs = xs"
  by (simp add: list_append_apply_args)
text \<open>Traceability now follows: equal results force equal argument lists.\<close>
interpretation list_append_traceable: traceable_data_type "[]" list_append_w
  by unfold_locales
     (simp add: list_append.is_traceable_def list_append_args_are_value)
end
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.Monoid.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.Function
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.Transport
open import Cubical.Foundations.SIP
open import Cubical.Data.Sigma
open import Cubical.Structures.Axioms
open import Cubical.Structures.Auto
open import Cubical.Algebra.Semigroup hiding (⟨_⟩)
open Iso
private
variable
ℓ : Level
record IsMonoid {A : Type ℓ} (ε : A) (_·_ : A → A → A) : Type ℓ where
constructor ismonoid
field
isSemigroup : IsSemigroup _·_
identity : (x : A) → (x · ε ≡ x) × (ε · x ≡ x)
open IsSemigroup isSemigroup public
lid : (x : A) → ε · x ≡ x
lid x = identity x .snd
rid : (x : A) → x · ε ≡ x
rid x = identity x .fst
record Monoid : Type (ℓ-suc ℓ) where
constructor monoid
field
Carrier : Type ℓ
ε : Carrier
_·_ : Carrier → Carrier → Carrier
isMonoid : IsMonoid ε _·_
infixl 7 _·_
open IsMonoid isMonoid public
-- semigrp : Semigroup
-- semigrp = record { isSemigroup = isSemigroup }
-- open Semigroup semigrp public
-- Extractor for the carrier type
⟨_⟩ : Monoid → Type ℓ
⟨_⟩ = Monoid.Carrier
η-isMonoid : {A : Type ℓ} {ε : A} {_∙_ : A → A → A} (b : IsMonoid ε _∙_)
→ ismonoid (IsMonoid.isSemigroup b) (IsMonoid.identity b) ≡ b
IsMonoid.isSemigroup (η-isMonoid b i) = IsMonoid.isSemigroup b
IsMonoid.identity (η-isMonoid b i) = IsMonoid.identity b
-- Easier-to-use constructors
makeIsMonoid : {M : Type ℓ} {ε : M} {_·_ : M → M → M}
(is-setM : isSet M)
(assoc : (x y z : M) → x · (y · z) ≡ (x · y) · z)
(rid : (x : M) → x · ε ≡ x)
(lid : (x : M) → ε · x ≡ x)
→ IsMonoid ε _·_
IsMonoid.isSemigroup (makeIsMonoid is-setM assoc rid lid) = issemigroup is-setM assoc
IsMonoid.identity (makeIsMonoid is-setM assoc rid lid) = λ x → rid x , lid x
makeMonoid : {M : Type ℓ} (ε : M) (_·_ : M → M → M)
(is-setM : isSet M)
(assoc : (x y z : M) → x · (y · z) ≡ (x · y) · z)
(rid : (x : M) → x · ε ≡ x)
(lid : (x : M) → ε · x ≡ x)
→ Monoid
makeMonoid ε _·_ is-setM assoc rid lid =
monoid _ ε _·_ (makeIsMonoid is-setM assoc rid lid)
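-- For example (a sketch, not checked in this module: it assumes ℕ, _+_, isSetℕ, +-assoc
-- and +-zero from Cubical.Data.Nat are in scope):
--   ℕ+ : Monoid
--   ℕ+ = makeMonoid 0 _+_ isSetℕ +-assoc +-zero (λ _ → refl)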
record MonoidEquiv (M N : Monoid {ℓ}) : Type ℓ where
constructor monoidiso
private
module M = Monoid M
module N = Monoid N
field
e : ⟨ M ⟩ ≃ ⟨ N ⟩
presε : equivFun e M.ε ≡ N.ε
isHom : (x y : ⟨ M ⟩) → equivFun e (x M.· y) ≡ equivFun e x N.· equivFun e y
module MonoidΣTheory {ℓ} where
RawMonoidStructure : Type ℓ → Type ℓ
RawMonoidStructure X = X × (X → X → X)
RawMonoidEquivStr = AutoEquivStr RawMonoidStructure
rawMonoidUnivalentStr : UnivalentStr _ RawMonoidEquivStr
rawMonoidUnivalentStr = autoUnivalentStr RawMonoidStructure
MonoidAxioms : (M : Type ℓ) → RawMonoidStructure M → Type ℓ
MonoidAxioms M (e , _·_) = IsSemigroup _·_
× ((x : M) → (x · e ≡ x) × (e · x ≡ x))
MonoidStructure : Type ℓ → Type ℓ
MonoidStructure = AxiomsStructure RawMonoidStructure MonoidAxioms
MonoidΣ : Type (ℓ-suc ℓ)
MonoidΣ = TypeWithStr ℓ MonoidStructure
isPropMonoidAxioms : (M : Type ℓ) (s : RawMonoidStructure M) → isProp (MonoidAxioms M s)
isPropMonoidAxioms M (e , _·_) =
isPropΣ (isPropIsSemigroup _·_)
λ α → isPropΠ λ _ → isProp× (IsSemigroup.is-set α _ _) (IsSemigroup.is-set α _ _)
MonoidEquivStr : StrEquiv MonoidStructure ℓ
MonoidEquivStr = AxiomsEquivStr RawMonoidEquivStr MonoidAxioms
MonoidAxiomsIsoIsMonoid : {M : Type ℓ} (s : RawMonoidStructure M)
→ Iso (MonoidAxioms M s) (IsMonoid (s .fst) (s .snd))
fun (MonoidAxiomsIsoIsMonoid s) (x , y) = ismonoid x y
inv (MonoidAxiomsIsoIsMonoid s) a = (IsMonoid.isSemigroup a) , IsMonoid.identity a
rightInv (MonoidAxiomsIsoIsMonoid s) b = η-isMonoid b
leftInv (MonoidAxiomsIsoIsMonoid s) _ = refl
MonoidAxioms≡IsMonoid : {M : Type ℓ} (s : RawMonoidStructure M)
→ MonoidAxioms M s ≡ IsMonoid (s .fst) (s .snd)
MonoidAxioms≡IsMonoid s = isoToPath (MonoidAxiomsIsoIsMonoid s)
open Monoid
Monoid→MonoidΣ : Monoid → MonoidΣ
Monoid→MonoidΣ M =
⟨ M ⟩ , ((ε M) , _·_ M) , MonoidAxiomsIsoIsMonoid ((ε M) , _·_ M) .inv (isMonoid M)
MonoidΣ→Monoid : MonoidΣ → Monoid
MonoidΣ→Monoid (M , (ε , _·_) , isMonoidΣ) =
monoid M ε _·_ (MonoidAxiomsIsoIsMonoid (ε , _·_) .fun isMonoidΣ)
MonoidIsoMonoidΣ : Iso Monoid MonoidΣ
MonoidIsoMonoidΣ =
iso Monoid→MonoidΣ MonoidΣ→Monoid (λ _ → refl) helper
where
helper : _
Carrier (helper a i) = ⟨ a ⟩
ε (helper a i) = ε a
_·_ (helper a i) = _·_ a
isMonoid (helper a i) = η-isMonoid (isMonoid a) i
monoidUnivalentStr : UnivalentStr MonoidStructure MonoidEquivStr
monoidUnivalentStr = axiomsUnivalentStr _ isPropMonoidAxioms rawMonoidUnivalentStr
MonoidΣPath : (M N : MonoidΣ) → (M ≃[ MonoidEquivStr ] N) ≃ (M ≡ N)
MonoidΣPath = SIP monoidUnivalentStr
MonoidEquivΣ : (M N : Monoid) → Type ℓ
MonoidEquivΣ M N = Monoid→MonoidΣ M ≃[ MonoidEquivStr ] Monoid→MonoidΣ N
MonoidIsoΣPath : {M N : Monoid} → Iso (MonoidEquiv M N) (MonoidEquivΣ M N)
fun MonoidIsoΣPath (monoidiso e h1 h2) = (e , h1 , h2)
inv MonoidIsoΣPath (e , h1 , h2) = monoidiso e h1 h2
rightInv MonoidIsoΣPath _ = refl
leftInv MonoidIsoΣPath _ = refl
MonoidPath : (M N : Monoid) → (MonoidEquiv M N) ≃ (M ≡ N)
MonoidPath M N =
MonoidEquiv M N ≃⟨ isoToEquiv MonoidIsoΣPath ⟩
MonoidEquivΣ M N ≃⟨ MonoidΣPath _ _ ⟩
Monoid→MonoidΣ M ≡ Monoid→MonoidΣ N ≃⟨ isoToEquiv (invIso (congIso MonoidIsoMonoidΣ)) ⟩
M ≡ N ■
RawMonoidΣ : Type (ℓ-suc ℓ)
RawMonoidΣ = TypeWithStr ℓ RawMonoidStructure
Monoid→RawMonoidΣ : Monoid → RawMonoidΣ
Monoid→RawMonoidΣ A = ⟨ A ⟩ , (ε A) , (_·_ A)
InducedMonoid : (M : Monoid) (N : RawMonoidΣ) (e : M .Monoid.Carrier ≃ N .fst)
→ RawMonoidEquivStr (Monoid→RawMonoidΣ M) N e → Monoid
InducedMonoid M N e r =
MonoidΣ→Monoid (transferAxioms rawMonoidUnivalentStr (Monoid→MonoidΣ M) N (e , r))
InducedMonoidPath : (M : Monoid {ℓ}) (N : RawMonoidΣ) (e : M .Monoid.Carrier ≃ N .fst)
(E : RawMonoidEquivStr (Monoid→RawMonoidΣ M) N e)
→ M ≡ InducedMonoid M N e E
InducedMonoidPath M N e E =
MonoidPath M (InducedMonoid M N e E) .fst (monoidiso e (E .fst) (E .snd))
-- We now extract the important results from the above module
isPropIsMonoid : {M : Type ℓ} (ε : M) (_·_ : M → M → M) → isProp (IsMonoid ε _·_)
isPropIsMonoid ε _·_ =
subst isProp (MonoidΣTheory.MonoidAxioms≡IsMonoid (ε , _·_))
(MonoidΣTheory.isPropMonoidAxioms _ (ε , _·_))
MonoidPath : (M N : Monoid {ℓ}) → (MonoidEquiv M N) ≃ (M ≡ N)
MonoidPath = MonoidΣTheory.MonoidPath
InducedMonoid : (M : Monoid {ℓ}) (N : MonoidΣTheory.RawMonoidΣ) (e : M .Monoid.Carrier ≃ N .fst)
→ MonoidΣTheory.RawMonoidEquivStr (MonoidΣTheory.Monoid→RawMonoidΣ M) N e
→ Monoid
InducedMonoid = MonoidΣTheory.InducedMonoid
InducedMonoidPath : (M : Monoid {ℓ}) (N : MonoidΣTheory.RawMonoidΣ) (e : M .Monoid.Carrier ≃ N .fst)
(E : MonoidΣTheory.RawMonoidEquivStr (MonoidΣTheory.Monoid→RawMonoidΣ M) N e)
→ M ≡ InducedMonoid M N e E
InducedMonoidPath = MonoidΣTheory.InducedMonoidPath
module MonoidTheory {ℓ} (M' : Monoid {ℓ}) where
open Monoid M' renaming ( Carrier to M )
-- Added for use in groups
-- If an element has both a left and a right inverse, they coincide; inverses are unique
inv-lemma : (x y z : M) → y · x ≡ ε → x · z ≡ ε → y ≡ z
inv-lemma x y z left-inverse right-inverse =
y ≡⟨ sym (rid y) ⟩
y · ε ≡⟨ cong (λ - → y · -) (sym right-inverse) ⟩
y · (x · z) ≡⟨ assoc y x z ⟩
(y · x) · z ≡⟨ cong (λ - → - · z) left-inverse ⟩
ε · z ≡⟨ lid z ⟩
z ∎
|
Formal statement is: lemma sum_emeasure: "F`I \<subseteq> sets M \<Longrightarrow> disjoint_family_on F I \<Longrightarrow> finite I \<Longrightarrow> (\<Sum>i\<in>I. emeasure M (F i)) = emeasure M (\<Union>i\<in>I. F i)" Informal statement is: If $F$ is a finite family of disjoint measurable sets, then the sum of the measures of the sets in $F$ is equal to the measure of the union of the sets in $F$. |
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory ArchArch_AI
imports "../Arch_AI"
begin
context Arch begin global_naming X64
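(* valid_aci: preconditions for ASIDControl_MakePool. The destination slot is an empty,
   real CNode slot that some cap grants access to; the parent slot holds an untyped cap
   covering exactly one page frame with no children; and the requested ASID base is
   aligned, well-formed, and not yet present in the high-level ASID table. *)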
definition
"valid_aci aci \<equiv> case aci of MakePool frame slot parent base \<Rightarrow>
\<lambda>s. cte_wp_at (\<lambda>c. c = NullCap) slot s \<and> real_cte_at slot s \<and>
ex_cte_cap_wp_to is_cnode_cap slot s \<and>
slot \<noteq> parent \<and>
cte_wp_at (\<lambda>cap. \<exists>idx. cap = UntypedCap False frame pageBits idx) parent s \<and>
descendants_of parent (cdt s) = {} \<and>
is_aligned base asid_low_bits \<and> asid_wf base \<and>
x64_asid_table (arch_state s) (asid_high_bits_of base) = None"
definition
valid_iocontrol_inv :: "io_port_control_invocation \<Rightarrow> 'a::state_ext state \<Rightarrow> bool"
where
"valid_iocontrol_inv iopc \<equiv> case iopc of
IOPortControlInvocation f l dest_slot src_slot \<Rightarrow> (cte_wp_at ((=) NullCap) dest_slot
and cte_wp_at ((=) (ArchObjectCap IOPortControlCap)) src_slot
and ex_cte_cap_wp_to is_cnode_cap dest_slot
and real_cte_at dest_slot
and (\<lambda>s. {f..l} \<inter> issued_ioports (arch_state s) = {})
and K (f \<le> l))"
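(* valid_iocontrol_inv: issuing the IO port range [f..l] requires an empty, real destination
   slot reachable via some cnode cap, the IOPortControlCap at the source slot, a nonempty
   range (f \<le> l), and that no port in the range has been issued already. *)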
lemma safe_parent_strg:
"cte_wp_at (\<lambda>cap. cap = UntypedCap False frame pageBits idx) p s \<and>
descendants_of p (cdt s) = {} \<and>
valid_objs s
\<longrightarrow>
cte_wp_at (safe_parent_for (cdt s) p
(ArchObjectCap (ASIDPoolCap frame base)))
p s"
apply (clarsimp simp: cte_wp_at_caps_of_state safe_parent_for_def is_physical_def arch_is_physical_def)
apply (rule is_aligned_no_overflow)
apply (drule (1) caps_of_state_valid_cap)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
done
(* 64-bit (machine_word) instance of Detype_AI.range_cover_full *)
lemma range_cover_full:
"\<lbrakk>is_aligned ptr sz; sz<word_bits\<rbrakk> \<Longrightarrow> range_cover (ptr::machine_word) sz sz (Suc 0)"
by (clarsimp simp:range_cover_def unat_eq_0 le_mask_iff[symmetric] word_and_le1 word_bits_def)
definition
valid_arch_inv :: "arch_invocation \<Rightarrow> 'z::state_ext state \<Rightarrow> bool"
where
"valid_arch_inv ai \<equiv> case ai of
InvokePageTable pti \<Rightarrow> valid_pti pti
| InvokePageDirectory pdi \<Rightarrow> valid_pdi pdi
| InvokePDPT pdpti \<Rightarrow> valid_pdpti pdpti
| InvokePage pgi \<Rightarrow> valid_page_inv pgi
| InvokeASIDControl aci \<Rightarrow> valid_aci aci
| InvokeASIDPool api \<Rightarrow> valid_apinv api
| InvokeIOPort iopi \<Rightarrow> \<top>
| InvokeIOPortControl iopci \<Rightarrow> valid_iocontrol_inv iopci"
lemma check_vp_wpR [wp]:
"\<lbrace>\<lambda>s. vmsz_aligned w sz \<longrightarrow> P () s\<rbrace>
check_vp_alignment sz w \<lbrace>P\<rbrace>, -"
apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong)
apply (rule hoare_pre)
apply (wp hoare_whenE_wp|wpc)+
apply (simp add: vmsz_aligned_def)
done
lemma check_vp_inv: "\<lbrace>P\<rbrace> check_vp_alignment sz w \<lbrace>\<lambda>_. P\<rbrace>"
apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong)
apply (rule hoare_pre)
apply (wp hoare_whenE_wp|wpc)+
apply simp
done
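(* On X64, asid_low_bits = 9 and asid_high_bits = 3, so the low and high parts of an ASID
   fit in 9-bit and 3-bit words respectively; the following lemmas transfer domain facts
   between machine words and these narrow index types. *)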
lemma p2_low_bits_max:
"(2 ^ asid_low_bits - 1) = (max_word :: 9 word)"
by (simp add: asid_low_bits_def max_word_def)
lemma dom_ucast_eq:
"(- dom (\<lambda>a::9 word. p (ucast a::machine_word)) \<inter> {x. ucast x + y \<noteq> 0} = {}) =
(- dom p \<inter> {x. x \<le> 2 ^ asid_low_bits - 1 \<and> x + y \<noteq> 0} = {})"
apply safe
apply clarsimp
apply (rule ccontr)
apply (erule_tac x="ucast x" in in_emptyE)
apply (clarsimp simp: p2_low_bits_max)
apply (rule conjI)
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_low_bits_def)
apply simp
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_low_bits_def)
apply simp
apply (clarsimp simp: p2_low_bits_max)
apply (rule ccontr)
apply simp
apply (erule_tac x="ucast x" in in_emptyE)
apply clarsimp
apply (rule conjI, blast)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less, simp)
apply (simp add: asid_low_bits_def)
done
lemma asid_high_bits_max_word:
"(2 ^ asid_high_bits - 1) = (max_word :: 3 word)"
by (simp add: asid_high_bits_def max_word_def)
lemma dom_ucast_eq_8:
"(- dom (\<lambda>a::3 word. p (ucast a::machine_word)) = {}) =
(- dom p \<inter> {x. x \<le> 2 ^ asid_high_bits - 1} = {})"
apply safe
apply clarsimp
apply (rule ccontr)
apply (erule_tac x="ucast x" in in_emptyE)
apply (clarsimp simp: asid_high_bits_max_word)
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_high_bits_def)
apply simp
apply (clarsimp simp: asid_high_bits_max_word)
apply (rule ccontr)
apply simp
apply (erule_tac x="ucast x" in in_emptyE)
apply clarsimp
apply (rule conjI, blast)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less, simp)
apply (simp add: asid_high_bits_def)
done
lemma ucast_fst_hd_assocs:
"- dom (\<lambda>x. pool (ucast (x::9 word)::machine_word)) \<inter> {x. ucast x + (w::machine_word) \<noteq> 0} \<noteq> {}
\<Longrightarrow>
fst (hd [(x, y)\<leftarrow>assocs pool . x \<le> 2 ^ asid_low_bits - 1 \<and> x + w \<noteq> 0 \<and> y = None]) =
ucast (fst (hd [(x, y)\<leftarrow>assocs (\<lambda>a::9 word. pool (ucast a)) .
x \<le> 2 ^ asid_low_bits - 1 \<and>
ucast x + w \<noteq> 0 \<and> y = None]))"
apply (simp add: ucast_assocs[unfolded o_def])
apply (simp add: filter_map split_def)
apply (simp cong: conj_cong add: ucast_ucast_len)
apply (simp add: asid_low_bits_def minus_one_norm)
apply (simp add: ord_le_eq_trans [OF word_n1_ge])
apply (simp add: word_le_make_less)
apply (subgoal_tac "P" for P) (* cut_tac but more awesome *)
apply (subst hd_map, assumption)
apply simp
apply (rule sym, rule ucast_ucast_len)
apply (drule hd_in_set)
apply simp
apply (simp add: assocs_empty_dom_comp null_def split_def)
apply (simp add: ucast_assocs[unfolded o_def] filter_map split_def)
apply (simp cong: conj_cong add: ucast_ucast_len)
done
crunch typ_at [wp]:
perform_page_table_invocation, perform_page_directory_invocation, perform_pdpt_invocation,
perform_page_invocation, perform_asid_pool_invocation, perform_io_port_invocation,
perform_ioport_control_invocation
"\<lambda>s. P (typ_at T p s)"
(wp: crunch_wps)
lemmas perform_page_table_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_table_invocation_typ_at]
lemmas perform_page_directory_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_directory_invocation_typ_at]
lemmas perform_pdpt_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_pdpt_invocation_typ_at]
lemmas perform_page_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_invocation_typ_at]
lemmas perform_asid_pool_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_asid_pool_invocation_typ_at]
lemmas perform_io_port_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_io_port_invocation_typ_at]
lemmas perform_ioport_control_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_ioport_control_invocation_typ_at]
lemma perform_asid_control_invocation_tcb_at:
"\<lbrace>invs and valid_aci aci and st_tcb_at active p and
K (\<forall>w a b c. aci = asid_control_invocation.MakePool w a b c \<longrightarrow> w \<noteq> p)\<rbrace>
perform_asid_control_invocation aci
\<lbrace>\<lambda>rv. tcb_at p\<rbrace>"
apply (simp add: perform_asid_control_invocation_def)
apply (cases aci)
apply clarsimp
apply (wp |simp)+
apply (wp obj_at_delete_objects retype_region_obj_at_other2 hoare_vcg_const_imp_lift|assumption)+
apply (intro impI conjI)
apply (clarsimp simp: retype_addrs_def obj_bits_api_def default_arch_object_def image_def ptr_add_def)
apply (clarsimp simp: st_tcb_at_tcb_at)+
apply (frule st_tcb_ex_cap)
apply fastforce
apply (clarsimp split: Structures_A.thread_state.splits)
apply auto[1]
apply (clarsimp simp: ex_nonz_cap_to_def valid_aci_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)
apply simp
apply (simp add:cte_wp_at_caps_of_state)
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs)
apply (erule(1) in_empty_interE)
apply (clarsimp simp:page_bits_def)
apply simp
done
lemma ucast_asid_high_btis_of_le [simp]:
"ucast (asid_high_bits_of w) \<le> (2 ^ asid_high_bits - 1 :: word32)"
apply (simp add: asid_high_bits_of_def)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less)
apply simp
apply (simp add: asid_high_bits_def)
done
lemma invoke_arch_tcb:
"\<lbrace>invs and valid_arch_inv ai and st_tcb_at active tptr\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. tcb_at tptr\<rbrace>"
apply (simp add: arch_perform_invocation_def)
apply (cases ai; simp; (wp; clarsimp simp add: st_tcb_at_tcb_at)?)
apply (wp perform_asid_control_invocation_tcb_at)
apply (clarsimp simp add: valid_arch_inv_def)
apply (clarsimp simp: valid_aci_def)
apply (frule st_tcb_ex_cap)
apply fastforce
apply (clarsimp split: Structures_A.thread_state.splits)
apply auto[1]
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)+
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs cte_wp_at_caps_of_state)
apply (drule_tac p="(aa,ba)" in caps_of_state_valid_cap, fastforce)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
apply (drule_tac x=tptr in base_member_set, simp)
apply (simp add: pageBits_def field_simps del: atLeastAtMost_iff)
apply (metis (no_types) orthD1 x_power_minus_1)
apply simp
done
end
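(* The asid_update locale fixes an empty ASID pool ap and a fresh high-ASID slot in a
   state s, and names s' the state whose ASID table additionally maps that slot to ap.
   Its lemmas transfer vs_lookup facts and the main invariants from s to s'. *)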
locale asid_update = Arch +
fixes ap asid s s'
assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s"
assumes empty: "x64_asid_table (arch_state s) asid = None"
defines "s' \<equiv> s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>"
context asid_update begin
lemma vs_lookup1' [simp]:
"vs_lookup1 s' = vs_lookup1 s"
by (simp add: vs_lookup1_def s'_def)
lemma vs_lookup_pages1' [simp]:
"vs_lookup_pages1 s' = vs_lookup_pages1 s"
by (simp add: vs_lookup_pages1_def s'_def)
lemma vs_asid_refs' [simp]:
"vs_asid_refs (x64_asid_table (arch_state s')) =
vs_asid_refs (x64_asid_table (arch_state s)) \<union> {([VSRef (ucast asid) None], ap)}"
apply (simp add: s'_def)
apply (rule set_eqI)
apply (rule iffI)
apply (auto simp: vs_asid_refs_def split: if_split_asm)[1]
apply clarsimp
apply (erule disjE)
apply (auto simp: vs_asid_refs_def)[1]
apply (subst (asm) vs_asid_refs_def)
apply (clarsimp dest!: graph_ofD)
apply (rule vs_asid_refsI)
apply (clarsimp simp: empty)
done
lemma vs_lookup':
"vs_lookup s' = vs_lookup s \<union> {([VSRef (ucast asid) None], ap)}"
using ko
apply (simp add: vs_lookup_def)
apply (rule rtrancl_insert)
apply (clarsimp simp: vs_lookup1_def obj_at_def vs_refs_def)
done
lemma vs_lookup_pages':
"vs_lookup_pages s' = vs_lookup_pages s \<union> {([VSRef (ucast asid) None], ap)}"
using ko
apply (simp add: vs_lookup_pages_def)
apply (rule rtrancl_insert)
apply (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def)
done
lemma obj_at [simp]:
"obj_at P p s' = obj_at P p s"
by (simp add: s'_def)
lemma vs_lookup_neq: "\<lbrakk>(rs \<rhd> p) s' ; p \<noteq> ap\<rbrakk> \<Longrightarrow> (rs \<rhd> p) s"
by (clarsimp simp: vs_lookup')
lemma vspace_objs':
"valid_vspace_objs s \<Longrightarrow> valid_vspace_objs s'"
using ko
apply (clarsimp simp: valid_vspace_objs_def)
apply (erule_tac x=p in allE)
apply (case_tac "p = ap";
case_tac ao;
fastforce simp: obj_at_def s'_def
intro: vs_lookup_neq)
done
lemma global_objs':
"valid_global_objs s \<Longrightarrow> valid_global_objs s'"
apply (clarsimp simp: valid_global_objs_def valid_ao_at_def second_level_tables_def)
apply (auto simp: s'_def)
done
lemma caps_of_state_s':
"caps_of_state s' = caps_of_state s"
by (rule caps_of_state_pspace, simp add: s'_def)
lemma valid_vs_lookup':
"\<lbrakk> valid_vs_lookup s;
\<exists>ptr cap. caps_of_state s ptr = Some cap
\<and> ap \<in> obj_refs cap \<and> vs_cap_ref cap = Some [VSRef (ucast asid) None] \<rbrakk>
\<Longrightarrow> valid_vs_lookup s'"
by (clarsimp simp: valid_vs_lookup_def caps_of_state_s' vs_lookup_pages')
lemma valid_table_caps':
"\<lbrakk> valid_table_caps s \<rbrakk>
\<Longrightarrow> valid_table_caps s'"
apply (simp add: valid_table_caps_def caps_of_state_s' second_level_tables_def)
apply (simp add: s'_def)
done
lemma valid_arch_caps:
"\<lbrakk> valid_arch_caps s;
\<exists>ptr cap. caps_of_state s ptr = Some cap
\<and> ap \<in> obj_refs cap \<and> vs_cap_ref cap = Some [VSRef (ucast asid) None] \<rbrakk>
\<Longrightarrow> valid_arch_caps s'"
by (simp add: valid_arch_caps_def caps_of_state_s'
valid_table_caps' valid_vs_lookup')
lemma valid_asid_map':
"valid_asid_map s \<Longrightarrow> valid_asid_map s'"
by (clarsimp simp: valid_asid_map_def)
end
context Arch begin global_naming X64
lemma valid_arch_state_strg:
"valid_arch_state s \<and> ap \<notin> ran (x64_asid_table (arch_state s)) \<and> asid_pool_at ap s \<longrightarrow>
valid_arch_state (s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply (clarsimp simp: valid_arch_state_def)
apply (clarsimp simp: valid_asid_table_def ran_def)
apply (fastforce intro!: inj_on_fun_updI)
done
lemma valid_vs_lookup_at_upd_strg:
"valid_vs_lookup s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
x64_asid_table (arch_state s) asid = None \<and>
(\<exists>ptr cap. caps_of_state s ptr = Some cap \<and> ap \<in> obj_refs cap \<and>
vs_cap_ref cap = Some [VSRef (ucast asid) None])
\<longrightarrow>
valid_vs_lookup (s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.valid_vs_lookup')
apply fastforce
done
lemma retype_region_ap:
"\<lbrace>\<top>\<rbrace>
retype_region ap 1 0 (ArchObject ASIDPoolObj) dev
\<lbrace>\<lambda>_. ko_at (ArchObj (ASIDPool Map.empty)) ap\<rbrace>"
apply (rule hoare_post_imp)
prefer 2
apply (rule retype_region_obj_at)
apply simp
apply simp
apply (clarsimp simp: retype_addrs_def obj_bits_api_def default_arch_object_def)
apply (clarsimp simp: obj_at_def default_object_def default_arch_object_def)
done
lemma retype_region_ap':
"\<lbrace>\<top>\<rbrace> retype_region ap 1 0 (ArchObject ASIDPoolObj) dev \<lbrace>\<lambda>rv. asid_pool_at ap\<rbrace>"
apply (rule hoare_strengthen_post, rule retype_region_ap)
apply (clarsimp simp: a_type_def elim!: obj_at_weakenE)
done
lemma no_cap_to_obj_with_diff_ref_null_filter:
"no_cap_to_obj_with_diff_ref cap S
= (\<lambda>s. \<forall>c \<in> ran (null_filter (caps_of_state s) |` (- S)).
obj_refs c = obj_refs cap
\<longrightarrow> table_cap_ref c = table_cap_ref cap)"
apply (simp add: no_cap_to_obj_with_diff_ref_def
ball_ran_eq cte_wp_at_caps_of_state)
apply (simp add: Ball_def)
apply (intro iff_allI ext)
apply (simp add: restrict_map_def null_filter_def)
apply (auto dest!: obj_ref_none_no_asid[rule_format]
simp: table_cap_ref_def)
done
lemma retype_region_no_cap_to_obj:
"\<lbrace>valid_pspace and valid_mdb
and caps_overlap_reserved {ptr..ptr + 2 ^ obj_bits_api ty us - 1}
and caps_no_overlap ptr sz
and pspace_no_overlap_range_cover ptr sz
and no_cap_to_obj_with_diff_ref cap S
and (\<lambda>s. \<exists>slot. cte_wp_at (\<lambda>c. up_aligned_area ptr sz \<subseteq> cap_range c \<and> cap_is_device c = dev) slot s)
and K (ty = CapTableObject \<longrightarrow> 0 < us)
and K (range_cover ptr sz (obj_bits_api ty us) 1) \<rbrace>
retype_region ptr 1 us ty dev
\<lbrace>\<lambda>rv. no_cap_to_obj_with_diff_ref cap S\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: no_cap_to_obj_with_diff_ref_null_filter)
apply (wp retype_region_caps_of | simp)+
apply fastforce
done
lemma valid_table_caps_asid_upd [iff]:
"valid_table_caps (s\<lparr>arch_state := (x64_asid_table_update f (arch_state s))\<rparr>) =
valid_table_caps s"
by (simp add: valid_table_caps_def second_level_tables_def)
lemma vs_asid_ref_upd:
"([VSRef (ucast (asid_high_bits_of asid')) None] \<rhd> ap')
(s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)
= (if asid_high_bits_of asid' = asid_high_bits_of asid
then ap' = ap
else ([VSRef (ucast (asid_high_bits_of asid')) None] \<rhd> ap') s)"
by (fastforce intro: vs_lookup_atI elim: vs_lookup_atE)
lemma vs_asid_ref_eq:
"([VSRef (ucast asid) None] \<rhd> ap) s
= (x64_asid_table (arch_state s) asid = Some ap)"
by (fastforce elim: vs_lookup_atE intro: vs_lookup_atI)
lemma set_cap_reachable_pg_cap:
"\<lbrace>\<lambda>s. P (reachable_pg_cap cap s)\<rbrace> set_cap x y \<lbrace>\<lambda>_ s. P (reachable_pg_cap cap s)\<rbrace>"
by (unfold reachable_pg_cap_def, wp hoare_vcg_ex_lift set_cap.vs_lookup_pages)
lemma cap_insert_simple_arch_caps_ap:
"\<lbrace>valid_arch_caps and (\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s)
and no_cap_to_obj_with_diff_ref cap {dest}
and (\<lambda>s. x64_asid_table (arch_state s) (asid_high_bits_of asid) = None)
and ko_at (ArchObj (ASIDPool Map.empty)) ap
and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s. valid_arch_caps (s\<lparr>arch_state := arch_state s
\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def
set_untyped_cap_as_full_def bind_assoc)
apply (strengthen valid_vs_lookup_at_upd_strg)
apply (wp get_cap_wp set_cap_valid_vs_lookup set_cap_arch_obj
set_cap_valid_table_caps hoare_vcg_all_lift
| simp split del: if_split)+
apply (rule_tac P = "cte_wp_at ((=) src_cap) src" in set_cap_orth)
apply (wp hoare_vcg_imp_lift hoare_vcg_ball_lift set_free_index_final_cap
hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages
| clarsimp)+
apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift
get_cap_wp static_imp_wp)+
apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps)
apply (rule conjI)
apply (clarsimp simp: vs_cap_ref_def)
apply (rule_tac x="fst dest" in exI)
apply (rule_tac x="snd dest" in exI)
apply simp
apply (rule conjI)
apply (simp add: unique_table_caps_def is_cap_simps)
apply (subst unique_table_refs_def)
apply (intro allI impI)
apply (simp split: if_split_asm)
apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state)
apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state)
apply (erule (3) unique_table_refsD)
done
lemma valid_asid_map_asid_upd_strg:
"valid_asid_map s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
x64_asid_table (arch_state s) asid = None \<longrightarrow>
valid_asid_map (s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.valid_asid_map')
done
lemma valid_vspace_objs_asid_upd_strg:
"valid_vspace_objs s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
x64_asid_table (arch_state s) asid = None \<longrightarrow>
valid_vspace_objs (s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.vspace_objs')
done
lemma valid_global_objs_asid_upd_strg:
"valid_global_objs s \<and>
ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \<and>
x64_asid_table (arch_state s) asid = None \<longrightarrow>
valid_global_objs (s\<lparr>arch_state := arch_state s\<lparr>x64_asid_table := x64_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
by clarsimp
lemma safe_parent_cap_is_device:
"safe_parent_for m p cap pcap \<Longrightarrow> cap_is_device cap = cap_is_device pcap"
by (simp add: safe_parent_for_def)
lemma cap_insert_ioports_ap:
"\<lbrace>valid_ioports and (\<lambda>s. cte_wp_at (\<lambda>cap'. safe_ioport_insert cap cap' s) dest s) and
K (is_ap_cap cap)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. valid_ioports\<rbrace>"
apply (simp add: cap_insert_def)
apply (wp get_cap_wp set_cap_ioports' set_untyped_cap_as_full_ioports
set_untyped_cap_as_full_gross_ioports
| wpc | simp split del: if_splits)+
done
lemma cap_insert_ap_invs:
"\<lbrace>invs and valid_cap cap and tcb_cap_valid cap dest and
ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and
cte_wp_at (\<lambda>c. c = NullCap) dest and
no_cap_to_obj_with_diff_ref cap {dest} and
(\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and
K (cap = ArchObjectCap (ASIDPoolCap ap asid)) and
(\<lambda>s. \<forall>irq \<in> cap_irqs cap. irq_issued irq s) and
ko_at (ArchObj (ASIDPool Map.empty)) ap and
(\<lambda>s. ap \<notin> ran (x64_asid_table (arch_state s)) \<and>
x64_asid_table (arch_state s) (asid_high_bits_of asid) = None)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s. invs (s\<lparr>arch_state := arch_state s
\<lparr>x64_asid_table := (x64_asid_table \<circ> arch_state) s(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg
valid_asid_map_asid_upd_strg )
apply (simp cong: conj_cong)
apply (rule hoare_pre)
apply (wp cap_insert_simple_mdb cap_insert_iflive
cap_insert_zombies cap_insert_ifunsafe cap_insert_ioports_ap
cap_insert_valid_global_refs cap_insert_idle
valid_irq_node_typ cap_insert_simple_arch_caps_ap)
apply (clarsimp simp: is_simple_cap_def cte_wp_at_caps_of_state is_cap_simps)
apply (frule safe_parent_cap_is_device)
apply (drule safe_parent_cap_range)
apply simp
apply (rule conjI)
prefer 2
apply (clarsimp simp: obj_at_def a_type_def)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (drule_tac p="(a,b)" in caps_of_state_valid_cap, fastforce)
apply (auto simp: obj_at_def is_tcb_def is_cap_table_def
valid_cap_def [where c="cap.Zombie a b x" for a b x]
dest: obj_ref_is_tcb obj_ref_is_cap_table split: option.splits)
done
lemma max_index_upd_no_cap_to:
"\<lbrace>\<lambda>s. no_cap_to_obj_with_diff_ref cap {slot} s \<and>
cte_wp_at ((=) ucap) cref s \<and> is_untyped_cap ucap\<rbrace>
set_cap (max_free_index_update ucap) cref
\<lbrace>\<lambda>rv s. no_cap_to_obj_with_diff_ref cap {slot} s \<rbrace>"
apply (clarsimp simp:no_cap_to_obj_with_diff_ref_def)
apply (wp hoare_vcg_ball_lift set_cap_cte_wp_at_neg)
apply (clarsimp simp:cte_wp_at_caps_of_state free_index_update_def is_cap_simps)
apply (drule_tac x = cref in bspec)
apply clarsimp
apply (clarsimp simp:table_cap_ref_def)
done
lemma perform_asid_control_invocation_st_tcb_at:
"\<lbrace>st_tcb_at (P and (Not \<circ> inactive) and (Not \<circ> idle)) t
and ct_active and invs and valid_aci aci\<rbrace>
perform_asid_control_invocation aci
\<lbrace>\<lambda>y. st_tcb_at P t\<rbrace>"
supply
is_aligned_neg_mask_eq[simp del]
is_aligned_neg_mask_weaken[simp del]
apply (clarsimp simp: perform_asid_control_invocation_def split: asid_control_invocation.splits)
apply (rename_tac word1 a b aa ba word2)
apply (rule hoare_name_pre_state)
apply (subgoal_tac "is_aligned word1 page_bits")
prefer 2
apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state)
apply (drule(1) caps_of_state_valid[rotated])+
apply (simp add:valid_cap_simps cap_aligned_def page_bits_def)
apply (subst delete_objects_rewrite)
apply (simp add:page_bits_def word_bits_def pageBits_def word_size_bits_def)+
apply (simp add:is_aligned_neg_mask_eq)
apply (wp hoare_vcg_const_imp_lift retype_region_st_tcb_at[where sz=page_bits] set_cap_no_overlap|simp)+
apply (strengthen invs_valid_objs invs_psp_aligned)
apply (clarsimp simp:conj_comms)
apply (wp max_index_upd_invs_simple get_cap_wp)+
apply (clarsimp simp: valid_aci_def)
apply (frule intvl_range_conv)
apply (simp add:word_bits_def page_bits_def pageBits_def)
apply (clarsimp simp:detype_clear_um_independent page_bits_def is_aligned_neg_mask_eq)
apply (rule conjI)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (simp only: field_simps)
apply (rule pspace_no_overlap_detype')
apply (rule caps_of_state_valid_cap)
apply (simp add:page_bits_def)+
apply (simp add:invs_valid_objs invs_psp_aligned)+
apply (rule conjI)
apply (erule pred_tcb_weakenE, simp)
apply (rule conjI)
apply (frule st_tcb_ex_cap)
apply clarsimp
apply (clarsimp split: Structures_A.thread_state.splits)
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)+
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs)
apply (fastforce simp:page_bits_def)
apply simp
apply (clarsimp simp:obj_bits_api_def arch_kobj_size_def cte_wp_at_caps_of_state
default_arch_object_def empty_descendants_range_in)
apply (frule_tac cap = "(cap.UntypedCap False word1 pageBits idx)"
in detype_invariants[rotated 3],clarsimp+)
apply (simp add:cte_wp_at_caps_of_state
empty_descendants_range_in descendants_range_def2)+
apply (thin_tac "x = Some cap.NullCap" for x)+
apply (drule(1) caps_of_state_valid_cap[OF _ invs_valid_objs])
apply (intro conjI)
apply (clarsimp simp:valid_cap_def cap_aligned_def range_cover_full
invs_psp_aligned invs_valid_objs page_bits_def)
apply (erule pspace_no_overlap_detype)
apply (auto simp:page_bits_def detype_clear_um_independent)
done
lemma set_cap_idx_up_aligned_area:
"\<lbrace>K (\<exists>idx. pcap = UntypedCap dev ptr pageBits idx) and cte_wp_at ((=) pcap) slot
and valid_objs\<rbrace> set_cap (max_free_index_update pcap) slot
\<lbrace>\<lambda>rv s. (\<exists>slot. cte_wp_at (\<lambda>c. up_aligned_area ptr pageBits \<subseteq> cap_range c \<and> cap_is_device c = dev) slot s)\<rbrace>"
apply (rule hoare_pre)
apply (wp hoare_vcg_ex_lift set_cap_cte_wp_at)
apply (rule_tac x = slot in exI)
apply clarsimp
apply (frule(1) cte_wp_valid_cap)
apply (clarsimp simp: cte_wp_at_caps_of_state is_aligned_neg_mask_eq
p_assoc_help valid_cap_def valid_untyped_def cap_aligned_def)
done
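(* get_untyped_cap_idx projects the free index out of an untyped cap. The main lemma
   aci_invs' below generalises invariant preservation for perform_asid_control_invocation
   over a frame condition Q that ignores arch-state, machine-state and detype updates and
   is preserved by retype_region, set_cap and cap_insert. *)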
primrec(nonexhaustive)
get_untyped_cap_idx :: "cap \<Rightarrow> nat"
where
"get_untyped_cap_idx (UntypedCap dev ref sz idx) = idx"
lemma aci_invs':
assumes Q_ignores_arch[simp]: "\<And>f s. Q (arch_state_update f s) = Q s"
assumes Q_ignore_machine_state[simp]: "\<And>f s. Q (machine_state_update f s) = Q s"
assumes Q_detype[simp]: "\<And>f s. Q (detype f s) = Q s"
assumes cap_insert_Q: "\<And>cap src dest. \<lbrace>Q and invs and K (src \<noteq> dest)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_.Q\<rbrace>"
assumes retype_region_Q[wp]:"\<And>a b c d e. \<lbrace>Q\<rbrace> retype_region a b c d e \<lbrace>\<lambda>_.Q\<rbrace>"
assumes set_cap_Q[wp]: "\<And>a b. \<lbrace>Q\<rbrace> set_cap a b \<lbrace>\<lambda>_.Q\<rbrace>"
shows
"\<lbrace>invs and Q and ct_active and valid_aci aci\<rbrace> perform_asid_control_invocation aci \<lbrace>\<lambda>y s. invs s \<and> Q s\<rbrace>"
proof -
have cap_insert_invsQ:
"\<And>cap src dest ap asid.
\<lbrace>Q and (invs and valid_cap cap and tcb_cap_valid cap dest and
ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and
cte_wp_at (\<lambda>c. c = NullCap) dest and
no_cap_to_obj_with_diff_ref cap {dest} and
(\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and
K (cap = ArchObjectCap (ASIDPoolCap ap asid)) and
(\<lambda>s. \<forall>irq\<in>cap_irqs cap. irq_issued irq s) and
ko_at (ArchObj (ASIDPool Map.empty)) ap and
(\<lambda>s. ap \<notin> ran (x64_asid_table (arch_state s)) \<and>
x64_asid_table (arch_state s) (asid_high_bits_of asid) = None))\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s.
invs
(s\<lparr>arch_state := arch_state s
\<lparr>x64_asid_table := (x64_asid_table \<circ> arch_state) s
(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>) \<and>
Q
(s\<lparr>arch_state := arch_state s
\<lparr>x64_asid_table := (x64_asid_table \<circ> arch_state) s
(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (wp cap_insert_ap_invs)
apply simp
apply (rule hoare_pre)
apply (rule cap_insert_Q)
apply (auto simp: cte_wp_at_caps_of_state)
done
show ?thesis
apply (clarsimp simp: perform_asid_control_invocation_def valid_aci_def
split: asid_control_invocation.splits)
apply (rename_tac word1 a b aa ba word2)
apply (rule hoare_pre)
apply (wp hoare_vcg_const_imp_lift)
apply (wp cap_insert_invsQ hoare_vcg_ex_lift
| simp)+
apply (simp add: valid_cap_def |
strengthen real_cte_tcb_valid safe_parent_strg
invs_vobjs_strgs
ex_cte_cap_to_cnode_always_appropriate_strg)+
apply (wp hoare_vcg_const_imp_lift set_free_index_invs
retype_region_plain_invs[where sz = pageBits]
retype_cte_wp_at[where sz = pageBits] hoare_vcg_ex_lift
retype_region_obj_at_other3[where P="is_cap_table n" and sz = pageBits for n]
retype_region_ex_cte_cap_to[where sz = pageBits]
retype_region_ap[simplified]
retype_region_ap'[simplified]
retype_region_no_cap_to_obj[where sz = pageBits,simplified]
| simp del: split_paired_Ex)+
apply (strengthen invs_valid_objs invs_psp_aligned
invs_mdb invs_valid_pspace
exI[where x="case aci of MakePool frame slot parent base \<Rightarrow> parent"]
exI[where x="case aci of MakePool frame slot parent base \<Rightarrow> parent",
simplified]
caps_region_kernel_window_imp[where
p = "case aci of MakePool frame slot parent base \<Rightarrow> parent"]
invs_cap_refs_in_kernel_window)+
apply (wp set_cap_caps_no_overlap set_cap_no_overlap get_cap_wp
max_index_upd_caps_overlap_reserved max_index_upd_invs_simple
set_cap_cte_cap_wp_to set_cap_cte_wp_at max_index_upd_no_cap_to
| simp split del: if_split | wp (once) hoare_vcg_ex_lift)+
apply (rule_tac P = "is_aligned word1 page_bits" in hoare_gen_asm)
apply (subst delete_objects_rewrite)
apply (simp add:page_bits_def pageBits_def word_size_bits_def)
apply (simp add:page_bits_def pageBits_def word_bits_def)
apply (simp add:is_aligned_neg_mask_eq)
apply wp
apply (clarsimp simp: cte_wp_at_caps_of_state if_option_Some
Misc_Arithmetic.if_bool_simps
split del: if_split)
apply (strengthen refl)
apply (frule_tac cap = "(cap.UntypedCap False word1 pageBits idx)"
in detype_invariants[rotated 3],clarsimp+)
apply (simp add:cte_wp_at_caps_of_state)+
apply (simp add:descendants_range_def2 empty_descendants_range_in)
apply (simp add:invs_mdb invs_valid_pspace invs_psp_aligned invs_valid_objs)
apply (clarsimp dest!:caps_of_state_cteD)
apply (frule(1) unsafe_protected[where p=t and p'=t for t])
apply (simp add:empty_descendants_range_in)+
apply fastforce
apply clarsimp
apply (frule_tac p = "(aa,ba)" in cte_wp_valid_cap)
apply fastforce
apply (clarsimp simp: detype_clear_um_independent obj_bits_api_def arch_kobj_size_def
default_arch_object_def conj_comms)
apply (rule conjI)
apply (clarsimp simp:valid_cap_simps cap_aligned_def page_bits_def not_le)
apply clarsimp
apply (simp add:empty_descendants_range_in)
apply (frule valid_cap_aligned)
apply (clarsimp simp: cap_aligned_def is_aligned_neg_mask_eq)
apply (subst caps_no_overlap_detype[OF descendants_range_caps_no_overlapI],
assumption, simp add: is_aligned_neg_mask_eq,
simp add: empty_descendants_range_in)
apply (frule pspace_no_overlap_detype, clarify+)
apply (frule intvl_range_conv[where bits = pageBits])
apply (simp add:pageBits_def word_bits_def)
apply (simp add:is_aligned_neg_mask_eq)
apply (clarsimp simp:is_aligned_neg_mask_eq page_bits_def)
apply (frule(1) ex_cte_cap_protects)
apply (simp add:empty_descendants_range_in)
apply fastforce
apply (rule subset_refl)
apply fastforce
apply (clarsimp simp: field_simps)
apply (intro conjI impI,
simp_all add:free_index_of_def valid_cap_simps valid_untyped_def
empty_descendants_range_in range_cover_full clear_um_def max_free_index_def,
(clarsimp simp:valid_untyped_def valid_cap_simps)+)[1]
apply (erule(1) cap_to_protected)
apply (simp add:empty_descendants_range_in descendants_range_def2)+
apply clarsimp
apply (drule invs_arch_state)+
apply (clarsimp simp: valid_arch_state_def valid_asid_table_def)
apply (drule (1) bspec)+
apply clarsimp
apply (erule notE, erule is_aligned_no_overflow)
apply (clarsimp simp: no_cap_to_obj_with_diff_ref_def)
apply (thin_tac "cte_wp_at ((=) cap.NullCap) p s" for p s)
apply (subst(asm) eq_commute,
erule(1) untyped_children_in_mdbE[where cap="cap.UntypedCap dev p bits idx" for dev p bits idx,
simplified, rotated])
apply (simp add: is_aligned_no_overflow)
apply simp
apply clarsimp
done
qed
lemmas aci_invs[wp] = aci_invs'[where Q=\<top>,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified]
lemma set_ioport_mask_tcb_cap_valid[wp]:
"\<lbrace>tcb_cap_valid a b\<rbrace> set_ioport_mask f l bl \<lbrace>\<lambda>rv. tcb_cap_valid a b\<rbrace>"
apply (wpsimp simp: set_ioport_mask_def)
by (clarsimp simp: tcb_cap_valid_def)
lemma set_ioport_mask_ex_cte_cap_wp_to[wp]:
"\<lbrace>ex_cte_cap_wp_to a b\<rbrace> set_ioport_mask f l bl \<lbrace>\<lambda>rv. ex_cte_cap_wp_to a b\<rbrace>"
apply (wpsimp simp: set_ioport_mask_def)
by (clarsimp simp: ex_cte_cap_wp_to_def)
lemma no_cap_to_obj_with_diff_IOPort_ARCH:
"no_cap_to_obj_with_diff_ref (ArchObjectCap (IOPortCap f l)) S = \<top>"
by (rule ext, simp add: no_cap_to_obj_with_diff_ref_def
cte_wp_at_caps_of_state
obj_ref_none_no_asid)
lemma IOPort_valid:
"(s \<turnstile> cap.ArchObjectCap (IOPortCap f l)) = (f \<le> l)"
by (simp add: valid_cap_def cap_aligned_def word_bits_conv)
lemma set_ioport_mask_safe_parent_for:
"\<lbrace>\<lambda>s. cte_wp_at (safe_parent_for (cdt s) sl ac) sl s \<and> ac = ArchObjectCap (IOPortCap x1 x2)\<rbrace>
set_ioport_mask x1 x2 True
\<lbrace>\<lambda>rv s. cte_wp_at (safe_parent_for (cdt s) sl ac) sl s\<rbrace>"
apply (rule hoare_pre, wps, wpsimp)
by (clarsimp simp: cte_wp_at_caps_of_state)
lemma set_ioport_mask_safe_ioport_insert:
"\<lbrace>\<lambda>s. cte_wp_at ((=) NullCap) sl s \<and> (\<forall>cap\<in>ran (caps_of_state s). cap_ioports ac \<inter> cap_ioports cap = {}) \<and> ac = (ArchObjectCap (IOPortCap x1 x2))\<rbrace>
set_ioport_mask x1 x2 True
\<lbrace>\<lambda>rv s. cte_wp_at (\<lambda>c. safe_ioport_insert ac c s) sl s\<rbrace>"
apply (clarsimp simp: safe_ioport_insert_def issued_ioports_def set_ioport_mask_def)
apply wpsimp
apply (clarsimp simp: cte_wp_at_caps_of_state)
done
lemma perform_ioport_control_invocation_invs[wp]:
"\<lbrace>invs and valid_iocontrol_inv iopinv\<rbrace> perform_ioport_control_invocation iopinv \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (clarsimp simp: perform_ioport_control_invocation_def)
apply (wpsimp wp: set_ioport_mask_invs set_ioport_mask_cte_wp_at cap_insert_simple_invs
set_ioport_mask_safe_parent_for set_ioport_mask_safe_ioport_insert
simp: is_cap_simps is_simple_cap_def no_cap_to_obj_with_diff_IOPort_ARCH IOPort_valid
| strengthen real_cte_tcb_valid)+
apply (clarsimp simp: valid_iocontrol_inv_def cte_wp_at_caps_of_state tcb_cap_valid_def
ex_cte_cap_to_cnode_always_appropriate_strg safe_parent_for_def
safe_parent_for_arch_def safe_ioport_insert_def)
apply (clarsimp dest!: invs_valid_ioports simp: valid_ioports_def all_ioports_issued_def)
apply (drule_tac x=cap in bspec, assumption)
by blast
lemma invoke_arch_invs[wp]:
"\<lbrace>invs and ct_active and valid_arch_inv ai\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. invs\<rbrace>"
apply (cases ai, simp_all add: valid_arch_inv_def arch_perform_invocation_def)
apply (wp|simp)+
done
lemma
shows sts_empty_pde [wp]: "\<lbrace>empty_pde_at p\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. empty_pde_at p\<rbrace>"
and sts_empty_pdpte [wp]: "\<lbrace>empty_pdpte_at p\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. empty_pdpte_at p\<rbrace>"
and sts_empty_pml4e [wp]: "\<lbrace>empty_pml4e_at p\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. empty_pml4e_at p\<rbrace>"
by (simp add: empty_pde_at_def empty_pdpte_at_def empty_pml4e_at_def;
wp hoare_vcg_ex_lift set_thread_state_ko;
clarsimp simp: is_tcb_def)+
lemma sts_vspace_at_asid [wp]:
"\<lbrace>vspace_at_asid asid pd\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. vspace_at_asid asid pd\<rbrace>"
apply (simp add: vspace_at_asid_def)
apply wp
done
lemma sts_same_refs_inv[wp]:
"\<lbrace>\<lambda>s. same_refs m cap s\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv s. same_refs m cap s\<rbrace>"
by (cases m, (clarsimp simp: same_refs_def, wp)+)
lemma sts_valid_slots_inv[wp]:
"\<lbrace>valid_slots m\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_slots m\<rbrace>"
by (cases m; case_tac a; clarsimp simp: valid_slots_def; wp hoare_vcg_ball_lift sts_typ_ats)
lemma sts_valid_page_inv[wp]:
"\<lbrace>valid_page_inv page_invocation\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_page_inv page_invocation\<rbrace>"
by (cases page_invocation,
(wp hoare_vcg_ex_lift hoare_vcg_disj_lift sts_typ_ats
| clarsimp simp: valid_page_inv_def same_refs_def
| wps)+)
crunch global_refs_inv[wp]: set_thread_state "\<lambda>s. P (global_refs s)"
lemma sts_empty_table[wp]:
"\<lbrace>\<lambda>s. obj_at (empty_table (set (second_level_tables (arch_state s)))) p s\<rbrace>
set_thread_state t st
\<lbrace>\<lambda>rv s. obj_at (empty_table (set (second_level_tables (arch_state s)))) p s\<rbrace>"
by (rule hoare_lift_Pf[OF sts.aobj_at[OF empty_table.arch_only] sts.arch_state])
lemma sts_valid_vspace_table_inv[wp]:
"\<And>i. \<lbrace>valid_pdpti i\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_pdpti i\<rbrace>"
"\<And>i. \<lbrace>valid_pdi i\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_pdi i\<rbrace>"
"\<And>i. \<lbrace>valid_pti i\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_pti i\<rbrace>"
by (case_tac i; simp add: valid_pdpti_def valid_pdi_def valid_pti_def;
wp sts_typ_ats hoare_vcg_ex_lift; clarsimp)+
lemma sts_valid_arch_inv:
"\<lbrace>valid_arch_inv ai\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_arch_inv ai\<rbrace>"
apply (cases ai; simp add: valid_arch_inv_def; wp?)
apply (rename_tac asid_control_invocation)
apply (case_tac asid_control_invocation)
apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state)
apply (rule hoare_pre, wp hoare_vcg_ex_lift cap_table_at_typ_at)
apply clarsimp
apply (clarsimp simp: valid_apinv_def split: asid_pool_invocation.splits)
apply (rule hoare_pre)
apply (wp hoare_vcg_ex_lift set_thread_state_ko)
apply (clarsimp simp: is_tcb_def)
apply (rename_tac ioc, case_tac ioc)
apply (clarsimp simp: valid_iocontrol_inv_def)
apply (wpsimp simp: safe_ioport_insert_def)
done
(* the induct rule matches the wrong parameters first -> crunch blows up *)
lemma create_mapping_entries_inv [wp]:
"\<lbrace>P\<rbrace> create_mapping_entries base vptr vmsz R A pd \<lbrace>\<lambda>_. P\<rbrace>"
by (cases vmsz; clarsimp; wp lookup_pt_slot_inv; simp)
crunch_ignore (add: select_ext)
crunch inv [wp]: arch_decode_invocation "P"
(wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps)
lemma create_mappings_empty [wp]:
"\<lbrace>\<top>\<rbrace> create_mapping_entries base vptr vmsz R A pd \<lbrace>\<lambda>m s. empty_refs m\<rbrace>, -"
by (cases vmsz; wpsimp simp: pdpte_ref_def empty_refs_def)
lemma empty_pde_atI:
"\<lbrakk> ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s;
pd (ucast (p && mask pd_bits >> word_size_bits)) = InvalidPDE \<rbrakk> \<Longrightarrow>
empty_pde_at p s"
by (fastforce simp add: empty_pde_at_def)
lemma empty_pdpte_atI:
"\<lbrakk> ko_at (ArchObj (PDPointerTable pdpt)) (p && ~~ mask pdpt_bits) s;
pdpt (ucast (p && mask pdpt_bits >> word_size_bits)) = InvalidPDPTE \<rbrakk> \<Longrightarrow>
empty_pdpte_at p s"
by (fastforce simp add: empty_pdpte_at_def)
lemma empty_pml4e_atI:
"\<lbrakk> ko_at (ArchObj (PageMapL4 pml4)) (p && ~~ mask pml4_bits) s;
pml4 (ucast (p && mask pml4_bits >> word_size_bits)) = InvalidPML4E \<rbrakk> \<Longrightarrow>
empty_pml4e_at p s"
by (fastforce simp add: empty_pml4e_at_def)
declare lookup_slot_for_cnode_op_cap_to [wp]
lemma shiftr_irrelevant:
"x < 2 ^ asid_low_bits \<Longrightarrow> is_aligned (y :: machine_word) asid_low_bits \<Longrightarrow>
x + y >> asid_low_bits = y >> asid_low_bits"
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: is_aligned_nth)
apply (drule(1) nth_bounded)
apply (simp add: asid_low_bits_def word_bits_def)
apply simp
apply (rule word_eqI)
apply (simp add: nth_shiftr)
apply safe
apply (drule(1) nth_bounded)
apply (simp add: asid_low_bits_def word_bits_def)
apply simp
done
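(* The next three lemmas show that a successful lookup of a pdpt/pd/pt slot yields a slot
   whose table is covered by some live capability with an assigned ASID. *)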
lemma lookup_pdpt_slot_cap_to:
"\<lbrace>invs and \<exists>\<rhd>pm and K (is_aligned pm pml4_bits \<and> vptr < pptr_base \<and> canonical_address vptr)\<rbrace>
lookup_pdpt_slot pm vptr
\<lbrace>\<lambda>rv s. \<exists>a b cap. caps_of_state s (a, b) = Some cap \<and> is_pdpt_cap cap
\<and> rv && ~~ mask pdpt_bits \<in> obj_refs cap
\<and> s \<turnstile> cap \<and> cap_asid cap \<noteq> None\<rbrace>, -"
apply (rule hoare_gen_asmE)
apply (wp lookup_pdpt_slot_wp)
apply clarsimp
apply (drule_tac x=ref in spec)
apply (erule impE, fastforce, clarsimp)
apply (thin_tac "(_ \<rhd> pm) _")
apply (frule valid_vs_lookupD[OF vs_lookup_pages_vs_lookupI]; clarsimp)
apply (intro exI, rule conjI, assumption)
apply (frule (1) caps_of_state_valid)
apply (clarsimp simp: pdpte_at_def)
apply (frule (2) valid_cap_to_pdpt_cap')
by (auto simp: vs_cap_ref_def is_pdpt_cap_def
split: cap.splits arch_cap.splits option.splits)
lemma lookup_pd_slot_cap_to:
"\<lbrace>invs and \<exists>\<rhd>pm and K (is_aligned pm pml4_bits \<and> vptr < pptr_base \<and> canonical_address vptr)\<rbrace>
lookup_pd_slot pm vptr
\<lbrace>\<lambda>rv s. \<exists>a b cap. caps_of_state s (a, b) = Some cap \<and> is_pd_cap cap
\<and> rv && ~~ mask pd_bits \<in> obj_refs cap
\<and> s \<turnstile> cap \<and> cap_asid cap \<noteq> None\<rbrace>, -"
apply (rule hoare_gen_asmE)
apply (wp lookup_pd_slot_wp)
apply clarsimp
apply (drule_tac x=ref in spec)
apply (erule impE, fastforce, clarsimp)
apply (thin_tac "(_ \<rhd> pm) _")
apply (frule valid_vs_lookupD[OF vs_lookup_pages_vs_lookupI]; clarsimp)
apply (intro exI, rule conjI, assumption)
apply (frule (1) caps_of_state_valid)
apply (clarsimp simp: pde_at_def)
apply (frule (2) valid_cap_to_pd_cap')
by (auto simp: vs_cap_ref_def is_pd_cap_def
split: cap.splits arch_cap.splits option.splits)
lemma lookup_pt_slot_cap_to:
"\<lbrace>invs and \<exists>\<rhd>pm and K (is_aligned pm pml4_bits \<and> vptr < pptr_base \<and> canonical_address vptr)\<rbrace>
lookup_pt_slot pm vptr
\<lbrace>\<lambda>rv s. \<exists>a b cap. caps_of_state s (a, b) = Some cap \<and> is_pt_cap cap
\<and> rv && ~~ mask pt_bits \<in> obj_refs cap
\<and> s \<turnstile> cap \<and> cap_asid cap \<noteq> None\<rbrace>, -"
apply (rule hoare_gen_asmE)
apply (wp lookup_pt_slot_wp)
apply clarsimp
apply (drule_tac x=ref in spec)
apply (erule impE, fastforce, clarsimp)
apply (thin_tac "(_ \<rhd> pm) _")
apply (frule valid_vs_lookupD[OF vs_lookup_pages_vs_lookupI]; clarsimp)
apply (intro exI, rule conjI, assumption)
apply (frule (1) caps_of_state_valid)
apply (clarsimp simp: pte_at_def)
apply (frule (2) valid_cap_to_pt_cap')
by (auto simp: vs_cap_ref_def is_pt_cap_def
split: cap.splits arch_cap.splits option.splits)
lemma create_mapping_entries_parent_for_refs:
"\<lbrace>invs and \<exists>\<rhd> pm and page_map_l4_at pm
and K (is_aligned pm pml4_bits \<and> vmsz_aligned vptr pgsz
\<and> vptr < pptr_base \<and> canonical_address vptr)\<rbrace>
create_mapping_entries ptr vptr pgsz rights attribs pm
\<lbrace>\<lambda>rv s. \<exists>a b. cte_wp_at (parent_for_refs rv) (a, b) s\<rbrace>, -"
apply (rule hoare_gen_asmE)
apply (cases pgsz; simp add: vmsz_aligned_def)
by (wp,
rule hoare_post_imp_R,
rule lookup_pt_slot_cap_to lookup_pd_slot_cap_to lookup_pdpt_slot_cap_to,
elim exEI,
clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def,
simp)+
lemma find_vspace_for_asid_ref_offset_voodoo:
"\<lbrace>pspace_aligned and valid_vspace_objs and
K (ref = [VSRef (ucast (asid_low_bits_of asid)) (Some AASIDPool),
VSRef (ucast (asid_high_bits_of asid)) None])\<rbrace>
find_vspace_for_asid asid
\<lbrace>\<lambda>rv. (ref \<rhd> (rv + (get_pml4_index v << word_size_bits) && ~~ mask pml4_bits))\<rbrace>,-"
apply (rule hoare_gen_asmE)
apply (rule_tac Q'="\<lambda>rv s. is_aligned rv pml4_bits \<and> (ref \<rhd> rv) s"
in hoare_post_imp_R)
apply simp
apply (rule hoare_pre, wp find_vspace_for_asid_lookup_ref)
apply simp
apply (simp add: pml4_shifting)
done
declare asid_high_bits_of_shift [simp]
declare mask_shift [simp]
declare word_less_sub_le [simp del]
declare ptrFormPAddr_addFromPPtr [simp]
(* FIXME: move *)
lemma valid_mask_vm_rights[simp]:
"mask_vm_rights V R \<in> valid_vm_rights"
by (simp add: mask_vm_rights_def)
lemma vs_lookup_and_unique_refs:
"\<lbrakk>(ref \<rhd> p) s; caps_of_state s cptr = Some cap; table_cap_ref cap = Some ref';
p \<in> obj_refs cap; valid_vs_lookup s; unique_table_refs (caps_of_state s)\<rbrakk>
\<Longrightarrow> ref = ref'"
apply (frule_tac ref=ref in valid_vs_lookupD[OF vs_lookup_pages_vs_lookupI], assumption)
apply clarsimp
apply (frule_tac cap'=capa in unique_table_refsD)
apply simp+
apply (case_tac capa, simp_all)
apply ((case_tac cap, simp_all)+)[6]
apply (clarsimp simp add: table_cap_ref_def vs_cap_ref_def split: cap.splits arch_cap.splits option.splits)
done
lemma valid_global_ptsD2:
"\<lbrakk>r \<in> set (x64_global_pts (arch_state s)); valid_global_pts s\<rbrakk>
\<Longrightarrow> \<exists>pt. ko_at (ArchObj (PageTable pt)) r s"
apply (clarsimp simp: valid_global_pts_def)
apply (drule (1) bspec)
apply (clarsimp simp: obj_at_def)
done
lemma valid_global_pdsD2:
"\<lbrakk>r \<in> set (x64_global_pds (arch_state s)); valid_global_pds s\<rbrakk>
\<Longrightarrow> \<exists>pd. ko_at (ArchObj (PageDirectory pd)) r s"
apply (clarsimp simp: valid_global_pds_def)
apply (drule (1) bspec)
apply (clarsimp simp: obj_at_def)
done
lemma valid_global_pdptsD2:
"\<lbrakk>r \<in> set (x64_global_pdpts (arch_state s)); valid_global_pdpts s\<rbrakk>
\<Longrightarrow> \<exists>pdpt. ko_at (ArchObj (PDPointerTable pdpt)) r s"
apply (clarsimp simp: valid_global_pdpts_def)
apply (drule (1) bspec)
apply (clarsimp simp: obj_at_def)
done
context begin
private method try_solve methods m = (m; fail)?
private method ref_is_unique =
(drule (1) ref_is_unique;
try_solve \<open>simp add: get_pml4_index_def get_pdpt_index_def get_pd_index_def get_pt_index_def
valid_arch_state_def bit_simps\<close>)
lemma create_mapping_entries_same_refs:
"\<lbrace>valid_arch_state and valid_vspace_objs and valid_vs_lookup
and (\<lambda>s. unique_table_refs (caps_of_state s))
and pspace_aligned and valid_objs and valid_kernel_mappings and \<exists>\<rhd> pm
and (\<lambda>s. \<exists>pm_cap pm_cptr. cte_wp_at ((=) pm_cap) pm_cptr s
\<and> pm_cap = ArchObjectCap (PML4Cap pm (Some asid)))
and page_map_l4_at pm
and K (vaddr < pptr_base \<and> canonical_address vaddr
\<and> cap = ArchObjectCap (PageCap dev p rights' mt pgsz (Some (asid, vaddr))))\<rbrace>
create_mapping_entries (addrFromPPtr p) vaddr pgsz rights attribs pm
\<lbrace>\<lambda>rv s. same_refs rv cap s\<rbrace>,-"
apply (rule hoare_gen_asmE; clarsimp; induct pgsz;
wpsimp wp: get_pde_wp get_pdpte_wp get_pml4e_wp
simp: lookup_pt_slot_def lookup_pd_slot_def lookup_pdpt_slot_def
same_refs_def vs_cap_ref_def
valid_arch_state_def
pte_ref_pages_def pdpte_ref_pages_def
lookup_pml4_slot_def)
apply (all \<open>clarsimp simp: cte_wp_at_caps_of_state mask_cap_def\<close>)
apply (all \<open>frule valid_objs_caps\<close>)
apply (all \<open>frule (1) is_aligned_pml4; clarsimp simp: pml4_shifting\<close>)
apply (all \<open>frule (2) valid_vspace_objsD[where ao="PageMapL4 t" for t, rotated]; clarsimp\<close>)
apply (all \<open>frule (1) iffD2[OF Compl_iff, OF kernel_base_kernel_mapping_slots];
drule (1) bspec; clarsimp\<close>)
apply (all \<open>frule (1) is_aligned_pdpt; clarsimp simp: pdpt_shifting\<close>)
apply (all \<open>frule (4) vs_lookup_step[OF _ vs_lookup1I[OF _ vs_refs_get_pml4_index refl]]\<close>)
apply (all \<open>frule (1) vs_lookup_and_unique_refs;
try_solve \<open>simp add: table_cap_ref_def obj_refs_def\<close>; clarsimp\<close>)
prefer 3 subgoal by (ref_is_unique; rule not_kernel_slot_not_global_pml4; simp add: obj_at_def)
apply (all \<open>frule (2) valid_vspace_objsD[where ao="PDPointerTable t" for t, rotated]; clarsimp\<close>)
apply (all \<open>drule spec[of _ "ucast (get_pdpt_index vaddr)"]; clarsimp\<close>)
apply (all \<open>frule (1) is_aligned_pd; clarsimp simp: pd_shifting\<close>)
apply (all \<open>frule (2) vs_lookup_step[OF _ vs_lookup1I[OF _ vs_refs_get_pdpt_index refl]]\<close>)
prefer 2 subgoal by (ref_is_unique; clarsimp dest!: valid_global_pdptsD2
simp: obj_at_def second_level_tables_def)
apply (frule (2) valid_vspace_objsD[where ao="PageDirectory t" for t, rotated]; clarsimp)
apply (drule spec[of _ "ucast (get_pd_index vaddr)"]; clarsimp)
apply (frule (1) is_aligned_pt; clarsimp simp: pt_shifting)
apply (frule (2) vs_lookup_step[OF _ vs_lookup1I[OF _ vs_refs_get_pd_index refl]])
by (ref_is_unique; clarsimp dest!: valid_global_pdptsD2 simp: obj_at_def second_level_tables_def)
end
lemma create_mapping_entries_same_refs_ex:
"\<lbrace>valid_arch_state and valid_vspace_objs and valid_vs_lookup and (\<lambda>s. unique_table_refs (caps_of_state s))
and pspace_aligned and valid_objs and valid_kernel_mappings and \<exists>\<rhd> pm and
(\<lambda>s. \<exists>dev pm_cap pm_cptr asid rights' mt. cte_wp_at ((=) pm_cap) pm_cptr s
\<and> pm_cap = ArchObjectCap (PML4Cap pm (Some asid))
\<and> page_map_l4_at pm s \<and> vaddr < pptr_base \<and> canonical_address vaddr
\<and> (cap = (ArchObjectCap (PageCap dev p rights' mt pgsz (Some (asid, vaddr))))))\<rbrace>
create_mapping_entries (addrFromPPtr p) vaddr pgsz rights attribs pm
\<lbrace>\<lambda>rv s. same_refs rv cap s\<rbrace>,-"
apply (clarsimp simp: validE_R_def validE_def valid_def split: sum.split)
apply (erule use_validE_R[OF _ create_mapping_entries_same_refs])
apply fastforce
done
lemma find_vspace_for_asid_lookup_vspace_wp:
"\<lbrace> \<lambda>s. valid_vspace_objs s \<and> (\<forall>pm. vspace_at_asid asid pm s \<and> page_map_l4_at pm s
\<and> (\<exists>\<rhd> pm) s \<longrightarrow> Q pm s) \<rbrace> find_vspace_for_asid asid \<lbrace> Q \<rbrace>, -"
(is "\<lbrace> \<lambda>s. ?v s \<and> (\<forall>pm. ?vpm pm s \<longrightarrow> Q pm s) \<rbrace> ?f \<lbrace> Q \<rbrace>, -")
apply (rule_tac Q'="\<lambda>rv s. ?vpm rv s \<and> (\<forall>pm. ?vpm pm s \<longrightarrow> Q pm s)" in hoare_post_imp_R)
apply wpsimp
apply (simp | fast)+
done
lemma aligned_sum_less:
fixes p :: "'a::len word"
shows "\<lbrakk> is_aligned p sz; is_aligned q sz; sz < LENGTH('a) \<rbrakk>
\<Longrightarrow> (p + 2 ^ sz - 1 < q) = (p < q)"
apply (rule iffI)
apply (rule le_less_trans)
apply (rule is_aligned_no_overflow)
apply (simp add: vmsz_aligned_def)
apply simp
apply (simp add: field_simps[symmetric])
apply (erule gap_between_aligned; simp add: vmsz_aligned_def)
done
lemma aligned_sum_le:
fixes p :: "'a::len word"
shows "\<lbrakk> is_aligned p sz; is_aligned (q+1) sz; 0 < sz; sz < LENGTH('a) \<rbrakk>
\<Longrightarrow> (p + 2 ^ sz - 1 \<le> q) = (p \<le> q)"
using aligned_sum_less[where q="q+1"]
by (case_tac "q = max_word"; simp add: word_Suc_leq)
lemma aligned_sum_less_kernel_base:
"vmsz_aligned p sz \<Longrightarrow> (p + 2 ^ pageBitsForSize sz - 1 < kernel_base) = (p < kernel_base)"
by (cases sz
; rule aligned_sum_less
; simp add: vmsz_aligned_def kernel_base_def bit_simps is_aligned_def)
lemma aligned_sum_le_user_vtop:
"vmsz_aligned p sz \<Longrightarrow> (p + 2 ^ pageBitsForSize sz - 1 \<le> user_vtop) = (p \<le> user_vtop)"
by (cases sz
; rule aligned_sum_le
; simp add: vmsz_aligned_def user_vtop_def pptrUserTop_def bit_simps is_aligned_def)
lemma pml4e_at_shifting_magic:
"\<lbrakk>ako_at (PageMapL4 pm) xa s; is_aligned xa pml4_bits\<rbrakk> \<Longrightarrow>
pml4e_at (xa + (get_pml4_index (args ! 0 && ~~ mask pml4_shift_bits) << word_size_bits)) s"
apply (clarsimp simp: pml4e_at_def pml4_shifting page_map_l4_at_def2)
apply (rule conjI, fastforce)
apply (rule is_aligned_add)
apply (simp add: is_aligned_mask)
apply (erule is_aligned_AND_less_0, simp add: bit_simps mask_def)
by (simp add: word_size_bits_def is_aligned_shift)
lemma le_user_vtop_less_pptr_base[simp]:
"x \<le> user_vtop \<Longrightarrow> x < pptr_base"
apply (clarsimp simp: user_vtop_def pptrUserTop_def pptr_base_def pptrBase_def)
by (word_bitwise; simp)
lemma le_user_vtop_canonical_address[simp]:
"x \<le> user_vtop \<Longrightarrow> canonical_address x"
by (clarsimp simp: user_vtop_def pptrUserTop_def canonical_address_range mask_def)
lemma le_user_vtop_and_user_vtop_eq:
"x && ~~ mask pml4_shift_bits \<le> user_vtop \<Longrightarrow> x && user_vtop = x"
apply (clarsimp simp add: user_vtop_def pptrUserTop_def bit_simps)
by (word_bitwise; simp)
lemma and_not_mask_pml4_not_kernel_mapping_slots:
"x && ~~ mask pml4_shift_bits \<le> user_vtop \<Longrightarrow> ucast (x >> pml4_shift_bits) \<notin> kernel_mapping_slots"
apply (subgoal_tac "ucast (x >> pml4_shift_bits) = ((ucast (get_pml4_index x))::9 word)")
prefer 2
apply (clarsimp simp: get_pml4_index_def bit_simps ucast_mask_drop)
apply clarsimp
apply (subgoal_tac "(get_pml4_index x) = (get_pml4_index (x && user_vtop))")
apply (simp add: user_vtop_kernel_mapping_slots)
apply (simp add: le_user_vtop_and_user_vtop_eq)
done
lemma decode_page_invocation_wf[wp]:
"arch_cap = PageCap dev word rights map_type vmpage_size option \<Longrightarrow>
\<lbrace>invs and valid_cap (ArchObjectCap arch_cap) and
cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\<rbrace>
decode_page_invocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (simp add: arch_decode_invocation_def decode_page_invocation_def Let_def split_def
cong: if_cong split del: if_split)
apply (cases "invocation_type label = ArchInvocationLabel X64PageMap")
apply (simp split del: if_split)
apply (rule hoare_pre)
apply (wpsimp wp: whenE_throwError_wp check_vp_wpR hoare_vcg_const_imp_lift_R
hoare_vcg_disj_lift_R hoare_vcg_conj_lift_R create_mapping_entries_parent_for_refs
hoare_vcg_ex_lift_R find_vspace_for_asid_vspace_at_asid
create_mapping_entries_valid_slots create_mapping_entries_same_refs_ex
find_vspace_for_asid_lookup_vspace_wp
simp: valid_arch_inv_def valid_page_inv_def is_pg_cap_def
cte_wp_at_def[where P="(\<lambda>c. same_refs rv c s)" for rv s])
apply (intro allI conjI impI; clarsimp)
apply (rule conjI)
apply (clarsimp simp: cte_wp_at_def is_arch_update_def, rule conjI)
apply (clarsimp simp: is_arch_cap_def)
apply (clarsimp simp: cap_master_cap_simps)
apply (clarsimp simp: neq_Nil_conv invs_vspace_objs)
apply (frule cte_wp_valid_cap[where p="(a, b)" for a b], clarsimp)
apply (frule cte_wp_valid_cap[where p=slot], clarsimp)
apply (clarsimp simp: cte_wp_at_caps_of_state mask_cap_def linorder_not_le
aligned_sum_less_kernel_base)
apply (clarsimp simp: cap_rights_update_def acap_rights_update_def invs_implies is_cap_simps
is_aligned_pml4 not_less
cong: conj_cong
split: cap.splits arch_cap.splits)
apply (prop_tac "args ! 0 < pptr_base \<and> canonical_address (args ! 0)",
clarsimp dest!: aligned_sum_le_user_vtop, simp)
apply (extract_conjunct \<open>match conclusion in \<open>data_at vmpage_size word _\<close> \<Rightarrow> \<open>-\<close>\<close>,
clarsimp simp: valid_cap_simps data_at_def split: if_splits)
apply (extract_conjunct \<open>match conclusion in \<open>_ \<turnstile> _\<close> \<Rightarrow> \<open>-\<close>\<close>,
clarsimp simp: valid_cap_simps cap_aligned_def vmsz_aligned_def)
apply (fastforce simp: vs_cap_ref_def split: vmpage_size.split)
apply (clarsimp simp: cte_wp_at_caps_of_state invs_implies is_aligned_pml4)
apply (drule bspec[where x="excaps ! 0"]; clarsimp)
apply (extract_conjunct \<open>match conclusion in \<open>data_at vmpage_size word _\<close> \<Rightarrow> \<open>-\<close>\<close>,
clarsimp simp: valid_cap_simps data_at_def split: if_splits)
apply (prop_tac "args ! 0 < pptr_base \<and> canonical_address (args ! 0)",
clarsimp simp: valid_cap_simps, simp)
apply (clarsimp simp: cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: is_arch_update_reset_page get_cap_caps_of_state)
apply (cases "snd (excaps ! 0)", fastforce simp: mask_cap_def cap_rights_update_def
acap_rights_update_def)
apply (cases "invocation_type label = ArchInvocationLabel X64PageUnmap")
apply (simp split del: if_split)
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def)
apply (thin_tac "Ball S P" for S P)
apply (clarsimp split: option.split)
apply (clarsimp simp: valid_cap_simps cap_aligned_def)
apply (simp add: valid_unmap_def)
apply (cases "invocation_type label = ArchInvocationLabel X64PageGetAddress"
; simp split del: if_split
; wpsimp simp: valid_arch_inv_def valid_page_inv_def)
done
lemma decode_page_table_invocation_wf[wp]:
"arch_cap = PageTableCap pt_ptr pt_map_data \<Longrightarrow>
\<lbrace>invs and valid_cap (ArchObjectCap arch_cap) and
cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\<rbrace>
decode_page_table_invocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (simp add: arch_decode_invocation_def decode_page_table_invocation_def
Let_def split_def is_final_cap_def
cong: if_cong split del: if_split)
apply (wp whenE_throwError_wp lookup_pd_slot_wp find_vspace_for_asid_lookup_vspace_wp
get_pde_wp
| wpc
| simp add: valid_arch_inv_def valid_pti_def unlessE_whenE vs_cap_ref_def
split del: if_split)+
apply (rule conjI; clarsimp simp: is_cap_simps elim!: cte_wp_at_weakenE)
apply (rule conjI; clarsimp)
apply (drule_tac x=ref in spec; erule impE; clarsimp)
apply (fastforce elim!: is_aligned_pml4)
apply (frule valid_arch_cap_typ_at; clarsimp)
apply (strengthen not_in_global_refs_vs_lookup, rule conjI, fastforce)
apply (clarsimp simp: neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)
apply (clarsimp simp: cte_wp_at_caps_of_state valid_vm_rights_def
is_arch_update_def cap_master_cap_def is_cap_simps)
apply (frule_tac p="(aa,b)" in valid_capsD[OF _ valid_objs_caps], fastforce)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def is_aligned_addrFromPPtr_n table_size)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def order_le_less_trans[OF word_and_le2])
apply (frule empty_table_pt_capI; clarsimp)
apply (clarsimp simp: vspace_at_asid_def; drule (2) vs_lookup_invs_ref_is_unique; clarsimp)
apply (clarsimp simp: get_pd_index_def get_pdpt_index_def get_pml4_index_def)
apply (rule conjI[rotated], simp add: bit_simps mask_def, word_bitwise)
apply (erule rsubst[where P="\<lambda>r. (r \<rhd> p) s" for p s])
apply (clarsimp simp: mask_def bit_simps; word_bitwise)
done
lemma decode_page_directory_invocation_wf[wp]:
"arch_cap = PageDirectoryCap pd_ptr pd_map_data \<Longrightarrow>
\<lbrace>invs and valid_cap (ArchObjectCap arch_cap) and
cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\<rbrace>
decode_page_directory_invocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (simp add: arch_decode_invocation_def decode_page_directory_invocation_def
Let_def split_def is_final_cap_def
cong: if_cong split del: if_split)
apply ((wp whenE_throwError_wp lookup_pdpt_slot_wp find_vspace_for_asid_lookup_vspace_wp get_pdpte_wp
| wpc | simp add: valid_arch_inv_def valid_pdi_def unlessE_whenE vs_cap_ref_def
split del: if_split)+)[1]
apply (rule conjI; clarsimp simp: is_cap_simps elim!: cte_wp_at_weakenE)
apply (rule conjI; clarsimp)
apply (drule_tac x=ref in spec; erule impE; clarsimp)
apply (fastforce elim!: is_aligned_pml4)
apply (frule valid_arch_cap_typ_at; clarsimp)
apply (strengthen not_in_global_refs_vs_lookup, rule conjI, fastforce)
apply (clarsimp simp: neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)
apply (clarsimp simp: cte_wp_at_caps_of_state valid_vm_rights_def
is_arch_update_def cap_master_cap_def is_cap_simps)
apply (frule_tac p="(aa,b)" in valid_capsD[OF _ valid_objs_caps], fastforce)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def is_aligned_addrFromPPtr_n table_size)
apply (rule conjI)
apply (clarsimp simp: wellformed_mapdata_def vmsz_aligned_def valid_cap_def cap_aligned_def
order_le_less_trans[OF word_and_le2])
apply (frule valid_table_caps_pdD; clarsimp)
apply (clarsimp simp: vspace_at_asid_def; drule (2) vs_lookup_invs_ref_is_unique; clarsimp)
apply (clarsimp simp: pdpte_ref_pages_def get_pd_index_def get_pdpt_index_def get_pml4_index_def)
apply (rule conjI[rotated], simp add: bit_simps mask_def, word_bitwise)
apply (erule rsubst[where P="\<lambda>r. (r \<rhd> p) s" for p s])
apply (clarsimp simp: mask_def bit_simps; word_bitwise)
done
lemma decode_pdpt_invocation_wf[wp]:
"arch_cap = PDPointerTableCap pdpt_ptr pdpt_map_data \<Longrightarrow>
\<lbrace>invs and valid_cap (ArchObjectCap arch_cap) and
cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\<rbrace>
decode_pdpt_invocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (simp add: arch_decode_invocation_def decode_pdpt_invocation_def
Let_def split_def is_final_cap_def lookup_pml4_slot_def
cong: if_cong split del: if_split)
apply ((wp whenE_throwError_wp find_vspace_for_asid_lookup_vspace_wp get_pml4e_wp
| wpc | simp add: valid_arch_inv_def valid_pdpti_def unlessE_whenE vs_cap_ref_def
split del: if_split)+)[1]
apply (rule conjI; clarsimp simp: is_cap_simps elim!: cte_wp_at_weakenE)
apply (rule conjI; clarsimp)
apply (frule is_aligned_pml4, fastforce)
apply (frule valid_arch_cap_typ_at; clarsimp simp: pml4_shifting)
apply (strengthen not_in_global_refs_vs_lookup, rule conjI, fastforce)
apply (clarsimp simp: neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)
apply (clarsimp simp: cte_wp_at_caps_of_state valid_vm_rights_def is_arch_update_def
cap_master_cap_def is_cap_simps)
apply (frule_tac p="(aa,b)" in valid_capsD[OF _ valid_objs_caps], fastforce)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def is_aligned_addrFromPPtr_n table_size)
apply (rule conjI)
apply (clarsimp simp: valid_cap_simps cap_aligned_def order_le_less_trans[OF word_and_le2])
apply (frule valid_table_caps_pdptD; clarsimp)
apply (clarsimp simp: vspace_at_asid_def; drule (2) vs_lookup_invs_ref_is_unique; clarsimp)
apply (rule conjI, fastforce simp: pml4e_at_shifting_magic)
apply (rule conjI, fastforce simp: empty_pml4e_at_def pml4_shifting)
apply (rule context_conjI, fastforce simp: get_pml4_index_def bit_simps, simp)
apply (clarsimp simp: kernel_vsrefs_kernel_mapping_slots' and_not_mask_pml4_not_kernel_mapping_slots)
done
lemma asid_wf_low_add:
fixes b :: asid_low_index
shows "asid_wf a \<Longrightarrow> is_aligned a asid_low_bits \<Longrightarrow> asid_wf (ucast b + a)"
apply (clarsimp simp: asid_wf_def field_simps)
apply (erule is_aligned_add_less_t2n)
apply (simp add: asid_low_bits_def)
apply (rule ucast_less[where 'b=asid_low_len, simplified], simp)
by (auto simp: asid_bits_defs)
lemma asid_wf_high:
fixes a :: asid_high_index
shows "asid_wf (ucast a << asid_low_bits)"
apply (clarsimp simp: asid_wf_def)
apply (rule shiftl_less_t2n)
apply (rule order_less_le_trans, rule ucast_less, simp)
by (auto simp: asid_bits_defs)
lemma cte_wp_at_eq_simp:
"cte_wp_at ((=) cap) = cte_wp_at (\<lambda>c. c = cap)"
apply (rule arg_cong [where f=cte_wp_at])
apply (safe intro!: ext)
done
lemma is_ioport_range_free_wp:
"\<lbrace>\<lambda>s. \<forall>rv. (rv \<longrightarrow> {f..l} \<inter> issued_ioports (arch_state s) = {}) \<longrightarrow> Q rv s \<rbrace> is_ioport_range_free f l \<lbrace>Q\<rbrace>"
by (wpsimp simp: is_ioport_range_free_def issued_ioports_def)
lemma decode_ioport_control_inv_wf[wp]:
"arch_cap = IOPortControlCap \<Longrightarrow>
\<lbrace>invs and cte_wp_at ((=) (cap.ArchObjectCap IOPortControlCap)) slot
and (\<lambda>s. \<forall>cap \<in> set excaps. is_cnode_cap cap \<longrightarrow>
(\<forall>r \<in> cte_refs cap (interrupt_irq_node s). ex_cte_cap_wp_to is_cnode_cap r s))
and (\<lambda>s. \<forall>cap \<in> set excaps. s \<turnstile> cap)\<rbrace>
decode_ioport_control_invocation label args slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>, -"
apply (clarsimp simp: decode_ioport_control_invocation_def Let_def valid_arch_inv_def
valid_iocontrol_inv_def whenE_def lookup_target_slot_def
split del: if_split
cong: if_cong)
apply (rule hoare_pre)
apply (wp ensure_empty_stronger hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift
is_ioport_range_free_wp
| simp add: cte_wp_at_eq_simp valid_iocontrol_inv_def valid_arch_inv_def
split del: if_split
| wpc | wp (once) hoare_drop_imps)+
apply (clarsimp simp: invs_valid_objs word_le_not_less)
apply (cases excaps, auto)
done
lemma arch_decode_inv_wf[wp]:
"\<lbrace>invs and valid_cap (ArchObjectCap arch_cap) and
cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s) and
(\<lambda>s. \<forall>x \<in> set excaps. s \<turnstile> (fst x))\<rbrace>
arch_decode_invocation label args x_slot slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (cases arch_cap)
(* ASIDPoolCap \<rightarrow> X64ASIDPoolAssign *)
apply (rename_tac word1 word2)
apply (simp add: arch_decode_invocation_def Let_def split_def
cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp
| wpc | simp add: valid_arch_inv_def valid_apinv_def)+)[1]
apply (simp add: valid_arch_inv_def valid_apinv_def)
apply (intro allI impI ballI)
apply (elim conjE exE)
apply simp
apply (clarsimp simp: dom_def neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)+
apply (clarsimp simp: valid_cap_def)
apply (rule conjI)
apply (clarsimp simp: obj_at_def)
apply (subgoal_tac "asid_low_bits_of (ucast xa + word2) = xa")
apply simp
apply (simp add: is_aligned_nth)
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_ucast)
apply (drule test_bit_size)
apply (simp add: word_size asid_low_bits_def)
apply (rule word_eqI)
apply (clarsimp simp: asid_bits_of_defs asid_bits_defs word_size word_bits_def nth_ucast)
apply (rule conjI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (rename_tac c c')
apply (frule_tac cap="(ArchObjectCap (PML4Cap xb None))" in caps_of_state_valid, assumption)
apply (clarsimp simp: is_pml4_cap_def cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: word_neq_0_conv asid_high_bits_of_def asid_wf_low_add)
apply (drule vs_lookup_atI, erule_tac s="word2 >> asid_low_bits" in rsubst)
apply (simp add: asid_bits_defs aligned_shift[OF ucast_less[where 'b=9], simplified])
(* ASIDControlCap \<rightarrow> X64ASIDControlMakePool *)
apply (simp add: arch_decode_invocation_def Let_def split_def
cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger
| wpc | simp add: valid_arch_inv_def valid_aci_def is_aligned_shiftl_self)+)[1]
apply (rule_tac Q'= "\<lambda>rv. real_cte_at rv
and ex_cte_cap_wp_to is_cnode_cap rv
and (\<lambda>s. descendants_of (snd (excaps!0)) (cdt s) = {})
and cte_wp_at (\<lambda>c. \<exists>idx. c = UntypedCap False frame pageBits idx) (snd (excaps!0))
and (\<lambda>s. x64_asid_table (arch_state s) free = None)"
in hoare_post_imp_R)
apply (simp add: lookup_target_slot_def)
apply wp
apply (clarsimp simp: cte_wp_at_def asid_wf_high)
apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp)+
apply clarsimp
apply (rule conjI, fastforce)
apply (cases excaps, simp)
apply (case_tac list, simp)
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (simp add: ex_cte_cap_wp_to_def)
apply (rule_tac x=ac in exI)
apply (rule_tac x=ba in exI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (clarsimp simp: cap_rights_update_def)
(* IOPortCap *)
apply (simp add: arch_decode_invocation_def decode_port_invocation_def)
apply (rule hoare_pre)
apply (wp whenE_throwError_wp | wpc | simp)+
apply (simp add: valid_arch_inv_def)
(* IOPortControlCap *)
apply (simp add: arch_decode_invocation_def)
apply wpsimp
apply (drule_tac x="(a,aa,b)" in bspec, assumption)+
apply (simp add: ex_cte_cap_wp_to_def)
apply (rule_tac x=aa in exI)
apply (rule_tac x=b in exI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (clarsimp simp: is_cap_simps cap_rights_update_def)
(* PageCap *)
apply (simp add: arch_decode_invocation_def)
apply (wp, simp, simp)
(* PageTableCap *)
apply (simp add: arch_decode_invocation_def)
apply (wp, simp, simp)
(* PageDirectoryCap *)
apply (simp add: arch_decode_invocation_def)
apply (wp, simp, simp)
(* PDPTCap *)
apply (simp add: arch_decode_invocation_def)
apply (wp, simp, simp)
(* PML4Cap - no invocations *)
apply (wpsimp simp: arch_decode_invocation_def)
done
declare word_less_sub_le [simp]
crunch pred_tcb_at [wp]:
perform_page_table_invocation, perform_page_directory_invocation, perform_pdpt_invocation,
perform_page_invocation, perform_asid_pool_invocation, perform_io_port_invocation,
perform_ioport_control_invocation
"pred_tcb_at proj P t"
(wp: crunch_wps simp: crunch_simps)
lemma arch_pinv_st_tcb_at:
"\<lbrace>invs and valid_arch_inv ai and ct_active and
st_tcb_at (P and (Not \<circ> inactive) and (Not \<circ> idle)) t\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
apply (cases ai; simp add: arch_perform_invocation_def valid_arch_inv_def)
apply (wp perform_asid_control_invocation_st_tcb_at; fastforce elim!: pred_tcb_weakenE)+
done
end
context begin interpretation Arch .
requalify_consts
valid_arch_inv
requalify_facts
invoke_arch_tcb
invoke_arch_invs
sts_valid_arch_inv
arch_decode_inv_wf
arch_pinv_st_tcb_at
end
declare invoke_arch_invs[wp]
declare arch_decode_inv_wf[wp]
end
|
import Control.Monad.Identity
import System
fib : Integer -> Identity Integer
fib 0 = pure 0
fib 1 = pure 1
fib n = pure $ !(fib (n - 1)) + !(fib (n - 2))
main : IO ()
main = do
value <- getLine
printLn $ runIdentity (fib (cast value))
|
Formal statement is: lemma has_contour_integral_reversepath: assumes "valid_path g" and f: "(f has_contour_integral i) g" shows "(f has_contour_integral (-i)) (reversepath g)" Informal statement is: If $f$ has a contour integral along a path $g$, then $f$ has a contour integral along the reverse path $g^{-1}$ with the opposite sign. |
subroutine fmc11b(a,n,ir)
c factorize a symmetric matrix given in a.
c a holds the matrix in packed row-wise upper triangular storage and
c is overwritten by its l*d*l' factorization: the unit lower
c triangular factors go in the off-diagonal positions and the pivots
c of d on the diagonal. ir returns n minus the number of non-positive
c pivots encountered, so ir = n indicates a positive definite matrix.
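c example (illustrative): for n=2 and a=(4.,2.,2.), i.e. the matrix
c ((4,2),(2,2)), the routine returns a=(4.,0.5,1.) and ir=2, since
c ((4,2),(2,2)) = l*d*l' with l=((1,0),(0.5,1)) and d=diag(4,1).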
implicit double precision (a-h,o-z)
dimension a(*)
ir=n
if(n.gt.1)goto100
if(a(1).gt.0.d0)return
a(1)=0.d0
ir=0
return
100 continue
np=n+1
ii=1
do 104 i=2,n
aa=a(ii)
ni=ii+np-i
if(aa.gt.0.d0)goto101
a(ii)=0.d0
ir=ir-1
ii=ni+1
goto104
101 continue
ip=ii+1
ii=ni+1
jk=ii
do 103 ij=ip,ni
v=a(ij)/aa
do 102 ik=ij,ni
a(jk)=a(jk)-a(ik)*v
102 jk=jk+1
103 a(ij)=v
104 continue
if(a(ii).gt.0.d0)return
a(ii)=0.d0
ir=ir-1
return
end
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import data.finsupp.basic
/-!
# The pointwise product on `finsupp`.
TODO per issue #1864:
We intend to remove the convolution product on finsupp, and define
it only on a type synonym `add_monoid_algebra`. After we've done this,
it would be good to make this the default product on `finsupp`.
-/
noncomputable theory
open_locale classical
open finset
universes u₁ u₂ u₃ u₄ u₅
variables {α : Type u₁} {β : Type u₂} {γ : Type u₃} {δ : Type u₄} {ι : Type u₅}
namespace finsupp
/-! ### Declarations about the pointwise product on `finsupp`s -/
section
variables [mul_zero_class β]
/-- The product of `f g : α →₀ β` is the finitely supported function
whose value at `a` is `f a * g a`. -/
instance : has_mul (α →₀ β) := ⟨zip_with (*) (mul_zero 0)⟩
@[simp] lemma mul_apply {g₁ g₂ : α →₀ β} {a : α} : (g₁ * g₂) a = g₁ a * g₂ a :=
rfl
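/- Illustrative example (an addition, not part of the original file): the
pointwise product of two `single`s at the same point multiplies the values;
`simp` closes it via `mul_apply` and `single_eq_same`. -/
example {a : α} {b₁ b₂ : β} : (single a b₁ * single a b₂) a = b₁ * b₂ :=
by simp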
lemma support_mul {g₁ g₂ : α →₀ β} : (g₁ * g₂).support ⊆ g₁.support ∩ g₂.support :=
begin
intros a h,
simp only [mul_apply, mem_support_iff] at h,
simp only [mem_support_iff, mem_inter, ne.def],
rw ←not_or_distrib,
intro w,
apply h,
cases w; { rw w, simp },
end
instance : mul_zero_class (α →₀ β) :=
{ zero := 0,
mul := (*),
mul_zero := λ f, by { ext, simp only [mul_apply, zero_apply, mul_zero], },
zero_mul := λ f, by { ext, simp only [mul_apply, zero_apply, zero_mul], }, }
end
instance [semigroup_with_zero β] : semigroup_with_zero (α →₀ β) :=
{ mul := (*),
mul_assoc := λ f g h, by { ext, simp only [mul_apply, mul_assoc], },
..(infer_instance : mul_zero_class (α →₀ β)) }
instance [semiring β] : distrib (α →₀ β) :=
{ left_distrib := λ f g h, by { ext, simp only [mul_apply, add_apply, left_distrib] {proj := ff} },
right_distrib := λ f g h,
by { ext, simp only [mul_apply, add_apply, right_distrib] {proj := ff} },
..(infer_instance : semigroup (α →₀ β)),
..(infer_instance : add_comm_monoid (α →₀ β)) }
-- If `non_unital_semiring` existed in the algebraic hierarchy, we could produce one here.
end finsupp
|
subsection \<open>Example: A Ticket Automaton\<close>
text \<open>As another example, we consider an automaton modeling a ticket vending system, where
users can buy normal tickets or VIP tickets.
We will prove a noninterference property: buyers of normal tickets learn nothing about
the actions of VIP buyers.\<close>
theory Ticket_Automaton
imports "../../Reference_Monitor" "HOL-Library.Code_Target_Nat"
begin
text \<open>The state stores the number of remaining normal and VIP tickets, respectively, as a
pair of natural numbers.\<close>
type_synonym state = "nat \<times> nat"
text \<open>There are actions for initializing the system with a given number of normal or VIP tickets,
buying tickets, and querying the number of remaining tickets.\<close>
datatype act =
NInit nat
| VInit nat
| NQuery
| VQuery
| NBuy
| VBuy
datatype "output" =
OK
| Err
| Out_NTickets nat
| Out_NVTickets "(nat \<times> nat)"
datatype var =
NTickets
| VTickets
text \<open>The automaton starts without any tickets. The initialization actions supply the system with
a given number of tickets of the respective category. Customers can query the remaining number of
tickets (where prospective VIP customers can query both the remaining numbers of normal and VIP
tickets.) Buying is possible as long as there are still remaining tickets. If a category of
tickets is sold out, it can be refilled with the corresponding initialization action.\<close>
definition "s0 = (0, 0)"
fun step :: "state \<Rightarrow> act \<Rightarrow> state" where
"step (n, v) (NInit i) = (if n = 0 then (i, v) else (n, v))"
| "step (n, v) (VInit j) = (if v = 0 then (n, j) else (n, v))"
| "step (n, v) (NQuery) = (n, v)"
| "step (n, v) (VQuery) = (n, v)"
| "step (n, v) (NBuy) = (if n = 0 then (n, v) else (n - 1, v))"
| "step (n, v) (VBuy) = (if v = 0 then (n, v) else (n, v - 1))"
fun out :: "state \<Rightarrow> act \<Rightarrow> output" where
"out (n, v) (NInit i) = (if n = 0 then OK else Err)"
| "out (n, v) (VInit j) = (if v = 0 then OK else Err)"
| "out (n, v) (NQuery) = Out_NTickets n"
| "out (n, v) (VQuery) = Out_NVTickets (n, v)"
| "out (n, v) (NBuy) = (if n = 0 then Err else OK)"
| "out (n, v) (VBuy) = (if v = 0 then Err else OK)"
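text \<open>As a quick sanity check (the outputs follow directly from the definitions above):
in the initial state there are no tickets, so buying fails and a query reports zero
remaining tickets.\<close>
value "out s0 VBuy" \<comment> \<open>outputs \<open>Err\<close>\<close>
value "out s0 NQuery" \<comment> \<open>outputs \<open>Out_NTickets 0\<close>\<close>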
text \<open>This is an instance of automata with structured states.\<close>
fun contents :: "state \<Rightarrow> var \<Rightarrow> nat" where
"contents (n, v) NTickets = n"
| "contents (n, v) VTickets = v"
global_interpretation Structured_State s0 step out contents defines ex_run = run .
value "out (run s0 [NBuy, NInit 10000, NBuy, VInit 100, NBuy, NInit 50, VBuy, NBuy]) VQuery"
\<comment> \<open>outputs \<open>Out_NVTickets (9997, 99)\<close>\<close>
text \<open>We now consider two security domains: Normal ticket buyers, and VIP ticket buyers.\<close>
datatype domain =
N
| V
text \<open>We allow information to flow from normal to VIP users, but not the other way around.\<close>
definition [simp]: "FP = {(N, N), (N, V), (V, V)}"
fun dom :: "act \<Rightarrow> domain" where
"dom (NInit i) = N"
| "dom (VInit j) = V"
| "dom (NQuery) = N"
| "dom (VQuery) = V"
| "dom (NBuy) = N"
| "dom (VBuy) = V"
global_interpretation NI s0 step out FP dom defines ex_purge = purge
proof
show "refl FP" and "trans FP" by (auto simp add: refl_on_def trans_def)
qed
text \<open>The noninterference policy requires that the output of \<open>N\<close> actions must not depend on any
\<open>V\<close> actions that have happened before. In the following example, this is the case: Purging the
actions that are secret for \<open>N\<close> does not change the output of \<open>NQuery\<close>.\<close>
value "out (run s0 [NBuy, NInit 10000, NBuy, VInit 100, NBuy, NInit 50, VBuy, NBuy]) NQuery"
\<comment> \<open>outputs \<open>Out_NTickets 9997\<close>\<close>
value "out (run s0 (purge [NBuy, NInit 10000, NBuy, VInit 100, NBuy, NInit 50, VBuy, NBuy] N)) NQuery"
\<comment> \<open>outputs \<open>Out_NTickets 9997\<close>\<close>
text \<open>We prove that noninterference holds generally for this system by proving the reference
monitor assumptions.
We implement the policy by restricting which variables may be read and written by the two
domains.\<close>
fun observe :: "domain \<Rightarrow> var set" where
"observe N = {NTickets}"
| "observe V = {NTickets, VTickets}"
fun alter :: "domain \<Rightarrow> var set" where
"alter N = {NTickets}"
| "alter V = {VTickets}"
text \<open>This essentially specifies access control requirements, which are sufficient to implement
the information flow policy.\<close>
global_interpretation FP_Implementation s0 step out contents FP dom observe alter
proof
fix u v
assume "(u, v) \<in> FP"
then show "observe u \<subseteq> observe v" by auto
next
fix n u v
assume "n \<in> alter u" and "n \<in> observe v"
then show "(u, v) \<in> FP" by auto
qed
text \<open>We now have to verify that the automaton correctly implements the access control
requirements.\<close>
notation view'' ("_ \<sim>\<^bsub>_\<^esub> _")
text \<open>The visibility of variables, as specified by \<open>observe\<close>, induces state equivalence relations
for each domain. For our concrete system, we can simplify the general definition of these relations
as follows.\<close>
lemma [simp]:
"(s \<sim>\<^bsub>N\<^esub> t) \<longleftrightarrow> contents s NTickets = contents t NTickets"
"(s \<sim>\<^bsub>V\<^esub> t) \<longleftrightarrow> s = t"
unfolding view_def by (cases s; cases t; auto)+
text \<open>It turns out that, using these characterizations of the equivalence relations, the built-in
reasoner in Isabelle can prove the reference monitor assumptions automatically (after case
distinction wrt.\ actions and variables.)\<close>
global_interpretation Reference_Monitor s0 step out contents FP dom observe alter
proof (unfold_locales, goal_cases)
case (1 s t a) then show ?case by (cases s; cases t; cases a) auto next
case (2 s t a n) then show ?case by (cases s; cases t; cases a; cases n) auto next
case (3 s a n) then show ?case by (cases s; cases a; cases n) (auto split: if_splits)
qed
text \<open>Hence, the system is secure wrt.\ the noninterference policy.\<close>
theorem "NI_secure"
using monitor_secure .
end
|
lemma filterlim_at_bot_at_right: fixes f :: "'a::linorder_topology \<Rightarrow> 'b::linorder" assumes mono: "\<And>x y. Q x \<Longrightarrow> Q y \<Longrightarrow> x \<le> y \<Longrightarrow> f x \<le> f y" and bij: "\<And>x. P x \<Longrightarrow> f (g x) = x" "\<And>x. P x \<Longrightarrow> Q (g x)" and Q: "eventually Q (at_right a)" and bound: "\<And>b. Q b \<Longrightarrow> a < b" and P: "eventually P at_bot" shows "filterlim f at_bot (at_right a)" |
#include "traceback.h"
#include <stdio.h>
#include <string.h>
#include <boost/stacktrace.hpp>
#include <string>
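// Note: this matches the shape of the `traceback` hook expected by Go's
// runtime.SetCgoTraceback, so that C++ frames appear in Go tracebacks and
// profiles. A minimal sketch of the Go side (illustrative; it assumes the
// matching cgo declaration for cgoTraceback in traceback.h):
//
//   runtime.SetCgoTraceback(0, unsafe.Pointer(C.cgoTraceback), nil, nil)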
// Gather addresses from the call stack.
void cgoTraceback(cgoTracebackArg* arg) {
try {
// We can only unwind the current stack.
if (arg->context != 0) {
arg->buf[0] = 0;
return;
}
std::size_t skip = 3;
std::size_t max_depth = arg->max;
boost::stacktrace::stacktrace stacktrace(skip, max_depth);
// std::cout << boost::stacktrace::stacktrace();
std::size_t i = 0;
for (auto it = stacktrace.cbegin(); it != stacktrace.cend(); it++) {
arg->buf[i++] = (uintptr_t)(it->address());
}
    auto frames_count = stacktrace.size();
    // The list of addresses terminates at a 0, so make sure there is one.
    // frames_count is unsigned, so an empty trace is detected with == 0.
    if (frames_count == 0) {
      arg->buf[0] = 0;
    } else if (frames_count < arg->max) {
      arg->buf[frames_count] = 0;
    }
} catch (...) {
// ignore exception
}
} |
function dayear(dd,mm,yyyy)
!
implicit none
!
! Dummy arguments
!
integer :: dd,mm,yyyy
integer :: dayear
!
! Local variables
!
integer :: difdat
!
! + + + purpose + + +
! given a date in dd/mm/yyyy format,
! dayear will return the number of days
! from the first of that year.
!
! + + + keywords + + +
! date, utility
!
! + + + argument declarations + + +
!
! + + + argument definitions + + +
! dayear - returns the number of days from the first of that year
! dd - day
! mm - month
! yyyy - year
!
! + + + local variable definitions + + +
! difdat - the number of days between two dates. a function. this
! variable holds the value returned by the diffdat function.
! debe assumed this definition
! + + + function declarations + + +
!
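! + + + example + + +
! (illustrative) dayear(1,3,2000) = 61: 31 days of january plus
! 29 days of february (2000 is a leap year) plus 1.
!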
! + + + end specifications + + +
!
! get the difference in days + 1
!
dayear = difdat(1,1,yyyy,dd,mm,yyyy) + 1
!
end function dayear
|
module Main
import Coda.Node.Core
import Coda.Node.HTTP
import Coda.Node.FS
%default total
{-
HTTP Helpers
-}
queryParams : Ptr -> JS_IO (List String)
queryParams req = Strings.split (== '?') <$> prop { ty = String } req "url"
end : Ptr -> String -> JS_IO ()
end res = method { a = String } { b = () } res "end"
{-
Route Handlers
-}
handleIndex : Ptr -> Ptr -> JS_IO ()
handleIndex req res = do
fs <- require "fs"
indexFile <- readFileSync fs "./index.html"
end res indexFile
handleRun : Ptr -> Ptr -> JS_IO ()
handleRun req res = do
[_, query] <- queryParams req | _ => end res "Needs a query"
log query
end res query
handle404 : Ptr -> Ptr -> JS_IO ()
handle404 _ res = end res "Invalid route"
route : (String, String) -> Ptr -> Ptr -> JS_IO ()
route ("GET", "/") = handleIndex
route ("GET", "/run") = handleRun
route _ = handle404
partial
router : HTTP.Handler
router req res = do
verb <- prop { ty = String } req "method"
[path, query] <- queryParams req
log (verb ++ " " ++ path ++ " ? " ++ query)
route (verb, path) req res
partial
main : JS_IO ()
main = do server <- createServer router
listen server (MkPort 2112)
|
function prior = priorExpandParam(prior, params)
% PRIOREXPANDPARAM Expand the prior model's parameters from params vector.
% FORMAT
% DESC returns a prior structure filled with the parameters in the
% given vector. This is used as a helper function to enable
% parameters to be optimised in, for example, the NETLAB
% optimisation functions.
% ARG prior : the prior structure in which the parameters are to be
% placed.
% ARG param : vector of parameters which are to be placed in the
% prior structure.
% RETURN prior : prior structure with the given parameters in the
% relevant locations.
%
% As well as placing the parameters back in the structure, some
% transformation of parameters is also undertaken in this file. If the
% field transforms is not empty, it dictates how the prior parameters
% are to be transformed (for example by an exponential to keep them
% positive).
%
% SEEALSO : priorExtractParam, scg, conjgrad
%
% COPYRIGHT : Neil D. Lawrence, 2003, 2004
% PRIOR
if isfield(prior, 'transforms')
for i = 1:length(prior.transforms)
index = prior.transforms(i).index;
fhandle = str2func([prior.transforms(i).type 'Transform']);
params(index) = fhandle(params(index), 'atox');
end
end
fhandle = str2func([prior.type 'PriorExpandParam']);
prior = fhandle(prior, params);
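% Example (sketch): assuming the toolbox's priorCreate helper and a
% 'gamma' prior type (both illustrative), parameters round-trip through
% extract/expand:
%
%   prior = priorCreate('gamma');
%   params = priorExtractParam(prior);
%   prior = priorExpandParam(prior, params);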
|
function value = r8vec_eq ( n, a1, a2 )
%*****************************************************************************80
%
%% R8VEC_EQ is true if two R8VEC's are equal.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 27 October 2005
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer N, the number of entries in the vectors.
%
% Input, real A1(N), A2(N), two vectors to compare.
%
% Output, logical VALUE, is TRUE if every pair of elements A1(I)
% and A2(I) are equal, and FALSE otherwise.
%
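%  Example:
%
%    N = 3, A1 = [ 1, 2, 3 ], A2 = [ 1, 2, 3 ] gives VALUE = TRUE.
%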
value = all ( a1(1:n) == a2(1:n) );
return
end
|
##### Copyright 2020 The TensorFlow Authors.
```python
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Quantum data
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/quantum_data">View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb">Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb">View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/quantum_data.ipynb">Download notebook</a>
</td>
</table>
Building off of the comparisons made in the [MNIST](https://www.tensorflow.org/quantum/tutorials/mnist) tutorial, this tutorial explores the recent work of [Huang et al.](https://arxiv.org/abs/2011.01938) that shows how different datasets affect performance comparisons. In the work, the authors seek to understand how and when classical machine learning models can learn as well as (or better than) quantum models. The work also showcases an empirical performance separation between classical and quantum machine learning models via a carefully crafted dataset. You will:
1. Prepare a reduced dimension Fashion-MNIST dataset.
2. Use quantum circuits to re-label the dataset and compute Projected Quantum Kernel features (PQK).
3. Train a classical neural network on the re-labeled dataset and compare the performance with a model that has access to the PQK features.
## Setup
```python
!pip install tensorflow==2.7.0 tensorflow-quantum
```
    Collecting tensorflow==2.7.0
    Collecting tensorflow-quantum
    (pip download and dependency-resolution progress output truncated)
    ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
    pydata-google-auth 1.4.0 requires google-auth<3.0dev,>=1.25.0; python_version >= "3.6", but you have google-auth 1.18.0 which is incompatible.
    Successfully installed backports.cached-property-1.0.1 cirq-core-0.14.0 cirq-google-0.14.0 duet-0.2.5 gast-0.4.0 google-api-core-1.21.0 google-auth-1.18.0 googleapis-common-protos-1.52.0 keras-2.7.0 sympy-1.8 tensorflow-2.7.0 tensorflow-estimator-2.7.0 tensorflow-quantum-0.6.1 typing-extensions-3.10.0.0
```python
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
```
<module 'pkg_resources' from '/usr/local/lib/python3.7/dist-packages/pkg_resources/__init__.py'>
```python
import cirq
import sympy
import numpy as np
import tensorflow as tf
import tensorflow_quantum as tfq
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
np.random.seed(1234)
```
## 1. Data preparation
You will begin by preparing the fashion-MNIST dataset for running on a quantum computer.
### 1.1 Download fashion-MNIST
The first step is to get the traditional fashion-mnist dataset. This can be done using the `tf.keras.datasets` module.
```python
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train, x_test = x_train/255.0, x_test/255.0
print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_test))
```
    Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
    Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
    Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
    Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
    (download progress output truncated)
Number of original training examples: 60000
Number of original test examples: 10000
Filter the dataset to keep just the T-shirts/tops (class 0) and dresses (class 3), removing the other classes. At the same time, convert the label `y` to a boolean: `True` for class 0 and `False` for class 3.
```python
def filter_03(x, y):
keep = (y == 0) | (y == 3)
x, y = x[keep], y[keep]
y = y == 0
return x,y
```
```python
x_train, y_train = filter_03(x_train, y_train)
x_test, y_test = filter_03(x_test, y_test)
print("Number of filtered training examples:", len(x_train))
print("Number of filtered test examples:", len(x_test))
```
Number of filtered training examples: 12000
Number of filtered test examples: 2000
```python
print(y_train[0])
plt.imshow(x_train[0, :, :])
plt.colorbar()
```
### 1.2 Downscale the images
Just like in the MNIST example, you will need to downscale these images to fit within the limits of current quantum computers. This time, however, you will use a PCA transformation to reduce the dimensions instead of a `tf.image.resize` operation.
```python
def truncate_x(x_train, x_test, n_components=10):
"""Perform PCA on image dataset keeping the top `n_components` components."""
n_points_train = tf.gather(tf.shape(x_train), 0)
n_points_test = tf.gather(tf.shape(x_test), 0)
# Flatten to 1D
x_train = tf.reshape(x_train, [n_points_train, -1])
x_test = tf.reshape(x_test, [n_points_test, -1])
# Normalize.
feature_mean = tf.reduce_mean(x_train, axis=0)
x_train_normalized = x_train - feature_mean
x_test_normalized = x_test - feature_mean
# Truncate.
e_values, e_vectors = tf.linalg.eigh(
tf.einsum('ji,jk->ik', x_train_normalized, x_train_normalized))
return tf.einsum('ij,jk->ik', x_train_normalized, e_vectors[:,-n_components:]), \
tf.einsum('ij,jk->ik', x_test_normalized, e_vectors[:, -n_components:])
```
```python
DATASET_DIM = 10
x_train, x_test = truncate_x(x_train, x_test, n_components=DATASET_DIM)
print('New datapoint dimension:', len(x_train[0]))
```
New datapoint dimension: 10
The last step is to reduce the size of the dataset to just 1000 training datapoints and 200 testing datapoints.
```python
N_TRAIN = 1000
N_TEST = 200
x_train, x_test = x_train[:N_TRAIN], x_test[:N_TEST]
y_train, y_test = y_train[:N_TRAIN], y_test[:N_TEST]
```
```python
print("New number of training examples:", len(x_train))
print("New number of test examples:", len(x_test))
```
New number of training examples: 1000
New number of test examples: 200
## 2. Relabeling and computing PQK features
You will now prepare a "stilted" quantum dataset by incorporating quantum components and re-labeling the truncated fashion-MNIST dataset you've created above. In order to get the most separation between quantum and classical methods, you will first prepare the PQK features and then relabel outputs based on their values.
### 2.1 Quantum encoding and PQK features
You will create a new set of features, based on `x_train`, `y_train`, `x_test` and `y_test` that is defined to be the 1-RDM on all qubits of:
$V(x_{\text{train}} / n_{\text{trotter}}) ^ {n_{\text{trotter}}} U_{\text{1qb}} | 0 \rangle$
where $U_\text{1qb}$ is a wall of single-qubit rotations and $V(\hat{\theta}) = e^{-i\sum_i \hat{\theta_i} (X_i X_{i+1} + Y_i Y_{i+1} + Z_i Z_{i+1})}$.
First, you can generate the wall of single qubit rotations:
```python
def single_qubit_wall(qubits, rotations):
"""Prepare a single qubit X,Y,Z rotation wall on `qubits`."""
wall_circuit = cirq.Circuit()
for i, qubit in enumerate(qubits):
for j, gate in enumerate([cirq.X, cirq.Y, cirq.Z]):
wall_circuit.append(gate(qubit) ** rotations[i][j])
return wall_circuit
```
You can quickly verify this works by looking at the circuit:
```python
SVGCircuit(single_qubit_wall(
cirq.GridQubit.rect(1,4), np.random.uniform(size=(4, 3))))
```
Next you can prepare $V(\hat{\theta})$ with the help of `tfq.util.exponential` which can exponentiate any commuting `cirq.PauliSum` objects:
```python
def v_theta(qubits):
"""Prepares a circuit that generates V(\theta)."""
ref_paulis = [
cirq.X(q0) * cirq.X(q1) + \
cirq.Y(q0) * cirq.Y(q1) + \
cirq.Z(q0) * cirq.Z(q1) for q0, q1 in zip(qubits, qubits[1:])
]
exp_symbols = list(sympy.symbols('ref_0:'+str(len(ref_paulis))))
return tfq.util.exponential(ref_paulis, exp_symbols), exp_symbols
```
This circuit is a little harder to verify by inspection, but you can still examine a two-qubit case to see what is happening:
```python
test_circuit, test_symbols = v_theta(cirq.GridQubit.rect(1, 2))
print(f'Symbols found in circuit:{test_symbols}')
SVGCircuit(test_circuit)
```
Symbols found in circuit:[ref_0]
Now you have all the building blocks you need to put your full encoding circuits together:
```python
def prepare_pqk_circuits(qubits, classical_source, n_trotter=10):
"""Prepare the pqk feature circuits around a dataset."""
n_qubits = len(qubits)
n_points = len(classical_source)
# Prepare random single qubit rotation wall.
random_rots = np.random.uniform(-2, 2, size=(n_qubits, 3))
initial_U = single_qubit_wall(qubits, random_rots)
# Prepare parametrized V
V_circuit, symbols = v_theta(qubits)
exp_circuit = cirq.Circuit(V_circuit for t in range(n_trotter))
# Convert to `tf.Tensor`
initial_U_tensor = tfq.convert_to_tensor([initial_U])
initial_U_splat = tf.tile(initial_U_tensor, [n_points])
full_circuits = tfq.layers.AddCircuit()(
initial_U_splat, append=exp_circuit)
# Replace placeholders in circuits with values from `classical_source`.
return tfq.resolve_parameters(
full_circuits, tf.convert_to_tensor([str(x) for x in symbols]),
tf.convert_to_tensor(classical_source*(n_qubits/3)/n_trotter))
```
Choose some qubits and prepare the data encoding circuits:
```python
qubits = cirq.GridQubit.rect(1, DATASET_DIM + 1)
q_x_train_circuits = prepare_pqk_circuits(qubits, x_train)
q_x_test_circuits = prepare_pqk_circuits(qubits, x_test)
```
Next, compute the PQK features based on the 1-RDM of the dataset circuits above and store the results in `rdm`, a `tf.Tensor` with shape `[n_points, n_qubits, 3]`. The entries in `rdm[i][j][k]` = $\langle \psi_i | OP^k_j | \psi_i \rangle$ where `i` indexes over datapoints, `j` indexes over qubits and `k` indexes over $\lbrace \hat{X}, \hat{Y}, \hat{Z} \rbrace$.
```python
def get_pqk_features(qubits, data_batch):
"""Get PQK features based on above construction."""
ops = [[cirq.X(q), cirq.Y(q), cirq.Z(q)] for q in qubits]
ops_tensor = tf.expand_dims(tf.reshape(tfq.convert_to_tensor(ops), -1), 0)
batch_dim = tf.gather(tf.shape(data_batch), 0)
ops_splat = tf.tile(ops_tensor, [batch_dim, 1])
exp_vals = tfq.layers.Expectation()(data_batch, operators=ops_splat)
rdm = tf.reshape(exp_vals, [batch_dim, len(qubits), -1])
return rdm
```
```python
x_train_pqk = get_pqk_features(qubits, q_x_train_circuits)
x_test_pqk = get_pqk_features(qubits, q_x_test_circuits)
print('New PQK training dataset has shape:', x_train_pqk.shape)
print('New PQK testing dataset has shape:', x_test_pqk.shape)
```
New PQK training dataset has shape: (1000, 11, 3)
New PQK testing dataset has shape: (200, 11, 3)
### 2.2 Re-labeling based on PQK features
Now that you have these quantum generated features in `x_train_pqk` and `x_test_pqk`, it is time to re-label the dataset. To achieve maximum separation between quantum and classical performance you can re-label the dataset based on the spectrum information found in `x_train_pqk` and `x_test_pqk`.
Note: This preparation of your dataset to explicitly maximize the separation in performance between the classical and quantum models might feel like cheating, but it provides a **very** important proof of existence of datasets that are hard for classical computers and easy for quantum computers to model. There would be no point in searching for quantum advantage in QML if you couldn't first create something like this to demonstrate advantage.
```python
def compute_kernel_matrix(vecs, gamma):
"""Computes d[i][j] = e^ -gamma * (vecs[i] - vecs[j]) ** 2 """
scaled_gamma = gamma / (
tf.cast(tf.gather(tf.shape(vecs), 1), tf.float32) * tf.math.reduce_std(vecs))
return scaled_gamma * tf.einsum('ijk->ij',(vecs[:,None,:] - vecs) ** 2)
def get_spectrum(datapoints, gamma=1.0):
"""Compute the eigenvalues and eigenvectors of the kernel of datapoints."""
KC_qs = compute_kernel_matrix(datapoints, gamma)
S, V = tf.linalg.eigh(KC_qs)
S = tf.math.abs(S)
return S, V
```
```python
S_pqk, V_pqk = get_spectrum(
tf.reshape(tf.concat([x_train_pqk, x_test_pqk], 0), [-1, len(qubits) * 3]))
S_original, V_original = get_spectrum(
tf.cast(tf.concat([x_train, x_test], 0), tf.float32), gamma=0.005)
print('Eigenvectors of pqk kernel matrix:', V_pqk)
print('Eigenvectors of original kernel matrix:', V_original)
```
Eigenvectors of pqk kernel matrix: tf.Tensor(
[[ 0.02095697 0.01059745 0.02166322 ... 0.09526508 0.00300356
0.02826785]
[ 0.02293038 0.04663572 0.00791177 ... 0.00220983 -0.6957587
0.02859015]
[ 0.01778554 -0.0030075 -0.02552235 ... 0.02335721 0.00414519
0.02690097]
...
[-0.0605794 0.01324826 0.02695336 ... 0.00716051 0.03977184
0.03853431]
[-0.06333087 -0.00304116 0.00977427 ... -0.03250755 0.02224028
0.03674842]
[-0.05860277 0.00584422 0.00264832 ... -0.04459745 -0.01932838
0.03299437]], shape=(1200, 1200), dtype=float32)
Eigenvectors of original kernel matrix: tf.Tensor(
[[ 3.8356818e-02 2.8347293e-02 -1.1697864e-02 ... -4.0755421e-02
2.0624822e-02 3.2069720e-02]
[-4.0181600e-02 8.8809701e-03 -1.3882567e-02 ... -7.6112538e-03
7.1638334e-01 2.8819481e-02]
[-1.6671857e-02 1.3503703e-02 -3.6638588e-02 ... 4.2131193e-02
-3.7604037e-03 2.1954076e-02]
...
[-3.0156480e-02 -1.6716314e-02 -1.6033923e-02 ... 2.1481956e-03
-5.8309413e-03 2.3656894e-02]
[ 3.9776899e-03 -4.9988784e-02 -5.2833343e-03 ... -2.2350436e-02
-4.1845851e-02 2.7820019e-02]
[-1.6657291e-02 -8.1861708e-03 -4.3234091e-02 ... -3.2867838e-04
9.1463570e-03 1.8750878e-02]], shape=(1200, 1200), dtype=float32)
Now you have everything you need to re-label the dataset! You can consult the flowchart to better understand how to maximize performance separation when re-labeling the dataset.
In order to maximize the separation between quantum and classical models, you will attempt to maximize the geometric difference between the original dataset and the PQK features kernel matrices, $g(K_1 || K_2) = \sqrt{ || \sqrt{K_2} K_1^{-1} \sqrt{K_2} || _\infty}$, using `S_pqk, V_pqk` and `S_original, V_original`. A large value of $g$ ensures that you initially move to the right in the flowchart, down towards a prediction advantage in the quantum case.
Note: Computing the quantities $s$ and $d$ is also very useful when looking to better understand performance separations. In this case, ensuring a large $g$ value is enough to see a performance separation.
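To make $g$ concrete, here is a rough sketch (not part of the original tutorial) of how you could estimate it from the spectra computed above. It assumes `S_pqk, V_pqk, S_original, V_original` from the previous cell, reassembles each kernel from its eigendecomposition, and adds a small `eps` to regularize the inverse of $K_1$:
```python
def geometric_difference(S_1, V_1, S_2, V_2, eps=1e-6):
  """Illustrative sketch: g(K_1 || K_2) = sqrt(||sqrt(K_2) K_1^{-1} sqrt(K_2)||_inf).

  Assumes K_i = V_i diag(S_i) V_i^T as returned by `get_spectrum`; `eps`
  regularizes the inverse of K_1. This helper is an assumption of this
  sketch, not a function from the tutorial.
  """
  K_1_inv = V_1 @ tf.linalg.diag(1.0 / (S_1 + eps)) @ tf.transpose(V_1)
  sqrt_K_2 = V_2 @ tf.linalg.diag(S_2 ** 0.5) @ tf.transpose(V_2)
  M = sqrt_K_2 @ K_1_inv @ sqrt_K_2
  # The operator (infinity) norm of the matrix is its largest singular value.
  return tf.sqrt(tf.reduce_max(tf.linalg.svd(M, compute_uv=False)))

# e.g. with K_1 = original kernel and K_2 = PQK kernel:
# g_val = geometric_difference(S_original, V_original, S_pqk, V_pqk)
```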
```python
def get_stilted_dataset(S, V, S_2, V_2, lambdav=1.1):
"""Prepare new labels that maximize geometric distance between kernels."""
S_diag = tf.linalg.diag(S ** 0.5)
S_2_diag = tf.linalg.diag(S_2 / (S_2 + lambdav) ** 2)
scaling = S_diag @ tf.transpose(V) @ \
V_2 @ S_2_diag @ tf.transpose(V_2) @ \
V @ S_diag
  # Generate new labels using the largest eigenvector.
_, vecs = tf.linalg.eig(scaling)
new_labels = tf.math.real(
tf.einsum('ij,j->i', tf.cast(V @ S_diag, tf.complex64), vecs[-1])).numpy()
# Create new labels and add some small amount of noise.
final_y = new_labels > np.median(new_labels)
noisy_y = (final_y ^ (np.random.uniform(size=final_y.shape) > 0.95))
return noisy_y
```
```python
y_relabel = get_stilted_dataset(S_pqk, V_pqk, S_original, V_original)
y_train_new, y_test_new = y_relabel[:N_TRAIN], y_relabel[N_TRAIN:]
```
## 3. Comparing models
Now that you have prepared your dataset, it is time to compare model performance. You will create two small feedforward neural networks and compare the model that is given access to the PQK features found in `x_train_pqk` against a fair classical model trained on the original features.
### 3.1 Create PQK enhanced model
Using standard `tf.keras` library features you can now create and train a model on the `x_train_pqk` and `y_train_new` datapoints:
```python
#docs_infra: no_execute
def create_pqk_model():
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[len(qubits) * 3,]))
model.add(tf.keras.layers.Dense(16, activation='sigmoid'))
model.add(tf.keras.layers.Dense(1))
return model
pqk_model = create_pqk_model()
pqk_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
metrics=['accuracy'])
pqk_model.summary()
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 32) 1088
dense_1 (Dense) (None, 16) 528
dense_2 (Dense) (None, 1) 17
=================================================================
Total params: 1,633
Trainable params: 1,633
Non-trainable params: 0
_________________________________________________________________
```python
#docs_infra: no_execute
pqk_history = pqk_model.fit(tf.reshape(x_train_pqk, [N_TRAIN, -1]),
y_train_new,
batch_size=32,
epochs=1000,
verbose=0,
validation_data=(tf.reshape(x_test_pqk, [N_TEST, -1]), y_test_new))
```
### 3.2 Create a classical model
Similar to the code above, you can now also create a classical model that doesn't have access to the PQK features in your stilted dataset. This model can be trained using `x_train` and `y_train_new`.
```python
#docs_infra: no_execute
def create_fair_classical_model():
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[DATASET_DIM,]))
model.add(tf.keras.layers.Dense(16, activation='sigmoid'))
model.add(tf.keras.layers.Dense(1))
return model
model = create_fair_classical_model()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=0.03),
metrics=['accuracy'])
model.summary()
```
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 32) 352
dense_4 (Dense) (None, 16) 528
dense_5 (Dense) (None, 1) 17
=================================================================
Total params: 897
Trainable params: 897
Non-trainable params: 0
_________________________________________________________________
```python
#docs_infra: no_execute
classical_history = model.fit(x_train,
y_train_new,
batch_size=32,
epochs=1000,
verbose=0,
validation_data=(x_test, y_test_new))
```
### 3.3 Compare performance
Now that you have trained the two models you can quickly plot the performance gap between them on the validation data. Typically both models will achieve > 0.9 accuracy on the training data. However, on the validation data it becomes clear that only the model with access to the PQK features generalizes well to unseen instances.
```python
#docs_infra: no_execute
plt.figure(figsize=(10,5))
plt.plot(classical_history.history['accuracy'], label='accuracy_classical')
plt.plot(classical_history.history['val_accuracy'], label='val_accuracy_classical')
plt.plot(pqk_history.history['accuracy'], label='accuracy_quantum')
plt.plot(pqk_history.history['val_accuracy'], label='val_accuracy_quantum')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
```
Success: You have engineered a stilted quantum dataset that can intentionally defeat classical models in a fair (but contrived) setting. Try comparing results using other types of classical models. The next step is to try and see if you can find new and interesting datasets that can defeat classical models without needing to engineer them yourself!
## 4. Important conclusions
There are several important conclusions you can draw from this and the [MNIST](https://www.tensorflow.org/quantum/tutorials/mnist) experiments:
1. It's very unlikely that the quantum models of today will beat classical model performance on classical data, especially on today's classical datasets, which can have upwards of a million datapoints.
2. Just because the data comes from a quantum circuit that is hard to simulate classically doesn't necessarily mean the data is hard for a classical model to learn.
3. Datasets (ultimately quantum in nature) that are easy for quantum models to learn and hard for classical models to learn do exist, regardless of model architecture or training algorithms used.
|
[STATEMENT]
lemma lm129:
assumes "runiq f" "x \<in> Domain f"
shows "(f Else 0) x = (toFunction f) x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f Else (0::'b)) x = toFunction f x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
runiq f
x \<in> Domain f
goal (1 subgoal):
1. (f Else (0::'b)) x = toFunction f x
[PROOF STEP]
by (metis Image_runiq_eq_eval toFunction_def) |
(* Title: HOL/Auth/n_germanish_on_inis.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanish Protocol Case Study*}
theory n_germanish_on_inis imports n_germanish_on_ini
begin
lemma on_inis:
assumes b1: "f \<in> (invariants N)" and b2: "ini \<in> {andList (allInitSpecs N)}" and b3: "formEval ini s"
shows "formEval f s"
proof -
have c1: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__1 p__Inv0 p__Inv2)\<or>
(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__2 p__Inv2)\<or>
(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
apply (cut_tac b1, simp) done
moreover {
assume d1: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__1 p__Inv0 p__Inv2)"
have "formEval f s"
apply (rule iniImply_inv__1)
apply (cut_tac d1, assumption)
apply (cut_tac b2 b3, blast) done
}
moreover {
assume d1: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__2 p__Inv2)"
have "formEval f s"
apply (rule iniImply_inv__2)
apply (cut_tac d1, assumption)
apply (cut_tac b2 b3, blast) done
}
moreover {
assume d1: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__3 p__Inv0 p__Inv2)"
have "formEval f s"
apply (rule iniImply_inv__3)
apply (cut_tac d1, assumption)
apply (cut_tac b2 b3, blast) done
}
ultimately show "formEval f s"
by satx
qed
end
|
-makelib xcelium_lib/xil_defaultlib -sv \
"C:/Xilinx/Vivado/2019.1/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" \
"C:/Xilinx/Vivado/2019.1/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" \
-endlib
-makelib xcelium_lib/xpm \
"C:/Xilinx/Vivado/2019.1/data/ip/xpm/xpm_VCOMP.vhd" \
-endlib
-makelib xcelium_lib/blk_mem_gen_v8_4_3 \
"../../../ipstatic/simulation/blk_mem_gen_v8_4.v" \
-endlib
-makelib xcelium_lib/xil_defaultlib \
"../../../../MIPSMulticiclo_R3yModoDepuracion.srcs/sources_1/ip/mem32x512/sim/mem32x512.v" \
-endlib
-makelib xcelium_lib/xil_defaultlib \
  glbl.v \
-endlib
|
library(XML)
filename <- "D:/DataMining/data-mining-course/httpd/httpd_log_simple.xml"
xmlFile <- xmlTreeParse(filename)
class(xmlFile)
xmlTop = xmlRoot(xmlFile)
# print(xmlTop)[1:2]
doc <- xmlParse(filename)
df_paths <- xmlToDataFrame(
getNodeSet(doc, "//path"),
colClasses=c("list"))
print(df_paths)
print(xmlTop[1])
xml_lapply <- xmlApply(xmlTop, function(x) xmlApply(x, xmlValue))
print(xml_lapply)
# xml_unlist <- unlist(xml_lapply$logentry$paths)
# print(xml_unlist)
print(xml_lapply[1]$logentry$paths)
class(xml_lapply[1]$logentry$paths)
xml_apply <- xmlSApply(xmlTop, function(x) xmlSApply(x, xmlValue))
print(xml_apply)
# df <- data.frame(t(xml_apply), row.names=NULL)
df2 <- data.frame(t(sapply(xml_apply, unlist)), row.names=NULL)
print(df2)
|
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin, Simon Hudon, Scott Morrison
-/
import control.bifunctor
import logic.equiv.basic
/-!
# Functor and bifunctors can be applied to `equiv`s.
We define
```lean
def functor.map_equiv (f : Type u → Type v) [functor f] [is_lawful_functor f] :
α ≃ β → f α ≃ f β
```
and
```lean
def bifunctor.map_equiv (F : Type u → Type v → Type w) [bifunctor F] [is_lawful_bifunctor F] :
α ≃ β → α' ≃ β' → F α α' ≃ F β β'
```
-/
universes u v w
variables {α β : Type u}
open equiv
namespace functor
variables (f : Type u → Type v) [functor f] [is_lawful_functor f]
/-- Apply a functor to an `equiv`. -/
def map_equiv (h : α ≃ β) : f α ≃ f β :=
{ to_fun := map h,
inv_fun := map h.symm,
left_inv := λ x, by simp [map_map],
right_inv := λ x, by simp [map_map] }
@[simp]
lemma map_equiv_apply (h : α ≃ β) (x : f α) :
(map_equiv f h : f α ≃ f β) x = map h x := rfl
@[simp]
lemma map_equiv_symm_apply (h : α ≃ β) (y : f β) :
(map_equiv f h : f α ≃ f β).symm y = map h.symm y := rfl
@[simp]
lemma map_equiv_refl : map_equiv f (equiv.refl α) = equiv.refl (f α) :=
begin
ext x,
simp only [map_equiv_apply, refl_apply],
exact is_lawful_functor.id_map x,
end
end functor
namespace bifunctor
variables {α' β' : Type v} (F : Type u → Type v → Type w) [bifunctor F] [is_lawful_bifunctor F]
/-- Apply a bifunctor to a pair of `equiv`s. -/
def map_equiv (h : α ≃ β) (h' : α' ≃ β') : F α α' ≃ F β β' :=
{ to_fun := bimap h h',
inv_fun := bimap h.symm h'.symm,
left_inv := λ x, by simp [bimap_bimap, id_bimap],
right_inv := λ x, by simp [bimap_bimap, id_bimap] }
@[simp]
lemma map_equiv_apply (h : α ≃ β) (h' : α' ≃ β') (x : F α α') :
(map_equiv F h h' : F α α' ≃ F β β') x = bimap h h' x := rfl
@[simp]
lemma map_equiv_symm_apply (h : α ≃ β) (h' : α' ≃ β') (y : F β β') :
(map_equiv F h h' : F α α' ≃ F β β').symm y = bimap h.symm h'.symm y := rfl
@[simp]
lemma map_equiv_refl_refl : map_equiv F (equiv.refl α) (equiv.refl α') = equiv.refl (F α α') :=
begin
ext x,
simp [id_bimap]
end
end bifunctor
|
#readelf: -r --wide
Relocation section '.rel.dyn' at offset 0x[0-9a-f]+ contains 1 entries:
Offset Info Type Sym. Value Symbol's Name
[0-9a-f]+ +[0-9a-f]+ +R_386_RELATIVE +
|
# coding: utf-8
# In[ ]:
import ensembles as en
import pandas as pd
import numpy as np
import xgboost as xgb
import category_encoders as ce
from sklearn import datasets, linear_model, preprocessing, grid_search
from sklearn.preprocessing import Imputer, PolynomialFeatures, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.externals import joblib
from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.regularizers import l2, activity_l2
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score, log_loss, accuracy_score, mean_absolute_error, mean_squared_error, r2_score
from sklearn.cross_validation import train_test_split
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from functools import partial
np.random.seed(1338)
# In[2]:
#Setting the parameters for the Gradient Boosting Model
# # Example 1
# In[3]:
#Default Values
param_gb = en.parameter_set_gradient_boosting(eval_metric = ['auc'], objective = ['binary:logistic'])
print(param_gb)
# # Example 2
# In[4]:
#Changing max_depth and eta
param_gb = en.parameter_set_gradient_boosting(eval_metric = ['auc'], objective = ['binary:logistic'], max_depth = [10], eta = [0.5])
print(param_gb)
# # Example 3
# In[5]:
#Hyper Parameter Optimisation (max_depth and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, eval_metric = ['auc'], objective = ['binary:logistic'], max_depth = [5, 10, 15], eta = [0.1, 0.3, 0.5])
print(param_gb)
# # Example 4
# In[6]:
#Hyper Parameter Optimisation (gamma and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, eval_metric = ['auc'], objective = ['binary:logistic'], gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3])
print(param_gb)
# # Example 5
# In[7]:
#Hyper Parameter Optimisation (gamma and eta)
param_gb = en.parameter_set_gradient_boosting(hyper_parameter_optimisation = True, eval_metric = ['auc'], objective = ['binary:logistic'], gamma = [0, 1, 3, 5, 7], eta = [0.1, 0.3], max_depth = [5, 10, 15], colsample_bylevel = [0.1])
print(param_gb)
|
lemma homeomorphism_compact: fixes f :: "'a::topological_space \<Rightarrow> 'b::t2_space" assumes "compact s" "continuous_on s f" "f ` s = t" "inj_on f s" shows "\<exists>g. homeomorphism s t f g" |
C *********************************************************
C * *
C * TEST NUMBER: 06.02.02/02 *
C * TEST TITLE : Validity of predefined view table *
C * *
C * PHIGS Validation Tests, produced by NIST *
C * *
C *********************************************************
COMMON /GLOBNU/ CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR,
1 TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN,
2 CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN,
3 DUMINT, DUMRL
INTEGER CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR,
1 TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN,
2 CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN,
3 DUMINT(20), ERRIND
REAL DUMRL(20)
COMMON /GLOBCH/ PIDENT, GLBERR, TSTMSG, FUNCID,
1 DUMCH
CHARACTER PIDENT*40, GLBERR*60, TSTMSG*900, FUNCID*80,
1 DUMCH(20)*20
C clipping indicator
C noclip clip
INTEGER PNCLIP, PCLIP
PARAMETER (PNCLIP = 0, PCLIP = 1)
INTEGER SPECON, SPECWT, NPVW, IVW, VCID(3)
REAL VOM(4,4), VMM(4,4), VCLM(6), IDENT(4,4), EXVCLM(6)
LOGICAL RAREQ
C expected values for default clipping limits
DATA EXVCLM /0.0, 1.0, 0.0, 1.0, 0.0, 1.0/
CALL INITGL ('06.02.02/02')
C open PHIGS
CALL XPOPPH (ERRFIL, MEMUN)
C open workstation
CALL POPWK (WKID, CONID, WTYPE)
CALL PQWKC (WKID, ERRIND, SPECON, SPECWT)
CALL CHKINQ ('pqwkc', ERRIND)
CALL SETMSG ('4 5', 'The reported number of predefined views ' //
1 'should be at least 6.')
C <Inquire view facilities> to determine
C npvw = number of predefined views
NPVW = -6
CALL PQVWF (SPECWT, ERRIND, NPVW)
CALL IFPF (ERRIND .EQ. 0 .AND. NPVW .GT. 5)
CALL SETMSG ('4 6', 'All the predefined view entries should ' //
1 'be accessible by <inquire predefined view ' //
2 'representation>.')
DO 100 IVW = 0, NPVW-1
CALL PQPVWR (SPECWT, IVW,
1 ERRIND, VOM, VMM, VCLM, VCID(1),VCID(2),VCID(3))
IF (ERRIND .NE. 0) THEN
CALL FAIL
GOTO 110
ENDIF
100 CONTINUE
CALL PASS
110 CONTINUE
C <inquire predefined view representation> on entry #0 to determine:
C vom(4,4) = orientation matrix
C vmm(4,4) = mapping matrix
C vclm(6) = view clipping limits
C vcid(3) = clipping indicators
VCLM(1) = -6.0
VCID(1) = -6
CALL PQPVWR (SPECWT, 0,
1 ERRIND, VOM, VMM, VCLM, VCID(1),VCID(2),VCID(3))
CALL CHKINQ ('pqpvwr', ERRIND)
CALL SETMSG ('6 7', 'The orientation and mapping matrices for ' //
1 'predefined view #0 should both be the identity ' //
2 'matrix.')
CALL IDMAT ( 4, IDENT)
CALL IFPF ( RAREQ(16, VOM, IDENT, 0.0, 0.0) .AND.
1 RAREQ(16, VMM, IDENT, 0.0, 0.0) )
CALL SETMSG ('6 7', 'The clipping limits for predefined view ' //
1 '#0 should be [0,1] in all three dimensions.')
CALL IFPF ( RAREQ(6, VCLM, EXVCLM, 0.0, 0.0))
CALL SETMSG ('6 7', 'All three clipping indicators for ' //
1 'predefined view #0 should be CLIP.')
CALL IFPF ( VCID(1) .EQ. PCLIP .AND.
1 VCID(2) .EQ. PCLIP .AND.
2 VCID(3) .EQ. PCLIP )
666 CONTINUE
CALL ENDIT
END
|
theory Queue
imports "../l4v/tools/autocorres/AutoCorres"
begin
text{*
Author: Andrew Gacek
Description: Correctness proofs for enqueue and dequeue operations on a circular queue.
*}
type_synonym 'a queue = "'a list \<times> nat \<times> nat"
fun wf :: "'a queue \<Rightarrow> bool" where
"wf (xs, f, n) = (0 < length xs \<and> f < length xs \<and> n \<le> length xs)"
fun is_empty :: "'a queue \<Rightarrow> bool" where
"is_empty (xs, f, n) = (n = 0)"
fun is_full :: "'a queue \<Rightarrow> bool" where
"is_full (xs, f, n) = (n = length xs)"
fun enqueue :: "'a queue \<Rightarrow> 'a \<Rightarrow> 'a queue" where
"enqueue (xs, f, n) x =
(if \<not>is_full (xs, f, n) then
(xs[(f + n) mod length xs := x], f, n + 1)
else
undefined)"
fun dequeue :: "'a queue \<Rightarrow> 'a \<times> 'a queue" where
"dequeue (xs, f, Suc n) = (xs!f, (xs, (f + 1) mod length xs, n))" |
"dequeue (xs, f, 0) = undefined"
fun rep :: "'a queue \<Rightarrow> 'a list" where
"rep (xs, f, n) = take n (rotate f xs)"
lemma enqueue_wf[intro, simp]:
"\<lbrakk> wf q; \<not>is_full q \<rbrakk> \<Longrightarrow> wf (enqueue q x)"
by (cases q, auto)
lemma dequeue_wf[intro, simp]:
"\<lbrakk> wf q; \<not>is_empty q \<rbrakk> \<Longrightarrow> wf (snd (dequeue q))"
by (cases q, auto simp: gr0_conv_Suc)
lemma nth_rotate1:
"i < length xs \<Longrightarrow> rotate1 xs ! i = (if Suc i < length xs then xs ! (Suc i) else xs ! 0)"
apply (cases "xs = []", simp)
apply (auto simp: rotate1_hd_tl nth_append nth_tl hd_conv_nth)
done
lemma nth_rotate1_mod:
"0 < length xs \<Longrightarrow> rotate1 xs ! (i mod length xs) = xs ! (Suc i mod length xs)"
by (metis hd_rotate_conv_nth length_greater_0_conv length_rotate1 rotate1_rotate_swap rotate_Suc)
lemma rotate1_Suc_update:
"rotate1 (xs[Suc n mod length xs := x]) = (rotate1 xs)[n mod length xs := x]"
apply (cases "xs = []", simp)
apply (auto simp: mod_Suc nth_list_update nth_rotate1 intro: nth_equalityI)
done
lemma take_rotate_update:
"n < length xs \<Longrightarrow> take n (rotate f (xs[(f + n) mod length xs := x])) = take n (rotate f xs)"
apply (induction f arbitrary: xs)
apply (auto simp: rotate1_rotate_swap rotate1_Suc_update)
apply (metis length_rotate1)
done
lemma nth_rotate:
"n < length xs \<Longrightarrow> (rotate f xs) ! n = xs ! ((f + n) mod length xs)"
apply (induction f arbitrary: xs)
apply (auto simp: rotate1_rotate_swap nth_rotate1_mod length_ineq_not_Nil)
done
lemma enqueue_rep:
"\<lbrakk> wf q; \<not>is_full q \<rbrakk> \<Longrightarrow> rep (enqueue q x) = rep q @ [x]"
by (cases q, auto simp: take_Suc_conv_app_nth take_rotate_update nth_rotate)
lemma dequeue_rep:
"\<lbrakk> wf q; \<not>is_empty q; dequeue q = (x, q') \<rbrakk> \<Longrightarrow> rep q = x # rep q'"
apply (cases q, simp)
apply (auto simp: gr0_conv_Suc take_Suc hd_rotate_conv_nth rotate_conv_mod[symmetric] rotate1_hd_tl)
done
install_C_file queue.c
autocorres[ts_rules = nondet, heap_abs_syntax] queue.c
context queue begin
definition is_queue :: "lifted_globals \<Rightarrow> bool" where
"is_queue s \<equiv> front_'' s < 10 \<and> length_'' s \<le> 10"
definition the_queue :: "lifted_globals \<Rightarrow> 8 word queue" where
"the_queue s \<equiv> (list_array (contents_'' s), unat (front_'' s), unat (length_'' s))"
fun queue_length :: "'a queue \<Rightarrow> nat" where
"queue_length (xs, f, n) = n"
lemma is_full_wp [wp]:
"\<lbrace> \<lambda>s. if queue_length (the_queue s) = 10 then Q 1 s else Q 0 s \<rbrace>
is_full'
\<lbrace> \<lambda>r s. Q r s \<rbrace>!"
apply (unfold is_full'_def)
apply wp
apply (auto simp: the_queue_def unat_arith_simps)
done
lemma enqueue_full:
"\<lbrace> \<lambda>s. is_queue s \<and> queue_length (the_queue s) = 10 \<and> P s \<rbrace>
enqueue' x
\<lbrace> \<lambda>r s. r = 0 \<and> P s \<rbrace>!"
apply (unfold enqueue'_def)
apply wp
apply (auto simp: is_queue_def the_queue_def unat_arith_simps)
done
lemma list_array_update:
"i < length (list_array a) \<Longrightarrow> list_array (Arrays.update a i x) = list_array a[i := x]"
by (auto simp add: list_array_def nth_list_update intro: nth_equalityI)
lemma enqueue_not_full:
"(\<And>c n s. P (s\<lparr>contents_'' := c, length_'' := n\<rparr>) = P s) \<Longrightarrow>
\<lbrace> \<lambda>s. is_queue s \<and>
q = the_queue s \<and>
queue_length q < 10 \<and>
P s \<rbrace>
enqueue' x
\<lbrace> \<lambda>r s. r > 0 \<and>
the_queue s = enqueue q x \<and>
is_queue s \<and>
P s \<rbrace>!"
apply (unfold enqueue'_def)
apply wp
apply (auto simp: is_queue_def the_queue_def list_array_update unat_arith_simps)
done
lemma is_empty_wp [wp]:
"\<lbrace> \<lambda>s. if queue_length (the_queue s) = 0 then Q 1 s else Q 0 s \<rbrace>
is_empty'
\<lbrace> \<lambda>r s. Q r s \<rbrace>!"
apply (unfold is_empty'_def)
apply wp
apply (auto simp: the_queue_def unat_arith_simps)
done
lemma dequeue_empty:
"\<lbrace> \<lambda>s. is_queue s \<and> queue_length (the_queue s) = 0 \<and> P s \<rbrace>
dequeue' x
\<lbrace> \<lambda>r s. r = 0 \<and> P s \<rbrace>!"
apply (unfold dequeue'_def)
apply wp
apply (auto simp: is_queue_def the_queue_def unat_arith_simps)
done
lemma dequeue_not_empty:
"(\<And>f n s. P (s\<lparr>front_'' := f, length_'' := n\<rparr>) = P s) \<Longrightarrow>
(\<And>s v. P (s[x := v]) = P s) \<Longrightarrow>
(\<And>s v. contents_'' (s[x := v]) = contents_'' s) \<Longrightarrow>
(\<And>s v. front_'' (s[x := v]) = front_'' s) \<Longrightarrow>
(\<And>s v. length_'' (s[x := v]) = length_'' s) \<Longrightarrow>
\<lbrace> \<lambda>s. is_queue s \<and>
q = the_queue s \<and>
queue_length q > 0 \<and>
is_valid_w8 s x \<and>
P s \<rbrace>
dequeue' x
\<lbrace> \<lambda>r s. r > 0 \<and>
dequeue q = (heap_w8 s x, the_queue s) \<and>
is_queue s \<and>
P s \<rbrace>!"
apply (unfold dequeue'_def)
apply wp
apply (auto simp add: fun_upd_def is_queue_def the_queue_def
gr0_conv_Suc list_array_nth unat_arith_simps
simp del: word_unat.Rep_inject[symmetric])
done
end
end |
/* -----------------------------------------------------------------------------
* Copyright 2021 Jonathan Haigh
* SPDX-License-Identifier: MIT
* ---------------------------------------------------------------------------*/
#ifndef SQ_INCLUDE_GUARD_system_schema_h_
#define SQ_INCLUDE_GUARD_system_schema_h_
#include "core/Primitive.h"
#include "core/typeutil.h"
#include <cstddef>
#include <gsl/gsl>
#include <optional>
#include <string_view>
namespace sq::system {
class SchemaImpl;
/**
* Represents the schema for a primitive type.
*/
class PrimitiveTypeSchema {
public:
constexpr explicit PrimitiveTypeSchema(std::string_view name,
std::string_view doc)
: name_{name}, doc_{doc} {}
SQ_ND std::string_view name() const;
SQ_ND std::string_view doc() const;
private:
std::string_view name_;
std::string_view doc_;
};
/**
 * Represents the schema for a parameter of a field of a system object.
*/
class ParamSchema {
public:
constexpr ParamSchema(std::string_view name, std::string_view doc,
std::size_t index, std::size_t type_index,
bool required, std::string_view default_value_str,
std::string_view default_value_doc)
: name_{name}, doc_{doc}, index_{index}, type_index_{type_index},
required_{required}, default_value_str_{default_value_str},
default_value_doc_{default_value_doc} {}
SQ_ND std::string_view name() const;
SQ_ND std::string_view doc() const;
SQ_ND std::size_t index() const;
SQ_ND const PrimitiveTypeSchema &type() const;
SQ_ND bool required() const;
SQ_ND std::optional<Primitive> default_value() const;
SQ_ND std::string_view default_value_doc() const;
private:
std::string_view name_;
std::string_view doc_;
std::size_t index_;
std::size_t type_index_;
bool required_;
std::string_view default_value_str_;
std::string_view default_value_doc_;
};
class TypeSchema;
/**
* Represents the schema for a field of a system object.
*/
class FieldSchema {
public:
constexpr FieldSchema(std::string_view name, std::string_view doc,
std::size_t params_begin_index,
std::size_t params_end_index,
std::size_t return_type_index, bool return_list,
bool null)
: name_{name}, doc_{doc}, params_begin_index_{params_begin_index},
params_end_index_{params_end_index},
return_type_index_{return_type_index},
return_list_{return_list}, null_{null} {}
SQ_ND std::string_view name() const;
SQ_ND std::string_view doc() const;
SQ_ND gsl::span<const ParamSchema> params() const;
SQ_ND const TypeSchema &return_type() const;
SQ_ND bool return_list() const;
SQ_ND bool null() const;
private:
std::string_view name_;
std::string_view doc_;
std::size_t params_begin_index_;
std::size_t params_end_index_;
std::size_t return_type_index_;
bool return_list_;
bool null_;
};
/**
* Represents the schema for a system object.
*/
class TypeSchema {
public:
constexpr TypeSchema(std::string_view name, std::string_view doc,
std::size_t fields_begin_index,
std::size_t fields_end_index)
: name_{name}, doc_{doc}, fields_begin_index_{fields_begin_index},
fields_end_index_{fields_end_index} {}
SQ_ND std::string_view name() const;
SQ_ND std::string_view doc() const;
SQ_ND gsl::span<const FieldSchema> fields() const;
private:
std::string_view name_;
std::string_view doc_;
std::size_t fields_begin_index_;
std::size_t fields_end_index_;
};
/**
* Represents the whole SQ schema.
*/
struct Schema {
public:
SQ_ND gsl::span<const TypeSchema> types() const;
SQ_ND gsl::span<const PrimitiveTypeSchema> primitive_types() const;
SQ_ND const TypeSchema &root_type() const;
};
/**
* Get the whole SQ schema.
*/
const Schema &schema();
} // namespace sq::system
#endif // SQ_INCLUDE_GUARD_system_schema_h_
|
<a href="https://colab.research.google.com/github/gcfer/reinforcement-learning/blob/main/RL_REINFORCE_TF2.ipynb" target="_parent"></a>
# Reinforcement Learning: REINFORCE (Policy Gradient Algorithm)
## Overview
In this notebook, we'll cover policy gradient algorithms, and we'll implement `REINFORCE`, the archetypal policy gradient algorithm. We'll test it by solving the cartpole problem in the OpenAI Gym.
```
import numpy as np
import pandas as pd
import datetime
# Import tensorflow
#!pip install tensorflow-gpu==1.14.0 > /dev/null 2>&1
import tensorflow as tf
import tensorflow.keras as K
print(tf.__version__)
# Check that tf sees the GPU
device_name = tf.test.gpu_device_name()
print(device_name)
# Import libraries for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('seaborn-pastel')
%matplotlib inline
%config InlineBackend.figure_format = 'retina' # this makes plot in high res
```
2.4.0
/device:GPU:0
Since we are in a remote notebook, we cannot display the progress of the environment in real time. Instead, we store the renderings and show a video at the end of the episode (refer to [this](https://star-ai.github.io/Rendering-OpenAi-Gym-in-Colaboratory/) guide in case you need it). The only advice that I can give is to import `gym` _after_ the installation step below.
```
#remove " > /dev/null 2>&1" to see what is going on under the hood
!pip install gym pyvirtualdisplay > /dev/null 2>&1
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
```
```
# Maybe
# !apt-get update > /dev/null 2>&1
# !apt-get install cmake > /dev/null 2>&1
# !pip install --upgrade setuptools 2>&1
# !pip install ez_setup > /dev/null 2>&1
# !pip install gym[atari] > /dev/null 2>&1
```
```
# Open AI gym
import gym
from gym import logger as gymlogger
from gym.wrappers import Monitor
gymlogger.set_level(40) #error only
import math
import random
import glob
import io
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
```
```
from pyvirtualdisplay import Display
display = Display(visible=0, size=(2880, 1800))
display.start()
```
<pyvirtualdisplay.display.Display at 0x7f1f97063978>
The function below is needed to display the video. I slightly modified it from the original one (that you can in the guide I linked above) to avoid the infinite repetition loop of the video.
```
"""
Utility functions to enable video recording of gym environment and displaying it
To enable video, just do "env = wrap_env(env)""
"""
def show_video():
mp4list = glob.glob('video/*.mp4')
if len(mp4list) > 0:
mp4 = mp4list[0]
video = io.open(mp4, 'r+b').read()
encoded = base64.b64encode(video)
    # Embed the recording as an HTML5 video tag; the `loop` attribute is
    # deliberately omitted so the video plays once instead of repeating.
    ipythondisplay.display(HTML(data='''<video alt="gym video" autoplay
        controls style="height: 400px;">
        <source src="data:video/mp4;base64,{0}" type="video/mp4" />
      </video>'''.format(encoded.decode('ascii'))))
else:
print("Could not find video")
def wrap_env(env):
env = Monitor(env, './video', force=True)
return env
```
## OpenAI Gym Cartpole
The Cartpole problem is a discrete control problem where we try to keep the pole vertical by moving the cart below it.
Upon loading the environment, we launch a simulation in which the agent chooses each action at random from the action space, and at the end we show the video of the result. The problem is considered unsolved (= game over) once the angle between the pole and the line orthogonal to the cart axis grows larger than a threshold. The flag `done` indicates when the episode is over.
```
# Load the environment and start
env = wrap_env(gym.make("CartPole-v0"))
```
```
observation = env.reset()
while True:
env.render()
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
break;
env.close()
```
```
show_video()
```
To better understand the inputs and outputs of the environment, let us consider the action space and the observation space.
The action space is of type `Discrete(n)` where `n` is the number of actions. This is equivalent to the discrete set $\{ 0, 1, 2, \dotsc, n-1 \}$.
```
env.action_space
```
Discrete(2)
The observation space is of type `Box(n)`, which means that it is the Cartesian product of `n` intervals.
```
env.observation_space
```
Box(-3.4028234663852886e+38, 3.4028234663852886e+38, (4,), float32)
```
[env.observation_space.low, env.observation_space.high]
```
[array([-4.8000002e+00, -3.4028235e+38, -4.1887903e-01, -3.4028235e+38],
dtype=float32),
array([4.8000002e+00, 3.4028235e+38, 4.1887903e-01, 3.4028235e+38],
dtype=float32)]
When we take a step in the environment, the feedback that we get includes the observation:
```
#env = gym.make('CartPole-v0')
# env = Monitor(env, './video', force=True)
env.reset()
obs, r, done, _ = env.step(0)
print(obs)
```
[-0.0248302 -0.19857408 0.00899464 0.29775695]
## Policy Gradient Algorithms
Policy gradient algorithms seek to learn the policy via gradient descent, as the name suggests. They differ from value-based algorithms, which instead learn the Q-function and derive the policy from it only indirectly.
To be more precise, a policy gradient algorithm seeks to learn the probability of each action given the current state, i.e., the policy $\pi(s,a)$. Then we take the action by randomly sampling $\pi(s,a)$.
In order to fit $\pi(s,a)$, we need to define an objective function. Since the neural network can approximate “any” function, we shall let the neural network find the parameters $\theta$ of the policy $\pi_\theta$ that maximizes the future expected discounted reward:
$$ \theta^* = \arg\max_\theta \mathbf{E}\!\left[\sum_{t=0}^T\gamma^t r(s_t,a_t)\right] $$
where the expectation is over trajectories $\tau=(s_0, a_0, r_0, s_1, a_1, r_1, \cdots, r_{T})$, $r_t:=r(s_t,a_t)$ is the reward that we get when we take action $a_t$ in state $s_t$, and $T$ is the length of the episode.
As a shorthand, define the future discounted reward
$$ R(\tau) =\sum_{t=0}^T\gamma^t r(s_t,a_t). $$
The expected value of $R$ is just the value function. To be extremely clear, it is very important to recognize that $R$ depends on the trajectory, but it does not depend on the weights of the policy. In fact, two different policies that suggest the same actions would get to the same value of $R$. In other words, $R$ depends on the actions, not on why or how those actions have been taken. This is very important in deriving the gradient with respect to $\theta$ below.
The trajectory follows a Markov decision process, i.e.
$$ p_\theta(\tau) = \prod_{t=0}^T p(s_{t}|a_{t-1},s_{t-1})\pi_\theta(a_t|s_t) $$
with the convention that $p(s_0|a_{-1}, s_{-1})=p_0(s_0)$ where $p_0$ is the density of the distribution of initial states. Putting all together:
$$ \theta^* = \arg\max_\theta \int \prod_{t=0}^T ds_t\, da_t \, \pi_\theta(a_t|s_t) p(s_{t}|a_{t-1}, s_{t-1}) \, R(\tau). $$
To find the maximum, we compute the gradient with respect to $\theta$ of the objective function above:
\begin{align} \nabla_\theta \mathbf{E}[R(\tau)]
& = \int d\tau \nabla p_\theta(\tau) R(\tau) \\ & = \int d\tau p_\theta(\tau) \nabla_\theta\!\log p_\theta(\tau) R(\tau) \\
& = \mathbf{E}[\nabla_\theta\!\log p_\theta(\tau) R(\tau)] \\
& = \mathbf{E}\!\left[\sum_{t=0}^T \nabla_\theta\!\log \pi_\theta(a_t|s_t) R(\tau)\right].
\end{align}
Notice that $R(\tau)$ multiplies the entire sum, that is,
$$ \nabla_\theta \mathbf{E}[R(\tau)] = \mathbf{E}\!\left[R(\tau)\sum_{t=0}^T \nabla_\theta\!\log \pi_\theta(a_t|s_t) \right]. $$
By integrating back in $\theta$, we get that an equivalent objective function to maximize is given by the expression above without the gradient, i.e.
$$ \theta^* = \arg\max_\theta \mathbf{E}\!\left[R(\tau)\sum_{t=0}^T \log \pi_\theta(a_t|s_t) \right]. $$
From a theoretical perspective, this is pretty much what a policy gradient algorithm does.
From a practical perspective, the integral is evaluated via the Monte Carlo method over several trajectories (episodes). That is, we sample a trajectory, optimize over it, and repeat this process several times.
The way it is implemented below is the following. We sample a trajectory with the previously available $\pi_\theta$. We save in memory all steps, actions, and rewards. We form the artificial distribution $\tilde{\pi}_t(a, a_t)=\delta_{a,a_t}$ where $\delta$ is the Kronecker delta. This is our “target distribution” since we expect, at convergence, that there is always a best action to take and that $\pi_\theta$ suggests such best action. To compare $\pi_\theta$ and $\tilde{\pi}_t$ we use the <font color='green'>categorical crossentropy</font>. The loss function is given by
\begin{align} L(\tau) & = R(\tau)\sum_{t=0}^T h(\tilde{\pi}_t, \pi_\theta) \\ & = R(\tau)\sum_{t=0}^T {\color{green}{\sum_{a\in\mathcal{A}} -\tilde{\pi}_t(a) \log (\pi_\theta(a))}} \\
& = -R(\tau)\sum_{t=0}^T \log (\pi_\theta(a_t)).
\end{align}
Minimizing $L(\tau)$ is equivalent to the maximization problem that we illustrated above.
To speed up the learning process, we provide all steps at once to the network, and compute a slightly different loss. Firstly, let us simplify the loss above via
$$ L(\tau) \approx -R(\tau) \log(\pi_\theta(a_0)).$$
Secondly, define the discounted future reward from $t'$ to $T$,
$$R(\tau_{t'}) := R(s_{t'}, a_{t'}, r_{t'}, \cdots, s_{T}, a_{T}, r_{T}),$$
for all $t'\in[0:T]$. Finally, we define a cumulative loss by summing up the loss at each step in the episode:
\begin{align} L(\tau) & \approx - \sum_{t=0}^T R(\tau_{t}) \log (\pi_\theta(a_t)).
\end{align}
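Before looking at the full agent, here is a minimal standalone sketch of the backward recursion used to compute the discounted future rewards, $R(\tau_t) = r_t + \gamma R(\tau_{t+1})$; the `learn` method of the agent below contains the same loop:
```
import numpy as np  # already imported above

def discounted_returns(rewards, gamma=0.99):
    # R[t] = r_t + gamma * R[t+1], filled in backwards from the final step.
    R = np.zeros(len(rewards))
    R[-1] = rewards[-1]
    for t in reversed(range(len(rewards) - 1)):
        R[t] = rewards[t] + gamma * R[t + 1]
    return R

# e.g. discounted_returns([1, 1, 1], gamma=0.5) -> array([1.75, 1.5 , 1.  ])
```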
```
# REINFORCE
class REINFORCE:
def __init__(self, state_size, action_size, gamma=None, max_steps=None):
# max_steps is the maximum number of batches [s, a, r, s_] or epochs remembered
# Parameters
self.state_size = state_size
self.action_size = action_size
self.memory = list()
if gamma is None:
self.gamma = 0.99
else:
self.gamma = gamma
if max_steps is None:
self.max_steps = 200
else:
self.max_steps = max_steps
        self.lr = 0.005  # learning rate
# actor network
self.actor = self.build_actor()
def remember(self, s, a, r, s_, done):
self.memory.append([s, a, r, s_, done])
if len(self.memory) > self.max_steps: # if too long
self.memory.pop(0) # forget the oldest
def forget(self):
self.memory = list()
# actor learns the policy: input is state; output is distribution over actions (policy)
def build_actor(self, n_hidden_1=None, n_hidden_2=None):
if n_hidden_1 == None:
n_hidden_1 = 6 * self.state_size
if n_hidden_2 == None:
n_hidden_2 = 6 * self.state_size
model = K.Sequential()
model.add(K.layers.Dense(n_hidden_1, activation=tf.nn.elu, input_dim=self.state_size)) # input
model.add(K.layers.Dense(n_hidden_2, activation=tf.nn.elu))
model.add(K.layers.Dense(self.action_size, activation='softmax')) # output
# loss is categorical_crossentropy since pi_theta* (vector) should be equal to one-hot action (vector)
# because there is always a best action to be taken
#
model.compile(optimizer=K.optimizers.RMSprop(lr=self.lr), loss='categorical_crossentropy')
return model
# actor implements policy gradient
def policy(self, s):
policy = self.actor.predict(s, batch_size=1).flatten()
a = np.random.choice(self.action_size, 1, p=policy)[0]
return a
# learn from memory
def learn(self):
# replay the entire episode
# minibatch = random.sample(self.memory, batch_size)
s, a, r, s_, done = zip(*self.memory)
a = np.reshape(a, (-1, 1))
T = a.shape[0] # epochs in memory
a_one_hot = np.zeros((T, self.action_size))
a_one_hot[np.arange(T), a.reshape(-1)] = 1 # size: T x action_size
s = np.concatenate(s) # or np.vstack(s)
target_actor = a_one_hot # actions
# s_ = np.concatenate(s_)
R = np.zeros(T,)
R[T-1] = r[T-1]
for t in reversed(range(T-1)):
R[t] = self.gamma * R[t+1] + r[t]
R = R.reshape(-1, 1)
self.actor.fit(s, target_actor, sample_weight=R, epochs=1, verbose=0)
# self.actor.train_on_batch(s, target_actor, sample_weight=R)
# v = self.critic.predict(s)
# v_ = self.critic.predict(s_)
# target_actor[:, a] = r + (1-done) * self.gamma * v_ - v # advantage
```
## Training
```
seed = 0
np.random.seed(seed)
tf.random.set_seed(seed)
# Restart environment
# env = Monitor(env, './video', force=True)
MAX_REWARD = 200
env._max_episode_steps = MAX_REWARD
# Parameters
n_episodes = 300
winning_streak = 10 # after this number of successive successes, training stops
reward_history = np.zeros(n_episodes)
gamma = 0.99
steps_in_memory = 200 # number of steps to remember
A = np.arange(env.action_space.n)
dim_state_space = env.observation_space.shape[0]
# Start training
agent = REINFORCE(dim_state_space, env.action_space.n, gamma, steps_in_memory)
# init
s = env.reset()
s = np.reshape(s, [1, dim_state_space])
# for _ in range(n_steps_in_memory):
# a = agent.policy(s)
# s_, r, done, _ = env.step(a)
# s_ = np.reshape(s_, [1, dim_state_space])
# agent.remember(s, a, r, s_, done)
template = "\rEpisode: {:3d}/{:3d} | Reward: {:3.0f} | Duration: {:.2f} s"
```
```
for e in range(n_episodes):
start_time = datetime.datetime.now()
s = env.reset()
s = np.reshape(s, [1, dim_state_space])
done = False
cum_reward = 0
while not done:
a = agent.policy(s)
s_, r, done, _ = env.step(a)
s_ = np.reshape(s_, [1, dim_state_space])
agent.remember(s, a, r, s_, done)
cum_reward += r
s = s_
agent.learn()
agent.forget()
dt = datetime.datetime.now() - start_time
print(template.format(e+1, n_episodes, cum_reward, dt.total_seconds()), end='')
reward_history[e] = cum_reward
```
Episode: 300/300 | Reward: 200 | Duration: 4.90 s
```
plt.plot(reward_history[0:e], label='Reward')
plt.xlabel('Episodes')
plt.ylabel('Cumulative reward')
plt.tight_layout()
plt.show()
```
Empirical observations:
* it is well known that policy gradient algorithms are noisy, and indeed we can observe it very clearly in the reward curve above
* increasing the learning rate accelerates learning (e.g. going from 0.001 to 0.01 more than halves the number of epochs required to reach the maximum score)
* we can modify the return weights $R(\tau_t)$ by subtracting a “baseline” so as to reduce the variance of the results; one very important baseline is such that $R(\tau_t)$ becomes the so-called advantage, which forms the basis for actor-critic algorithms (a minimal sketch of baseline subtraction follows below)
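As a minimal sketch of the baseline idea (this helper is not part of the agent above), one could center the returns before passing them as `sample_weight`; subtracting a constant baseline leaves the gradient estimate unbiased while reducing its variance:
```
def baseline_adjusted_returns(R):
    # Subtract the mean return (a constant baseline); dividing by the
    # standard deviation is a common additional normalization step.
    R = np.asarray(R, dtype=np.float64)
    return (R - R.mean()) / (R.std() + 1e-8)

# e.g. inside `learn`, one could experiment with:
# self.actor.fit(s, target_actor,
#                sample_weight=baseline_adjusted_returns(R), epochs=1, verbose=0)
```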
## Trying it
```
env = wrap_env(gym.make("CartPole-v0"))
s = env.reset()
s = np.reshape(s, [1, dim_state_space])
done = False
cum_reward = 0
while not done:
env.render()
a = agent.policy(s)
s_, r, done, _ = env.step(a)
s_ = np.reshape(s_, [1, dim_state_space])
agent.remember(s, a, r, s_, done)
cum_reward += r
s = s_
env.close()
print('We got a reward equal to {:.0f}'.format(cum_reward))
```
We got a reward equal to 200
```
show_video()
```
```
```
|
Formal statement is: proposition homotopic_with_compose_continuous_right: "\<lbrakk>homotopic_with_canon (\<lambda>f. p (f \<circ> h)) X Y f g; continuous_on W h; h ` W \<subseteq> X\<rbrakk> \<Longrightarrow> homotopic_with_canon p W Y (f \<circ> h) (g \<circ> h)" Informal statement is: If $f$ and $g$ are homotopic maps from $X$ to $Y$ and $h$ is a continuous map from $W$ to $X$ such that $h(W) \subseteq X$, then $f \circ h$ and $g \circ h$ are homotopic maps from $W$ to $Y$. |
proposition Schwarz_reflection: assumes "open S" and cnjs: "cnj ` S \<subseteq> S" and holf: "f holomorphic_on (S \<inter> {z. 0 < Im z})" and contf: "continuous_on (S \<inter> {z. 0 \<le> Im z}) f" and f: "\<And>z. \<lbrakk>z \<in> S; z \<in> \<real>\<rbrakk> \<Longrightarrow> (f z) \<in> \<real>" shows "(\<lambda>z. if 0 \<le> Im z then f z else cnj(f(cnj z))) holomorphic_on S" |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb" target="_parent"></a>
```python
# Mount Google Drive
from google.colab import drive # import drive from google colab
ROOT = "/content/drive" # default location for the drive
print(ROOT) # print content of ROOT (Optional)
drive.mount(ROOT,force_remount=True)
```
/content/drive
Mounted at /content/drive
# Neuromatch Academy: Week 1, Day 5, Tutorial 1
# Dimensionality Reduction: Geometric view of data
__Content creators:__ Alex Cayco Gajic, John Murray
__Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom
---
# Tutorial Objectives
In this notebook we'll explore how multivariate data can be represented in different orthonormal bases. This will help us build intuition that will be helpful in understanding PCA in the following tutorial.
Overview:
- Generate correlated multivariate data.
- Define an arbitrary orthonormal basis.
- Project the data onto the new basis.
```python
# @title Video 1: Geometric view of data
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="THu9yHnpq9I", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=THu9yHnpq9I
---
# Setup
```python
# Import
import numpy as np
import matplotlib.pyplot as plt
```
```python
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
```
```python
# @title Helper functions
def get_data(cov_matrix):
"""
Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian.
Note that samples are sorted in ascending order for the first random variable
Args:
cov_matrix (numpy array of floats): desired covariance matrix
Returns:
(numpy array of floats) : samples from the bivariate Gaussian, with each
column corresponding to a different random
variable
"""
mean = np.array([0, 0])
X = np.random.multivariate_normal(mean, cov_matrix, size=1000)
indices_for_sorting = np.argsort(X[:, 0])
X = X[indices_for_sorting, :]
return X
def plot_data(X):
"""
Plots bivariate data. Includes a plot of each random variable, and a scatter
plot of their joint activity. The title indicates the sample correlation
calculated from the data.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
Nothing.
"""
fig = plt.figure(figsize=[8, 4])
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(X[:, 0], color='k')
plt.ylabel('Neuron 1')
plt.title('Sample var 1: {:.1f}'.format(np.var(X[:, 0])))
ax1.set_xticklabels([])
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(X[:, 1], color='k')
plt.xlabel('Sample Number')
plt.ylabel('Neuron 2')
plt.title('Sample var 2: {:.1f}'.format(np.var(X[:, 1])))
ax3 = fig.add_subplot(gs[:, 1])
ax3.plot(X[:, 0], X[:, 1], '.', markerfacecolor=[.5, .5, .5],
markeredgewidth=0)
ax3.axis('equal')
plt.xlabel('Neuron 1 activity')
plt.ylabel('Neuron 2 activity')
plt.title('Sample corr: {:.1f}'.format(np.corrcoef(X[:, 0], X[:, 1])[0, 1]))
plt.show()
def plot_basis_vectors(X, W):
"""
Plots bivariate data as well as new basis vectors.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
W (numpy array of floats) : Square matrix representing new orthonormal
basis each column represents a basis vector
Returns:
Nothing.
"""
plt.figure(figsize=[4, 4])
plt.plot(X[:, 0], X[:, 1], '.', color=[.5, .5, .5], label='Data')
plt.axis('equal')
plt.xlabel('Neuron 1 activity')
plt.ylabel('Neuron 2 activity')
plt.plot([0, W[0, 0]], [0, W[1, 0]], color='r', linewidth=3,
label='Basis vector 1')
plt.plot([0, W[0, 1]], [0, W[1, 1]], color='b', linewidth=3,
label='Basis vector 2')
plt.legend()
plt.show()
def plot_data_new_basis(Y):
"""
Plots bivariate data after transformation to new bases.
Similar to plot_data but with colors corresponding to projections onto
basis 1 (red) and basis 2 (blue). The title indicates the sample correlation
calculated from the data.
Note that samples are re-sorted in ascending order for the first
random variable.
Args:
Y (numpy array of floats): Data matrix in new basis each column
corresponds to a different random variable
Returns:
Nothing.
"""
fig = plt.figure(figsize=[8, 4])
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(Y[:, 0], 'r')
plt.ylabel('Projection \n basis vector 1')
plt.title('Sample var 1: {:.1f}'.format(np.var(Y[:, 0])))
ax1.set_xticklabels([])
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(Y[:, 1], 'b')
plt.xlabel('Sample number')
plt.ylabel('Projection \n basis vector 2')
plt.title('Sample var 2: {:.1f}'.format(np.var(Y[:, 1])))
ax3 = fig.add_subplot(gs[:, 1])
ax3.plot(Y[:, 0], Y[:, 1], '.', color=[.5, .5, .5])
ax3.axis('equal')
plt.xlabel('Projection basis vector 1')
plt.ylabel('Projection basis vector 2')
plt.title('Sample corr: {:.1f}'.format(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1]))
plt.show()
```
---
# Section 1: Generate correlated multivariate data
```python
# @title Video 2: Multivariate data
video = YouTubeVideo(id="jcTq2PgU5Vw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=jcTq2PgU5Vw
To gain intuition, we will first use a simple model to generate multivariate data. Specifically, we will draw random samples from a *bivariate normal distribution*. This is an extension of the one-dimensional normal distribution to two dimensions, in which each $x_i$ is marginally normal with mean $\mu_i$ and variance $\sigma_i^2$:
\begin{align}
x_i \sim \mathcal{N}(\mu_i,\sigma_i^2).
\end{align}
Additionally, the joint distribution for $x_1$ and $x_2$ has a specified correlation coefficient $\rho$. Recall that the correlation coefficient is a normalized version of the covariance, and ranges between -1 and +1:
\begin{align}
\rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}}.
\end{align}
For simplicity, we will assume that the mean of each variable has already been subtracted, so that $\mu_i=0$. The remaining parameters can be summarized in the covariance matrix, which for two dimensions has the following form:
\begin{equation*}
{\bf \Sigma} =
\begin{pmatrix}
\text{var}(x_1) & \text{cov}(x_1,x_2) \\
\text{cov}(x_1,x_2) &\text{var}(x_2)
\end{pmatrix}.
\end{equation*}
In general, $\bf \Sigma$ is a symmetric matrix with the variances $\text{var}(x_i) = \sigma_i^2$ on the diagonal, and the covariances on the off-diagonal. Later, we will see that the covariance matrix plays a key role in PCA.
## Exercise 1: Draw samples from a distribution
We have provided code to draw random samples from a zero-mean bivariate normal distribution. Throughout this tutorial, we'll imagine these samples represent the activity (firing rates) of two recorded neurons on different trials. Fill in the function below to calculate the covariance matrix given the desired variances and correlation coefficient. The covariance can be found by rearranging the equation above:
\begin{align}
\text{cov}(x_1,x_2) = \rho \sqrt{\sigma_1^2 \sigma_2^2}.
\end{align}
Use these functions to generate and plot data while varying the parameters. You should get a feel for how changing the correlation coefficient affects the geometry of the simulated data.
**Steps**
* Fill in the function `calculate_cov_matrix` to calculate the desired covariance.
* Generate and plot the data for $\sigma_1^2 = 1$, $\sigma_2^2 = 1$, and $\rho = .8$. Try plotting the data for different values of the correlation coefficient: $\rho = -1, -.5, 0, .5, 1$.
```python
help(plot_data)
help(get_data)
```
Help on function plot_data in module __main__:
plot_data(X)
Plots bivariate data. Includes a plot of each random variable, and a scatter
plot of their joint activity. The title indicates the sample correlation
calculated from the data.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
Returns:
Nothing.
Help on function get_data in module __main__:
get_data(cov_matrix)
Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian.
Note that samples are sorted in ascending order for the first random variable
Args:
cov_matrix (numpy array of floats): desired covariance matrix
Returns:
(numpy array of floats) : samples from the bivariate Gaussian, with each
column corresponding to a different random
variable
```python
def calculate_cov_matrix(var_1, var_2, corr_coef):
"""
Calculates the covariance matrix based on the variances and correlation
coefficient.
Args:
var_1 (scalar) : variance of the first random variable
var_2 (scalar) : variance of the second random variable
corr_coef (scalar) : correlation coefficient
Returns:
(numpy array of floats) : covariance matrix
"""
#################################################
## TODO for students: calculate the covariance matrix
# Fill out function and remove
  #raise NotImplementedError("Student exercise: calculate the covariance matrix!")
#################################################
# Calculate the covariance from the variances and correlation
  cov = corr_coef * np.sqrt(var_1 * var_2)
cov_matrix = np.array([[var_1, cov], [cov, var_2]])
return cov_matrix
###################################################################
## TODO for students: generate and plot bivariate Gaussian data with variances of 1
## and a correlation coefficient of 0.8;
## repeat while varying the correlation coefficient from -1 to 1
###################################################################
np.random.seed(2020) # set random seed
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
# Uncomment to test your code and plot
cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef)
X = get_data(cov_matrix)
plot_data(X)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_57497711.py)
*Example output:*
---
# Section 2: Define a new orthonormal basis
```python
# @title Video 3: Orthonormal bases
video = YouTubeVideo(id="PC1RZELnrIg", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=PC1RZELnrIg
Next, we will define a new orthonormal basis of vectors ${\bf u} = [u_1,u_2]$ and ${\bf w} = [w_1,w_2]$. As we learned in the video, two vectors are orthonormal if:
1. They are orthogonal (i.e., their dot product is zero):
\begin{equation}
{\bf u\cdot w} = u_1 w_1 + u_2 w_2 = 0
\end{equation}
2. They have unit length:
\begin{equation}
||{\bf u} || = ||{\bf w} || = 1
\end{equation}
In two dimensions, it is easy to make an arbitrary orthonormal basis. All we need is a random vector ${\bf u}$, which we normalize to unit length. If we now define the second basis vector to be ${\bf w} = [-u_2,u_1]$, we can check that both conditions are satisfied:
\begin{equation}
{\bf u\cdot w} = - u_1 u_2 + u_2 u_1 = 0
\end{equation}
and
\begin{equation}
{|| {\bf w} ||} = \sqrt{(-u_2)^2 + u_1^2} = \sqrt{u_1^2 + u_2^2} = 1,
\end{equation}
where we used the fact that ${\bf u}$ is normalized. So, starting from an arbitrary input vector, we can define an orthonormal basis, which we will write in matrix form by stacking the basis vectors horizontally:
\begin{equation}
{{\bf W} } =
\begin{pmatrix}
u_1 & w_1 \\
u_2 & w_2
\end{pmatrix}.
\end{equation}
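As a quick numerical sanity check (an illustrative sketch, separate from the exercise code below), we can verify both conditions for this construction:
```python
import numpy as np

u = np.array([3.0, 1.0])
u = u / np.linalg.norm(u)         # normalize u
w = np.array([-u[1], u[0]])       # orthogonal to u by construction

print(np.dot(u, w))                           # 0 (orthogonality)
print(np.linalg.norm(u), np.linalg.norm(w))   # 1.0, 1.0 (unit length)
```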
## Exercise 2: Find an orthonormal basis
In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary 2-dimensional vector as an input.
**Steps**
* Modify the function `define_orthonormal_basis` to first normalize the first basis vector $\bf u$.
* Then complete the function by finding a basis vector $\bf w$ that is orthogonal to $\bf u$.
* Test the function using initial basis vector ${\bf u} = [3,1]$. Plot the resulting basis vectors on top of the data scatter plot using the function `plot_basis_vectors`. (For the data, use $\sigma_1^2 =1$, $\sigma_2^2 =1$, and $\rho = .8$).
```python
help(plot_basis_vectors)
```
Help on function plot_basis_vectors in module __main__:
plot_basis_vectors(X, W)
Plots bivariate data as well as new basis vectors.
Args:
X (numpy array of floats) : Data matrix each column corresponds to a
different random variable
W (numpy array of floats) : Square matrix representing new orthonormal
basis each column represents a basis vector
Returns:
Nothing.
```python
a=[1,2]
b = np.flip(a)
b[0] = -b[0]
print(b)
```
[-2 1]
```python
def define_orthonormal_basis(u):
"""
Calculates an orthonormal basis given an arbitrary vector u.
Args:
u (numpy array of floats) : arbitrary 2-dimensional vector used for new
basis
Returns:
(numpy array of floats) : new orthonormal basis
columns correspond to basis vectors
"""
#################################################
## TODO for students: calculate the orthonormal basis
# Fill out function and remove
  #raise NotImplementedError("Student exercise: implement the orthonormal basis function")
#################################################
# normalize vector u
u = u/np.sqrt(np.transpose(u)@u)
  # calculate vector w that is orthogonal to u
  w = np.array([-u[1], u[0]])
  W = np.column_stack([u, w])
return W
np.random.seed(2020) # set random seed
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
cov_matrix = calculate_cov_matrix(variance_1, variance_2, corr_coef)
X = get_data(cov_matrix)
u = np.array([3, 1])
# Uncomment and run below to plot the basis vectors
W = define_orthonormal_basis(u)
plot_basis_vectors(X, W)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_7a9640ef.py)
*Example output:*
---
# Section 3: Project data onto new basis
```python
# @title Video 4: Change of basis
video = YouTubeVideo(id="Mj6BRQPKKUc", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Video available at https://youtube.com/watch?v=Mj6BRQPKKUc
Finally, we will express our data in the new basis that we have just found. Since $\bf W$ is orthonormal, we can project the data into our new basis using simple matrix multiplication :
\begin{equation}
{\bf Y = X W}.
\end{equation}
We will explore the geometry of the transformed data $\bf Y$ as we vary the choice of basis.
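To make the matrix product concrete before the exercise, here is a minimal sketch (toy data and a 45-degree basis are assumed purely for illustration) showing that a single multiplication performs the change of basis:
```python
import numpy as np

theta = np.pi / 4                        # 45-degree basis, aligned with the correlated data
W = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])  # orthonormal (rotation) matrix

X = np.random.multivariate_normal([0, 0], [[1, .8], [.8, 1]], size=1000)
Y = X @ W                                # each row of X is projected onto the columns of W
print(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1])  # near 0: this basis decorrelates the data
```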
## Exercise 3: Project data onto the new basis
In this exercise you will fill in the function below to project the data onto the orthonormal basis you defined in the previous exercise.
**Steps**
* Complete the function `change_of_basis` to project the data onto the new basis.
* Plot the projected data using the function `plot_data_new_basis`.
* What happens to the correlation coefficient in the new basis? Does it increase or decrease?
* What happens to variance?
```python
def change_of_basis(X, W):
"""
Projects data onto new basis W.
Args:
X (numpy array of floats) : Data matrix each column corresponding to a
different random variable
W (numpy array of floats) : new orthonormal basis columns correspond to
basis vectors
Returns:
(numpy array of floats) : Data matrix expressed in new basis
"""
#################################################
  ## TODO for students: project the data onto a new basis W
# Fill out function and remove
#raise NotImplementedError("Student excercise: implement change of basis")
#################################################
# project data onto new basis described by W
Y = X@W
return Y
# Uncomment below to transform the data by projecting it onto the new basis
Y = change_of_basis(X, W)
plot_data_new_basis(Y)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_a1124bbc.py)
*Example output:*
## Interactive Demo: Play with the basis vectors
To see what happens to the correlation as we change the basis vectors, run the cell below. The parameter $\theta$ controls the angle of $\bf u$ in degrees. Use the slider to rotate the basis vectors.
```python
# @title
# @markdown Make sure you execute this cell to enable the widget!
def refresh(theta=0):
u = [1, np.tan(theta * np.pi / 180)]
W = define_orthonormal_basis(u)
Y = change_of_basis(X, W)
plot_basis_vectors(X, W)
plot_data_new_basis(Y)
_ = widgets.interact(refresh, theta=(0, 90, 5))
```
## Questions
* What happens to the projected data as you rotate the basis?
* How does the correlation coefficient change? How does the variance of the projection onto each basis vector change?
* Are you able to find a basis in which the projected data is **uncorrelated**?
---
# Summary
- In this tutorial, we learned that multivariate data can be visualized as a cloud of points in a high-dimensional vector space. The geometry of this cloud is shaped by the covariance matrix.
- Multivariate data can be represented in a new orthonormal basis using the dot product. These new basis vectors correspond to specific mixtures of the original variables - for example, in neuroscience, they could represent different ratios of activation across a population of neurons.
- The projected data (after transforming into the new basis) will generally have a different geometry from the original data. In particular, taking basis vectors that are aligned with the spread of cloud of points decorrelates the data.
- These concepts - covariance, projections, and orthonormal bases - are key for understanding PCA, which will be our focus in the next tutorial.
|
module Control.Monad.State
import public Control.Monad.Identity -- left here for compatibility
import public Control.Monad.Trans -- left here for compatibility
import public Control.Monad.State.Interface as Control.Monad.State
import public Control.Monad.State.State as Control.Monad.State
|
! { dg-do compile }
! PR 55593 - bogus error with generic subroutines
module foo
implicit none
interface sub
subroutine sub2(i)
integer, intent(in) :: i
end subroutine sub2
subroutine sub(i)
integer, dimension(:), intent(out) :: i
end subroutine sub
end interface sub
interface tub2
subroutine tub2(i)
integer, intent(in) :: i
end subroutine tub2
subroutine tub(i)
integer, dimension(:), intent(out) :: i
end subroutine tub
end interface tub2
interface func
integer function ifunc(i)
integer, intent(in) :: i
end function ifunc
integer function func(i)
integer, intent(in) :: i(:)
end function func
end interface func
interface igunc
integer function igunc(i)
integer, intent(in) :: i
end function igunc
integer function gunc(i)
integer, intent(in) :: i(:)
end function gunc
end interface igunc
end module foo
program main
use foo
implicit none
integer :: i
do i=1,10
call sub(i)
call tub2(i)
end do
do i=1,10
print *,func(i)
print *,igunc(i)
end do
do undeclared=1,10 ! { dg-error "has no IMPLICIT type" }
call sub(undeclared)
end do
end program main
|
Formal statement is: lemmas prime_dvd_power_int_iff = prime_dvd_power_iff[where ?'a = int] Informal statement is: For any prime $p$, integer $a$, and positive exponent $n$, $p$ divides $a^n$ if and only if $p$ divides $a$.
State Before: F : Type u_1
inst✝ : Field F
x y : F
hxy : x ≠ y
⊢ degree (basisDivisor x y) = 1 State After: F : Type u_1
inst✝ : Field F
x y : F
hxy : x ≠ y
⊢ (x - y)⁻¹ ≠ 0 Tactic: rw [basisDivisor, degree_mul, degree_X_sub_C, degree_C, zero_add] State Before: F : Type u_1
inst✝ : Field F
x y : F
hxy : x ≠ y
⊢ (x - y)⁻¹ ≠ 0 State After: no goals Tactic: exact inv_ne_zero (sub_ne_zero_of_ne hxy) |
Formal statement is: lemma homotopy_eqv_contractible_sets: fixes S :: "'a::real_normed_vector set" and T :: "'b::real_normed_vector set" assumes "contractible S" "contractible T" "S = {} \<longleftrightarrow> T = {}" shows "S homotopy_eqv T" Informal statement is: If two contractible sets are both empty or both nonempty, then they are homotopy equivalent.
isobath_db = function(
p = NULL,
spatial_domain="canada.east.superhighres",
depths=c( 0, 10, 20, 50, 75, 100, 200, 250, 300, 350, 400, 450, 500, 550, 600, 700, 750, 800, 900,
1000, 1200, 1250, 1400, 1500, 1750, 2000, 2500, 3000, 4000, 5000 ),
DS="isobath",
project_to=projection_proj4string("lonlat_wgs84"),
data_dir=project.datadirectory( "aegis", "bathymetry" ),
  aRange = 3  # pixels to approximate 1 SD
) {
#\\ create or return isobaths and coastlines/coast polygons
  #\\ isobaths come from aggregated data (resolution of pres) which is then locally smoothed through a Gaussian kernel process
if (DS %in% c( "isobath", "isobath.redo" )) {
    # behaviour determined by p: if p is passed, a p-specific Zsmooth is created; otherwise the highest resolution for the region is used
require (fields)
if ( spatial_domain %in% c( "canada.east.superhighres", "canada.east.highres", "canada.east", "SSE", "SSE.mpa" , "snowcrab" ) ) spatial_domain_input = "canada.east.superhighres"
isobaths = NULL
options( max.contour.segments=50000 )
depths = sort( unique( depths ) )
if ( is.null(p)) {
if ( DS == "isobath" ) {
fn.iso = file.path( data_dir, "isobaths", paste("isobaths", spatial_domain_input, "rdata", sep=".") ) # in case there is an alternate project
if (file.exists(fn.iso)) {
load(fn.iso)
isobaths = as( isobaths, "sf")
nn = row.names(isobaths)
if ( st_crs( isobaths ) != st_crs(project_to) ) isobaths = st_transform( isobaths, st_crs( project_to ) )
notfound = setdiff( as.character(depths), nn )
if (length( notfound) > 0 ) {
message( "matching isobaths not found, computing on the fly .. " )
Zsmoothed = attributes( isobaths)$Zsmoothed
x = seq(min(attributes( isobaths)$corners$plon), max(attributes( isobaths)$corners$plon), by=attributes( isobaths)$pres)
y = seq(min(attributes( isobaths)$corners$plat), max(attributes( isobaths)$corners$plat), by=attributes( isobaths)$pres)
cl = contourLines( x=x, y=y, Zsmoothed$z, levels=depths )
iso_crs = attributes( isobaths)$proj4string_planar
isobaths = maptools::ContourLines2SLDF(cl, proj4string=sp::CRS(iso_crs) )
isobaths = as( isobaths, "sf")
st_crs(isobaths) = st_crs( iso_crs )
isobaths = st_transform( isobaths, st_crs(projection_proj4string("lonlat_wgs84")) ) ## longlat as storage format
row.names(isobaths) = as.character(isobaths$level)
}
return( isobaths )
}
}
}
    # here we are either redoing, or p was passed and a lower-resolution (alternate), p-specific isobath is desired
if (is.null(p)) p = aegis.bathymetry::bathymetry_parameters( spatial_domain=spatial_domain_input )
fn.iso = file.path( data_dir, "isobaths", paste("isobaths", spatial_domain, "rdata", sep=".") ) # in case there is an alternate project
Z = bathymetry_db( p=p, DS="aggregated_data" )
Zi = array_map( "xy->2", Z[, c("plon", "plat")], gridparams=p$gridparams )
# remove raw data outside of the bounding box
good = which( Zi[,1] >= 1 & Zi[,1] <= p$nplons & Zi[,2] >= 1 & Zi[,2] <= p$nplats )
Zi = Zi[good,]
Z = Z[good,]
Zmatrix = matrix(NA, nrow=p$nplons, ncol=p$nplats )
Zmatrix[Zi] = Z$z.mean
Zsmoothed = image.smooth( Zmatrix, aRange=aRange )
x = seq(min(p$corners$plon), max(p$corners$plon), by=p$pres)
y = seq(min(p$corners$plat), max(p$corners$plat), by=p$pres)
cl = contourLines( x=x, y=y, Zsmoothed$z, levels=depths )
isobaths = maptools::ContourLines2SLDF(cl, proj4string=sp::CRS( p$aegis_proj4string_planar_km ) )
isobaths = as( isobaths, "sf")
st_crs(isobaths) = st_crs( p$aegis_proj4string_planar_km )
isobaths = st_transform( isobaths, st_crs(projection_proj4string("lonlat_wgs84")) ) ## longlat as storage format
row.names(isobaths) = as.character(isobaths$level)
attr( isobaths, "Zsmoothed" ) = Zsmoothed
attr( isobaths, "aRange" ) = aRange
attr( isobaths, "corners" ) = p$corners
attr( isobaths, "pres" ) = p$pres
attr( isobaths, "proj4string_planar" ) = p$aegis_proj4string_planar_km
attr( isobaths, "proj4string_lonlat" ) = projection_proj4string("lonlat_wgs84")
save( isobaths, file=fn.iso, compress=TRUE)
if ( ! st_crs( isobaths ) == st_crs( project_to) ) isobaths = st_transform( isobaths, st_crs( project_to ) )
return( isobaths )
}
# ------------------------
if (DS %in% c( "coastLine", "coastLine.redo")) {
    #\\ synonym for coastline_db ... left for historical compatibility .. deprecated
    if (DS=="coastLine") return( coastline_db( project_to = project_to ) )
}
# ------------------------
if (DS %in% c("coastPolygon", "coastPolygon.redo") ) {
    #\\ synonym for coastline_db ... left for historical compatibility .. deprecated
if (DS=="coastPolygon") return( coastline_db( project_to = project_to ) )
}
}
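# Illustrative usage (assumes the aegis data directories above are populated locally):
#   iso = isobath_db( DS="isobath", depths=c(100, 200, 500) )
#   plot( iso["level"] )   # sf line features, one per requested depth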
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-}
-- |
-- Module : Statistics.Distribution.ChiSquared
-- Copyright : (c) 2010 Alexey Khudyakov
-- License : BSD3
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- The chi-squared distribution. This is a continuous probability
-- distribution of sum of squares of k independent standard normal
-- distributions. It's commonly used in statistical tests.
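--
-- An illustrative example (the values shown are approximate and assumed,
-- not checked by this module):
--
-- > D.cumulative (chiSquared 2) 1    -- P(X <= 1) = 1 - exp(-1/2) ~ 0.3935
-- > D.quantile   (chiSquared 2) 0.5  -- median = 2 * log 2 ~ 1.3863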
module Statistics.Distribution.ChiSquared (
ChiSquared
, chiSquaredNDF
-- * Constructors
, chiSquared
, chiSquaredE
) where
import Control.Applicative
import Data.Data (Data, Typeable)
import GHC.Generics (Generic)
import Numeric.SpecFunctions ( incompleteGamma,invIncompleteGamma,logGamma,digamma)
import Numeric.MathFunctions.Constants (m_neg_inf)
import qualified System.Random.MWC.Distributions as MWC
import qualified Statistics.Distribution as D
import Statistics.Internal
-- | Chi-squared distribution
newtype ChiSquared = ChiSquared
{ chiSquaredNDF :: Int
-- ^ Get number of degrees of freedom
}
deriving (Eq, Typeable, Data, Generic)
instance Show ChiSquared where
showsPrec i (ChiSquared n) = defaultShow1 "chiSquared" n i
instance Read ChiSquared where
readPrec = defaultReadPrecM1 "chiSquared" chiSquaredE
-- | Construct chi-squared distribution. Number of degrees of freedom
-- must be positive.
chiSquared :: Int -> ChiSquared
chiSquared n = maybe (error $ errMsg n) id $ chiSquaredE n
-- | Construct chi-squared distribution. Number of degrees of freedom
-- must be positive.
chiSquaredE :: Int -> Maybe ChiSquared
chiSquaredE n
| n <= 0 = Nothing
| otherwise = Just (ChiSquared n)
errMsg :: Int -> String
errMsg n = "Statistics.Distribution.ChiSquared.chiSquared: N.D.F. must be positive. Got " ++ show n
instance D.Distribution ChiSquared where
cumulative = cumulative
instance D.ContDistr ChiSquared where
density chi x
| x <= 0 = 0
| otherwise = exp $ log x * (ndf2 - 1) - x2 - logGamma ndf2 - log 2 * ndf2
where
ndf = fromIntegral $ chiSquaredNDF chi
ndf2 = ndf/2
x2 = x/2
logDensity chi x
| x <= 0 = m_neg_inf
| otherwise = log x * (ndf2 - 1) - x2 - logGamma ndf2 - log 2 * ndf2
where
ndf = fromIntegral $ chiSquaredNDF chi
ndf2 = ndf/2
x2 = x/2
quantile = quantile
instance D.Mean ChiSquared where
mean (ChiSquared ndf) = fromIntegral ndf
instance D.Variance ChiSquared where
variance (ChiSquared ndf) = fromIntegral (2*ndf)
instance D.MaybeMean ChiSquared where
maybeMean = Just . D.mean
instance D.MaybeVariance ChiSquared where
maybeStdDev = Just . D.stdDev
maybeVariance = Just . D.variance
instance D.Entropy ChiSquared where
entropy (ChiSquared ndf) =
let kHalf = 0.5 * fromIntegral ndf in
kHalf
+ log 2
+ logGamma kHalf
+ (1-kHalf) * digamma kHalf
instance D.MaybeEntropy ChiSquared where
maybeEntropy = Just . D.entropy
instance D.ContGen ChiSquared where
genContVar (ChiSquared n) = MWC.chiSquare n
cumulative :: ChiSquared -> Double -> Double
cumulative chi x
| x <= 0 = 0
| otherwise = incompleteGamma (ndf/2) (x/2)
where
ndf = fromIntegral $ chiSquaredNDF chi
quantile :: ChiSquared -> Double -> Double
quantile (ChiSquared ndf) p
| p == 0 = 0
| p == 1 = 1/0
| p > 0 && p < 1 = 2 * invIncompleteGamma (fromIntegral ndf / 2) p
| otherwise =
error $ "Statistics.Distribution.ChiSquared.quantile: p must be in [0,1] range. Got: "++show p
|
State Before: α : Type ?u.219951
β : Type ?u.219954
γ : Type ?u.219957
r : α → α → Prop
s : β → β → Prop
t : γ → γ → Prop
o : Ordinal
⊢ card (type (?m.220055 o)) = card o State After: no goals Tactic: rw [Ordinal.type_lt] |
||| Environments.
|||
||| Module : Environment.idr
||| Copyright : (c) Jan de Muijnck-Hughes
||| License : see LICENSE
|||
module Toolkit.DeBruijn.Environment
import Decidable.Equality
import Data.DPair
import Toolkit.Decidable.Informative
import Toolkit.Data.List.AtIndex
import Toolkit.Data.DList
import Toolkit.Data.DList.AtIndex
import Toolkit.DeBruijn.Context.Item
import Toolkit.DeBruijn.Context
import Toolkit.DeBruijn.Renaming
%default total
||| Sometimes it is better to think that we have this thing called an
||| environment, rather than a `DList`.
|||
||| @t The Type for Types in our environment.
||| @obj How we interpret the types in our DSL. Either this is a
||| dependent type or a function that computes a type.
||| @ctxt The typing context.
public export
Env : (t : Type) -> (obj : t -> Type) -> (ctxt : List t) -> Type
Env = DList
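-- For example, given an interpretation `interp : Ty -> Type` (assumed), an
-- environment for the context `[TyNat, TyBool]` is a value of type
-- `Env Ty interp [TyNat, TyBool]`: a `DList` holding one `interp t` for each
-- `t` in the context.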
||| Add an object to our execution environment.
||| @env The typing environment.
export
extend : {t : ty}
-> (env : Env ty e ctxt)
-> (obj : e t)
-> Env ty e (t::ctxt)
extend env obj = obj :: env
namespace Elem
||| Read an object from our typing environment.
|||
||| @idx Which object.
||| @env The execution environment.
export
read : (idx : Elem t ctxt)
-> (env : Env ty e ctxt)
-> e t
read Here (obj::store) = obj
read (There x) (obj::store) = read x store
||| Add an object to our execution environment.
|||
||| @idx Where the object is.
||| @obj The new object.
||| @env The environment to which the object is added.
export
update : (idx : Elem t ctxt)
-> (obj : e t)
-> (env : Env ty e ctxt)
-> Env ty e ctxt
update Here obj (_ :: store) = obj :: store
update (There x) obj (obj' :: store) = obj' :: update x obj store
namespace IsVar
||| Read an object from our typing environment.
|||
||| @idx Which object.
||| @env The execution environment.
export
read : (idx : IsVar ctxt t)
-> (env : Env ty e ctxt)
-> e t
read (V 0 Here) (elem :: rest)
= elem
read (V (S idx) (There later)) (elem :: rest)
= read (V idx later) rest
||| Add an object to our execution environment.
|||
||| @idx Where the object is.
||| @obj The new object.
||| @env The environment to which the object is added.
export
update : (idx : IsVar ctxt t)
-> (obj : e t)
-> (env : Env ty e ctxt)
-> Env ty e ctxt
update (V 0 Here) obj (elem :: rest)
= obj :: rest
update (V (S k) (There later)) obj (elem :: rest)
= elem :: update (V k later) obj rest
-- [ EOF ]
|
lemma cone_hull_eq: "cone hull S = S \<longleftrightarrow> cone S" |
/-
Copyright (c) 2021 Aaron Anderson, Jesse Michael Han, Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson, Jesse Michael Han, Floris van Doorn
-/
import data.list.prod_sigma
import data.set.prod
import logic.equiv.fin
import model_theory.language_map
/-!
# Basics on First-Order Syntax
This file defines first-order terms, formulas, sentences, and theories in a style inspired by the
[Flypitch project](https://flypitch.github.io/).
## Main Definitions
* A `first_order.language.term` is defined so that `L.term α` is the type of `L`-terms with free
variables indexed by `α`.
* A `first_order.language.formula` is defined so that `L.formula α` is the type of `L`-formulas with
free variables indexed by `α`.
* A `first_order.language.sentence` is a formula with no free variables.
* A `first_order.language.Theory` is a set of sentences.
* The variables of terms and formulas can be relabelled with `first_order.language.term.relabel`,
`first_order.language.bounded_formula.relabel`, and `first_order.language.formula.relabel`.
* Given an operation on terms and an operation on relations,
`first_order.language.bounded_formula.map_term_rel` gives an operation on formulas.
* `first_order.language.bounded_formula.cast_le` adds more `fin`-indexed variables.
* `first_order.language.bounded_formula.lift_at` raises the indexes of the `fin`-indexed variables
above a particular index.
* `first_order.language.term.subst` and `first_order.language.bounded_formula.subst` substitute
variables with given terms.
* Language maps can act on syntactic objects with functions such as
`first_order.language.Lhom.on_formula`.
## Implementation Notes
* Formulas use a modified version of de Bruijn variables. Specifically, a `L.bounded_formula α n`
is a formula with some variables indexed by a type `α`, which cannot be quantified over, and some
indexed by `fin n`, which can. For any `φ : L.bounded_formula α (n + 1)`, we define the formula
`∀' φ : L.bounded_formula α n` by universally quantifying over the variable indexed by
`n : fin (n + 1)`.
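  For example, `relations.symmetric` in this file is written as
  `∀' ∀' (r.bounded_formula₂ &0 &1 ⟹ r.bounded_formula₂ &1 &0)`, where `&0` and `&1` are the
  two `fin`-indexed variables bound by the quantifiers.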
## References
For the Flypitch project:
- [J. Han, F. van Doorn, *A formal proof of the independence of the continuum hypothesis*]
[flypitch_cpp]
- [J. Han, F. van Doorn, *A formalization of forcing and the unprovability of
the continuum hypothesis*][flypitch_itp]
-/
universes u v w u' v'
namespace first_order
namespace language
variables (L : language.{u v}) {L' : language}
variables {M : Type w} {N P : Type*} [L.Structure M] [L.Structure N] [L.Structure P]
variables {α : Type u'} {β : Type v'} {γ : Type*}
open_locale first_order
open Structure fin
/-- A term on `α` is either a variable indexed by an element of `α`
or a function symbol applied to simpler terms. -/
inductive term (α : Type u') : Type (max u u')
| var {} : ∀ (a : α), term
| func {} : ∀ {l : ℕ} (f : L.functions l) (ts : fin l → term), term
export term
variable {L}
namespace term
open finset
/-- The `finset` of variables used in a given term. -/
@[simp] def var_finset [decidable_eq α] : L.term α → finset α
| (var i) := {i}
| (func f ts) := univ.bUnion (λ i, (ts i).var_finset)
/-- The `finset` of variables from the left side of a sum used in a given term. -/
@[simp] def var_finset_left [decidable_eq α] : L.term (α ⊕ β) → finset α
| (var (sum.inl i)) := {i}
| (var (sum.inr i)) := ∅
| (func f ts) := univ.bUnion (λ i, (ts i).var_finset_left)
/-- Relabels a term's variables along a particular function. -/
@[simp] def relabel (g : α → β) : L.term α → L.term β
| (var i) := var (g i)
| (func f ts) := func f (λ i, (ts i).relabel)
lemma relabel_id (t : L.term α) :
t.relabel id = t :=
begin
induction t with _ _ _ _ ih,
{ refl, },
{ simp [ih] },
end
@[simp] lemma relabel_id_eq_id :
(term.relabel id : L.term α → L.term α) = id :=
funext relabel_id
@[simp] lemma relabel_relabel (f : α → β) (g : β → γ) (t : L.term α) :
(t.relabel f).relabel g = t.relabel (g ∘ f) :=
begin
induction t with _ _ _ _ ih,
{ refl, },
{ simp [ih] },
end
@[simp] lemma relabel_comp_relabel (f : α → β) (g : β → γ) :
(term.relabel g ∘ term.relabel f : L.term α → L.term γ) = term.relabel (g ∘ f) :=
funext (relabel_relabel f g)
/-- Restricts a term to use only a set of the given variables. -/
def restrict_var [decidable_eq α] : Π (t : L.term α) (f : t.var_finset → β), L.term β
| (var a) f := var (f ⟨a, mem_singleton_self a⟩)
| (func F ts) f := func F (λ i, (ts i).restrict_var
(f ∘ set.inclusion (subset_bUnion_of_mem _ (mem_univ i))))
/-- Restricts a term to use only a set of the given variables on the left side of a sum. -/
def restrict_var_left [decidable_eq α] {γ : Type*} :
Π (t : L.term (α ⊕ γ)) (f : t.var_finset_left → β), L.term (β ⊕ γ)
| (var (sum.inl a)) f := var (sum.inl (f ⟨a, mem_singleton_self a⟩))
| (var (sum.inr a)) f := var (sum.inr a)
| (func F ts) f := func F (λ i, (ts i).restrict_var_left
(f ∘ set.inclusion (subset_bUnion_of_mem _ (mem_univ i))))
end term
/-- The representation of a constant symbol as a term. -/
def constants.term (c : L.constants) : (L.term α) :=
func c default
/-- Applies a unary function to a term. -/
def functions.apply₁ (f : L.functions 1) (t : L.term α) : L.term α := func f ![t]
/-- Applies a binary function to two terms. -/
def functions.apply₂ (f : L.functions 2) (t₁ t₂ : L.term α) : L.term α := func f ![t₁, t₂]
namespace term
instance inhabited_of_var [inhabited α] : inhabited (L.term α) :=
⟨var default⟩
instance inhabited_of_constant [inhabited L.constants] : inhabited (L.term α) :=
⟨(default : L.constants).term⟩
/-- Raises all of the `fin`-indexed variables of a term greater than or equal to `m` by `n'`. -/
def lift_at {n : ℕ} (n' m : ℕ) : L.term (α ⊕ fin n) → L.term (α ⊕ fin (n + n')) :=
relabel (sum.map id (λ i, if ↑i < m then fin.cast_add n' i else fin.add_nat n' i))
/-- Substitutes the variables in a given term with terms. -/
@[simp] def subst : L.term α → (α → L.term β) → L.term β
| (var a) tf := tf a
| (func f ts) tf := (func f (λ i, (ts i).subst tf))
end term
localized "prefix `&`:max := first_order.language.term.var ∘ sum.inr" in first_order
namespace Lhom
/-- Maps a term's symbols along a language map. -/
@[simp] def on_term (φ : L →ᴸ L') : L.term α → L'.term α
| (var i) := var i
| (func f ts) := func (φ.on_function f) (λ i, on_term (ts i))
@[simp] lemma id_on_term :
((Lhom.id L).on_term : L.term α → L.term α) = id :=
begin
ext t,
induction t with _ _ _ _ ih,
{ refl },
{ simp_rw [on_term, ih],
refl, },
end
@[simp] lemma comp_on_term {L'' : language} (φ : L' →ᴸ L'') (ψ : L →ᴸ L') :
((φ.comp ψ).on_term : L.term α → L''.term α) = φ.on_term ∘ ψ.on_term :=
begin
ext t,
induction t with _ _ _ _ ih,
{ refl },
{ simp_rw [on_term, ih],
refl, },
end
end Lhom
/-- Maps a term's symbols along a language equivalence. -/
@[simps] def Lequiv.on_term (φ : L ≃ᴸ L') : L.term α ≃ L'.term α :=
{ to_fun := φ.to_Lhom.on_term,
inv_fun := φ.inv_Lhom.on_term,
left_inv := by rw [function.left_inverse_iff_comp, ← Lhom.comp_on_term, φ.left_inv,
Lhom.id_on_term],
right_inv := by rw [function.right_inverse_iff_comp, ← Lhom.comp_on_term, φ.right_inv,
Lhom.id_on_term] }
variables (L) (α)
/-- `bounded_formula α n` is the type of formulas with free variables indexed by `α` and up to `n`
additional free variables. -/
inductive bounded_formula : ℕ → Type (max u v u')
| falsum {} {n} : bounded_formula n
| equal {n} (t₁ t₂ : L.term (α ⊕ fin n)) : bounded_formula n
| rel {n l : ℕ} (R : L.relations l) (ts : fin l → L.term (α ⊕ fin n)) : bounded_formula n
| imp {n} (f₁ f₂ : bounded_formula n) : bounded_formula n
| all {n} (f : bounded_formula (n+1)) : bounded_formula n
/-- `formula α` is the type of formulas with all free variables indexed by `α`. -/
@[reducible] def formula := L.bounded_formula α 0
/-- A sentence is a formula with no free variables. -/
@[reducible] def sentence := L.formula empty
/-- A theory is a set of sentences. -/
@[reducible] def Theory := set L.sentence
variables {L} {α} {n : ℕ}
/-- Applies a relation to terms as a bounded formula. -/
def relations.bounded_formula {l : ℕ} (R : L.relations n) (ts : fin n → L.term (α ⊕ fin l)) :
L.bounded_formula α l := bounded_formula.rel R ts
/-- Applies a unary relation to a term as a bounded formula. -/
def relations.bounded_formula₁ (r : L.relations 1) (t : L.term (α ⊕ fin n)) :
L.bounded_formula α n :=
r.bounded_formula ![t]
/-- Applies a binary relation to two terms as a bounded formula. -/
def relations.bounded_formula₂ (r : L.relations 2) (t₁ t₂ : L.term (α ⊕ fin n)) :
L.bounded_formula α n :=
r.bounded_formula ![t₁, t₂]
/-- The equality of two terms as a bounded formula. -/
def term.bd_equal (t₁ t₂ : L.term (α ⊕ fin n)) : (L.bounded_formula α n) :=
bounded_formula.equal t₁ t₂
/-- Applies a relation to terms as a bounded formula. -/
def relations.formula (R : L.relations n) (ts : fin n → L.term α) :
L.formula α := R.bounded_formula (λ i, (ts i).relabel sum.inl)
/-- Applies a unary relation to a term as a formula. -/
def relations.formula₁ (r : L.relations 1) (t : L.term α) :
L.formula α :=
r.formula ![t]
/-- Applies a binary relation to two terms as a formula. -/
def relations.formula₂ (r : L.relations 2) (t₁ t₂ : L.term α) :
L.formula α :=
r.formula ![t₁, t₂]
/-- The equality of two terms as a first-order formula. -/
def term.equal (t₁ t₂ : L.term α) : (L.formula α) :=
(t₁.relabel sum.inl).bd_equal (t₂.relabel sum.inl)
namespace bounded_formula
instance : inhabited (L.bounded_formula α n) :=
⟨falsum⟩
instance : has_bot (L.bounded_formula α n) := ⟨falsum⟩
/-- The negation of a bounded formula is also a bounded formula. -/
@[pattern] protected def not (φ : L.bounded_formula α n) : L.bounded_formula α n := φ.imp ⊥
/-- Puts an `∃` quantifier on a bounded formula. -/
@[pattern] protected def ex (φ : L.bounded_formula α (n + 1)) : L.bounded_formula α n :=
φ.not.all.not
instance : has_top (L.bounded_formula α n) := ⟨bounded_formula.not ⊥⟩
instance : has_inf (L.bounded_formula α n) := ⟨λ f g, (f.imp g.not).not⟩
instance : has_sup (L.bounded_formula α n) := ⟨λ f g, f.not.imp g⟩
/-- The biimplication between two bounded formulas. -/
protected def iff (φ ψ : L.bounded_formula α n) := φ.imp ψ ⊓ ψ.imp φ
open finset
/-- The `finset` of variables used in a given formula. -/
@[simp] def free_var_finset [decidable_eq α] :
∀ {n}, L.bounded_formula α n → finset α
| n falsum := ∅
| n (equal t₁ t₂) := t₁.var_finset_left ∪ t₂.var_finset_left
| n (rel R ts) := univ.bUnion (λ i, (ts i).var_finset_left)
| n (imp f₁ f₂) := f₁.free_var_finset ∪ f₂.free_var_finset
| n (all f) := f.free_var_finset
/-- Casts `L.bounded_formula α m` as `L.bounded_formula α n`, where `m ≤ n`. -/
@[simp] def cast_le : ∀ {m n : ℕ} (h : m ≤ n), L.bounded_formula α m → L.bounded_formula α n
| m n h falsum := falsum
| m n h (equal t₁ t₂) := equal (t₁.relabel (sum.map id (fin.cast_le h)))
(t₂.relabel (sum.map id (fin.cast_le h)))
| m n h (rel R ts) := rel R (term.relabel (sum.map id (fin.cast_le h)) ∘ ts)
| m n h (imp f₁ f₂) := (f₁.cast_le h).imp (f₂.cast_le h)
| m n h (all f) := (f.cast_le (add_le_add_right h 1)).all
@[simp] lemma cast_le_rfl {n} (h : n ≤ n) (φ : L.bounded_formula α n) :
φ.cast_le h = φ :=
begin
induction φ with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ simp [fin.cast_le_of_eq], },
{ simp [fin.cast_le_of_eq], },
{ simp [fin.cast_le_of_eq, ih1, ih2], },
{ simp [fin.cast_le_of_eq, ih3], },
end
@[simp] lemma cast_le_cast_le {k m n} (km : k ≤ m) (mn : m ≤ n) (φ : L.bounded_formula α k) :
(φ.cast_le km).cast_le mn = φ.cast_le (km.trans mn) :=
begin
revert m n,
induction φ with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3;
intros m n km mn,
{ refl },
{ simp },
{ simp only [cast_le, eq_self_iff_true, heq_iff_eq, true_and],
rw [← function.comp.assoc, relabel_comp_relabel],
simp },
{ simp [ih1, ih2] },
{ simp only [cast_le, ih3] }
end
@[simp] lemma cast_le_comp_cast_le {k m n} (km : k ≤ m) (mn : m ≤ n) :
(bounded_formula.cast_le mn ∘ bounded_formula.cast_le km :
L.bounded_formula α k → L.bounded_formula α n) =
bounded_formula.cast_le (km.trans mn) :=
funext (cast_le_cast_le km mn)
/-- Restricts a bounded formula to only use a particular set of free variables. -/
def restrict_free_var [decidable_eq α] : Π {n : ℕ} (φ : L.bounded_formula α n)
(f : φ.free_var_finset → β), L.bounded_formula β n
| n falsum f := falsum
| n (equal t₁ t₂) f := equal
(t₁.restrict_var_left (f ∘ set.inclusion (subset_union_left _ _)))
(t₂.restrict_var_left (f ∘ set.inclusion (subset_union_right _ _)))
| n (rel R ts) f := rel R (λ i, (ts i).restrict_var_left
(f ∘ set.inclusion (subset_bUnion_of_mem _ (mem_univ i))))
| n (imp φ₁ φ₂) f :=
(φ₁.restrict_free_var (f ∘ set.inclusion (subset_union_left _ _))).imp
(φ₂.restrict_free_var (f ∘ set.inclusion (subset_union_right _ _)))
| n (all φ) f := (φ.restrict_free_var f).all
/-- Places universal quantifiers on all extra variables of a bounded formula. -/
def alls : ∀ {n}, L.bounded_formula α n → L.formula α
| 0 φ := φ
| (n + 1) φ := φ.all.alls
/-- Places existential quantifiers on all extra variables of a bounded formula. -/
def exs : ∀ {n}, L.bounded_formula α n → L.formula α
| 0 φ := φ
| (n + 1) φ := φ.ex.exs
/-- Maps bounded formulas along a map of terms and a map of relations. -/
def map_term_rel {g : ℕ → ℕ}
(ft : ∀ n, L.term (α ⊕ fin n) → L'.term (β ⊕ fin (g n)))
(fr : ∀ n, L.relations n → L'.relations n)
(h : ∀ n, L'.bounded_formula β (g (n + 1)) → L'.bounded_formula β (g n + 1)) :
∀ {n}, L.bounded_formula α n → L'.bounded_formula β (g n)
| n falsum := falsum
| n (equal t₁ t₂) := equal (ft _ t₁) (ft _ t₂)
| n (rel R ts) := rel (fr _ R) (λ i, ft _ (ts i))
| n (imp φ₁ φ₂) := φ₁.map_term_rel.imp φ₂.map_term_rel
| n (all φ) := (h n φ.map_term_rel).all
/-- Raises all of the `fin`-indexed variables of a formula greater than or equal to `m` by `n'`. -/
def lift_at : ∀ {n : ℕ} (n' m : ℕ), L.bounded_formula α n → L.bounded_formula α (n + n') :=
λ n n' m φ, φ.map_term_rel (λ k t, t.lift_at n' m) (λ _, id)
(λ _, cast_le (by rw [add_assoc, add_comm 1, add_assoc]))
@[simp] lemma map_term_rel_map_term_rel {L'' : language}
(ft : ∀ n, L.term (α ⊕ fin n) → L'.term (β ⊕ fin n))
(fr : ∀ n, L.relations n → L'.relations n)
(ft' : ∀ n, L'.term (β ⊕ fin n) → L''.term (γ ⊕ fin n))
(fr' : ∀ n, L'.relations n → L''.relations n)
{n} (φ : L.bounded_formula α n) :
(φ.map_term_rel ft fr (λ _, id)).map_term_rel ft' fr' (λ _, id) =
φ.map_term_rel (λ _, (ft' _) ∘ (ft _)) (λ _, (fr' _) ∘ (fr _)) (λ _, id) :=
begin
induction φ with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ simp [map_term_rel] },
{ simp [map_term_rel] },
{ simp [map_term_rel, ih1, ih2] },
{ simp [map_term_rel, ih3], }
end
@[simp] lemma map_term_rel_id_id_id {n} (φ : L.bounded_formula α n) :
φ.map_term_rel (λ _, id) (λ _, id) (λ _, id) = φ :=
begin
induction φ with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ simp [map_term_rel] },
{ simp [map_term_rel] },
{ simp [map_term_rel, ih1, ih2] },
{ simp [map_term_rel, ih3], }
end
/-- An equivalence of bounded formulas given by an equivalence of terms and an equivalence of
relations. -/
@[simps] def map_term_rel_equiv (ft : ∀ n, L.term (α ⊕ fin n) ≃ L'.term (β ⊕ fin n))
(fr : ∀ n, L.relations n ≃ L'.relations n) {n} :
L.bounded_formula α n ≃ L'.bounded_formula β n :=
⟨map_term_rel (λ n, ft n) (λ n, fr n) (λ _, id),
map_term_rel (λ n, (ft n).symm) (λ n, (fr n).symm) (λ _, id),
λ φ, by simp, λ φ, by simp⟩
/-- A function to help relabel the variables in bounded formulas. -/
def relabel_aux (g : α → β ⊕ fin n) (k : ℕ) :
α ⊕ fin k → β ⊕ fin (n + k) :=
sum.map id fin_sum_fin_equiv ∘ equiv.sum_assoc _ _ _ ∘ sum.map g id
@[simp] lemma sum_elim_comp_relabel_aux {m : ℕ} {g : α → (β ⊕ fin n)}
{v : β → M} {xs : fin (n + m) → M} :
sum.elim v xs ∘ relabel_aux g m =
sum.elim (sum.elim v (xs ∘ cast_add m) ∘ g) (xs ∘ nat_add n) :=
begin
ext x,
cases x,
{ simp only [bounded_formula.relabel_aux, function.comp_app, sum.map_inl, sum.elim_inl],
cases g x with l r;
simp },
{ simp [bounded_formula.relabel_aux] }
end
@[simp] lemma relabel_aux_sum_inl (k : ℕ) :
relabel_aux (sum.inl : α → α ⊕ fin n) k =
sum.map id (nat_add n) :=
begin
ext x,
cases x;
{ simp [relabel_aux] },
end
/-- Relabels a bounded formula's variables along a particular function. -/
def relabel (g : α → (β ⊕ fin n)) {k} (φ : L.bounded_formula α k) :
L.bounded_formula β (n + k) :=
φ.map_term_rel (λ _ t, t.relabel (relabel_aux g _)) (λ _, id)
(λ _, cast_le (ge_of_eq (add_assoc _ _ _)))
@[simp] lemma relabel_falsum (g : α → (β ⊕ fin n)) {k} :
(falsum : L.bounded_formula α k).relabel g = falsum :=
rfl
@[simp] lemma relabel_bot (g : α → (β ⊕ fin n)) {k} :
(⊥ : L.bounded_formula α k).relabel g = ⊥ :=
rfl
@[simp] lemma relabel_imp (g : α → (β ⊕ fin n)) {k} (φ ψ : L.bounded_formula α k) :
(φ.imp ψ).relabel g = (φ.relabel g).imp (ψ.relabel g) :=
rfl
@[simp] lemma relabel_not (g : α → (β ⊕ fin n)) {k} (φ : L.bounded_formula α k) :
φ.not.relabel g = (φ.relabel g).not :=
by simp [bounded_formula.not]
@[simp] lemma relabel_all (g : α → (β ⊕ fin n)) {k} (φ : L.bounded_formula α (k + 1)) :
φ.all.relabel g = (φ.relabel g).all :=
begin
rw [relabel, map_term_rel, relabel],
simp,
end
@[simp] lemma relabel_ex (g : α → (β ⊕ fin n)) {k} (φ : L.bounded_formula α (k + 1)) :
φ.ex.relabel g = (φ.relabel g).ex :=
by simp [bounded_formula.ex]
@[simp] lemma relabel_sum_inl (φ : L.bounded_formula α n) :
(φ.relabel sum.inl : L.bounded_formula α (0 + n)) =
φ.cast_le (ge_of_eq (zero_add n)) :=
begin
simp only [relabel, relabel_aux_sum_inl],
induction φ with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ simp [fin.nat_add_zero, cast_le_of_eq, map_term_rel] },
{ simp [fin.nat_add_zero, cast_le_of_eq, map_term_rel] },
{ simp [map_term_rel, ih1, ih2], },
{ simp [map_term_rel, ih3, cast_le], },
end
/-- Substitutes the variables in a given formula with terms. -/
@[simp] def subst {n : ℕ} (φ : L.bounded_formula α n) (f : α → L.term β) : L.bounded_formula β n :=
φ.map_term_rel (λ _ t, t.subst (sum.elim (term.relabel sum.inl ∘ f) (var ∘ sum.inr)))
(λ _, id) (λ _, id)
/-- Turns the extra variables of a bounded formula into free variables. -/
@[simp] def to_formula : ∀ {n : ℕ}, L.bounded_formula α n → L.formula (α ⊕ fin n)
| n falsum := falsum
| n (equal t₁ t₂) := t₁.equal t₂
| n (rel R ts) := R.formula ts
| n (imp φ₁ φ₂) := φ₁.to_formula.imp φ₂.to_formula
| n (all φ) := (φ.to_formula.relabel
(sum.elim (sum.inl ∘ sum.inl) (sum.map sum.inr id ∘ fin_sum_fin_equiv.symm))).all
variables {l : ℕ} {φ ψ : L.bounded_formula α l} {θ : L.bounded_formula α l.succ}
variables {v : α → M} {xs : fin l → M}
/-- An atomic formula is either equality or a relation symbol applied to terms.
Note that `⊥` and `⊤` are not considered atomic in this convention. -/
inductive is_atomic : L.bounded_formula α n → Prop
| equal (t₁ t₂ : L.term (α ⊕ fin n)) : is_atomic (bd_equal t₁ t₂)
| rel {l : ℕ} (R : L.relations l) (ts : fin l → L.term (α ⊕ fin n)) :
is_atomic (R.bounded_formula ts)
lemma not_all_is_atomic (φ : L.bounded_formula α (n + 1)) :
¬ φ.all.is_atomic :=
λ con, by cases con
lemma not_ex_is_atomic (φ : L.bounded_formula α (n + 1)) :
¬ φ.ex.is_atomic :=
λ con, by cases con
lemma is_atomic.relabel {m : ℕ} {φ : L.bounded_formula α m} (h : φ.is_atomic)
(f : α → β ⊕ (fin n)) :
(φ.relabel f).is_atomic :=
is_atomic.rec_on h (λ _ _, is_atomic.equal _ _) (λ _ _ _, is_atomic.rel _ _)
lemma is_atomic.lift_at {k m : ℕ} (h : is_atomic φ) : (φ.lift_at k m).is_atomic :=
is_atomic.rec_on h (λ _ _, is_atomic.equal _ _) (λ _ _ _, is_atomic.rel _ _)
lemma is_atomic.cast_le {h : l ≤ n} (hφ : is_atomic φ) :
(φ.cast_le h).is_atomic :=
is_atomic.rec_on hφ (λ _ _, is_atomic.equal _ _) (λ _ _ _, is_atomic.rel _ _)
/-- A quantifier-free formula is a formula defined without quantifiers. These are all equivalent
to boolean combinations of atomic formulas. -/
inductive is_qf : L.bounded_formula α n → Prop
| falsum : is_qf falsum
| of_is_atomic {φ} (h : is_atomic φ) : is_qf φ
| imp {φ₁ φ₂} (h₁ : is_qf φ₁) (h₂ : is_qf φ₂) : is_qf (φ₁.imp φ₂)
lemma is_atomic.is_qf {φ : L.bounded_formula α n} : is_atomic φ → is_qf φ :=
is_qf.of_is_atomic
lemma is_qf_bot : is_qf (⊥ : L.bounded_formula α n) :=
is_qf.falsum
lemma is_qf.not {φ : L.bounded_formula α n} (h : is_qf φ) :
is_qf φ.not :=
h.imp is_qf_bot
lemma is_qf.relabel {m : ℕ} {φ : L.bounded_formula α m} (h : φ.is_qf)
(f : α → β ⊕ (fin n)) :
(φ.relabel f).is_qf :=
is_qf.rec_on h is_qf_bot (λ _ h, (h.relabel f).is_qf) (λ _ _ _ _ h1 h2, h1.imp h2)
lemma is_qf.lift_at {k m : ℕ} (h : is_qf φ) : (φ.lift_at k m).is_qf :=
is_qf.rec_on h is_qf_bot (λ _ ih, ih.lift_at.is_qf) (λ _ _ _ _ ih1 ih2, ih1.imp ih2)
lemma is_qf.cast_le {h : l ≤ n} (hφ : is_qf φ) :
(φ.cast_le h).is_qf :=
is_qf.rec_on hφ is_qf_bot (λ _ ih, ih.cast_le.is_qf) (λ _ _ _ _ ih1 ih2, ih1.imp ih2)
lemma not_all_is_qf (φ : L.bounded_formula α (n + 1)) :
¬ φ.all.is_qf :=
λ con, begin
cases con with _ con,
exact (φ.not_all_is_atomic con),
end
lemma not_ex_is_qf (φ : L.bounded_formula α (n + 1)) :
¬ φ.ex.is_qf :=
λ con, begin
cases con with _ con _ _ con,
{ exact (φ.not_ex_is_atomic con) },
{ exact not_all_is_qf _ con }
end
/-- Indicates that a bounded formula is in prenex normal form - that is, it consists of quantifiers
applied to a quantifier-free formula. -/
inductive is_prenex : ∀ {n}, L.bounded_formula α n → Prop
| of_is_qf {n} {φ : L.bounded_formula α n} (h : is_qf φ) : is_prenex φ
| all {n} {φ : L.bounded_formula α (n + 1)} (h : is_prenex φ) : is_prenex φ.all
| ex {n} {φ : L.bounded_formula α (n + 1)} (h : is_prenex φ) : is_prenex φ.ex
lemma is_qf.is_prenex {φ : L.bounded_formula α n} : is_qf φ → is_prenex φ :=
is_prenex.of_is_qf
lemma is_atomic.is_prenex {φ : L.bounded_formula α n} (h : is_atomic φ) : is_prenex φ :=
h.is_qf.is_prenex
lemma is_prenex.induction_on_all_not {P : ∀ {n}, L.bounded_formula α n → Prop}
{φ : L.bounded_formula α n}
(h : is_prenex φ)
(hq : ∀ {m} {ψ : L.bounded_formula α m}, ψ.is_qf → P ψ)
(ha : ∀ {m} {ψ : L.bounded_formula α (m + 1)}, P ψ → P ψ.all)
(hn : ∀ {m} {ψ : L.bounded_formula α m}, P ψ → P ψ.not) :
P φ :=
is_prenex.rec_on h (λ _ _, hq) (λ _ _ _, ha) (λ _ _ _ ih, hn (ha (hn ih)))
lemma is_prenex.relabel {m : ℕ} {φ : L.bounded_formula α m} (h : φ.is_prenex)
(f : α → β ⊕ (fin n)) :
(φ.relabel f).is_prenex :=
is_prenex.rec_on h
(λ _ _ h, (h.relabel f).is_prenex)
(λ _ _ _ h, by simp [h.all])
(λ _ _ _ h, by simp [h.ex])
lemma is_prenex.cast_le (hφ : is_prenex φ) :
∀ {n} {h : l ≤ n}, (φ.cast_le h).is_prenex :=
is_prenex.rec_on hφ
(λ _ _ ih _ _, ih.cast_le.is_prenex)
(λ _ _ _ ih _ _, ih.all)
(λ _ _ _ ih _ _, ih.ex)
lemma is_prenex.lift_at {k m : ℕ} (h : is_prenex φ) : (φ.lift_at k m).is_prenex :=
is_prenex.rec_on h
(λ _ _ ih, ih.lift_at.is_prenex)
(λ _ _ _ ih, ih.cast_le.all)
(λ _ _ _ ih, ih.cast_le.ex)
/-- An auxiliary operation to `first_order.language.bounded_formula.to_prenex`.
If `φ` is quantifier-free and `ψ` is in prenex normal form, then `φ.to_prenex_imp_right ψ`
is a prenex normal form for `φ.imp ψ`. -/
def to_prenex_imp_right :
∀ {n}, L.bounded_formula α n → L.bounded_formula α n → L.bounded_formula α n
| n φ (bounded_formula.ex ψ) := ((φ.lift_at 1 n).to_prenex_imp_right ψ).ex
| n φ (all ψ) := ((φ.lift_at 1 n).to_prenex_imp_right ψ).all
| n φ ψ := φ.imp ψ
lemma is_qf.to_prenex_imp_right {φ : L.bounded_formula α n} :
Π {ψ : L.bounded_formula α n}, is_qf ψ → (φ.to_prenex_imp_right ψ = φ.imp ψ)
| _ is_qf.falsum := rfl
| _ (is_qf.of_is_atomic (is_atomic.equal _ _)) := rfl
| _ (is_qf.of_is_atomic (is_atomic.rel _ _)) := rfl
| _ (is_qf.imp is_qf.falsum _) := rfl
| _ (is_qf.imp (is_qf.of_is_atomic (is_atomic.equal _ _)) _) := rfl
| _ (is_qf.imp (is_qf.of_is_atomic (is_atomic.rel _ _)) _) := rfl
| _ (is_qf.imp (is_qf.imp _ _) _) := rfl
lemma is_prenex_to_prenex_imp_right {φ ψ : L.bounded_formula α n}
(hφ : is_qf φ) (hψ : is_prenex ψ) :
is_prenex (φ.to_prenex_imp_right ψ) :=
begin
induction hψ with _ _ hψ _ _ _ ih1 _ _ _ ih2,
{ rw hψ.to_prenex_imp_right,
exact (hφ.imp hψ).is_prenex },
{ exact (ih1 hφ.lift_at).all },
{ exact (ih2 hφ.lift_at).ex }
end
/-- An auxiliary operation to `first_order.language.bounded_formula.to_prenex`.
If `φ` and `ψ` are in prenex normal form, then `φ.to_prenex_imp ψ`
is a prenex normal form for `φ.imp ψ`. -/
def to_prenex_imp :
∀ {n}, L.bounded_formula α n → L.bounded_formula α n → L.bounded_formula α n
| n (bounded_formula.ex φ) ψ := (φ.to_prenex_imp (ψ.lift_at 1 n)).all
| n (all φ) ψ := (φ.to_prenex_imp (ψ.lift_at 1 n)).ex
| _ φ ψ := φ.to_prenex_imp_right ψ
lemma is_qf.to_prenex_imp : Π {φ ψ : L.bounded_formula α n}, φ.is_qf →
φ.to_prenex_imp ψ = φ.to_prenex_imp_right ψ
| _ _ is_qf.falsum := rfl
| _ _ (is_qf.of_is_atomic (is_atomic.equal _ _)) := rfl
| _ _ (is_qf.of_is_atomic (is_atomic.rel _ _)) := rfl
| _ _ (is_qf.imp is_qf.falsum _) := rfl
| _ _ (is_qf.imp (is_qf.of_is_atomic (is_atomic.equal _ _)) _) := rfl
| _ _ (is_qf.imp (is_qf.of_is_atomic (is_atomic.rel _ _)) _) := rfl
| _ _ (is_qf.imp (is_qf.imp _ _) _) := rfl
lemma is_prenex_to_prenex_imp {φ ψ : L.bounded_formula α n}
(hφ : is_prenex φ) (hψ : is_prenex ψ) :
is_prenex (φ.to_prenex_imp ψ) :=
begin
induction hφ with _ _ hφ _ _ _ ih1 _ _ _ ih2,
{ rw hφ.to_prenex_imp,
exact is_prenex_to_prenex_imp_right hφ hψ },
{ exact (ih1 hψ.lift_at).ex },
{ exact (ih2 hψ.lift_at).all }
end
/-- For any bounded formula `φ`, `φ.to_prenex` is a semantically-equivalent formula in prenex normal
form. -/
def to_prenex : ∀ {n}, L.bounded_formula α n → L.bounded_formula α n
| _ falsum := ⊥
| _ (equal t₁ t₂) := t₁.bd_equal t₂
| _ (rel R ts) := rel R ts
| _ (imp f₁ f₂) := f₁.to_prenex.to_prenex_imp f₂.to_prenex
| _ (all f) := f.to_prenex.all
lemma to_prenex_is_prenex (φ : L.bounded_formula α n) :
φ.to_prenex.is_prenex :=
bounded_formula.rec_on φ
(λ _, is_qf_bot.is_prenex)
(λ _ _ _, (is_atomic.equal _ _).is_prenex)
(λ _ _ _ _, (is_atomic.rel _ _).is_prenex)
(λ _ _ _ h1 h2, is_prenex_to_prenex_imp h1 h2)
(λ _ _, is_prenex.all)
end bounded_formula
namespace Lhom
open bounded_formula
/-- Maps a bounded formula's symbols along a language map. -/
@[simp] def on_bounded_formula (g : L →ᴸ L') :
∀ {k : ℕ}, L.bounded_formula α k → L'.bounded_formula α k
| k falsum := falsum
| k (equal t₁ t₂) := (g.on_term t₁).bd_equal (g.on_term t₂)
| k (rel R ts) := (g.on_relation R).bounded_formula (g.on_term ∘ ts)
| k (imp f₁ f₂) := (on_bounded_formula f₁).imp (on_bounded_formula f₂)
| k (all f) := (on_bounded_formula f).all
@[simp] lemma id_on_bounded_formula :
((Lhom.id L).on_bounded_formula : L.bounded_formula α n → L.bounded_formula α n) = id :=
begin
ext f,
induction f with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ rw [on_bounded_formula, Lhom.id_on_term, id.def, id.def, id.def, bd_equal] },
{ rw [on_bounded_formula, Lhom.id_on_term],
refl, },
{ rw [on_bounded_formula, ih1, ih2, id.def, id.def, id.def] },
{ rw [on_bounded_formula, ih3, id.def, id.def] }
end
@[simp] lemma comp_on_bounded_formula {L'' : language} (φ : L' →ᴸ L'') (ψ : L →ᴸ L') :
((φ.comp ψ).on_bounded_formula : L.bounded_formula α n → L''.bounded_formula α n) =
φ.on_bounded_formula ∘ ψ.on_bounded_formula :=
begin
ext f,
induction f with _ _ _ _ _ _ _ _ _ _ _ ih1 ih2 _ _ ih3,
{ refl },
{ simp only [on_bounded_formula, comp_on_term, function.comp_app],
refl, },
{ simp only [on_bounded_formula, comp_on_relation, comp_on_term, function.comp_app],
refl },
{ simp only [on_bounded_formula, function.comp_app, ih1, ih2, eq_self_iff_true, and_self], },
{ simp only [ih3, on_bounded_formula, function.comp_app] }
end
/-- Maps a formula's symbols along a language map. -/
def on_formula (g : L →ᴸ L') : L.formula α → L'.formula α :=
g.on_bounded_formula
/-- Maps a sentence's symbols along a language map. -/
def on_sentence (g : L →ᴸ L') : L.sentence → L'.sentence :=
g.on_formula
/-- Maps a theory's symbols along a language map. -/
def on_Theory (g : L →ᴸ L') (T : L.Theory) : L'.Theory :=
g.on_sentence '' T
@[simp] lemma mem_on_Theory {g : L →ᴸ L'} {T : L.Theory} {φ : L'.sentence} :
  φ ∈ g.on_Theory T ↔ ∃ φ₀, φ₀ ∈ T ∧ g.on_sentence φ₀ = φ :=
set.mem_image _ _ _
end Lhom
namespace Lequiv
/-- Maps a bounded formula's symbols along a language equivalence. -/
@[simps] def on_bounded_formula (φ : L ≃ᴸ L') :
L.bounded_formula α n ≃ L'.bounded_formula α n :=
{ to_fun := φ.to_Lhom.on_bounded_formula,
inv_fun := φ.inv_Lhom.on_bounded_formula,
left_inv := by rw [function.left_inverse_iff_comp, ← Lhom.comp_on_bounded_formula, φ.left_inv,
Lhom.id_on_bounded_formula],
right_inv := by rw [function.right_inverse_iff_comp, ← Lhom.comp_on_bounded_formula, φ.right_inv,
Lhom.id_on_bounded_formula] }
lemma on_bounded_formula_symm (φ : L ≃ᴸ L') :
(φ.on_bounded_formula.symm : L'.bounded_formula α n ≃ L.bounded_formula α n) =
φ.symm.on_bounded_formula :=
rfl
/-- Maps a formula's symbols along a language equivalence. -/
def on_formula (φ : L ≃ᴸ L') :
L.formula α ≃ L'.formula α :=
φ.on_bounded_formula
@[simp] lemma on_formula_apply (φ : L ≃ᴸ L') :
(φ.on_formula : L.formula α → L'.formula α) = φ.to_Lhom.on_formula :=
rfl
@[simp] lemma on_formula_symm (φ : L ≃ᴸ L') :
(φ.on_formula.symm : L'.formula α ≃ L.formula α) = φ.symm.on_formula :=
rfl
/-- Maps a sentence's symbols along a language equivalence. -/
@[simps] def on_sentence (φ : L ≃ᴸ L') :
L.sentence ≃ L'.sentence :=
φ.on_formula
end Lequiv
localized "infix ` =' `:88 := first_order.language.term.bd_equal" in first_order
-- input \~- or \simeq
localized "infixr ` ⟹ `:62 := first_order.language.bounded_formula.imp" in first_order
-- input \==>
localized "prefix `∀'`:110 := first_order.language.bounded_formula.all" in first_order
localized "prefix `∼`:max := first_order.language.bounded_formula.not" in first_order
-- input \~, the ASCII character ~ has too low precedence
localized "infix ` ⇔ `:61 := first_order.language.bounded_formula.iff" in first_order -- input \<=>
localized "prefix `∃'`:110 := first_order.language.bounded_formula.ex" in first_order -- input \ex
namespace formula
/-- Relabels a formula's variables along a particular function. -/
def relabel (g : α → β) : L.formula α → L.formula β :=
@bounded_formula.relabel _ _ _ 0 (sum.inl ∘ g) 0
/-- The graph of a function as a first-order formula. -/
def graph (f : L.functions n) : L.formula (fin (n + 1)) :=
equal (var 0) (func f (λ i, var i.succ))
/-- The negation of a formula. -/
protected def not (φ : L.formula α) : L.formula α := φ.not
/-- The implication between formulas, as a formula. -/
protected def imp : L.formula α → L.formula α → L.formula α := bounded_formula.imp
/-- The biimplication between formulas, as a formula. -/
protected def iff (φ ψ : L.formula α) : L.formula α := φ.iff ψ
lemma is_atomic_graph (f : L.functions n) : (graph f).is_atomic :=
bounded_formula.is_atomic.equal _ _
end formula
namespace relations
variable (r : L.relations 2)
/-- The sentence indicating that a basic relation symbol is reflexive. -/
protected def reflexive : L.sentence := ∀' r.bounded_formula₂ &0 &0
/-- The sentence indicating that a basic relation symbol is irreflexive. -/
protected def irreflexive : L.sentence := ∀' ∼ (r.bounded_formula₂ &0 &0)
/-- The sentence indicating that a basic relation symbol is symmetric. -/
protected def symmetric : L.sentence := ∀' ∀' (r.bounded_formula₂ &0 &1 ⟹ r.bounded_formula₂ &1 &0)
/-- The sentence indicating that a basic relation symbol is antisymmetric. -/
protected def antisymmetric : L.sentence :=
∀' ∀' (r.bounded_formula₂ &0 &1 ⟹ (r.bounded_formula₂ &1 &0 ⟹ term.bd_equal &0 &1))
/-- The sentence indicating that a basic relation symbol is transitive. -/
protected def transitive : L.sentence :=
∀' ∀' ∀' (r.bounded_formula₂ &0 &1 ⟹ r.bounded_formula₂ &1 &2 ⟹ r.bounded_formula₂ &0 &2)
/-- The sentence indicating that a basic relation symbol is total. -/
protected def total : L.sentence :=
∀' ∀' (r.bounded_formula₂ &0 &1 ⊔ r.bounded_formula₂ &1 &0)
end relations
section cardinality
variable (L)
/-- A sentence indicating that a structure has `n` distinct elements. -/
protected def sentence.card_ge (n) : L.sentence :=
(((((list.fin_range n).product (list.fin_range n)).filter (λ ij : _ × _, ij.1 ≠ ij.2)).map
(λ (ij : _ × _), ∼ ((& ij.1).bd_equal (& ij.2)))).foldr (⊓) ⊤).exs
/-- A theory indicating that a structure is infinite. -/
def infinite_theory : L.Theory := set.range (sentence.card_ge L)
/-- A theory that indicates a structure is nonempty. -/
def nonempty_theory : L.Theory := {sentence.card_ge L 1}
/-- A theory indicating that each of a set of constants is distinct. -/
def distinct_constants_theory (s : set α) : L[[α]].Theory :=
(λ ab : α × α, (((L.con ab.1).term.equal (L.con ab.2).term).not)) '' ((s ×ˢ s) ∩ (set.diagonal α)ᶜ)
variables {L} {α}
open set
lemma monotone_distinct_constants_theory :
monotone (L.distinct_constants_theory : set α → L[[α]].Theory) :=
λ s t st, (image_subset _ (inter_subset_inter_left _ (prod_mono st st)))
lemma directed_distinct_constants_theory :
directed (⊆) (L.distinct_constants_theory : set α → L[[α]].Theory) :=
monotone.directed_le monotone_distinct_constants_theory
lemma distinct_constants_theory_eq_Union (s : set α) :
L.distinct_constants_theory s = ⋃ (t : finset s), L.distinct_constants_theory
(t.map (function.embedding.subtype (λ x, x ∈ s))) :=
begin
classical,
simp only [distinct_constants_theory],
rw [← image_Union, ← Union_inter],
refine congr rfl (congr (congr rfl _) rfl),
ext ⟨i, j⟩,
simp only [prod_mk_mem_set_prod_eq, finset.coe_map, function.embedding.coe_subtype, mem_Union,
mem_image, finset.mem_coe, subtype.exists, subtype.coe_mk, exists_and_distrib_right,
exists_eq_right],
refine ⟨λ h, ⟨{⟨i, h.1⟩, ⟨j, h.2⟩}, ⟨h.1, _⟩, ⟨h.2, _⟩⟩, _⟩,
{ simp },
{ simp },
{ rintros ⟨t, ⟨is, _⟩, ⟨js, _⟩⟩,
exact ⟨is, js⟩ }
end
end cardinality
end language
end first_order
|
/-
Copyright (c) 2020 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov, Patrick Massot
! This file was ported from Lean 3 source module data.set.pointwise.interval
! leanprover-community/mathlib commit 2196ab363eb097c008d4497125e0dde23fb36db2
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Set.Intervals.UnorderedInterval
import Mathlib.Data.Set.Intervals.Monoid
import Mathlib.Data.Set.Pointwise.Basic
import Mathlib.Algebra.Order.Field.Basic
import Mathlib.Algebra.Order.Group.MinMax
/-!
# (Pre)images of intervals
In this file we prove a bunch of trivial lemmas like “if we add `a` to all points of `[b, c]`,
then we get `[a + b, a + c]`”. For the functions `x ↦ x ± a`, `x ↦ a ± x`, and `x ↦ -x` we prove
lemmas about preimages and images of all intervals. We also prove a few lemmas about images under
`x ↦ a * x`, `x ↦ x * a` and `x ↦ x⁻¹`.
-/
open Interval Pointwise
variable {α : Type _}
namespace Set
section OrderedAddCommGroup
variable [OrderedAddCommGroup α] (a b c : α)
/-!
### Preimages under `x ↦ a + x`
-/
@[simp]
theorem preimage_const_add_Ici : (fun x => a + x) ⁻¹' Ici b = Ici (b - a) :=
ext fun _x => sub_le_iff_le_add'.symm
#align set.preimage_const_add_Ici Set.preimage_const_add_Ici
@[simp]
theorem preimage_const_add_Ioi : (fun x => a + x) ⁻¹' Ioi b = Ioi (b - a) :=
ext fun _x => sub_lt_iff_lt_add'.symm
#align set.preimage_const_add_Ioi Set.preimage_const_add_Ioi
@[simp]
theorem preimage_const_add_Iic : (fun x => a + x) ⁻¹' Iic b = Iic (b - a) :=
ext fun _x => le_sub_iff_add_le'.symm
#align set.preimage_const_add_Iic Set.preimage_const_add_Iic
@[simp]
theorem preimage_const_add_Iio : (fun x => a + x) ⁻¹' Iio b = Iio (b - a) :=
ext fun _x => lt_sub_iff_add_lt'.symm
#align set.preimage_const_add_Iio Set.preimage_const_add_Iio
@[simp]
theorem preimage_const_add_Icc : (fun x => a + x) ⁻¹' Icc b c = Icc (b - a) (c - a) := by
simp [← Ici_inter_Iic]
#align set.preimage_const_add_Icc Set.preimage_const_add_Icc
@[simp]
theorem preimage_const_add_Ico : (fun x => a + x) ⁻¹' Ico b c = Ico (b - a) (c - a) := by
simp [← Ici_inter_Iio]
#align set.preimage_const_add_Ico Set.preimage_const_add_Ico
@[simp]
theorem preimage_const_add_Ioc : (fun x => a + x) ⁻¹' Ioc b c = Ioc (b - a) (c - a) := by
simp [← Ioi_inter_Iic]
#align set.preimage_const_add_Ioc Set.preimage_const_add_Ioc
@[simp]
theorem preimage_const_add_Ioo : (fun x => a + x) ⁻¹' Ioo b c = Ioo (b - a) (c - a) := by
simp [← Ioi_inter_Iio]
#align set.preimage_const_add_Ioo Set.preimage_const_add_Ioo
/-!
### Preimages under `x ↦ x + a`
-/
@[simp]
theorem preimage_add_const_Ici : (fun x => x + a) ⁻¹' Ici b = Ici (b - a) :=
ext fun _x => sub_le_iff_le_add.symm
#align set.preimage_add_const_Ici Set.preimage_add_const_Ici
@[simp]
theorem preimage_add_const_Ioi : (fun x => x + a) ⁻¹' Ioi b = Ioi (b - a) :=
ext fun _x => sub_lt_iff_lt_add.symm
#align set.preimage_add_const_Ioi Set.preimage_add_const_Ioi
@[simp]
theorem preimage_add_const_Iic : (fun x => x + a) ⁻¹' Iic b = Iic (b - a) :=
ext fun _x => le_sub_iff_add_le.symm
#align set.preimage_add_const_Iic Set.preimage_add_const_Iic
@[simp]
theorem preimage_add_const_Iio : (fun x => x + a) ⁻¹' Iio b = Iio (b - a) :=
ext fun _x => lt_sub_iff_add_lt.symm
#align set.preimage_add_const_Iio Set.preimage_add_const_Iio
@[simp]
theorem preimage_add_const_Icc : (fun x => x + a) ⁻¹' Icc b c = Icc (b - a) (c - a) := by
simp [← Ici_inter_Iic]
#align set.preimage_add_const_Icc Set.preimage_add_const_Icc
@[simp]
theorem preimage_add_const_Ico : (fun x => x + a) ⁻¹' Ico b c = Ico (b - a) (c - a) := by
simp [← Ici_inter_Iio]
#align set.preimage_add_const_Ico Set.preimage_add_const_Ico
@[simp]
theorem preimage_add_const_Ioc : (fun x => x + a) ⁻¹' Ioc b c = Ioc (b - a) (c - a) := by
simp [← Ioi_inter_Iic]
#align set.preimage_add_const_Ioc Set.preimage_add_const_Ioc
@[simp]
theorem preimage_add_const_Ioo : (fun x => x + a) ⁻¹' Ioo b c = Ioo (b - a) (c - a) := by
simp [← Ioi_inter_Iio]
#align set.preimage_add_const_Ioo Set.preimage_add_const_Ioo
/-!
### Preimages under `x ↦ -x`
-/
@[simp]
theorem preimage_neg_Ici : -Ici a = Iic (-a) :=
ext fun _x => le_neg
#align set.preimage_neg_Ici Set.preimage_neg_Ici
@[simp]
theorem preimage_neg_Iic : -Iic a = Ici (-a) :=
ext fun _x => neg_le
#align set.preimage_neg_Iic Set.preimage_neg_Iic
@[simp]
theorem preimage_neg_Ioi : -Ioi a = Iio (-a) :=
ext fun _x => lt_neg
#align set.preimage_neg_Ioi Set.preimage_neg_Ioi
@[simp]
theorem preimage_neg_Iio : -Iio a = Ioi (-a) :=
ext fun _x => neg_lt
#align set.preimage_neg_Iio Set.preimage_neg_Iio
@[simp]
theorem preimage_neg_Icc : -Icc a b = Icc (-b) (-a) := by simp [← Ici_inter_Iic, inter_comm]
#align set.preimage_neg_Icc Set.preimage_neg_Icc
@[simp]
theorem preimage_neg_Ico : -Ico a b = Ioc (-b) (-a) := by
simp [← Ici_inter_Iio, ← Ioi_inter_Iic, inter_comm]
#align set.preimage_neg_Ico Set.preimage_neg_Ico
@[simp]
theorem preimage_neg_Ioc : -Ioc a b = Ico (-b) (-a) := by
simp [← Ioi_inter_Iic, ← Ici_inter_Iio, inter_comm]
#align set.preimage_neg_Ioc Set.preimage_neg_Ioc
@[simp]
theorem preimage_neg_Ioo : -Ioo a b = Ioo (-b) (-a) := by simp [← Ioi_inter_Iio, inter_comm]
#align set.preimage_neg_Ioo Set.preimage_neg_Ioo
/-!
### Preimages under `x ↦ x - a`
-/
@[simp]
theorem preimage_sub_const_Ici : (fun x => x - a) ⁻¹' Ici b = Ici (b + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Ici Set.preimage_sub_const_Ici
@[simp]
theorem preimage_sub_const_Ioi : (fun x => x - a) ⁻¹' Ioi b = Ioi (b + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Ioi Set.preimage_sub_const_Ioi
@[simp]
theorem preimage_sub_const_Iic : (fun x => x - a) ⁻¹' Iic b = Iic (b + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Iic Set.preimage_sub_const_Iic
@[simp]
theorem preimage_sub_const_Iio : (fun x => x - a) ⁻¹' Iio b = Iio (b + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Iio Set.preimage_sub_const_Iio
@[simp]
theorem preimage_sub_const_Icc : (fun x => x - a) ⁻¹' Icc b c = Icc (b + a) (c + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Icc Set.preimage_sub_const_Icc
@[simp]
theorem preimage_sub_const_Ico : (fun x => x - a) ⁻¹' Ico b c = Ico (b + a) (c + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Ico Set.preimage_sub_const_Ico
@[simp]
theorem preimage_sub_const_Ioc : (fun x => x - a) ⁻¹' Ioc b c = Ioc (b + a) (c + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Ioc Set.preimage_sub_const_Ioc
@[simp]
theorem preimage_sub_const_Ioo : (fun x => x - a) ⁻¹' Ioo b c = Ioo (b + a) (c + a) := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_Ioo Set.preimage_sub_const_Ioo
/-!
### Preimages under `x ↦ a - x`
-/
@[simp]
theorem preimage_const_sub_Ici : (fun x => a - x) ⁻¹' Ici b = Iic (a - b) :=
ext fun _x => le_sub_comm
#align set.preimage_const_sub_Ici Set.preimage_const_sub_Ici
@[simp]
theorem preimage_const_sub_Iic : (fun x => a - x) ⁻¹' Iic b = Ici (a - b) :=
ext fun _x => sub_le_comm
#align set.preimage_const_sub_Iic Set.preimage_const_sub_Iic
@[simp]
theorem preimage_const_sub_Ioi : (fun x => a - x) ⁻¹' Ioi b = Iio (a - b) :=
ext fun _x => lt_sub_comm
#align set.preimage_const_sub_Ioi Set.preimage_const_sub_Ioi
@[simp]
theorem preimage_const_sub_Iio : (fun x => a - x) ⁻¹' Iio b = Ioi (a - b) :=
ext fun _x => sub_lt_comm
#align set.preimage_const_sub_Iio Set.preimage_const_sub_Iio
@[simp]
theorem preimage_const_sub_Icc : (fun x => a - x) ⁻¹' Icc b c = Icc (a - c) (a - b) := by
simp [← Ici_inter_Iic, inter_comm]
#align set.preimage_const_sub_Icc Set.preimage_const_sub_Icc
@[simp]
theorem preimage_const_sub_Ico : (fun x => a - x) ⁻¹' Ico b c = Ioc (a - c) (a - b) := by
simp [← Ioi_inter_Iic, ← Ici_inter_Iio, inter_comm]
#align set.preimage_const_sub_Ico Set.preimage_const_sub_Ico
@[simp]
theorem preimage_const_sub_Ioc : (fun x => a - x) ⁻¹' Ioc b c = Ico (a - c) (a - b) := by
simp [← Ioi_inter_Iic, ← Ici_inter_Iio, inter_comm]
#align set.preimage_const_sub_Ioc Set.preimage_const_sub_Ioc
@[simp]
theorem preimage_const_sub_Ioo : (fun x => a - x) ⁻¹' Ioo b c = Ioo (a - c) (a - b) := by
simp [← Ioi_inter_Iio, inter_comm]
#align set.preimage_const_sub_Ioo Set.preimage_const_sub_Ioo
/-!
### Images under `x ↦ a + x`
-/
-- @[simp] -- Porting note: simp can prove this modulo `add_comm`
theorem image_const_add_Iic : (fun x => a + x) '' Iic b = Iic (a + b) := by simp [add_comm]
#align set.image_const_add_Iic Set.image_const_add_Iic
-- @[simp] -- Porting note: simp can prove this modulo `add_comm`
theorem image_const_add_Iio : (fun x => a + x) '' Iio b = Iio (a + b) := by simp [add_comm]
#align set.image_const_add_Iio Set.image_const_add_Iio
/-!
### Images under `x ↦ x + a`
-/
-- @[simp] -- Porting note: simp can prove this
theorem image_add_const_Iic : (fun x => x + a) '' Iic b = Iic (b + a) := by simp
#align set.image_add_const_Iic Set.image_add_const_Iic
-- @[simp] -- Porting note: simp can prove this
theorem image_add_const_Iio : (fun x => x + a) '' Iio b = Iio (b + a) := by simp
#align set.image_add_const_Iio Set.image_add_const_Iio
/-!
### Images under `x ↦ -x`
-/
theorem image_neg_Ici : Neg.neg '' Ici a = Iic (-a) := by simp
#align set.image_neg_Ici Set.image_neg_Ici
theorem image_neg_Iic : Neg.neg '' Iic a = Ici (-a) := by simp
#align set.image_neg_Iic Set.image_neg_Iic
theorem image_neg_Ioi : Neg.neg '' Ioi a = Iio (-a) := by simp
#align set.image_neg_Ioi Set.image_neg_Ioi
theorem image_neg_Iio : Neg.neg '' Iio a = Ioi (-a) := by simp
#align set.image_neg_Iio Set.image_neg_Iio
theorem image_neg_Icc : Neg.neg '' Icc a b = Icc (-b) (-a) := by simp
#align set.image_neg_Icc Set.image_neg_Icc
theorem image_neg_Ico : Neg.neg '' Ico a b = Ioc (-b) (-a) := by simp
#align set.image_neg_Ico Set.image_neg_Ico
theorem image_neg_Ioc : Neg.neg '' Ioc a b = Ico (-b) (-a) := by simp
#align set.image_neg_Ioc Set.image_neg_Ioc
theorem image_neg_Ioo : Neg.neg '' Ioo a b = Ioo (-b) (-a) := by simp
#align set.image_neg_Ioo Set.image_neg_Ioo
/-!
### Images under `x ↦ a - x`
-/
@[simp]
theorem image_const_sub_Ici : (fun x => a - x) '' Ici b = Iic (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Ici Set.image_const_sub_Ici
@[simp]
theorem image_const_sub_Iic : (fun x => a - x) '' Iic b = Ici (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Iic Set.image_const_sub_Iic
@[simp]
theorem image_const_sub_Ioi : (fun x => a - x) '' Ioi b = Iio (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Ioi Set.image_const_sub_Ioi
@[simp]
theorem image_const_sub_Iio : (fun x => a - x) '' Iio b = Ioi (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Iio Set.image_const_sub_Iio
@[simp]
theorem image_const_sub_Icc : (fun x => a - x) '' Icc b c = Icc (a - c) (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Icc Set.image_const_sub_Icc
@[simp]
theorem image_const_sub_Ico : (fun x => a - x) '' Ico b c = Ioc (a - c) (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Ico Set.image_const_sub_Ico
@[simp]
theorem image_const_sub_Ioc : (fun x => a - x) '' Ioc b c = Ico (a - c) (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Ioc Set.image_const_sub_Ioc
@[simp]
theorem image_const_sub_Ioo : (fun x => a - x) '' Ioo b c = Ioo (a - c) (a - b) := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_Ioo Set.image_const_sub_Ioo
/-!
### Images under `x ↦ x - a`
-/
@[simp]
theorem image_sub_const_Ici : (fun x => x - a) '' Ici b = Ici (b - a) := by simp [sub_eq_neg_add]
#align set.image_sub_const_Ici Set.image_sub_const_Ici
@[simp]
theorem image_sub_const_Iic : (fun x => x - a) '' Iic b = Iic (b - a) := by simp [sub_eq_neg_add]
#align set.image_sub_const_Iic Set.image_sub_const_Iic
@[simp]
theorem image_sub_const_Ioi : (fun x => x - a) '' Ioi b = Ioi (b - a) := by simp [sub_eq_neg_add]
#align set.image_sub_const_Ioi Set.image_sub_const_Ioi
@[simp]
theorem image_sub_const_Iio : (fun x => x - a) '' Iio b = Iio (b - a) := by simp [sub_eq_neg_add]
#align set.image_sub_const_Iio Set.image_sub_const_Iio
@[simp]
theorem image_sub_const_Icc : (fun x => x - a) '' Icc b c = Icc (b - a) (c - a) := by
simp [sub_eq_neg_add]
#align set.image_sub_const_Icc Set.image_sub_const_Icc
@[simp]
theorem image_sub_const_Ico : (fun x => x - a) '' Ico b c = Ico (b - a) (c - a) := by
simp [sub_eq_neg_add]
#align set.image_sub_const_Ico Set.image_sub_const_Ico
@[simp]
theorem image_sub_const_Ioc : (fun x => x - a) '' Ioc b c = Ioc (b - a) (c - a) := by
simp [sub_eq_neg_add]
#align set.image_sub_const_Ioc Set.image_sub_const_Ioc
@[simp]
theorem image_sub_const_Ioo : (fun x => x - a) '' Ioo b c = Ioo (b - a) (c - a) := by
simp [sub_eq_neg_add]
#align set.image_sub_const_Ioo Set.image_sub_const_Ioo
/-!
### Bijections
-/
theorem Iic_add_bij : BijOn (· + a) (Iic b) (Iic (b + a)) :=
image_add_const_Iic a b ▸ ((add_left_injective _).injOn _).bijOn_image
#align set.Iic_add_bij Set.Iic_add_bij
theorem Iio_add_bij : BijOn (· + a) (Iio b) (Iio (b + a)) :=
image_add_const_Iio a b ▸ ((add_left_injective _).injOn _).bijOn_image
#align set.Iio_add_bij Set.Iio_add_bij
end OrderedAddCommGroup
section LinearOrderedAddCommGroup
variable [LinearOrderedAddCommGroup α] (a b c d : α)
@[simp]
theorem preimage_const_add_uIcc : (fun x => a + x) ⁻¹' [[b, c]] = [[b - a, c - a]] := by
simp only [← Icc_min_max, preimage_const_add_Icc, min_sub_sub_right, max_sub_sub_right]
#align set.preimage_const_add_uIcc Set.preimage_const_add_uIcc
@[simp]
theorem preimage_add_const_uIcc : (fun x => x + a) ⁻¹' [[b, c]] = [[b - a, c - a]] := by
simpa only [add_comm] using preimage_const_add_uIcc a b c
#align set.preimage_add_const_uIcc Set.preimage_add_const_uIcc
-- TODO: Why is the notation `-[[a, b]]` broken?
@[simp]
theorem preimage_neg_uIcc : @Neg.neg (Set α) Set.neg [[a, b]] = [[-a, -b]] := by
simp only [← Icc_min_max, preimage_neg_Icc, min_neg_neg, max_neg_neg]
#align set.preimage_neg_uIcc Set.preimage_neg_uIcc
@[simp]
theorem preimage_sub_const_uIcc : (fun x => x - a) ⁻¹' [[b, c]] = [[b + a, c + a]] := by
simp [sub_eq_add_neg]
#align set.preimage_sub_const_uIcc Set.preimage_sub_const_uIcc
@[simp]
theorem preimage_const_sub_uIcc : (fun x => a - x) ⁻¹' [[b, c]] = [[a - b, a - c]] := by
simp_rw [← Icc_min_max, preimage_const_sub_Icc]
simp only [sub_eq_add_neg, min_add_add_left, max_add_add_left, min_neg_neg, max_neg_neg]
#align set.preimage_const_sub_uIcc Set.preimage_const_sub_uIcc
-- @[simp] -- Porting note: simp can prove this modulo `add_comm`
theorem image_const_add_uIcc : (fun x => a + x) '' [[b, c]] = [[a + b, a + c]] := by simp [add_comm]
#align set.image_const_add_uIcc Set.image_const_add_uIcc
-- @[simp] -- Porting note: simp can prove this
theorem image_add_const_uIcc : (fun x => x + a) '' [[b, c]] = [[b + a, c + a]] := by simp
#align set.image_add_const_uIcc Set.image_add_const_uIcc
@[simp]
theorem image_const_sub_uIcc : (fun x => a - x) '' [[b, c]] = [[a - b, a - c]] := by
have := image_comp (fun x => a + x) fun x => -x; dsimp [Function.comp] at this
simp [sub_eq_add_neg, this, add_comm]
#align set.image_const_sub_uIcc Set.image_const_sub_uIcc
@[simp]
theorem image_sub_const_uIcc : (fun x => x - a) '' [[b, c]] = [[b - a, c - a]] := by
simp [sub_eq_add_neg, add_comm]
#align set.image_sub_const_uIcc Set.image_sub_const_uIcc
theorem image_neg_uIcc : Neg.neg '' [[a, b]] = [[-a, -b]] := by simp
#align set.image_neg_uIcc Set.image_neg_uIcc
variable {a b c d}
/-- If `[c, d]` is a subinterval of `[a, b]`, then the distance between `c` and `d` is less than or
equal to that of `a` and `b` -/
theorem abs_sub_le_of_uIcc_subset_uIcc (h : [[c, d]] ⊆ [[a, b]]) : |d - c| ≤ |b - a| := by
rw [← max_sub_min_eq_abs, ← max_sub_min_eq_abs]
rw [uIcc_subset_uIcc_iff_le] at h
exact sub_le_sub h.2 h.1
#align set.abs_sub_le_of_uIcc_subset_uIcc Set.abs_sub_le_of_uIcc_subset_uIcc
/-- If `c ∈ [a, b]`, then the distance between `a` and `c` is less than or equal to
that of `a` and `b` -/
theorem abs_sub_left_of_mem_uIcc (h : c ∈ [[a, b]]) : |c - a| ≤ |b - a| :=
abs_sub_le_of_uIcc_subset_uIcc <| uIcc_subset_uIcc_left h
#align set.abs_sub_left_of_mem_uIcc Set.abs_sub_left_of_mem_uIcc
/-- If `x ∈ [a, b]`, then the distance between `c` and `b` is less than or equal to
that of `a` and `b` -/
theorem abs_sub_right_of_mem_uIcc (h : c ∈ [[a, b]]) : |b - c| ≤ |b - a| :=
abs_sub_le_of_uIcc_subset_uIcc <| uIcc_subset_uIcc_right h
#align set.abs_sub_right_of_mem_uIcc Set.abs_sub_right_of_mem_uIcc
end LinearOrderedAddCommGroup
/-!
### Multiplication and inverse in a field
-/
section LinearOrderedField
variable [LinearOrderedField α] {a : α}
@[simp]
theorem preimage_mul_const_Iio (a : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Iio a = Iio (a / c) :=
ext fun _x => (lt_div_iff h).symm
#align set.preimage_mul_const_Iio Set.preimage_mul_const_Iio
@[simp]
theorem preimage_mul_const_Ioi (a : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Ioi a = Ioi (a / c) :=
ext fun _x => (div_lt_iff h).symm
#align set.preimage_mul_const_Ioi Set.preimage_mul_const_Ioi
@[simp]
theorem preimage_mul_const_Iic (a : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Iic a = Iic (a / c) :=
ext fun _x => (le_div_iff h).symm
#align set.preimage_mul_const_Iic Set.preimage_mul_const_Iic
@[simp]
theorem preimage_mul_const_Ici (a : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Ici a = Ici (a / c) :=
ext fun _x => (div_le_iff h).symm
#align set.preimage_mul_const_Ici Set.preimage_mul_const_Ici
@[simp]
theorem preimage_mul_const_Ioo (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Ioo a b = Ioo (a / c) (b / c) := by simp [← Ioi_inter_Iio, h]
#align set.preimage_mul_const_Ioo Set.preimage_mul_const_Ioo
@[simp]
theorem preimage_mul_const_Ioc (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Ioc a b = Ioc (a / c) (b / c) := by simp [← Ioi_inter_Iic, h]
#align set.preimage_mul_const_Ioc Set.preimage_mul_const_Ioc
@[simp]
theorem preimage_mul_const_Ico (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Ico a b = Ico (a / c) (b / c) := by simp [← Ici_inter_Iio, h]
#align set.preimage_mul_const_Ico Set.preimage_mul_const_Ico
@[simp]
theorem preimage_mul_const_Icc (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) ⁻¹' Icc a b = Icc (a / c) (b / c) := by simp [← Ici_inter_Iic, h]
#align set.preimage_mul_const_Icc Set.preimage_mul_const_Icc
@[simp]
theorem preimage_mul_const_Iio_of_neg (a : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Iio a = Ioi (a / c) :=
ext fun _x => (div_lt_iff_of_neg h).symm
#align set.preimage_mul_const_Iio_of_neg Set.preimage_mul_const_Iio_of_neg
@[simp]
theorem preimage_mul_const_Ioi_of_neg (a : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Ioi a = Iio (a / c) :=
ext fun _x => (lt_div_iff_of_neg h).symm
#align set.preimage_mul_const_Ioi_of_neg Set.preimage_mul_const_Ioi_of_neg
@[simp]
theorem preimage_mul_const_Iic_of_neg (a : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Iic a = Ici (a / c) :=
ext fun _x => (div_le_iff_of_neg h).symm
#align set.preimage_mul_const_Iic_of_neg Set.preimage_mul_const_Iic_of_neg
@[simp]
theorem preimage_mul_const_Ici_of_neg (a : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Ici a = Iic (a / c) :=
ext fun _x => (le_div_iff_of_neg h).symm
#align set.preimage_mul_const_Ici_of_neg Set.preimage_mul_const_Ici_of_neg
@[simp]
theorem preimage_mul_const_Ioo_of_neg (a b : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Ioo a b = Ioo (b / c) (a / c) := by simp [← Ioi_inter_Iio, h, inter_comm]
#align set.preimage_mul_const_Ioo_of_neg Set.preimage_mul_const_Ioo_of_neg
@[simp]
theorem preimage_mul_const_Ioc_of_neg (a b : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Ioc a b = Ico (b / c) (a / c) := by
simp [← Ioi_inter_Iic, ← Ici_inter_Iio, h, inter_comm]
#align set.preimage_mul_const_Ioc_of_neg Set.preimage_mul_const_Ioc_of_neg
@[simp]
theorem preimage_mul_const_Ico_of_neg (a b : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Ico a b = Ioc (b / c) (a / c) := by
simp [← Ici_inter_Iio, ← Ioi_inter_Iic, h, inter_comm]
#align set.preimage_mul_const_Ico_of_neg Set.preimage_mul_const_Ico_of_neg
@[simp]
theorem preimage_mul_const_Icc_of_neg (a b : α) {c : α} (h : c < 0) :
(fun x => x * c) ⁻¹' Icc a b = Icc (b / c) (a / c) := by simp [← Ici_inter_Iic, h, inter_comm]
#align set.preimage_mul_const_Icc_of_neg Set.preimage_mul_const_Icc_of_neg
@[simp]
theorem preimage_const_mul_Iio (a : α) {c : α} (h : 0 < c) : (· * ·) c ⁻¹' Iio a = Iio (a / c) :=
ext fun _x => (lt_div_iff' h).symm
#align set.preimage_const_mul_Iio Set.preimage_const_mul_Iio
@[simp]
theorem preimage_const_mul_Ioi (a : α) {c : α} (h : 0 < c) : (· * ·) c ⁻¹' Ioi a = Ioi (a / c) :=
ext fun _x => (div_lt_iff' h).symm
#align set.preimage_const_mul_Ioi Set.preimage_const_mul_Ioi
@[simp]
theorem preimage_const_mul_Iic (a : α) {c : α} (h : 0 < c) : (· * ·) c ⁻¹' Iic a = Iic (a / c) :=
ext fun _x => (le_div_iff' h).symm
#align set.preimage_const_mul_Iic Set.preimage_const_mul_Iic
@[simp]
theorem preimage_const_mul_Ici (a : α) {c : α} (h : 0 < c) : (· * ·) c ⁻¹' Ici a = Ici (a / c) :=
ext fun _x => (div_le_iff' h).symm
#align set.preimage_const_mul_Ici Set.preimage_const_mul_Ici
@[simp]
theorem preimage_const_mul_Ioo (a b : α) {c : α} (h : 0 < c) :
(· * ·) c ⁻¹' Ioo a b = Ioo (a / c) (b / c) := by simp [← Ioi_inter_Iio, h]
#align set.preimage_const_mul_Ioo Set.preimage_const_mul_Ioo
@[simp]
theorem preimage_const_mul_Ioc (a b : α) {c : α} (h : 0 < c) :
(· * ·) c ⁻¹' Ioc a b = Ioc (a / c) (b / c) := by simp [← Ioi_inter_Iic, h]
#align set.preimage_const_mul_Ioc Set.preimage_const_mul_Ioc
@[simp]
theorem preimage_const_mul_Ico (a b : α) {c : α} (h : 0 < c) :
(· * ·) c ⁻¹' Ico a b = Ico (a / c) (b / c) := by simp [← Ici_inter_Iio, h]
#align set.preimage_const_mul_Ico Set.preimage_const_mul_Ico
@[simp]
theorem preimage_const_mul_Icc (a b : α) {c : α} (h : 0 < c) :
(· * ·) c ⁻¹' Icc a b = Icc (a / c) (b / c) := by simp [← Ici_inter_Iic, h]
#align set.preimage_const_mul_Icc Set.preimage_const_mul_Icc
@[simp]
theorem preimage_const_mul_Iio_of_neg (a : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Iio a = Ioi (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Iio_of_neg a h
#align set.preimage_const_mul_Iio_of_neg Set.preimage_const_mul_Iio_of_neg
@[simp]
theorem preimage_const_mul_Ioi_of_neg (a : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Ioi a = Iio (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Ioi_of_neg a h
#align set.preimage_const_mul_Ioi_of_neg Set.preimage_const_mul_Ioi_of_neg
@[simp]
theorem preimage_const_mul_Iic_of_neg (a : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Iic a = Ici (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Iic_of_neg a h
#align set.preimage_const_mul_Iic_of_neg Set.preimage_const_mul_Iic_of_neg
@[simp]
theorem preimage_const_mul_Ici_of_neg (a : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Ici a = Iic (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Ici_of_neg a h
#align set.preimage_const_mul_Ici_of_neg Set.preimage_const_mul_Ici_of_neg
@[simp]
theorem preimage_const_mul_Ioo_of_neg (a b : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Ioo a b = Ioo (b / c) (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Ioo_of_neg a b h
#align set.preimage_const_mul_Ioo_of_neg Set.preimage_const_mul_Ioo_of_neg
@[simp]
theorem preimage_const_mul_Ioc_of_neg (a b : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Ioc a b = Ico (b / c) (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Ioc_of_neg a b h
#align set.preimage_const_mul_Ioc_of_neg Set.preimage_const_mul_Ioc_of_neg
@[simp]
theorem preimage_const_mul_Ico_of_neg (a b : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Ico a b = Ioc (b / c) (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Ico_of_neg a b h
#align set.preimage_const_mul_Ico_of_neg Set.preimage_const_mul_Ico_of_neg
@[simp]
theorem preimage_const_mul_Icc_of_neg (a b : α) {c : α} (h : c < 0) :
(· * ·) c ⁻¹' Icc a b = Icc (b / c) (a / c) := by
simpa only [mul_comm] using preimage_mul_const_Icc_of_neg a b h
#align set.preimage_const_mul_Icc_of_neg Set.preimage_const_mul_Icc_of_neg
@[simp]
theorem preimage_mul_const_uIcc (ha : a ≠ 0) (b c : α) :
(fun x => x * a) ⁻¹' [[b, c]] = [[b / a, c / a]] :=
(lt_or_gt_of_ne ha).elim
(fun h => by
simp [← Icc_min_max, h, h.le, min_div_div_right_of_nonpos, max_div_div_right_of_nonpos])
fun ha : 0 < a => by simp [← Icc_min_max, ha, ha.le, min_div_div_right, max_div_div_right]
#align set.preimage_mul_const_uIcc Set.preimage_mul_const_uIcc
@[simp]
theorem preimage_const_mul_uIcc (ha : a ≠ 0) (b c : α) :
(fun x => a * x) ⁻¹' [[b, c]] = [[b / a, c / a]] := by
simp only [← preimage_mul_const_uIcc ha, mul_comm]
#align set.preimage_const_mul_uIcc Set.preimage_const_mul_uIcc
@[simp]
theorem preimage_div_const_uIcc (ha : a ≠ 0) (b c : α) :
(fun x => x / a) ⁻¹' [[b, c]] = [[b * a, c * a]] := by
simp only [div_eq_mul_inv, preimage_mul_const_uIcc (inv_ne_zero ha), inv_inv]
#align set.preimage_div_const_uIcc Set.preimage_div_const_uIcc
@[simp]
theorem image_mul_const_uIcc (a b c : α) : (fun x => x * a) '' [[b, c]] = [[b * a, c * a]] :=
if ha : a = 0 then by simp [ha]
else calc
(fun x => x * a) '' [[b, c]] = (fun x => x * a⁻¹) ⁻¹' [[b, c]] :=
(Units.mk0 a ha).mulRight.image_eq_preimage _
_ = (fun x => x / a) ⁻¹' [[b, c]] := by simp only [div_eq_mul_inv]
_ = [[b * a, c * a]] := preimage_div_const_uIcc ha _ _
#align set.image_mul_const_uIcc Set.image_mul_const_uIcc
@[simp]
theorem image_const_mul_uIcc (a b c : α) : (fun x => a * x) '' [[b, c]] = [[a * b, a * c]] := by
simpa only [mul_comm] using image_mul_const_uIcc a b c
#align set.image_const_mul_uIcc Set.image_const_mul_uIcc
@[simp]
theorem image_div_const_uIcc (a b c : α) : (fun x => x / a) '' [[b, c]] = [[b / a, c / a]] := by
simp only [div_eq_mul_inv, image_mul_const_uIcc]
#align set.image_div_const_uIcc Set.image_div_const_uIcc
theorem image_mul_right_Icc' (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) '' Icc a b = Icc (a * c) (b * c) :=
((Units.mk0 c h.ne').mulRight.image_eq_preimage _).trans (by simp [h, division_def])
#align set.image_mul_right_Icc' Set.image_mul_right_Icc'
theorem image_mul_right_Icc {a b c : α} (hab : a ≤ b) (hc : 0 ≤ c) :
(fun x => x * c) '' Icc a b = Icc (a * c) (b * c) := by
cases eq_or_lt_of_le hc
· subst c
simp [(nonempty_Icc.2 hab).image_const]
exact image_mul_right_Icc' a b ‹0 < c›
#align set.image_mul_right_Icc Set.image_mul_right_Icc
theorem image_mul_left_Icc' {a : α} (h : 0 < a) (b c : α) :
(· * ·) a '' Icc b c = Icc (a * b) (a * c) := by
convert image_mul_right_Icc' b c h using 1 <;> simp only [mul_comm _ a]
#align set.image_mul_left_Icc' Set.image_mul_left_Icc'
theorem image_mul_left_Icc {a b c : α} (ha : 0 ≤ a) (hbc : b ≤ c) :
(· * ·) a '' Icc b c = Icc (a * b) (a * c) := by
convert image_mul_right_Icc hbc ha using 1 <;> simp only [mul_comm _ a]
#align set.image_mul_left_Icc Set.image_mul_left_Icc
theorem image_mul_right_Ioo (a b : α) {c : α} (h : 0 < c) :
(fun x => x * c) '' Ioo a b = Ioo (a * c) (b * c) :=
((Units.mk0 c h.ne').mulRight.image_eq_preimage _).trans (by simp [h, division_def])
#align set.image_mul_right_Ioo Set.image_mul_right_Ioo
theorem image_mul_left_Ioo {a : α} (h : 0 < a) (b c : α) :
(· * ·) a '' Ioo b c = Ioo (a * b) (a * c) := by
convert image_mul_right_Ioo b c h using 1 <;> simp only [mul_comm _ a]
#align set.image_mul_left_Ioo Set.image_mul_left_Ioo
/-- The (pre)image under `inv` of `Ioo 0 a` is `Ioi a⁻¹`. -/
theorem inv_Ioo_0_left {a : α} (ha : 0 < a) : (Ioo 0 a)⁻¹ = Ioi a⁻¹ := by
ext x
exact
⟨fun h => inv_inv x ▸ (inv_lt_inv ha h.1).2 h.2, fun h =>
⟨inv_pos.2 <| (inv_pos.2 ha).trans h,
inv_inv a ▸ (inv_lt_inv ((inv_pos.2 ha).trans h) (inv_pos.2 ha)).2 h⟩⟩
#align set.inv_Ioo_0_left Set.inv_Ioo_0_left
theorem inv_Ioi {a : α} (ha : 0 < a) : (Ioi a)⁻¹ = Ioo 0 a⁻¹ := by
rw [inv_eq_iff_eq_inv, inv_Ioo_0_left (inv_pos.2 ha), inv_inv]
#align set.inv_Ioi Set.inv_Ioi
theorem image_const_mul_Ioi_zero {k : Type _} [LinearOrderedField k] {x : k} (hx : 0 < x) :
(fun y => x * y) '' Ioi (0 : k) = Ioi 0 := by
erw [(Units.mk0 x hx.ne').mulLeft.image_eq_preimage, preimage_const_mul_Ioi 0 (inv_pos.mpr hx),
zero_div]
#align set.image_const_mul_Ioi_zero Set.image_const_mul_Ioi_zero
/-!
### Images under `x ↦ a * x + b`
-/
@[simp]
theorem image_affine_Icc' {a : α} (h : 0 < a) (b c d : α) :
(fun x => a * x + b) '' Icc c d = Icc (a * c + b) (a * d + b) := by
suffices (fun x => x + b) '' ((fun x => a * x) '' Icc c d) = Icc (a * c + b) (a * d + b) by
rwa [Set.image_image] at this
rw [image_mul_left_Icc' h, image_add_const_Icc]
#align set.image_affine_Icc' Set.image_affine_Icc'
end LinearOrderedField
end Set
|
If $M$ is a nonempty set of measures on a common $\sigma$-algebra, then the supremum measure of $M$ is the measure that assigns to each measurable set $X$ the supremum of the measures of $X$ over all finite subsets of $M$. |
<center><h1>Electromagnetic Compatibility (<a href='https://nastava.fesb.unist.hr/nastava/predmeti/13207' target='_blank' rel='noopener noreferrer'>FELO21</a>)</h1></center>
<center>~ Academic year 2021/2022 ~</center>
<center>Ante Lojić Kapetanović</center>
<center>Faculty of Electrical Engineering, Mechanical Engineering and Naval Architecture</center>
<center>University of Split, Split, Croatia</center>
---
### Course plan
1. Lab exercise 1 - **Introduction to the course**
- theoretical overview
- introduction to the `Jupyter Notebook` environment and the `Python 3.x` programming language
- introduction to the `PSpice` simulation software
2. Lab exercise 2 - low-pass filters
3. Lab exercise 3 - high-pass and band-pass filters
4. Lab exercise 4 - application of filters to real-world examples
5. Lab exercise 5 - **Demonstration exercise**: application of filters in inverse system identification
6. Lab exercise 6 - **Make-up** session
---
### 1. Introduction to the course
#### 1.0. Course logistics
- In addition to this introductory one, a total of 5 lab exercises will be held.
- For all subsequent exercises taking place in the laboratory, read the corresponding instructions, posted on [this web page](http://adria.fesb.hr/~alojic00/teaching/emc/reading/), before coming to the lab.
- You may miss at most one lab session; make-up sessions will be held, by arrangement, in the penultimate week of the semester.
#### 1.1. Theoretical overview
- Additional recommended material:
1. [Teorija elektromagnetskih polja s primjenama u inženjerstvu](https://shop.skolskaknjiga.hr/teorija-elektromagnetskih-polja-s-primjenama-u-inzenjerstvu.html) - chapter 28, Fundamentals of electromagnetic compatibility;
2. [Advanced Modeling in Computational Electromagnetic Compatibility](https://onlinelibrary.wiley.com/doi/book/10.1002/0470116889).
Electromagnetic compatibility (EMC) refers to the ability of a device to function satisfactorily within its electromagnetic environment:
- the device or system must not be disturbed by external fields, nor may it be a source of interference such as intolerable electromagnetic disturbance of electronic systems in its immediate surroundings;
- satisfactory functioning of a device or system implies correct operation and immunity to disturbances considered typical of the environment.
The task of EMC is to suppress any kind of electromagnetic interference (EMI) through 2 basic requirements:
1. immunity testing - once the device has been designed and built, it must be verified that the device is not a potential victim of interference, i.e. that it satisfies the EMC requirement that its operation is not disturbed by any external sources of electromagnetic interference produced in its immediate surroundings;
2. emission testing - during the design and construction of the device itself, it must be verified that the device is not a source of electromagnetic interference for any other devices or surrounding systems in its immediate environment.
To avoid the costs of building and testing the aforementioned electronic devices or systems, the basic task of **computational** electromagnetic compatibility (as opposed to the experimental approach discussed so far) is the **simulation** of electromagnetic behavior over a very wide range of parameters - different initial and boundary conditions, excitation waveforms, and finally the configuration and geometry of the system itself. The basic computational EMC model comprises an **interference source** (e.g., a radio transmitter, a mobile phone, a lightning strike, or some other type of electromagnetic pulse), a **coupling path**, which refers to the electromagnetic fields propagating through free space, a material, or a conductor, and an **interference victim** (e.g., a radio receiver, medical electronic equipment, the human body).
(The four basic coupling mechanisms: conductive, capacitive, inductive, and radiative. Ref: https://en.wikipedia.org/wiki/Electromagnetic_compatibility)
All computational EMC models stem from rigorous electromagnetic theory and from foundations based on Maxwell's equations, shown in differential vector form as follows:
$$
\begin{align}
\nabla \times \vec E &= - \frac{\partial \vec B}{\partial t}\\
\nabla \times \vec H &= \frac{\partial \vec D}{\partial t} + \vec J\\
\nabla \cdot \vec D &= \rho\\
\nabla \cdot \vec B &= 0
\end{align}
$$
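For example, in a source-free, linear, homogeneous medium ($\rho = 0$, $\vec J = 0$, $\vec D = \varepsilon \vec E$, $\vec B = \mu \vec H$), taking the curl of the first equation and substituting the second yields the wave equation on which propagation models along the coupling path are built:
$$
\nabla \times (\nabla \times \vec E) = -\mu \frac{\partial}{\partial t}(\nabla \times \vec H)
\quad \Longrightarrow \quad
\nabla^2 \vec E - \mu \varepsilon \frac{\partial^2 \vec E}{\partial t^2} = 0,
$$
using the vector identity $\nabla \times (\nabla \times \vec E) = \nabla(\nabla \cdot \vec E) - \nabla^2 \vec E$ together with $\nabla \cdot \vec E = 0$.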
EMC models are realized by applying analytical or numerical relations and methods. Analytical models are used for simple geometries with a high degree of symmetry (canonical problems), while numerical models are used for accurate simulations of complex practical engineering problems involving realistic scenarios and complicated domain geometries.
The basic classification of computational EMC models used in research, as well as in engineering practice:
1. circuit-theory models based on the concept of lumped electrical parameters;
2. transmission-line models based on distributed parameters, valid for coupling with low-frequency fields (see the telegrapher's equations below);
3. models based on antenna theory, which take radiation effects into account when solving electromagnetic wave propagation problems.
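As a sketch of what "distributed parameters" means in item 2: a uniform line is described, per unit length, by a series resistance $R'$ and inductance $L'$ together with a shunt conductance $G'$ and capacitance $C'$, which leads to the telegrapher's equations for the line voltage $v(x,t)$ and current $i(x,t)$:
$$
\begin{align}
\frac{\partial v(x,t)}{\partial x} &= -R'\, i(x,t) - L' \frac{\partial i(x,t)}{\partial t}\\
\frac{\partial i(x,t)}{\partial x} &= -G'\, v(x,t) - C' \frac{\partial v(x,t)}{\partial t}
\end{align}
$$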
Fundamentally, all of the above models can be further classified into:
1. continuous wave problems;
2. transients.
With respect to the coupling path, disturbances fall into two large groups:
1. conducted disturbances (e.g., induced overvoltages, harmonics);
2. radiated disturbances (e.g., voltages induced by lightning strikes, by radiation from antenna systems, by eavesdropping).
#### 1.2. Introduction to the `Jupyter Notebook` environment and the `Python 3.x` programming language
- exercise notebook: 01-lab-ex.ipynb
- additional recommended materials:
1. [Learn X in Y minutes](https://learnxinyminutes.com/docs/python/)
2. [Whirlwind Tour of Python](https://github.com/jakevdp/WhirlwindTourOfPython)
3. [Getting started with Python for science](https://scipy-lectures.org/intro/index.html)
4. [A Primer on Scientific Programming with Python](https://link.springer.com/book/10.1007%2F978-3-642-54959-5)
#### 1.3. Introduction to the `PSpice` simulation application
- click to download the .exe file: [download](http://adria.fesb.hr/~alojic00/teaching/emc/lab/PSpice/91pspstu_PSPICE_9_1.exe)
- installation: http://adria.fesb.hr/~alojic00/emc/lab/PSpice/docs/installation_win10.pdf
- tutorial: http://adria.fesb.hr/~alojic00/emc/PSpice/docs/tutorial.pdf
- `PSpice` makes it possible to simulate the behavior of electronic circuits on a digital computer, emulating a signal generator as well as measurement equipment such as multimeters, oscilloscopes, or frequency spectrum analyzers
- 4 basic types of circuit analysis in `PSpice`:
1. *Bias point* analysis; in this mode, the simulator computes the initial conditions for starting a *DC* or *AC sweep* analysis;
2. *DC sweep* analysis of the output on a linear or logarithmic scale with respect to the voltage set at the circuit input;
3. *AC sweep* analysis of the output on a linear or logarithmic scale with respect to the frequency set as the operating frequency of the circuit under test (a minimal `Python` sketch of such a sweep is given below);
4. transient analysis, which allows the output to be observed as a function of time with respect to the circuit parameters under test.
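As an illustration of what an *AC sweep* computes, the sketch below evaluates the frequency response of a first-order RC low-pass filter (the circuit family treated in the second lab exercise) in `Python`. The component values `R` and `C` are arbitrary placeholders chosen for illustration, not values prescribed by the lab.

```python
import numpy as np

# First-order RC low-pass filter: H(jw) = 1 / (1 + j*w*R*C).
# R and C are illustrative placeholder values.
R = 1e3      # resistance, ohm
C = 100e-9   # capacitance, farad
f_c = 1 / (2 * np.pi * R * C)  # cutoff (-3 dB) frequency, Hz

# Logarithmic frequency sweep, analogous to PSpice's AC sweep on a decade scale.
f = np.logspace(1, 6, 500)                 # 10 Hz .. 1 MHz
H = 1 / (1 + 1j * 2 * np.pi * f * R * C)   # complex transfer function

magnitude_db = 20 * np.log10(np.abs(H))    # gain in dB
phase_deg = np.angle(H, deg=True)          # phase in degrees

print(f"cutoff frequency: {f_c:.1f} Hz")
# Gain at the cutoff frequency is 1/sqrt(2), i.e. about -3 dB:
print(f"gain at cutoff: {20 * np.log10(1 / np.sqrt(2)):.2f} dB")
```

The `magnitude_db` and `phase_deg` arrays correspond to the magnitude and phase traces that `PSpice` plots after an *AC sweep* run.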
|
State Before: α : Type u
β : Type v
γ : Type ?u.7567
f g : Ultrafilter α
s t : Set α
p q : α → Prop
⊢ sᶜ ∈ f ↔ ¬s ∈ f State After: no goals Tactic: rw [← compl_not_mem_iff, compl_compl] |
Require Import Coq.Lists.List.
Require Import Axioms.
Require Import Tactics.
Require Import Equality.
Require Import Relation.
Require Import Syntax.
Require Import SimpSub.
Require Import Hygiene.
Require Import ContextHygiene.
Require Import Dynamic.
Require Import Reduction.
Require Import Equivalence.
Require Import Ofe.
Require Import Sequence.
Require Import Promote.
Arguments rw_nil {object}.
Arguments rw_cons {object i a}.
Arguments reducer {object a}.
Definition map_operator {A B : Type} (f : A -> B) (a : list nat) (th : operator A a)
: operator B a
:=
match th with
| oper_ext _ x => oper_ext _ (f x)
| oper_extt _ x => oper_extt _ (f x)
| oper_univ _ => oper_univ _
| oper_cty _ => oper_cty _
| oper_con _ => oper_con _
| oper_karrow _ => oper_karrow _
| oper_arrow _ => oper_arrow _
| oper_pi _ => oper_pi _
| oper_clam _ => oper_clam _
| oper_capp _ => oper_capp _
| oper_ctlam _ => oper_ctlam _
| oper_ctapp _ => oper_ctapp _
| oper_lam _ => oper_lam _
| oper_app _ => oper_app _
| oper_intersect _ => oper_intersect _
| oper_fut _ => oper_fut _
| oper_cnext _ => oper_cnext _
| oper_cprev _ => oper_cprev _
| oper_next _ => oper_next _
| oper_prev _ => oper_prev _
| oper_rec _ => oper_rec _
| oper_equal _ => oper_equal _
| oper_triv _ => oper_triv _
| oper_eqtype _ => oper_eqtype _
| oper_subtype _ => oper_subtype _
| oper_kuniv _ => oper_kuniv _
| oper_all _ => oper_all _
| oper_alltp _ => oper_alltp _
| oper_exist _ => oper_exist _
| oper_mu _ => oper_mu _
| oper_ispositive _ => oper_ispositive _
| oper_isnegative _ => oper_isnegative _
| oper_voidtp _ => oper_voidtp _
| oper_unittp _ => oper_unittp _
| oper_cunit _ => oper_cunit _
| oper_booltp _ => oper_booltp _
| oper_btrue _ => oper_btrue _
| oper_bfalse _ => oper_bfalse _
| oper_bite _ => oper_bite _
| oper_prod _ => oper_prod _
| oper_sigma _ => oper_sigma _
| oper_cpair _ => oper_cpair _
| oper_cpi1 _ => oper_cpi1 _
| oper_cpi2 _ => oper_cpi2 _
| oper_ppair _ => oper_ppair _
| oper_ppi1 _ => oper_ppi1 _
| oper_ppi2 _ => oper_ppi2 _
| oper_set _ => oper_set _
| oper_quotient _ => oper_quotient _
| oper_guard _ => oper_guard _
| oper_wt _ => oper_wt _
end.
Arguments map_operator {A B} f {a}.
Fixpoint map_term {A B : Type} (f : A -> B) (m : term A) {struct m} : @term B
:=
(match m with
| var j => var j
| oper a th r => oper a (map_operator f th) (map_row f a r)
end)
with map_row {A B : Type} (f : A -> B) (a : list nat) (r : row _ a) {struct r} : @row B a
:=
match r
in @row _ a
return @row B a
with
| rw_nil => rw_nil
| @rw_cons _ i a m r => @rw_cons _ i a (map_term f m) (map_row f a r)
end.
Arguments map_row {A B} f {a}.
Lemma map_ext :
forall A B (f : A -> B) (x : A),
map_term f (ext x) = ext (f x).
Proof.
auto.
Qed.
Lemma map_extt :
forall A B (f : A -> B) (x : A),
map_term f (extt x) = extt (f x).
Proof.
auto.
Qed.
Lemma map_var :
forall A B (f : A -> B) i, map_term f (var i) = var i.
Proof.
auto.
Qed.
Lemma map_univ :
forall A B (f : A -> B) m, map_term f (univ m) = univ (map_term f m).
Proof.
auto.
Qed.
Lemma map_cty :
forall A B (f : A -> B) m, map_term f (cty m) = cty (map_term f m).
Proof.
auto.
Qed.
Lemma map_con :
forall A B (f : A -> B) m1 m2, map_term f (con m1 m2) = con (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_karrow :
forall A B (f : A -> B) m1 m2, map_term f (karrow m1 m2) = karrow (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_arrow :
forall A B (f : A -> B) m1 m2, map_term f (arrow m1 m2) = arrow (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_pi :
forall A B (f : A -> B) m1 m2, map_term f (pi m1 m2) = pi (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_clam :
forall A B (f : A -> B) k a, map_term f (clam k a) = clam (map_term f k) (map_term f a).
Proof.
auto.
Qed.
Lemma map_capp :
forall A B (f : A -> B) m1 m2, map_term f (capp m1 m2) = capp (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_ctlam :
forall A B (f : A -> B) a b k, map_term f (ctlam a b k) = ctlam (map_term f a) (map_term f b) (map_term f k).
Proof.
auto.
Qed.
Lemma map_ctapp :
forall A B (f : A -> B) m1 m2, map_term f (ctapp m1 m2) = ctapp (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_lam :
forall A B (f : A -> B) m, map_term f (lam m) = lam (map_term f m).
Proof.
auto.
Qed.
Lemma map_app :
forall A B (f : A -> B) m1 m2, map_term f (app m1 m2) = app (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_intersect :
forall A B (f : A -> B) m1 m2, map_term f (intersect m1 m2) = intersect (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_fut :
forall A B (f : A -> B) k, map_term f (fut k) = fut (map_term f k).
Proof.
auto.
Qed.
Lemma map_cnext :
forall A B (f : A -> B) a, map_term f (cnext a) = cnext (map_term f a).
Proof.
auto.
Qed.
Lemma map_cprev :
forall A B (f : A -> B) a, map_term f (cprev a) = cprev (map_term f a).
Proof.
auto.
Qed.
Lemma map_next :
forall A B (f : A -> B) a, map_term f (next a) = next (map_term f a).
Proof.
auto.
Qed.
Lemma map_prev :
forall A B (f : A -> B) a, map_term f (prev a) = prev (map_term f a).
Proof.
auto.
Qed.
Lemma map_rec :
forall A B (f : A -> B) k, map_term f (rec k) = rec (map_term f k).
Proof.
auto.
Qed.
Lemma map_equal :
forall A B (f : A -> B) a m n, map_term f (equal a m n) = equal (map_term f a) (map_term f m) (map_term f n).
Proof.
auto.
Qed.
Lemma map_triv :
forall A B (f : A -> B), map_term f triv = triv.
Proof.
auto.
Qed.
Lemma map_eqtype :
forall A B (f : A -> B) m1 m2, map_term f (eqtype m1 m2) = eqtype (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_subtype :
forall A B (f : A -> B) m1 m2, map_term f (subtype m1 m2) = subtype (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_kuniv :
forall A B (f : A -> B) m, map_term f (kuniv m) = kuniv (map_term f m).
Proof.
auto.
Qed.
Lemma map_all :
forall A B (f : A -> B) m1 m2 m3, map_term f (all m1 m2 m3) = all (map_term f m1) (map_term f m2) (map_term f m3).
Proof.
auto.
Qed.
Lemma map_alltp :
forall A B (f : A -> B) m1, map_term f (alltp m1) = alltp (map_term f m1).
Proof.
auto.
Qed.
Lemma map_exist :
forall A B (f : A -> B) m1 m2 m3, map_term f (exist m1 m2 m3) = exist (map_term f m1) (map_term f m2) (map_term f m3).
Proof.
auto.
Qed.
Lemma map_mu :
forall A B (f : A -> B) m, map_term f (mu m) = mu (map_term f m).
Proof.
auto.
Qed.
Lemma map_ispositive :
forall A B (f : A -> B) m, map_term f (ispositive m) = ispositive (map_term f m).
Proof.
auto.
Qed.
Lemma map_isnegative :
forall A B (f : A -> B) m, map_term f (isnegative m) = isnegative (map_term f m).
Proof.
auto.
Qed.
Lemma map_voidtp :
forall A B (f : A -> B), map_term f voidtp = voidtp.
Proof.
auto.
Qed.
Lemma map_unittp :
forall A B (f : A -> B), map_term f unittp = unittp.
Proof.
auto.
Qed.
Lemma map_cunit :
forall A B (f : A -> B), map_term f cunit = cunit.
Proof.
auto.
Qed.
Lemma map_booltp :
forall A B (f : A -> B), map_term f booltp = booltp.
Proof.
auto.
Qed.
Lemma map_btrue :
forall A B (f : A -> B), map_term f btrue = btrue.
Proof.
auto.
Qed.
Lemma map_bfalse :
forall A B (f : A -> B), map_term f bfalse = bfalse.
Proof.
auto.
Qed.
Lemma map_bite :
forall A B (f : A -> B) a m n, map_term f (bite a m n) = bite (map_term f a) (map_term f m) (map_term f n).
Proof.
auto.
Qed.
Lemma map_prod :
forall A B (f : A -> B) m1 m2, map_term f (prod m1 m2) = prod (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_sigma :
forall A B (f : A -> B) m1 m2, map_term f (sigma m1 m2) = sigma (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_cpair :
forall A B (f : A -> B) m1 m2, map_term f (cpair m1 m2) = cpair (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_cpi1 :
forall A B (f : A -> B) a, map_term f (cpi1 a) = cpi1 (map_term f a).
Proof.
auto.
Qed.
Lemma map_cpi2 :
forall A B (f : A -> B) a, map_term f (cpi2 a) = cpi2 (map_term f a).
Proof.
auto.
Qed.
Lemma map_ppair :
forall A B (f : A -> B) m1 m2, map_term f (ppair m1 m2) = ppair (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_ppi1 :
forall A B (f : A -> B) a, map_term f (ppi1 a) = ppi1 (map_term f a).
Proof.
auto.
Qed.
Lemma map_ppi2 :
forall A B (f : A -> B) a, map_term f (ppi2 a) = ppi2 (map_term f a).
Proof.
auto.
Qed.
Lemma map_set :
forall A B (f : A -> B) m1 m2, map_term f (set m1 m2) = set (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_quotient :
forall A B (f : A -> B) m1 m2, map_term f (quotient m1 m2) = quotient (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_guard :
forall A B (f : A -> B) m1 m2, map_term f (guard m1 m2) = guard (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Lemma map_wt :
forall A B (f : A -> B) m1 m2, map_term f (wt m1 m2) = wt (map_term f m1) (map_term f m2).
Proof.
auto.
Qed.
Hint Rewrite map_ext map_extt map_var map_univ map_cty map_con map_karrow map_arrow map_pi map_clam map_capp map_ctlam map_ctapp map_lam map_app map_intersect map_fut map_cnext map_cprev map_next map_prev map_rec map_equal map_triv map_eqtype map_subtype map_kuniv map_all map_alltp map_exist map_mu map_ispositive map_isnegative map_voidtp map_unittp map_cunit map_booltp map_btrue map_bfalse map_bite map_prod map_sigma map_cpair map_cpi1 map_cpi2 map_ppair map_ppi1 map_ppi2 map_wt map_set map_quotient map_guard : map.
Lemma map_sumbool :
forall A B (f : A -> B) P Q (c : {P} + {Q}) m n,
map_term f (if c then m else n) = if c then map_term f m else map_term f n.
Proof.
intros A B f P Q c m n.
destruct c; auto.
Qed.
Hint Rewrite map_sumbool : map.
Fixpoint map_sub {A B : Type} (f : A -> B) (s : @sub A) {struct s} : @sub B
:=
match s with
| dot m s' => dot (map_term f m) (map_sub f s')
| sh i => sh i
end.
Lemma map_dot :
forall A B (f : A -> B) m s,
map_sub f (dot m s) = dot (map_term f m) (map_sub f s).
Proof.
auto.
Qed.
Lemma map_sh :
forall A B (f : A -> B) i,
map_sub f (sh i) = sh i.
Proof.
auto.
Qed.
Lemma map_id :
forall A B (f : A -> B),
map_sub f id = id.
Proof.
auto.
Qed.
Lemma map_project :
forall A B (f : A -> B) s i,
map_term f (project s i) = project (map_sub f s) i.
Proof.
intros A B f s i.
revert s.
induct i.
(* 0 *)
{
intros s.
case s; clear s.
{
intros m s.
simpsub.
rewrite -> map_dot.
simpsub.
reflexivity.
}
{
intro i.
simpsub.
rewrite -> map_sh.
simpsub.
rewrite -> map_var.
reflexivity.
}
}
(* S *)
{
intros n IH s.
case s; clear s.
{
intros m s.
simpsub.
rewrite -> map_dot.
simpsub.
apply IH.
}
{
intro i.
simpsub.
rewrite -> map_sh.
simpsub.
rewrite -> map_var.
reflexivity.
}
}
Qed.
Lemma map_traverse_and_row :
forall A B (f : A -> B) (resolve : nat -> nat -> term A),
(forall i m,
map_term f (traverse A resolve i m)
=
traverse B (fun i j => map_term f (resolve i j)) i (map_term f m))
/\
(forall i a r,
map_row f (traverse_row A resolve i a r)
=
traverse_row B (fun i j => map_term f (resolve i j)) i a (map_row f r)).
Proof.
intros A B f resolve.
exploit
(syntax_ind A
(fun m =>
forall i,
map_term f (traverse A resolve i m)
=
traverse B (fun i j => map_term f (resolve i j)) i (map_term f m))
(fun a r =>
forall i,
map_row f (traverse_row A resolve i a r)
=
traverse_row B (fun i j => map_term f (resolve i j)) i a (map_row f r))) as Hprop;
intros; cbn; f_equal; eauto.
cbn in Hprop.
destruct Hprop.
split; intros; eauto.
Qed.
Lemma map_traverse :
forall A B (f : A -> B) (resolve : nat -> nat -> term A) i m,
map_term f (traverse A resolve i m)
=
traverse B (fun i j => map_term f (resolve i j)) i (map_term f m).
Proof.
intros A B f resolve.
exact (map_traverse_and_row A B f resolve andel).
Qed.
Lemma map_shift :
forall A B (f : A -> B) n (m : term A),
map_term f (shift n m)
=
shift n (map_term f m).
Proof.
intros A B f n m.
unfold shift.
etransitivity.
{
apply map_traverse.
}
f_equal.
fextensionality 2.
intros i j.
set (X := Compare_dec.lt_dec j i).
destruct X; auto.
Qed.
Lemma map_subst :
forall A B (f : A -> B) (s : @sub A) (m : term A),
map_term f (subst s m)
=
subst (map_sub f s) (map_term f m).
Proof.
intros A B f s m.
unfold subst.
etransitivity.
{
apply map_traverse.
}
f_equal.
fextensionality 2.
intros i j.
set (X := Compare_dec.lt_dec j i).
destruct X; auto.
rewrite -> map_shift.
rewrite -> map_project.
reflexivity.
Qed.
Lemma map_trunc :
forall A B (f : A -> B) n (s : @sub A),
map_sub f (trunc n s) = trunc n (map_sub f s).
Proof.
intros A B f n s.
revert s.
induct n; auto.
(* S *)
intros n IH s.
cbn.
destruct s as [m s | i]; auto.
rewrite -> map_dot.
apply IH.
Qed.
Lemma map_compose :
forall A B (f : A -> B) s1 s2,
map_sub f (compose s1 s2) = compose (map_sub f s1) (map_sub f s2).
Proof.
intros A B f s1 s2.
revert s2.
induct s1.
(* dot *)
{
intros m s1 IH s2.
simpsub.
rewrite -> !map_dot.
simpsub.
rewrite -> IH.
rewrite -> map_subst.
reflexivity.
}
(* sh *)
{
intros n s2.
cbn.
apply map_trunc.
}
Qed.
Lemma map_under :
forall A B (f : A -> B) i s,
map_sub f (under i s) = under i (map_sub f s).
Proof.
intros A B f i s.
induct i.
(* 0 *)
{
simpsub.
reflexivity.
}
(* S *)
{
intros n IH.
rewrite -> !under_succ.
rewrite -> map_dot.
rewrite -> map_var.
rewrite -> map_compose.
rewrite -> IH.
rewrite -> map_sh.
reflexivity.
}
Qed.
Lemma map_subst1 :
forall A B (f : A -> B) (m n : term A),
map_term f (subst1 m n)
=
subst1 (map_term f m) (map_term f n).
Proof.
intros A B f m n.
unfold subst1.
apply map_subst.
Qed.
Lemma map_sh1 :
forall A B (f : A -> B), map_sub f sh1 = sh1.
Proof.
intros A B f.
unfold sh1.
apply map_sh.
Qed.
Hint Rewrite map_dot map_sh map_id map_project map_subst map_compose map_under map_subst1 map_sh1 : map.
Definition map_hyp {A B : Type} (f : A -> B) (h : @hyp A) : @hyp B :=
match h with
| hyp_tpl => hyp_tpl
| hyp_tp => hyp_tp
| hyp_tml m => hyp_tml (map_term f m)
| hyp_tm m => hyp_tm (map_term f m)
| hyp_emp => hyp_emp
end.
Lemma map_tpl :
forall A B (f : A -> B),
map_hyp f hyp_tpl = hyp_tpl.
Proof.
auto.
Qed.
Lemma map_tp :
forall A B (f : A -> B),
map_hyp f hyp_tp = hyp_tp.
Proof.
auto.
Qed.
Lemma map_tml :
forall A B (f : A -> B) m,
map_hyp f (hyp_tml m) = hyp_tml (map_term f m).
Proof.
auto.
Qed.
Lemma map_tm :
forall A B (f : A -> B) m,
map_hyp f (hyp_tm m) = hyp_tm (map_term f m).
Proof.
auto.
Qed.
Lemma map_emp :
forall A B (f : A -> B),
map_hyp f hyp_emp = hyp_emp.
Proof.
auto.
Qed.
Hint Rewrite map_tpl map_tp map_tml map_tm map_emp : map.
Definition map_ctx {A B : Type} (f : A -> B) :=
map (map_hyp f).
Lemma map_nil :
forall A B (f : A -> B),
map_ctx f nil = nil.
Proof.
auto.
Qed.
Lemma map_cons :
forall A B (f : A -> B) h G,
map_ctx f (cons h G) = cons (map_hyp f h) (map_ctx f G).
Proof.
auto.
Qed.
Lemma map_appctx :
forall A B (f : A -> B) G1 G2,
map_ctx f (G2 ++ G1) = map_ctx f G2 ++ map_ctx f G1.
Proof.
intros A B f G1 G2.
induct G2; auto.
intros; cbn.
f_equal; auto.
Qed.
Hint Rewrite map_nil map_cons map_appctx : map.
Lemma length_map_ctx :
forall A B (f : A -> B) (G : @context A),
length (map_ctx f G) = length G.
Proof.
intros A B f G.
induct G; cbn; auto.
Qed.
Lemma map_index :
forall A B (f : A -> B) i G h,
index i G h
-> index i (map_ctx f G) (map_hyp f h).
Proof.
intros A B f i G h H.
induct H.
(* 0 *)
{
intros; apply index_0.
}
(* S *)
{
intros i h' G h _ IH.
cbn.
apply index_S; auto.
}
Qed.
Lemma map_promote_hyp :
forall A B (f : A -> B) h,
map_hyp f (promote_hyp h) = promote_hyp (map_hyp f h).
Proof.
intros A B f h.
induct h; auto.
Qed.
Lemma map_promote :
forall A B (f : A -> B) G,
map_ctx f (promote G) = promote (map_ctx f G).
Proof.
intros A B f G.
induct G; auto.
intros h G IH.
cbn.
f_equal; auto using map_promote_hyp.
Qed.
Hint Rewrite map_promote_hyp map_promote : map.
Definition map_jud {A B : Type} (f : A -> B) (J : @judgement A) : @judgement B :=
match J with
| deq m1 m2 m3 => deq (map_term f m1) (map_term f m2) (map_term f m3)
end.
Lemma map_deq :
forall A B (f : A -> B) m1 m2 m3,
map_jud f (deq m1 m2 m3) = deq (map_term f m1) (map_term f m2) (map_term f m3).
Proof.
auto.
Qed.
Hint Rewrite map_deq : map.
Ltac simpmap :=
autorewrite with map.
Ltac simpmapin H :=
autorewrite with map in H.
Lemma map_substh :
forall A B (f : A -> B) (s : sub) (h : @hyp A),
map_hyp f (substh s h) = substh (map_sub f s) (map_hyp f h).
Proof.
intros A B f s h.
cases h; intros; simpsub; simpmap; auto.
Qed.
Lemma map_substctx :
forall A B (f : A -> B) (s : sub) (G : @context A),
map_ctx f (substctx s G) = substctx (map_sub f s) (map_ctx f G).
Proof.
intros A B f s G.
induct G; auto.
intros h G IH.
cbn.
f_equal; auto.
rewrite -> map_substh.
simpmap.
rewrite -> length_map_ctx.
reflexivity.
Qed.
Lemma map_substj :
forall A B (f : A -> B) (s : sub) (J : @judgement A),
map_jud f (substj s J) = substj (map_sub f s) (map_jud f J).
Proof.
intros A B f s J.
destruct J as [m n a].
simpmap; simpsub.
simpmap.
reflexivity.
Qed.
Hint Rewrite map_substh map_substctx map_substj : map.
Lemma map_hygiene :
forall A B (f : A -> B) P (m : term A),
hygiene P m
-> hygiene P (map_term f m).
Proof.
intros A B f P m Hcl.
induct Hcl
using (fun X => hygiene_mut_ind _ X
(fun P a r => hygiene_row P (map_row f r))).
(* var *)
{
intros P i H.
apply hygiene_var; auto.
}
(* oper *)
{
intros P a th r _ IH.
cbn.
apply hygiene_oper; auto.
}
(* nil *)
{
intros; apply hygiene_nil.
}
(* cons *)
{
intros P i a m r _ IH1 _ IH2.
cbn.
apply hygiene_cons; auto.
}
Qed.
Lemma map_hygiene_conv :
forall A B (f : A -> B) P (m : term A),
hygiene P (map_term f m)
-> hygiene P m.
Proof.
intros A B f P m Hcl.
remember (map_term f m) as m' eqn:Heq.
revert m Heq.
induct Hcl
using (fun X => hygiene_mut_ind _ X
(fun P a r' => forall r, r' = map_row f r -> hygiene_row P r)).
(* var *)
{
intros P i H m.
cases m.
2:{
intros; discriminate.
}
intros ? Heq.
cbn in Heq.
injection Heq.
intros <-.
apply hygiene_var; auto.
}
(* oper *)
{
intros P a' th' r' _ IH m.
cases m.
{
intros; discriminate.
}
intros a th r Heq.
cbn in Heq.
injection Heq.
intros H1 H2 <-.
injectionT H1.
intros ->.
injectionT H2.
intros ->.
apply hygiene_oper; auto.
}
(* nil *)
{
intros P r.
cases r.
intros _.
apply hygiene_nil.
}
(* cons *)
{
intros P i' a' m' r' _ IH1 _ IH2 r.
cases r.
intros i a m r Heq Heq'.
injection Heq.
intros <- <-.
so (proof_irrelevance _ Heq (eq_refl _)); subst Heq.
cbn in Heq'.
injection Heq'.
intros H ->.
injectionT H.
intros ->.
apply hygiene_cons; auto.
}
Qed.
Lemma map_hygieneh :
forall A B (f : A -> B) P (h : @hyp A),
hygieneh P h
-> hygieneh P (map_hyp f h).
Proof.
intros A B f P h H.
cases H; intros; simpmap;
[apply hygieneh_tpl | apply hygieneh_tp | apply hygieneh_tml | apply hygieneh_tm | apply hygieneh_emp]; auto using map_hygiene.
Qed.
Lemma map_hygieneh_conv :
forall A B (f : A -> B) P (h : @hyp A),
hygieneh P (map_hyp f h)
-> hygieneh P h.
Proof.
intros A B f P h H.
revert H.
cases h; intros; [apply hygieneh_tpl | apply hygieneh_tp | apply hygieneh_tml | apply hygieneh_tm | apply hygieneh_emp].
{
simpmapin H.
invert H.
eauto using map_hygiene_conv.
}
{
simpmapin H.
invert H.
eauto using map_hygiene_conv.
}
Qed.
Lemma map_term_sh1_under_form :
forall A B i (f : A -> B) (m : term A) (n : term B),
map_term f m = subst (under i sh1) n
-> exists m',
m = subst (under i sh1) m'
/\ n = map_term f m'.
Proof.
intros A B i f m n Heq.
assert (hygiene (fun j => j <> i) (subst (under i sh1) n)) as Hhyg.
{
eapply hygiene_shift_under'.
refine (hygiene_weaken _#4 _ (hygiene_okay _ _)).
intros x _.
omega.
}
rewrite <- Heq in Hhyg.
so (map_hygiene_conv _#5 Hhyg) as Hhyg'.
so (subst_into_absent_single _#3 unittp Hhyg') as Heq'.
simpsubin Heq'.
exists (subst (under i (dot unittp id)) m).
split.
{
rewrite <- subst_compose.
rewrite <- compose_under.
simpsub.
auto.
}
{
so (f_equal (fun z => subst (under i (dot unittp id)) z) Heq) as Heq''.
cbn in Heq''.
rewrite <- subst_compose in Heq''.
rewrite <- compose_under in Heq''.
simpsubin Heq''.
rewrite <- Heq''.
rewrite -> map_subst.
simpmap.
reflexivity.
}
Qed.
Lemma map_term_sh1_form :
forall A B (f : A -> B) (m : term A) (n : term B),
map_term f m = subst sh1 n
-> exists m',
m = subst sh1 m'
/\ n = map_term f m'.
Proof.
intros A B f m n H.
apply (map_term_sh1_under_form A B 0 f m n); auto.
Qed.
Lemma map_term_sh_form :
forall A B i (f : A -> B) (m : term A) (n : term B),
map_term f m = subst (sh i) n
-> exists m',
m = subst (sh i) m'
/\ n = map_term f m'.
Proof.
intros A B i f m n H.
revert m n H.
induct i.
(* 0 *)
{
intros m n H.
simpsubin H.
subst n.
exists m.
split; auto.
simpsub; auto.
}
(* S *)
{
intros i IH m n Heq.
replace (S i) with (i + 1) in Heq by omega.
rewrite <- compose_sh_sh in Heq.
rewrite -> subst_compose in Heq.
so (map_term_sh1_form _#5 Heq) as (m' & -> & Heq').
symmetry in Heq'.
so (IH _ _ Heq') as (m'' & -> & Heq'').
exists m''.
split; auto.
simpsub.
replace (i + 1) with (S i) by omega.
reflexivity.
}
Qed.
Definition inverses {A B : Type} (f : B -> A) (g : A -> B) : Prop
:=
forall x, f (g x) = x.
Definition injective {A B : Type} (f : A -> B) : Prop
:=
forall x y, f x = f y -> x = y.
Lemma inverses_impl_injective :
forall A B f g,
@inverses A B f g
-> injective g.
Proof.
intros A B f g Hinv x y Heq.
so (f_equal f Heq) as Heq'.
rewrite -> !Hinv in Heq'.
auto.
Qed.
Lemma map_operator_inv :
forall A B f g,
@inverses A B f g
-> forall a, inverses (@map_operator _ _ f a) (@map_operator _ _ g a).
Proof.
intros A B f g Hinv a th.
cases th; try (intros; auto; done).
(* ext *)
{
intros a.
cbn.
rewrite -> Hinv.
reflexivity.
}
(* extt *)
{
intros a.
cbn.
rewrite -> Hinv.
reflexivity.
}
Qed.
Lemma map_operator_inj :
forall A B (f : A -> B),
injective f
-> forall a, injective (@map_operator _ _ f a).
Proof.
intros A B f Hinj a th th' Heq.
set (a' := a) in th'.
assert (eq_dep _ (operator B) a (map_operator f th) a' (@map_operator _ _ f a' th')) as Heq'.
{
apply eq_impl_eq_dep_snd; auto.
}
cut (eq_dep _ (operator A) a th a' th').
{
apply eq_dep_impl_eq_snd; auto.
}
renameover Heq' into Heq.
assert (a = a') as Heqa by reflexivity.
clearbody a'.
revert Heqa Heq.
cases th; cases th';
try (intros; discriminate Heqa);
try (intros; so (eq_dep_impl_eq_snd _#5 Heq) as Heqth; discriminate Heqth);
try (intros; apply eq_dep_refl; done);
try (intros; apply eq_impl_eq_dep_snd; f_equal; cbn in Heq; injection (eq_dep_impl_eq_snd _#5 Heq); auto; done).
Qed.
Lemma map_term_inv :
forall A B f g,
@inverses A B f g
-> inverses (map_term f) (map_term g).
Proof.
intros A B f g Hinv m.
induct m using
(fun z => term_mut_ind _ z
(fun a r => map_row f (map_row g r) = r)); auto.
(* oper *)
{
intros a th r IH.
cbn.
rewrite -> map_operator_inv; auto.
rewrite -> IH.
reflexivity.
}
(* cons *)
{
intros i a m IH1 r IH2.
cbn.
rewrite -> IH2.
rewrite -> IH1.
reflexivity.
}
Qed.
Lemma map_term_inj :
forall A B (f : A -> B),
injective f
-> injective (map_term f).
Proof.
intros A B f Hinj m n Heq.
revert n Heq.
induct m using
(fun z => term_mut_ind _ z
(fun a r => forall s, map_row f r = map_row f s -> r = s)).
(* var *)
{
intros i n Heq.
destruct n as [j |]; cbn in Heq; [| discriminate Heq].
injection Heq.
auto.
}
(* oper *)
{
intros a th r IH n Heq.
destruct n as [| a' th' r']; cbn in Heq; [discriminate Heq |].
injection Heq.
intros H1 H2 <-.
injectionT H1.
injectionT H2.
intros Heqth Heqr.
f_equal.
{
eapply map_operator_inj; eauto.
}
apply IH; auto.
}
(* nil *)
{
intros s H.
so (row_nil_invert _ s); subst s.
reflexivity.
}
(* cons *)
{
intros i a m IH1 r IH2 s Heq.
so (row_cons_invert _#3 s) as (m' & r' & ->).
cbn in Heq.
injectionc Heq.
intros H Heqm.
injectionT H.
intros Heqr.
f_equal; auto.
}
Qed.
Lemma map_reduce :
forall A B (f : A -> B) (m n : term A),
reduce m n
-> reduce (map_term f m) (map_term f n).
Proof.
intros A B f m n Hmn.
induct Hmn using
(fun z => reduce_mut_ind _ z
(fun a r s => reducer (map_row f r) (map_row f s)));
try (intros; cbn; eauto using reduce_var, reduce_oper, reducer_nil, reducer_cons; done).
(* tapp_beta *)
{
intros m m' n n' _ IH1 _ IH2.
simpmap.
apply reduce_app_beta; auto.
}
(* prev_beta *)
{
intros m m' _ IH.
simpmap.
apply reduce_prev_beta; auto.
}
(* bite_beta1 *)
{
intros n n' p _ IH.
simpmap.
apply reduce_bite_beta1; auto.
}
(* bite_beta2 *)
{
intros n n' p _ IH.
simpmap.
apply reduce_bite_beta2; auto.
}
(* ppi1_beta *)
{
intros m m' n _ IH.
simpmap.
apply reduce_ppi1_beta; auto.
}
(* ppi2_beta *)
{
intros m m' n _ IH.
simpmap.
apply reduce_ppi2_beta; auto.
}
Qed.
Lemma map_reduces :
forall A B (f : A -> B) (m n : term A),
star reduce m n
-> star reduce (map_term f m) (map_term f n).
Proof.
intros A B f m n H.
eapply star_map; eauto using map_reduce.
Qed.
Lemma map_step :
forall A B (f : A -> B) (m n : term A),
step m n
-> step (map_term f m) (map_term f n).
Proof.
intros A B f m n Hmn.
induct Hmn;
try (intros; simpmap; eauto using step_app1, step_app2, step_prev1, step_prev2, step_bite1, step_bite2, step_bite3, step_ppi11, step_ppi12, step_ppi21, step_ppi22; done).
Qed.
Lemma map_steps :
forall A B (f : A -> B) (m n : term A),
star step m n
-> star step (map_term f m) (map_term f n).
Proof.
intros A B f m n H.
eapply star_map; eauto using map_step.
Qed.
Lemma map_eq_oper_invert :
forall A B (f : A -> B) m a th r,
map_term f m = oper a th r
-> exists th' r',
m = oper a th' r'
/\ map_operator f th' = th
/\ map_row f r' = r.
Proof.
intros A B f m a th r Heq.
destruct m as [n | a' th' r'].
{
cbn in Heq.
discriminate Heq.
}
cbn in Heq.
injection Heq.
intros Heqr Heqth ->.
exists th', r'.
so (existT_injection_2 _#5 Heqth).
subst th.
so (existT_injection_2 _#5 Heqr).
subst r.
auto.
Qed.
Lemma map_operator_same :
forall A B (f : A -> B) a (th : operator A a) (th' : operator B a),
map_operator f th = th'
-> same_operator a a th th'.
Proof.
intros A B f a th' th Heqth.
revert th Heqth.
induct th';
try (intros;
so (eq_impl_eq_dep _#6 (eq_refl _) Heqth) as Heq;
clear Heqth;
revert Heq;
induct th;
try (intros;
injectionT Heq;
intros; discriminate);
intros;
eauto with same_operator;
so (eq_dep_impl_eq_snd _#5 Heq) as Heq';
cbn in Heq';
injection Heq';
intros; subst;
eauto with same_operator;
done).
Qed.
Lemma map_eq_lam_invert :
forall A B (f : A -> B) m l,
map_term f m = lam l
-> exists l',
m = lam l'
/\ map_term f l' = l.
Proof.
intros A B f m l Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_cons_invert _ 1 nil r) as (l' & r' & ->).
so (row_nil_invert _ r'); subst r'.
exists l'.
split.
{
unfold lam.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_next_invert :
forall A B (f : A -> B) m n,
map_term f m = next n
-> exists n',
m = next n'
/\ map_term f n' = n.
Proof.
intros A B f m n Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & ->).
exists n'.
split.
{
unfold next.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_prev_invert :
forall A B (f : A -> B) m n,
map_term f m = prev n
-> exists n',
m = prev n'
/\ map_term f n' = n.
Proof.
intros A B f m n Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & ->).
exists n'.
split.
{
unfold prev.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_triv_invert :
forall A B (f : A -> B) m,
map_term f m = triv
-> m = triv.
Proof.
intros A B f m Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & _).
unfold triv.
f_equal.
2:{
so (row_nil_invert _ r); subst r.
reflexivity.
}
so (map_operator_same _#6 Heqth) as H.
invert H.
intros <-.
reflexivity.
Qed.
Lemma map_eq_bite_invert :
forall A B (f : A -> B) m n p q,
map_term f m = bite n p q
-> exists n' p' q',
m = bite n' p' q'
/\ map_term f n' = n
/\ map_term f p' = p
/\ map_term f q' = q.
Proof.
intros A B f m n p q Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & p' & q' & ->).
exists n', p', q'.
split.
{
unfold bite.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
intros <-.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_btrue_invert :
forall A B (f : A -> B) m,
map_term f m = btrue
-> m = btrue.
Proof.
intros A B f m Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & _).
unfold btrue.
f_equal.
2:{
so (row_nil_invert _ r); subst r.
reflexivity.
}
so (map_operator_same _#6 Heqth) as H.
invert H.
intros <-.
reflexivity.
Qed.
Lemma map_eq_bfalse_invert :
forall A B (f : A -> B) m,
map_term f m = bfalse
-> m = bfalse.
Proof.
intros A B f m Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & _).
unfold bfalse.
f_equal.
2:{
so (row_nil_invert _ r); subst r.
reflexivity.
}
so (map_operator_same _#6 Heqth) as H.
invert H.
intros <-.
reflexivity.
Qed.
Lemma map_eq_ppair_invert :
forall A B (f : A -> B) m n p,
map_term f m = ppair n p
-> exists n' p',
m = ppair n' p'
/\ map_term f n' = n
/\ map_term f p' = p.
Proof.
intros A B f m n p Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & p' & ->).
exists n', p'.
split.
{
unfold ppair.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
intros <-.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_ppi1_invert :
forall A B (f : A -> B) m n,
map_term f m = ppi1 n
-> exists n',
m = ppi1 n'
/\ map_term f n' = n.
Proof.
intros A B f m n Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & ->).
exists n'.
split.
{
unfold ppi1.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_ppi2_invert :
forall A B (f : A -> B) m n,
map_term f m = ppi2 n
-> exists n',
m = ppi2 n'
/\ map_term f n' = n.
Proof.
intros A B f m n Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
destruct H as (n' & ->).
exists n'.
split.
{
unfold ppi2.
f_equal.
so (map_operator_same _#6 Heqth) as H.
invert H.
auto.
}
{
cbn in Heqr.
injection Heqr.
auto.
}
Qed.
Lemma map_eq_ext_invert :
forall A B (f : A -> B) m x,
map_term f m = ext x
-> exists y,
m = ext y
/\ f y = x.
Proof.
intros A B f m x Heq.
so (map_eq_oper_invert _#7 Heq) as (th & r & -> & Heqth & Heqr).
so (row_invert_auto _ _ r) as H; cbn in H.
subst r.
clear Heq Heqr.
revert Heqth.
cases th; try (intros; discriminate Heqth).
intros y Heq.
cbn in Heq.
injection Heq.
intros <-.
exists y; auto.
Qed.
Lemma map_step_form :
forall A B (f : A -> B) (m : term A) (n : term B),
step (map_term f m) n
-> exists p,
n = map_term f p
/\ step m p.
Proof.
intros A B f m n H.
remember (map_term f m) as x eqn:Heqx.
revert m Heqx.
induct H.
(* tapp1 *)
{
intros m1 m1' m2 _ IH mm Heqx.
so (map_eq_oper_invert _#7 (eqsymm Heqx)) as (th & r & -> & Heqth & Heqr).
clear Heqx.
revert Heqth.
cases th; try (intros; discriminate Heqth).
intros _.
so (row_2_invert _#3 r) as (n1 & n2 & ->).
cbn in Heqr.
injectionc Heqr.
intros <- <-.
so (IH n1 (eq_refl _)) as (n1' & -> & Hstep).
exists (app n1' n2).
split.
{
simpmap.
reflexivity.
}
{
apply step_app1; auto.
}
}
(* tapp2 *)
{
intros m1 m2 mm Heqx.
so (map_eq_oper_invert _#7 (eqsymm Heqx)) as (th & r & -> & Heqth & Heqr).
clear Heqx.
revert Heqth.
cases th; try (intros; discriminate Heqth).
intros _.
so (row_2_invert _#3 r) as (a & p' & ->).
cbn in Heqr.
injectionc Heqr.
intros <- Heqa.
so (map_eq_lam_invert _#5 Heqa) as (m' & -> & <-).
clear Heqa.
fold (app (lam m') p').
exists (subst1 p' m').
split.
{
simpmap.
reflexivity.
}
{
apply step_app2.
}
}
(* prev1 *)
{
intros m m' _ IH mm Heq.
so (map_eq_prev_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (IH _ (eqsymm Hn)) as (p & -> & Hp).
exists (prev p).
split; auto.
apply step_prev1; auto.
}
(* prev2 *)
{
intros m m' Heq.
so (map_eq_prev_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (map_eq_next_invert _#5 Hn) as (p & -> & Hp).
exists p.
split; auto.
apply step_prev2.
}
(* bite1 *)
{
intros m1 m1' m2 m3 _ IH mm Heq.
so (map_eq_bite_invert _#7 (eqsymm Heq)) as (n & p & q & -> & Hn & Hp & Hq).
so (IH _ (eqsymm Hn)) as (n' & -> & Hn').
exists (bite n' p q).
split.
{
simpmap.
f_equal; auto.
}
{
apply step_bite1; auto.
}
}
(* bite2 *)
{
intros m1 m2 mm Heq.
so (map_eq_bite_invert _#7 (eqsymm Heq)) as (n & p & q & -> & Hn & Hp & Hq).
so (map_eq_btrue_invert _#4 Hn); subst n.
exists p.
split; auto.
apply step_bite2.
}
(* bite3 *)
{
intros m1 m2 mm Heq.
so (map_eq_bite_invert _#7 (eqsymm Heq)) as (n & p & q & -> & Hn & Hp & Hq).
so (map_eq_bfalse_invert _#4 Hn); subst n.
exists q.
split; auto.
apply step_bite3.
}
(* ppi11 *)
{
intros m m' _ IH mm Heq.
so (map_eq_ppi1_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (IH _ (eqsymm Hn)) as (p & -> & Hp).
exists (ppi1 p).
split; auto.
apply step_ppi11; auto.
}
(* ppi12 *)
{
intros m m' mm Heq.
so (map_eq_ppi1_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (map_eq_ppair_invert _#6 Hn) as (p & q & -> & Hp & Hq).
exists p.
split; auto.
apply step_ppi12.
}
(* ppi21 *)
{
intros m m' _ IH mm Heq.
so (map_eq_ppi2_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (IH _ (eqsymm Hn)) as (p & -> & Hp).
exists (ppi2 p).
split; auto.
apply step_ppi21; auto.
}
(* ppi22 *)
{
intros m m' mm Heq.
so (map_eq_ppi2_invert _#5 (eqsymm Heq)) as (n & -> & Hn).
so (map_eq_ppair_invert _#6 Hn) as (p & q & -> & Hp & Hq).
exists q.
split; auto.
apply step_ppi22.
}
Qed.
Lemma map_steps_form :
forall A B (f : A -> B) (m : term A) (n : term B),
star step (map_term f m) n
-> exists p,
n = map_term f p
/\ star step m p.
Proof.
intros A B f m n H.
remember (map_term f m) as x eqn:Heqx.
revert m Heqx.
induct H.
(* refl *)
{
intros x m ->.
exists m.
auto using star_refl.
}
(* step *)
{
intros m n p Hmn _ IH m' ->.
so (map_step_form _#5 Hmn) as (n' & -> & Hmn').
so (IH n' (eq_refl _)) as (p' & -> & Hnp').
exists p'.
split; auto.
eapply star_step; eauto.
}
Qed.
Lemma map_reduce_form :
forall A B (f : A -> B) (m : term A) (n : term B),
reduce (map_term f m) n
-> exists p,
n = map_term f p
/\ reduce m p.
Proof.
intros A B f m n H.
remember (map_term f m) as x eqn:Heqx.
revert m Heqx.
induct H using
(fun z => reduce_mut_ind _ z
(fun a r s => forall r', r = map_row f r' -> exists s', s = map_row f s' /\ reducer r' s')).
(* var *)
{
intros i m.
cases m; try (intros; discriminate Heqx).
intros j Heq.
rewrite -> map_var in Heq.
injection Heq.
intros <-.
exists (var i).
split; auto.
apply reduce_var.
}
(* oper *)
{
intros a th r s _ IH m.
cases m; try (intros; discriminate Heqx).
intros a' th' r' Heq.
cbn in Heq.
injection Heq.
intros Heqr Heqth <-.
so (existT_injection_2 _#5 Heqth); subst th.
so (existT_injection_2 _#5 Heqr); subst r.
clear Heq Heqr Heqth.
so (IH _ (eq_refl _)) as (s' & -> & Hrs).
exists (oper a th' s').
split; auto.
apply reduce_oper; auto.
}
(* tapp_beta *)
{
intros m n p q Hmn IH1 Hpq IH2 x Heqx.
so (map_eq_oper_invert _#7 (eqsymm Heqx)) as (th & r & -> & Heqth & Heqr).
clear Heqx.
revert Heqth.
cases th; try (intros; discriminate Heqth).
intros _.
so (row_2_invert _#3 r) as (a & p' & ->).
cbn in Heqr.
injectionc Heqr.
intros <- Heqa.
so (map_eq_lam_invert _#5 Heqa) as (m' & -> & <-).
clear Heqa.
fold (app (lam m') p').
so (IH1 _ (eq_refl _)) as (n' & -> & Hmn').
so (IH2 _ (eq_refl _)) as (q' & -> & Hpq').
exists (subst1 q' n').
split.
{
symmetry.
apply map_subst.
}
{
apply reduce_app_beta; auto.
}
}
(* prev_beta *)
{
intros m n Hmn IH x Heqx.
so (map_eq_prev_invert _#5 (eqsymm Heqx)) as (y & -> & Heqy).
so (map_eq_next_invert _#5 Heqy) as (p & -> & Hp).
so (IH _ (eqsymm Hp)) as (q & -> & Hq).
exists q.
split; auto.
apply reduce_prev_beta; auto.
}
(* bite_beta1 *)
{
intros m1 m1' m2 _ IH m Heqx.
so (map_eq_bite_invert _#7 (eqsymm Heqx)) as (n & p & q & -> & Hn & Hp & Hq).
so (map_eq_btrue_invert _#4 Hn); subst n.
so (IH _ (eqsymm Hp)) as (p' & -> & Hp').
exists p'.
split; auto.
apply reduce_bite_beta1; auto.
}
(* bite_beta2 *)
{
intros m1 m1' m2 _ IH m Heqx.
so (map_eq_bite_invert _#7 (eqsymm Heqx)) as (n & p & q & -> & Hn & Hp & Hq).
so (map_eq_bfalse_invert _#4 Hn); subst n.
so (IH _ (eqsymm Hq)) as (q' & -> & Hq').
exists q'.
split; auto.
apply reduce_bite_beta2; auto.
}
(* ppi1_beta *)
{
intros m1 m1' m2 _ IH x Heqx.
so (map_eq_ppi1_invert _#5 (eqsymm Heqx)) as (n & -> & Hn).
so (map_eq_ppair_invert _#6 Hn) as (p & q & -> & Hp & Hq).
so (IH _ (eqsymm Hp)) as (p' & -> & Hp').
exists p'.
split; auto.
apply reduce_ppi1_beta; auto.
}
(* ppi2_beta *)
{
intros m1 m1' m2 _ IH x Heqx.
so (map_eq_ppi2_invert _#5 (eqsymm Heqx)) as (n & -> & Hn).
so (map_eq_ppair_invert _#6 Hn) as (p & q & -> & Hp & Hq).
so (IH _ (eqsymm Hq)) as (p' & -> & Hp').
exists p'.
split; auto.
apply reduce_ppi2_beta; auto.
}
(* nil *)
{
intros r' H.
so (row_nil_invert _ r'); subst r'.
exists rw_nil.
auto using reducer_nil.
}
(* cons *)
{
intros i a m n r s Hmn IH1 Hrs IH2 x Heq.
so (row_cons_invert _#3 x) as (m' & r' & ->).
cbn in Heq.
injectionc Heq.
intros Heqr ->.
injectionT Heqr.
intros ->.
so (IH1 _ (eq_refl _)) as (n' & -> & Hmn').
so (IH2 _ (eq_refl _)) as (s' & -> & Hrs').
exists (rw_cons n' s').
split; eauto using reducer_cons.
}
Qed.
Lemma map_reduces_form :
forall A B (f : A -> B) (m : term A) (n : term B),
star reduce (map_term f m) n
-> exists p,
n = map_term f p
/\ star reduce m p.
Proof.
intros A B f m n H.
remember (map_term f m) as x eqn:Heqx.
revert m Heqx.
induct H.
(* refl *)
{
intros x m ->.
exists m.
auto using star_refl.
}
(* step *)
{
intros m n p Hmn _ IH m' ->.
so (map_reduce_form _#5 Hmn) as (n' & -> & Hmn').
so (IH n' (eq_refl _)) as (p' & -> & Hnp').
exists p'.
split; auto.
eapply star_step; eauto.
}
Qed.
Lemma map_equiv :
forall A B (f : A -> B) (m n : term A),
equiv m n
-> equiv (map_term f m) (map_term f n).
Proof.
intros A B f m n H.
eapply (star_map _ _ _ _ (map_term f)); eauto.
clear m n H.
intros m n H.
destruct H; eauto using map_reduce.
Qed.
Lemma map_equivh :
forall A B (f : A -> B) (h h' : @hyp A),
equivh h h'
-> equivh (map_hyp f h) (map_hyp f h').
Proof.
intros A B f h h' Hequiv.
cases Hequiv;
intros;
simpmap;
[apply equivh_tpl | apply equivh_tp | apply equivh_tml | apply equivh_tm | apply equivh_emp]; apply map_equiv; auto.
Qed.
Lemma map_equiv_conv :
forall A B (f : A -> B) (m n : term A),
injective f
-> equiv (map_term f m) (map_term f n)
-> equiv m n.
Proof.
intros A B f m n Hinj H.
so (church_rosser _#3 H) as (p & Hmp & Hnp).
so (map_reduces_form _#5 Hmp) as (p' & -> & Hmp').
so (map_reduces_form _#5 Hnp) as (p'' & Heq & Hnp').
so (map_term_inj _#3 Hinj _ _ Heq); subst p''.
eapply equiv_trans.
{
apply reduces_equiv; eauto.
}
{
apply equiv_symm.
apply reduces_equiv; auto.
}
Qed.
Lemma map_term_equiv_inj :
forall A B (f : A -> B),
injective f
-> forall (m n : term A),
equiv (map_term f m) (map_term f n)
-> equiv m n.
Proof.
intros v w h m n Heq.
eapply map_equiv_conv; eauto.
Qed.
Lemma map_closub :
forall A B (f : A -> B) P (s : @sub A),
closub P s
-> closub P (map_sub f s).
Proof.
intros A B f P s Hcl.
intros j Hj.
rewrite <- map_project.
apply map_hygiene.
eapply Hcl; eauto.
Qed.
Lemma map_operator_compose :
forall (A B C : Type) (f : B -> C) (g : A -> B) a (th : @operator A a),
map_operator f (map_operator g th)
=
map_operator (fun z => f (g z)) th.
Proof.
intros A B C f g a th.
case th; reflexivity.
Qed.
Lemma map_term_and_row_compose :
forall (A B C : Type) (f : B -> C) (g : A -> B),
(forall (m : term A),
map_term f (map_term g m)
=
map_term (fun z => f (g z)) m)
/\
(forall a (r : @row A a),
map_row f (map_row g r)
=
map_row (fun z => f (g z)) r).
Proof.
intros A B C f g.
exploit (syntax_ind A
(fun m =>
map_term f (map_term g m)
=
map_term (fun z => f (g z)) m)
(fun a r =>
map_row f (map_row g r)
=
map_row (fun z => f (g z)) r)) as Hprop;
intros; cbn; f_equal; eauto using map_operator_compose.
Qed.
Lemma map_term_compose :
forall (A B C : Type) (f : B -> C) (g : A -> B) (m : term A),
map_term f (map_term g m)
=
map_term (fun z => f (g z)) m.
Proof.
intros A B C f g.
exact (map_term_and_row_compose _#5 andel).
Qed.
Lemma map_sub_compose :
forall (A B C : Type) (f : B -> C) (g : A -> B) (s : @sub A),
map_sub f (map_sub g s)
=
map_sub (fun z => f (g z)) s.
Proof.
intros A B C f g s.
induct s; auto.
(* dot *)
{
intros m s IH.
cbn.
f_equal; auto.
apply map_term_compose.
}
Qed.
Lemma map_operator_id :
forall A a (th : @operator A a),
map_operator (fun z => z) th = th.
Proof.
intros A a th.
case th; reflexivity.
Qed.
Lemma map_term_and_row_id :
forall (A : Type),
(forall (m : term A), map_term (fun z => z) m = m)
/\
(forall a (r : @row A a), map_row (fun z => z) r = r).
Proof.
intros A.
exploit
(syntax_ind A
(fun m => map_term (fun z => z) m = m)
(fun a r => map_row (fun z => z) r = r)) as Hprop;
intros; cbn; f_equal; eauto using map_operator_id.
Qed.
Lemma map_term_id :
forall (A : Type) (m : term A),
map_term (fun z => z) m = m.
Proof.
intro A.
exact (map_term_and_row_id _ andel).
Qed.
Lemma map_sub_id :
forall (A : Type) (s : @sub A),
map_sub (fun z => z) s = s.
Proof.
intros A s.
induct s; auto.
intros m s IH.
cbn.
rewrite -> map_term_id.
f_equal; auto.
Qed.
Lemma map_value :
forall A B (f : A -> B) m,
value m
-> value (map_term f m).
Proof.
intros A B f m H.
invertc H.
intros a th r Hcanon <-.
cbn.
apply value_i.
clear r.
cases Hcanon; intros; cbn; auto using canon.
Qed.
|
-- Insert lean 3 code here.
namespace foo
/-- test -/
@[simp] def foo := 1
theorem foo_eq_one : foo.foo = 1 := rfl
end foo
|
import ideal
section generation
variables {R : Type} [comm_ring R]
/- Inductively define the (underlying set of) the ideal
generated by a set
-/
inductive gen_by' (A : set R) : set R
| mem_self : ∀ a ∈ A, gen_by' a
| zero_mem : gen_by' 0
| add_mem : ∀ x y : R, gen_by' x → gen_by' y → gen_by' (x + y)
| smul_mem : ∀ r x : R, gen_by' x → gen_by' (r*x)
def gen_by (A : set R) : ideal R :=
{ carrier := gen_by' A,
zero_mem' := gen_by'.zero_mem,
add_mem' := gen_by'.add_mem,
smul_mem' := gen_by'.smul_mem
}
/- TODO : a million helper lemmas
gen_by A ⊆ I ↔ A ⊆ I.carrier
gen_by A = intersection of ideals I st A ⊆ I.carrier,
etc.
-/
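/- A hedged sketch (not from the original file): the first helper lemma
   from the TODO list above, stated against the custom `ideal` structure's
   `carrier` field and left as `sorry` in the style of the lemmas below. -/
lemma gen_by_subset_iff (A : set R) (I : ideal R) :
  (gen_by A).carrier ⊆ I.carrier ↔ A ⊆ I.carrier := sorry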
end generation
/- An ideal is finitely generated if it is the ideal defined by a
finite set.
Lean defines a set X to be finite if there is a list L (built-in inductive type)
of elements of the set, such that ∀ x, x ∈ X → x ∈ L
-/
section fingen
variables {R : Type} [comm_ring R]
def fin_gen (I : ideal R) := ∃ A : set R, set.finite A ∧ gen_by A = I
end fingen
section noeth
variables (R : Type) [comm_ring R]
def noetherian := ∀ I : ideal R, fin_gen I
end noeth
section ascending_chain
variables (R : Type) [comm_ring R]
def asc_chain {R : Type} [comm_ring R] (I : ℕ → ideal R) := ∀ n : ℕ, I n ⊆ I (n + 1)
def asc_ch_cond (R : Type) [comm_ring R] :=
∀ (f : ℕ → ideal R), asc_chain f → ∃ N : ℕ, ∀ n : ℕ, n ≥ N → f n = f N
end ascending_chain
variables {R : Type} [comm_ring R]
lemma noetherian_of_acc (h : asc_ch_cond R) : noetherian R := sorry
lemma acc_of_noetherian (h : noetherian R) : asc_ch_cond R := sorry
-- need facts about polynomials, degrees? |
This centuries-old, highly specialised, time-consuming craft is regarded as one of the finest methods of shoe construction today. Goodyear welting utilises precision craftsmanship to painstakingly stitch and bond superior quality leathers to the soles of all our footwear. The result is some of the finest, most stylish and comfortable footwear available. Barker is proud to offer Goodyear welted footwear of distinction.
### Preparation
* [`numpy`](http://www.numpy.org) is a module for scientific computing (i.e. *vector* and *matrix* processing) with Python.
* [`matplotlib`](https://matplotlib.org) is a module for plotting.
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn
```
```python
from operator import mul
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
plt.style.use('fivethirtyeight')
```
## An example
Let us look at some hypothetical apartment rent data. We will only consider the price per square meter ($m^2$): `price = w0 * 1 + w1 * square_meters`. We call `price` the response variable; in the literature it is usually denoted $y$. We call `square_meters` a predictor variable. We call `w0` and `w1` weights - they are basically the *intercept* and *slope* of a line. `w0` has a special meaning and is called the `bias`. In our example we can interpret it as the fixed base price you would pay for 0 square meters of living area.
Here is a graph that visualizes a price with a 200 CU (currency units) base price and a 10 CU increase per square meter: `price = 200 + 10 * square_meters`. We also add some *observations*, i.e., some data that we read in a fictional listing or wherever. These data points have some *noise* - factors that we cannot account for. For example, some renters may have good or bad negotiation skills that make the price vary around the line.
```python
square_meters = np.linspace(0, 150)
price = 200 + 10 * square_meters
# a random number between 0 and 1 will be multiplied by 150 to
# kind of represent a spectrum of square meters.
number_of_points = 20
random_square_meters_data = np.random.sample(size = number_of_points) * 150
price_with_noise = 200 + 10 * random_square_meters_data + np.random.normal(size = number_of_points, scale = 50)
# plot data
_ = plt.plot(square_meters, price, color = 'grey')
_ = plt.plot(random_square_meters_data, price_with_noise, 'o', color = 'blue')
_ = plt.xlabel('Square meters')
_ = plt.ylabel('Price in CU')
plt.ylim(0, 2000)
plt.show()
```
The data here has one dimension: the square meters. But we rarely have only one-dimensional data (also see [problems with many dimensions](https://en.wikipedia.org/wiki/Curse_of_dimensionality)). For example, square meters are actually two variables: length and width. We can also add height as a third dimension that may additionally influence the price. So may the city, the weather, and anything else. We usually abstract from the actual names and simply refer to dimensions by their numbered variable: dimension $i$ is encoded in variable $x_i$. For example $x_1$ is `length`, $x_2$ is `width`, $x_3$ is `height` and so on. The *bias* usually has a special notation: $x_0$. We almost always need to take care of the *bias* variable, and we will do so below.
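To make the bias concrete in code, a common trick (a minimal sketch; the numbers are made up) is to prepend a constant $x_0 = 1$ to every sample, so one dot product covers the bias and all other weights:
```python
# three hypothetical samples with two predictors each (length, width)
X = np.array([[5.0, 4.0], [7.5, 3.0], [10.0, 6.0]])
# prepend the bias column x_0 = 1 to every sample
X_with_bias = np.column_stack((np.ones(X.shape[0]), X))
w = np.array([200.0, 10.0, 5.0])  # w0 (bias), w1, w2
# one dot product per sample now handles the bias implicitly
prices = np.dot(X_with_bias, w)
```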
We now have a conceptual understanding of dimensions and can jump into coding stuff.
The goal of the following is to explain how basic regression works.
Imagine we have gathered 20 samples of 'data' where for each price (response) we have `length`, `width`, `height`, and `distance` from downtown (predictors).
The weights influence the price as follows: we have `basic_price` ($w_0$), `length` ($x_1$), `width` ($x_2$), `height` ($x_3$), `distance` ($x_4$), and the last one `square_meters` ($x_5$). Thus the price results from the following:
$$price = w_0 + w_1 \cdot x_1 + w_2 \cdot x_2 + w_3 \cdot x_3 + w_4 \cdot x_4 + w_5 \cdot x_5$$
```python
weights = (np.random.uniform(size = 6) + 1) * [200, .5, .7, 1.5, -.05, 3.5]
weights
```
```python
n_samples = 20
np.set_printoptions(2, suppress=True)
tmp = np.column_stack((1.5 + np.random.sample(size = n_samples) * 10, # length
1.5 + np.random.sample(size = n_samples) * 10, # width
np.round(2 + np.random.sample(size = n_samples) * 2, decimals=1), # height
np.random.sample(size = n_samples) * 3000)) # distance from center in meters
samples_data_X = np.column_stack((tmp, tmp[:,0]*tmp[:,1])) # square meters
del(tmp)
samples_data_X
```
```python
samples_data_price_Y = np.dot(np.column_stack((np.ones(samples_data_X.shape[0]), samples_data_X)), weights)
# we add some noise to the response that adds unpredictable variance to the price.
samples_data_price_Y = samples_data_price_Y + np.random.normal(scale = 2.5, size = np.prod(samples_data_price_Y.shape))
samples_data_price_Y
```
Let's say we want to find the *weights* of a *linear model* to predict the price of an offer (the six $w_i$ from the equation above). Suppose a broker sent us data for `length=10`, `width=10`, `height=3`. Furthermore, we looked at some map data and got `distance=3044` meters from downtown. The broker didn't tell us the price yet - but let's see if we can estimate it ourselves. That way we can go into negotiations and back up our claims with data.
Having gathered the data, we compute the following:
$$\mathbf{\hat{w}} = (\mathbf{X}^T \mathbf{X})^{-1}\mathbf{X}^T \mathbf{y} $$
```python
bold_X = np.column_stack((np.ones(samples_data_X.shape[0]), samples_data_X))
estimated_weights = np.dot(np.dot(np.linalg.inv(np.dot(bold_X.T, bold_X)), bold_X.T), samples_data_price_Y)
estimated_weights
weights
```
Looks like the weights estimated from noisy data are not so far off. Of course, we wouldn't know the true weights in reality. And so, we would have to bargain with the broker for a price around:
```python
np.round(np.dot(estimated_weights, [1, 10, 10, 3, 3044, 10*10]), 2)
```
### Here be dragons
**What is this matrix voodoo above? And why does it look the way it does?**
We will derive the above equation based on the admirably clear [tutorial by Mark L. J. Orr](https://www.cc.gatech.edu/~isbell/tutorials/rbf-intro.pdf). And we will do it in the least mathematical way possible - mostly with **code**.
## Appendix A.1
A *vector* is simply an array of $n$ numbers $\mathbf{x}^T = [x_1, x_2, \ldots, x_n]$. The number $n$ represents the number of *dimensions*. The T-operator $\cdot^T$ simply tells us to treat the vector as a *column* vector - more on that in a second.
```python
x = np.arange(5)
x
```
A matrix is simply a 'collection' of vectors of the same dimension. Most of the time the matrix will have $m$ rows and $n$ columns.
**Example**: We want to have two (2) arrays. Each array contains five (5) numbers, i.e., is a 5-dimensional vector. The resulting matrix has $m=2$ rows and $n=5$ columns.
```python
v1 = np.arange(1, 6)
v1
v2 = np.arange(1, 6) * 2
v2
simple_matrix = np.stack([v1, v2])
simple_matrix
```
For computational purposes we seldom use $x$ as a single vector. Instead we treat it as a *row* entry in a matrix, i.e. as a *row vector*. Hence, in matrix notation, a vector has one (1) row and $n$ columns. (This is usually denoted as a *tuple* `(1, n)`.)
```python
v1 = np.reshape(np.arange(1,6), (1,5))
v1
```
Remember our T-operator? If we apply $\cdot^T$ it will *[T]ranspose* the matrix, i.e. rows will become columns and columns will become rows.
```python
v1.T
```
The same applies to matrices:
```python
simple_matrix = np.vstack([v1, v2])  # vstack stacks the (1, 5) and (5,) arrays into shape (2, 5)
simple_matrix
simple_matrix.T
```
Which brings us directly to matrix and vector operations. We denote the $k$-th vector with bold $\mathbf{x}_k$ (e.g. vector 1 is $\mathbf{x}_1$). We denote a matrix with a bold capital letter, for example $\mathbf{M}$. As discussed, we can access the $j$-th vector (i.e. $\mathbf{x}_j$) and its $i$-th value (the value in dimension $i$) - or in short: the value of $M_{ji}$.
We have the following operations (a plain-Python sketch of each follows after the next paragraph):
* vector addition
* vector and scalar multiplication
* vector matrix product
* matrix product
About the matrix product: you can only multiply matrices when the 'inner' shapes match: `(shape_1, inner_shape) x (inner_shape, shape_2)`. The shape of the resulting matrix will be `(shape_1, shape_2)`. Here the T-operator comes into play because it reverses the shape. This allows us to multiply a matrix with its transpose. The result has the same number of rows and columns and is termed a square matrix. An important property of a square matrix is that it can be inverted (provided it has full rank, which holds for $\mathbf{X}^T\mathbf{X}$ whenever the columns of $\mathbf{X}$ are linearly independent). The inverse of a matrix mirrors the reciprocal of a number: for a number $a$, the inverse is $\frac{1}{a}$ or $a^{-1}$, which means that $a \cdot a^{-1} = 1$.
The same is true for matrices, where $\mathbf{M} \cdot \mathbf{M}^{-1} = \mathbf{I}$. $\mathbf{I}$ is the identity matrix: a matrix whose only non-zero entries are on the diagonal with value 1, or in other terms $I_{ii} = 1$. The identity matrix's purpose is to provide an operation that yields the same 'object' when multiplied.
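Here is a minimal plain-Python sketch of the operations listed above (the function names are ours, not from any library), together with a check of the inverse property; the numpy equivalents are demonstrated in the next cell:
```python
def vec_add(u, v):
    # element-wise vector addition
    return [ui + vi for ui, vi in zip(u, v)]

def vec_scale(c, u):
    # multiply every entry of the vector by the scalar c
    return [c * ui for ui in u]

def mat_vec_product(M, v):
    # each result entry is the dot product of one row of M with v
    return [sum(m * x for m, x in zip(row, v)) for row in M]

def mat_product(A, B):
    # (m, k) x (k, n) -> (m, n); the inner dimension k must match
    return [[sum(A[i][l] * B[l][j] for l in range(len(B)))
             for j in range(len(B[0]))] for i in range(len(A))]

# inverse property: M times its inverse gives the identity matrix
# (a random matrix is invertible with probability 1)
M = np.random.sample(size=(3, 3))
np.allclose(np.dot(M, np.linalg.inv(M)), np.eye(3))
```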
```python
vector = np.arange(5)
vector
vector + 5 # 5 is 'broadcasted' into the array - i.e. + 5 is repeated for every entry
vector + vector # adding vectors element wise
vector * 3 # multiply every entry with 3
vector * vector # element-wise multiplication | THAT IS NOT A DOT PRODUCT | it is called the Hadamard product
np.dot(vector, vector) # that is the dot (inner) product; it results in a scalar
mat_1 = np.reshape(np.random.randint(1,100, size = 2*3), (2,3))
mat_2 = np.reshape(np.random.randint(1,100, size = 2*3), (2,3))
mat_1
mat_2
np.dot(mat_1, mat_2.T)
np.dot(mat_1, mat_2.T).shape
np.dot(mat_1.T, mat_2)
np.dot(mat_1.T, mat_2).shape
```
#### Why do we use matrix operations?
In linear models we usually compute:
$$y = w_0 \cdot 1 + w_1 \cdot x_1 + w_2 \cdot x_2 + w_3 \cdot x_3 + w_4 \cdot x_4 + w_5 \cdot x_5$$
$$y = w_0 + \sum\limits_{i=1}^n w_i \cdot x_i$$
For efficiency reasons we transform the vector $\mathbf{x} = (x_1, x_2, x_3, x_4, x_5)$ into $\mathbf{x} = (1, x_1, x_2, x_3, x_4, x_5)$
```python
example_weights = np.array([20,4,6])
example_weights
example_x = np.array([1, 4, - 5])
example_x
```
The equation above can basically be implemented like this (don't! it is highly inefficient!):
```python
def get_y(w, x):
res = 0
for i in range(len(x)):
res = res + w[i]*x[i]
return res
```
```python
get_y(example_weights, example_x)
```
A *vectorized* version is actually 'better':
```python
sum(example_weights * example_x)
```
For matrix operations we need to treat the vectors as matrices of shape `(1, 3)` (1 row of 3 dimensions). The key operation is the *dot product*, and since the inner dimensions must match, we need to arrange the vectors so that the result is a single number, i.e. both outer shapes are 1. Thus the dot product between these two matrix-vectors can be written as $\mathbf{w}\cdot\mathbf{x}^T$ for *row vectors* (and, in standard mathematical terms, for *column vectors*: $\mathbf{w}^T\mathbf{x}$, but never mind).
```python
example_weights2 = np.reshape(example_weights, (1, 3))
example_x2 = np.reshape(example_x, (1, 3))
np.dot(example_weights2, example_x2.T)
```
The timings speak for themselves:
```python
%timeit get_y(example_weights, example_x)
%timeit sum(example_weights * example_x)
%timeit np.dot(example_weights2, example_x2.T)
```
And that was just for one single small sample. We are usually dealing with many more. How about 10000 samples with 1000 dimensions?
```python
example_weights2 = np.reshape(np.random.sample(size = 1*1000), (1, 1000))
example_x2 = np.reshape(np.random.sample(size = 10000*1000), (10000, 1000))
```
```python
%timeit y1 = [get_y(example_weights2[0], x) for x in example_x2]
%timeit y2 = [sum(example_weights2[0] * x) for x in example_x2]
%timeit y3 = np.dot(example_weights2, example_x2.T)
```
**This is the reason why matrix operations are so important!** (And not only because of the math.)
## Appendix A.4 The optimal weight vector
Now we are getting to the bottom of things: given data in $\mathbf{X}$ ($m$ rows of $n$-dimensional vectors) we want to determine the weights (an $(n+1)$-dimensional vector, including the bias) of a linear model. As [shown exemplarily here](https://www.geogebra.org/m/xC6zq7Zv), the goal is to find a weight vector that minimizes the '*reconstruction error*' of the estimation, represented by the *sum of squared errors* (SSE):
$$E = \frac{1}{2}\sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} (\hat{y}_i - f(\mathbf{x}_i))^2$$
with $f(\mathbf{x}_i)$ being our estimator:
$$f(\mathbf{x}) = \sum\limits_{k=0}^n w_k \cdot x_k$$
(we add the factor $\frac{1}{2}$ to make our equations nicer ;-) )
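In code the error is a one-liner (a sketch; `X` is assumed to already contain the bias column):
```python
def sse(w, X, y):
    # 0.5 times the sum of squared residuals
    residuals = y - np.dot(X, w)
    return 0.5 * np.sum(residuals ** 2)
```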
Now, to find a minimum of a function (and we want to find parameters such that the error $E$ is minimal) math tells us to
* differentiate the function with respect to the variables (in our case $w_i$ from $\mathbf{w}$)
* equate the result with zero
* solve the equation for the variables
Let's do just that:
$$\frac{\delta E}{\delta w_j} = \frac{\delta \frac{1}{2} \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} (\hat{y}_i - f(\mathbf{x}_i))^2}{\delta w_j}$$
which is equally valid to
$$\frac{\delta E}{\delta w_j} = \frac{1}{2} \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} \frac{\delta (\hat{y}_i - f(\mathbf{x}_i))^2}{\delta w_j}$$
Let us focus on the term $\frac{\delta (\hat{y}_i - f(\mathbf{x}_i))^2}{\delta w_j}$. A simple application of the [chain rule](https://en.wikipedia.org/wiki/Chain_rule) tells us that $(h(g(x)))' = h'(g(x))\cdot g'(x)$ with $h(x)~~\hat{=}~~x^2$ and $g(x)~~\hat{=}~~\hat{y}_i - f(\mathbf{x}_i)$. The derivative of $h(x)$ is $h'(x) = 2\cdot x$, and the derivative of $g(x)$ with respect to $w_j$ is $g'(x) = - x_{ij}$.
Substituting the variables yields:
$$\frac{\delta (\hat{y}_i - f(\mathbf{x}_i))^2}{\delta w_j} = 2 \cdot (\hat{y}_i - f(\mathbf{x}_i)) \cdot {- x_{ij}}$$
Inserting into the error $E$ we get
$$\frac{\delta E}{\delta w_j} = \frac{1}{2} \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} 2 \cdot (\hat{y}_i - f(\mathbf{x}_i)) \cdot {- x_{ij}}$$
and equating to zero we get:
\begin{align}
0 & = \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} (\hat{y}_i - f(\mathbf{x}_i)) \cdot {- x_{ij}} \\
0 & = \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} - \hat{y}_i \cdot x_{ij} + f(\mathbf{x}_i) \cdot x_{ij} \\
0 & = - \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} \hat{y}_i \cdot x_{ij} + \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} f(\mathbf{x}_i) \cdot x_{ij} \\
\sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} \hat{y}_i \cdot x_{ij} & = \sum\limits_{\mathbf{x}_i \in \mathbf{X}, \hat{y}_i} f(\mathbf{x}_i) \cdot x_{ij} \\
\end{align}
Now, as we saw earlier, we can rewrite the sums over samples in *matrix form*:
$$\mathbf{X}^T\cdot \hat{\mathbf{y}} = \mathbf{X}^T\cdot \mathbf{f}$$
Our $\mathbf{f}$ is a vector of linear model estimations of the $\mathbf{x}_j$:
$$f_j = f(\mathbf{x}_j) = \sum\limits_{i=0}^n w_i \cdot x_{ji}$$
(with $x_{j0} = 1$) and hence can be rewritten as $\mathbf{x}_j^T\hat{\mathbf{w}}$, as we saw earlier.
$$
\mathbf{f} = \begin{bmatrix}f_1\\\ldots\\f_m\end{bmatrix} = \begin{bmatrix}\mathbf{x}_1^T\hat{\mathbf{w}}\\\ldots\\\mathbf{x}_m^T\hat{\mathbf{w}}\end{bmatrix} = \mathbf{X}\mathbf{\hat{w}}
$$
Then, from above, we have:
\begin{align}
\mathbf{X}^T \hat{\mathbf{y}} & = \mathbf{X}^T \cdot \mathbf{f} \\
\mathbf{X}^T \hat{\mathbf{y}} & = \mathbf{X}^T \mathbf{X} \cdot \mathbf{\hat{w}}
\end{align}
And solving for $\mathbf{\hat{w}}$ (i.e. 'dividing' by $\mathbf{X}^T \mathbf{X}$, or better yet: multiplying by the inverse $(\mathbf{X}^T \mathbf{X})^{-1}$) finally yields:
$$ \mathbf{\hat{w}} = (\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \hat{\mathbf{y}}$$
This is our solution from above. Simply coded as `np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y)`.
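A practical aside: explicitly inverting $\mathbf{X}^T \mathbf{X}$ is the least numerically robust way to evaluate this. Solving the normal equations directly, or calling a least-squares routine, is usually preferred; both should agree with the closed form up to floating-point noise (a sketch reusing `bold_X` and `samples_data_price_Y` from above):
```python
# solve (X^T X) w = X^T y without forming the inverse explicitly
w_solve = np.linalg.solve(np.dot(bold_X.T, bold_X),
                          np.dot(bold_X.T, samples_data_price_Y))
# or let numpy's least-squares routine do the work
w_lstsq, *_ = np.linalg.lstsq(bold_X, samples_data_price_Y, rcond=None)
np.allclose(w_solve, w_lstsq)
```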
## Runtime information
Numpy, BLAS, and LAPACK.
```python
np.__file__
np.__version__
np.show_config()
```
```bash
%%bash
ldd ~/anaconda3/lib/python3.6/site-packages/numpy/core/multiarray.cpython-36m-x86_64-linux-gnu.so
```
|
[STATEMENT]
lemma [code abstract]: "vec_nth (columnvector v) = columnvector_row v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ($) (columnvector v) = columnvector_row v
[PROOF STEP]
unfolding columnvector_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ($) (\<chi>i j. v $ i) = columnvector_row v
[PROOF STEP]
unfolding columnvector_row_def[abs_def]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ($) (\<chi>i j. v $ i) = (\<lambda>i. \<chi>j. v $ i)
[PROOF STEP]
by auto |
% -*- coding: UTF-8 -*-
% vim: autoindent expandtab tabstop=4 sw=4 sts=4 filetype=tex
\chapter{Meeting minutes}
\label{chap:10_meeting_minutes}
\VerbatimInput[label=20160224]{inc/static/attachment/minutes/20160224.rst}
\newpage
\VerbatimInput[label=20160401]{inc/static/attachment/minutes/20160401.rst}
\newpage
\VerbatimInput[label=20160427]{inc/static/attachment/minutes/20160427.rst}
\newpage
\VerbatimInput[label=20160715]{inc/static/attachment/minutes/20160715.rst}
|
(* Title: A Definitional Encoding of TLA in Isabelle/HOL
Authors: Gudmund Grov <ggrov at inf.ed.ac.uk>
Stephan Merz <Stephan.Merz at loria.fr>
Year: 2011
Maintainer: Gudmund Grov <ggrov at inf.ed.ac.uk>
*)
header {* Representing state in TLA* *}
theory State
imports Liveness
begin
text{*
We adopt the hidden state approach, as used in the existing
Isabelle/HOL TLA embedding \cite{Merz98}. This approach is also used
in \cite{Ehmety01}.
Here, a state space is defined by its projections, and everything else is
unknown. Thus, a variable is a projection of the state space, and has the same
type as a state function. Moreover, strong typing is achieved, since the projection
function may have any result type. To achieve this, the state space is represented
by an undefined type, which is an instance of the @{text world} class to enable
use with the @{text Intensional} theory.
*}
typedecl state
instance state :: world ..
type_synonym 'a statefun = "(state,'a) stfun"
type_synonym statepred = "bool statefun"
type_synonym 'a tempfun = "(state,'a) formfun"
type_synonym temporal = "state formula"
text {*
Formalizing type state would require formulas to be tagged with
their underlying state space and would result in a system that is
much harder to use. (Unlike Hoare logic or Unity, TLA has quantification
over state variables, and therefore one usually works with different
state spaces within a single specification.) Instead, state is just
an anonymous type whose only purpose is to provide Skolem constants.
Moreover, we do not define a type of state variables separate from that
of arbitrary state functions, again in order to simplify the definition
of flexible quantification later on. Nevertheless, we need to distinguish
state variables, mainly to define the enabledness of actions. The user
identifies (tuples of) ``base'' state variables in a specification via the
``meta predicate'' @{text basevars}, which is defined here.
*}
definition stvars :: "'a statefun \<Rightarrow> bool"
where basevars_def: "stvars \<equiv> surj"
syntax
"PRED" :: "lift \<Rightarrow> 'a" ("PRED _")
"_stvars" :: "lift \<Rightarrow> bool" ("basevars _")
translations
"PRED P" \<rightharpoonup> "(P::state => _)"
"_stvars" \<rightleftharpoons> "CONST stvars"
text {*
Base variables may be assigned arbitrary (type-correct) values.
In the following lemma, note that @{text vs} may be a tuple of variables.
The correct identification of base variables is up to the user who must
take care not to introduce an inconsistency. For example, @{term "basevars (x,x)"}
would definitely be inconsistent.
*}
lemma basevars: "basevars vs \<Longrightarrow> \<exists>u. vs u = c"
proof (unfold basevars_def surj_def)
assume "\<forall>y. \<exists>x. y = vs x"
then obtain x where "c = vs x" by blast
thus "\<exists>u. vs u = c" by blast
qed
lemma baseE:
assumes H1: "basevars v" and H2:"\<And>x. v x = c \<Longrightarrow> Q"
shows "Q"
using H1[THEN basevars] H2 by auto
text {* A variant written for sequences rather than single states. *}
lemma first_baseE:
assumes H1: "basevars v" and H2: "\<And>x. v (first x) = c \<Longrightarrow> Q"
shows "Q"
using H1[THEN basevars] H2 by (force simp: first_def)
lemma base_pair1:
assumes h: "basevars (x,y)"
shows "basevars x"
proof (auto simp: basevars_def)
fix c
from h[THEN basevars] obtain s where "(LIFT (x,y)) s = (c, arbitrary)" by auto
thus "c \<in> range x" by auto
qed
lemma base_pair2:
assumes h: "basevars (x,y)"
shows "basevars y"
proof (auto simp: basevars_def)
fix d
from h[THEN basevars] obtain s where "(LIFT (x,y)) s = (arbitrary, d)" by auto
thus "d \<in> range y" by auto
qed
lemma base_pair: "basevars (x,y) \<Longrightarrow> basevars x \<and> basevars y"
by (auto elim: base_pair1 base_pair2)
text {*
Since the @{typ unit} type has just one value, any state function of unit type
satisfies the predicate @{text basevars}. The following theorem can sometimes
be useful because it gives a trivial solution for @{text basevars} premises.
*}
lemma unit_base: "basevars (v::state \<Rightarrow> unit)"
by (auto simp: basevars_def)
text {*
A pair of the form @{text "(x,x)"} will generally not satisfy the predicate
@{text basevars} -- except for pathological cases such as @{text "x::unit"}.
*}
lemma
fixes x :: "state \<Rightarrow> bool"
assumes h1: "basevars (x,x)"
shows "False"
proof -
from h1 have "\<exists>u. (LIFT (x,x)) u = (False,True)" by (rule basevars)
thus False by auto
qed
lemma
fixes x :: "state \<Rightarrow> nat"
assumes h1: "basevars (x,x)"
shows "False"
proof -
from h1 have "\<exists>u. (LIFT (x,x)) u = (0,1)" by (rule basevars)
thus False by auto
qed
text {*
The following theorem reduces the reasoning about the existence of a
state sequence satisfying an enabledness predicate to finding a suitable
value @{text c} at the successor state for the base variables of the
specification. This rule is intended for reasoning about standard TLA
specifications, where @{text Enabled} is applied to actions, not arbitrary
pre-formulas.
*}
lemma base_enabled:
assumes h1: "basevars vs"
and h2: "\<And>u. vs (first u) = c \<Longrightarrow> ((first s) ## u) \<Turnstile> F"
shows "s \<Turnstile> Enabled F"
using h1 proof (rule first_baseE)
fix t
assume "vs (first t) = c"
hence "((first s) ## t) \<Turnstile> F" by (rule h2)
thus "s \<Turnstile> Enabled F" unfolding enabled_def by blast
qed
subsection "Temporal Quantifiers"
text{*
In \cite{Lamport94}, Lamport gives a stuttering invariant definition
of quantification over (flexible) variables. It relies on similarity
of two sequences (as supported in our @{theory Sequence} theory), and
equivalence of two sequences up to a variable (the bound variable).
However, sequence equaivalence up to a variable, requires state
equaivalence up to a variable. Our state representation above does not
support this, hence we cannot encode Lamport's definition in our theory.
Thus, we need to axiomatise quantification over (flexible) variables.
Note that with a state representation supporting this, our theory should
allow such an encoding.
*}
consts
EEx :: "('a statefun \<Rightarrow> temporal) \<Rightarrow> temporal" (binder "Eex " 10)
AAll :: "('a statefun \<Rightarrow> temporal) \<Rightarrow> temporal" (binder "Aall " 10)
syntax
"_EEx" :: "[idts, lift] \<Rightarrow> lift" ("(3EEX _./ _)" [0,10] 10)
"_AAll" :: "[idts, lift] \<Rightarrow> lift" ("(3AALL _./ _)" [0,10] 10)
translations
"_EEx v A" == "Eex v. A"
"_AAll v A" == "Aall v. A"
syntax (xsymbols)
"_EEx" :: "[idts, lift] => lift" ("(3\<exists>\<exists> _./ _)" [0,10] 10)
"_AAll" :: "[idts, lift] => lift" ("(3\<forall>\<forall> _./ _)" [0,10] 10)
axiomatization where
eexI: "\<turnstile> F x \<longrightarrow> (\<exists>\<exists> x. F x)"
and eexE: "\<lbrakk>s \<Turnstile> (\<exists>\<exists> x. F x) ; basevars vs; (!! x. \<lbrakk> basevars (x,vs); s \<Turnstile> F x \<rbrakk> \<Longrightarrow> s \<Turnstile> G)\<rbrakk>
\<Longrightarrow> (s \<Turnstile> G)"
and all_def: "\<turnstile> (\<forall>\<forall> x. F x) = (\<not>(\<exists>\<exists> x. \<not>(F x)))"
and eexSTUT: "STUTINV F x \<Longrightarrow> STUTINV (\<exists>\<exists> x. F x)"
and history: "\<turnstile> (I \<and> \<box>[A]_v) = (\<exists>\<exists> h. ($h = ha) \<and> I \<and> \<box>[A \<and> h$=hb]_(h,v))"
lemmas eexI_unl = eexI[unlift_rule] --{* @{text "w \<Turnstile> F x \<Longrightarrow> w \<Turnstile> (\<exists>\<exists> x. F x)"} *}
text {*
@{text tla_defs} can be used to unfold TLA definitions into lowest predicate level.
This is particularly useful for reasoning about enabledness of formulas.
*}
lemmas tla_defs = unch_def before_def after_def first_def second_def suffix_def
tail_def nexts_def app_def angle_actrans_def actrans_def
end
|
\chapter{Deep learning for PDEs}
\input{6DL/DL-PDE} |
For any positive integer $n$, the map $k \mapsto e^{2 \pi i k / n}$ is a bijection from $\{0, 1, \ldots, n-1\}$ to the set of $n$th roots of unity. |
<!-- dom:TITLE: FFM232, Classical physics and vector fields - Lecture notes -->
# FFM232, Classical physics and vector fields - Lecture notes
<!-- dom:AUTHOR: [Christian Forssén](http://fy.chalmers.se/subatom/tsp/), Department of Physics, Chalmers, Gothenburg, Sweden -->
<!-- Author: --> **[Christian Forssén](http://fy.chalmers.se/subatom/tsp/), Department of Physics, Chalmers, Gothenburg, Sweden**
Date: **Sep 9, 2016**
# 12. Tensors
* Physical laws should not depend on the coordinate system in which they are described.
* We can achieve this by writing these laws as an equality between two objects that we know transform in the same way under a coordinate transformation.
* We will call such objects *tensors*.
* An invariance can be tied to a symmetry (e.g. rotational symmetry). The invariance can be encoded by using the "language of tensors".
* This can be generalized to larger symmetries, e.g. Lorentz invariance in special relativity, which involves space-time (four dimensions), and more general coordinate transformations in general relativity.
Plan:
* How to describe transformations between Cartesian coordinate systems
* Transformation properties of: scalars, vectors, ... general tensors
* Showing that various objects really are tensors
* A physical example: the inertia tensor
## Coordinate transformations
When we change coordinates from one right-handed Cartesian system, with coordinates
$x_i$, to another (with its origin at the same point), with coordinates $x'_i$, they are related by
<!-- Equation labels as ordinary links -->
<div id="eq:transformation"></div>
$$
\begin{equation}
x'_i=L_{ij}x_j,
\label{eq:transformation} \tag{1}
\end{equation}
$$
where $\mathbf{L}$ is an orthogonal matrix satisfying $\mathbf{L}\mathbf{L}^t = \mathbf{I}=\mathbf{L}^t\mathbf{L}$. From this it follows directly that $\det(\mathbf{L}^t\mathbf{L}) = \det(\mathbf{L})^2 = \det\mathbf{I}=1$, which gives $\det\mathbf{L}=\pm 1$.
[Draw 1: an $x'y'$ system rotated by an angle $\alpha$ relative to an $xy$ system.]
$$
\begin{equation}
\begin{pmatrix}
x' \\ y'
\end{pmatrix}
=
\begin{pmatrix}
\cos\alpha & \sin\alpha \\
-\sin\alpha & \cos\alpha
\end{pmatrix}
\begin{pmatrix}
x \\ y
\end{pmatrix}
\end{equation}
$$
[Comment 2: If $\mathbf{L}$ transforms a right-handed system into a right-handed system, the plus sign applies, $\det\mathbf{L}=1$. Check in the example above that $\mathbf{L}^t \mathbf{L} = \mathbf{I}$.]
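A quick numerical check of these two properties (a sketch using numpy, which is otherwise not used in these notes):
```python
import numpy as np

alpha = 0.3  # an arbitrary rotation angle
L = np.array([[np.cos(alpha), np.sin(alpha)],
              [-np.sin(alpha), np.cos(alpha)]])
# orthogonality: L^t L = I, and det L = +1 for a right-handed transformation
print(np.allclose(np.dot(L.T, L), np.eye(2)))  # True
print(np.isclose(np.linalg.det(L), 1.0))       # True
```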
In index notation
$$
\begin{equation}
\left( L^t L \right)_{ij} = \left( L^t \right)_{ik} L_{kj}
= L_{ki}L_{kj}=\delta_{ij}.
\end{equation}
$$
A general expression for the determinant of a $(3\times3)$ matrix is
$$
\begin{equation}
\det\mathbf{M}=\varepsilon^{ijk}M_{1i}M_{2j}M_{3k}
\end{equation}
$$
[Comment 3: Feel free to verify this. The expression gives a sum of six non-zero terms (3 positive and 3 negative), which indeed corresponds to the determinant.]
Note that by differentiating Eq. [(1)](#eq:transformation) we can write the matrix $\mathbf{L}$ as $L_{ij}=\frac{\partial x'_i}{\partial x_j}$. Since the matrix is orthogonal, the inverse relation $x_i=L_{ji}x'_j$ holds, and we also have $L_{ji}=\frac{\partial x_i}{\partial x'_j}$.
## Scalars and vectors
A scalar $s$ (the simplest example of a tensor) is characterized by taking the same value in all coordinate systems, i.e. $s'=s$.
[Comment 4: It is important to understand that it is the transformation rule that matters. It is not enough that "$s$ is a number".]
[Draw 5: A point P in the two coordinate systems above, illustrating its $x'$ and $x$ coordinates.]
Thus the $x$ coordinate of a point in $\mathbf{R}^3$ is not a scalar, whereas e.g. the temperature at a point is a scalar.
A vector $\vec v$ is a set of numbers that behaves exactly like the components of the position vector when we change systems, i.e.
$$
\begin{equation}
v'_i=L_{ij}v_j
\end{equation}
$$
[Comment 6: It is this transformation rule that defines which sets of three (or $D$) numbers earn the privilege of being called a vector.]
## Tensors
[Comment 7: It is now straightforward to go on and define objects, tensors, that carry more than one index. As we have already seen, a matrix can be written as a tensor with two indices, $T_{ij}$. But nothing prevents us from having an arbitrary number, say $p$.]
The transformation rule for a tensor with two indices (think of a matrix) is
$$
\begin{equation}
T'_{ij} = L_{ik} L_{jl} T_{kl} \left( = L_{ik} T_{kl} \left( L^t \right)_{lj} \right),
\end{equation}
$$
vilket ju motsvarar $\mathbf{T}' = \mathbf{L} \mathbf{T} \mathbf{L}^t$.
En tensor med $p$ index (en tensor av rank $p$) skrivs $T_{i_1i_2\ldots i_p}$. och allmänt
$$
\begin{equation}
T'_{i_1\ldots i_p}=L_{i_1j_1}\ldots L_{i_pj_p}T_{j_1\ldots j_p}.
\end{equation}
$$
The point of this is that one can multiply tensors together and
be sure that the result is again a tensor.
### Scalar product
Is $\vec{u} \cdot \vec{v}$ a scalar? How does it behave under a change of coordinates?
$$
\begin{equation}
u_i' v_i' = L_{ij} u_j L_{ik} v_k = \delta_{jk} u_j v_k = u_k v_k
\end{equation}
$$
The result is thus independent of the coordinate system, i.e. it is a scalar.
### Products of tensors
The product of two tensors is again a tensor.
* $c_{ij}=a_i b_j$ is also a tensor.
* $u_i=M_{ij}v_j$ is also a tensor.
[Comment 8: In the first case one can "contract" two indices, just as one does when forming the scalar product.]
[Comment 9: For the second case we have $M'_{ij}v'_j=L_{ik}L_{jl}M_{kl}L_{jm}v_m=L_{ik}\delta_{lm}M_{kl}v_m=L_{ik}M_{kl}v_l
=L_{ik}u_k$ (where in the first step we used that $\mathbf{L}$ is orthogonal). So if $M_{ij}$ and $v_i$ are tensors, then so is $u_i$. The general proof works the same way (and of course includes the fact that the scalar product of two vectors is a scalar).]
### Example: Cross product
Is $(\vec a\times\vec b)_i = \varepsilon_{ijk}a_jb_k$ a tensor? That is, is the result a vector?
[Comment 10: In your linear algebra courses you have surely shown that this is a vector.]
The calculation rules above imply that we only have to show that $\varepsilon_{ijk}$ is a tensor. The transformation rules say:
$$
\begin{equation}
\varepsilon'_{ijk}=L_{il}L_{jm}L_{kn}\varepsilon_{lmn}
\end{equation}
$$
We will show that $\varepsilon$ is completely invariant under coordinate transformations. But let us first show that $\varepsilon'_{ijk}$ is antisymmetric. Swap two indices:
$$
\begin{equation}
\varepsilon'_{jik}=L_{jl}L_{im}L_{kn}\varepsilon_{lmn} = L_{jm}L_{il}L_{kn}\varepsilon_{mln} = - L_{il} L_{jm}L_{kn}\varepsilon_{lmn} = - \varepsilon'_{ijk},
\end{equation}
$$
where we first renamed two summation indices, and then swapped them and used the fact that $\varepsilon_{ijk}$ is antisymmetric.
The antisymmetry just proved also means that elements with two equal indices must vanish, e.g. $\varepsilon'_{iik} = -\varepsilon'_{iik}$.
$\varepsilon'_{ijk}$ is therefore proportional to $\varepsilon_{ijk}$. It thus suffices to check one permutation, e.g. $ijk=123$.
$$
\begin{equation}
\varepsilon'_{123}=L_{1l}L_{2m}L_{3n}\varepsilon_{lmn} = \det \mathbf{L} = +1,
\end{equation}
$$
for a right-handed transformation.
So $\varepsilon'_{ijk}=\varepsilon_{ijk}$: the Levi--Civita tensor is an *invariant tensor*. The only other invariant tensor is the Kronecker delta. Show for yourself that $\delta'_{ij}=\delta_{ij}$.
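Both invariances are easy to verify numerically (a sketch; the rotation about the $z$ axis is an arbitrary example of a proper rotation):
```python
import numpy as np

# the Levi-Civita tensor in three dimensions
eps = np.zeros((3, 3, 3))
for i, j, k in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
    eps[i, j, k] = 1.0   # even permutations
    eps[i, k, j] = -1.0  # odd permutations

a = 0.7  # arbitrary angle; rotation about the z axis, det L = +1
L = np.array([[np.cos(a), np.sin(a), 0.0],
              [-np.sin(a), np.cos(a), 0.0],
              [0.0, 0.0, 1.0]])

# transform with one factor of L per index
eps_prime = np.einsum('il,jm,kn,lmn->ijk', L, L, L, eps)
delta_prime = np.einsum('ik,jl,kl->ij', L, L, np.eye(3))
print(np.allclose(eps_prime, eps))          # True
print(np.allclose(delta_prime, np.eye(3)))  # True
```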
### The vector operator
We also need to be able to differentiate. Let us show the somewhat trivial statement that the gradient
of a scalar is a vector. We have $(\nabla\phi)'_i=\frac{\partial}{\partial x'_i}\phi'=\frac{\partial \phi}{\partial x_j} \frac{\partial x_j}{\partial x'_i}=
L_{ij} \frac{\partial \phi}{\partial x_j}=L_{ij}(\nabla\phi)_j$. This shows that $\partial _i$ is a vector operator, which can then be used to construct other derivatives (divergence, curl, and so on). The same rules apply to $\partial _i$ as to other tensors.
## The inertia tensor
Another classic example is the inertia tensor,
$$
\begin{equation}
I_{ij}=\int_VdV\,\rho(r^2\delta_{ij}-x_ix_j),
\end{equation}
$$
which you computed in rigid-body dynamics. It relates the angular momentum to
the rotation vector via $L_i=I_{ij}\omega_j$. Since it contains repeated
cross products, it is easier to derive in the tensor formalism.
A small volume element $dV$ of a rigid body has mass $dm=\rho dV$,
and if the body rotates with rotation vector $\omega_i$ it has
velocity $v_i=(\vec\omega\times\vec{r})_i=\varepsilon_{ijk}\omega_jx_k$.
Its momentum is $\mbox{d}p_i=\mbox{d}mv_i=\mbox{d}m\varepsilon_{ijk}\omega_jx_k$. The contribution to the angular momentum from the volume element is
$$
\begin{equation}
\mbox{d}L_i = \varepsilon_{ijk}x_j \mbox{d}p_k = \mbox{d}m\varepsilon_{ijk}x_j\varepsilon_{klm}\omega_lx_m
=\mbox{d}m(\delta_{il}\delta_{jm}-\delta_{im}\delta_{jl})x_jx_m\omega_l \nonumber
\end{equation}
$$
$$
\begin{equation}
=\mbox{d}m(r^2\omega_i-x_ix_j\omega_j)=\mbox{d}V\rho(r^2\delta_{ij}-x_ix_j)\omega_j
\end{equation}
$$
and in total this becomes
$$
\begin{equation}
L_i = \int_V \mbox{d}V\,\rho(r^2\delta_{ij}-x_ix_j) \omega_j = I_{ij} \omega_j,
\end{equation}
$$
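For a discrete set of point masses the integral becomes a sum, and the relation $L_i = I_{ij}\omega_j$ can be checked numerically against the direct definition $\vec L = \sum_a m_a\, \vec r_a \times \vec v_a$ (a sketch; the masses, positions, and $\vec\omega$ are arbitrary examples):
```python
import numpy as np

masses = np.array([1.0, 2.0, 0.5])
positions = np.array([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 1.0],
                      [1.0, 1.0, 0.0]])
omega = np.array([0.2, -0.1, 0.5])

# I_ij = sum_a m_a (r_a^2 delta_ij - x_ai x_aj)
I = sum(m * (np.dot(r, r) * np.eye(3) - np.outer(r, r))
        for m, r in zip(masses, positions))

# angular momentum two ways: I omega versus sum_a m_a r_a x (omega x r_a)
L_tensor = np.dot(I, omega)
L_direct = sum(m * np.cross(r, np.cross(omega, r))
               for m, r in zip(masses, positions))
print(np.allclose(L_tensor, L_direct))  # True
```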
|
State Before: α : Type u_1
β : Type ?u.55772
γ : Type ?u.55775
ι : Sort u_2
inst✝ : ConditionallyCompleteLattice α
s t : Set α
a b : α
f g : ι → α
B : BddAbove (range g)
H : ∀ (x : ι), f x ≤ g x
⊢ iSup f ≤ iSup g State After: case inl
α : Type u_1
β : Type ?u.55772
γ : Type ?u.55775
ι : Sort u_2
inst✝ : ConditionallyCompleteLattice α
s t : Set α
a b : α
f g : ι → α
B : BddAbove (range g)
H : ∀ (x : ι), f x ≤ g x
h✝ : IsEmpty ι
⊢ iSup f ≤ iSup g
case inr
α : Type u_1
β : Type ?u.55772
γ : Type ?u.55775
ι : Sort u_2
inst✝ : ConditionallyCompleteLattice α
s t : Set α
a b : α
f g : ι → α
B : BddAbove (range g)
H : ∀ (x : ι), f x ≤ g x
h✝ : Nonempty ι
⊢ iSup f ≤ iSup g Tactic: cases isEmpty_or_nonempty ι State Before: case inl
α : Type u_1
β : Type ?u.55772
γ : Type ?u.55775
ι : Sort u_2
inst✝ : ConditionallyCompleteLattice α
s t : Set α
a b : α
f g : ι → α
B : BddAbove (range g)
H : ∀ (x : ι), f x ≤ g x
h✝ : IsEmpty ι
⊢ iSup f ≤ iSup g State After: no goals Tactic: rw [iSup_of_empty', iSup_of_empty'] State Before: case inr
α : Type u_1
β : Type ?u.55772
γ : Type ?u.55775
ι : Sort u_2
inst✝ : ConditionallyCompleteLattice α
s t : Set α
a b : α
f g : ι → α
B : BddAbove (range g)
H : ∀ (x : ι), f x ≤ g x
h✝ : Nonempty ι
⊢ iSup f ≤ iSup g
State After: no goals
Tactic: exact ciSup_le fun x => le_ciSup_of_le B x (H x)
function [fx,J,dfdp,d2fdxdp] = f_fullDCM4fmri(Xt,Theta,ut,inF)
% DCM for fMRI evolution function (attn: embedding of HRF!)
% function [fx,J,dfdp,d2fdxdp] = f_fullDCM4fmri(Xt,Theta,ut,inF)
% This function evaluates the evolution function of the neuronal level in
% DCM for fMRI. For the sake of HRF convolution purposes, it also
% internally calls g_fullDCM4fmri.m so that the hemodynamic states are
% updated properly.
%- Neuronal states evolution
[fx,J,dfdp] = f_dcm4fmri(Xt,Theta,ut,inF);
d2fdxdp = [];
%- HRF convolution
g_fullDCM4fmri(Xt,[],[],inF.inG);
% This call is used to update the hemodynamic states that are convolved
% outputs of the neuronal states (Xt), as well as the necessary gradient,
% i.e. Jacobian and derivative w.r.t. hemodynamic parameters.
|
[STATEMENT]
lemma dir_tree_arc1_in_apath:
assumes "u \<rightarrow>\<^bsub>dir_tree_r r\<^esub> v" and "r \<in> verts G"
shows "\<exists>p. apath r p v \<and> u \<in> set (awalk_verts r p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>p. apath r p v \<and> u \<in> set (awalk_verts r p)
[PROOF STEP]
using directed_tree.apath_over_inarc_if_dominated[OF directed_tree_r[OF assms(2)] assms(1)]
bfs_tree.apath_sub_imp_apath bfs_dir_tree_r[OF assms(2)] bfs_tree.awalk_verts_G_T
[PROOF STATE]
proof (prove)
using this:
\<exists>p. pre_digraph.apath (dir_tree_r r) r p v \<and> u \<in> set (pre_digraph.awalk_verts (dir_tree_r r) r p)
\<lbrakk>bfs_tree ?G ?T ?root; pre_digraph.apath ?T ?u ?p ?v\<rbrakk> \<Longrightarrow> pre_digraph.apath ?G ?u ?p ?v
bfs_tree G (dir_tree_r r) r
bfs_tree ?G ?T ?root \<Longrightarrow> pre_digraph.awalk_verts ?G = pre_digraph.awalk_verts ?T
goal (1 subgoal):
1. \<exists>p. apath r p v \<and> u \<in> set (awalk_verts r p)
[PROOF STEP]
by fastforce |
import Logic.Predicate.Term
universe u u₁ u₂ v v₁ v₂ w w₁ w₂
variable
{L : Language.{u}} {L₁ : Language.{u₁}} {L₂ : Language.{u₂}}
{μ : Type v} {μ₁ : Type v₁} {μ₂ : Type v₂}
namespace FirstOrder
@[ext] class Structure (L : Language.{u}) (M : Type w) where
func : {k : ℕ} → L.func k → (Fin k → M) → M
rel : {k : ℕ} → L.rel k → (Fin k → M) → Prop
end FirstOrder
namespace Language
namespace Hom
open FirstOrder
def onStructure (Φ : L₁ →ᵥ L₂) {M : Type w} (S : Structure L₂ M) : Structure L₁ M where
func := fun f => S.func (Φ.onFunc f)
rel := fun r => S.rel (Φ.onRel r)
instance subLanguageStructure {pf : ∀ k, Language.func L k → Prop} {pr : ∀ k, L.rel k → Prop}
{M : Type w} (s : Structure L M) : Structure (subLanguage L pf pr) M :=
onStructure (ofSubLanguage L) s
noncomputable def extendStructure (Φ : L₁ →ᵥ L₂) {M : Type w} [Inhabited M] (s : Structure L₁ M) : Structure L₂ M where
func := fun {k} f₂ v => Classical.epsilon (fun y => ∃ f₁ : L₁.func k, Φ.onFunc f₁ = f₂ ∧ y = s.func f₁ v)
rel := fun {k} r₂ v => ∃ r₁ : L₁.rel k, Φ.onRel r₁ = r₂ ∧ s.rel r₁ v
end Hom
end Language
namespace FirstOrder
namespace Structure
instance [Inhabited M] : Inhabited (Structure L M) :=
⟨{ func := fun _ _ => default, rel := fun _ _ => True }⟩
variable (Φ : L₁ →ᵥ L₂) {M : Type w} (s₂ : Structure L₂ M)
@[simp] lemma onStructure_func {k} {f : L₁.func k} {v : Fin k → M} :
(Φ.onStructure s₂).func f v = s₂.func (Φ.onFunc f) v := rfl
@[simp] lemma onStructure_rel {k} {r : L₁.rel k} {v : Fin k → M} :
(Φ.onStructure s₂).rel r v ↔ s₂.rel (Φ.onRel r) v := of_eq rfl
variable [Inhabited M] (s₁ : Structure L₁ M)
lemma extendStructure_func
{k} (injf : Function.Injective (Φ.onFunc : L₁.func k → L₂.func k)) (f₁ : L₁.func k) (v : Fin k → M) :
(Φ.extendStructure s₁).func (Φ.onFunc f₁) v = s₁.func f₁ v := by
simp[Language.Hom.extendStructure]
have : ∃ y, ∃ f₁' : L₁.func k, Φ.onFunc f₁' = Φ.onFunc f₁ ∧ y = s₁.func f₁' v := ⟨s₁.func f₁ v, f₁, rfl, rfl⟩
rcases Classical.epsilon_spec this with ⟨f', f'eq, h⟩
rcases injf f'eq with rfl; exact h
lemma extendStructure_rel
{k} (injr : Function.Injective (Φ.onRel : L₁.rel k → L₂.rel k)) (r₁ : L₁.rel k) (v : Fin k → M) :
(Φ.extendStructure s₁).rel (Φ.onRel r₁) v ↔ s₁.rel r₁ v := by
simp[Language.Hom.extendStructure]
refine ⟨by intros h; rcases h with ⟨r₁', e, h⟩; rcases injr e; exact h, by intros h; refine ⟨r₁, rfl, h⟩⟩
class Eq (L : Language.{u}) [L.HasEq] (M : Type w) [s : Structure L M] where
eq : ∀ a b, s.rel Language.HasEq.eq ![a, b] ↔ a = b
attribute [simp] Eq.eq
end Structure
end FirstOrder
namespace SubTerm
open FirstOrder
variable {M} (s : Structure L M) {n n₁ n₂ : ℕ} (e : Fin n → M) (e₂ : Fin n₂ → M) (ε : μ → M) (ε₂ : μ₂ → M)
def val : SubTerm L μ n → M
| #x => e x
| &x => ε x
| func f v => s.func f (fun i => (v i).val)
variable (M) {s}
@[reducible] def val! (M : Type w) [s : Structure L M] {n} (e : Fin n → M) (ε : μ → M) : SubTerm L μ n → M := val s e ε
variable {M e e₂ ε ε₂}
@[simp] lemma val_bvar (x) : val s e ε (#x : SubTerm L μ n) = e x := rfl
@[simp] lemma val_fvar (x) : val s e ε (&x : SubTerm L μ n) = ε x := rfl
lemma val_func {k} (f : L.func k) (v) :
val s e ε (func f v) = s.func f (fun i => (v i).val s e ε) := rfl
lemma val_bind (bound : Fin n₁ → SubTerm L μ₂ n₂) (free : μ₁ → SubTerm L μ₂ n₂) (t : SubTerm L μ₁ n₁) :
(bind bound free t).val s e₂ ε₂ = t.val s (val s e₂ ε₂ ∘ bound) (val s e₂ ε₂ ∘ free) :=
by induction t <;> simp[*, bind_func, val_func]
lemma val_map (bound : Fin n₁ → Fin n₂) (free : μ₁ → μ₂) (t : SubTerm L μ₁ n₁) :
(map bound free t).val s e₂ ε₂ = t.val s (e₂ ∘ bound) (ε₂ ∘ free) := val_bind _ _ _
lemma val_subst (u : SubTerm L μ n) (t : SubTerm L μ (n + 1)) :
(subst u t).val s e ε = t.val s (e <: u.val s e ε) ε :=
by simp[subst, val_bind]; congr; exact funext $ Fin.lastCases (by simp) (by simp)
@[simp] lemma val_bShift (a : M) (t : SubTerm L μ n) :
t.bShift.val s (a :> e) ε = t.val s e ε := by simp[bShift, val_map, Function.comp]
section Language
variable (Φ : L₁ →ᵥ L₂) (e : Fin n → M) (ε : μ → M)
lemma val_onSubTerm (s₂ : Structure L₂ M) {t : SubTerm L₁ μ n} :
val s₂ e ε (Φ.onSubTerm t) = val (Φ.onStructure s₂) e ε t :=
by induction t <;> simp[*, val!, Function.comp, val_func, Language.Hom.onSubTerm_func]
variable [Inhabited M]
lemma val_extendStructure_onSubTerm
(injf : ∀ k, Function.Injective (Φ.onFunc : L₁.func k → L₂.func k))
(s₁ : Structure L₁ M) (t : SubTerm L₁ μ n) :
val (Φ.extendStructure s₁) e ε (Φ.onSubTerm t) = val s₁ e ε t := by
induction t <;> simp[*, Language.Hom.onSubTerm_func, val_func]
case func k f v ih =>
exact Structure.extendStructure_func Φ s₁ (injf k) f (fun i => val s₁ e ε (v i))
end Language
section Syntactic
variable (ε : ℕ → M)
lemma val_shift (t : SyntacticSubTerm L n) :
t.shift.val s e ε = t.val s e (ε ∘ Nat.succ) := by simp[shift, val_map]
lemma val_free (a : M) (t : SyntacticSubTerm L (n + 1)) :
t.free.val s e (a :>ₙ ε) = t.val s (e <: a) ε :=
by simp[free, val_bind]; congr; exact funext $ Fin.lastCases (by simp) (by simp)
lemma val_fix (a : M) (t : SyntacticSubTerm L n) :
t.fix.val s (e <: a) ε = t.val s e (a :>ₙ ε) :=
by simp[fix, val_bind, Function.comp]; congr; exact funext (Nat.cases (by simp) (by simp))
end Syntactic
end SubTerm |
{-# OPTIONS --cubical --no-import-sorts #-}
module MorePropAlgebra.Definitions where
open import Agda.Primitive renaming (_⊔_ to ℓ-max; lsuc to ℓ-suc; lzero to ℓ-zero)
open import Cubical.Foundations.Everything renaming (_⁻¹ to _⁻¹ᵖ; assoc to ∙-assoc)
open import Cubical.Relation.Nullary.Base renaming (¬_ to ¬ᵗ_)-- ¬ᵗ_
open import Cubical.Data.Sum.Base renaming (_⊎_ to infixr 4 _⊎_)
open import Cubical.HITs.PropositionalTruncation.Base using (∣_∣)
open import Cubical.Foundations.Logic renaming
( inl to inlᵖ
; inr to inrᵖ
; _⇒_ to infixr 0 _⇒_ -- shifting by -6
; _⇔_ to infixr -2 _⇔_ --
; ∃[]-syntax to infix -4 ∃[]-syntax --
; ∃[∶]-syntax to infix -4 ∃[∶]-syntax --
; ∀[∶]-syntax to infix -4 ∀[∶]-syntax --
; ∀[]-syntax to infix -4 ∀[]-syntax --
)
open import Utils
open import MoreLogic.Definitions renaming
( _ᵗ⇒_ to infixr 0 _ᵗ⇒_
; ∀ᵖ[∶]-syntax to infix -4 ∀ᵖ[∶]-syntax
; ∀ᵖ〚∶〛-syntax to infix -4 ∀ᵖ〚∶〛-syntax
; ∀ᵖ!〚∶〛-syntax to infix -4 ∀ᵖ!〚∶〛-syntax
; ∀〚∶〛-syntax to infix -4 ∀〚∶〛-syntax
; Σᵖ[]-syntax to infix -4 Σᵖ[]-syntax
; Σᵖ[∶]-syntax to infix -4 Σᵖ[∶]-syntax
)
-- hProps of relations
module _ {ℓ ℓ' : Level} {A : Type ℓ} (R : hPropRel A A ℓ') where
isRefl = ∀[ a ] R a a
isIrrefl = ∀[ a ] ¬ (R a a)
isIrrefl' = ∀[ a ] ∀[ b ] ( R a b ⊔ R b a ) ⇒ ¬( a ≡ₚ b)
isIrrefl'' = ∀[ a ] ∀[ b ] ([ R a b ] ⊎ [ R b a ]) ᵗ⇒ ¬( a ≡ₚ b)
isIrreflˢ'' = λ(isset : isSet A) → ∀[ a ] ∀[ b ] ([ R a b ] ⊎ [ R b a ]) ᵗ⇒ ¬([ isset ] a ≡ˢ b)
isTrans = ∀[ a ] ∀[ b ] ∀[ x ] R a b ⇒ R b x ⇒ R a x
isCotrans = ∀[ a ] ∀[ b ] R a b ⇒ (∀[ x ] R a x ⊔ R x b)
isConnex = ∀[ a ] ∀[ b ] R a b ⊔ R b a
-- isTrichotomous = λ(<-irrefl ∶ [ isIrrefl' _<_ ]) → λ(<-asym : isAsym _<_) ∀[ a ] ∀[ b ] [ <-irrefl ]
-- [ P ] → ¬ᵗ [ Q ]
-- a ≡ b ⊎ (a < b ⊎ b < a)
-- (a < b ⊎ b < a) ⇒ (¬ a ≡ b) -- also irrefl
-- isTight
-- two variants of asymmetry
--
-- IsAsym R = ∀ a b → [ R a b ⇒ ¬ R b a ]
-- IsAsym' R = ∀ a b → [ ¬ (R a b ⊓ R b a) ]
--
-- which are equivalent
--
-- isAsymᵖ≡' : isAsym R ≡ isAsym' R
--
-- but it seems that this one is not equivalent:
--
-- ∀ a b → [ (¬ R b a) ⇒ R a b ]
isSym = ∀[ a ] ∀[ b ] R a b ⇒ R b a
isAsym = ∀[ a ] ∀[ b ] R a b ⇒ ¬ R b a
isAsym' = ∀[ a ] ∀[ b ] ¬ (R a b ⊓ R b a)
isAsym'' = ∀[ a ] ∀[ b ] ¬ R a b ⇒ R b a -- not equivalent! (weaker)
isTrichotomous : (<-irrefl : [ isIrrefl'' ]) → (<-asym : [ isAsym ]) → hProp _
isTrichotomousˢ : (isset : isSet A) → (<-irrefl : [ isIrreflˢ'' isset ]) → (<-asym : [ isAsym ]) → hProp _
isTrichotomous isirrefl isasym = ∀[ a ] ∀[ b ] ([ isirrefl a b ] ([ isasym a b ] R a b ⊎ᵖ R b a ) ⊎ᵖ ( a ≡ₚ b))
isTrichotomousˢ isset isirrefl isasym = ∀[ a ] ∀[ b ] ([ isirrefl a b ] ([ isasym a b ] R a b ⊎ᵖ R b a ) ⊎ᵖ ([ isset ] a ≡ˢ b))
isAntisym = ∀[ a ] ∀[ b ] R a b ⇒ R b a ⇒ a ≡ₚ b
isAntisymˢ = λ(isset : isSet A) → ∀[ a ] ∀[ b ] R a b ⇒ R b a ⇒ ([ isset ] a ≡ˢ b)
isAntisym' = ∀[ a ] ∀[ b ] R a b ⇒ ¬( a ≡ₚ b) ⇒ R b a
isAntisymˢ' = λ(isset : isSet A) → ∀[ a ] ∀[ b ] R a b ⇒ ¬([ isset ] a ≡ˢ b) ⇒ R b a
-- tightness is closely related to antisymmetry:
--
-- R-antisym : [ R a b ] → [ R b a ] → a ≡ b
-- R-tight : [ ¬ R a b ] → [ ¬ R b a ] → a ≡ b
--
-- this becomes even more obvious if we regard the intended use: when _≤_ and _#_ are derived from _<_
--
-- a ≤ b = ¬ (b < a)
-- a # b = ¬ ([ a < b ] ⊎ [ b < a ])
--
-- and indeed, we get
--
-- isTight _<_ ≡ isAntisym (λ a b → ¬ (b < a))
-- isTight' _<_ ≡ isTight''' (λ a b → (a < b) ⊔ (b < a))
--
-- In that case, `≤-antisym` and `#-tight` are almost the same, definitionally:
--
-- ≤-antisym : [ ¬ (b < a) ] → [ ¬ (a < b) ] → a ≡ b
-- ≤-antisym : [ ¬ (b < a) ] × [ ¬ (a < b) ] → a ≡ b -- by curry/uncurry
-- ≤-antisym : ¬ᵗ ( [ b < a ] ⊎ [ a < b ]) → a ≡ b -- by deMorgan
-- #-tight : [ ¬ (a < b) ] → [ ¬ (b < a) ] → a ≡ b
-- #-tight : [ ¬ (a < b) ] × [ ¬ (b < a) ] → a ≡ b -- by curry/uncurry
-- #-tight : ¬ᵗ ( [ a < b ] ⊎ [ b < a ]) → a ≡ b -- by deMorgan
--
-- We provide a few variants of tightness
--
isTight = ∀[ a ] ∀[ b ] ¬ R a b ⇒ ¬ R b a ⇒ a ≡ₚ b -- on _<_, "canonical"
isTightˢ = λ(isset : isSet A) → ∀[ a ] ∀[ b ] ¬ R a b ⇒ ¬ R b a ⇒ [ isset ] a ≡ˢ b -- on _<_
isTight' = ∀[ a ] ∀[ b ] ¬ ( R a b ⊔ R b a ) ⇒ a ≡ₚ b -- on _<_, definitional `isTight-ᵖ'≡'''`
isTightˢ' = λ(isset : isSet A) → ∀[ a ] ∀[ b ] ¬ ( R a b ⊔ R b a ) ⇒ [ isset ] a ≡ˢ b -- on _<_
isTight'' = ∀[ a ] ∀[ b ] (¬ᵗ ([ R a b ] ⊎ [ R b a ])) ᵗ⇒ a ≡ₚ b -- on _<_, definitional `isTight-ᵖ''≡'''`
isTightˢ'' = λ(isset : isSet A) → ∀[ a ] ∀[ b ] (¬ᵗ ([ R a b ] ⊎ [ R b a ])) ᵗ⇒ [ isset ] a ≡ˢ b -- on _<_, "convenient"
isTight''' = ∀[ a ] ∀[ b ] ¬ R a b ⇒ a ≡ₚ b -- on _#_
isTightˢ''' = λ(isset : isSet A) → ∀[ a ] ∀[ b ] ¬ R a b ⇒ [ isset ] a ≡ˢ b -- on _#_, also "convenient"
--
-- where the very first one, `IsTight` corresponds to a "canonical" definition,
-- the later one, `IsTightˢ''` is the "most convenient" one to use for `a # b = ¬ ([ a < b ] ⊎ [ b < a ])` on sets.
-- and the last ones `IsTight'''` and `IsTightˢ'''` are for "_#_" instead of "_<_".
--
-- These tightness definitions are all equivalent in the following sense:
--
-- isTight-ˢ≡ : (is-set : isSet A) → isTightˢ is-set _<_ ≡ isTight _<_
-- isTight-ˢ'≡' : (is-set : isSet A) → isTightˢ' is-set _<_ ≡ isTight' _<_
-- isTight-ˢ''≡'' : (is-set : isSet A) → isTightˢ'' is-set _<_ ≡ isTight'' _<_
-- isTight-ˢ'''≡''' : (is-set : isSet A) → isTightˢ''' is-set _#_ ≡ isTight''' _#_
-- isTight-≡' : isTight _<_ ≡ isTight' _<_
-- isTight-'≡'' : isTight' _<_ ≡ isTight'' _<_
-- isTight-'≡''' : isTight' _<_ ≡ isTight''' (λ a b → (a < b) ⊔ (b < a))
-- isTight-''≡''' : (<-asym : [ isAsym _<_ ]) → isTight'' _<_ ≡ isTight''' (λ a b → [ <-asym a b ] (a < b) ⊎ᵖ (b < a))
--
-- where `isTight-ᵖ'≡'''` and `isTight-ᵖ''≡'''` hold definitionally.
-- common definitions of less equal _≤_ and apartness _#_ with respect to _<_
module _ {ℓ ℓ'} {X : Type ℓ} {_<_ : hPropRel X X ℓ'} where
_#'_ : hPropRel X X ℓ'
_#'_ x y = (x < y) ⊔ (y < x)
-- a variant that omits propositional truncation by using asymmetry of _<_
_#''_ : {<-asym : [ isAsym _<_ ]} → hPropRel X X ℓ'
_#''_ {<-asym = <-asym} x y = [ <-asym x y ] (x < y) ⊎ᵖ (y < x)
_≤'_ : hPropRel X X ℓ'
_≤'_ x y = ¬ (y < x)
-- this is how Bridges 1999 defines _≤_
_≤''_ : hPropRel X X (ℓ-max ℓ ℓ')
x ≤'' y = ∀[ ε ] (y < ε) ⇒ (x < ε) -- (∀ ε → [ y < ε ] → [ x < ε ]) , isPropΠ2 (λ ε y<ε → isProp[] (x < ε))
-- combined hProps of relations
module _ {ℓ ℓ' : Level} {A : Type ℓ} (R : hPropRel A A ℓ')
(let _<_ = R; _≤_ = R) -- "strict" is denoted by _<_, and "non-strict" by _≤_
where
record IsApartnessRel : Type (ℓ-max ℓ ℓ') where
constructor isapartnessrel
field
is-irrefl : ∀ a → [ ¬ R a a ]
is-sym : ∀ a b → [ R a b ] → [ R b a ]
is-cotrans : ∀ a b → [ R a b ] → ∀ x → [ R a x ⊔ R x b ]
_ : [ isIrrefl R ]; _ = is-irrefl
_ : [ isSym R ]; _ = is-sym
_ : [ isCotrans R ]; _ = is-cotrans
isApartnessRel : hProp (ℓ-max ℓ ℓ')
isApartnessRel .fst = IsApartnessRel
isApartnessRel .snd (isapartnessrel a₀ b₀ c₀) (isapartnessrel a₁ b₁ c₁) = φ where
abstract φ = λ i → isapartnessrel (snd (isIrrefl R) a₀ a₁ i) (snd (isSym R) b₀ b₁ i) (snd (isCotrans R) c₀ c₁ i)
record IsStrictPartialOrder : Type (ℓ-max ℓ ℓ') where
constructor isstrictpartialorder
field
is-irrefl : ∀ a → [ ¬ a < a ]
is-trans : ∀ a b x → [ a < b ] → [ b < x ] → [ a < x ]
is-cotrans : ∀ a b → [ a < b ] → ∀ x → [ a < x ⊔ x < b ]
_ : [ isIrrefl _<_ ]; _ = is-irrefl
_ : [ isTrans _<_ ]; _ = is-trans
_ : [ isCotrans _<_ ]; _ = is-cotrans
isStrictPartialOrder : hProp (ℓ-max ℓ ℓ')
isStrictPartialOrder .fst = IsStrictPartialOrder
isStrictPartialOrder .snd (isstrictpartialorder a₀ b₀ c₀) (isstrictpartialorder a₁ b₁ c₁) = φ where
abstract φ = λ i → isstrictpartialorder (snd (isIrrefl _<_) a₀ a₁ i) (snd (isTrans _<_) b₀ b₁ i) (snd (isCotrans _<_) c₀ c₁ i)
record IsPreorder : Type (ℓ-max ℓ ℓ') where
constructor ispreorder
field
is-refl : ∀ a → [ R a a ]
is-trans : ∀ a b x → [ R a b ] → [ R b x ] → [ R a x ]
_ : [ isRefl R ]; _ = is-refl
_ : [ isTrans R ]; _ = is-trans
isPreorder : hProp (ℓ-max ℓ ℓ')
isPreorder .fst = IsPreorder
isPreorder .snd (ispreorder a₀ b₀) (ispreorder a₁ b₁) = φ where
abstract φ = λ i → ispreorder (snd (isRefl R) a₀ a₁ i) (snd (isTrans R) b₀ b₁ i)
record IsPartialOrder : Type (ℓ-max ℓ ℓ') where
constructor ispartialorder
field
is-refl : ∀ a → [ a ≤ a ]
is-antisym : ∀ a b → [ a ≤ b ] → [ b ≤ a ] → [ a ≡ₚ b ]
is-trans : ∀ a b x → [ a ≤ b ] → [ b ≤ x ] → [ a ≤ x ]
_ : [ isRefl _≤_ ]; _ = is-refl
_ : [ isAntisym _≤_ ]; _ = is-antisym
_ : [ isTrans _≤_ ]; _ = is-trans
isPartialOrder : hProp (ℓ-max ℓ ℓ')
isPartialOrder .fst = IsPartialOrder
isPartialOrder .snd (ispartialorder a₀ b₀ c₀) (ispartialorder a₁ b₁ c₁) = φ where
abstract φ = λ i → ispartialorder (snd (isRefl _≤_) a₀ a₁ i) (snd (isAntisym _≤_) b₀ b₁ i) (snd (isTrans _≤_) c₀ c₁ i)
record IsLinearOrder : Type (ℓ-max ℓ ℓ') where
constructor islinearorder
field
is-connex : ∀ a b → [ a ≤ b ⊔ b ≤ a ]
is-antisym : ∀ a b → [ a ≤ b ] → [ b ≤ a ] → [ a ≡ₚ b ]
is-trans : ∀ a b x → [ a ≤ b ] → [ b ≤ x ] → [ a ≤ x ]
_ : [ isConnex _≤_ ]; _ = is-connex
_ : [ isAntisym _≤_ ]; _ = is-antisym
_ : [ isTrans _≤_ ]; _ = is-trans
isLinearOrder : hProp (ℓ-max ℓ ℓ')
isLinearOrder .fst = IsLinearOrder
isLinearOrder .snd (islinearorder a₀ b₀ c₀) (islinearorder a₁ b₁ c₁) = φ where
abstract φ = λ i → islinearorder (snd (isConnex _≤_) a₀ a₁ i) (snd (isAntisym _≤_) b₀ b₁ i) (snd (isTrans _≤_) c₀ c₁ i)
record IsStrictLinearOrder : Type (ℓ-max ℓ ℓ') where
constructor isstrictlinearorder
field
is-irrefl : ∀ a → [ ¬ a < a ]
is-trans : ∀ a b x → [ a < b ] → [ b < x ] → [ a < x ]
is-tricho : ∀ a b → ([ a < b ] ⊎ [ b < a ]) ⊎ [ a ≡ₚ b ]
private
is-asym : ∀ a b → [ a < b ] → [ ¬ b < a ]
is-asym a b a<b b<a = is-irrefl _ (is-trans _ _ _ a<b b<a)
is-irrefl'' : ∀ a b → [ a < b ] ⊎ [ b < a ] → [ ¬(a ≡ₚ b) ]
is-irrefl'' a b (inl a<b) a≡b = is-irrefl _ (substₚ (λ p → p < b) a≡b a<b)
is-irrefl'' a b (inr b<a) a≡b = is-irrefl _ (substₚ (λ p → b < p) a≡b b<a)
_ : [ isIrrefl _<_ ]; _ = is-irrefl
_ : [ isTrans _<_ ]; _ = is-trans
_ : [ isTrichotomous _<_ is-irrefl'' is-asym ]; _ = is-tricho
isStrictLinearOrder : hProp (ℓ-max ℓ ℓ')
isStrictLinearOrder .fst = IsStrictLinearOrder
isStrictLinearOrder .snd (isstrictlinearorder a₀ b₀ c₀) (isstrictlinearorder a₁ b₁ c₁) = φ where
abstract φ = λ i → let is-irrefl = snd (isIrrefl _<_ ) a₀ a₁ i
is-trans = snd (isTrans _<_ ) b₀ b₁ i
is-asym : ∀ a b → [ a < b ] → [ ¬ b < a ]
is-asym a b a<b b<a = is-irrefl _ (is-trans _ _ _ a<b b<a)
is-irrefl'' : ∀ a b → [ a < b ] ⊎ [ b < a ] → [ ¬(a ≡ₚ b) ]
is-irrefl'' a b = λ
{ (inl a<b) a≡b → is-irrefl _ (substₚ (λ p → p < b) a≡b a<b)
; (inr b<a) a≡b → is-irrefl _ (substₚ (λ p → b < p) a≡b b<a)
}
is-tricho = snd (isTrichotomous _<_ is-irrefl'' is-asym) c₀ c₁ i
in isstrictlinearorder is-irrefl is-trans is-tricho
-- properties tied to some operation `op` on sets
module _ {ℓ : Level} {A : Type ℓ} (op : A → A → A) (is-set : isSet A)
(let _·_ = op -- different semantics
_+_ = op --
_≡ˢ_ = λ(x y : A) → [ is-set ] x ≡ˢ y
infixl 7 _·_
infixl 5 _+_
infixl 4 _≡ˢ_
) where
isAssociativeˢ = ∀[ x ] ∀[ y ] ∀[ z ] x · (y · z) ≡ˢ (x · y) · z
isIdentityˢ = λ(ε : A) → ∀[ x ] ( x · ε ≡ˢ x) ⊓ ( ε · x ≡ˢ x)
isCommutativeˢ = ∀[ x ] ∀[ y ] x + y ≡ˢ y + x
-- other properties
module _ {ℓ : Level} {A : Type ℓ} where
is-+-#-Extensional : (_+_ : A → A → A) → ∀{ℓ'} → (_#_ : hPropRel A A ℓ') → hProp _
is-+-<-Extensional : (_+_ : A → A → A) → ∀{ℓ'} → (_<_ : hPropRel A A ℓ') → hProp _
is-+-#-Extensional _+_ _#_ = ∀[ w ] ∀[ x ] ∀[ y ] ∀[ z ] (w + x) # (y + z) ⇒ (w # y) ⊔ (x # z)
is-+-<-Extensional _+_ _<_ = ∀[ w ] ∀[ x ] ∀[ y ] ∀[ z ] (w + x) < (y + z) ⇒ (w < y) ⊔ (x < z)
isMin : ∀{ℓ'} → (_≤_ : hPropRel A A ℓ') (min : A → A → A) → hProp _
isMax : ∀{ℓ'} → (_≤_ : hPropRel A A ℓ') (max : A → A → A) → hProp _
isMin _≤_ min = ∀[ x ] ∀[ y ] ∀[ z ] z ≤ (min x y) ⇔ z ≤ x ⊓ z ≤ y
isMax _≤_ max = ∀[ x ] ∀[ y ] ∀[ z ] (max x y) ≤ z ⇔ x ≤ z ⊓ y ≤ z
operation_preserves_when_ : (op : A → A → A) → ∀{ℓ'} → (R : hPropRel A A ℓ') → ∀{ℓ''} → (A → hProp ℓ'') → hProp _
operation_reflects_when_ : (op : A → A → A) → ∀{ℓ'} → (R : hPropRel A A ℓ') → ∀{ℓ''} → (A → hProp ℓ'') → hProp _
operation_reflects_〚when〛 : (op : A → A → A) → ∀{ℓ'} → (R : hPropRel A A ℓ') → ∀{ℓ''} → (A → hProp ℓ'') → hProp _
operation_creates_when_ : (op : A → A → A) → ∀{ℓ'} → (R : hPropRel A A ℓ') → ∀{ℓ''} → (A → hProp ℓ'') → hProp _
operation _·_ preserves _<_ when P = ∀[ x ] ∀[ y ] ∀[ z ] P z ⇒ x < y ⇒ (x · z) < (y · z)
operation _·_ reflects _<_ when P = ∀[ x ] ∀[ y ] ∀[ z ] P z ⇒ (x · z) < (y · z) ⇒ x < y
operation _·_ reflects _<_ 〚when〛 P = ∀[ x ] ∀[ y ] ∀[ z ] ∀〚 _ ∶ [ P z ] 〛 (x · z) < (y · z) ⇒ x < y
operation _·_ creates _<_ when P = ∀[ x ] ∀[ y ] ∀[ z ] P z ⇒ (x < y ⇔ (x · z) < (y · z))
isAbsNonnegative : ∀{ℓ} {F : Type ℓ} {Rℓ Rℓ'} {R : Type Rℓ} (0ᴿ : R) (_≤ᴿ_ : hPropRel R R Rℓ') (abs : F → R) → hProp _
isAbsCreatesZero : ∀{ℓ} {F : Type ℓ} (is-set : isSet F) (0f : F) {Rℓ Rℓ'} {R : Type Rℓ} (is-setᴿ : isSet R) (0ᴿ : R) (_≤ᴿ_ : hPropRel R R Rℓ') (abs : F → R) → hProp _
isAbsPreservesMultiplication : ∀{ℓ} {F : Type ℓ} ( _·_ : F → F → F) {Rℓ } {R : Type Rℓ} (is-setᴿ : isSet R) ( _·ᴿ_ : R → R → R) (abs : F → R) → hProp _
isAbsTriangleInequality : ∀{ℓ} {F : Type ℓ} (_+_ : F → F → F) {Rℓ Rℓ'} {R : Type Rℓ} (_+ᴿ_ : R → R → R) (_≤ᴿ_ : hPropRel R R Rℓ') (abs : F → R) → hProp _
isAbsNonnegative 0ᴿ _≤ᴿ_ abs = ∀[ x ] 0ᴿ ≤ᴿ (abs x)
isAbsCreatesZero is-set 0f is-setᴿ 0ᴿ _≤ᴿ_ abs = ∀[ x ] ([ is-set ] x ≡ˢ 0f ⇔ [ is-setᴿ ] abs x ≡ˢ 0ᴿ)
isAbsPreservesMultiplication _·_ is-setᴿ _·ᴿ_ abs = ∀[ x ] ∀[ y ] [ is-setᴿ ] (abs (x · y)) ≡ˢ (abs x ·ᴿ abs y)
isAbsTriangleInequality _+_ _+ᴿ_ _≤ᴿ_ abs = ∀[ x ] ∀[ y ] abs (x + y) ≤ᴿ (abs x +ᴿ abs y)
record IsAbsˢ
{ ℓ : Level} {F : Type ℓ } (is-set : isSet F) (0f : F) (_+_ _·_ : F → F → F)
{Rℓ Rℓ' : Level} {R : Type Rℓ} (is-setᴿ : isSet R) (0ᴿ : R) (_+ᴿ_ _·ᴿ_ : R → R → R) (_≤ᴿ_ : hPropRel R R Rℓ')
(abs : F → R)
: Type (ℓ-suc (ℓ-max ℓ (ℓ-max Rℓ Rℓ')))
where
constructor isabs
field
is-0≤abs : ∀ x → [ 0ᴿ ≤ᴿ (abs x) ]
abs-creates-0 : ∀ x → [ [ is-set ] x ≡ˢ 0f ⇔ [ is-setᴿ ] abs x ≡ˢ 0ᴿ ]
abs-preserves-· : ∀ x y → (abs (x · y)) ≡ (abs x ·ᴿ abs y)
triangle-ineq : ∀ x y → [ abs (x + y) ≤ᴿ (abs x +ᴿ abs y) ]
_ : [ isAbsNonnegative 0ᴿ _≤ᴿ_ abs ]; _ = is-0≤abs
_ : [ isAbsCreatesZero is-set 0f is-setᴿ 0ᴿ _≤ᴿ_ abs ]; _ = abs-creates-0
_ : [ isAbsPreservesMultiplication _·_ is-setᴿ _·ᴿ_ abs ]; _ = abs-preserves-·
_ : [ isAbsTriangleInequality _+_ _+ᴿ_ _≤ᴿ_ abs ]; _ = triangle-ineq
abs-preserves-0 : ∀ x → x ≡ 0f → abs x ≡ 0ᴿ
abs-preserves-0 x = abs-creates-0 x .fst
abs-reflects-0 : ∀ x → abs x ≡ 0ᴿ → x ≡ 0f
abs-reflects-0 x = abs-creates-0 x .snd
isAbs : { ℓ : Level} {F : Type ℓ } (is-set : isSet F) (0f : F) (_+_ _·_ : F → F → F)
{Rℓ Rℓ' : Level} {R : Type Rℓ} (is-setᴿ : isSet R) (0ᴿ : R) (_+ᴿ_ _·ᴿ_ : R → R → R) (_≤ᴿ_ : hPropRel R R Rℓ')
(abs : F → R)
→ hProp (ℓ-suc (ℓ-max ℓ (ℓ-max Rℓ Rℓ')))
isAbs is-set 0f _+_ _·_ is-setᴿ 0ᴿ _+ᴿ_ _·ᴿ_ _≤ᴿ_ abs .fst = IsAbsˢ is-set 0f _+_ _·_ is-setᴿ 0ᴿ _+ᴿ_ _·ᴿ_ _≤ᴿ_ abs
isAbs is-set 0f _+_ _·_ is-setᴿ 0ᴿ _+ᴿ_ _·ᴿ_ _≤ᴿ_ abs .snd (isabs a₀ b₀ c₀ d₀) (isabs a₁ b₁ c₁ d₁) = φ where
abstract φ = λ i → isabs (snd (isAbsNonnegative 0ᴿ _≤ᴿ_ abs) a₀ a₁ i) (snd (isAbsCreatesZero is-set 0f is-setᴿ 0ᴿ _≤ᴿ_ abs) b₀ b₁ i)
(snd (isAbsPreservesMultiplication _·_ is-setᴿ _·ᴿ_ abs) c₀ c₁ i) (snd (isAbsTriangleInequality _+_ _+ᴿ_ _≤ᴿ_ abs) d₀ d₁ i)
-- other properties on sets
module _ {ℓ : Level} {A : Type ℓ} (is-set : isSet A)
(let _≡ˢ_ = λ(x y : A) → [ is-set ] x ≡ˢ y; infixl 4 _≡ˢ_) where
-- NOTE: the left inverse is "on the right" of `_⊓_` (you get it with `snd`)
-- and the right inverse is "on the left" of `_⊓_` (you get it with `fst`)
-- .. this is how it's done in the cubical standard library
isInverseˢ : (0g : A) (_+_ : A → A → A) (-_ : A → A) → hProp _
isDistributiveˢ : (_+_ _·_ : A → A → A) → hProp _
isNonzeroInverseˢ' : (0f 1f : A) ( _·_ : A → A → A) (_⁻¹ : (x : A) → {{ ! [ ¬'(x ≡ 0f) ] }} → A) → hProp _
isNonzeroInverseˢ : (0f 1f : A) ( _·_ : A → A → A) → ∀{ℓ'} → (_#_ : hPropRel A A ℓ') → (_⁻¹ : (x : A) → {{ [ x # 0f ] }} → A) → hProp _
isNonzeroInverseˢ'' : (0f 1f : A) ( _·_ : A → A → A) → ∀{ℓ'} → (_#_ : hPropRel A A ℓ') → hProp _
-- isNonzeroInverseˢ''' : (0f 1f : A) ( _·_ : A → A → A) → ∀{ℓ'} → (_#_ : hPropRel A A ℓ') → hProp _
isInverseNonzeroˢ : (0f 1f : A) ( _·_ : A → A → A) → ∀{ℓ'} → (_#_ : hPropRel A A ℓ') → hProp _
isInverseˢ 0g _+_ -_ = ∀[ x ] ( x + (- x) ≡ˢ 0g)
⊓ ((- x) + x ≡ˢ 0g)
isDistributiveˢ _+_ _·_ = ∀[ x ] ∀[ y ] ∀[ z ] ( x · (y + z) ≡ˢ (x · y) + (x · z))
⊓ ((x + y) · z ≡ˢ (x · z) + (y · z))
-- classical notion of inverse operating on `¬(x ≡ 0)`, used in `IsClassicalField`
-- `∀ᵖ!〚_〛_` creates an instance argument of type `!_`
-- because `¬'(x ≡ 0f)` is a function type with an explicit argument and won't be considered in instance search
isNonzeroInverseˢ' 0f 1f _·_ _⁻¹ = ∀[ x ] ∀ᵖ!〚 p ∶ ¬'(x ≡ 0f) 〛 (x · (x ⁻¹) {{ p }} ≡ˢ 1f)
⊓ ((x ⁻¹) {{ p }} · x ≡ˢ 1f)
-- constructive notion of inverse operating on `x # 0`
-- `∀ᵖ〚_〛_` creates an instance argument
isNonzeroInverseˢ 0f 1f _·_ _#_ _⁻¹ = ∀[ x ] ∀ᵖ〚 p ∶ x # 0f 〛 (x · (x ⁻¹) {{ p }} ≡ˢ 1f)
⊓ ((x ⁻¹) {{ p }} · x ≡ˢ 1f)
-- constructive notion of inverse
-- this is the formulation in Booij2020, used in `IsAlmostOrderedField`
-- we need to proof uniqueness of inverses to obtain `_⁻¹` for `isNonzeroInverseˢ`
isNonzeroInverseˢ'' 0f 1f _·_ _#_ = ∀[ x ] (∃[ y ] x · y ≡ˢ 1f) ⇔ x # 0f
-- isNonzeroInverseˢ''' 0f 1f _·_ _#_ = ∀[ x ] (Σᵖ[ y ] x · y ≡ˢ 1f) ⇔ x # 0f
isInverseNonzeroˢ 0f 1f _·_ _#_ = ∀[ x ] ∀[ y ] x · y ≡ˢ 1f ⇒ x # 0f ⊓ y # 0f
|
module ctxt-types where
open import lib
open import cedille-types
open import general-util
open import syntax-util
location : Set
location = string × posinfo -- file path and starting position in the file
-- file path and starting / ending position in file
span-location = string × posinfo × posinfo
-- missing locations
missing-location : location
missing-location = ("missing" , "missing")
missing-span-location : span-location
missing-span-location = ("missing" , "missing" , "missing")
{- we will generally keep classifiers of variables in hnf in the ctxt, although
we will not necessarily unfold recursive type definitions. -}
defScope : Set
defScope = 𝔹
pattern localScope = tt
pattern globalScope = ff
pattern concrete-datatype = globalScope
pattern abstract-datatype = localScope
defParams : Set
defParams = maybe params
data ctxt-info : Set where
-- for defining a datatype
-- datatype-def : defParams → (ind reg : kind) → ctrs → ctxt-info
-- for defining a datatype constructor
ctr-def : params → type → (ctrs-length ctr-index ctr-unerased-arrows : ℕ) → ctxt-info
-- for declaring the type that proves a type is a datatype (X/Mu)
-- mu-def : defParams → var → kind → ctxt-info
-- for declaring a variable to have a given type (with no definition)
term-decl : type → ctxt-info
-- for defining a variable to equal a term with a given type
-- maybe term, because datatype X/Mu and X/mu have params, etc... but no def
term-def : defParams → opacity → maybe term → type → ctxt-info
-- for untyped term definitions
term-udef : defParams → opacity → term → ctxt-info
-- for declaring a variable to have a given kind (with no definition)
type-decl : kind → ctxt-info
-- for defining a variable to equal a type with a given kind
type-def : defParams → opacity → maybe type → kind → ctxt-info
-- for defining a variable to equal a kind
kind-def : params → kind → ctxt-info
-- to rename a variable at any level to another
rename-def : var → ctxt-info
-- representing a declaration of a variable with no other information about it
var-decl : ctxt-info
sym-info : Set
sym-info = ctxt-info × location
-- module filename, name, parameters, and qualifying substitution
mod-info : Set
mod-info = string × string × params × qualif
is-term-level : ctxt-info → 𝔹
is-term-level (term-decl _) = tt
is-term-level (term-def _ _ _ _) = tt
is-term-level (term-udef _ _ _) = tt
is-term-level (ctr-def _ _ _ _ _ ) = tt
is-term-level _ = ff
data ctxt : Set where
mk-ctxt : (mod : mod-info) → -- current module
(syms : trie (string × 𝕃 string) × trie string × trie params × trie ℕ × Σ ℕ (𝕍 string)) → -- map each filename to its module name and the symbols declared in that file, map each module name to its filename and params, and file ID's for use in to-string.agda
(i : trie sym-info) → -- map symbols (from Cedille files) to their ctxt-info and location
(sym-occurrences : trie (𝕃 (var × posinfo × string))) → -- map symbols to a list of definitions they occur in (and relevant file info)
(Δ : trie (params × kind × kind × ctrs) × trie (var × var × args) × trie var) → -- datatype info: (concrete/global datatypes × abstract/local datatypes × datatype/Mu map)
ctxt
ctxt-binds-var : ctxt → var → 𝔹
ctxt-binds-var (mk-ctxt (_ , _ , _ , q) _ i _ _) x = trie-contains q x || trie-contains i x
ctxt-var-decl : var → ctxt → ctxt
ctxt-var-decl v (mk-ctxt (fn , mn , ps , q) syms i symb-occs Δ) =
mk-ctxt (fn , mn , ps , (trie-insert q v (v , []))) syms (trie-insert i v (var-decl , "missing" , "missing")) symb-occs Δ
ctxt-var-decl-loc : posinfo → var → ctxt → ctxt
ctxt-var-decl-loc pi v (mk-ctxt (fn , mn , ps , q) syms i symb-occs Δ) =
mk-ctxt (fn , mn , ps , (trie-insert q v (v , []))) syms (trie-insert i v (var-decl , fn , pi)) symb-occs Δ
qualif-var : ctxt → var → var
qualif-var (mk-ctxt (_ , _ , _ , q) _ _ _ _) v with trie-lookup q v
...| just (v' , _) = v'
...| nothing = v
start-modname : start → string
start-modname (File _ _ _ mn _ _ _) = mn
ctxt-get-current-filename : ctxt → string
ctxt-get-current-filename (mk-ctxt (fn , _) _ _ _ _) = fn
ctxt-get-current-mod : ctxt → mod-info
ctxt-get-current-mod (mk-ctxt m _ _ _ _) = m
ctxt-get-current-modname : ctxt → string
ctxt-get-current-modname (mk-ctxt (_ , mn , _ , _) _ _ _ _) = mn
ctxt-get-current-params : ctxt → params
ctxt-get-current-params (mk-ctxt (_ , _ , ps , _) _ _ _ _) = ps
ctxt-get-symbol-occurrences : ctxt → trie (𝕃 (var × posinfo × string))
ctxt-get-symbol-occurrences (mk-ctxt _ _ _ symb-occs _) = symb-occs
ctxt-set-symbol-occurrences : ctxt → trie (𝕃 (var × posinfo × string)) → ctxt
ctxt-set-symbol-occurrences (mk-ctxt fn syms i symb-occs Δ) new-symb-occs = mk-ctxt fn syms i new-symb-occs Δ
|
# Bayesian Hierarchical Linear Regression
Author: [Carlos Souza](mailto:[email protected])
Probabilistic Machine Learning models can not only make predictions about future data, but also **model uncertainty**. In areas such as **personalized medicine**, there might be a large amount of data, but there is still a relatively **small amount of data for each patient**. To customize predictions for each person it becomes necessary to **build a model for each person** — with its inherent **uncertainties** — and to couple these models together in a **hierarchy** so that information can be borrowed from other **similar people** [1].
The purpose of this tutorial is to demonstrate how to **implement a Bayesian Hierarchical Linear Regression model using NumPyro**. To motivate the tutorial, I will use [OSIC Pulmonary Fibrosis Progression](https://www.kaggle.com/c/osic-pulmonary-fibrosis-progression) competition, hosted at Kaggle.
## 1. Understanding the task
Pulmonary fibrosis is a disorder with no known cause and no known cure, created by scarring of the lungs. In this competition, we were asked to predict a patient’s severity of decline in lung function. Lung function is assessed based on output from a spirometer, which measures the forced vital capacity (FVC), i.e. the volume of air exhaled.
In medical applications, it is useful to **evaluate a model's confidence in its decisions**. Accordingly, the metric used to rank the teams was designed to reflect **both the accuracy and certainty of each prediction**. It's a modified version of the Laplace Log Likelihood (more details on that later).
Let's explore the data and see what's that all about:
```python
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
```
```python
train = pd.read_csv('https://gist.githubusercontent.com/ucals/'
'2cf9d101992cb1b78c2cdd6e3bac6a4b/raw/'
'43034c39052dcf97d4b894d2ec1bc3f90f3623d9/'
'osic_pulmonary_fibrosis.csv')
train.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Patient</th>
<th>Weeks</th>
<th>FVC</th>
<th>Percent</th>
<th>Age</th>
<th>Sex</th>
<th>SmokingStatus</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ID00007637202177411956430</td>
<td>-4</td>
<td>2315</td>
<td>58.253649</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>1</th>
<td>ID00007637202177411956430</td>
<td>5</td>
<td>2214</td>
<td>55.712129</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>2</th>
<td>ID00007637202177411956430</td>
<td>7</td>
<td>2061</td>
<td>51.862104</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>3</th>
<td>ID00007637202177411956430</td>
<td>9</td>
<td>2144</td>
<td>53.950679</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>4</th>
<td>ID00007637202177411956430</td>
<td>11</td>
<td>2069</td>
<td>52.063412</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
</tbody>
</table>
</div>
In the dataset, we were provided with a baseline chest CT scan and associated clinical information for a set of patients. A patient has an image acquired at time Week = 0 and has numerous follow up visits over the course of approximately 1-2 years, at which time their FVC is measured. For this tutorial, I will use only the Patient ID, the weeks and the FVC measurements, discarding all the rest. Using only these columns enabled our team to achieve a competitive score, which shows the power of Bayesian hierarchical linear regression models especially when gauging uncertainty is an important part of the problem.
Since this is real medical data, the relative timing of FVC measurements varies widely, as shown in the 3 sample patients below:
```python
def chart(patient_id, ax):
data = train[train['Patient'] == patient_id]
x = data['Weeks']
y = data['FVC']
ax.set_title(patient_id)
ax = sns.regplot(x, y, ax=ax, ci=None, line_kws={'color':'red'})
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart('ID00007637202177411956430', axes[0])
chart('ID00009637202177434476278', axes[1])
chart('ID00010637202177584971671', axes[2])
```
On average, each of the 176 provided patients made 9 visits, when FVC was measured. The visits happened in specific weeks in the [-12, 133] interval. The decline in lung capacity is very clear. We see, though, that the curves are very different from patient to patient.
We were asked to predict every patient's FVC measurement for every possible week in the [-12, 133] interval, and the confidence for each prediction. In other words: we were asked to fill a matrix like the one below, and provide a confidence score for each prediction:
The task was a perfect fit for Bayesian inference. However, the vast majority of solutions shared by the Kaggle community used discriminative machine learning models, disregarding the fact that most discriminative methods are very poor at providing realistic uncertainty estimates. Because they are typically trained in a manner that optimizes the parameters to minimize some loss criterion (e.g. the predictive error), they do not, in general, encode any uncertainty in either their parameters or the subsequent predictions. Though many methods can produce uncertainty estimates either as a by-product or from a post-processing step, these are typically heuristic-based, rather than stemming naturally from a statistically principled estimate of the target uncertainty distribution [2].
## 2. Modelling: Bayesian Hierarchical Linear Regression with Partial Pooling
The simplest possible linear regression, not hierarchical, would assume all FVC decline curves have the same $\alpha$ and $\beta$. That's the **pooled model**. In the other extreme, we could assume a model where each patient has a personalized FVC decline curve, and **these curves are completely unrelated**. That's the **unpooled model**, where each patient has completely separate regressions. A minimal sketch of the pooled baseline is shown after the hierarchical model code below.
Here, I'll use the middle ground: **Partial pooling**. Specifically, I'll assume that while $\alpha$'s and $\beta$'s are different for each patient as in the unpooled case, **the coefficients all share similarity**. We can model this by assuming that each individual coefficient comes from a common group distribution. The image below represents this model graphically:
Mathematically, the model is described by the following equations:
\begin{align}
\mu_{\alpha} &\sim \mathcal{N}(0, 100) \\
\sigma_{\alpha} &\sim |\mathcal{N}(0, 100)| \\
\mu_{\beta} &\sim \mathcal{N}(0, 100) \\
\sigma_{\beta} &\sim |\mathcal{N}(0, 100)| \\
\alpha_i &\sim \mathcal{N}(\mu_{\alpha}, \sigma_{\alpha}) \\
\beta_i &\sim \mathcal{N}(\mu_{\beta}, \sigma_{\beta}) \\
\sigma &\sim \mathcal{N}(0, 100) \\
FVC_{ij} &\sim \mathcal{N}(\alpha_i + t \beta_i, \sigma)
\end{align}
where *t* is the time in weeks. Those are very uninformative priors, but that's ok: our model will converge!
Implementing this model in NumPyro is pretty straightforward:
```python
import numpyro
from numpyro.infer import MCMC, NUTS, Predictive
import numpyro.distributions as dist
from jax import random
```
```python
def model(PatientID, Weeks, FVC_obs=None):
μ_α = numpyro.sample("μ_α", dist.Normal(0., 100.))
σ_α = numpyro.sample("σ_α", dist.HalfNormal(100.))
μ_β = numpyro.sample("μ_β", dist.Normal(0., 100.))
σ_β = numpyro.sample("σ_β", dist.HalfNormal(100.))
unique_patient_IDs = np.unique(PatientID)
n_patients = len(unique_patient_IDs)
with numpyro.plate("plate_i", n_patients):
α = numpyro.sample("α", dist.Normal(μ_α, σ_α))
β = numpyro.sample("β", dist.Normal(μ_β, σ_β))
σ = numpyro.sample("σ", dist.HalfNormal(100.))
FVC_est = α[PatientID] + β[PatientID] * Weeks
with numpyro.plate("data", len(PatientID)):
numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```
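As mentioned above, here is a minimal sketch of the fully pooled baseline for contrast (it is not used in the rest of the tutorial): it fits a single $\alpha$ and $\beta$ shared by all patients, reusing the imports from the cell above.
```python
def pooled_model(Weeks, FVC_obs=None):
    # one shared intercept and slope for the whole population
    α = numpyro.sample("α", dist.Normal(0., 100.))
    β = numpyro.sample("β", dist.Normal(0., 100.))
    σ = numpyro.sample("σ", dist.HalfNormal(100.))
    FVC_est = α + β * Weeks
    with numpyro.plate("data", len(Weeks)):
        numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```
Swapping `pooled_model` into the same MCMC call would produce one population-level regression line, with none of the per-patient adaptation we get from partial pooling.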
That's all for modelling!
## 3. Fitting the model
A great achievement of Probabilistic Programming Languages such as NumPyro is to decouple model specification and inference. After specifying my generative model, with priors, condition statements and data likelihood, I can leave the hard work to NumPyro's inference engine.
Calling it requires just a few lines. Before we do it, let's add a numerical Patient ID for each patient code. That can be easily done with scikit-learn's LabelEncoder:
```python
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
train['PatientID'] = le.fit_transform(train['Patient'].values)
FVC_obs = train['FVC'].values
Weeks = train['Weeks'].values
PatientID = train['PatientID'].values
```
```python
numpyro.set_host_device_count(4)
```
Now, calling NumPyro's inference engine:
```python
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_samples=2000, num_warmup=2000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, PatientID, Weeks, FVC_obs=FVC_obs)
posterior_samples = mcmc.get_samples()
```
sample: 100%|██████████| 4000/4000 [00:27<00:00, 144.62it/s, 63 steps of size 1.09e-01. acc. prob=0.88]
## 4. Checking the model
### 4.1. Inspecting the learned parameters
First, let's inspect the parameters learned. To do that, I will use [ArviZ](https://arviz-devs.github.io/arviz/), which perfectly integrates with NumPyro:
```python
import arviz as az
data = az.from_numpyro(mcmc)
az.plot_trace(data, compact=True);
```
Looks like our model learned personalized alphas and betas for each patient!
### 4.2. Visualizing FVC decline curves for some patients
Now, let's visually inspect FVC decline curves predicted by our model. We will completely fill in the FVC table, predicting all missing values. The first step is to create a table to fill:
```python
pred_template = []
for i in range(train['Patient'].nunique()):
df = pd.DataFrame(columns=['PatientID', 'Weeks'])
df['Weeks'] = np.arange(-12, 134)
df['PatientID'] = i
pred_template.append(df)
pred_template = pd.concat(pred_template, ignore_index=True)
```
Predicting the missing values in the FVC table and confidence (sigma) for each value becomes really easy:
```python
PatientID = pred_template['PatientID'].values
Weeks = pred_template['Weeks'].values
predictive = Predictive(model, posterior_samples,
return_sites=['σ', 'obs'])
samples_predictive = predictive(random.PRNGKey(0),
PatientID, Weeks, None)
```
Let's now put the predictions together with the true values, to visualize them:
```python
df = pd.DataFrame(columns=['Patient', 'Weeks', 'FVC_pred', 'sigma'])
df['Patient'] = le.inverse_transform(pred_template['PatientID'])
df['Weeks'] = pred_template['Weeks']
df['FVC_pred'] = samples_predictive['obs'].T.mean(axis=1)
df['sigma'] = samples_predictive['obs'].T.std(axis=1)
df['FVC_inf'] = df['FVC_pred'] - df['sigma']
df['FVC_sup'] = df['FVC_pred'] + df['sigma']
df = pd.merge(df, train[['Patient', 'Weeks', 'FVC']],
how='left', on=['Patient', 'Weeks'])
df = df.rename(columns={'FVC': 'FVC_true'})
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Patient</th>
<th>Weeks</th>
<th>FVC_pred</th>
<th>sigma</th>
<th>FVC_inf</th>
<th>FVC_sup</th>
<th>FVC_true</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ID00007637202177411956430</td>
<td>-12</td>
<td>2220.162598</td>
<td>159.290878</td>
<td>2060.871826</td>
<td>2379.453369</td>
<td>NaN</td>
</tr>
<tr>
<th>1</th>
<td>ID00007637202177411956430</td>
<td>-11</td>
<td>2210.083496</td>
<td>157.518021</td>
<td>2052.565430</td>
<td>2367.601562</td>
<td>NaN</td>
</tr>
<tr>
<th>2</th>
<td>ID00007637202177411956430</td>
<td>-10</td>
<td>2213.199951</td>
<td>154.847916</td>
<td>2058.352051</td>
<td>2368.047852</td>
<td>NaN</td>
</tr>
<tr>
<th>3</th>
<td>ID00007637202177411956430</td>
<td>-9</td>
<td>2209.025391</td>
<td>153.300079</td>
<td>2055.725342</td>
<td>2362.325439</td>
<td>NaN</td>
</tr>
<tr>
<th>4</th>
<td>ID00007637202177411956430</td>
<td>-8</td>
<td>2203.191895</td>
<td>156.085449</td>
<td>2047.106445</td>
<td>2359.277344</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
Finally, let's see our predictions for 3 patients:
```python
def chart(patient_id, ax):
data = df[df['Patient'] == patient_id]
x = data['Weeks']
ax.set_title(patient_id)
ax.plot(x, data['FVC_true'], 'o')
ax.plot(x, data['FVC_pred'])
ax = sns.regplot(x, data['FVC_true'], ax=ax, ci=None,
line_kws={'color':'red'})
ax.fill_between(x, data["FVC_inf"], data["FVC_sup"],
alpha=0.5, color='#ffcd3c')
ax.set_ylabel('FVC')
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart('ID00007637202177411956430', axes[0])
chart('ID00009637202177434476278', axes[1])
chart('ID00011637202177653955184', axes[2])
```
The results are exactly what we expected to see! Highlight observations:
- The model adequately learned Bayesian Linear Regressions! The orange line (learned predicted FVC mean) is very inline with the red line (deterministic linear regression). But most important: it learned to predict uncertainty, showed in the light orange region (one sigma above and below the mean FVC line)
- The model predicts a higher uncertainty where the data points are more disperse (1st and 3rd patients). Conversely, where the points are closely grouped together (2nd patient), the model predicts a higher confidence (narrower light orange region)
- Finally, in all patients, we can see that the uncertainty grows as we look further into the future: the light orange region widens as the number of weeks grows!
### 4.3. Computing the modified Laplace Log Likelihood and RMSE
As mentioned earlier, the competition was evaluated on a modified version of the Laplace Log Likelihood. In medical applications, it is useful to evaluate a model's confidence in its decisions. Accordingly, the metric is designed to reflect both the accuracy and certainty of each prediction.
For each true FVC measurement, we predicted both an FVC and a confidence measure (standard deviation $\sigma$). The metric was computed as:
\begin{align}
\sigma_{clipped} &= max(\sigma, 70) \\
\delta &= min(|FVC_{true} - FVC_{pred}|, 1000) \\
metric &= -\dfrac{\sqrt{2}\delta}{\sigma_{clipped}} - \ln(\sqrt{2} \sigma_{clipped})
\end{align}
The error was thresholded at 1000 ml to avoid large errors adversely penalizing results, while the confidence values were clipped at 70 ml to reflect the approximate measurement uncertainty in FVC. The final score was calculated by averaging the metric across all (Patient, Week) pairs. Note that metric values will be negative and higher is better.
Next, we calculate the metric and RMSE:
```python
y = df.dropna()
rmse = ((y['FVC_pred'] - y['FVC_true']) ** 2).mean() ** (1/2)
print(f'RMSE: {rmse:.1f} ml')
sigma_c = y['sigma'].values
sigma_c[sigma_c < 70] = 70
delta = (y['FVC_pred'] - y['FVC_true']).abs()
delta[delta > 1000] = 1000
lll = - np.sqrt(2) * delta / sigma_c - np.log(np.sqrt(2) * sigma_c)
print(f'Laplace Log Likelihood: {lll.mean():.4f}')
```
RMSE: 122.1 ml
Laplace Log Likelihood: -6.1375
What do these numbers mean? It means if you adopted this approach, you would **outperform most of the public solutions** in the competition. Curiously, the vast majority of public solutions adopt a standard deterministic Neural Network, modelling uncertainty through a quantile loss. **Most of the people still adopt a frequentist approach**.
**Uncertainty** for single predictions becomes more and more important in machine learning and is often a requirement. **Especially when the consequences of a wrong prediction are high**, we need to know what the probability distribution of an individual prediction is. For perspective, Kaggle just launched a new competition sponsored by Lyft, to build motion prediction models for self-driving vehicles. "We ask that you predict a few trajectories for every agent **and provide a confidence score for each of them**."
Finally, I hope the great work done by Pyro/NumPyro developers help democratize Bayesian methods, empowering an ever growing community of researchers and practitioners to create models that can not only generate predictions, but also assess uncertainty in their predictions.
## References
1. Ghahramani, Z. Probabilistic machine learning and artificial intelligence. Nature 521, 452–459 (2015). https://doi.org/10.1038/nature14541
2. Rainforth, Thomas William Gamlen. Automating Inference, Learning, and Design Using Probabilistic Programming. University of Oxford, 2017.
```python
```
|
> module SequentialDecisionProblems.NonDeterministicDefaults
> import Data.List
> import Data.List.Quantifiers
> import SequentialDecisionProblems.CoreTheory
> import SequentialDecisionProblems.Utils
> import List.Operations
> import List.Properties
> %default total
> %auto_implicits off
In non-deterministic SDPs, |M = List|:
> SequentialDecisionProblems.CoreTheory.M =
> List
Thus, |M| is a functor:
> SequentialDecisionProblems.CoreTheory.fmap =
> List.Operations.fmap
, |M| is a monad:
> SequentialDecisionProblems.Utils.ret =
> List.Operations.ret
> SequentialDecisionProblems.Utils.bind =
> List.Operations.bind
Moreover, |M| is a container monad
> SequentialDecisionProblems.CoreTheory.Elem =
> Data.List.Elem
> SequentialDecisionProblems.CoreTheory.NotEmpty =
> List.Operations.NonEmpty
> SequentialDecisionProblems.CoreTheory.All =
> Data.List.Quantifiers.All
> SequentialDecisionProblems.CoreTheory.elemNotEmptySpec0 =
> List.Properties.elemNonEmptySpec0
> SequentialDecisionProblems.CoreTheory.elemNotEmptySpec1 =
> List.Properties.elemNonEmptySpec1
> SequentialDecisionProblems.CoreTheory.tagElem =
> List.Operations.tagElem
> SequentialDecisionProblems.CoreTheory.allElemSpec0 =
> List.Properties.containerMonadSpec3
and |All| and |NotEmpty| are finite and decidable:
> SequentialDecisionProblems.Utils.finiteAll =
> List.Properties.finiteAll
> SequentialDecisionProblems.Utils.finiteNotEmpty =
> List.Properties.finiteNonEmpty
> SequentialDecisionProblems.Utils.decidableAll =
> List.Properties.decidableAll
> SequentialDecisionProblems.Utils.decidableNotEmpty =
> List.Properties.decidableNonEmpty
> {-
> ---}
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommMonoid.CommMonoidProd where
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Prelude
open import Cubical.Data.Sigma
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.Semigroup
open import Cubical.Algebra.CommMonoid.Base
open CommMonoidStr
open IsCommMonoid hiding (rid ; lid)
open IsMonoid hiding (rid ; lid)
open IsSemigroup
private
variable
ℓ ℓ' : Level
CommMonoidProd : CommMonoid ℓ → CommMonoid ℓ' → CommMonoid (ℓ-max ℓ ℓ')
CommMonoidProd M N = makeCommMonoid ε× _·×_ is-set× assoc× rid× comm×
where
ε× : (fst M) × (fst N)
ε× = (ε (snd M)) , (ε (snd N))
_·×_ : (fst M) × (fst N) → (fst M) × (fst N) → (fst M) × (fst N)
(x₁ , x₂) ·× (y₁ , y₂) = (_·_ (snd M) x₁ y₁) , (_·_ (snd N) x₂ y₂)
is-set× : isSet ((fst M) × (fst N))
is-set× = isSet× (is-set (snd M)) (is-set (snd N))
assoc× : ∀ x y z → x ·× (y ·× z) ≡ (x ·× y) ·× z
assoc× _ _ _ = cong₂ (_,_) (assoc (snd M) _ _ _) (assoc (snd N) _ _ _)
rid× : ∀ x → x ·× ε× ≡ x
rid× _ = cong₂ (_,_) (rid (snd M) _) (rid (snd N) _)
comm× : ∀ x y → x ·× y ≡ y ·× x
comm× _ _ = cong₂ (_,_) (comm (snd M) _ _) (comm (snd N) _ _)
|
lemma continuous_on_avoid: fixes f :: "'a::metric_space \<Rightarrow> 'b::t1_space" assumes "continuous_on s f" and "x \<in> s" and "f x \<noteq> a" shows "\<exists>e>0. \<forall>y \<in> s. dist x y < e \<longrightarrow> f y \<noteq> a" |
module TiledArrays
import Base: to_indices, size, getindex, setindex!, view
export AbstractTiledArray
"""
AbstractTiledArray{T, N} <: AbstractArray{T, N}
Supertype for all arrays that are best indexed into in tile by tile.
"""
abstract type AbstractTiledArray{T, N} <: AbstractArray{T, N} end
include("indexing.jl")
"""
TiledArray{T, N, A<:AbstractArray{T, N}, TS<:TilingStyle}(parent::A)
Wrapper around `parent` array, making it into an [`AbstractTiledArray`](@ref) with
[`TilingStyle`](@ref) `TS`
"""
struct TiledArray{T, N, A<:AbstractArray{T, N}, TS<:TilingStyle} <: AbstractTiledArray{T, N}
parent::A
end
size(A::TiledArray) = size(A.parent)
TilingStyle(::Type{<:TiledArray{<:Any, <:Any, <:Any, TS}}) where {TS} = TS
# to_indices will take care of converting the Tile to the
# correct indices for the parent's index type.
view(A::TiledArray, t::Tile) = view(A.parent, t)
getindex(A::TiledArray, t::Tile) = getindex(A.parent, t)
include("BufferedArrays.jl")
end # module
|
'''Elevator test.'''
from discretized_lri import DLRI as DLRI
from environment import Environment
from pinger import Pinger
import numpy as np
import helpers as h
import math
num_actions = 6
env = Environment(num_actions)
dlri = DLRI(num_actions)
bestdepth = np.zeros(num_actions)
E = [0.1, 0.2, 0.4, 0.2, 0.01, 0.09]
det_obj = Pinger(E)
for k in range(5):
for j in range(1000):
        # Reset the discretized probability vector to uniform before each run.
dlri.p = np.array(h.make_dp(num_actions))
m = math.floor(num_actions / 2)
while(True):
req = det_obj.request()
resp = env.response(m, req)
if(not resp):
dlri.do_reward(m)
else:
dlri.do_penalty()
m = dlri.next_action()
if(max(dlri.p) == (num_actions * num_actions)):
                # Converged: tally the best depth, counting from 0 (the sea surface).
bestdepth[np.argmax(dlri.p)] += 1
break
# print("The best depth tally is : " + str(bestdepth))
print("Converge on depth: " + str(np.argmax(bestdepth)))
print("The probability vector is: " + str(bestdepth / sum(bestdepth)))
|