[STATEMENT]
lemma "(973646::nat) + 6475 = 980121"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 973646 + 6475 = 980121
[PROOF STEP]
by simp
|
my_quants <- function(x)
{
r <- quantile(x, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))
names(r) <- c("ymin", "lower", "middle", "upper", "ymax")
r
}
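# Hedged usage sketch (not from the original source): my_quants() returns the
# five named statistics that ggplot2's boxplot geometry expects, so it can be
# plugged into stat_summary(); `df`, `group`, and `value` are hypothetical.
# library(ggplot2)
# ggplot(df, aes(x = group, y = value)) +
#   stat_summary(fun.data = my_quants, geom = "boxplot")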
my_ggsave <- function(fname, plot, dpi = 300, ...)
{
  ggsave(paste0(fname, ".pdf"), plot = plot, useDingbats = FALSE,
         bg = "#FFFFFF", ...)
  embedFonts(file = paste0(fname, ".pdf"),
             outfile = paste0(fname, ".pdf"))
  ggsave(paste0(fname, ".png"), plot = plot, dpi = dpi, bg = "#FFFFFF",
         ...)
}
get_extras <- function(path){
# extra files
extras <- c(
"alcohol.csv",
"physical_activity.csv",
"sleep.csv",
"smoking.csv")
# build lookup tables
  # This allows us to elegantly (vectorized dict!) add extra info later.
  lookup_names <- fread(file.path(path, "names.csv"))
lookup_names <- setNames(
unlist(lookup_names[2,-1]),
unlist(lookup_names[1,-1]))
lookup_family <- setNames(
rep(c("Alcohol", "Sleep", "Smoking", "Activity"), c(16, 7, 2, 9)),
unname(lookup_names))
# read first file and do recursive join using DT syntax
  DT <- fread(file.path(path, extras[1]))
for (ii in 2:4){
    this_DT <- fread(file.path(path, extras[ii]))
DT <- DT[this_DT, on = "eid"]
}
# move to long format
  # drop the redundant variable columns introduced by the joins
  DT[, c('variable', 'i.variable', 'i.variable.1', 'i.variable.2') := NULL]
  DT <- melt(
    DT,
    id.vars = c("eid"), measure.vars = names(DT)[-1],
    variable.factor = FALSE,
    variable.name = "var_code")
  # Add new columns in place using the lookup trick, then order by individual IDs
DT[, name := lookup_names[var_code]]
DT[, family := lookup_family[name]]
setorder(DT, eid)
DT
}
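# Hedged illustration (not from the original source) of the named-vector
# "vectorized dict" lookup used above: subsetting a named vector by a
# character vector maps each element through the table. The codes below
# are hypothetical.
# lookup <- setNames(c("Alcohol", "Sleep"), c("f.1558", "f.1160"))
# lookup[c("f.1160", "f.1558")]  # returns c("Sleep", "Alcohol"), names kept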
stack_wide <- function(DT)
{
DT_wide <- data.table(expand.grid(
eid = unique(DT$eid), variable = unique(DT$variable)))
  setkey(DT_wide, eid, variable)
DT_tmp <- DT[target == 'Age'][,
.(age = true, age_pred = predicted, eid = eid, variable = variable)]
setkey(DT_tmp, eid, variable)
DT_wide[DT_tmp, `:=`(age = i.age, age_pred = i.age_pred)]
DT_tmp <- DT[variable != 'Age, Sex' & target == 'Neuroticism'][,
.(N = true, N_pred = predicted, eid = eid, variable = variable)]
setkey(DT_tmp, eid, variable)
DT_wide[DT_tmp, `:=`(N = i.N, N_pred = i.N_pred)]
DT_tmp <- DT[variable != 'Age, Sex' & target == 'Fluid intelligence'][,
.(FI = true, FI_pred = predicted, eid = eid, variable = variable)]
setkey(DT_tmp, eid, variable)
DT_wide[DT_tmp, `:=`(FI = i.FI, FI_pred = i.FI_pred)]
DT_tmp <- DT[variable == 'Age, Sex' & target == 'Neuroticism'][,
.(N = true, N_pred = predicted, eid = eid, variable = variable)]
setkey(DT_tmp, eid, variable)
DT_wide[DT_tmp, `:=`(N = i.N, N_pred = i.N_pred)]
DT_tmp <- DT[variable == 'Age, Sex' & target == 'Fluid intelligence'][,
.(FI = true, FI_pred = predicted, eid = eid, variable = variable)]
setkey(DT_tmp, eid, variable)
DT_wide[DT_tmp, `:=`(FI = i.FI, FI_pred = i.FI_pred)]
DT_wide
}
get_extras2 <- function(path)
{
# extra files
extras <- c(
"alcohol.csv",
"physical_activity.csv",
"sleep.csv",
"smoking.csv")
summary_vars <- list(
"Alcohol" = c(
"Average weekly intake of other alcoholic drinks",
"Average weekly fortified wine intake",
"Average weekly spirits intake",
"Average weekly beer plus cider intake",
"Average weekly champagne plus white wine intake",
"Average weekly red wine intake"),
"Activity" = c("Summed MET minutes per week for all activity"),
"Sleep" = c("Sleep duration"),
"Smoking" = c("Pack years of smoking"))
summary_names <- setNames(
c("Summed weekly drinks", "Summed MET actvity", "Sleep duration",
"Pack years"),
names(summary_vars)
)
  # build lookup tables
  # This allows us to elegantly (vectorized dict!) add extra info later.
  lookup_names <- fread(file.path(path, "names.csv"))
lookup_codes <- setNames(
unlist(lookup_names[1,-1]),
unlist(lookup_names[2,-1]))
lookup_family <- setNames(
rep(c("Alcohol", "Sleep", "Smoking", "Activity"), c(16, 7, 2, 9)),
names(lookup_codes))
  # read each file, compute its summary variable, and stack the results
  DTS <- list()
  for (ii in seq_along(extras)){
    DT <- fread(file.path(path, extras[ii]))
family <- names(summary_vars)[[ii]]
cols <- summary_vars[[family]]
sel <- unname(lookup_codes[cols])
    DT <- DT[, c("eid", sel), with = FALSE]
    # set negative values (missing-data codes) to NA
    for(j in seq_along(DT)[-1]){
      set(DT, i = which(DT[[j]] < 0), j = j, value = NA)
    }
    # find rows whose inputs are all NA
    na_index <- rowSums(is.na(DT[, sel, with = FALSE])) == length(sel)
    # compute row sums across the inputs
    DT[, value := rowSums(.SD, na.rm = TRUE), .SDcols = sel]
    # rowSums() yields 0 for all-NA rows; set those back to NA
    DT$value[na_index] <- NA
    DT[, family := family] # set family
    DT[, name := summary_names[family]]
    DT <- DT[, -sel, with = FALSE] # kick out the input columns
    DTS[[ii]] <- DT # append to list
}
DTS <- do.call(rbind, DTS)
DTS[, eid := factor(eid)]
setorder(DTS, eid)
DTS
}
fit_train_deconfound <- function(train, test, var, confound,
default.fml = "%s ~ poly(%s, degree = 2)")
{
# fit confounding model on train set
train <- na.omit(train[, c(confound, var), with = F])
var_s <- paste0(var, "_s")
conf_s <- paste0(confound, "_s")
train[[var_s]] <- scale(train[[var]])
train[[conf_s]] <- scale(train[[confound]])
fml <- as.formula(
sprintf(default.fml, var_s, conf_s))
mod_confound <- lm(fml, train)
# apply on test set
test[[var_s]] <- scale(test[[var]],
attr(train[[var_s]], "scaled:center"),
attr(train[[var_s]], "scaled:scale"))
test[[conf_s]] <- scale(test[[confound]],
attr(train[[conf_s]], "scaled:center"),
attr(train[[conf_s]], "scaled:scale"))
attributes(test[[var_s]]) <- NULL
attributes(test[[conf_s]]) <- NULL
list(pred = predict(mod_confound, test),
mod = mod_confound)
}
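# Hedged usage sketch (hypothetical data.tables and column names, not from
# the original source):
# res <- fit_train_deconfound(train = DT_train, test = DT_test,
#                             var = "score", confound = "age")
# res$pred  # confound-explained component of the scaled test variable
# res$mod   # the fitted lm; inspect with summary(res$mod)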
read_file_parts <- function(fname, path){
  # find all part files, e.g. "data_1.csv", "data_2.csv", for base name "data.csv"
  stem <- sub("\\.csv$", "", fname)
  fnames <- list.files(path, paste0(stem, "_"), full.names = TRUE)
  rbindlist(lapply(fnames, fread))
}
|
\section{Design} \mdseries
BrainGrid has recently been refactored and its data reorganized in order to simplify the implementation of additional models. The original legacy code was organized in an object-oriented fashion, with multiple simulator/network objects branching out into a complicated collection of files and methods. This made implementing an additional model incredibly complex: doing so required modifying half a dozen files, and many of those changes ended up being redundant.
\begin{figure}
\centering
\includegraphics[width=.6\textwidth]{./diagrams/OldDiagram.png}
\caption{BrainGrid's old design}
\end{figure}
\pagebreak
\noindent \mdseries The refactoring process reorganized the data structures of the legacy code into a simpler program structure that separates model-dependent code from model-independent code. The new code also strips away some of the object-oriented structure of the neurons and synapses and instead uses a data-centric design, in which two structs serve as the containers for all neuron and synapse data. This structure was originally designed for the GPU implementation of the simulator; the refactored simulator now uses it for all other implementations as well, which simplifies the transition from single-threaded to multi-threaded execution.
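\noindent \mdseries As a minimal, hypothetical sketch of this data-centric layout (the field names are illustrative, not BrainGrid's actual members), all per-neuron state lives in parallel arrays inside a single container rather than in individual neuron objects:
\begin{verbatim}
// Illustrative sketch only: one struct holds all neuron data as parallel
// arrays, so the same layout serves both CPU and GPU implementations.
struct AllNeurons {
    int    size;      // number of neurons
    float *Vm;        // membrane potentials, one entry per neuron
    float *Vthresh;   // spiking thresholds, one entry per neuron
    bool  *hasFired;  // spike flags, one entry per neuron
};
\end{verbatim}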
\begin{figure}
\centering
\includegraphics[width=\textwidth]{./diagrams/NewDiagram.png}
\caption{BrainGrid's new design}
\end{figure}
\pagebreak
|
(* From "A Coq proof that Univalence Axioms implies Functional
Extensionality" by Andrej Bauer and Peter LeFanu Lumsdaine *)
(* Since eta conversion for functions is now judgmental in Coq, a big
part of the original proof is not needed anymore. *)
Inductive paths {A} : A -> A -> Type := idpath : forall x, paths x x.
Notation "x == y" := (paths x y) (at level 70).
Hint Resolve @idpath.
Ltac path_induction :=
intros; repeat progress (
match goal with
| [p : _ == _ |- _] => induction p
| _ => idtac
end); auto.
Definition concat {A} {x y z : A} : x == y -> y == z -> x == z.
Proof.
path_induction.
Qed.
Notation "p @ q" := (concat p q) (at level 60).
Ltac path_via x := apply @concat with (y := x); auto.
Lemma map {A B} {x y : A} (f : A -> B) (p : x == y) : f x == f y.
Proof.
path_induction.
Qed.
Definition contractible A := {x : A & forall y : A, y == x}.
Definition hfiber {A B} (f : A -> B) (y : B) := {x : A & f x == y}.
Definition is_wequiv {A B} (f : A -> B) := forall y : B, contractible (hfiber f y).
Inductive is_sequiv {A : Type} : forall {B}, (A -> B) -> Type :=
| is_sequiv_id : is_sequiv (fun x : A => x).
Definition sequiv A B := {s : A -> B & is_sequiv s}.
Definition sequiv_coerce_to_function A B (s : sequiv A B) : A -> B := projT1 s.
Coercion sequiv_coerce_to_function : sequiv >-> Funclass.
Lemma seq_injective U V : forall (s : sequiv U V) x y, s x == s y -> x == y.
Proof.
intros (s, H) x y p.
destruct H.
exact p.
Qed.
Theorem seq_exponential : forall {A B} (s : sequiv A B) C, sequiv (C -> A) (C -> B).
Proof.
intros A B (s, H) C.
exists (fun h x => s (h x)).
destruct H.
apply is_sequiv_id.
Defined.
Definition path_pred A := fun xy : A * A => fst xy == snd xy.
Definition path_space A := sigT (path_pred A).
Axiom is_weq_to_seq : forall A B (f : A -> B), is_wequiv f -> is_sequiv f.
Definition src A : sequiv (path_space A) A.
Proof.
exists (fun p => fst (projT1 p)).
apply is_weq_to_seq.
intros x.
eexists (existT _ (existT (fun (xy : A * A) => fst xy == snd xy) (x, x) (idpath x)) _).
intros [[[u v] p] q].
unfold path_pred in p.
simpl in * |- *.
destruct q, p.
apply idpath.
Defined.
Definition trg A : sequiv (path_space A) A.
Proof.
exists (fun p => snd (projT1 p)).
apply is_weq_to_seq.
intros x.
eexists (existT _ (existT (fun (xy : A * A) => fst xy == snd xy) (x, x) (idpath x)) _).
intros [[[u v] p] q].
unfold path_pred in p.
simpl in * |- *.
destruct q, p.
apply idpath.
Defined.
Theorem extensionality {A B : Type} (f g : A -> B) : (forall x, f x == g x) -> f == g.
Proof.
intro p.
pose (d := fun x : A => existT (path_pred B) (f x, f x) (idpath (f x))).
pose (e := fun x : A => existT (path_pred B) (f x, g x) (p x)).
pose (src_compose := seq_exponential (src B) A).
pose (trg_compose := seq_exponential (trg B) A).
path_via (projT1 trg_compose e).
path_via (projT1 trg_compose d).
apply map.
apply seq_injective with (s := src_compose).
apply idpath.
Qed.
|
[STATEMENT]
lemma ldc_tuple [simp]: "ldc (C,Cs) = last Cs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ldc (C, Cs) = last Cs
[PROOF STEP]
by(simp add:ldc_def)
|
//
// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BEAST_HTTP_BASIC_PARSER_v1_HPP
#define BEAST_HTTP_BASIC_PARSER_v1_HPP
#include <beast/config.hpp>
#include <beast/http/message.hpp>
#include <beast/http/parse_error.hpp>
#include <beast/http/rfc7230.hpp>
#include <beast/http/detail/basic_parser_v1.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/assert.hpp>
#include <array>
#include <climits>
#include <cstdint>
#include <type_traits>
namespace beast {
namespace http {
/** Parse flags
The set of parser bit flags are returned by @ref basic_parser_v1::flags.
*/
enum parse_flag
{
chunked = 1,
connection_keep_alive = 2,
connection_close = 4,
connection_upgrade = 8,
trailing = 16,
upgrade = 32,
skipbody = 64,
contentlength = 128,
paused = 256
};
/** Body maximum size option.
Sets the maximum number of cumulative bytes allowed including
all body octets. Octets in chunk-encoded bodies are counted
after decoding. A value of zero indicates no limit on
the number of body octets.
The default body maximum size for requests is 4MB (four
megabytes or 4,194,304 bytes) and unlimited for responses.
@note Objects of this type are used with @ref basic_parser_v1::set_option.
*/
struct body_max_size
{
std::size_t value;
explicit
body_max_size(std::size_t v)
: value(v)
{
}
};
/** Header maximum size option.
Sets the maximum number of cumulative bytes allowed
including all header octets. A value of zero indicates
no limit on the number of header octets.
The default header maximum size is 16KB (16,384 bytes).
@note Objects of this type are used with @ref basic_parser_v1::set_option.
*/
struct header_max_size
{
std::size_t value;
explicit
header_max_size(std::size_t v)
: value(v)
{
}
};
/** A value indicating how the parser should treat the body.
This value is returned from the `on_body_what` callback in
the derived class. It controls what the parser does next
in terms of the message body.
*/
enum class body_what
{
/** The parser should expect a body, keep reading.
*/
normal,
/** Skip parsing of the body.
When returned by `on_body_what` this causes parsing to
complete and control to return to the caller. This
could be used when sending a response to a HEAD
request, for example.
*/
skip,
/** The message represents an UPGRADE request.
When returned by `on_body_what` this causes parsing
to complete and control to return to the caller.
*/
upgrade,
/** Suspend parsing before reading the body.
When returned by `on_body_what` this causes parsing
to pause. Control is returned to the caller, and the
parser state is preserved such that a subsequent call
to the parser will begin reading the message body.
This could be used by callers to inspect the HTTP
header before committing to read the body. For example,
to choose the body type based on the fields. Or to
respond to an Expect: 100-continue request.
*/
pause
};
/// The value returned when no content length is known or applicable.
static std::uint64_t constexpr no_content_length =
(std::numeric_limits<std::uint64_t>::max)();
/** A parser for decoding HTTP/1 wire format messages.
This parser is designed to efficiently parse messages in the
HTTP/1 wire format. It allocates no memory and uses minimal
state. It will handle chunked encoding and it understands the
semantics of the Connection and Content-Length header fields.
The interface uses CRTP (Curiously Recurring Template Pattern).
To use this class, derive from basic_parser. When bytes are
presented, the implementation will make a series of zero or
more calls to derived class member functions (referred to as
"callbacks" from here on) matching a specific signature.
Every callback must be provided by the derived class, or else
a compilation error will be generated. This exemplar shows
the signature and description of the callbacks required in
the derived class.
@code
template<bool isRequest>
struct exemplar : basic_parser_v1<isRequest, exemplar>
{
// Called when the first valid octet of a new message is received
//
void on_start(error_code&);
// Called for each piece of the Request-Method
//
void on_method(boost::string_ref const&, error_code&);
// Called for each piece of the Request-URI
//
void on_uri(boost::string_ref const&, error_code&);
// Called for each piece of the reason-phrase
//
void on_reason(boost::string_ref const&, error_code&);
// Called after the entire Request-Line has been parsed successfully.
//
void on_request(error_code&);
// Called after the entire Response-Line has been parsed successfully.
//
void on_response(error_code&);
// Called for each piece of the current header field.
//
void on_field(boost::string_ref const&, error_code&);
// Called for each piece of the current header value.
//
void on_value(boost::string_ref const&, error_code&);
// Called when the entire header has been parsed successfully.
//
void
on_header(std::uint64_t content_length, error_code&);
// Called after on_header, before the body is parsed
//
body_what
on_body_what(std::uint64_t content_length, error_code&);
// Called for each piece of the body.
//
// If the header indicates chunk encoding, the chunk
// encoding is removed from the buffer before being
// passed to the callback.
//
void on_body(boost::string_ref const&, error_code&);
// Called when the entire message has been parsed successfully.
// At this point, @ref complete returns `true`, and the parser
// is ready to parse another message if `keep_alive` would
// return `true`.
//
void on_complete(error_code&) {}
};
@endcode
The return value of `on_body_what` is special: it controls
whether or not the parser should expect a body. See @ref body_what
for choices of the return value.
If a callback sets an error, parsing stops at the current octet
and the error is returned to the caller. Callbacks must not throw
exceptions.
@tparam isRequest A `bool` indicating whether the parser will be
presented with a request or a response message.
@tparam Derived The derived class type. This is part of the
Curiously Recurring Template Pattern interface.
*/
template<bool isRequest, class Derived>
class basic_parser_v1 : public detail::parser_base
{
private:
template<bool, class>
friend class basic_parser_v1;
using self = basic_parser_v1;
typedef void(self::*pmf_t)(error_code&, boost::string_ref const&);
enum field_state : std::uint8_t
{
h_general = 0,
h_C,
h_CO,
h_CON,
h_matching_connection,
h_matching_proxy_connection,
h_matching_content_length,
h_matching_transfer_encoding,
h_matching_upgrade,
h_connection,
h_content_length0,
h_content_length,
h_content_length_ows,
h_transfer_encoding,
h_upgrade,
h_matching_transfer_encoding_chunked,
h_matching_transfer_encoding_general,
h_matching_connection_keep_alive,
h_matching_connection_close,
h_matching_connection_upgrade,
h_transfer_encoding_chunked,
h_transfer_encoding_chunked_ows,
h_connection_keep_alive,
h_connection_keep_alive_ows,
h_connection_close,
h_connection_close_ows,
h_connection_upgrade,
h_connection_upgrade_ows,
h_connection_token,
h_connection_token_ows
};
std::size_t h_max_;
std::size_t h_left_;
std::size_t b_max_;
std::size_t b_left_;
std::uint64_t content_length_;
pmf_t cb_;
state s_ : 8;
unsigned fs_ : 8;
unsigned pos_ : 8; // position in field state
unsigned http_major_ : 16;
unsigned http_minor_ : 16;
unsigned status_code_ : 16;
unsigned flags_ : 9;
bool upgrade_ : 1; // true if parser exited for upgrade
public:
/// Default constructor
basic_parser_v1();
/// Copy constructor.
template<class OtherDerived>
basic_parser_v1(basic_parser_v1<
isRequest, OtherDerived> const& other);
/// Copy assignment.
template<class OtherDerived>
basic_parser_v1& operator=(basic_parser_v1<
isRequest, OtherDerived> const& other);
/** Set options on the parser.
@param args One or more parser options to set.
*/
#if GENERATING_DOCS
template<class... Args>
void
set_option(Args&&... args)
#else
template<class A1, class A2, class... An>
void
set_option(A1&& a1, A2&& a2, An&&... an)
#endif
{
set_option(std::forward<A1>(a1));
set_option(std::forward<A2>(a2),
std::forward<An>(an)...);
}
/// Set the header maximum size option
void
set_option(header_max_size const& o)
{
h_max_ = o.value;
h_left_ = h_max_;
}
/// Set the body maximum size option
void
set_option(body_max_size const& o)
{
b_max_ = o.value;
b_left_ = b_max_;
}
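    // Hedged usage sketch (hypothetical derived parser type and limits):
    // options can be set singly or combined via the variadic overload, e.g.
    //   my_parser p;
    //   p.set_option(header_max_size{8 * 1024}, body_max_size{1024 * 1024});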
/// Returns internal flags associated with the parser.
unsigned
flags() const
{
return flags_;
}
/** Returns `true` if the message end is indicated by eof.
This function returns true if the semantics of the message require
that the end of the message is signaled by an end of file. For
example, if the message is a HTTP/1.0 message and the Content-Length
is unspecified, the end of the message is indicated by an end of file.
@return `true` if write_eof must be used to indicate the message end.
*/
bool
needs_eof() const
{
return needs_eof(
std::integral_constant<bool, isRequest>{});
}
/** Returns the major HTTP version number.
Examples:
* Returns 1 for HTTP/1.1
* Returns 1 for HTTP/1.0
@return The HTTP major version number.
*/
unsigned
http_major() const
{
return http_major_;
}
/** Returns the minor HTTP version number.
Examples:
* Returns 1 for HTTP/1.1
* Returns 0 for HTTP/1.0
@return The HTTP minor version number.
*/
unsigned
http_minor() const
{
return http_minor_;
}
/** Returns `true` if the message is an upgrade message.
A value of `true` indicates that the parser has successfully
completed parsing a HTTP upgrade message.
@return `true` if the message is an upgrade message.
*/
bool
upgrade() const
{
return upgrade_;
}
/** Returns the numeric HTTP Status-Code of a response.
@return The Status-Code.
*/
unsigned
status_code() const
{
return status_code_;
}
/** Returns `true` if the connection should be kept open.
@note This function is only valid to call when the parser
is complete.
*/
bool
keep_alive() const;
/** Returns `true` if the parse has completed successfully.
When the parse has completed successfully, and the semantics
of the parsed message indicate that the connection is still
active, a subsequent call to `write` will begin parsing a
new message.
@return `true` if the parsing has completed successfully.
*/
bool
complete() const
{
return
s_ == s_restart ||
s_ == s_closed_complete ||
(flags_ & parse_flag::paused);
}
/** Write a sequence of buffers to the parser.
@param buffers An object meeting the requirements of
ConstBufferSequence that represents the input sequence.
@param ec Set to the error, if any error occurred.
@return The number of bytes consumed in the input sequence.
*/
template<class ConstBufferSequence>
#if GENERATING_DOCS
std::size_t
#else
typename std::enable_if<
! std::is_convertible<ConstBufferSequence,
boost::asio::const_buffer>::value,
std::size_t>::type
#endif
write(ConstBufferSequence const& buffers, error_code& ec);
/** Write a single buffer of data to the parser.
@param buffer The buffer to write.
@param ec Set to the error, if any error occurred.
@return The number of bytes consumed in the buffer.
*/
std::size_t
write(boost::asio::const_buffer const& buffer, error_code& ec);
/** Called to indicate the end of file.
HTTP needs to know where the end of the stream is. For example,
sometimes servers send responses without Content-Length and
expect the client to consume input (for the body) until EOF.
Callbacks and errors will still be processed as usual.
@note This is typically called when a socket read returns eof.
*/
void
write_eof(error_code& ec);
protected:
/** Reset the parsing state.
The state of the parser is reset to expect the beginning of
a new request or response. The old state is discarded.
*/
void
reset();
private:
Derived&
impl()
{
return *static_cast<Derived*>(this);
}
void
reset(std::true_type)
{
s_ = s_req_start;
}
void
reset(std::false_type)
{
s_ = s_res_start;
}
void
init(std::true_type)
{
// Request: 16KB max header, 4MB max body
h_max_ = 16 * 1024;
b_max_ = 4 * 1024 * 1024;
}
void
init(std::false_type)
{
// Response: 16KB max header, unlimited body
h_max_ = 16 * 1024;
b_max_ = 0;
}
void
init()
{
init(std::integral_constant<bool, isRequest>{});
reset();
}
bool
needs_eof(std::true_type) const;
bool
needs_eof(std::false_type) const;
template<class T, class = beast::detail::void_t<>>
struct check_on_start : std::false_type {};
template<class T>
struct check_on_start<T, beast::detail::void_t<decltype(
std::declval<T>().on_start(
std::declval<error_code&>())
)>> : std::true_type { };
template<class T, class = beast::detail::void_t<>>
struct check_on_method : std::false_type {};
template<class T>
struct check_on_method<T, beast::detail::void_t<decltype(
std::declval<T>().on_method(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_uri : std::false_type {};
template<class T>
struct check_on_uri<T, beast::detail::void_t<decltype(
std::declval<T>().on_uri(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_reason : std::false_type {};
template<class T>
struct check_on_reason<T, beast::detail::void_t<decltype(
std::declval<T>().on_reason(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_request : std::false_type {};
template<class T>
struct check_on_request<T, beast::detail::void_t<decltype(
std::declval<T>().on_request(
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_response : std::false_type {};
template<class T>
struct check_on_response<T, beast::detail::void_t<decltype(
std::declval<T>().on_response(
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_field : std::false_type {};
template<class T>
struct check_on_field<T, beast::detail::void_t<decltype(
std::declval<T>().on_field(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_value : std::false_type {};
template<class T>
struct check_on_value<T, beast::detail::void_t<decltype(
std::declval<T>().on_value(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_headers : std::false_type {};
template<class T>
struct check_on_headers<T, beast::detail::void_t<decltype(
std::declval<T>().on_header(
std::declval<std::uint64_t>(),
std::declval<error_code&>())
)>> : std::true_type {};
// VFALCO Can we use std::is_detected? Is C++11 capable?
template<class C>
class check_on_body_what_t
{
template<class T, class R = std::is_convertible<decltype(
std::declval<T>().on_body_what(
std::declval<std::uint64_t>(),
std::declval<error_code&>())),
body_what>>
static R check(int);
template<class>
static std::false_type check(...);
using type = decltype(check<C>(0));
public:
static bool const value = type::value;
};
template<class C>
using check_on_body_what =
std::integral_constant<bool, check_on_body_what_t<C>::value>;
template<class T, class = beast::detail::void_t<>>
struct check_on_body : std::false_type {};
template<class T>
struct check_on_body<T, beast::detail::void_t<decltype(
std::declval<T>().on_body(
std::declval<boost::string_ref>(),
std::declval<error_code&>())
)>> : std::true_type {};
template<class T, class = beast::detail::void_t<>>
struct check_on_complete : std::false_type {};
template<class T>
struct check_on_complete<T, beast::detail::void_t<decltype(
std::declval<T>().on_complete(
std::declval<error_code&>())
)>> : std::true_type {};
void call_on_start(error_code& ec)
{
static_assert(check_on_start<Derived>::value,
"on_start requirements not met");
impl().on_start(ec);
}
void call_on_method(error_code& ec,
boost::string_ref const& s, std::true_type)
{
static_assert(check_on_method<Derived>::value,
"on_method requirements not met");
if(h_max_ && s.size() > h_left_)
{
ec = parse_error::header_too_big;
return;
}
h_left_ -= s.size();
impl().on_method(s, ec);
}
void call_on_method(error_code&,
boost::string_ref const&, std::false_type)
{
}
void call_on_method(error_code& ec,
boost::string_ref const& s)
{
call_on_method(ec, s,
std::integral_constant<bool, isRequest>{});
}
void call_on_uri(error_code& ec,
boost::string_ref const& s, std::true_type)
{
static_assert(check_on_uri<Derived>::value,
"on_uri requirements not met");
if(h_max_ && s.size() > h_left_)
{
ec = parse_error::header_too_big;
return;
}
h_left_ -= s.size();
impl().on_uri(s, ec);
}
void call_on_uri(error_code&,
boost::string_ref const&, std::false_type)
{
}
void call_on_uri(error_code& ec,
boost::string_ref const& s)
{
call_on_uri(ec, s,
std::integral_constant<bool, isRequest>{});
}
void call_on_reason(error_code& ec,
boost::string_ref const& s, std::true_type)
{
static_assert(check_on_reason<Derived>::value,
"on_reason requirements not met");
if(h_max_ && s.size() > h_left_)
{
ec = parse_error::header_too_big;
return;
}
h_left_ -= s.size();
impl().on_reason(s, ec);
}
void call_on_reason(error_code&,
boost::string_ref const&, std::false_type)
{
}
void call_on_reason(error_code& ec, boost::string_ref const& s)
{
call_on_reason(ec, s,
std::integral_constant<bool, ! isRequest>{});
}
void call_on_request(error_code& ec, std::true_type)
{
static_assert(check_on_request<Derived>::value,
"on_request requirements not met");
impl().on_request(ec);
}
void call_on_request(error_code&, std::false_type)
{
}
void call_on_request(error_code& ec)
{
call_on_request(ec,
std::integral_constant<bool, isRequest>{});
}
void call_on_response(error_code& ec, std::true_type)
{
static_assert(check_on_response<Derived>::value,
"on_response requirements not met");
impl().on_response(ec);
}
void call_on_response(error_code&, std::false_type)
{
}
void call_on_response(error_code& ec)
{
call_on_response(ec,
std::integral_constant<bool, ! isRequest>{});
}
void call_on_field(error_code& ec,
boost::string_ref const& s)
{
static_assert(check_on_field<Derived>::value,
"on_field requirements not met");
if(h_max_ && s.size() > h_left_)
{
ec = parse_error::header_too_big;
return;
}
h_left_ -= s.size();
impl().on_field(s, ec);
}
void call_on_value(error_code& ec,
boost::string_ref const& s)
{
static_assert(check_on_value<Derived>::value,
"on_value requirements not met");
if(h_max_ && s.size() > h_left_)
{
ec = parse_error::header_too_big;
return;
}
h_left_ -= s.size();
impl().on_value(s, ec);
}
void
call_on_headers(error_code& ec)
{
static_assert(check_on_headers<Derived>::value,
"on_header requirements not met");
impl().on_header(content_length_, ec);
}
body_what
call_on_body_what(error_code& ec)
{
static_assert(check_on_body_what<Derived>::value,
"on_body_what requirements not met");
return impl().on_body_what(content_length_, ec);
}
void call_on_body(error_code& ec,
boost::string_ref const& s)
{
static_assert(check_on_body<Derived>::value,
"on_body requirements not met");
if(b_max_ && s.size() > b_left_)
{
ec = parse_error::body_too_big;
return;
}
b_left_ -= s.size();
impl().on_body(s, ec);
}
void call_on_complete(error_code& ec)
{
static_assert(check_on_complete<Derived>::value,
"on_complete requirements not met");
impl().on_complete(ec);
}
};
} // http
} // beast
#include <beast/http/impl/basic_parser_v1.ipp>
#endif
|
# US Production Data for RBC Modeling
```python
import pandas as pd
import numpy as np
import fredpy as fp
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
pd.plotting.register_matplotlib_converters()
```
```python
# Load API key
fp.api_key = fp.load_api_key('fred_api_key.txt')
# Download nominal GDP, nominal personal consumption expenditures, nominal
# gross private domestic investment, the GDP deflator, and an index of hours
# worked in the nonfarm business sector produced by the BLS. All data are
# from FRED and are quarterly.
gdp = fp.series('GDP')
cons = fp.series('PCEC')
invest = fp.series('GPDI')
hours = fp.series('HOANBS')
defl = fp.series('GDPDEF')
pcec = fp.series('PCEC')
m2 = fp.series('M2SL')
tb3mo = fp.series('TB3MS')
unemp = fp.series('UNRATE')
# Convert monthly M2, 3-mo T-Bill, and unemployment to quarterly
m2 = m2.as_frequency('Q')
tb3mo = tb3mo.as_frequency('Q')
unemp = unemp.as_frequency('Q')
# Convert unemployment and t-bill data to decimals instead of percents
unemp.data = unemp.data/100
tb3mo.data = tb3mo.data/100
# pcec inflation as percent change over the past year
pcec = pcec.apc()
pcec.data = pcec.data/100
# Make sure that all of the downloaded series have the same data ranges
gdp,cons,invest,hours,defl,pcec,m2,tb3mo,unemp = fp.window_equalize([gdp,cons,invest,hours,defl,pcec,m2,tb3mo,unemp])
# Compute real GDP, real consumption, real investment
gdp.data = gdp.data/defl.data*100
cons.data = cons.data/defl.data*100
invest.data = invest.data/defl.data*100
m2.data = m2.data/defl.data*100
# Print units
print('Hours units: ',hours.units)
print('Deflator units:',defl.units)
```
Hours units: Index 2012=100
Deflator units: Index 2012=100
Next, compute the quarterly capital stock series for the US using the perpetual inventory method. The discrete-time Solow growth model is given by:
\begin{align}
Y_t & = A_tK_t^{\alpha}L_t^{1-\alpha} \tag{1}\\
C_t & = (1-s)Y_t \tag{2}\\
Y_t & = C_t + I_t \tag{3}\\
K_{t+1} & = I_t + (1-\delta)K_t \tag{4}\\
A_{t+1} & = (1+g)A_t \tag{5}\\
L_{t+1} & = (1+n)L_t \tag{6}.
\end{align}
Here the model is assumed to be quarterly so $n$ is the *quarterly* growth rate of labor hours, $g$ is the *quarterly* growth rate of TFP, and $\delta$ is the *quarterly* rate of depreciation of the capital stock. Given a value of the quarterly depreciation rate $\delta$, an investment series $I_t$, and an initial capital stock $K_0$, the law of motion for the capital stock, Equation (4), can be used to compute an implied capital series. But we don't know $K_0$ or $\delta$ so we'll have to *calibrate* these values using statistics computed from the data that we've already obtained.
Let lowercase letters denote a variable that's been divided by $A_t^{1/(1-\alpha)}L_t$. E.g.,
\begin{align}
y_t = \frac{Y_t}{A_t^{1/(1-\alpha)}L_t}\tag{7}
\end{align}
Then (after substituting consumption from the model), the scaled version of the model can be written as:
\begin{align}
y_t & = k_t^{\alpha} \tag{8}\\
i_t & = sy_t \tag{9}\\
k_{t+1} & = i_t + (1-\delta-n-g')k_t,\tag{10}
\end{align}
where $g' = g/(1-\alpha)$ is the growth rate of $A_t^{1/(1-\alpha)}$. In the steady state:
\begin{align}
k & = \left(\frac{s}{\delta+n+g'}\right)^{\frac{1}{1-\alpha}} \tag{11}
\end{align}
which means that the ratio of capital to output is constant:
\begin{align}
\frac{k}{y} & = \frac{s}{\delta+n+g'} \tag{12}
\end{align}
and therefore the steady state ratio of depreciation to output is:
\begin{align}
\overline{\delta K/ Y} & = \frac{\delta s}{\delta + n + g'} \tag{13}
\end{align}
where $\overline{\delta K/ Y}$ is the long-run average ratio of depreciation to output. We can use Equation (13) to calibrate $\delta$ given $\overline{\delta K/ Y}$, $s$, $n$, and $g'$.
Furthermore, in the steady state, the growth rate of output is constant:
\begin{align}
\frac{\Delta Y}{Y} & = n + g' \tag{14}
\end{align}
1. Assume $\alpha = 0.35$.
2. Calibrate $s$ as the average ratio of investment to GDP.
3. Calibrate $n$ as the average quarterly growth rate of labor hours.
4. Calibrate $g'$ as the average quarterly growth rate of real GDP minus $n$.
5. Calculate the average ratio of depreciation to GDP $\overline{\delta K/ Y}$ and use the result to calibrate $\delta$. That is, find the average ratio of Current-Cost Depreciation of Fixed Assets (FRED series ID: M1TTOTL1ES000) to GDP (FRED series ID: GDPA). Then calibrate $\delta$ from the following steady state relationship:
\begin{align}
\delta & = \frac{\left( \overline{\delta K/ Y} \right)\left(n + g' \right)}{s - \left( \overline{\delta K/ Y} \right)} \tag{15}
\end{align}
6. Calibrate $K_0$ by assuming that the capital stock is initially equal to its steady state value:
\begin{align}
K_0 & = \left(\frac{s}{\delta + n + g'}\right) Y_0 \tag{16}
\end{align}
Then, armed with calibrated values for $K_0$ and $\delta$, compute $K_1, K_2, \ldots$ recursively. See Timothy Kehoe's notes for more information on the perpetual inventory method:
http://users.econ.umn.edu/~tkehoe/classes/GrowthAccountingNotes.pdf
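As a quick, hedged sanity check of Equation (15), here is a numerical example with made-up values (not the calibrated US numbers computed below):
```python
# Illustrative only: made-up values, not the calibrated US numbers
s_ex = 0.20      # saving rate
ng_ex = 0.005    # quarterly n + g'
dky_ex = 0.13    # long-run depreciation-to-output ratio
delta_ex = dky_ex*ng_ex/(s_ex - dky_ex)
print('quarterly delta: ', round(delta_ex, 5))    # ~0.00929
print('annualized delta:', round(4*delta_ex, 5))  # ~0.03714
```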
```python
# Set the capital share of income
alpha = 0.35
# Average saving rate
s = np.mean(invest.data/gdp.data)
# Average quarterly labor hours growth rate
n = (hours.data[-1]/hours.data[0])**(1/(len(hours.data)-1)) - 1
# Average quarterly real GDP growth rate
g = ((gdp.data[-1]/gdp.data[0])**(1/(len(gdp.data)-1)) - 1) - n
# Compute annual depreciation rate
depA = fp.series('M1TTOTL1ES000')
gdpA = fp.series('GDPA')
gdpA = gdpA.window([gdp.data.index[0],gdp.data.index[-1]])
gdpA,depA = fp.window_equalize([gdpA,depA])
deltaKY = np.mean(depA.data/gdpA.data)
delta = (n+g)*deltaKY/(s-deltaKY)
# print calibrated values:
print('Avg saving rate: ',round(s,5))
print('Avg annual labor growth:',round(4*n,5))
print('Avg annual gdp growth: ',round(4*g,5))
print('Avg annual dep rate: ',round(4*delta,5))
# Construct the capital series. Note that the GDP and investment data are reported at annualized rates,
# so divide by 4 to get quarterly values.
capital = np.zeros(len(gdp.data))
capital[0] = gdp.data[0]/4*s/(n+g+delta)
for t in range(len(gdp.data)-1):
capital[t+1] = invest.data[t]/4 + (1-delta)*capital[t]
# Save in a fredpy series
capital = fp.to_fred_series(data = capital,dates =gdp.data.index,units = gdp.units,title='Capital stock of the US',frequency='Quarterly')
# plot the computed capital series
plt.plot(capital.data.index,capital.data,'-',lw=3,alpha = 0.7)
plt.ylabel(capital.units)
plt.title(capital.title)
plt.grid()
```
```python
# Compute TFP
tfp = gdp.data/capital.data**alpha/hours.data**(1-alpha)
tfp = fp.to_fred_series(data = tfp,dates =gdp.data.index,units = gdp.units,title='TFP of the US',frequency='Quarterly')
# Plot the computed capital series
plt.plot(tfp.data.index,tfp.data,'-',lw=3,alpha = 0.7)
plt.ylabel(tfp.units)
plt.title(tfp.title)
plt.grid()
```
```python
# Convert each series into per capita using civilian pop 16 and over
gdp = gdp.per_capita(civ_pop=True)
cons = cons.per_capita(civ_pop=True)
invest = invest.per_capita(civ_pop=True)
hours = hours.per_capita(civ_pop=True)
capital = capital.per_capita(civ_pop=True)
m2 = m2.per_capita(civ_pop=True)
# Put GDP, consumption, investment, capital, and M2 in units of thousands of dollars per person
gdp.data = gdp.data*1000
cons.data = cons.data*1000
invest.data = invest.data*1000
capital.data = capital.data*1000
m2.data = m2.data/1000
# Scale hours per person to equal 100 in the fourth quarter (October) of 2012
hours.data = hours.data/hours.data.loc['2012-10-01']*100
# Compute and plot log real GDP, log consumption, log investment, log hours
gdp_log = gdp.log()
cons_log = cons.log()
invest_log = invest.log()
hours_log = hours.log()
capital_log = capital.log()
tfp_log = tfp.log()
m2_log = m2.log()
```
```python
# HP filter to isolate trend and cyclical components
gdp_log_cycle,gdp_log_trend = gdp_log.hp_filter()
cons_log_cycle,cons_log_trend = cons_log.hp_filter()
invest_log_cycle,invest_log_trend = invest_log.hp_filter()
hours_log_cycle,hours_log_trend = hours_log.hp_filter()
capital_log_cycle,capital_log_trend = capital_log.hp_filter()
tfp_log_cycle,tfp_log_trend = tfp_log.hp_filter()
m2_log_cycle,m2_log_trend = m2_log.hp_filter()
tb3mo_cycle,tb3mo_trend = tb3mo.hp_filter()
unemp_cycle,unemp_trend = unemp.hp_filter()
pcec_cycle,pcec_trend = pcec.hp_filter()
```
```python
# Create a DataFrame with actual and trend data
data = pd.DataFrame({
'gdp':gdp.data,
'gdp_trend':np.exp(gdp_log_trend.data),
'gdp_cycle':gdp_log_cycle.data,
'consumption':cons.data,
'consumption_trend':np.exp(cons_log_trend.data),
'consumption_cycle':cons_log_cycle.data,
'investment':invest.data,
'investment_trend':np.exp(invest_log_trend.data),
'investment_cycle':invest_log_cycle.data,
'hours':hours.data,
'hours_trend':np.exp(hours_log_trend.data),
'hours_cycle':hours_log_cycle.data,
'capital':capital.data,
'capital_trend':np.exp(capital_log_trend.data),
'capital_cycle':capital_log_cycle.data,
'tfp':tfp.data,
'tfp_trend':np.exp(tfp_log_trend.data),
'tfp_cycle':tfp_log_cycle.data,
'real_m2':m2.data,
'real_m2_trend':np.exp(m2_log_trend.data),
'real_m2_cycle':m2_log_cycle.data,
't_bill_3mo':tb3mo.data,
't_bill_3mo_trend':tb3mo_trend.data,
't_bill_3mo_cycle':tb3mo_cycle.data,
'pce_inflation':pcec.data,
'pce_inflation_trend':pcec_trend.data,
'pce_inflation_cycle':pcec_cycle.data,
'unemployment':unemp.data,
'unemployment_trend':unemp_trend.data,
'unemployment_cycle':unemp_cycle.data,
},index = gdp.data.index)
# # RBC Data
# columns_ordered =[]
# names = ['gdp','consumption','investment','hours','capital','tfp']
# for name in names:
# columns_ordered.append(name)
# columns_ordered.append(name+'_trend')
# data[columns_ordered].to_csv('../Csv/rbc_data_actual_trend.csv')
# # Create a DataFrame with actual, trend, and cycle data
# columns_ordered =[]
# names = ['gdp','consumption','investment','hours','capital','tfp']
# for name in names:
# columns_ordered.append(name)
# columns_ordered.append(name+'_trend')
# columns_ordered.append(name+'_cycle')
# data[columns_ordered].to_csv('../Csv/rbc_data_actual_trend_cycle.csv')
# Business Cycle Data
columns_ordered =[]
names = ['gdp','consumption','investment','hours','capital','tfp','real_m2','t_bill_3mo','pce_inflation','unemployment']
for name in names:
columns_ordered.append(name)
columns_ordered.append(name+'_trend')
data[columns_ordered].to_csv('../Csv/business_cycle_data_actual_trend.csv')
# Create a DataFrame with actual, trend, and cycle data
columns_ordered =[]
names = ['gdp','consumption','investment','hours','capital','tfp','real_m2','t_bill_3mo','pce_inflation','unemployment']
for name in names:
columns_ordered.append(name)
columns_ordered.append(name+'_trend')
columns_ordered.append(name+'_cycle')
data[columns_ordered].to_csv('../Csv/business_cycle_data_actual_trend_cycle.csv')
```
|
(*<*)
(*
* Copyright 2015, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory StrongTricolour
imports
Global_Invariants_Lemmas
Local_Invariants_Lemmas
Tactics
begin
(*>*)
(* local lemma bucket *)
context mut_m
begin
(* marked insertions *)
lemma marked_insertions_store_ins[simp]:
"\<lbrakk> marked_insertions s; (\<exists>r'. opt_r' = Some r') \<longrightarrow> marked (the opt_r') s \<rbrakk>
\<Longrightarrow> marked_insertions
(s(mutator m := s (mutator m)\<lparr>ghost_honorary_root := {}\<rparr>,
sys := s sys
\<lparr>mem_store_buffers := (mem_store_buffers (s sys))(mutator m := sys_mem_store_buffers (mutator m) s @ [mw_Mutate r f opt_r'])\<rparr>))"
by (auto simp: marked_insertions_def
split: mem_store_action.splits option.splits)
lemma marked_insertions_alloc[simp]:
"\<lbrakk> heap (s sys) r' = None; valid_refs_inv s \<rbrakk>
\<Longrightarrow> marked_insertions (s(mutator m' := s (mutator m')\<lparr>roots := roots'\<rparr>, sys := s sys\<lparr>heap := (sys_heap s)(r' \<mapsto> obj')\<rparr>))
\<longleftrightarrow> marked_insertions s"
apply (clarsimp simp: marked_insertions_def split: mem_store_action.splits option.splits)
apply (rule iffI)
apply clarsimp
apply (rename_tac ref field x)
apply (drule_tac x=ref in spec, drule_tac x=field in spec, drule_tac x=x in spec, clarsimp)
apply (drule valid_refs_invD(6)[where x=r' and y=r'], simp_all)
done
(* marked_deletions *)
lemma marked_deletions_store_ins[simp]:
"\<lbrakk> marked_deletions s; obj_at_field_on_heap (\<lambda>r'. marked r' s) r f s \<rbrakk>
\<Longrightarrow> marked_deletions
(s(mutator m := s (mutator m)\<lparr>ghost_honorary_root := {}\<rparr>,
sys := s sys
\<lparr>mem_store_buffers := (mem_store_buffers (s sys))(mutator m := sys_mem_store_buffers (mutator m) s @ [mw_Mutate r f opt_r'])\<rparr>))"
by (auto simp: marked_deletions_def
split: mem_store_action.splits option.splits)
lemma marked_deletions_alloc[simp]:
"\<lbrakk> marked_deletions s; heap (s sys) r' = None; valid_refs_inv s \<rbrakk>
\<Longrightarrow> marked_deletions (s(mutator m' := s (mutator m')\<lparr>roots := roots'\<rparr>, sys := s sys\<lparr>heap := (sys_heap s)(r' \<mapsto> obj')\<rparr>))"
apply (clarsimp simp: marked_deletions_def split: mem_store_action.splits)
apply (rename_tac ref field option)
apply (drule_tac x="mw_Mutate ref field option" in spec)
apply clarsimp
apply (case_tac "ref = r'")
apply (auto simp: obj_at_field_on_heap_def split: option.splits)
done
end
subsection\<open>Sweep loop invariants\<close>
lemma (in gc) sweep_loop_invL_eq_imp:
"eq_imp (\<lambda>(_::unit) s. (AT s gc, s\<down> gc, sys_fM s\<down>, map_option obj_mark \<circ> sys_heap s\<down>))
sweep_loop_invL"
apply (clarsimp simp: eq_imp_def inv)
apply (rename_tac s s')
apply (subgoal_tac "\<forall>r. valid_ref r s\<down> \<longleftrightarrow> valid_ref r s'\<down>")
apply (subgoal_tac "\<forall>P r. obj_at (\<lambda>obj. P (obj_mark obj)) r s\<down> \<longleftrightarrow> obj_at (\<lambda>obj. P (obj_mark obj)) r s'\<down>")
apply (frule_tac x="\<lambda>mark. Some mark = gc_mark s'\<down>" in spec)
apply (frule_tac x="\<lambda>mark. mark = sys_fM s'\<down>" in spec)
apply clarsimp
apply (clarsimp simp: fun_eq_iff split: obj_at_splits)
apply (rename_tac r)
apply ( (drule_tac x=r in spec)+, auto)[1]
apply (clarsimp simp: fun_eq_iff split: obj_at_splits)
apply (rename_tac r)
apply (drule_tac x=r in spec, auto)[1]
apply (metis map_option_eq_Some)+
done
lemmas gc_sweep_loop_invL_niE[nie] =
iffD1[OF gc.sweep_loop_invL_eq_imp[simplified eq_imp_simps, rule_format, unfolded conj_explode, rule_format], rotated -1]
lemma (in gc) sweep_loop_invL[intro]:
"\<lbrace> fM_fA_invL \<^bold>\<and> phase_invL \<^bold>\<and> sweep_loop_invL \<^bold>\<and> tso_lock_invL
\<^bold>\<and> LSTP (phase_rel_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> valid_W_inv) \<rbrace>
gc
\<lbrace> sweep_loop_invL \<rbrace>"
proof(vcg_jackhammer, vcg_name_cases)
case sweep_loop_ref_done then show ?case by blast
next case sweep_loop_check then show ?case
apply (clarsimp split: obj_at_splits)
apply (metis (no_types, lifting) option.collapse option.inject)
done
next case sweep_loop_load_mark then show ?case by (clarsimp split: obj_at_splits)
qed
context gc
begin
lemma sweep_loop_locs_subseteq_sweep_locs:
"sweep_loop_locs \<subseteq> sweep_locs"
by (auto simp: sweep_loop_locs_def sweep_locs_def intro: append_prefixD)
lemma sweep_locs_subseteq_fM_tso_empty_locs:
"sweep_locs \<subseteq> fM_tso_empty_locs"
by (auto simp: sweep_locs_def fM_tso_empty_locs_def loc_defs)
lemma sweep_loop_locs_fM_eq_locs:
"sweep_loop_locs \<subseteq> fM_eq_locs"
by (auto simp: sweep_loop_locs_def fM_eq_locs_def sweep_locs_def loc_defs)
lemma sweep_loop_locs_fA_eq_locs:
"sweep_loop_locs \<subseteq> fA_eq_locs"
apply (simp add: sweep_loop_locs_def fA_eq_locs_def sweep_locs_def)
apply (intro subset_insertI2)
apply (auto intro: append_prefixD)
done
lemma black_heap_locs_subseteq_fM_tso_empty_locs:
"black_heap_locs \<subseteq> fM_tso_empty_locs"
by (auto simp: black_heap_locs_def fM_tso_empty_locs_def loc_defs)
lemma black_heap_locs_fM_eq_locs:
"black_heap_locs \<subseteq> fM_eq_locs"
by (simp add: black_heap_locs_def fM_eq_locs_def loc_defs)
lemma black_heap_locs_fA_eq_locs:
"black_heap_locs \<subseteq> fA_eq_locs"
by (simp add: black_heap_locs_def fA_eq_locs_def sweep_locs_def loc_defs)
lemma fM_fA_invL_tso_emptyD:
"\<lbrakk> atS gc ls s; fM_fA_invL s; ls \<subseteq> fM_tso_empty_locs \<rbrakk> \<Longrightarrow> tso_pending_fM gc s\<down> = []"
by (auto simp: fM_fA_invL_def dest: atS_mono)
lemma gc_sweep_loop_invL_locsE[rule_format]:
"(atS gc (sweep_locs \<union> black_heap_locs) s \<longrightarrow> False) \<Longrightarrow> gc.sweep_loop_invL s"
apply (simp add: gc.sweep_loop_invL_def atS_un)
apply (auto simp: locset_cache atS_simps dest: atS_mono)
apply (simp add: atS_mono gc.sweep_loop_locs_subseteq_sweep_locs; fail)
apply (clarsimp simp: atS_def)
apply (rename_tac x)
apply (drule_tac x=x in bspec)
apply (auto simp: sweep_locs_def sweep_loop_not_choose_ref_locs_def intro: append_prefixD)
done
end
lemma (in sys) gc_sweep_loop_invL[intro]:
"\<lbrace> gc.fM_fA_invL \<^bold>\<and> gc.gc_W_empty_invL \<^bold>\<and> gc.sweep_loop_invL
\<^bold>\<and> LSTP (tso_store_inv \<^bold>\<and> valid_W_inv) \<rbrace>
sys
\<lbrace> gc.sweep_loop_invL \<rbrace>"
proof(vcg_jackhammer (keep_locs) (no_thin_post_inv), vcg_name_cases)
case (tso_dequeue_store_buffer s s' p w ws) then show ?case
proof(cases w)
case (mw_Mark r fl) with tso_dequeue_store_buffer show ?thesis
apply -
apply (rule gc.gc_sweep_loop_invL_locsE)
apply (simp only: gc.gc_W_empty_invL_def gc.no_grey_refs_locs_def cong del: atS_state_weak_cong)
apply (clarsimp simp: atS_un)
apply (thin_tac "AT _ = _") (* FIXME speed the metis call up a bit *)
apply (thin_tac "at _ _ _ \<longrightarrow> _")+
apply (metis (mono_tags, lifting) filter.simps(2) loc_mem_tac_simps(4) no_grey_refs_no_pending_marks)
done
next case (mw_Mutate r f opt_r') with tso_dequeue_store_buffer show ?thesis by clarsimp (erule gc_sweep_loop_invL_niE; simp add: fun_eq_iff fun_upd_apply)
next case (mw_Mutate_Payload r f pl) with tso_dequeue_store_buffer show ?thesis by clarsimp (erule gc_sweep_loop_invL_niE; simp add: fun_eq_iff fun_upd_apply)
next case (mw_fA fl) with tso_dequeue_store_buffer show ?thesis by - (erule gc_sweep_loop_invL_niE; simp add: fun_eq_iff)
next case (mw_fM fl) with tso_dequeue_store_buffer show ?thesis
apply -
apply (rule gc.gc_sweep_loop_invL_locsE)
apply (case_tac p; clarsimp)
apply (drule (1) gc.fM_fA_invL_tso_emptyD)
apply simp_all
using gc.black_heap_locs_subseteq_fM_tso_empty_locs gc.sweep_locs_subseteq_fM_tso_empty_locs apply blast
done
next case (mw_Phase ph) with tso_dequeue_store_buffer show ?thesis by - (erule gc_sweep_loop_invL_niE; simp add: fun_eq_iff)
qed
qed
lemma (in mut_m) gc_sweep_loop_invL[intro]:
"\<lbrace> gc.fM_fA_invL \<^bold>\<and> gc.handshake_invL \<^bold>\<and> gc.sweep_loop_invL
\<^bold>\<and> LSTP (mutators_phase_inv \<^bold>\<and> valid_refs_inv) \<rbrace>
mutator m
\<lbrace> gc.sweep_loop_invL \<rbrace>"
proof( vcg_chainsaw (no_thin) gc.fM_fA_invL_def gc.sweep_loop_invL_def gc.handshake_invL_def, vcg_name_cases gc)
case (sweep_loop_locs s s' rb) then show ?case by (metis (no_types, lifting) atS_mono gc.sweep_loop_locs_fA_eq_locs gc.sweep_loop_locs_fM_eq_locs)
next case (black_heap_locs s s' rb) then show ?case by (metis (no_types, lifting) atS_mono gc.black_heap_locs_fA_eq_locs gc.black_heap_locs_fM_eq_locs)
qed
subsection\<open> Mutator proofs \<close>
context mut_m
begin
(* reachable snapshot inv *)
lemma reachable_snapshot_inv_mo_co_mark[simp]:
"\<lbrakk> ghost_honorary_grey (s p) = {}; reachable_snapshot_inv s \<rbrakk>
\<Longrightarrow> reachable_snapshot_inv (s(p := s p\<lparr> ghost_honorary_grey := {r} \<rparr>))"
unfolding in_snapshot_def reachable_snapshot_inv_def by (auto simp: fun_upd_apply)
lemma reachable_snapshot_inv_hs_get_roots_done:
assumes sti: "strong_tricolour_inv s"
assumes m: "\<forall>r \<in> mut_roots s. marked r s"
assumes ghr: "mut_ghost_honorary_root s = {}"
assumes t: "tso_pending_mutate (mutator m) s = []"
assumes vri: "valid_refs_inv s"
shows "reachable_snapshot_inv
(s(mutator m := s (mutator m)\<lparr>W := {}, ghost_hs_phase := ghp'\<rparr>,
sys := s sys\<lparr>hs_pending := hp', W := sys_W s \<union> mut_W s, ghost_hs_in_sync := in'\<rparr>))"
(is "reachable_snapshot_inv ?s'")
proof(rule, clarsimp)
fix r assume "reachable r s"
then show "in_snapshot r ?s'"
proof (induct rule: reachable_induct)
case (root x) with m show ?case
apply (clarsimp simp: in_snapshot_def) (* FIXME intro rules *)
apply (auto dest: marked_imp_black_or_grey)
done
next
case (ghost_honorary_root x) with ghr show ?case by simp
next
case (tso_root x) with t show ?case
apply (clarsimp simp: filter_empty_conv tso_store_refs_def)
apply (rename_tac w; case_tac w; fastforce)
done
next
case (reaches x y)
from reaches vri have "valid_ref x s" "valid_ref y s"
using reachable_points_to by fastforce+
with reaches sti vri show ?case
apply (clarsimp simp: in_snapshot_def)
apply (elim disjE)
apply (clarsimp simp: strong_tricolour_inv_def)
apply (drule spec[where x=x])
apply clarsimp
apply (auto dest!: marked_imp_black_or_grey)[1]
apply (cases "white y s")
apply (auto dest: grey_protects_whiteE
dest!: marked_imp_black_or_grey)
done
qed
qed
lemma reachable_snapshot_inv_hs_get_work_done:
"reachable_snapshot_inv s
\<Longrightarrow> reachable_snapshot_inv
(s(mutator m := s (mutator m)\<lparr>W := {}\<rparr>,
sys := s sys\<lparr>hs_pending := pending', W := sys_W s \<union> mut_W s,
ghost_hs_in_sync := (ghost_hs_in_sync (s sys))(m := True)\<rparr>))"
by (simp add: reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def)
lemma reachable_snapshot_inv_deref_del:
"\<lbrakk> reachable_snapshot_inv s; sys_load (mutator m) (mr_Ref r f) (s sys) = mv_Ref opt_r'; r \<in> mut_roots s; mut_ghost_honorary_root s = {} \<rbrakk>
\<Longrightarrow> reachable_snapshot_inv (s(mutator m := s (mutator m)\<lparr>ghost_honorary_root := Option.set_option opt_r', ref := opt_r'\<rparr>))"
unfolding reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def by (clarsimp simp: fun_upd_apply)
lemma mutator_phase_inv[intro]:
notes fun_upd_apply[simp]
notes reachable_snapshot_inv_deref_del[simp]
notes if_split_asm[split del]
shows
"\<lbrace> handshake_invL
\<^bold>\<and> mark_object_invL
\<^bold>\<and> mut_get_roots.mark_object_invL m
\<^bold>\<and> mut_store_del.mark_object_invL m
\<^bold>\<and> mut_store_ins.mark_object_invL m
\<^bold>\<and> LSTP (handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> phase_rel_inv \<^bold>\<and> sys_phase_inv \<^bold>\<and> fA_rel_inv \<^bold>\<and> fM_rel_inv \<^bold>\<and> valid_refs_inv \<^bold>\<and> strong_tricolour_inv \<^bold>\<and> valid_W_inv) \<rbrace>
mutator m
\<lbrace> LSTP mutator_phase_inv \<rbrace>"
proof( vcg_jackhammer (no_thin_post_inv)
, simp_all add: mutator_phase_inv_aux_case split: hs_phase.splits
, vcg_name_cases)
case alloc then show ?case
apply (drule_tac x=m in spec)
apply (drule handshake_phase_invD)
apply (clarsimp simp: fA_rel_inv_def fM_rel_inv_def fM_rel_def hp_step_rel_def split: if_split_asm)
apply (intro conjI impI; simp)
apply (elim disjE; force simp: fA_rel_def)
apply (rule reachable_snapshot_inv_alloc, simp_all)
apply (elim disjE; force simp: fA_rel_def)
done
next case (store_ins s s') then show ?case
apply (drule_tac x=m in spec)
apply (drule handshake_phase_invD)
apply (intro conjI impI; clarsimp)
apply (rule marked_deletions_store_ins, assumption) (* FIXME shuffle the following into this lemma *)
apply (cases "(\<forall>opt_r'. mw_Mutate (mut_tmp_ref s\<down>) (mut_field s\<down>) opt_r' \<notin> set (sys_mem_store_buffers (mutator m) s\<down>))"; clarsimp)
apply (force simp: marked_deletions_def)
apply (erule marked_insertions_store_ins)
apply (drule phase_rel_invD)
apply (clarsimp simp: phase_rel_def hp_step_rel_def; elim disjE; fastforce dest: reachable_blackD elim: blackD; fail)
apply (rule marked_deletions_store_ins; clarsimp) (* FIXME as above *)
apply (erule disjE; clarsimp)
apply (drule phase_rel_invD)
apply (clarsimp simp: phase_rel_def)
apply (elim disjE; clarsimp)
apply (fastforce simp: hp_step_rel_def)
apply (clarsimp simp: hp_step_rel_def)
apply (case_tac "sys_ghost_hs_phase s\<down>"; clarsimp) (* FIXME invert handshake_phase_rel *)
apply (clarsimp simp: obj_at_field_on_heap_def split: option.splits)
apply (rule conjI, fast, clarsimp)
apply (frule_tac r=x2a in blackD(1)[OF reachable_blackD], simp_all)[1]
apply (rule_tac x="mut_tmp_ref s\<down>" in reachable_points_to; auto simp: ran_def split: obj_at_splits; fail)
apply (clarsimp simp: obj_at_field_on_heap_def split: option.splits)
apply (rule conjI, fast, clarsimp)
apply (frule_tac r=x2a in blackD(1)[OF reachable_blackD], simp_all)[1]
apply (rule_tac x="mut_tmp_ref s\<down>" in reachable_points_to; auto simp: ran_def split: obj_at_splits; fail)
apply (force simp: marked_deletions_def)
done
next case (hs_noop_done s s') then show ?case
apply -
apply (drule_tac x=m in spec)
apply (drule handshake_phase_invD)
apply (simp add: fA_rel_def fM_rel_def hp_step_rel_def)
apply (cases "mut_ghost_hs_phase s\<down>") (* FIXME invert handshake_step *)
apply auto
done
next case (hs_get_roots_done s s') then show ?case
apply -
apply (drule_tac x=m in spec)
apply (drule handshake_phase_invD)
apply (force simp: hp_step_rel_def reachable_snapshot_inv_hs_get_roots_done)
done
next case (hs_get_work_done s s') then show ?case
apply (drule_tac x=m in spec)
apply (drule handshake_phase_invD)
apply (force simp add: hp_step_rel_def reachable_snapshot_inv_hs_get_work_done)
done
qed
end
lemma (in mut_m') mutator_phase_inv[intro]:
notes mut_m.mark_object_invL_def[inv]
notes mut_m.handshake_invL_def[inv]
notes fun_upd_apply[simp]
shows
"\<lbrace> handshake_invL \<^bold>\<and> mut_m.handshake_invL m'
\<^bold>\<and> mut_m.mark_object_invL m'
\<^bold>\<and> mut_get_roots.mark_object_invL m'
\<^bold>\<and> mut_store_del.mark_object_invL m'
\<^bold>\<and> mut_store_ins.mark_object_invL m'
\<^bold>\<and> LSTP (fA_rel_inv \<^bold>\<and> fM_rel_inv \<^bold>\<and> handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> valid_refs_inv) \<rbrace>
mutator m'
\<lbrace> LSTP mutator_phase_inv \<rbrace>"
proof( vcg_jackhammer (no_thin_post_inv)
, simp_all add: mutator_phase_inv_aux_case split: hs_phase.splits
, vcg_name_cases)
case (alloc s s' rb) then show ?case
apply -
apply (clarsimp simp: fA_rel_inv_def fM_rel_inv_def white_def)
apply (drule spec[where x=m])
apply (intro conjI impI; clarsimp)
apply (clarsimp simp: hp_step_rel_def simp: fA_rel_def fM_rel_def dest!: handshake_phase_invD)
apply (elim disjE, auto; fail)
apply (rule reachable_snapshot_inv_alloc, simp_all)
apply (clarsimp simp: hp_step_rel_def simp: fA_rel_def fM_rel_def dest!: handshake_phase_invD)
apply (cases "sys_ghost_hs_phase s\<down>"; clarsimp; blast)
done
next case (hs_get_roots_done s s') then show ?case
apply -
apply (drule spec[where x=m])
apply (simp add: no_black_refs_def reachable_snapshot_inv_def in_snapshot_def)
done
next case (hs_get_work_done s s') then show ?case
apply -
apply (drule spec[where x=m])
apply (clarsimp simp: no_black_refs_def reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def)
done
qed
(* FIXME Some of \<open>mutator_phase_inv\<close>, the rest in Global Noninterference *)
lemma no_black_refs_sweep_loop_free[simp]:
"no_black_refs s \<Longrightarrow> no_black_refs (s(sys := s sys\<lparr>heap := (sys_heap s)(gc_tmp_ref s := None)\<rparr>))"
unfolding no_black_refs_def by simp
lemma no_black_refs_load_W[simp]:
"\<lbrakk> no_black_refs s; gc_W s = {} \<rbrakk>
\<Longrightarrow> no_black_refs (s(gc := s gc\<lparr>W := sys_W s\<rparr>, sys := s sys\<lparr>W := {}\<rparr>))"
unfolding no_black_refs_def by simp
lemma marked_insertions_sweep_loop_free[simp]:
"\<lbrakk> mut_m.marked_insertions m s; white r s \<rbrakk>
\<Longrightarrow> mut_m.marked_insertions m (s(sys := (s sys)\<lparr>heap := (heap (s sys))(r := None)\<rparr>))"
unfolding mut_m.marked_insertions_def by (fastforce simp: fun_upd_apply split: mem_store_action.splits obj_at_splits option.splits)
lemma marked_deletions_sweep_loop_free[simp]:
notes fun_upd_apply[simp]
shows
"\<lbrakk> mut_m.marked_deletions m s; mut_m.reachable_snapshot_inv m s; no_grey_refs s; white r s \<rbrakk>
\<Longrightarrow> mut_m.marked_deletions m (s(sys := s sys\<lparr>heap := (sys_heap s)(r := None)\<rparr>))"
unfolding mut_m.marked_deletions_def
apply (clarsimp split: mem_store_action.splits)
apply (rename_tac ref field option)
apply (drule_tac x="mw_Mutate ref field option" in spec)
apply (clarsimp simp: obj_at_field_on_heap_def split: option.splits)
apply (rule conjI)
apply (clarsimp simp: mut_m.reachable_snapshot_inv_def)
apply (drule spec[where x=r], clarsimp simp: in_snapshot_def)
apply (drule mp, auto simp: mut_m.reachable_def mut_m.tso_store_refs_def split: mem_store_action.splits)[1] (* FIXME rule *)
apply (drule grey_protects_whiteD)
apply (clarsimp simp: no_grey_refs_def)
apply (clarsimp; fail)
apply (rule conjI; clarsimp)
apply (rule conjI)
apply (clarsimp simp: mut_m.reachable_snapshot_inv_def)
apply (drule spec[where x=r], clarsimp simp: in_snapshot_def)
apply (drule mp, auto simp: mut_m.reachable_def mut_m.tso_store_refs_def split: mem_store_action.splits)[1] (* FIXME rule *)
apply (drule grey_protects_whiteD)
apply (clarsimp simp: no_grey_refs_def)
unfolding white_def apply (clarsimp split: obj_at_splits)
done
context gc
begin
lemma obj_fields_marked_inv_blacken:
"\<lbrakk> gc_field_set s = {}; obj_fields_marked s; (gc_tmp_ref s points_to w) s; white w s \<rbrakk> \<Longrightarrow> False"
by (simp add: obj_fields_marked_def obj_at_field_on_heap_def ran_def white_def split: option.splits obj_at_splits)
lemma obj_fields_marked_inv_has_white_path_to_blacken:
"\<lbrakk> gc_field_set s = {}; gc_tmp_ref s \<in> gc_W s; (gc_tmp_ref s has_white_path_to w) s; obj_fields_marked s; valid_W_inv s \<rbrakk> \<Longrightarrow> w = gc_tmp_ref s"
by (metis (mono_tags, lifting) converse_rtranclpE gc.obj_fields_marked_inv_blacken has_white_path_to_def)
lemma mutator_phase_inv[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> fM_fA_invL \<^bold>\<and> gc_W_empty_invL \<^bold>\<and> handshake_invL \<^bold>\<and> obj_fields_marked_invL \<^bold>\<and> sweep_loop_invL
\<^bold>\<and> gc_mark.mark_object_invL
\<^bold>\<and> LSTP (handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> valid_refs_inv \<^bold>\<and> valid_W_inv) \<rbrace>
gc
\<lbrace> LSTP (mut_m.mutator_phase_inv m) \<rbrace>"
proof( vcg_jackhammer (no_thin_post_inv)
, simp_all add: mutator_phase_inv_aux_case white_def split: hs_phase.splits
, vcg_name_cases )
case (sweep_loop_free s s') then show ?case
apply (intro allI conjI impI)
apply (drule mut_m.handshake_phase_invD[where m=m], clarsimp simp: hp_step_rel_def; fail)
apply (rule mut_m.reachable_snapshot_inv_sweep_loop_free, simp_all add: white_def)
done
next case (mark_loop_get_work_load_W s s') then show ?case
apply clarsimp
apply (drule spec[where x=m])
apply (clarsimp simp: mut_m.reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def) (* FIXME rule *)
done
next case (mark_loop_blacken s s') then show ?case
apply -
apply (drule spec[where x=m])
apply clarsimp
apply (intro allI conjI impI; clarsimp)
apply (drule mut_m.handshake_phase_invD[where m=m], clarsimp simp: hp_step_rel_def)
apply (clarsimp simp: mut_m.reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def)
apply (metis (no_types, opaque_lifting) obj_fields_marked_inv_has_white_path_to_blacken)
done
next case (mark_loop_mo_co_mark s s' y) then show ?case by (clarsimp simp: handshake_in_syncD mut_m.reachable_snapshot_inv_mo_co_mark)
next case (mark_loop_get_roots_load_W s s') then show ?case
apply clarsimp
apply (drule spec[where x=m])
apply (clarsimp simp: mut_m.reachable_snapshot_inv_def in_snapshot_def grey_protects_white_def) (* FIXME rule *)
done
qed
end
lemma (in gc) strong_tricolour_inv[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> fM_fA_invL \<^bold>\<and> gc_W_empty_invL \<^bold>\<and> gc_mark.mark_object_invL \<^bold>\<and> obj_fields_marked_invL \<^bold>\<and> sweep_loop_invL
\<^bold>\<and> LSTP (strong_tricolour_inv \<^bold>\<and> valid_W_inv) \<rbrace>
gc
\<lbrace> LSTP strong_tricolour_inv \<rbrace>"
unfolding strong_tricolour_inv_def
proof(vcg_jackhammer (no_thin_post_inv), vcg_name_cases)
case (mark_loop_blacken s s' x xa) then show ?case by (fastforce elim!: obj_fields_marked_inv_blacken)
qed
lemma (in mut_m) strong_tricolour[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> mark_object_invL
\<^bold>\<and> mut_get_roots.mark_object_invL m
\<^bold>\<and> mut_store_del.mark_object_invL m
\<^bold>\<and> mut_store_ins.mark_object_invL m
\<^bold>\<and> LSTP (fA_rel_inv \<^bold>\<and> fM_rel_inv \<^bold>\<and> handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> strong_tricolour_inv \<^bold>\<and> sys_phase_inv \<^bold>\<and> valid_refs_inv) \<rbrace>
mutator m
\<lbrace> LSTP strong_tricolour_inv \<rbrace>"
unfolding strong_tricolour_inv_def
proof(vcg_jackhammer (no_thin_post_inv), vcg_name_cases)
case (alloc s s' x xa rb) then show ?case
apply (clarsimp simp: fA_rel_inv_def fM_rel_inv_def)
apply (drule handshake_phase_invD)
apply (drule spec[where x=m])
apply (clarsimp simp: sys_phase_inv_aux_case
split: hs_phase.splits if_splits)
apply (blast dest: heap_colours_colours)
(* FIXME rule? *)
apply (metis (no_types, lifting) black_def no_black_refsD obj_at_cong option.simps(3))
apply (metis (no_types, lifting) black_def no_black_refsD obj_at_cong option.distinct(1))
apply (clarsimp simp: hp_step_rel_def)
apply (elim disjE; force simp: fA_rel_def fM_rel_def split: obj_at_splits)
apply (clarsimp simp: hp_step_rel_def)
apply (elim disjE; force simp: fA_rel_def fM_rel_def split: obj_at_splits)
done
qed
(*<*)
end
(*>*)
|
#ifndef DISKVECTOR_UTILS_HPP
#define DISKVECTOR_UTILS_HPP
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <fstream>
#include <sstream>
#include <vector>
#include <string>
#include <glog/logging.h>
using namespace std;
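// Deserialize an object of type Dtype from the binary string produced by
// hashObj() below. Despite the name, "hash" here is the full serialized
// blob (a Boost binary archive), not a cryptographic digest.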
template<typename Dtype>
void unhashObj(Dtype& obj, const string& hash) {
istringstream iss(hash);
boost::archive::binary_iarchive ia(iss);
ia >> obj;
}
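// Serialize an object of type Dtype into a binary string.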
template<typename Dtype>
void hashObj(const Dtype& obj, string& hash) {
ostringstream oss;
boost::archive::binary_oarchive oa(oss);
oa << obj;
hash = oss.str();
}
#endif
|
\subsection{Prototypical Networks for Small Datasets}
\label{models:protonet}
Prototypical Networks \cite{protonet} is a meta-learning model for the problem of few-shot classification, where a classifier must generalize to new classes not seen in the training set, given only a small number of examples of each new class. The ability of an algorithm to perform few-shot learning is typically measured by its performance on $n$-shot, $k$-way classification tasks. First, the model is given a query sample belonging to a new, previously unseen class. Then, it is also given a support set, $S$, consisting of $n$ examples from each of $k$ different unseen classes. Finally, the algorithm has to determine which of the support set classes the query sample belongs to.
Schemes for few-shot classification tasks, such as Prototypical Networks, can also be useful for training on small datasets where all classes are known.
Prototypical Networks applies a compelling inductive bias in the form of class prototypes to achieve impressive few-shot performance. The key assumption is that there exists an embedding in which samples from each class cluster around a single prototypical representation, which is simply the mean of the embedded individual samples. This idea streamlines $n$-shot classification for $n > 1$, as classification is performed simply by taking the label of the closest class prototype.
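Formally, following \cite{protonet}, the prototype of class $k$ is the mean of the embedded support samples of that class, and a query point is assigned the label of the nearest prototype:
\begin{equation*}
\mathbf{c}_k = \frac{1}{|S_k|} \sum_{(\mathbf{x}_i, y_i) \in S_k} f_\phi(\mathbf{x}_i), \qquad \hat{y} = \arg\min_k \, d\left(f_\phi(\mathbf{x}), \mathbf{c}_k\right),
\end{equation*}
where $f_\phi$ denotes the embedding network, $S_k$ the support samples of class $k$, and $d$ a distance function such as the squared Euclidean distance.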
\subsection{DenseNet}
As our state-of-the-art model we selected DenseNet because it can handle small datasets with a low error rate \cite{pmlr-v80-pham18a}.
DenseNet \cite{densenet} works by concatenating the feature maps of each convolutional block with the feature maps of all preceding convolutional blocks and using the result as input to the next convolutional block. This way, each convolutional block receives the collective knowledge of all previous layers, maintaining a global state of the network that every subsequent layer can access.
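In symbols, the $\ell$-th block receives the channel-wise concatenation of all preceding feature maps as its input \cite{densenet}:
\begin{equation*}
\mathbf{x}_\ell = H_\ell\left([\mathbf{x}_0, \mathbf{x}_1, \ldots, \mathbf{x}_{\ell-1}]\right),
\end{equation*}
where $[\cdot]$ denotes concatenation along the channel dimension and $H_\ell$ is the composite function (batch normalization, ReLU and convolution) of block $\ell$.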
Convolutional networks construct informative features by fusing both spatial and channel-wise information within local receptive fields at each layer. Squeeze-and-excitation blocks (SE blocks) \cite{Hu2017SqueezeandExcitationN} focus on the channel-wise information used in the convolutional layers. SE blocks improve the quality of the representations produced by the network by modeling the interdependencies between channels to perform feature recalibration. SE blocks can be added to any model that uses convolutional layers to improve its performance at low computational cost. We added SE blocks to our DenseNet model to improve its performance.
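Concretely, an SE block squeezes each channel of a feature map $\mathbf{U} \in \mathbb{R}^{H \times W \times C}$ into a scalar by global average pooling and then rescales the channels with weights produced by a small gating network \cite{Hu2017SqueezeandExcitationN}:
\begin{equation*}
z_c = \frac{1}{HW} \sum_{i=1}^{H} \sum_{j=1}^{W} u_c(i, j), \qquad \mathbf{s} = \sigma\left(\mathbf{W}_2 \, \delta(\mathbf{W}_1 \mathbf{z})\right), \qquad \tilde{\mathbf{u}}_c = s_c \mathbf{u}_c,
\end{equation*}
where $\delta$ is the ReLU activation, $\sigma$ the sigmoid function, and $\mathbf{W}_1$, $\mathbf{W}_2$ the weights of two fully connected layers forming a bottleneck.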
\subsection{Data Augmentation}
Image data augmentation is a set of techniques that aim to artificially increase the amount of data that can be obtained from the images in a dataset. These techniques modify the images with a set of predefined operations to create new images that can be used to train a model. In this manner, we can compensate for the lack of variability in a small dataset \cite{cubuk2019autoaugment}.
|
/*
* Copyright (c) 2016-2018 Nicholas Corgan ([email protected])
*
* Distributed under the MIT License (MIT) (See accompanying file LICENSE.txt
* or copy at http://opensource.org/licenses/MIT)
*/
#include "exception_internal.hpp"
#include "utils/misc.hpp"
#include "pokemon_gbaimpl.hpp"
#include "pokemon_gcnimpl.hpp"
#include "conversions/gen3_conversions.hpp"
#include "database/database_common.hpp"
#include "database/enum_conversions.hpp"
#include "database/id_to_string.hpp"
#include "database/index_to_string.hpp"
#include "io/read_write.hpp"
#include "pkmgc/includes.hpp"
#include "pksav/enum_maps.hpp"
#include "pksav/party_data.hpp"
#include "pksav/pksav_call.hpp"
#include "pksav/lib/gba/checksum.h"
#include "types/rng.hpp"
#include <pkmn/exception.hpp>
#include <pkmn/calculations/form.hpp>
#include <pkmn/calculations/gender.hpp>
#include <pkmn/calculations/personality.hpp>
#include <pkmn/calculations/shininess.hpp>
#include <pkmn/enums/enum_to_string.hpp>
#include <pkmn/utils/paths.hpp>
#include <pkmn/qt/Spinda.hpp>
#include <pkmn/database/item_entry.hpp>
#include <pksav/common/markings.h>
#include <pksav/common/stats.h>
#include <pksav/gba/language.h>
#include <pksav/gba/ribbons.h>
#include <pksav/gba/text.h>
#include <pksav/math/endian.h>
#include <boost/algorithm/string.hpp>
#include <boost/assign.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
#include <boost/thread/lock_guard.hpp>
#include <cstring>
#include <ctime>
#include <fstream>
#include <stdexcept>
namespace fs = boost::filesystem;
namespace pkmn
{
BOOST_STATIC_CONSTEXPR int MEW_ID = 151;
BOOST_STATIC_CONSTEXPR int UNOWN_ID = 201;
BOOST_STATIC_CONSTEXPR int DEOXYS_ID = 386;
pokemon_gbaimpl::pokemon_gbaimpl(
pkmn::database::pokemon_entry&& database_entry,
int level
): pokemon_impl(std::move(database_entry))
{
std::memset(&_pksav_pokemon, 0, sizeof(_pksav_pokemon));
// Set block pointers
_p_growth_block = &_pksav_pokemon.pc_data.blocks.growth;
_p_attacks_block = &_pksav_pokemon.pc_data.blocks.attacks;
_p_effort_block = &_pksav_pokemon.pc_data.blocks.effort;
_p_misc_block = &_pksav_pokemon.pc_data.blocks.misc;
_p_native_pc = &_pksav_pokemon.pc_data;
_p_native_party = &_pksav_pokemon.party_data;
pkmn::rng<uint32_t> rng;
_pksav_pokemon.pc_data.personality = rng.rand();
_pksav_pokemon.pc_data.ot_id.id = pksav_littleendian32(DEFAULT_TRAINER_ID);
PKSAV_CALL(
pksav_gba_export_text(
boost::algorithm::to_upper_copy(
_database_entry.get_species_name()
).c_str(),
_pksav_pokemon.pc_data.nickname,
PKSAV_GBA_POKEMON_NICKNAME_LENGTH
);
)
set_language(pkmn::e_language::ENGLISH);
PKSAV_CALL(
pksav_gba_export_text(
DEFAULT_TRAINER_NAME.c_str(),
_pksav_pokemon.pc_data.otname,
PKSAV_GBA_POKEMON_OTNAME_LENGTH
);
)
_p_growth_block->species = pksav_littleendian16(uint16_t(_database_entry.get_pokemon_index()));
_p_growth_block->exp = pksav_littleendian32(uint32_t(
_database_entry.get_experience_at_level(level)
));
_p_growth_block->friendship = uint8_t(_database_entry.get_base_friendship());
set_location_met("Fateful encounter", false);
set_level_met(level);
set_original_game(get_game());
set_ball(pkmn::e_ball::POKE_BALL);
_p_misc_block->iv_egg_ability = rng.rand();
_p_misc_block->iv_egg_ability &= ~PKSAV_GBA_POKEMON_EGG_MASK;
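// In Generation III, which of a species' two abilities is active follows
// the lowest bit of the personality value.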
if(_pksav_pokemon.pc_data.personality % 2)
{
_p_misc_block->iv_egg_ability |= PKSAV_GBA_POKEMON_ABILITY_MASK;
}
else
{
_p_misc_block->iv_egg_ability &= ~PKSAV_GBA_POKEMON_ABILITY_MASK;
}
// Only do this by default for Mew or Deoxys, who won't obey without
// this flag. For any others, this will cause them to falsely be
// flagged as a fateful encounter if traded up to Generation IV.
if((_database_entry.get_species_id() == MEW_ID) or
(_database_entry.get_species_id() == DEOXYS_ID))
{
_p_misc_block->ribbons_obedience |= PKSAV_GBA_POKEMON_OBEDIENCE_MASK;
}
// Populate abstractions
_update_ribbons_map();
_update_EV_map();
_init_modern_IV_map(&_p_misc_block->iv_egg_ability);
_init_contest_stat_map(&_p_effort_block->contest_stats);
_init_markings_map(&_pksav_pokemon.pc_data.markings);
set_level(level);
_init_default_moves_for_level();
if(_database_entry.get_species_id() == UNOWN_ID)
{
_p_growth_block->species = pksav_littleendian16(uint16_t(
_database_entry.get_pokemon_index()
));
_set_unown_personality_from_form();
}
_register_attributes();
}
pokemon_gbaimpl::pokemon_gbaimpl(
const struct pksav_gba_pc_pokemon* p_pc_pokemon,
int game_id
): pokemon_impl(
pksav_littleendian16(p_pc_pokemon->blocks.growth.species),
game_id
)
{
BOOST_ASSERT(p_pc_pokemon != nullptr);
_pksav_pokemon.pc_data = *p_pc_pokemon;
_populate_party_data();
// Set block pointers
_p_growth_block = &_pksav_pokemon.pc_data.blocks.growth;
_p_attacks_block = &_pksav_pokemon.pc_data.blocks.attacks;
_p_effort_block = &_pksav_pokemon.pc_data.blocks.effort;
_p_misc_block = &_pksav_pokemon.pc_data.blocks.misc;
_p_native_pc = &_pksav_pokemon.pc_data;
_p_native_party = &_pksav_pokemon.party_data;
// Populate abstractions
_update_ribbons_map();
_update_EV_map();
_init_modern_IV_map(&_p_misc_block->iv_egg_ability);
_init_contest_stat_map(&_p_effort_block->contest_stats);
_init_markings_map(&_pksav_pokemon.pc_data.markings);
_update_stat_map();
_update_moves(-1);
if(_database_entry.get_species_id() == UNOWN_ID)
{
_set_unown_personality_from_form();
}
_register_attributes();
}
pokemon_gbaimpl::pokemon_gbaimpl(
const struct pksav_gba_party_pokemon* p_party_pokemon,
int game_id
): pokemon_impl(
pksav_littleendian16(p_party_pokemon->pc_data.blocks.growth.species),
game_id
)
{
BOOST_ASSERT(p_party_pokemon != nullptr);
_pksav_pokemon = *p_party_pokemon;
// Set block pointers
_p_growth_block = &_pksav_pokemon.pc_data.blocks.growth;
_p_attacks_block = &_pksav_pokemon.pc_data.blocks.attacks;
_p_effort_block = &_pksav_pokemon.pc_data.blocks.effort;
_p_misc_block = &_pksav_pokemon.pc_data.blocks.misc;
_p_native_pc = &_pksav_pokemon.pc_data;
_p_native_party = &_pksav_pokemon.party_data;
// Populate abstractions
_update_ribbons_map();
_update_EV_map();
_init_modern_IV_map(&_p_misc_block->iv_egg_ability);
_init_contest_stat_map(&_p_effort_block->contest_stats);
_init_markings_map(&_pksav_pokemon.pc_data.markings);
_update_stat_map();
_update_moves(-1);
if(_database_entry.get_species_id() == UNOWN_ID)
{
_set_unown_personality_from_form();
}
_register_attributes();
}
pokemon::sptr pokemon_gbaimpl::to_game(pkmn::e_game game)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pkmn::pokemon::sptr ret;
int game_id = pkmn::database::game_enum_to_id(game);
int generation = pkmn::database::game_id_to_generation(game_id);
switch(generation)
{
case 3:
if(game_is_gamecube(game_id))
{
if(game_id == COLOSSEUM_ID)
{
LibPkmGC::Colosseum::Pokemon colosseum_pokemon;
pkmn::conversions::gba_party_pokemon_to_gcn(
&_pksav_pokemon,
&colosseum_pokemon
);
ret = std::make_shared<pokemon_gcnimpl>(
&colosseum_pokemon,
game_id
);
}
else
{
LibPkmGC::XD::Pokemon xd_pokemon;
pkmn::conversions::gba_party_pokemon_to_gcn(
&_pksav_pokemon,
&xd_pokemon
);
ret = std::make_shared<pokemon_gcnimpl>(
&xd_pokemon,
game_id
);
}
}
else
{
ret = std::make_shared<pokemon_gbaimpl>(&_pksav_pokemon, game_id);
ret->set_level_met(get_level());
ret->set_original_game(get_game());
}
break;
case 4:
case 5:
case 6:
throw pkmn::unimplemented_error();
default:
throw std::invalid_argument("Generation III Pokémon can only be converted to Generation III-VI.");
}
return ret;
}
void pokemon_gbaimpl::export_to_file(
const std::string& filepath
)
{
std::string extension = fs::extension(filepath);
if((extension == ".3gpkm") || (extension == ".pk3"))
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pkmn::io::write_file(
filepath,
get_native_pc_data(),
sizeof(struct pksav_gba_pc_pokemon)
);
}
else
{
throw std::invalid_argument("GBA Pokémon can only be saved to .3gpkm or .pk3 files.");
}
}
void pokemon_gbaimpl::set_form(
const std::string& form
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_database_entry.set_form(form);
_p_growth_block->species = pksav_littleendian16(uint16_t(_database_entry.get_pokemon_index()));
if(_database_entry.get_species_id() == UNOWN_ID)
{
_set_unown_personality_from_form();
}
}
bool pokemon_gbaimpl::is_egg()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return bool(_p_misc_block->iv_egg_ability & PKSAV_GBA_POKEMON_EGG_MASK);
}
void pokemon_gbaimpl::set_is_egg(
bool is_egg
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
if(is_egg)
{
_p_misc_block->iv_egg_ability |= PKSAV_GBA_POKEMON_EGG_MASK;
}
else
{
_p_misc_block->iv_egg_ability &= ~PKSAV_GBA_POKEMON_EGG_MASK;
}
}
pkmn::e_condition pokemon_gbaimpl::get_condition()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pkmn::e_condition ret = pkmn::e_condition::NONE;
// Check the mask. We won't distinguish between sleep states for different
// numbers of turns.
const pksav::condition_mask_bimap_t& condition_mask_bimap = pksav::get_condition_mask_bimap();
for(const auto& condition_mask_iter: condition_mask_bimap.right)
{
if(_pksav_pokemon.party_data.condition & condition_mask_iter.first)
{
ret = condition_mask_iter.second;
break;
}
}
return ret;
}
void pokemon_gbaimpl::set_condition(
pkmn::e_condition condition
)
{
const pksav::condition_mask_bimap_t& condition_mask_bimap = pksav::get_condition_mask_bimap();
pkmn::enforce_value_in_map_keys(
"Condition",
condition,
condition_mask_bimap.left
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.party_data.condition = pksav_littleendian32(condition_mask_bimap.left.at(condition));
}
std::string pokemon_gbaimpl::get_nickname()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Add an extra character for the null terminator.
char nickname[PKSAV_GBA_POKEMON_NICKNAME_LENGTH + 1] = {0};
PKSAV_CALL(
pksav_gba_import_text(
_pksav_pokemon.pc_data.nickname,
nickname,
PKSAV_GBA_POKEMON_NICKNAME_LENGTH
);
)
return std::string(nickname);
}
void pokemon_gbaimpl::set_nickname(
const std::string& nickname
)
{
pkmn::enforce_string_length(
"Nickname",
nickname,
1,
PKSAV_GBA_POKEMON_NICKNAME_LENGTH
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
PKSAV_CALL(
pksav_gba_export_text(
nickname.c_str(),
_pksav_pokemon.pc_data.nickname,
PKSAV_GBA_POKEMON_NICKNAME_LENGTH
);
)
}
pkmn::e_gender pokemon_gbaimpl::get_gender()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pkmn::calculations::modern_pokemon_gender(
get_species(),
pksav_littleendian32(_pksav_pokemon.pc_data.personality)
);
}
void pokemon_gbaimpl::set_gender(
pkmn::e_gender gender
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_modern_gender(
&_pksav_pokemon.pc_data.personality,
gender
);
// No need to check Unown case, it's genderless.
}
bool pokemon_gbaimpl::is_shiny()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
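// Gen III shininess: the Pokémon is shiny when the XOR of the two 16-bit
// halves of the personality value and of the trainer ID is less than 8.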
return pkmn::calculations::modern_shiny(
pksav_littleendian32(_pksav_pokemon.pc_data.personality),
pksav_littleendian32(_pksav_pokemon.pc_data.ot_id.id)
);
}
void pokemon_gbaimpl::set_shininess(
bool value
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_modern_shininess(
&_pksav_pokemon.pc_data.personality,
&_pksav_pokemon.pc_data.ot_id.id,
value
);
if(_database_entry.get_species_id() == UNOWN_ID)
{
_set_unown_form_from_personality();
}
}
pkmn::e_item pokemon_gbaimpl::get_held_item()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pkmn::e_item(
pkmn::database::item_index_to_id(
pksav_littleendian16(_p_growth_block->held_item),
_database_entry.get_game_id()
)
);
}
void pokemon_gbaimpl::set_held_item(
pkmn::e_item held_item
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Make sure item is valid and holdable
pkmn::database::item_entry item(
held_item,
get_game()
);
if(!item.holdable() && (held_item != pkmn::e_item::NONE))
{
throw std::invalid_argument("This item is not holdable.");
}
_p_growth_block->held_item = pksav_littleendian16(uint16_t(item.get_item_index()));
}
pkmn::e_nature pokemon_gbaimpl::get_nature()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
const pksav::nature_bimap_t& nature_bimap = pksav::get_nature_bimap();
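// In Generation III, nature is determined by the personality value modulo 25.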
enum pksav_nature nature = static_cast<enum pksav_nature>(get_personality() % 25);
// All values 0-24 should be represented in this bimap.
auto nature_iter = nature_bimap.right.find(nature);
BOOST_ASSERT(nature_iter != nature_bimap.right.end());
return nature_iter->second;
}
void pokemon_gbaimpl::set_nature(
pkmn::e_nature nature
)
{
const pksav::nature_bimap_t& nature_bimap = pksav::get_nature_bimap();
pkmn::enforce_value_in_map_keys(
"Nature",
nature,
nature_bimap.left
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Nature is derived from personality, so we need to find a new
// one that preserves all other values.
set_personality(
pkmn::calculations::generate_personality(
get_species(),
get_original_trainer_id(),
is_shiny(),
get_ability(),
get_gender(),
nature
)
);
}
int pokemon_gbaimpl::get_pokerus_duration()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return _get_pokerus_duration(&_p_misc_block->pokerus);
}
void pokemon_gbaimpl::set_pokerus_duration(
int duration
)
{
pkmn::enforce_bounds("Duration", duration, 0, 15);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_pokerus_duration(
&_p_misc_block->pokerus,
duration
);
}
std::string pokemon_gbaimpl::get_original_trainer_name()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Add an extra character for the null terminator.
char otname[PKSAV_GBA_POKEMON_OTNAME_LENGTH + 1] = {0};
PKSAV_CALL(
pksav_gba_import_text(
_pksav_pokemon.pc_data.otname,
otname,
PKSAV_GBA_POKEMON_OTNAME_LENGTH
);
)
return std::string(otname);
}
void pokemon_gbaimpl::set_original_trainer_name(
const std::string& trainer_name
)
{
pkmn::enforce_string_length(
"Trainer name",
trainer_name,
1,
PKSAV_GBA_POKEMON_OTNAME_LENGTH
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
PKSAV_CALL(
pksav_gba_export_text(
trainer_name.c_str(),
_pksav_pokemon.pc_data.otname,
PKSAV_GBA_POKEMON_OTNAME_LENGTH
);
)
}
uint16_t pokemon_gbaimpl::get_original_trainer_public_id()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pksav_littleendian16(_pksav_pokemon.pc_data.ot_id.pid);
}
uint16_t pokemon_gbaimpl::get_original_trainer_secret_id()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pksav_littleendian16(_pksav_pokemon.pc_data.ot_id.sid);
}
uint32_t pokemon_gbaimpl::get_original_trainer_id()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pksav_littleendian32(_pksav_pokemon.pc_data.ot_id.id);
}
void pokemon_gbaimpl::set_original_trainer_public_id(
uint16_t public_id
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.pc_data.ot_id.pid = pksav_littleendian16(public_id);
}
void pokemon_gbaimpl::set_original_trainer_secret_id(
uint16_t secret_id
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.pc_data.ot_id.sid = pksav_littleendian16(secret_id);
}
void pokemon_gbaimpl::set_original_trainer_id(
uint32_t id
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.pc_data.ot_id.id = pksav_littleendian32(id);
}
pkmn::e_gender pokemon_gbaimpl::get_original_trainer_gender()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return (_p_misc_block->origin_info & PKSAV_GBA_POKEMON_OTGENDER_MASK)
? pkmn::e_gender::FEMALE
: pkmn::e_gender::MALE;
}
void pokemon_gbaimpl::set_original_trainer_gender(
pkmn::e_gender gender
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
if(gender == pkmn::e_gender::MALE)
{
_p_misc_block->origin_info &= ~PKSAV_GBA_POKEMON_OTGENDER_MASK;
}
else if(gender == pkmn::e_gender::FEMALE)
{
_p_misc_block->origin_info |= PKSAV_GBA_POKEMON_OTGENDER_MASK;
}
else
{
throw std::invalid_argument("gender: valid values \"Male\", \"Female\"");
}
}
pkmn::e_language pokemon_gbaimpl::get_language()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pkmn::e_language ret = pkmn::e_language::ENGLISH;
enum pksav_gba_language language_as_enum = static_cast<enum pksav_gba_language>(
pksav_littleendian16(
_pksav_pokemon.pc_data.language
)
);
const pksav::gba_language_bimap_t& gba_language_bimap = pksav::get_gba_language_bimap();
// Allow for other values in case of a corrupted save.
auto gba_language_bimap_iter = gba_language_bimap.right.find(language_as_enum);
if(gba_language_bimap_iter != gba_language_bimap.right.end())
{
ret = gba_language_bimap_iter->second;
}
return ret;
}
void pokemon_gbaimpl::set_language(
pkmn::e_language language
)
{
const pksav::gba_language_bimap_t& gba_language_bimap = pksav::get_gba_language_bimap();
pkmn::enforce_value_in_map_keys(
"Language",
language,
gba_language_bimap.left
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.pc_data.language = pksav_littleendian16(static_cast<uint16_t>(
gba_language_bimap.left.at(language)
));
}
int pokemon_gbaimpl::get_current_trainer_friendship()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return _p_growth_block->friendship;
}
void pokemon_gbaimpl::set_current_trainer_friendship(
int friendship
)
{
pkmn::enforce_bounds("Friendship", friendship, 0, 255);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_p_growth_block->friendship = uint8_t(friendship);
}
pkmn::e_ability pokemon_gbaimpl::get_ability()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pkmn::e_ability ret;
pkmn::ability_pair_t abilities = _database_entry.get_abilities();
if(abilities.second == pkmn::e_ability::NONE)
{
ret = abilities.first;
}
else
{
bool has_second_ability = bool(_p_misc_block->iv_egg_ability & PKSAV_GBA_POKEMON_ABILITY_MASK);
ret = has_second_ability ? abilities.second : abilities.first;
}
return ret;
}
void pokemon_gbaimpl::set_ability(pkmn::e_ability ability)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_ability(ability);
// Ability is derived from personality, so we need to find a new
// one that preserves all other values.
_pksav_pokemon.pc_data.personality = pksav_littleendian32(
pkmn::calculations::generate_personality(
get_species(),
get_original_trainer_id(),
is_shiny(),
ability,
get_gender(),
get_nature()
));
}
pkmn::e_ball pokemon_gbaimpl::get_ball()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Account for corrupted data.
pkmn::e_ball ball = pkmn::e_ball(PKSAV_GBA_POKEMON_BALL(_p_misc_block->origin_info));
if((ball < pkmn::e_ball::NONE) || (ball > pkmn::e_ball::PREMIER_BALL))
{
ball = pkmn::e_ball::INVALID;
}
return ball;
}
void pokemon_gbaimpl::set_ball(
pkmn::e_ball ball
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// Try and instantiate an item_entry to validate the ball.
(void)pkmn::database::item_entry(
pkmn::database::ball_to_item(ball),
get_game()
);
_p_misc_block->origin_info &= ~PKSAV_GBA_POKEMON_BALL_MASK;
_p_misc_block->origin_info |= (uint16_t(ball) << PKSAV_GBA_POKEMON_BALL_OFFSET);
}
int pokemon_gbaimpl::get_level_met()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return (_p_misc_block->origin_info & PKSAV_GBA_POKEMON_LEVEL_MET_MASK);
}
void pokemon_gbaimpl::set_level_met(
int level
)
{
pkmn::enforce_bounds("Level met", level, 0, 100);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_p_misc_block->origin_info &= ~PKSAV_GBA_POKEMON_LEVEL_MET_MASK;
_p_misc_block->origin_info |= uint16_t(level);
}
std::string pokemon_gbaimpl::get_location_met(
bool as_egg
)
{
if(as_egg)
{
throw pkmn::feature_not_in_game_error("In-egg met location is not recorded in Generation III.");
}
else
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pkmn::database::location_index_to_name(
_p_misc_block->met_location,
_database_entry.get_game_id()
);
}
}
void pokemon_gbaimpl::set_location_met(
const std::string& location,
bool as_egg
)
{
if(as_egg)
{
throw pkmn::feature_not_in_game_error("In-egg met location is not recorded in Generation III.");
}
else
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_p_misc_block->met_location = uint8_t(pkmn::database::location_name_to_index(
location,
_database_entry.get_game_id()
));
}
}
pkmn::e_game pokemon_gbaimpl::get_original_game()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pkmn::database::game_index_to_enum(
PKSAV_GBA_POKEMON_ORIGIN_GAME(_p_misc_block->origin_info)
);
}
void pokemon_gbaimpl::set_original_game(pkmn::e_game game)
{
int generation = pkmn::database::game_enum_to_generation(game);
if(generation != 3)
{
throw std::invalid_argument("Game must be from Generation III.");
}
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_p_misc_block->origin_info &= ~PKSAV_GBA_POKEMON_ORIGIN_GAME_MASK;
uint16_t game_index = uint16_t(pkmn::database::game_enum_to_index(game));
_p_misc_block->origin_info |= (game_index << PKSAV_GBA_POKEMON_ORIGIN_GAME_OFFSET);
}
uint32_t pokemon_gbaimpl::get_personality()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pksav_littleendian32(_pksav_pokemon.pc_data.personality);
}
void pokemon_gbaimpl::set_personality(
uint32_t personality
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.pc_data.personality = pksav_littleendian32(personality);
_set_ability_from_personality();
if(_database_entry.get_species_id() == UNOWN_ID)
{
_set_unown_form_from_personality();
}
}
int pokemon_gbaimpl::get_experience()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return int(pksav_littleendian32(_p_growth_block->exp));
}
void pokemon_gbaimpl::set_experience(
int experience
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
int max_experience = _database_entry.get_experience_at_level(100);
pkmn::enforce_bounds("Experience", experience, 0, max_experience);
_p_growth_block->exp = pksav_littleendian32(uint32_t(experience));
_pksav_pokemon.party_data.level = uint8_t(_database_entry.get_level_at_experience(experience));
_populate_party_data();
_update_stat_map();
}
int pokemon_gbaimpl::get_level()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return int(_pksav_pokemon.party_data.level);
}
void pokemon_gbaimpl::set_level(
int level
)
{
pkmn::enforce_bounds("Level", level, 1, 100);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.party_data.level = uint8_t(level);
_p_growth_block->exp = pksav_littleendian32(uint32_t(_database_entry.get_experience_at_level(level)));
_populate_party_data();
_update_stat_map();
}
void pokemon_gbaimpl::set_IV(
pkmn::e_stat stat,
int value
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_modern_IV(
stat,
value,
&_p_misc_block->iv_egg_ability
);
}
void pokemon_gbaimpl::set_marking(
pkmn::e_marking marking,
bool value
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_marking(
marking,
value,
&_pksav_pokemon.pc_data.markings
);
}
void pokemon_gbaimpl::set_ribbon(
const std::string& ribbon,
bool value
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
if(_ribbons.find(ribbon) == _ribbons.end())
{
throw std::invalid_argument("Invalid ribbon.");
}
const auto& gba_ribbons = pksav::get_gba_ribbon_mask_bimap().left;
if(gba_ribbons.find(ribbon) != gba_ribbons.end())
{
_set_ribbon<uint32_t, decltype(gba_ribbons)>(
ribbon,
value,
&_p_misc_block->ribbons_obedience,
gba_ribbons
);
}
else
{
_set_contest_ribbon(
ribbon,
value
);
}
}
void pokemon_gbaimpl::set_contest_stat(
pkmn::e_contest_stat stat,
int value
)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_set_contest_stat(
stat,
value,
&_p_effort_block->contest_stats
);
}
void pokemon_gbaimpl::set_move(
pkmn::e_move move,
int index
)
{
pkmn::enforce_bounds("Move index", index, 0, 3);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
// This will throw an error if the move is invalid.
pkmn::database::move_entry entry(
move,
get_game()
);
_moves[index] = {move, entry.get_pp(0)};
_p_attacks_block->moves[index] = pksav_littleendian16(uint16_t(entry.get_move_id()));
_p_attacks_block->move_pps[index] = uint8_t(_moves[index].pp);
}
void pokemon_gbaimpl::set_move_pp(
int index,
int pp
)
{
pkmn::enforce_bounds("Move index", index, 0, 3);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
std::vector<int> PPs;
pkmn::database::move_entry entry(_moves[index].move, get_game());
for(int num_PP_ups = 0; num_PP_ups < 4; ++num_PP_ups)
{
PPs.emplace_back(entry.get_pp(num_PP_ups));
}
pkmn::enforce_bounds("PP", pp, 0, PPs.back());
_moves[index].pp = pp;
_p_attacks_block->move_pps[index] = uint8_t(pp);
// Set the PP Up mask to the minimum value that will accommodate the given PP.
_p_growth_block->pp_up &= ~uint8_t(3 << (index*2));
for(uint8_t num_PP_ups = 0; num_PP_ups < 4; ++num_PP_ups)
{
if(pp <= PPs[num_PP_ups])
{
_p_growth_block->pp_up |= uint8_t(num_PP_ups << (index*2));
break;
}
}
}
void pokemon_gbaimpl::set_EV(
pkmn::e_stat stat,
int value
)
{
pkmn::enforce_value_in_vector(
"Stat",
stat,
pkmn::MODERN_STATS
);
pkmn::enforce_EV_bounds(stat, value, true);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
switch(stat)
{
case pkmn::e_stat::HP:
_p_effort_block->ev_hp = uint8_t(value);
break;
case pkmn::e_stat::ATTACK:
_p_effort_block->ev_atk = uint8_t(value);
break;
case pkmn::e_stat::DEFENSE:
_p_effort_block->ev_def = uint8_t(value);
break;
case pkmn::e_stat::SPEED:
_p_effort_block->ev_spd = uint8_t(value);
break;
case pkmn::e_stat::SPECIAL_ATTACK:
_p_effort_block->ev_spatk = uint8_t(value);
break;
default:
_p_effort_block->ev_spdef = uint8_t(value);
break;
}
_update_EV_map();
_populate_party_data();
}
int pokemon_gbaimpl::get_current_hp()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return pksav_littleendian16(_pksav_pokemon.party_data.current_hp);
}
void pokemon_gbaimpl::set_current_hp(
int hp
)
{
pkmn::enforce_bounds(
"Current HP",
hp,
0,
_stats[pkmn::e_stat::HP]
);
boost::lock_guard<pokemon_gbaimpl> lock(*this);
_pksav_pokemon.party_data.current_hp = pksav_littleendian16(static_cast<uint16_t>(hp));
}
std::string pokemon_gbaimpl::get_sprite_filepath()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
std::string ret;
#ifdef PKMN_ENABLE_QT
BOOST_STATIC_CONSTEXPR int SPINDA_ID = 327;
if(_database_entry.get_species_id() == SPINDA_ID) {
bool shiny = is_shiny();
fs::path spinda_sprite_filepath(pkmn::get_tmp_dir());
spinda_sprite_filepath /= str(boost::format("spinda_%d_%d_%u.png")
% _generation
% (shiny ? 1 : 0)
% pksav_littleendian32(_pksav_pokemon.pc_data.personality));
pkmn::qt::GenerateSpindaSpriteAtFilepath(
3,
pksav_littleendian32(_pksav_pokemon.pc_data.personality),
shiny,
spinda_sprite_filepath.string()
);
ret = spinda_sprite_filepath.string();
} else {
#endif
ret = pokemon_impl::get_sprite_filepath();
#ifdef PKMN_ENABLE_QT
}
#endif
return ret;
}
void* pokemon_gbaimpl::get_native_pc_data()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
pksav_gba_set_pokemon_checksum(
&_pksav_pokemon.pc_data
);
return _p_native_pc;
}
bool pokemon_gbaimpl::get_is_obedient()
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
return bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_POKEMON_OBEDIENCE_MASK);
}
void pokemon_gbaimpl::set_is_obedient(bool is_obedient)
{
boost::lock_guard<pokemon_gbaimpl> lock(*this);
if(is_obedient)
{
_p_misc_block->ribbons_obedience |= PKSAV_GBA_POKEMON_OBEDIENCE_MASK;
}
else
{
_p_misc_block->ribbons_obedience &= ~PKSAV_GBA_POKEMON_OBEDIENCE_MASK;
}
}
void pokemon_gbaimpl::_set_contest_ribbon(
const std::string& ribbon,
bool value
)
{
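// Contest ribbons are stored as per-category levels (Normal/Super/Hyper/
// Master) packed into bit fields of the ribbons_obedience word, so setting
// or clearing a ribbon raises or lowers that category's stored level.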
std::vector<std::string> ribbon_parts;
boost::split(ribbon_parts, ribbon, boost::is_any_of(" "));
const auto& gba_contest_ribbon_masks = pksav::get_gba_contest_ribbon_mask_bimap().left;
const auto& gba_contest_ribbon_levels = pksav::get_gba_contest_ribbon_level_bimap().left;
const auto& gba_contest_ribbon_offsets = pksav::get_gba_contest_ribbon_offset_bimap().left;
// Validate input (which should already have been validated)
if((ribbon_parts.size() == 0 || ribbon_parts.size() > 2) ||
(gba_contest_ribbon_masks.count(ribbon_parts.at(0)) == 0) ||
(ribbon_parts.size() == 2 &&
gba_contest_ribbon_levels.count(ribbon_parts.at(1)) == 0)
)
{
throw std::invalid_argument("Invalid ribbon.");
}
uint16_t mask = uint16_t(gba_contest_ribbon_masks.at(ribbon_parts.at(0)));
uint16_t offset = uint16_t(gba_contest_ribbon_offsets.at(ribbon_parts.at(0)));
uint16_t level = 0;
if(ribbon_parts.size() == 2)
{
level = uint16_t(gba_contest_ribbon_levels.at(ribbon_parts.at(1)));
}
uint16_t current_level = (_p_misc_block->ribbons_obedience & mask) >> offset;
if(value)
{
if(level == 0)
{
++level;
}
level = std::max<uint16_t>(level, current_level);
}
else
{
if(level > 0)
{
--level;
}
level = std::min<uint16_t>(level, current_level);
}
_p_misc_block->ribbons_obedience &= ~mask;
_p_misc_block->ribbons_obedience |= (level << offset);
_update_ribbons_map();
}
void pokemon_gbaimpl::_populate_party_data()
{
pksav::gba_pc_pokemon_to_party_data(
_database_entry,
&_pksav_pokemon.pc_data,
&_pksav_pokemon.party_data
);
_update_stat_map();
}
void pokemon_gbaimpl::_update_moves(
int index
)
{
_moves.resize(4);
switch(index)
{
case 0:
case 1:
case 2:
case 3:
// TODO: check move validity
_moves[index] = pkmn::move_slot(
static_cast<pkmn::e_move>(
pksav_littleendian16(_p_attacks_block->moves[index])
),
_p_attacks_block->move_pps[index]
);
break;
default:
for(int i = 0; i < 4; ++i)
{
_update_moves(i);
}
}
}
void pokemon_gbaimpl::_update_markings_map()
{
_markings[pkmn::e_marking::CIRCLE] = bool(_pksav_pokemon.pc_data.markings & PKSAV_MARKING_CIRCLE);
_markings[pkmn::e_marking::TRIANGLE] = bool(_pksav_pokemon.pc_data.markings & PKSAV_MARKING_TRIANGLE);
_markings[pkmn::e_marking::SQUARE] = bool(_pksav_pokemon.pc_data.markings & PKSAV_MARKING_SQUARE);
_markings[pkmn::e_marking::HEART] = bool(_pksav_pokemon.pc_data.markings & PKSAV_MARKING_HEART);
}
void pokemon_gbaimpl::_update_ribbons_map() {
uint32_t cool_ribbons = _p_misc_block->ribbons_obedience & PKSAV_GBA_COOL_RIBBONS_MASK;
_ribbons["Cool"] = bool(cool_ribbons >= PKSAV_GBA_CONTEST_RIBBON_NORMAL);
_ribbons["Cool Super"] = bool(cool_ribbons >= PKSAV_GBA_CONTEST_RIBBON_SUPER);
_ribbons["Cool Hyper"] = bool(cool_ribbons >= PKSAV_GBA_CONTEST_RIBBON_HYPER);
_ribbons["Cool Master"] = bool(cool_ribbons >= PKSAV_GBA_CONTEST_RIBBON_MASTER);
uint32_t beauty_ribbons = _p_misc_block->ribbons_obedience & PKSAV_GBA_BEAUTY_RIBBONS_MASK;
beauty_ribbons >>= PKSAV_GBA_BEAUTY_RIBBONS_OFFSET;
_ribbons["Beauty"] = bool(beauty_ribbons >= PKSAV_GBA_CONTEST_RIBBON_NORMAL);
_ribbons["Beauty Super"] = bool(beauty_ribbons >= PKSAV_GBA_CONTEST_RIBBON_SUPER);
_ribbons["Beauty Hyper"] = bool(beauty_ribbons >= PKSAV_GBA_CONTEST_RIBBON_HYPER);
_ribbons["Beauty Master"] = bool(beauty_ribbons >= PKSAV_GBA_CONTEST_RIBBON_MASTER);
uint32_t cute_ribbons = _p_misc_block->ribbons_obedience & PKSAV_GBA_CUTE_RIBBONS_MASK;
cute_ribbons >>= PKSAV_GBA_CUTE_RIBBONS_OFFSET;
_ribbons["Cute"] = bool(cute_ribbons >= PKSAV_GBA_CONTEST_RIBBON_NORMAL);
_ribbons["Cute Super"] = bool(cute_ribbons >= PKSAV_GBA_CONTEST_RIBBON_SUPER);
_ribbons["Cute Hyper"] = bool(cute_ribbons >= PKSAV_GBA_CONTEST_RIBBON_HYPER);
_ribbons["Cute Master"] = bool(cute_ribbons >= PKSAV_GBA_CONTEST_RIBBON_MASTER);
uint32_t smart_ribbons = _p_misc_block->ribbons_obedience & PKSAV_GBA_SMART_RIBBONS_MASK;
smart_ribbons >>= PKSAV_GBA_SMART_RIBBONS_OFFSET;
_ribbons["Smart"] = bool(smart_ribbons >= PKSAV_GBA_CONTEST_RIBBON_NORMAL);
_ribbons["Smart Super"] = bool(smart_ribbons >= PKSAV_GBA_CONTEST_RIBBON_SUPER);
_ribbons["Smart Hyper"] = bool(smart_ribbons >= PKSAV_GBA_CONTEST_RIBBON_HYPER);
_ribbons["Smart Master"] = bool(smart_ribbons >= PKSAV_GBA_CONTEST_RIBBON_MASTER);
uint32_t tough_ribbons = _p_misc_block->ribbons_obedience & PKSAV_GBA_TOUGH_RIBBONS_MASK;
tough_ribbons >>= PKSAV_GBA_TOUGH_RIBBONS_OFFSET;
_ribbons["Tough"] = bool(tough_ribbons >= PKSAV_GBA_CONTEST_RIBBON_NORMAL);
_ribbons["Tough Super"] = bool(tough_ribbons >= PKSAV_GBA_CONTEST_RIBBON_SUPER);
_ribbons["Tough Hyper"] = bool(tough_ribbons >= PKSAV_GBA_CONTEST_RIBBON_HYPER);
_ribbons["Tough Master"] = bool(tough_ribbons >= PKSAV_GBA_CONTEST_RIBBON_MASTER);
_ribbons["Champion"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_CHAMPION_RIBBON_MASK);
_ribbons["Winning"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_WINNING_RIBBON_MASK);
_ribbons["Victory"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_VICTORY_RIBBON_MASK);
_ribbons["Artist"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_ARTIST_RIBBON_MASK);
_ribbons["Effort"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_EFFORT_RIBBON_MASK);
_ribbons["Marine"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_MARINE_RIBBON_MASK);
_ribbons["Land"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_LAND_RIBBON_MASK);
_ribbons["Sky"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_SKY_RIBBON_MASK);
_ribbons["Country"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_COUNTRY_RIBBON_MASK);
_ribbons["National"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_NATIONAL_RIBBON_MASK);
_ribbons["Earth"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_EARTH_RIBBON_MASK);
_ribbons["World"] = bool(_p_misc_block->ribbons_obedience & PKSAV_GBA_WORLD_RIBBON_MASK);
}
void pokemon_gbaimpl::_update_EV_map()
{
_EVs[pkmn::e_stat::HP] = int(_p_effort_block->ev_hp);
_EVs[pkmn::e_stat::ATTACK] = int(_p_effort_block->ev_atk);
_EVs[pkmn::e_stat::DEFENSE] = int(_p_effort_block->ev_def);
_EVs[pkmn::e_stat::SPEED] = int(_p_effort_block->ev_spd);
_EVs[pkmn::e_stat::SPECIAL_ATTACK] = int(_p_effort_block->ev_spatk);
_EVs[pkmn::e_stat::SPECIAL_DEFENSE] = int(_p_effort_block->ev_spdef);
}
void pokemon_gbaimpl::_update_stat_map() {
_stats[pkmn::e_stat::HP] = int(pksav_littleendian16(_pksav_pokemon.party_data.max_hp));
_stats[pkmn::e_stat::ATTACK] = int(pksav_littleendian16(_pksav_pokemon.party_data.atk));
_stats[pkmn::e_stat::DEFENSE] = int(pksav_littleendian16(_pksav_pokemon.party_data.def));
_stats[pkmn::e_stat::SPEED] = int(pksav_littleendian16(_pksav_pokemon.party_data.spd));
_stats[pkmn::e_stat::SPECIAL_ATTACK] = int(pksav_littleendian16(_pksav_pokemon.party_data.spatk));
_stats[pkmn::e_stat::SPECIAL_DEFENSE] = int(pksav_littleendian16(_pksav_pokemon.party_data.spdef));
}
void pokemon_gbaimpl::_set_ability(pkmn::e_ability ability)
{
pkmn::ability_pair_t abilities = _database_entry.get_abilities();
if(ability == pkmn::e_ability::NONE)
{
throw std::invalid_argument("The ability cannot be set to None.");
}
else if(ability == abilities.first)
{
_p_misc_block->iv_egg_ability &= ~PKSAV_GBA_POKEMON_ABILITY_MASK;
}
else if(ability == abilities.second)
{
BOOST_ASSERT(abilities.second != pkmn::e_ability::NONE);
_p_misc_block->iv_egg_ability |= PKSAV_GBA_POKEMON_ABILITY_MASK;
}
else
{
std::string error_message;
if(abilities.second == pkmn::e_ability::NONE)
{
error_message = str(boost::format("ability: valid values \"%s\"")
% pkmn::ability_to_string(abilities.first).c_str());
}
else
{
error_message = str(boost::format("ability: valid values \"%s\", \"%s\"")
% pkmn::ability_to_string(abilities.first).c_str()
% pkmn::ability_to_string(abilities.second).c_str());
}
throw std::invalid_argument(error_message.c_str());
}
}
void pokemon_gbaimpl::_set_unown_form_from_personality()
{
_database_entry.set_form(
pkmn::calculations::gen3_unown_form(
pksav_littleendian32(
_pksav_pokemon.pc_data.personality
)
)
);
}
void pokemon_gbaimpl::_set_unown_personality_from_form() {
char as_char = _database_entry.get_form()[0];
uint8_t num = 0;
switch(as_char) {
case '?':
num = 26;
break;
case '!':
num = 27;
break;
// We can assume the form is valid at this point.
default:
num = uint8_t(as_char - 'A');
break;
}
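// The Unown letter is encoded across the personality value: the two lowest
// bits of each of its four bytes together form the form index, so write
// two bits of num into each byte.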
uint8_t* pid_as_bytes = reinterpret_cast<uint8_t*>(&_pksav_pokemon.pc_data.personality);
for(size_t i = 0; i < 4; ++i) {
pid_as_bytes[i] &= ~0x3;
pid_as_bytes[i] |= ((num & (0x3 << (2*i))) >> (2*i));
}
}
void pokemon_gbaimpl::_register_attributes()
{
using std::placeholders::_1;
_boolean_attribute_engine.register_attribute_fcns(
"Obedient?",
std::bind(&pokemon_gbaimpl::get_is_obedient, this),
std::bind(&pokemon_gbaimpl::set_is_obedient, this, _1)
);
}
}
|
State Before: α : Type u
β : Type v
X : Type ?u.532598
ι : Type ?u.532601
inst✝ : PseudoMetricSpace α
s : Set α
x y z : α
h : EMetric.diam s ≠ ⊤
hx : x ∈ s
hy : y ∈ s
⊢ dist x y ≤ diam s
State After: α : Type u
β : Type v
X : Type ?u.532598
ι : Type ?u.532601
inst✝ : PseudoMetricSpace α
s : Set α
x y z : α
h : EMetric.diam s ≠ ⊤
hx : x ∈ s
hy : y ∈ s
⊢ ENNReal.toReal (edist x y) ≤ ENNReal.toReal (EMetric.diam s)
Tactic: rw [diam, dist_edist]
State Before: α : Type u
β : Type v
X : Type ?u.532598
ι : Type ?u.532601
inst✝ : PseudoMetricSpace α
s : Set α
x y z : α
h : EMetric.diam s ≠ ⊤
hx : x ∈ s
hy : y ∈ s
⊢ ENNReal.toReal (edist x y) ≤ ENNReal.toReal (EMetric.diam s)
State After: α : Type u
β : Type v
X : Type ?u.532598
ι : Type ?u.532601
inst✝ : PseudoMetricSpace α
s : Set α
x y z : α
h : EMetric.diam s ≠ ⊤
hx : x ∈ s
hy : y ∈ s
⊢ edist x y ≤ EMetric.diam s
Tactic: rw [ENNReal.toReal_le_toReal (edist_ne_top _ _) h]
State Before: α : Type u
β : Type v
X : Type ?u.532598
ι : Type ?u.532601
inst✝ : PseudoMetricSpace α
s : Set α
x y z : α
h : EMetric.diam s ≠ ⊤
hx : x ∈ s
hy : y ∈ s
⊢ edist x y ≤ EMetric.diam s
State After: no goals
Tactic: exact EMetric.edist_le_diam_of_mem hx hy
|
If $f$ is holomorphic on a ball centered at $z$ with radius $r$, and $w$ is a point in the ball, then the integral of $f(u)/(u - w)$ around the circle centered at $z$ with radius $r$ is $2\pi i f(w)$.
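In symbols, this is the Cauchy integral formula: under the stated hypotheses,
$$\oint_{|u - z| = r} \frac{f(u)}{u - w}\, du = 2\pi i\, f(w).$$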
|
Quoting the Jewish rabbi and scholar Jacob <unk>, Pope Benedict XVI explains that to Israel, keeping this commandment was more than ritual; it was a way to imitate God, who rested on the seventh day after the creation. It also constituted the core of the social order.
|
#include "problems/mga_1dsm_transx.h"
#include <keplerian_toolbox/core_functions/array3D_operations.h>
#include <keplerian_toolbox/core_functions/fb_vel.h>
#include <keplerian_toolbox/core_functions/propagate_lagrangian.h>
#include <keplerian_toolbox/core_functions/fb_prop.h>
#include <keplerian_toolbox/lambert_problem.h>
#include <string>
#include <cmath>
#include <numeric>
#include <vector>
#include <boost/array.hpp>
#include <boost/math/constants/constants.hpp>
namespace pagmo { namespace problem {
int dimension_from_params(const std::vector<kep_toolbox::planet::planet_ptr> seq, const std::vector<bool> m_dsm) {
int dim = 2;
int n = seq.size();
bool dsm = m_dsm.size() == 0 || (m_dsm.size() > 0 && m_dsm[0]);
dim += (dsm ? 4 : 1);
for (int i = 1; i < seq.size() - 1; ++i) {
dsm = m_dsm.size() == 0 || (m_dsm.size() > i && m_dsm[i]);
dim += (dsm ? 4 : 1);
}
return dim;
}
bool mga_1dsm_transx::dsm_in_leg_i(int i) const {
return (m_dsm.size() == 0) || (m_dsm.size() > i && m_dsm[i]);
}
int mga_1dsm_transx::t0_index() const { return 0; }
int mga_1dsm_transx::tof_index() const { return 1; }
int mga_1dsm_transx::vinf_index() const { return 3; } // spherical v-infinity occupies indices 3, 4, 5
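// Decision vector layout, inferred from the index helpers in this file:
// x[0] = departure epoch t0, x[1] = total time of flight; then, per leg, a
// time-of-flight fraction, followed (when the leg performs a DSM) by the
// launch v-infinity in spherical coordinates (first leg only) or the fly-by
// parameters beta and pericenter radius ratio (later legs), plus the DSM
// epoch fraction within the leg.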
int mga_1dsm_transx::base_idx(int i) const{
bool dsm = dsm_in_leg_i(0);
int base = (tof_index() + 1);
if (i == 0) return base;
int idx = (base + (dsm ? 4 : 1));
for (int j = 1; j < i; ++j) {
dsm = dsm_in_leg_i(j);
idx += (dsm ? 4 : 1);
}
return idx;
}
int mga_1dsm_transx::T_idx(int i) const {
return base_idx(i);
}
int mga_1dsm_transx::Beta_idx(int i) const {
int base = base_idx(i);
return base + 1;
}
int mga_1dsm_transx::R_idx(int i) const {
int base = base_idx(i);
return base + 2;
}
int mga_1dsm_transx::DSM_idx(int i) const {
int base = base_idx(i);
if (i == 0) return base + 4;
return base + 3;
}
mga_1dsm_transx::mga_1dsm_transx(const std::vector<kep_toolbox::planet::planet_ptr> seq,
const std::vector<bool> dsm,
const double dep_altitude, const double arr_altitude, const bool circularize,
const kep_toolbox::epoch t0_l, const kep_toolbox::epoch t0_u,
const double tof_l, const double tof_u,
const double vinf_l, const double vinf_u,
const bool add_vinf_dep, const bool add_vinf_arr,
const bool multi_objective) : transx_problem(seq, dep_altitude, arr_altitude, circularize, dimension_from_params(seq, dsm), 1 + (int)multi_objective), m_add_vinf_dep(add_vinf_dep), m_add_vinf_arr(add_vinf_arr), m_dsm(dsm) {
size_t dim(get_dimension());
decision_vector lb(dim, 0.0), ub(dim, 0.0);
lb[0] = t0_l.mjd2000(); ub[0] = t0_u.mjd2000();
lb[1] = tof_l * 365.25; ub[1] = tof_u * 365.25;
lb[2] = 1e-5; ub[2] = 1 - 1e-5;
if (dsm_in_leg_i(0)) {
lb[vinf_index()] = lb[vinf_index() + 1] = 0.0; ub[vinf_index()] = ub[vinf_index() + 1] = 1.0;
lb[vinf_index() + 2] = vinf_l * 1000; ub[vinf_index() + 2] = vinf_u * 1000;
lb[DSM_idx(0)] = 1e-5; ub[DSM_idx(0)] = 1 - 1e-5;
}
for (int i = 0; i < get_n_legs() - 1; ++i) {
auto planet = seq[i + 1];
int j = base_idx(i + 1);
bool dsm = dsm_in_leg_i(i + 1);
lb[j] = 1e-5; ub[j] = 1 - 1e-5; // T[i]
if (dsm) {
double a = planet->compute_elements()[0];
double soi = a * pow((planet->get_mu_self() / planet->get_mu_central_body()), 2.0 / 5.0); // exponent must be floating point; 2/5 in integer arithmetic is 0
double soiRad = soi / planet->get_radius();
double safeDistanceRatio = planet->get_safe_radius() / planet->get_radius();
lb[j + 1] = -2 * boost::math::constants::pi<double>(); ub[j + 1] = 2 * boost::math::constants::pi<double>(); // Beta
lb[j + 2] = safeDistanceRatio; ub[j + 2] = soiRad; // Rad
lb[j + 3] = 1e-5; ub[j + 3] = 1 - 1e-5;
}
}
for (int i = 1; i < get_n_legs() - 1; ++i) {
bool dsm = dsm_in_leg_i(i);
if (dsm) {
kep_toolbox::planet::planet_ptr pl = get_seq()[i];
lb[R_idx(i)] = pl->get_safe_radius() / pl->get_radius();
}
}
set_bounds(lb, ub);
}
mga_1dsm_transx::mga_1dsm_transx(const mga_1dsm_transx &p) : transx_problem(p.get_seq(), p.get_dep_altitude(), p.get_arr_altitude(), p.get_circularize(), p.get_dimension(), p.get_f_dimension()), m_dsm(p.m_dsm), m_add_vinf_arr(p.get_add_vinf_arr()), m_add_vinf_dep(p.get_add_vinf_dep()), m_multi_obj(p.get_multi_obj()) {
set_bounds(p.get_lb(), p.get_ub());
}
base_ptr mga_1dsm_transx::clone() const {
return base_ptr(new mga_1dsm_transx(*this));
}
void mga_1dsm_transx::calc_objective(fitness_vector &f, const decision_vector &x, bool should_print, TransXSolution * solution) const {
std::vector<double> T(get_n_legs());
double alpha_sum = 0;
for (int i = 0; i < T.size(); ++i) {
T[i] = - log(x[T_idx(i)]);
alpha_sum += T[i];
}
bool dsm = dsm_in_leg_i(0);
kep_toolbox::array3D Vinf;
if (dsm) {
double theta = 2 * M_PI * x[vinf_index()];
double phi = acos(2 * x[vinf_index() + 1] - 1) - M_PI / 2;
double vinf = x[vinf_index() + 2];
Vinf[0] = vinf * cos(phi) * cos(theta);
Vinf[1] = vinf * cos(phi) * sin(theta);
Vinf[2] = vinf * sin(phi);
}
for (int i = 0; i < T.size(); ++i) {
T[i] = x[tof_index()] * T[i] / alpha_sum;
}
int n = get_seq().size();
std::vector<kep_toolbox::epoch> t_P(n);
std::vector<kep_toolbox::array3D> r_P(n);
std::vector<kep_toolbox::array3D> v_P(n);
std::vector<double> DV(n + 1, 0.0);
for (int i = 0; i < n; ++i) {
kep_toolbox::planet::planet_ptr planet = get_seq()[i];
t_P[i] = kep_toolbox::epoch(x[t0_index()] + std::accumulate(T.begin(), T.begin() + i, 0.0));
planet->eph(t_P[i], r_P[i], v_P[i]);
}
if (should_print) {
transx_time_info(solution->mutable_times(), get_seq(), t_P);
}
kep_toolbox::array3D r, v;
r = r_P[0];
if (dsm) {
if (m_add_vinf_dep) {
DV[0] += burn_cost(get_seq()[0], Vinf, false, true);
}
if (should_print) {
transx_escape(solution->mutable_escape(), get_seq()[0], v_P[0], r_P[0], Vinf, t_P[0].mjd());
}
kep_toolbox::array3D v0;
kep_toolbox::sum(v0, v_P[0], Vinf);
v = v0;
kep_toolbox::propagate_lagrangian(r, v, (dsm ? x[DSM_idx(0)] : 0) * T[0] * ASTRO_DAY2SEC, get_common_mu());
}
double dt = (1 - (dsm ? x[DSM_idx(0)] : 0)) * T[0] * ASTRO_DAY2SEC;
kep_toolbox::lambert_problem l(r, r_P[1], dt, get_common_mu());
kep_toolbox::array3D v_end_l(l.get_v2()[0]);
kep_toolbox::array3D v_beg_l(l.get_v1()[0]);
if (!dsm) {
kep_toolbox::diff(Vinf, v_beg_l, v_P[0]);
v = Vinf;
kep_toolbox::sum(v, v_P[0], Vinf);
if (m_add_vinf_dep) {
DV[0] += burn_cost(get_seq()[0], Vinf, false, true);
}
if (should_print) {
transx_escape(solution->mutable_escape(), get_seq()[0], v_P[0], r_P[0], Vinf, t_P[0].mjd());
}
}
kep_toolbox::array3D deltaV;
kep_toolbox::diff(deltaV, v_beg_l, v);
DV[0] += kep_toolbox::norm(deltaV);
if (dsm && should_print) {
transx_dsm(solution->add_dsms(), v, r, deltaV, v_beg_l, t_P[0].mjd() + T[0] - dt / ASTRO_DAY2SEC, 0);
}
for (int i = 1; i < n - 1; ++i) {
dsm = dsm_in_leg_i(i);
kep_toolbox::array3D v_rel_in, v_rel_out;
if (dsm) {
double radius = x[R_idx(i)] * get_seq()[i]->get_radius();
double beta = x[Beta_idx(i)];
kep_toolbox::array3D v_out;
kep_toolbox::fb_prop(v_out, v_end_l, v_P[i], radius, beta, get_seq()[i]->get_mu_self());
kep_toolbox::diff(v_rel_in, v_end_l, v_P[i]);
kep_toolbox::diff(v_rel_out, v_out, v_P[i]);
if (should_print) {
transx_flyby(solution->add_flybyes(), get_seq()[i], v_P[i], r_P[i], v_rel_in, v_rel_out, t_P[i].mjd());
}
r = r_P[i]; v = v_out;
kep_toolbox::propagate_lagrangian(r, v, (dsm ? x[DSM_idx(i)] : 0) * T[i] * ASTRO_DAY2SEC, get_common_mu());
} else {
r = r_P[i]; v = v_end_l;
}
dt = (1 - (dsm ? x[DSM_idx(i)] : 0)) * T[i] * ASTRO_DAY2SEC;
kep_toolbox::lambert_problem l2(r, r_P[i + 1], dt, get_common_mu());
v_beg_l = l2.get_v1()[0];
v_end_l = l2.get_v2()[0];
if (dsm) {
kep_toolbox::diff(deltaV, v_beg_l, v);
DV[i] = kep_toolbox::norm(deltaV);
if (should_print) {
transx_dsm(solution->add_dsms(), v, r, deltaV, v_beg_l, t_P[i].mjd() + T[i] - dt / ASTRO_DAY2SEC, 1);
}
} else {
kep_toolbox::diff(v_rel_in, v, v_P[i]);
kep_toolbox::diff(v_rel_out, v_beg_l, v_P[i]);
kep_toolbox::planet::planet_ptr planet = get_seq()[i];
kep_toolbox::fb_vel(DV[i], v_rel_in, v_rel_out, *planet);
double ta = acos(kep_toolbox::dot(v_rel_in, v_rel_out)/sqrt(kep_toolbox::dot(v_rel_in,v_rel_in))/sqrt(kep_toolbox::dot(v_rel_out,v_rel_out)));
double alt = (planet->get_mu_self() / kep_toolbox::dot(v_rel_in,v_rel_in)*(1/sin(ta/2)-1) - planet->get_radius())/1000;
if (alt > planet->get_safe_radius()) {
f[0] = DBL_MAX;
return;
}
if (should_print) {
transx_flyby(solution->add_flybyes(), planet, v_P[i], r_P[i], v_rel_in, v_rel_out, t_P[i].mjd());
}
}
}
kep_toolbox::array3D Vexc_arr(v_end_l);
kep_toolbox::diff(Vexc_arr, v_end_l, v_P[v_P.size() - 1]);
if (m_add_vinf_arr) {
DV[DV.size() - 1] += burn_cost(get_seq()[get_seq().size() - 1], Vexc_arr, true, get_circularize());
}
if (should_print) {
transx_arrival(solution->mutable_arrival(), get_seq()[get_seq().size() - 1], Vexc_arr, t_P[t_P.size() - 1].mjd());
}
double sumDeltaV = std::accumulate(DV.begin(), DV.end(), 0.0);
double sumT = std::accumulate(T.begin(), T.end(), 0.0);
if (should_print) {
solution->set_fuel_cost(sumDeltaV);
}
f[0] = sumDeltaV;
if (get_f_dimension() == 2) {
f[1] = sumT;
}
}
std::string mga_1dsm_transx::get_name() const {
return "MGA-1DSM";
}
}} // namespaces
BOOST_CLASS_EXPORT_IMPLEMENT(pagmo::problem::mga_1dsm_transx)
|
President Abdoulaye Wade of Senegal offered interested Haitians free land in Senegal; depending on how many respond to the offer, this could include up to an entire region.
|
r=359.26
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7sw2z/media/images/d7sw2z-003/svc:tesseract/full/full/359.26/default.jpg Accept:application/hocr+xml
|
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
a₁ a₂ : α
b₁ : β a₁
b₂ : β a₂
h : { fst := a₁, snd := b₁ } = { fst := a₂, snd := b₂ }
⊢ a₁ = a₂ ∧ HEq b₁ b₂
[PROOFSTEP]
cases h
[GOAL]
case refl
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
a₁ : α
b₁ : β a₁
⊢ a₁ = a₁ ∧ HEq b₁ b₁
[PROOFSTEP]
exact ⟨rfl, heq_of_eq rfl⟩
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
a₁ a₂ : α
b₁ : β a₁
b₂ : β a₂
x✝ : a₁ = a₂ ∧ HEq b₁ b₂
h₁ : a₁ = a₂
h₂ : HEq b₁ b₂
⊢ { fst := a₁, snd := b₁ } = { fst := a₂, snd := b₂ }
[PROOFSTEP]
subst h₁
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
a₁ : α
b₁ b₂ : β a₁
x✝ : a₁ = a₁ ∧ HEq b₁ b₂
h₂ : HEq b₁ b₂
⊢ { fst := a₁, snd := b₁ } = { fst := a₁, snd := b₂ }
[PROOFSTEP]
rw [eq_of_heq h₂]
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
x₀ x₁ : Sigma β
h₀ : x₀.fst = x₁.fst
h₁ : HEq x₀.snd x₁.snd
⊢ x₀ = x₁
[PROOFSTEP]
cases x₀
[GOAL]
case mk
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
x₁ : Sigma β
fst✝ : α
snd✝ : β fst✝
h₀ : { fst := fst✝, snd := snd✝ }.fst = x₁.fst
h₁ : HEq { fst := fst✝, snd := snd✝ }.snd x₁.snd
⊢ { fst := fst✝, snd := snd✝ } = x₁
[PROOFSTEP]
cases x₁
[GOAL]
case mk.mk
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
fst✝¹ : α
snd✝¹ : β fst✝¹
fst✝ : α
snd✝ : β fst✝
h₀ : { fst := fst✝¹, snd := snd✝¹ }.fst = { fst := fst✝, snd := snd✝ }.fst
h₁ : HEq { fst := fst✝¹, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
⊢ { fst := fst✝¹, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
cases h₀
[GOAL]
case mk.mk.refl
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
fst✝ : α
snd✝¹ snd✝ : β fst✝
h₁ : HEq { fst := fst✝, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
⊢ { fst := fst✝, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
cases h₁
[GOAL]
case mk.mk.refl.refl
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝, snd := snd✝ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
x₀ x₁ : Sigma β
⊢ x₀ = x₁ ↔ x₀.fst = x₁.fst ∧ HEq x₀.snd x₁.snd
[PROOFSTEP]
cases x₀
[GOAL]
case mk
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
x₁ : Sigma β
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝, snd := snd✝ } = x₁ ↔
{ fst := fst✝, snd := snd✝ }.fst = x₁.fst ∧ HEq { fst := fst✝, snd := snd✝ }.snd x₁.snd
[PROOFSTEP]
cases x₁
[GOAL]
case mk.mk
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
fst✝¹ : α
snd✝¹ : β fst✝¹
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝¹, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ } ↔
{ fst := fst✝¹, snd := snd✝¹ }.fst = { fst := fst✝, snd := snd✝ }.fst ∧
HEq { fst := fst✝¹, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
[PROOFSTEP]
exact Sigma.mk.inj_iff
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
γ : Type u_7
inst✝ : Nonempty γ
a b : α
f : γ → β a
g : γ → β b
h : mk a ∘ f = mk b ∘ g
⊢ a = b ∧ HEq f g
[PROOFSTEP]
rcases ‹Nonempty γ› with ⟨i⟩
[GOAL]
case intro
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
γ : Type u_7
inst✝ : Nonempty γ
a b : α
f : γ → β a
g : γ → β b
h : mk a ∘ f = mk b ∘ g
i : γ
⊢ a = b ∧ HEq f g
[PROOFSTEP]
obtain rfl : a = b := congr_arg Sigma.fst (congr_fun h i)
[GOAL]
case intro
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
γ : Type u_7
inst✝ : Nonempty γ
a : α
f : γ → β a
i : γ
g : γ → β a
h : mk a ∘ f = mk a ∘ g
⊢ a = a ∧ HEq f g
[PROOFSTEP]
simpa [Function.funext_iff] using h
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
f₁ : α₁ → α₂
f₂ : (a : α₁) → β₁ a → β₂ (f₁ a)
h₁ : Injective f₁
h₂ : ∀ (a : α₁), Injective (f₂ a)
i : α₁
x : β₁ i
j : α₁
y : β₁ j
h : Sigma.map f₁ f₂ { fst := i, snd := x } = Sigma.map f₁ f₂ { fst := j, snd := y }
⊢ { fst := i, snd := x } = { fst := j, snd := y }
[PROOFSTEP]
obtain rfl : i = j := h₁ (Sigma.mk.inj_iff.mp h).1
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
f₁ : α₁ → α₂
f₂ : (a : α₁) → β₁ a → β₂ (f₁ a)
h₁ : Injective f₁
h₂ : ∀ (a : α₁), Injective (f₂ a)
i : α₁
x y : β₁ i
h : Sigma.map f₁ f₂ { fst := i, snd := x } = Sigma.map f₁ f₂ { fst := i, snd := y }
⊢ { fst := i, snd := x } = { fst := i, snd := y }
[PROOFSTEP]
obtain rfl : x = y := h₂ i (sigma_mk_injective h)
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
f₁ : α₁ → α₂
f₂ : (a : α₁) → β₁ a → β₂ (f₁ a)
h₁ : Injective f₁
h₂ : ∀ (a : α₁), Injective (f₂ a)
i : α₁
x : β₁ i
h : Sigma.map f₁ f₂ { fst := i, snd := x } = Sigma.map f₁ f₂ { fst := i, snd := x }
⊢ { fst := i, snd := x } = { fst := i, snd := x }
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
f₁ : α₁ → α₂
f₂ : (a : α₁) → β₁ a → β₂ (f₁ a)
h₁ : Surjective f₁
h₂ : ∀ (a : α₁), Surjective (f₂ a)
⊢ Surjective (Sigma.map f₁ f₂)
[PROOFSTEP]
simp only [Function.Surjective, Sigma.forall, h₁.forall]
[GOAL]
α : Type u_1
α₁ : Type u_2
α₂ : Type u_3
β : α → Type u_4
β₁ : α₁ → Type u_5
β₂ : α₂ → Type u_6
f₁ : α₁ → α₂
f₂ : (a : α₁) → β₁ a → β₂ (f₁ a)
h₁ : Surjective f₁
h₂ : ∀ (a : α₁), Surjective (f₂ a)
⊢ ∀ (x : α₁) (b : β₂ (f₁ x)), ∃ a, Sigma.map f₁ f₂ a = { fst := f₁ x, snd := b }
[PROOFSTEP]
exact fun i ↦ (h₂ _).forall.2 fun x ↦ ⟨⟨i, x⟩, rfl⟩
[GOAL]
α : Sort u_1
β : α → Sort u_2
x₀ x₁ : PSigma β
h₀ : x₀.fst = x₁.fst
h₁ : HEq x₀.snd x₁.snd
⊢ x₀ = x₁
[PROOFSTEP]
cases x₀
[GOAL]
case mk
α : Sort u_1
β : α → Sort u_2
x₁ : PSigma β
fst✝ : α
snd✝ : β fst✝
h₀ : { fst := fst✝, snd := snd✝ }.fst = x₁.fst
h₁ : HEq { fst := fst✝, snd := snd✝ }.snd x₁.snd
⊢ { fst := fst✝, snd := snd✝ } = x₁
[PROOFSTEP]
cases x₁
[GOAL]
case mk.mk
α : Sort u_1
β : α → Sort u_2
fst✝¹ : α
snd✝¹ : β fst✝¹
fst✝ : α
snd✝ : β fst✝
h₀ : { fst := fst✝¹, snd := snd✝¹ }.fst = { fst := fst✝, snd := snd✝ }.fst
h₁ : HEq { fst := fst✝¹, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
⊢ { fst := fst✝¹, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
cases h₀
[GOAL]
case mk.mk.refl
α : Sort u_1
β : α → Sort u_2
fst✝ : α
snd✝¹ snd✝ : β fst✝
h₁ : HEq { fst := fst✝, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
⊢ { fst := fst✝, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
cases h₁
[GOAL]
case mk.mk.refl.refl
α : Sort u_1
β : α → Sort u_2
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝, snd := snd✝ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rfl
[GOAL]
α : Sort u_1
β : α → Sort u_2
x₀ x₁ : PSigma β
⊢ x₀ = x₁ ↔ x₀.fst = x₁.fst ∧ HEq x₀.snd x₁.snd
[PROOFSTEP]
cases x₀
[GOAL]
case mk
α : Sort u_1
β : α → Sort u_2
x₁ : PSigma β
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝, snd := snd✝ } = x₁ ↔
{ fst := fst✝, snd := snd✝ }.fst = x₁.fst ∧ HEq { fst := fst✝, snd := snd✝ }.snd x₁.snd
[PROOFSTEP]
cases x₁
[GOAL]
case mk.mk
α : Sort u_1
β : α → Sort u_2
fst✝¹ : α
snd✝¹ : β fst✝¹
fst✝ : α
snd✝ : β fst✝
⊢ { fst := fst✝¹, snd := snd✝¹ } = { fst := fst✝, snd := snd✝ } ↔
{ fst := fst✝¹, snd := snd✝¹ }.fst = { fst := fst✝, snd := snd✝ }.fst ∧
HEq { fst := fst✝¹, snd := snd✝¹ }.snd { fst := fst✝, snd := snd✝ }.snd
[PROOFSTEP]
exact PSigma.mk.inj_iff
|
function ppwd = pwd_imp_circexp(pm,Npw)
%PWD_IMP_CIRCEXP converts a circular basis expansion of a sound field to its
%two-dimensional plane wave decomposition
%
% Usage: ppwd = pwd_imp_circexp(pm,[Npw])
%
% Input parameters:
% pm - circular basis expansion [N x (M+1)]
% Npw - number of equi-angular distributed plane waves, optional,
% default: 2*M+1
%
% Output parameters:
% ppwd - plane wave decomposition [N x Npw]
%
% See also: driving_function_imp_localwfs_sbl_ps,
% driving_function_imp_localwfs_sbl_pw
%*****************************************************************************
% The MIT License (MIT) *
% *
% Copyright (c) 2010-2019 SFS Toolbox Developers *
% *
% Permission is hereby granted, free of charge, to any person obtaining a *
% copy of this software and associated documentation files (the "Software"), *
% to deal in the Software without restriction, including without limitation *
% the rights to use, copy, modify, merge, publish, distribute, sublicense, *
% and/or sell copies of the Software, and to permit persons to whom the *
% Software is furnished to do so, subject to the following conditions: *
% *
% The above copyright notice and this permission notice shall be included in *
% all copies or substantial portions of the Software. *
% *
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *
% THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *
% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *
% DEALINGS IN THE SOFTWARE. *
% *
% The SFS Toolbox allows to simulate and investigate sound field synthesis *
% methods like wave field synthesis or higher order ambisonics. *
% *
% https://sfs.readthedocs.io [email protected] *
%*****************************************************************************
%% ===== Checking of input parameters ==================================
nargmin = 1;
nargmax = 2;
narginchk(nargmin,nargmax);
isargmatrix(pm);
M = size(pm,2)-1;
if nargin == nargmin
Npw = 2*M+1;
else
isargpositivescalar(Npw);
end
%% ===== Computation ====================================================
% Implementation of
%
%   p(phipw, t) = sum_{m=-M..M} p_m(t) * i^m * e^(-i m phipw)
%
% with
%
%   phipw = n * 2*pi/Npw
pm = [conj(pm(:,end:-1:2)), pm]; % append coefficients for negative m
ppwd = inverse_cht(bsxfun(@times,pm,1i.^(-M:M)),Npw);
|
Formal statement is: lemma translation_invert: fixes a :: "'a::ab_group_add" assumes "(\<lambda>x. a + x) ` A = (\<lambda>x. a + x) ` B" shows "A = B" Informal statement is: If two sets are equal after translation by the same amount, then they are equal.
|
#' NA
#'
#' NA utilities.
#'
#' @param x,object
#' A float vector/matrix.
#' @param ...
#' Ignored.
#'
#' @examples
#' library(float)
#'
#' s = flrunif(10, 3)
#' is.na(s)
#'
#' @useDynLib float R_isna_spm R_naomit_spm R_anyNA_spm
#' @name na
#' @rdname na
NULL
is.na_float32 = function(x)
{
.Call(R_isna_spm, DATA(x))
}
na.omit_float32 = function(object, ...)
{
ret = .Call(R_naomit_spm, DATA(object))
float32(ret)
}
#' @rdname na
#' @export
setMethod("is.na", signature(x="float32"), is.na_float32)
#' @rdname na
#' @export
setMethod("na.omit", signature(object="float32"), na.omit_float32)
#' @rdname na
#' @export
setMethod("na.exclude", signature(object="float32"), na.omit_float32)
# -----------------------------------------------------------------------------
# internal only
# -----------------------------------------------------------------------------
anyNA = function(x)
{
.Call(R_anyNA_spm, DATA(x))
}
|
# Problem 8.42
Atmospheric air enters a 10m long, 150mm diameter uninsulated heating duct at $60^\circ\mathrm{C}$ and 0.04 kg/s. The duct surface temperature is approximately constant at $T_{s} = 15^\circ\mathrm{C}$.
(a) What are the outlet air temperature, the heat rate q, and the pressure drop $\Delta p$ for these conditions?
(b) To illustrate the tradeoff between heat transfer rate and pressure drop considerations, calculate $q$ and $\Delta p$ for diameters in the range from 0.1 to 0.2 m. In your analysis, maintain the total surface area, $A_{s} = \pi DL$, at the value computed for part (a). Plot $q,\Delta p$, and $L$ as a function of the duct diameter.
```python
from IPython.display import clear_output
import schemdraw as schem
import schemdraw.elements as e
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.constants as sc
import sympy as sym
from Libraries import thermodynamics as thermo
from Libraries import HT_internal_convection as intconv
```
```python
fluid = thermo.Fluid('air',60.,"C")
L = 10 #m
D = 0.15 #m
mdot = 0.04 #kg/s
A = np.pi*(0.075**2)
bulk = mdot/(fluid.rho*A)
Re = (4*mdot)/(np.pi*D*fluid.mu)
print('The Reynolds number is %.2f'%Re)
pipe = intconv.PipeFlow(D, L, rho=fluid.rho, nu=fluid.nu, Um = bulk)
print(pipe.Re)
```
The Reynolds number is 16836.53
10
# Problem 8.62
Water at a flow rate of $\dot{m}$ = 0.215 kg/s is cooled from $70^\circ\mathrm{C}$ to $30^\circ\mathrm{C}$ by passing it through a thin-walled tube of diameter $D$ = 50 mm and maintaining a coolant at $T_{\infty}$ = $15^\circ\mathrm{C}$ in cross flow over the tube.
(a) What is the required tube length if the coolant is air and its velocity is $V$ = 20 m/s?
(b) What is the tube length if the coolant is water and $V$ = 2 m/s?
```python
T_m = (70+30)/2
T_infty = 15 #C
T_mi = 70 #C
T_mo = 30 #C
T_f = (T_infty + T_m)/2 #C
mdot = 0.215 #kg/s
D = 0.05 #m
water = thermo.Fluid('water',T_m,"C")
air_c = thermo.Fluid('air',T_f,"C")
water_c = thermo.Fluid('water',T_f,"C")
q =mdot*water.Cp*(70-30)
#Finding log mean temperature, initial Reynolds number, and friction factor
T_lm = -(T_mi - T_mo)/np.log((T_infty-T_mo)/(T_infty-T_mi))
Re_i = (4*mdot)/(np.pi*D*water.mu)
f = (0.79*np.log(Re_i)-1.64)**-2 #Petukhov correlation: f = (0.790 ln(Re) - 1.64)^-2
#Gnielinski
Nu_i = ((f/8)*(Re_i-1000)*water.Pr)/(1+12.7*(((f/8)**.5))*((water.Pr**(2/3))-1))
h_i = Nu_i*(water.k/D)
#Reynolds number at outlet
Re_o_1 = (20*D)/air_c.nu
#Seban, constant T_s
Nu_o_1 = 5+0.025*(Re_o_1*water.Pr)**0.8
h_o_1 = Nu_o_1*water.k/D
Q = ((1/h_i)+(1/h_o_1))**-1
L_1 = q/(Q*np.pi*D*T_lm)
print("The required length if the coolant is air at 20 m/s is %.2f m."%L_1)
Re_o_2 = (2*D)/(water_c.nu)
Nu_o_2 = 5+0.025*(Re_o_2*water.Pr)**0.8
h_o_2 = Nu_o_2*water.k/D
Q_2 = ((1/h_i)+(1/h_o_2))**-1
L_2 = q/(Q_2*np.pi*D*T_lm)
print("The required length if the coolant is water at 2 m/s is %.2f m."%L_2)
```
The required length if the coolant is air at 20 m/s is 15.13 m.
The required length if the coolant is water at 2 m/s is 14.58 m.
|
If $g$ tends to $0$ in $F$, then $f \circ g$ tends to $0$ in $F$.
|
!------------------------------------------------------------
!------------------------------------------------------------
! pla : Plant's Model of Bursting Nerve Cells
!------------------------------------------------------------
!------------------------------------------------------------
SUBROUTINE FUNC(NDIM,U,ICP,PAR,IJAC,F,DFDU,DFDP)
! ---------- ----
IMPLICIT NONE
INTEGER, INTENT(IN) :: NDIM, ICP(*), IJAC
DOUBLE PRECISION, INTENT(IN) :: U(NDIM), PAR(*)
DOUBLE PRECISION, INTENT(OUT) :: F(NDIM)
DOUBLE PRECISION, INTENT(INOUT) :: DFDU(NDIM,NDIM), DFDP(NDIM,*)
DOUBLE PRECISION VI,VK,VL,VCa,gK,gL,Kp,Kc,rho,txT,zeta,gi,gP,gT
DOUBLE PRECISION V,xT,xK,yI,c,a,b,Vs,am,ah,an,bm,bh,bn,si,sK
DOUBLE PRECISION txK,tyI,zI,sT
VI=30
VK=-75
VL=-40
VCa=140
gK=0.3
gL=0.003
Kp=0.5
Kc=0.0085
rho=0.0003
txT=235
zeta=1
gI=PAR(1)
gP=PAR(2)
gT=PAR(3)
V =U(1)
xT=U(2)
xK=U(3)
yI=U(4)
c =U(5)
a=127/(VI-VK)
b=(115*VK+12*VI)/(VI-VK)
Vs=a*V-b
am=0.1*(50-Vs)/(DEXP((50-Vs)/10)-1)
ah=0.07*DEXP((25-Vs)/20)
an=0.01*(55-Vs)/(DEXP((55-Vs)/10)-1)
bm=4*DEXP((25-Vs)/18)
bh=1./(DEXP((55-Vs)/10)+1)
bn=0.125*DEXP((45-Vs)/80)
sI=am/(am+bm)
sK=an/(an+bn)
txK=12.5/(an+bn)
tyI=12.5/(ah+bh)
zI=ah/(ah+bh)
sT=1./(DEXP(0.15*(-50-V))+1)
F(1)= (gI*sI**3*yI + gT*xT) * (VI-V) &
+ (gK*xK**4 + gP*c/(Kp+c)) * (VK-V) &
+ gL * (VL-V)
F(2)= (sT-xT) / (txT*zeta)
F(3)= (sK-xK) / (txK*zeta)
F(4)= (zI-yI) / (tyI*zeta)
F(5)= rho * (Kc*xT*(VCa-V) - c)
END SUBROUTINE FUNC
SUBROUTINE STPNT(NDIM,U,PAR,T)
! ---------- -----
IMPLICIT NONE
INTEGER, INTENT(IN) :: NDIM
DOUBLE PRECISION, INTENT(INOUT) :: U(NDIM),PAR(*)
DOUBLE PRECISION, INTENT(IN) :: T
DOUBLE PRECISION gI,gP,gT
gI=50.
gP=0.03
gT=0.01
PAR(1)=gI
PAR(2)=gP
PAR(3)=gT
U(1)=-2.329601E+00
U(2)= 9.992162E-01
U(3)= 7.373314E-01
U(4)= 6.135948E-03
U(5)= 1.208853E+00
END SUBROUTINE STPNT
SUBROUTINE BCND
END SUBROUTINE BCND
SUBROUTINE ICND
END SUBROUTINE ICND
SUBROUTINE FOPT
END SUBROUTINE FOPT
SUBROUTINE PVLS
END SUBROUTINE PVLS
|
#Minesweeper 16x16
function placemine(grid) #randomly places a mine in a square in the grid where there isn't already a mine
row, column = rand(1:16), rand(1:16)
if grid[row][column]!="*"
grid[row][column]="*"
else
placemine(grid)
end
end
up_int(str::String) = string(parse(Int64,str)+1)
input(prompt::String="") = (print(prompt); chomp(readline())) #reads a line from stdin; used by play() and restart()
function update(g, r, c) #add one to the boxes next to a mine; split into sections horizontally
if r>1
update_c(g,r-1,c) #update row above
end
update_c(g,r,c) #update the same row
if r<16
update_c(g,r+1,c) #update row below
end
end
function update_c(g, r, c) #adds one to the boxes next to a mine, vertically
row = g[r]
if c>1 && row[c-1]!="*"
row[c-1] = up_int(row[c-1]) #update left column
end
if row[c]!="*"
row[c] = up_int(row[c]) #update current column
end
if c<16 && row[c+1]!="*"
row[c+1] = up_int(row[c+1]) #update right column
end
end
function printgrid(grid) #prints the grid
println("\n A B C D E F G H I J K L M N O P")
println(" ╔═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╦═══╗")
for r=1:16
if r<10
println(r," ║ ",grid[r][1]," ║ ",grid[r][2]," ║ ",grid[r][3]," ║ ",grid[r][4]," ║ ",grid[r][5]," ║ ",grid[r][6]," ║ ",grid[r][7]," ║ ",grid[r][8]," ║ ",grid[r][9]," ║ ",grid[r][10]," ║ ",grid[r][11]," ║ ",grid[r][12]," ║ ",grid[r][13]," ║ ",grid[r][14]," ║ ",grid[r][15]," ║ ",grid[r][16]," ║ ")
else
println(r,"║ ",grid[r][1]," ║ ",grid[r][2]," ║ ",grid[r][3]," ║ ",grid[r][4]," ║ ",grid[r][5]," ║ ",grid[r][6]," ║ ",grid[r][7]," ║ ",grid[r][8]," ║ ",grid[r][9]," ║ ",grid[r][10]," ║ ",grid[r][11]," ║ ",grid[r][12]," ║ ",grid[r][13]," ║ ",grid[r][14]," ║ ",grid[r][15]," ║ ",grid[r][16]," ║ ")
end
if r != 16
println(" ╠═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╬═══╣")
end
end
println(" ╚═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╩═══╝")
end
function search(letters, loc) #returns the index of the letter (from the input) in the array letters
for i=1:length(letters)
if letters[i] == string(loc[1])
return i
end
end
end
function play(g, kg) #the main function that is called for every move
letters = String["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"]
loc = lowercase(string(input("Choose a square to reveal. (eg. F4)\n")))
r = parse(Int64,loc[2:end]) #the number corresponds to the row (may have two digits, eg. A12)
c = search(letters,loc) #the letter corresponds to the column
value = g[r][c]
if value == "*"
printgrid(g)
println("You Lose.")
playagain = lowercase(string(input("Play again? Y/N\n")))
if playagain == "y"
restart()
else
exit()
end
end
kg[r][c] = value
if value == "0"
checkzeros(kg, g, r, c)
end
printgrid(kg)
remaining = 0
for x=1:16
for y in kg[x]
if y == " "
remaining = remaining + 1
end
end
end
if remaining <= 20
printgrid(g)
println("You Win.")
playagain = lowercase(string(input("Play again? Y/N\n")))
if playagain == "y"
restart()
else
exit()
end
end
play(g,kg)
end
function checkzeros(kg, g, r, c) #function that checks that every square around each 0 is open
oldkg = deepcopy(kg)
openzero(kg, g, r, c)
if oldkg==kg #check if there are any changes in the grid
return
end
while true #loops through the grid to open any other 0s we find, creating a chain reaction
oldkg = deepcopy(kg)
for x=1:16
for y=1:16
if kg[x][y]=="0"
openzero(kg, g, x, y)
end
end
end
if oldkg==kg
return
end
end
end
function openzero(kg, g, r, c) #opens all the squares around when 0 is found
#Row above
if r>1
row = kg[r-1]
if c>1
row[c-1] = g[r-1][c-1]
end
row[c] = g[r-1][c]
if c<16
row[c+1] = g[r-1][c+1]
end
end
#Same row
row = kg[r]
if c>1
row[c-1] = g[r][c-1]
end
if c<16
row[c+1] = g[r][c+1]
end
#Row below
if r<16
row = kg[r+1]
if c>1
row[c-1] = g[r+1][c-1]
end
row[c] = g[r+1][c]
if c<16
row[c+1] = g[r+1][c+1]
end
end
end
function restart() #the main function that initiates every game
println("Welcome to Minesweeper by kfung.")
println("Your goal is to find 20 mines(*) that are located in an 16x16 grid.")
println("In each go, type to coordinates of the square you want to uncover, eg. G3")
println("You lose if you choose a square with a mine.")
println("Otherwise, the number of mines adjacent to a square (including diagonals) will be revealed.")
println("Squares around 0 will be automatically reveal because they are no adjacent mines.")
println("LET'S BEGIN!")
grid = [["0" for j in 1:16] for k in 1:16]
knowngrid = [[" " for j in 1:16] for k in 1:16]
for i=1:20
placemine(grid)
end
for row=1:16
for column=1:16
if grid[row][column] == "*"
update(grid, row, column)
end
end
end
printgrid(knowngrid)
play(grid, knowngrid)
end
restart()
|
\section{Justification for the research}
\todo
|
import Data.Vect
%default total
interface ArrayM where
Array : (n : Nat) -> (a : Type) -> Type
create : (n : Nat) -> a -> Array n a
lkp : (i : Nat) -> (prf : LTE (S i) n) -> Array n a -> a
upd : a -> (i : Nat) -> (prf : LT i n) -> Array n a -> Array n a
using (ArrayM)
sumArrayHelp : Num a => (k : Nat) -> LT k (S n) -> Array n a -> a
sumArrayHelp Z x y = 0
sumArrayHelp (S k) (LTESucc prf) y
= lkp k prf y + sumArrayHelp k (lteSuccRight prf) y
sumArray : Array n Int -> Int
sumArray {n} x = sumArrayHelp n lteRefl x
implementation ArrayM where
Array = Vect
create n x = replicate n x
lkp Z (LTESucc x) (y :: xs) = y
lkp (S k) (LTESucc x) (y :: xs) = lkp k x xs
upd val Z (LTESucc p) (_ :: xs) = val :: xs
upd val (S k) (LTESucc p) (x :: xs) = x :: upd val k p xs
-- using (ArrayM as vectArray)
testSum : Array n Int -> Int
testSum = sumArray
main : IO ()
main = printLn (testSum (upd 11 1 (LTESucc (LTESucc LTEZero)) (create 4 10)))
|
module Test.Object
import Hedgehog
import Generics.Derive
import JS
%language ElabReflection
public export
record Address where
constructor MkAddress
street : String
nr : Bits32
zip : String
city : String
%runElab derive "Address" [Generic,Meta,Show,Eq]
-- Direct JSON encoding
toStr : Address -> String
toStr s = #"{"street":\#{show s.street},"nr":\#{show s.nr},"zip":\#{show s.zip},"city":\#{show s.city}}"#
-- Encoding via `stringify`.
toJSON : Address -> String
toJSON a = let ps = [ ("street",Str a.street)
, ("nr", Num . fromInteger $ cast a.nr)
, ("zip", Str a.zip)
, ("city", Str a.city)
]
in stringify (pairs ps)
export
fromJSON : String -> Maybe Address
fromJSON s =
do val <- parseMaybe s
obj <- getObject val
[| MkAddress (valueAt obj "street" >>= getStr)
(valueAt obj "nr" >>= map (fromInteger . cast) . getNum)
(valueAt obj "zip" >>= getStr)
(valueAt obj "city" >>= getStr) |]
--------------------------------------------------------------------------------
-- Generators
--------------------------------------------------------------------------------
plainString : Gen String
plainString = string (linear 1 10) alphaNum
export
addresses : Gen Address
addresses = [| MkAddress plainString
(bits32 $ linear 0 50)
plainString
plainString |]
--------------------------------------------------------------------------------
-- Properties
--------------------------------------------------------------------------------
prop_toJSON : Property
prop_toJSON = property $ do a <- forAll addresses
toStr a === toJSON a
prop_decode : Property
prop_decode = property $ do a <- forAll addresses
case parse (toJSON a) of
Left e => do footnote (dispErr e)
assert False
Right _ => assert True
prop_roundTrip : Property
prop_roundTrip = property $ do a <- forAll addresses
Just a === fromJSON (toJSON a)
export
test : IO ()
test = ignore . checkGroup . withTests 100 $ MkGroup "Object" [
("prop_toJSON", prop_toJSON)
, ("prop_decode", prop_decode)
, ("prop_roundTrip", prop_roundTrip)
]
|
# Conditional Random Fields
The conditional random field is a sequence labeling model. Like the perceptron, it belongs to the family of structured learning methods, but it is even more powerful.
## I. The Model Spectrum of Machine Learning
Algorithms in the generative family include:
> naive Bayes\
k-nearest neighbors\
Gaussian mixture models\
hidden Markov models\
Bayesian networks\
sigmoid belief networks\
Markov random fields\
deep belief networks\
LDA topic models
Algorithms in the discriminative family include:
> linear regression\
logistic regression\
linear discriminant analysis\
support vector machines\
CART (classification and regression trees)\
neural networks (NN)\
Gaussian processes\
conditional random fields
[Suppose we have training data (X,Y), where X is the set of attributes and Y the class label. When a new sample x arrives, we want to predict its class y. Our final goal is to find the y that maximizes the conditional probability P(y|x) and take it as the predicted class.]
Depending on whether we model the joint probability distribution P(x,y) (the probability of x and y occurring together) or the conditional distribution P(y|x), we obtain generative and discriminative models. Both ultimately compute P(y|x).
### 1. Generative models
[A generative model typically builds one model per class: as many models as there are classes. For example, if the labels are {cat, dog, pig}, we first learn a cat model from cat features, then a dog model from dog features, and so on; for a new sample x we compute the joint probability P(x,y) with each of the three classes, and then obtain P(y|x) via Bayes' rule.]
Generative model: it simulates the process by which the data are generated. The two random variables stand in a cause-and-effect order: first the cause y, then the effect x. This causal relationship is modeled by the joint distribution:
$$P(x,y) = P(y)P(x|y)$$
Through the joint distribution P(x,y), a generative model in fact also implicitly models P(x):
$$P(x) = \sum_{y \in Y}P(x,y)$$
Bayes' rule then yields P(y|x) [in the end we still want P(y|x); we merely obtain it via P(x,y)]:
$$P(y|x) = \frac{P(x,y)}{P(x)}$$
This approach has two drawbacks:
1. P(x) is hard to estimate accurately, because the features are not mutually independent but are entangled in complex dependencies.
2. P(x) plays no direct role in classification.
Discriminative models were introduced to overcome these two problems.
### 2. Discriminative models
[A discriminative model learns a decision function or decision boundary from the training data — for instance, an SVM learns a separating surface — and computes the conditional probability P(y|x) directly; the class with the largest P(y|x) is assigned to the new sample. Discriminative models model the conditional probability and learn the optimal boundary between classes; they cannot reflect the properties of the training data themselves and can only tell us the predicted class.]
<font color='red'>  A discriminative model skips P(x) entirely and models the conditional probability P(y|x) directly. No matter how complex the internal dependencies within x are, they do not affect the model's judgment of y, so we can freely exploit rich, correlated features of every kind. This is why perceptron-based segmentation achieves higher accuracy than hidden Markov models.</font>
$$P(y|x) = \frac{exp(score(x,y))}{\sum_{x,y}exp(score(x,y))} \ \ \ \ \text{where } exp \text{ is the exponential function}$$
### 2.1 An illustrative example
Suppose we want to distinguish cats from dogs:
Generative model: first learn a dog model from the dog features and a cat model from the cat features. Given a new animal, extract its features, evaluate them under the dog model and then under the cat model, and pick whichever probability is larger.
Discriminative model: learn a single model from the historical data, then extract the animal's features and directly predict the probability that it is a dog or a cat.
### 2.2 A worked numerical example
Consider a classification problem where X is the feature and Y the class label. A discriminative model learns the conditional distribution P(y|x); a generative model learns the joint distribution P(x,y). A simple example: let X take two values (1 or 2) and Y two classes (0 or 1), with training samples (1,0), (1,0), (1,1), (2,1).
The learned joint distribution (generative model) is:
| | 0 | 1 |
|-|-|-|
|1|1/2|1/4|
|2|0|1/4|
and the learned conditional distribution (discriminative model) is:
| | 0 | 1 |
|-|-|-|
|1|2/3|1/3|
|2|0|1|
In an actual classification problem, the discriminative model can be used directly to classify a feature vector, whereas the generative model additionally needs Bayes' rule before it can classify. On the other hand, the generative model's probability distribution has other uses, which makes it more general and more widely applicable; the discriminative model is more direct and simpler. The two approaches intersect frequently in current practice. A generative model can be converted into a discriminative one, but a discriminative model cannot be converted into a generative one.
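As a quick check, both tables can be reproduced mechanically from the four samples. Below is a minimal Python sketch (not part of the original text) that estimates the joint distribution by frequency and derives the conditional distribution from it:
```python
from collections import Counter

samples = [(1, 0), (1, 0), (1, 1), (2, 1)]  # training pairs (x, y)
n = len(samples)

# generative view: joint distribution P(x, y) estimated by frequency
joint = {xy: c / n for xy, c in Counter(samples).items()}
print(joint)  # {(1, 0): 0.5, (1, 1): 0.25, (2, 1): 0.25}

# discriminative view: conditional P(y | x) = P(x, y) / P(x)
px = Counter(x for x, _ in samples)
cond = {(x, y): joint.get((x, y), 0.0) * n / px[x]
        for x in px for y in (0, 1)}
print(cond)  # P(0|1)=2/3, P(1|1)=1/3, P(0|2)=0, P(1|2)=1
```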
### 2.3 Illustrating the difference with a figure
In the figure (left: discriminative model; right: generative model) the difference is clear: a discriminative model looks for a decision boundary and assigns samples to classes according to that boundary. A generative model instead learns the boundary of each class; it contains more information and can even be used to generate samples.
### 2.4 Summary of the two model families
Whether generative or discriminative, the final decision criterion is the conditional probability P(y|x); the generative model first computes the joint probability P(x,y) and then derives the conditional probability via Bayes' rule. Generative models therefore capture more information about the data distribution itself and are more broadly applicable.
####    Characteristics of generative models:
A generative model learns the joint density P(X,Y); it can describe the distribution from a statistical viewpoint and reflect the similarity of data within the same class, without caring where the boundary between classes lies. Generative learning converges faster: as the sample size grows, the learned model approaches the true model more quickly. When latent variables are present, a generative model can still be used, whereas discriminative methods cannot. Concretely:
> It models the joint probability and learns the distribution of all classes of data.\
> It learns more information about the data and can reflect the data's own characteristics.\
> Training is more expensive and needs more computational resources.\
> It needs more samples; with few samples it learns poorly.\
> Inference performance is worse.\
> Under certain conditions it can be converted into a discriminative model.
####    Characteristics of discriminative models:
A discriminative model learns the decision function Y=f(X) or the conditional probability P(Y|X) directly. It cannot reflect the properties of the training data themselves, but it looks for the optimal separating surface between classes, capturing the differences between heterogeneous data; facing prediction head-on usually yields higher accuracy. Concretely:
> It models the conditional probability and learns the optimal boundary between classes.\
> It captures the differences between the features of different classes; it does not learn the distribution itself and cannot reflect the data's own characteristics.\
> Training is cheaper and needs fewer computational resources.\
> It can make do with fewer samples and still learn well.\
> Prediction performance is better.\
> It cannot be converted into a generative model.
In short, both families maximize the posterior probability: the discriminative model models the posterior directly, while the generative model uses Bayes' theorem as a "bridge" and reduces the problem to estimating the joint probability.
### 3. Directed probabilistic graphical models
Probabilistic graphical models let us analyze intricate relationships among random variables.
Directed probabilistic graphical models are also called Bayesian networks.
A basic requirement of a Bayesian network is that the graph be a directed acyclic graph (DAG).
A probabilistic graphical model (PGM) is a powerful framework for representing and reasoning about the joint distribution P(X,Y) of multivariate random variables, widely used in computer vision, knowledge representation, Bayesian statistics, and natural language processing. It uses nodes V for random variables and edges E to connect related variables, representing the multivariate distribution as a graph G=(V,E). One benefit is that the whole graph can be decomposed into subgraphs for analysis: a subgraph has fewer random variables and is simpler to model. How the decomposition is done gives rise to directed versus undirected graphical models.
A directed graphical model connects the nodes into a directed graph following the causal order of events: if event A causes event B, an arrow connects the two events, A-->B.
Directed graphical models factor the directed probability graph into a product of conditional probabilities, and are often realized as generative models. Writing π(v) for the set of all predecessors of node v, the distribution is:
$$P(X,Y)=\prod_{v\in V}P(v|\pi(v))$$
#### 3.1 What are directed graphical models good for?
Many real-world problems can be modeled with directed probability graphs.
For example, consider a student registering for a university course. The graph contains 5 random variables:
> Difficulty of the course: two values, 0 for easy, 1 for hard\
Intelligence of the student: two values, 0 for not bright, 1 for bright\
Grade of the student: three values, 1 poor, 2 average, 3 excellent\
SAT score of the student: two values, 0 for a low score, 1 for a high score\
Letter: the quality of the recommendation letter the student receives from the professor after completing the course: two values, 0 for a weak letter, 1 for a strong letter
The edges of the graph encode the dependencies among these variables.
The student's Grade depends on the course's Difficulty and the student's Intelligence;
the Grade in turn determines whether the student gets a good Letter from the professor;
in addition, the student's Intelligence affects not only the Grade but also the SAT score.
Note that the arrow directions express causality — Intelligence influences the SAT score, but the SAT score does not influence Intelligence.
The table attached to each node is formally called a conditional probability distribution (CPD).
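To make the idea of a CPD concrete, here is a minimal Python sketch (not from the original); the table for Grade given (Difficulty, Intelligence) uses invented probabilities purely for illustration:
```python
# Hypothetical CPD P(Grade | Difficulty, Intelligence); each row sums to 1.
# Grade takes the values 1 (poor), 2 (average), 3 (excellent).
grade_cpd = {
    (0, 0): [0.30, 0.40, 0.30],
    (0, 1): [0.05, 0.25, 0.70],
    (1, 0): [0.70, 0.25, 0.05],
    (1, 1): [0.20, 0.30, 0.50],
}

def p_grade(grade, difficulty, intelligence):
    """Look up P(Grade = grade | Difficulty, Intelligence)."""
    return grade_cpd[(difficulty, intelligence)][grade - 1]

print(p_grade(3, 0, 1))  # 0.7: bright student, easy course -> likely excellent
```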
### 4. Undirected probabilistic graphical models
Undirected probabilistic graphical models are also called Markov networks.
In contrast, an undirected graphical model does not investigate the causal relationship of each event — that is, it involves no decomposition into conditional probabilities. Its edges have no direction and merely indicate that two events are related.
A few concepts for undirected graphical models:
#### 4.1 Clique
#### 4.2 Maximal clique
#### 4.3 Maximum clique
> Clique: every pair of nodes is connected (any two nodes are adjacent).\
Maximal clique: no further node can be added while keeping it a clique.\
Maximum clique: among the maximal cliques, the one with the most nodes.
#### 4.4 Factor nodes
A factor node is a virtual node introduced between variable nodes to assist the computation [a working intuition for now].
Factorization: the operation of expressing the joint distribution of an undirected probabilistic graphical model as a product of functions of the random variables in its maximal cliques.
By the Hammersley–Clifford theorem, the joint distribution of an undirected graphical model can be written as a product of functions over its maximal cliques:
$$P(X,Y) = \frac{1}{Z}\prod_{a}\Psi_{a}(X_{a}, Y_{a})$$
where a indexes the factor nodes, $\Psi_{a}$ is the function attached to factor node a, and the arguments $X_{a}, Y_{a}$ are all the variable nodes connected to that factor node. To constrain the expression to be a probability distribution, the constant Z is defined as the following normalizer:
$$Z = \sum_{x,y}\prod_{a}\Psi_{a}(X_{a}, Y_{a})$$
In machine learning, factor functions from the exponential family are commonly used:
$$\Psi_{a}(X_{a}, Y_{a}) = exp\left\{\sum_{k}W_{ak}F_{ak}(X_{a}, Y_{a})\right\}$$
where k indexes the features, $F_{ak}$ is a feature function, and $W_{ak}$ the corresponding feature weight.
Discriminative models are often represented with undirected graphs; one only needs, when normalizing, to compute a separate normalizer for each x:
$$Z(X) = \sum_{y}\prod_{a}\Psi_{a}(X_{a}, Y_{a})$$
<font color="red">Then P(X,Y) turns into the conditional probability distribution a discriminative model needs [the conversion works because in a conditional random field X is given and Y is the random field]:</font>
$$P(Y|X) = \frac{1}{Z(X)}\prod_{a}\Psi_{a}(X_{a}, Y_{a})$$
This last formula is the general form of the conditional random field.
## II. Conditional Random Fields
A conditional random field (CRF) is a model of the conditional probability distribution of one set of output sequences given a set of input sequences, and it is widely used in natural language processing. This series focuses on the special form of the CRF: the linear-chain CRF.
### 1. What problems call for a CRF?
As with HMMs, before discussing CRFs let us look at what kind of problem needs a CRF model. A simple example:
Suppose we have a series of photos of Bob's day from morning to night, and Bob quizzes us: guess the activity in each photo — a photo of him working, eating, singing, and so on. An intuitive approach is to collect a set of Bob's earlier daily-life photos, ask Bob what activity each one shows, train a supervised classifier such as logistic regression, and then use the model to predict the most likely activity label for each photo of the day.
This approach is feasible, but it ignores an important point: the photos have a strong temporal ordering, which the method above discards. Suppose we now see a photo of Bob with his mouth closed — how do we label his activity? It is hard to say. But if we have the photo taken just a moment earlier, labeling becomes easy: if in the preceding photo Bob is eating, the closed-mouth photo is very likely mid-chew; if in the preceding photo Bob is singing, the closed-mouth photo is very likely part of singing.
To make our classifier perform better, we can take the labels of neighboring data into account when labeling. This is something ordinary classifiers struggle to do, and it is exactly where CRFs excel.
In practice, part-of-speech (POS) tagging in natural language processing is a task that suits CRFs very well. The goal of POS tagging is to give the part of speech (noun, verb, adjective, ...) of every word in a sentence. Since a word's tag often depends on the tags of the surrounding words, a CRF is a natural fit — though not the only choice; many other POS tagging methods exist.
### 2. From random fields to Markov random fields
First, what is a random field? The name sounds mysterious, but it is not hard to understand. A random field is a whole made up of a number of positions; when every position is randomly assigned a value according to some distribution, the whole is called a random field. Take POS tagging again: suppose we have a ten-word sentence to tag, and each word's tag is chosen from a known tag set (noun, verb, ...). Once we have chosen a tag for every word, we have a random field.
Having understood random fields, consider Markov random fields. A Markov random field is a special case of a random field in which the value assigned at any position depends only on the values at adjacent positions and is independent of the values at non-adjacent positions. Continuing the ten-word example: if we assume every word's tag is related only to the tags of adjacent words, the random field specializes to a Markov random field — for instance, the third word's tag depends, beyond its own position, only on the tags of the second and fourth words.
### 3. From Markov random fields to conditional random fields
Once Markov random fields are understood, CRFs are easy. A CRF is a special case of a Markov random field in which there are only two kinds of variables, X and Y: X is given, and Y is the output conditioned on the given X. The Markov random field thus specializes to a conditional random field. In the ten-word POS tagging example, X is the words and Y the tags; so if we assume it is a Markov random field, it is also a CRF.
The precise mathematical description of a CRF:
Let $X$ and $Y$ be random variables and $P(Y|X)$ the conditional probability distribution of $Y$ given $X$. If the random variables $Y$ form a Markov random field, then the conditional distribution $P(Y|X)$ is called a conditional random field.
### 4. From conditional random fields to linear-chain conditional random fields
Note that the definition of a CRF does not require X and Y to have the same structure. In practice we usually assume they do:
$$X =(x_1,x_2,...x_n),\;\;Y=(y_1,y_2,...y_n)$$
$$\text{[n words, hence n corresponding tags]}$$
A CRF in which X and Y have the same structure constitutes a linear-chain conditional random field (linear-CRF).
In our ten-word POS tagging example there are ten words and ten tags, so if we assume it is a Markov random field, it is also a linear-CRF.
Now the mathematical definition of a linear-CRF:
Let $X =(x_1,x_2,...x_n),Y=(y_1,y_2,...y_n)$ both be sequences of random variables in linear-chain form. If, given the random variable sequence X, the conditional distribution P(Y|X) of the random variables Y forms a conditional random field, i.e. satisfies the Markov property:
$$P(y_i|X,y_1,y_2,...y_n) = P(y_i|X,y_{i-1},y_{i+1});i\in[1,n]$$
$$\text{given all the words of a sentence, the probability of a word's tag depends only on the neighboring tags}$$
then P(Y|X) is called a linear-chain conditional random field.
### 5. The parametric form of the linear-chain CRF
How do we turn the linear-CRF of the previous section into a learnable machine-learning model? Through feature functions and their weight coefficients. What is a feature function?
In a linear-CRF, feature functions come in two kinds. The first kind [state features, corresponding to the emission matrix of an HMM] are node feature functions defined at node $y_i$; they depend only on the current node:
$$s_l(y_i,X,i),\;\; l =1,2,...L$$
where L is the total number of node feature functions defined at that node [each node has L feature functions; s is 1 when the feature condition holds and 0 otherwise], and i is the node's position in the sequence.
The second kind [transition features, corresponding to the transition matrix of an HMM] are local feature functions defined on the context of node $y_i$; they depend only on the current node and the previous node:
$$t_k(y_{i-1},y_i,X,i),\;\; k =1,2,...K$$
$$\text{to simplify the computation, for now the current tag depends only on the previous word's tag}$$
where K is the total number of local feature functions defined at that node [each node has K feature functions; t is 1 when the feature condition holds and 0 otherwise], and i is the position in the sequence. There are only context-local feature functions, and no feature functions between non-adjacent nodes, because the linear-CRF satisfies the Markov property.
Node feature functions and local feature functions alike take only the values 0 or 1: the feature condition is either satisfied or not. At the same time, we can assign each feature function a weight expressing how much we trust it. Let $t_k$ have weight $\lambda_k$ and $s_l$ have weight $\mu_l$; the linear-CRF is then jointly determined by all the $t_k,\lambda_k,s_l,\mu_l$.
This gives the parametric form of the linear-CRF:
$$P(Y|X) = \frac{1}{Z(X)}exp\Big(\sum\limits_{i,k} \lambda_kt_k(y_{i-1},y_i, X,i) +\sum\limits_{i,l}\mu_ls_l(y_i, X,i)\Big)$$
$$\text{the probability, given X, of one candidate tag sequence: given all the words of a sentence, the share of one possible tag sequence among all possible sequences}$$
where Z(X) is the normalizer [given X, the sum over all possible tag sequences of the exponentiated weighted feature sums]:
$$Z(X) =\sum\limits_{y} exp\Big(\sum\limits_{i,k} \lambda_kt_k(y_{i-1},y_i, X,i) +\sum\limits_{i,l}\mu_ls_l(y_i, X,i)\Big)$$
$$\text{y ranges over all possible tag sequences of all the words of the sentence}$$
Returning to the feature functions themselves: each feature function defines a rule of the linear-CRF, and its coefficient defines the credibility of the rule. All the rules together with their credibilities constitute the final conditional probability distribution of the linear-CRF.
## 6. A worked linear-chain CRF example
Here is an example of a linear-CRF used for POS tagging, with the tag set simplified for convenience. Suppose every input is a three-word sentence, $X=(X_1,X_2,X_3)$, with output tags $Y=(Y_1,Y_2,Y_3)$, where $Y\in \{1(\text{noun}),2(\text{verb})\}$.
| <div style="width: 100pt">Feature function</div> | <div style="width: 300pt">Value and its condition</div> | <div style="width: 200pt">Feature weight</div> |
| :------ | :------ | :------ |
| $t_1$ | =1; ($y_1=1,y_2=2,X,2$) or ($y_2=1,y_3=2,X,3$)<br>=0; otherwise | $\lambda_1=1$ |
| $t_2$ | =1; ($y_1=1,y_2=1,X,2$)<br>=0; otherwise | $\lambda_2=0.5$ |
| $t_3$ | =1; ($y_2=2,y_3=1,X,3$)<br>=0; otherwise | $\lambda_3=1$ |
| $t_4$ | =1; ($y_1=2,y_2=1,X,2$)<br>=0; otherwise | $\lambda_4=1$ |
| $t_5$ | =1; ($y_2=2,y_3=2,X,3$)<br>=0; otherwise | $\lambda_5=0.2$ |
| $s_1$ | =1; ($y_1=1,X,1$)<br>=0; otherwise | $\mu_1=1$ |
| $s_2$ | =1; ($y_1=2,X,1$) or ($y_2=2,X,2$)<br>=0; otherwise | $\mu_2=0.5$ |
| $s_3$ | =1; ($y_2=1,X,2$) or ($y_3=1,X,3$)<br>=0; otherwise | $\mu_3=0.8$ |
| $s_4$ | =1; ($y_3=2,X,3$)<br>=0; otherwise | $\mu_4=0.5$ |
Compute the unnormalized probability of the tag sequence Y = (1,2,2).
Using the parametric form of the linear-CRF, we have:
$$exp\Big(\sum\limits_{i,k} \lambda_kt_k(y_{i-1},y_i, X,i) +\sum\limits_{i,l}\mu_ls_l(y_i, X,i)\Big) = exp\Big[\sum\limits_{k=1}^5\lambda_k\sum\limits_{i=2}^3t_k(y_{i-1},y_i, X,i) + \sum\limits_{l=1}^4\mu_l\sum\limits_{i=1}^3s_l(y_i, X,i) \Big]$$
Substituting (1,2,2), the exponent evaluates as:
$$
\begin{align}
\sum\limits_{k=1}^5\lambda_k\sum\limits_{i=2}^3t_k(y_{i-1},y_i, X,i) + \sum\limits_{l=1}^4\mu_l\sum\limits_{i=1}^3s_l(y_i, X,i) &= \lambda_1(t_1(y_1{=}1,y_2{=}2,X,2)+t_1(y_2{=}2,y_3{=}2,X,3)) \\
&+\lambda_2(t_2(y_1{=}1,y_2{=}2,X,2)+t_2(y_2{=}2,y_3{=}2,X,3)) \\
&+\lambda_3(t_3(y_1{=}1,y_2{=}2,X,2)+t_3(y_2{=}2,y_3{=}2,X,3)) \\
&+\lambda_4(t_4(y_1{=}1,y_2{=}2,X,2)+t_4(y_2{=}2,y_3{=}2,X,3)) \\
&+\lambda_5(t_5(y_1{=}1,y_2{=}2,X,2)+t_5(y_2{=}2,y_3{=}2,X,3)) \\
&+\mu_1(s_1(y_1{=}1,X,1)+s_1(y_2{=}2,X,2)+s_1(y_3{=}2,X,3)) \\
&+\mu_2(s_2(y_1{=}1,X,1)+s_2(y_2{=}2,X,2)+s_2(y_3{=}2,X,3)) \\
&+\mu_3(s_3(y_1{=}1,X,1)+s_3(y_2{=}2,X,2)+s_3(y_3{=}2,X,3)) \\
&+\mu_4(s_4(y_1{=}1,X,1)+s_4(y_2{=}2,X,2)+s_4(y_3{=}2,X,3)) \\
&=(1+0+0+0+0.2)+(1+0.5+0+0.5) \\
&=3.2
\end{align}
$$
so the unnormalized probability is $exp(3.2)$.
$$
\begin{align}
Z(X)&= \text{the term for } (y_1=1,y_2=1,y_3=1) \\
&+\text{the term for } (y_1=1,y_2=1,y_3=2) \\
&+\text{the term for } (y_1=1,y_2=2,y_3=1) \\
&+\text{the term for } (y_1=2,y_2=1,y_3=1) \\
&+\text{the term for } (y_1=2,y_2=1,y_3=2) \\
&+\text{the term for } (y_1=2,y_2=2,y_3=1) \\
&+\text{the term for } (y_1=2,y_2=2,y_3=2) \\
&+\text{the term for } (y_1=1,y_2=2,y_3=2) \\
&=exp(4.2)+exp(5.2)+exp(6.2)+exp(7.2)+exp(8.2)+...+exp(3.2)
\end{align}
$$
$$P(y_1=1,y_2=2,y_3=2|X)=\frac{exp(3.2)}{exp(4.2)+exp(5.2)+exp(6.2)+exp(7.2)+exp(8.2)+...+exp(3.2)}$$
Note:
There are 2x2x2 = 8 combinations in total, i.e. 8 possible sequences Y.
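The hand computation above can be checked mechanically. Below is a minimal Python sketch (not part of the original) that encodes the nine feature functions from the table, enumerates all $2^3 = 8$ tag sequences, and normalizes; here the state features are summed over every position at which they can fire, matching the $\sum_i$ in the parametric form:
```python
import itertools, math

def score(y):  # y = (y1, y2, y3); tags are 1 = noun, 2 = verb
    s = 0.0
    # transition features t_k with weights lambda_k
    s += 1.0 * sum(1 for i in (1, 2) if y[i-1] == 1 and y[i] == 2)  # t1
    s += 0.5 * (y[0] == 1 and y[1] == 1)                            # t2
    s += 1.0 * (y[1] == 2 and y[2] == 1)                            # t3
    s += 1.0 * (y[0] == 2 and y[1] == 1)                            # t4
    s += 0.2 * (y[1] == 2 and y[2] == 2)                            # t5
    # state features s_l with weights mu_l
    s += 1.0 * (y[0] == 1)                                          # s1
    s += 0.5 * ((y[0] == 2) + (y[1] == 2))                          # s2
    s += 0.8 * ((y[1] == 1) + (y[2] == 1))                          # s3
    s += 0.5 * (y[2] == 2)                                          # s4
    return s

Z = sum(math.exp(score(y)) for y in itertools.product((1, 2), repeat=3))
y = (1, 2, 2)
print(score(y))                # 3.2, matching the hand computation
print(math.exp(score(y)) / Z)  # normalized probability P(y | x)
```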
## 7. The simplified form of the linear-chain CRF
In the previous sections we used $s_l$ for node feature functions and $t_k$ for local feature functions, with different symbols for their weight coefficients, which makes the notation cumbersome. We can tidy the feature functions up a little and unify them.
Suppose that at a given node we have $K_1$ local feature functions and $K_2$ node feature functions, for a total of $K=K_1+K_2$ feature functions. We represent them uniformly by a single feature function $f_k(y_{i-1},y_i,X,i)$ as follows:
$$f_k(y_{i-1},y_i,X,i)= \begin{cases} t_k(y_{i-1},y_i, X,i) & {k=1,2,...K_1}\\ s_l(y_i, x,i)& {k=K_1+l,\; l=1,2...,K_2} \end{cases}$$
Since $y_{i-1},y_i,X,i$ are determined once Y and X are given, we can write:
$$f_k(Y,X) = \sum\limits_{i=1}^nf_k(y_{i-1},y_i, X,i)\ \ \ \text{read: n is the number of words}$$
We likewise unify the weight coefficient $w_k$ of $f_k(y_{i-1},y_i,X,i)$:
$$w_k= \begin{cases} \lambda_k & {k=1,2,...K_1}\\ \mu_l & {k=K_1+l,\; l=1,2...,K_2} \end{cases}$$
The parametric form of the linear-CRF then simplifies to:
$$P(Y|X) = \frac{1}{Z(X)}exp\sum\limits_{k=1}^Kw_kf_k(Y,X)$$
where Z(X) is the normalizer:
$$Z(X) =\sum\limits_{y}exp\sum\limits_{k=1}^Kw_kf_k(Y,X)$$
Writing the $w_k$ and $f_k$ of the two formulas above in vector form, i.e.:
$$w=(w_1,w_2,...w_K)^T\;\;\; F(Y,X) =(f_1(Y,X),f_2(Y,X),...f_K(Y,X))^T$$
the parametric form of the linear-CRF simplifies to the following inner-product form:
$$P_w(Y|X) = \frac{exp(w \bullet F(Y,X))}{Z_w(X)} = \frac{exp(w \bullet F(Y,X))}{\sum\limits_{y}exp(w \bullet F(Y,X))}$$
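In this inner-product form, scoring candidate sequences is just a matrix–vector product. A minimal numpy sketch (not from the original; the global feature counts and weights below are made up):
```python
import numpy as np

# Hypothetical global feature counts F(Y, X) for three candidate tag sequences;
# each entry f_k(Y, X) counts the positions at which feature k fires.
F = np.array([[1., 0., 2.],    # candidate sequence a
              [0., 1., 1.],    # candidate sequence b
              [2., 1., 0.]])   # candidate sequence c
w = np.array([1.0, 0.5, 0.8])  # feature weights

scores = F @ w                 # w . F(Y, X) for every candidate
# in a real CRF, Z sums over all sequences; here we normalize over the 3 shown
probs = np.exp(scores) / np.exp(scores).sum()
print(probs)
```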
## 8. The matrix form of the linear-chain CRF
Tidying up the unified linear-CRF formula of the previous section, we can also write the parametric form of the linear-CRF in matrix form. To this end, define an m×m matrix M, where m is the number of possible values of y:
$$M_i(X) = \Big[M_i(y_{i-1},y_i|X)\Big] = \Big[exp(W_i(y_{i-1},y_i|X))\Big] = \Big[ exp(\sum\limits_{k=1}^Kw_kf_k(y_{i-1},y_i,X,i))\Big]$$
Introducing start and stop markers $y_0=start,y_{n+1}=stop$, the normalized probability of a tag sequence y can be obtained as a product of n+1 matrix elements:
$$P_w(Y|X) = \frac{1}{Z_w(X)}\prod_{i=1}^{n+1}M_i(y_{i-1},y_i|X)$$
where $Z_w(X)$ is the normalizer.
These are the model foundations of the linear-CRF; below we discuss how to solve its three problems, which are analogous to those of the HMM.
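The matrix form has a neat computational reading: the normalizer $Z_w(X)$ is a single entry of the chained product of the $M_i$. A minimal numpy sketch (not from the original), with random positive entries standing in for the matrices:
```python
import numpy as np

rng = np.random.default_rng(2)
n, m = 3, 2
start = rng.uniform(0.5, 1.5, size=(1, m))  # M_1(start, y_1) as a row vector
mid = [rng.uniform(0.5, 1.5, size=(m, m)) for _ in range(n - 1)]  # M_2..M_n
stop = rng.uniform(0.5, 1.5, size=(m, 1))   # M_{n+1}(y_n, stop) as a column

# the chained product sums over all paths, yielding the normalizer Z_w(x)
Z = np.linalg.multi_dot([start, *mid, stop])[0, 0]

y = [0, 1, 1]  # one tag sequence y_1..y_3 (0-indexed tag values)
score = start[0, y[0]] * mid[0][y[0], y[1]] * mid[1][y[1], y[2]] * stop[y[2], 0]
print(score / Z)  # P_w(y | x)
```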
# III. Evaluating Tag Sequence Probabilities with the Forward–Backward Algorithm
## 1. The three basic problems of the linear-CRF
When discussing hidden Markov models we covered the three basic problems of the HMM; the linear-CRF has three analogous basic problems. Unlike the HMM, however, a linear-CRF always treats the given observation sequence x as a whole — it is never split into (x1, x2, ...) — so the linear-CRF's problem setting is somewhat simpler than the HMM's. If you know HMMs well, solving these three problems for the CRF is not hard.
~~The first problem of the linear-CRF is evaluation: given the conditional distribution P(y|x) of a linear-CRF [i.e., the weights of all feature functions are known], and given an input sequence x and output sequence y [compute, for some word of the sentence, the probability that it carries some tag; i.e., for the given y, compute the probability of each possible value of the corresponding $y_i$], compute the conditional probabilities $P(y_i|x)$ and $P(y_{i-1},y_i|x)$ and the corresponding expectations.~~
> ~~To restate this problem:\
> Given $P(y|x)$, you only have the conditional probabilities of whole sequences y, not the probability that position i takes the value $y_i$; that probability must still be computed.\
> Example: a length-3 sequence with 2 state values. For a given x we know the conditional probability of $y=(y_0=0,y_1=1,y_2=1)$, but the conditional probability of $y_1=0$ or $y_1=1$ still needs to be computed.~~
The first problem of the linear-CRF is evaluation: given the conditional distribution P(y|x) of a linear-CRF [i.e., the weights of all feature functions are known], and given x, compute the conditional probabilities $P(y_i|x)$ and $P(y_{i-1},y_i|x)$ and the corresponding expectations.
The second problem of the linear-CRF is learning: given a training set X and Y, learn the model parameters $w_k$ of the linear-CRF and the conditional probability $P_w(y|x)$. Solving this is much simpler than the HMM's learning algorithm; ordinary gradient descent or quasi-Newton methods suffice.
The third problem of the linear-CRF is decoding: given the conditional distribution P(y|x) and an input sequence x, compute the output sequence y that maximizes the conditional probability. As with the HMM, the Viterbi algorithm solves this problem conveniently.
## 2. Problem one: evaluation
### 2.1 Overview of the forward and backward probabilities of the linear-CRF
We are given the input x.
For example: $x=\{x_1=\text{“我”},x_2=\text{“是”},x_3=\text{“中国人”}\}$ ("I", "am", "Chinese"), with $y\in \{\text{verb},\text{noun}\}$.
To compute the conditional probabilities $P(y_i|x)$ and $P(y_{i-1},y_i|x)$ [for example, $P(y_2=\text{verb}|x)$ and $P(y_1=\text{noun},y_2=\text{verb}|x)$], we can use a method similar to the HMM's: the forward–backward algorithm.
Let us first see what $P(y_i|x)$ equals.
>**Marginal probabilities in two and more dimensions** \
>Suppose two random variables A and B each take values in {0,1}. We have:\
>$P(A)=\sum_B P(A,B)=P(A,0)+P(A,1)$ \
>so\
>$P(A=0)=P(0,0)+P(0,1)$ \
>$P(A=1)=P(1,0)+P(1,1)$ \
>\
>For three random variables A, B, C, all with values in {0,1}, we still have: \
>$P(A)=\sum_B \sum_C P(A,B,C)=\sum_B [P(A,B,0)+P(A,B,1)]=[P(A,0,0)+P(A,0,1)]+[P(A,1,0)+P(A,1,1)]$\
>so\
>$P(A=0)=[P(0,0,0)+P(0,0,1)]+[P(0,1,0)+P(0,1,1)]$ \
>$P(A=1)=[P(1,0,0)+P(1,0,1)]+[P(1,1,0)+P(1,1,1)]$ \
>Now suppose further that\
>$P(A,B,C)=f_1(A)f_2(B)f_3(C)$     where $f_1,f_2,f_3$ are functions of the variables A, B, C respectively \
>Then\
>$$
\begin{align}
P(A)=\sum_B \sum_C P(A,B,C)&=\sum_B \sum_C[f_1(A)f_2(B)f_3(C)] \\
&=\sum_B[f_1(A)f_2(B)f_3(0)+f_1(A)f_2(B)f_3(1)]\\
&=[f_1(A)f_2(0)f_3(0)+f_1(A)f_2(0)f_3(1)]+[f_1(A)f_2(1)f_3(0)+f_1(A)f_2(1)f_3(1)]\\
\end{align}
$$
>\
$$
\begin{align}
f_1(A)\big[[\sum_B f_2(B)][\sum_C f_3(C)]\big]&=f_1(A)\big[[f_2(0)+f_2(1)][f_3(0)+f_3(1)]\big]\\
&=f_1(A)\big[f_2(0)f_3(0)+f_2(0)f_3(1)+f_2(1)f_3(0)+f_2(1)f_3(1)\big]\\
&=f_1(A)f_2(0)f_3(0)+f_1(A)f_2(0)f_3(1)+f_1(A)f_2(1)f_3(0)+f_1(A)f_2(1)f_3(1)\\
\end{align}
$$
>so:\
>$P(A)=\sum_B \sum_C[f_1(A)f_2(B)f_3(C)]=f_1(A)\big[[\sum_B f_2(B)][\sum_C f_3(C)]\big]$ — the two forms are equivalent
Suppose a sentence has n words in total. By marginalization:
$$
\begin{align}
P(y_i|x)
&= \sum_{[y_1,\ y_{i-1}\ ]}\sum_{[y_{i+1}\ ,\ y_{n}\ ]}P(y_1,y_2,...,y_i,...,y_n|x) \\
&= \sum_{[y_1,\ y_{i-1}\ ]}\sum_{[y_{i+1}\ ,\ y_{n}\ ]}P(y|x) \\
&= \sum_{[y_1,\ y_{i-1}\ ]}\sum_{[y_{i+1}\ ,\ y_{n}\ ]}\frac{1}{Z(x)}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(y_{t-1}, y_{t}, x)\right\}\\
&\text{let } f_t(y_{t-1}, y_{t})=exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(y_{t-1}, y_{t}, x)\right\} \\
&\text{then} \\
&= \sum_{[y_1,\ y_{i-1}\ ]}\sum_{[y_{i+1}\ ,\ y_{n}\ ]}\frac{1}{Z(x)}\prod_{t=1}^{T}f_t(y_{t-1}, y_{t})\\
&\text{if } y_i \text{ takes } s \text{ possible values, this costs } O(s^T \cdot T) \text{ — exponential, hopeless to brute-force, so we look for a trick}\\
&=\frac{1}{Z(x)}.\\
&\big[\sum_{y_{i-1}}\big(f_i(y_{i-1}, y_{i})...\sum_{y_2}\big(f_3(y_{2}, y_{3})\sum_{y_1}\big(f_2(y_{1}, y_{2})\sum_{y_0}f_1(y_{0}, y_{1})\big)\big)\big)\big].\\
&\big[\sum_{y_{i+1}}\big(f_{i+1}(y_{i}, y_{i+1})...\sum_{y_{n-1}}\big(f_{n-1}(y_{n-2}, y_{n-1})\sum_{y_{n}}\big(f_n(y_{n-1}, y_{n})\sum_{y_{n+1}}f_{n+1}(y_{n}, y_{n+1})\big)\big)\big)\big]\\
&\text{here } y_0 \text{ and } y_{n+1} \text{ are introduced to simplify the computation} \\
&\text{define } \alpha_i(y_i|x) \text{ as the unnormalized probability of the partial tag sequence up to and including position } i \text{ with tag } y_i,\\
&\text{and } \beta_i(y_i|x) \text{ as the unnormalized probability of the partial tag sequence from } i+1 \text{ to } n, \text{ given tag } y_i \text{ at position } i \\
&\text{at the start and the end we define:} \\
&\alpha_0(y_0|x)= \begin{cases} 1 & {y_0 =start}\\ 0 & {else} \end{cases}\\
&\beta_{n+1}(y_{n+1}|x)= \begin{cases} 1 & {y_{n+1} =stop}\\ 0 & {else} \end{cases}\\
&\text{the recursions are:}\\
&\alpha_i(y_i|x)=\sum_{y_{i-1}}\big(f_i(y_{i-1}, y_{i})\alpha_{i-1}(y_{i-1}|x)\big)\\
&\beta_i(y_i|x)=\sum_{y_{i+1}}\big(f_{i+1}(y_{i}, y_{i+1})\beta_{i+1}(y_{i+1}|x)\big)\\
&\text{so the final result of the derivation is:}\\
&=\frac{1}{Z(x)}.\alpha_i(y_i|x).\beta_i(y_i|x)
\end{align}
$$
Similarly one obtains:
$$
\begin{align}
P(y_{i-1},y_i|x)=\frac{1}{Z(x)}.\alpha_{i-1}(y_{i-1}|x)f_i(y_{i-1},y_i).\beta_i(y_i|x)
\end{align}
$$
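These recursions translate directly into code. A minimal numpy sketch (not from the original), with random positive factor matrices standing in for the $f_i(y_{i-1},y_i)$:
```python
import numpy as np

rng = np.random.default_rng(0)
n, s = 4, 3  # sequence length, number of tag values
# M[i][a, b] = f_{i+2}(y = a, y' = b): exp of the weighted feature sum,
# here replaced by random positive factors purely for illustration.
M = [rng.uniform(0.5, 1.5, size=(s, s)) for _ in range(n - 1)]
init = rng.uniform(0.5, 1.5, size=s)  # factor f_1(start, y_1)

# forward pass: alpha[i, a] = unnormalized mass of prefixes ending in y_i = a
alpha = np.zeros((n, s))
alpha[0] = init
for i in range(1, n):
    alpha[i] = alpha[i - 1] @ M[i - 1]

# backward pass: beta[i, a] = unnormalized mass of suffixes after y_i = a
beta = np.ones((n, s))
for i in range(n - 2, -1, -1):
    beta[i] = M[i] @ beta[i + 1]

Z = alpha[-1].sum()           # normalizer Z(x)
marginals = alpha * beta / Z  # P(y_i | x) for every position and tag value
# pairwise: P(y_{i-1}=a, y_i=b | x) = alpha[i-1, a] * M[i-1][a, b] * beta[i, b] / Z
print(np.allclose(marginals.sum(axis=1), 1.0))  # each row sums to 1
```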
### 2.2 Expectation computation for the linear-CRF
The expectation of a function f(x) under a distribution P(x) is $\sum_xP(x)f(x)$.
With the conditional probabilities computed in the previous section, we can also conveniently compute expectations under the joint distribution P(x,y) and the conditional distribution P(y|x).
The expectation of a feature function $f_k(x,y)$ under the conditional distribution P(y|x) is:
$$\begin{align} E_{P(y|x)}[f_k] & = E_{P(y|x)}[f_k(y,x)] \\ & = \sum\limits_{i=1}^{n+1} \sum\limits_{y_{i-1}\;\;y_i}P(y_{i-1},y_i|x)f_k(y_{i-1},y_i,x, i) \\ & = \sum\limits_{i=1}^{n+1} \sum\limits_{y_{i-1}\;\;y_i}f_k(y_{i-1},y_i,x, i) \frac{\alpha_{i-1}^T(y_{i-1}|x)M_i(y_{i-1},y_i|x)\beta_i(y_i|x)}{Z(x)} \end{align}$$
The expectation under the joint distribution P(x,y) can be computed likewise:
$$
\begin{align} E_{P(x,y)}[f_k] & = \sum\limits_{x,y}P(x,y) \sum\limits_{i=1}^{n+1}f_k(y_{i-1},y_i,x, i) \\& = \sum\limits_{x}\tilde{P}(x) \sum\limits_{y}P(y|x) \sum\limits_{i=1}^{n+1}f_k(y_{i-1},y_i,x, i) \\& = \sum\limits_{x}\tilde{P}(x)\sum\limits_{i=1}^{n+1} \sum\limits_{y_{i-1}\;\;y_i}f_k(y_{i-1},y_i,x, i) \frac{\alpha_{i-1}^T(y_{i-1}|x)M_i(y_{i-1},y_i|x)\beta_i(y_i|x)}{Z(x)}
\end{align}
$$
Here the empirical distribution $\tilde{P}(x)$ of the marginal P(X) is assumed known; the empirical distribution is estimated from the training data by frequency counts, $\tilde{P}(x)=\frac{x}{N}$ (see the maximum entropy model).
Assuming there are K feature functions in total, k=1,2,...K.
## 3. Problem two: model learning
In the linear-CRF parameter learning problem we are given the training data X with the corresponding tag sequences Y and K feature functions $f_k(x,y)$, and we must learn the model parameters $w_k$ of the linear-CRF and the conditional probability $P_w(y|x)$, where the conditional probability $P_w(y|x)$ and the parameters $w_k$ satisfy:
$$P_w(y|x) = P(y|x) = \frac{1}{Z_w(x)}exp\sum\limits_{k=1}^Kw_kf_k(x,y) = \frac{exp\sum\limits_{k=1}^Kw_kf_k(x,y)}{\sum\limits_{y}exp\sum\limits_{k=1}^Kw_kf_k(x,y)}$$
So our goal is to find all the model parameters $w_k$; the conditional probability $P_w(y|x)$ then follows from the formula above.
Many approaches can solve this problem, e.g. gradient descent, Newton's method, quasi-Newton methods. Moreover, since the expression for $P_w(y|x)$ in this model is the same as in the maximum entropy model, the improved iterative scaling (IIS) method used there can also be applied.
Below we only briefly sketch the gradient descent approach.
### 3.1 Parameter learning for the linear-CRF by gradient descent
Before applying gradient descent we must define the objective function. One usually maximizes the log-likelihood of the conditional distribution $P_w(y|x)$:
$$L(w)= log\prod_{x,y}P_w(y|x)^{\tilde{P}(x,y)} = \sum\limits_{x,y}\tilde{P}(x,y)logP_w(y|x)$$
> <font color="red">The empirical probability can be included or not; the benefit of including it: \
> A simple example: the population distribution of China. With no prior knowledge you would assume the population is distributed uniformly everywhere, and then refine that belief from your training data.\
> With prior knowledge — few people in the west, many in the east — your prior distribution is no longer uniform; that is the meaning of the empirical-distribution exponent.
>
> The exponent marks the frequency with which the current sample occurs.
>
> Without prior knowledge this frequency is read off the training sample (each sample counted once), and the optimization objective reduces to the plain logarithm mentioned above; otherwise it comes from the prior knowledge.</font>
where $\tilde{P}(x,y)$ is the empirical distribution, obtainable from prior knowledge and from the training set, much as in the maximum entropy model. To use gradient descent we now minimize $f(w)=-L(P_w)$:
$$\begin{align}f(w) & = -\sum\limits_{x,y}\tilde{P}(x,y)logP_w(y|x) \\ &= \sum\limits_{x,y}\tilde{P}(x,y)logZ_w(x) - \sum\limits_{x,y}\tilde{P}(x,y)\sum\limits_{k=1}^Kw_kf_k(x,y) \\& = \sum\limits_{x}\tilde{P}(x)logZ_w(x) - \sum\limits_{x,y}\tilde{P}(x,y)\sum\limits_{k=1}^Kw_kf_k(x,y) \\& = \sum\limits_{x}\tilde{P}(x)log\sum\limits_{y}exp\sum\limits_{k=1}^Kw_kf_k(x,y) - \sum\limits_{x,y}\tilde{P}(x,y)\sum\limits_{k=1}^Kw_kf_k(x,y) \end{align}$$
Differentiating with respect to w gives:
$$\frac{\partial f(w)}{\partial w} = \sum\limits_{x,y}\tilde{P}(x)P_w(y|x)f(x,y) - \sum\limits_{x,y}\tilde{P}(x,y)f(x,y)$$
With this expression for the derivative we can run gradient descent iteratively to find the optimal w. Note that during the iterations, each time w is updated, $P_w(y|x)$ must be updated in step, so that the gradient for the next iteration can be computed.
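A minimal sketch of one gradient-ascent step on the log-likelihood (not from the original). The three feature functions in `features` are hypothetical, and the expected counts are computed by brute-force enumeration over all $2^3$ sequences — feasible only for tiny label spaces; forward–backward replaces it in practice:
```python
import itertools
import numpy as np

def features(y):
    """Hypothetical global feature-count vector F(y, x) for a length-3 sequence."""
    return np.array([
        sum(1 for i in (1, 2) if y[i-1] == 1 and y[i] == 2),  # a transition feature
        float(y[0] == 1),                                      # a state feature
        float(y[2] == 2),                                      # another state feature
    ])

def grad_step(w, y_gold, lr=0.1):
    ys = list(itertools.product((1, 2), repeat=3))
    scores = np.array([w @ features(y) for y in ys])
    p = np.exp(scores - scores.max())
    p /= p.sum()                                   # P(y | x) for every y
    expected = sum(pi * features(y) for pi, y in zip(p, ys))
    # gradient of the log-likelihood: observed counts minus expected counts
    return w + lr * (features(y_gold) - expected)

w = np.zeros(3)
w = grad_step(w, (1, 2, 2))
print(w)
```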
## 4. Problem three: decoding
Decoding is done with the Viterbi algorithm.
We have by now learned $P_w(y|X)$ — given a sentence, the probability that a given word carries a given tag. Now we must decode: given a sentence, find the assignment of a tag to every word for which the probability is maximal.
For the Viterbi algorithm on our linear-CRF, the first local state is $\delta_i(l)$, the maximum of the unnormalized probabilities at position i over the possible values (1,2,...,m) of the tag l. Unnormalized probabilities are used because the normalizer Z(x) does not affect the comparison of maxima. From the definition of $\delta_i(l)$, the recursion for tag l at position i+1 is:
$$\delta_{i+1}(l) = \max_{1 \leq j \leq m}\{\delta_i(j) + \sum\limits_{k=1}^Kw_kf_k(y_{i} =j,y_{i+1} = l,x,i)\}\;, l=1,2,...m$$
As in the HMM's Viterbi algorithm, we need a second local state $\Psi_{i+1}(l)$ to record the tag value at position i that maximizes $\delta_{i+1}(l)$; it is used at the end to backtrack the optimal solution. Its recursion is:
$$\Psi_{i+1}(l) = arg\;\max_{1 \leq j \leq m}\{\delta_i(j) + \sum\limits_{k=1}^Kw_kf_k(y_{i} =j,y_{i+1} = l,x,i)\}\; ,l=1,2,...m$$
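A minimal numpy sketch of this recursion (not from the original); `trans[i][j, l]` stands for the weighted feature sum $\sum_k w_k f_k(y_i{=}j, y_{i+1}{=}l, x, i)$, filled with random numbers purely for illustration:
```python
import numpy as np

def viterbi(init, trans):
    """Minimal Viterbi over log-scores.
    init: (m,) scores for position 1; trans: list of (m, m) score matrices."""
    n, m = len(trans) + 1, len(init)
    delta = np.zeros((n, m)); psi = np.zeros((n, m), dtype=int)
    delta[0] = init
    for i in range(1, n):
        cand = delta[i-1][:, None] + trans[i-1]  # candidates for each tag l
        psi[i] = cand.argmax(axis=0)             # best predecessor j (the Psi state)
        delta[i] = cand.max(axis=0)              # best score (the delta state)
    # backtrack the optimal path from the best final state
    path = [int(delta[-1].argmax())]
    for i in range(n - 1, 0, -1):
        path.append(int(psi[i][path[-1]]))
    return path[::-1]

rng = np.random.default_rng(1)
print(viterbi(rng.normal(size=3), [rng.normal(size=(3, 3)) for _ in range(4)]))
```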
## IV. Conditional Random Fields for Word Segmentation
### 1. Introduction to conditional random fields
A conditional random field (CRF) is an undirected probabilistic graphical model that, given input random variables x, computes the conditional probability p(y|x). Used for sequence labeling, it specializes to the linear-chain conditional random field, in which the input and output random variables are two sequences of equal length.
Above each $X_t$ there are 3 gray nodes representing 3 features of $X_t$ — there could of course be any number of features, reflecting how rich features can be. The black squares are factor nodes, which can be understood as feature functions. Features that use only $X_t$ and $Y_t$ are called state features, while features that also use $Y_{t-1}$ are called transition features — the same feature functions as in the perceptron.
The linear-chain CRF is defined as:
$$P(Y|X) = \frac{1}{Z(X)}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}$$
   K is the total number of features,
   $F_{k}$ is the feature function of the k-th feature,
   $W_{k}$ is the weight attached to the value computed by the k-th feature function.
where Z(X) is the normalizing function:
$$Z(X) = \sum_{y}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}$$
<font color="red">Note:
y ranges over all possible sequences, so the computation is expensive. The numerator of the linear-chain CRF formula is the total score of one tag sequence for a sentence, and the denominator is the total score of all possible tag sequences; if a sentence has 10 characters, there are $4^{10}$ possible tag sequences.</font>
The formula above is defined over all possible tag sequences. If all the feature functions and weights are written in vector form, the definition of the linear-chain CRF simplifies to:
$$
\begin{aligned}
P(Y|X) &= \frac{1}{Z(X)}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}\\
&= \frac{1}{Z(X)}\prod_{t=1}^{T}exp\left\{W.\phi(Y_{t-1}, Y_{t}, X_{t})\right\}\\
&= \frac{1}{Z(X)}exp\left\{\sum_{t=1}^{T}W.\phi(Y_{t-1}, Y_{t}, X_{t})\right\}
\end{aligned}
$$
Compare the structured perceptron's scoring function:
$$score(X,Y) = \sum_{t=1}^{T}W.\phi(Y_{t-1}, Y_{t}, X_{t})$$
The structured perceptron's scoring function is exactly the exponent of the conditional random field. Since Z(X) is just a constant c for a given instance X, we have:
$$P(Y|X) = \frac{1}{c}exp\left\{score(X,Y)\right\}$$
Thus the conditional random field relates to the structured perceptron as follows:
> The feature functions of the CRF and the structured perceptron are identical.\
The higher the structured perceptron scores a prediction, the higher the probability the CRF assigns it.\
This similarity lets us reuse the structured perceptron's prediction algorithm, namely the Viterbi algorithm.
### 2. Training the conditional random field
Specify a training set $D=\lbrace x^{(i)},y^{(i)}\rbrace _{i=1}^{N}$, where N is the total number of samples (for sentences, the number of sentences to be trained on). Each $X^{(i)}={X_1^{(i)},X_2^{(i)},...X_T^{(i)}}$ is an input sequence and $Y^{(i)}={Y_1^{(i)},Y_2^{(i)},...Y_T^{(i)}}$ the corresponding tag sequence.
By maximum likelihood estimation we want to maximize the likelihood of the training set D given the model parameters W:
$$P(D|W)=\prod_{i=1}^{N}P(Y^{(i)}|X^{(i)})$$
which is equivalent to maximizing the log-likelihood:
$$L(W)=\sum_{i=1}^{N}lnP(Y^{(i)}|X^{(i)})$$
Substituting $P(Y|X) = \frac{1}{Z(X)}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}$ into the expression above gives:
$$L(W)=\sum_{i=1}^{N}\sum_{t=1}^{T}\sum_{k=1}^{K}W_{k}F_{k}(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-\sum_{i=1}^{N}lnZ(X)$$
$$Z(X) = \sum_{y}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}$$
where N is the total size of the sample set. We now want to maximize the likelihood with stochastic gradient descent (SGD) — strictly speaking, gradient ascent, since we seek a maximum — so we process one training sequence at a time:
$$L(W)=\sum_{t=1}^{T}\sum_{k=1}^{K}W_{k}F_{k}(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-lnZ(X)$$
The partial derivative of the log-likelihood is then:
$$
\begin{aligned}
\frac{\partial L}{\partial W_{k}}&=\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-\frac{1}{Z(X)}\frac{\partial Z(X)}{\partial W_{k}} \\
&=\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-\frac{1}{Z(X)}\sum_{y}\bigg(exp\left\{\sum_{t=1}^{T}\sum_{k=1}^{K}W_kF_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}\sum_{t=1}^{T}\sum_{k=1}^{K}F_{k}(Y_{t-1}, Y_{t}, X_{t})\bigg) \\
\end{aligned}
$$
Since:
$$P(Y|X)=\frac{1}{Z(X)}\prod_{t=1}^{T}exp\left\{\sum_{k=1}^{K}W_{k}F_{k}(Y_{t-1}, Y_{t}, X_{t})\right\}$$
the derivative simplifies to:
$$\frac{\partial L}{\partial W_{k}}=\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-\sum_{y}\bigg(P(Y|X)\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1},y_{t},x_{t}^{(i)})\bigg)$$
This completes the derivation. The first term is the observed (true) value and the second the expected value; when the two are equal, the gradient is 0 and the iteration stops.
<font color="red">**Here the gradients over all samples must be accumulated, because a CRF weight update is one iteration over all samples; with M samples:**
$$\frac{\partial L}{\partial W_{k}}=\sum_{m=1}^M\left[\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1}^{(i)},y_{t}^{(i)},x_{t}^{(i)})-\sum_{y}\bigg(P(Y|X)\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1},y_{t},x_{t}^{(i)})\bigg)\right]$$
In practical applications the numbers of feature functions and observation values can be very large; gradient descent and Newton's method are too slow to solve for the weights, and the L-BFGS algorithm is generally used. Its principle is explained in detail in the book "Machine Learning and Applications".</font>
> <font color="red">**First term: the true value**
>
> The true tag sequence of a sentence.
>
> **Second term: the expected value**
>
> For every tag sequence obtained by freely combining tags over the characters of a sentence, compute the probability $P(Y|X)$ of that tag sequence and multiply it by the sequence's accumulated score $\sum_{t=1}^{T}F_k(y_{t-1},y_{t},x_{t}^{(i)})$; summing gives the expectation of the per-sequence scores under the distribution $P(Y|X)$.
>
> **Example:**
>
>
>
> $x^i$ = 小明是中国人 ("Xiao Ming is Chinese"); $y^i$ = BESSBME
>
> W holds the weight of each feature function, covering both the x features and the y features [following the logic of HanLP's averaged structured perceptron; see its weight definitions and update rules].
>
> First predict the sentence's tag sequence $\hat{y}$ with the model plus the Viterbi algorithm, then compare $\hat{y}$ with $y^i$; if they disagree, update the weights W.
>
> The first term of the derivative with respect to W computes the accumulated score of the true tag sequence over each feature; if several characters share the same feature, that feature's score accumulates.
>
> The second term ranges over the free tag combinations (sentence length times tag count), computing each tag sequence's score weighted by its probability.</font>
Using the forward–backward algorithm, the expectation of a feature function can be computed with the following formula:
$$E_{P(Y|X)}(f_k)=\sum_{y}P(y|x){f_k}(y,x)={\sum_{i=1}^{n+1}} \sum_{y_{i-1}\ \ ,y_i} f_k(y_{i-1},y_i,x,i)\bullet{\frac{{\alpha_i}^T(y_{i-1}|x)M_i (y_{i-1},{y_i}|x){\beta_i}(y_i|x)}{Z(x)}}$$
### 3. Conditional random field versus structured perceptron
What the structured perceptron and the conditional random field have in common:
><font color="red">Identical feature functions: the chosen features and their feature functions are the same as the structured perceptron's</font>\
>identical weight vector\
>identical scoring function\
>identical prediction algorithm\
>both belong to structured learning
Differences:
>1. When the perceptron updates its parameters it uses only a single training instance, without considering the whole dataset [an instance is one sentence, not one character, as was later confirmed], so it inevitably robs Peter to pay Paul; the CRF's log-likelihood and its gradient use the entire dataset <font color="red">[one might expect a CRF to support batches, i.e. updating the weights every so many sentences; as later confirmed, it performs one weight update per pass over the whole dataset]</font>.
>
> 2. The CRF's parameter update is more reasonable. The CRF updates its parameters as:
$$W \leftarrow W+\phi(X^{i},Y^{i})-E_{w}\left[\phi(X^{i},Y)\right]$$
>
> Compare the perceptron's update expression:
>$$W \leftarrow W+\phi(X^{i},Y^{i})-\phi(X^{i},\hat{Y})$$
>
> The difference is plain at a glance: the perceptron rewards the feature functions ϕ of the correct answer but punishes only the single most wrong y, while the conditional random field punishes all answers y at once, spreading the total penalty among them.
Summary:
The difference is plain at a glance. The perceptron rewards the correct answer's feature functions $\phi(X^{i},Y^{i})$, but punishes only the feature functions $\phi(X^{i},\hat{Y})$ of the single most wrong tag sequence — it shoots only the bird that sticks its head out.
The conditional random field not only rewards the correct answer's feature functions but also punishes all answers y at the same time, so the feature functions of every wrong result are penalized — every offender is punished. The CRF's total penalty still amounts to one unit, but it is shared across all answers, and each answer's feature functions are penalized with a force exactly equal to the probability the model assigns that answer — the punishment fits the crime.
Besides, the correct answer's feature functions have already received one full unit of reward (the term $+\phi(X^{i},Y^{i})$), so they are untroubled by a penalty of less than one unit (the term $\sum_{y}\bigg(P(Y|X)\sum_{t=1}^{T}\sum_{k=1}^{K}F_k(y_{t-1},y_{t},x_{t}^{(i)})\bigg)$; since $P(Y|X)\leq 1$, the penalty is below one unit).
When the model expectation of the feature functions matches the empirical expectation (i.e. $P(Y|X) = 1$), the gradient is 0 and the model parameters stop changing. At that point, full-batch gradient ascent is guaranteed to converge to the optimal solution.
<font color="red">Note:
CRF++ training in detail
Pipeline overview:
1. Generate the feature functions
2. Build the probability graph
3. Compute the node and path costs: a node's cost is the sum of the weights of all feature functions attached to that node; a path's cost is the sum of the weights of all feature functions attached to that path (nodes and paths carry specific labels)
4. Run the forward–backward algorithm to compute alpha and beta, and compute the expectations
The steps above are per training sample.
The steps below are over all samples:
5. Compute the gradient g(w) (the expectation over all training samples − 1) and the objective function L(W)
6. After one iteration of the L-BFGS algorithm, update W (called alpha in the code)</font>
|
\section{Ogre Brute}
asdf
|
Immigrant workers make up nearly half of the dairy labor force and dairies employing immigrants produce 79% of the US milk supply. However, most workers receive no safety training.
Producers and workers struggle with language and cultural barriers and limited safety resources. This changing diversity in the workforce presents new occupational risk factors. Culturally and linguistically appropriate education in agricultural health and safety is necessary.
Producers and immigrant workers successfully tested a model program to address on-the-farm safety needs from hazard reduction to worker training. It developed a train-the-trainer curriculum that was later adopted by OSHA as an approved curriculum and utilizes Community Health Workers.
The 5-module curriculum includes background sections, step-by-step facilitator guides and handouts to reinforce safety messages, as well as visuals for workers with limited formal education and low literacy levels.
|
Howdy, Mr. or Ms. Tang! I renamed the entry about the event to From Spoke to Sprocket, as it probably isn't the only bike related exhibition in the past and future of Davis. :) Seriously, though... I added a bunch of links out from the entry, but it would probably be a good idea to link to the entry from the various bike and art related entries. Users/JabberWokky Evan JabberWokky Edwards
|
The day is drawing near when the Ukrainian Greek Catholic Church will celebrate the fourth centenary of the union between the Bishops of the Metropolia of Kievan Rus' and the Apostolic See. The union was effected at the meeting of representatives of the Metropolia of Kiev with the Pope on 23 December 1595 and was solemnly proclaimed at Brest-Litovsk on the River Bug on 16 October 1596. Pope Clement VIII, in the Apostolic Constitution Magnus Dominus et laudabilis nimis,1 announced the union to the whole Church and in the Apostolic Letter Benedictus sit Pastor2 he addressed the Bishops of the Metropolia, informing them that the union had taken place.
The Union of Brest opened a new page in the history of the Greek Catholic Church in Ukraine.4 Today that Church wishes to sing with joy a hymn of thanksgiving and praise to the One who, once more, has brought it back from death to life, and it wishes to set forth with renewed enthusiasm on the path marked out by the Second Vatican Council.
Joining the faithful of the Ukrainian Greek Catholic Church in this thanksgiving and petition are the Greek Catholic Churches of the diaspora which date back to the Union of Brest, together with the other Eastern Catholic Churches and the entire Church.
As the Bishop of Rome, I too wish to unite myself to the Catholics of the Byzantine tradition in those lands. For many years, during my pastoral ministry in Poland, I sensed a physical as well as a spiritual closeness with that Church, which was then undergoing such difficult trials. After my election to the See of Peter, I considered it a pressing duty, following the example of my Predecessors, to speak out in defence of its right to exist and freely to profess its faith, at a time when both these rights were being denied. I now have the privilege of joyfully sharing in its celebration of these days of regained freedom.
2. The celebrations commemorating the Union of Brest must be seen in the context of the Millennium of the Baptism of the Rus'. Seven years ago, in 1988, that event was celebrated with great solemnity. For the occasion I published two documents: the Apostolic Letter Euntes in mundum of 25 January 1988,5 for the whole Church, and the Message Magnum Baptismi donum, of 14 February of the same year,6 addressed to Ukrainian Catholics. It was an occasion for celebrating a moment of fundamental importance for the Christian and cultural identity of those peoples, a moment of unique significance, since at that time the Churches of the Byzantine tradition and the Church of Rome were still living in full communion.
After the division which damaged the unity between the West and the Byzantine East, frequent and intense efforts were made to restore full communion. I wish to mention two particularly significant events: the Second Council of Lyons in 1274, and above all the Council of Florence in 1439, when protocols of union with the Eastern Churches were signed. Unfortunately, various causes prevented the promise and potential of those agreements from being realized.
The Bishops of the Metropolia of Kiev, in restoring communion with Rome, made explicit reference to the decisions of the Council of Florence, a Council which had numbered among its participants representatives from the Patriarchate of Constantinople.
In this context, the figure of Metropolitan Isidore of Kiev stands out. As a faithful interpreter and defender of the decisions of that Council, he had to endure exile for his convictions.
The Bishops who promoted the union and the members of their Church retained a lively awareness of their original close ties to their Orthodox brethren, together with a full consciousness of the Oriental identity of their Metropolia, an identity which was also to be upheld after the union. In the history of the Catholic Church, it is a highly significant fact that this just desire was respected and that the act of union did not involve passing over to the Latin tradition, as some thought would happen. Their Church saw an acknowledgment of its right to be governed by its own hierarchy with a specific discipline and to maintain its Eastern liturgical and spiritual heritage.
3. After the union, the Ukrainian Greek Catholic Church enjoyed a period in which its ecclesiastical structures flourished, with resulting benefits for religious life, the education of the clergy and the spiritual commitment of the faithful. With remarkable farsightedness, great importance was attached to education. Thanks to the valuable contribution of the Basilian Order and other Religious Congregations, there was a great growth in the study of the sacred sciences and the nation's culture. In the present century, a figure of extraordinary prestige, in this regard as well as in his witness of suffering borne for Christ, was Metropolitan Andrii Sheptyckyi, whose education and fine spiritual qualities were combined with outstanding organizational gifts. He founded schools and academies, supported theological studies and the human sciences, the press and sacred art, and sought to preserve historical memories.
And yet, all this ecclesial vitality was continually marked by the tragedy of misunderstanding and opposition. An illustrious victim in this regard was the Archbishop of Polock and Vitebsk, Josaphat Kuntsevych, whose martyrdom merited the unfading crown of eternal glory. His body now lies in the Vatican Basilica, where it is continuously venerated with devotion and gratitude by Catholics from throughout the world.
Outstanding among the heroic witnesses to the rights not only of the faith but also of human conscience in those difficult years is the figure of Metropolitan Josyf Slipyj: his courage in enduring exile and prison for 18 years and his indomitable confidence in the resurrection of his Church make him one of the most powerful figures among the confessors of the faith in our time. Nor should his many companions in punishment be forgotten, particularly Bishops Hryhory Khomyshyn and Josaphat Kocylowskyj.
These tempestuous events shook the Church in the homeland to its roots. But Divine Providence had already begun to make it possible for many of its members to find a way of escape for themselves and their people. From the beginning of the 19th century onwards, great waves of emigrants began to cross the ocean, settling above all in Canada, the United States of America, Brazil, Argentina and Australia. The Holy See took care to be close to them, by providing assistance and establishing pastoral structures for them in their new homes, including the establishment of their own Eparchies. At the time of trial, during the atheistic persecution in their native land, the voice of these believers could thus be raised, in full freedom, with strength and courage. In the international forum they defended the right of their persecuted brethren to religious freedom, and thus strengthened the Second Vatican Council's appeal for religious freedom,8 and the efforts made in this regard by the Holy See.
4. The whole Catholic Community recalls with deep emotion the victims of such great suffering: the martyrs and confessors of the faith of the Church in Ukraine offer us a magnificent lesson in fidelity even at the price of life itself. And we, the favoured witnesses of their sacrifice, are aware that they helped to maintain the dignity of a world which seemed overwhelmed by atrocities. They knew the truth, and the truth set them free. Christians in Europe and throughout the world, pausing in prayer before the concentration camps and prisons, should be grateful for the light which they gave: it was the light of Christ, which they caused to shine in the darkness. For long years the darkness seemed in the eyes of the world to prevail, but it was not able to extinguish that light, which was the light of God and the light of man, wounded but not laid low.
This inheritance of suffering and glory today stands at a historic crossroads: now that the chains of imprisonment have been broken, the Greek Catholic Church in Ukraine has begun again to breathe in freedom and to regain fully its own active role in the Church and in history. This task, difficult yet providential, today calls for particular reflection, that it may be carried out with wisdom and farsightedness.
5. The celebration of the Union of Brest should be lived and interpreted in the light of the teachings of the Second Vatican Council. This is perhaps the most important aspect for understanding the significance of the anniversary.
It is well known that the Second Vatican Council made a special point of studying the mystery of the Church, and that one of the most important documents which it produced was the Constitution Lumen gentium. Precisely because of this detailed study the Council takes on a particular ecumenical significance. This is confirmed by the Decree Unitatis redintegratio, which presents a very enlightened programme of activities to be carried out in the work for Christian unity. Thirty years after the conclusion of the Council, I wished to reiterate this programme with my Encyclical Letter Ut unum sint, published on 25 May of this year.9 This Encyclical traces the ecumenical progress which has been made since the Second Vatican Council and, at the same time, looking to the third millennium of the Christian era, it seeks to open up new possibilities for the future.
Putting the celebrations of the coming year in the context of a reflection on the Church, as proposed by the Council, I am anxious above all to encourage a deeper understanding of the proper role which the Ukrainian Greek Catholic Church is called to play today in the ecumenical movement.
6. To promote dialogue with the Byzantine Orthodox Churches, there was set up, after the Second Vatican Council, a special Mixed Commission which also included among its members representatives of the Eastern Catholic Churches.
In various documents efforts have been made to arrive at a deeper and greater understanding between the Orthodox Churches and the Eastern Catholic Churches, efforts which have not been without positive results. In my Apostolic Letter Orientale lumen11 and in my Encyclical Letter Ut unum sint12 I have already written of the elements of sanctification and truth13 common to Eastern and Western Christianity, and of the desirable path to follow in the search for full communion between the Catholic Church and the Orthodox Churches, in the light of the greater ecclesiological understanding brought about by the Second Vatican Council: "Today we know that unity can be achieved through the love of God only if the Churches want it together, in full respect for the traditions of each and for necessary autonomy. We know that this can take place only on the basis of the love of Churches which feel increasingly called to manifest the one Church of Christ, born from one Baptism and from one Eucharist, and which want to be sisters".14 The deepening of knowledge of the doctrine on the Church, brought about by the Council and continuing since the Council, has marked out what can be called a new path on the journey to unity: the path of the dialogue of truth nourished and sustained by the dialogue of charity (cf. Eph 4: 15).
7. The shift from an underground existence has meant a radical change in the situation of the Ukrainian Greek Catholic Church: this Church has found itself facing the grave problems of rebuilding the structures of which it had been completely deprived and, more generally, it has had to commit itself to rediscovering itself fully, not only from within, but also in relation to the other Churches.
Thanks be to God for having enabled it to celebrate this jubilee with religious freedom regained. Thanks be to God also for the growth of the dialogue of charity, whereby significant progress has been made on the road to the much desired reconciliation with the Orthodox Churches.
Numerous migrations and deportations have redrawn the religious geography of those lands; many years of official State atheism have profoundly affected people's minds; there is still not enough clergy to respond to the immense needs of religious and moral reconstruction: these are some of the more dramatic challenges facing all of the Churches.
In the face of these difficulties a common witness of charity is required, in order that the proclamation of the Gospel may not be impeded. As I said in my Apostolic Letter Orientale lumen, "today we can co-operate in proclaiming the Kingdom or we can become the upholders of new divisions".15 May the Lord guide our feet into the way of peace.
8. In the new-found freedom we cannot forget the persecution and martyrdom which the Churches of that region, both Catholic and Orthodox, suffered in their own flesh. This is an important aspect for the Church of all times, as I recalled in my Apostolic Letter Tertio millennio adveniente.16 It concerns a particularly significant heritage of the Churches of Europe, which remain profoundly marked by it: this needs to be studied in the light of the Word of God.
Remembrance of the martyrs cannot be erased from the memory of the Church or of humanity: whether victims of the ideologies of the East or of the West, all of them are brought together in fellowship by the violence whereby hatred for the faith violated the dignity of the human person, created by God "in his image and likeness".
9. Pope John XXIII was fond of repeating: "What unites us is much greater than what divides us". I am convinced that this attitude can be a great benefit to all the Churches. More than 30 years have gone by since the Pope made this statement. In this period of time there have been many indications which suggest to us that Christians have made progress in this direction. Eloquent signs of this progress have been the fraternal meetings between Pope Paul VI and the Ecumenical Patriarch Athenagoras I, and those which I myself have had with the Ecumenical Patriarchs Dimitrios and, more recently, Bartholomaios, and with other venerable Patriarchs of the Churches of the East. All this, together with the many initiatives involving meetings and dialogue which are being promoted everywhere in the Church, encourages us to have hope: the Holy Spirit, the Spirit of unity, does not cease to work among Christians still separated from one another.
And yet human weakness and sin continue to resist the Spirit of unity. Sometimes one even has the impression that there are forces ready to do almost anything in order to slow down, and even put an end to, the movement towards Christian unity. But we cannot give in: daily we must find the courage and strength, which at one and the same time are the gift of the Spirit and the result of human effort, to continue on the path already undertaken.
10. In recalling the Union of Brest we must ask ourselves what this event means today. It was a union which concerned only a particular geographical region, but it is relevant for the entire field of ecumenism. The Eastern Catholic Churches can make a very important contribution to ecumenism. The Council's Decree Orientalium Ecclesiarum reminds us that "the Eastern Churches in communion with the Apostolic See of Rome have a special role to play in promoting the unity of all Christians, particularly Easterners, according to the principles of this sacred Synod's Decree on Ecumenism: first of all by prayer, then by the example of their lives, by religious fidelity to ancient Eastern traditions, by greater mutual knowledge, by collaboration, and by a brotherly regard for objects and attitudes".20 From this it follows that Eastern Catholics are to commit themselves to living profoundly what the Decree lays out. They are asked to make a confession of faith full of humility and gratitude to the Holy Spirit, who guides the Church towards the fulfilment promised her by the Redeemer of the world.
11. Prayer will therefore be the fundamental element which should mark the celebration of this jubilee. Such prayer above all involves giving thanks for all that has been accomplished, down the centuries, by commitment to the Church's unity, and especially for the impulse given to the search for unity as a result of the Second Vatican Council.
This prayer is one of thanksgiving to the Lord, who guides the unfolding of history, for the situation of new-found religious freedom in which this jubilee is being celebrated. It is also an appeal to the Spirit-Paraclete, that he may cause to flourish everything which promotes unity, and may give courage and strength to all those who commit themselves, according to the guidelines of the Council's Decree Unitatis redintegratio, to this work blessed by God. It is a plea for the gift of brotherly love, and for the forgiveness of offences and injustices suffered in the course of history. It is a petition that the power of the living God will bring good even out of the cruel and many-faceted evil caused by acts of human malice. This prayer also expresses hope for the future of the ecumenical journey: the power of God is greater than all human weaknesses, whether old or new. If this jubilee of the Ukrainian Greek Catholic Church, on the threshold of the third millennium, marks a step forward towards full Christian unity, this result will be first of all the work of the Holy Spirit.
12. The jubilee celebrations should also be a time of reflection. The Ukrainian Greek Catholic Church must first of all ask itself what full communion with the Apostolic See meant for it in the past, and what it will mean for it in the future. In a spirit of humble thanksgiving, the Greek Catholic Church will give glory to God, for its heroic fidelity to the Successor of Peter and, under the action of the Holy Spirit, it will understand that today this same fidelity commits it to fostering the unity of all the Churches. This fidelity cost it sufferings and martyrdom in the past: this is a sacrifice offered to God in order to implore the hoped-for union.
13. We cannot fail to entrust the yearning for full Christian unity to the Mother of Christ, she who is ever present in the work of the Lord and of his Church. Chapter Eight of the Dogmatic Constitution Lumen gentium points to her as the one who goes before us on our earthly pilgrimage of faith and is tenderly present to the Church which, at the end of the second millennium, is working to re-establish among all those who believe in Christ that unity which the Lord wills for them. Because she is the Mother of the one Christ, she is the Mother of unity. Since Mary, by the power of the Holy Spirit, gave birth to the Son of God, who received from her his human body, she ardently wills the visible unity of all believers who make up the Mystical Body of Christ. The veneration of Mary, which unites East and West so intimately, will serve, I am convinced, the cause of unity.
The Most Holy Virgin, already present everywhere in our midst both in so many sacred buildings and in the life of faith of so many families, continuously speaks of unity, a unity for which she constantly intercedes. If today, in commemorating the Union of Brest, we recall the marvellous wealth of veneration that the Christian people of the Ukraine have offered to the Mother of God, we cannot but draw from this admiration for the history, spirituality and prayer of those peoples the consequences for unity which are so closely linked to those treasures.
Mary, who has inspired in their trials fathers and mothers, young people, the sick and the aged; Mary, the column of fire capable of guiding so many martyrs of the faith, is certainly at work in preparing the hoped-for union of all Christians: in the light of this, the Ukrainian Greek Catholic Church certainly has its own role to play.
To Mary the Church offers her thanks and asks her to make us share her concern for unity. With filial trust let us abandon ourselves to her, that we may be with her where God will be all in all.
To you, dear Brothers and Sisters, I impart my Apostolic Blessing.
From the Vatican, on 12 November, Memorial of Saint Josaphat, in the year 1995, the eighteenth of my Pontificate.
1) Cf. Bullarium romanum V/2 (1594-1602), 87-92.
2) Cf. A. Welykyj, Documenta Pontificum Romanorum Historiam Ucrainae illustrantia, t. I, p. 257-259.
3) Cf. AAS 38 (1946), 33-63.
5) Cf. AAS 80 (1988), 935-956.
7) Cf. AAS 38 (1946), 54-57. Those fears would be disturbingly confirmed a few years later, as the same Pope pointed out in the Encyclical Epistle Orientales Ecclesias (15 December 1952): AAS 45 (1953), 7-10.
8) Cf. Declaration on Religious Freedom Dignitatis humanae.
9) Cf. L'Osservatore Romano, 31 May 1995, 1-8.
10) Ibid., n. 14, loc. cit., 2.
11) Cf. nn. 18-19; L'Osservatore Romano, 2-3 May 1995, 4.
12) Cf. nn. 12-14; L'Osservatore Romano, 31 May 1995, 2.
13) Cf. Second Vatican Ecumenical Council, Decree on Ecumenism Unitatis redintegratio, n. 3.
14) John Paul II, Apostolic Letter Orientale lumen (2 May 1995), n. 20; L'Osservatore Romano, 2-3 May 1995, 4.
15) N. 19; L'Osservatore Romano, 2-3 May 1995, 4.
16) Cf. AAS 87 (1995), 29-30; Encyclical Letter Ut unum sint, n. 84; L'Osservatore Romano, 31 May 1995, 7.
17) Apol. 50, 13: CCL I, 171.
18) John Paul II, Encyclical Letter Ut unum sint, n. 84: L'Osservatore Romano, 31 May 1995, 7.
19) Ibid., n. 50, loc. cit., 5.
|
Moore Supply is proud to present this satin brass finished bidet part by California Faucets. The 6904-MONO-SB is made from premium materials, and this Bidet Part offers great function and value for your home. This fixture is part of California Faucets' decorative Crystal Collection, so make sure to check out other styles of fixtures to accessorize your room.
|
# Reference: https://arxiv.org/pdf/1509.02971.pdf
# https://github.com/seungeunrho/minimalRL/blob/master/ddpg.py
# https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/ddpg/ddpg.py
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
import pybullet_envs
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DDPG agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.splitext(os.path.basename(__file__))[0],
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="HopperBulletEnv-v0",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=3e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=1,
help='seed of the experiment')
parser.add_argument('--episode-length', type=int, default=0,
help='the maximum length of each episode')
parser.add_argument('--total-timesteps', type=int, default=1000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='sets `torch.backends.cudnn.deterministic` to this flag (default: True)')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will be used when available')
parser.add_argument('--track', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=int(1e6),
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--tau', type=float, default=0.005,
help="target smoothing coefficient (default: 0.005)")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=256,
help="the batch size of sample from the reply memory")
parser.add_argument('--exploration-noise', type=float, default=0.1,
help='the scale of exploration noise')
parser.add_argument('--learning-starts', type=int, default=25000,
help="timestep to start learning")
parser.add_argument('--policy-frequency', type=int, default=2,
help="the frequency of training policy (delayed)")
parser.add_argument('--noise-clip', type=float, default=0.5,
help='noise clip parameter of the Target Policy Smoothing Regularization')
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.track:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
# respect the default timelimit
assert isinstance(env.action_space, Box), "only continuous action space is supported"
assert isinstance(env, TimeLimit) or int(args.episode_length), "the gym env does not have a built-in TimeLimit, please specify by using --episode-length"
if isinstance(env, TimeLimit):
if int(args.episode_length):
env._max_episode_steps = int(args.episode_length)
args.episode_length = env._max_episode_steps
else:
env = TimeLimit(env, int(args.episode_length))
if args.capture_video:
env = Monitor(env, f'videos/{experiment_name}')
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
class QNetwork(nn.Module):
def __init__(self, env):
super(QNetwork, self).__init__()
self.fc1 = nn.Linear(
np.array(env.observation_space.shape).prod()+np.prod(env.action_space.shape), 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, x, a, device):
x = torch.Tensor(x).to(device)
x = torch.cat([x, a], 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Actor(nn.Module):
def __init__(self, env):
super(Actor, self).__init__()
self.fc1 = nn.Linear(np.array(env.observation_space.shape).prod(), 256)
self.fc2 = nn.Linear(256, 256)
self.fc_mu = nn.Linear(256, np.prod(env.action_space.shape))
def forward(self, x, device):
x = torch.Tensor(x).to(device)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return torch.tanh(self.fc_mu(x))
def linear_schedule(start_sigma: float, end_sigma: float, duration: int, t: int):
slope = (end_sigma - start_sigma) / duration
return max(slope * t + start_sigma, end_sigma)
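# NOTE: linear_schedule is defined but never called below; a decayed
# exploration noise (hypothetical sketch, not part of the original training
# loop) could be obtained via
#   sigma = linear_schedule(0.5, args.exploration_noise, args.total_timesteps // 2, global_step)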
max_action = float(env.action_space.high[0])
rb = ReplayBuffer(args.buffer_size)
actor = Actor(env).to(device)
qf1 = QNetwork(env).to(device)
qf1_target = QNetwork(env).to(device)
target_actor = Actor(env).to(device)
target_actor.load_state_dict(actor.state_dict())
qf1_target.load_state_dict(qf1.state_dict())
q_optimizer = optim.Adam(list(qf1.parameters()), lr=args.learning_rate)
actor_optimizer = optim.Adam(list(actor.parameters()), lr=args.learning_rate)
loss_fn = nn.MSELoss()
# TRY NOT TO MODIFY: start the game
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
# ALGO LOGIC: put action logic here
if global_step < args.learning_starts:
action = env.action_space.sample()
else:
action = actor.forward(obs.reshape((1,)+obs.shape), device)
action = (
action.tolist()[0]
+ np.random.normal(0, max_action * args.exploration_noise, size=env.action_space.shape[0])
).clip(env.action_space.low, env.action_space.high)
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, reward, done, info = env.step(action)
episode_reward += reward
# ALGO LOGIC: training.
rb.put((obs, action, reward, next_obs, done))
if global_step > args.learning_starts:
s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
with torch.no_grad():
next_state_actions = (
target_actor.forward(s_next_obses, device)
).clamp(env.action_space.low[0], env.action_space.high[0])
qf1_next_target = qf1_target.forward(s_next_obses, next_state_actions, device)
next_q_value = torch.Tensor(s_rewards).to(device) + (1 - torch.Tensor(s_dones).to(device)) * args.gamma * (qf1_next_target).view(-1)
qf1_a_values = qf1.forward(s_obs, torch.Tensor(s_actions).to(device), device).view(-1)
qf1_loss = loss_fn(qf1_a_values, next_q_value)
# optimize the model
q_optimizer.zero_grad()
qf1_loss.backward()
nn.utils.clip_grad_norm_(list(qf1.parameters()), args.max_grad_norm)
q_optimizer.step()
if global_step % args.policy_frequency == 0:
actor_loss = -qf1.forward(s_obs, actor.forward(s_obs, device), device).mean()
actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(list(actor.parameters()), args.max_grad_norm)
actor_optimizer.step()
# update the target network
for param, target_param in zip(actor.parameters(), target_actor.parameters()):
target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)
for param, target_param in zip(qf1.parameters(), qf1_target.parameters()):
target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)
if global_step % 100 == 0:
writer.add_scalar("losses/qf1_loss", qf1_loss.item(), global_step)
writer.add_scalar("losses/actor_loss", actor_loss.item(), global_step)
# TRY NOT TO MODIFY: CRUCIAL step easy to overlook
obs = next_obs
if done:
# TRY NOT TO MODIFY: record rewards for plotting purposes
print(f"global_step={global_step}, episode_reward={episode_reward}")
writer.add_scalar("charts/episodic_return", episode_reward, global_step)
obs, episode_reward = env.reset(), 0
env.close()
writer.close()
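# Example invocation (sketch; assumes this script is saved as ddpg.py):
#   python ddpg.py --gym-id HopperBulletEnv-v0 --seed 1 --total-timesteps 1000000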
|
! -------------------------------------------------------------
!
! This file contains subroutines for solving the Dyson equation
! using the DLR
!
! -------------------------------------------------------------
!
! Copyright (C) 2021 The Simons Foundation
!
! Author: Jason Kaye
!
! -------------------------------------------------------------
!
! libdlr is licensed under the Apache License, Version 2.0 (the
! "License"); you may not use this file except in compliance with
! the License. You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing,
! software distributed under the License is distributed on an "AS
! IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
! express or implied. See the License for the specific language
! governing permissions and limitations under the License.
!
! -------------------------------------------------------------
!> Solve the nonlinear Dyson equation in imaginary time,
!! with a given expression for the self-energy in terms of the
!! Green's function.
!!
!! This solver uses weighted fixed point iteration to achieve
!! self-consistency: at each step the linear Dyson equation is
!! solved with the self-energy evaluated at the current iterate
!! G_n, giving a proposed update Gt_{n+1}, and the next iterate is
!! G_{n+1} = w*Gt_{n+1} + (1-w)*G_n, where w is a given weight. The
!! iteration terminates when the difference between G_{n+1} and
!! G_n at all of the imaginary time grid points is less than a
!! given tolerance in absolute value, or when a maximum number of
!! iterations is reached.
!!
!! @param[in] beta inverse temperature
!! @param[in] r number of DLR basis functions
!! @param[in] dlrit DLR imaginary time nodes
!! @param[in] it2cf imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] it2cfp imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] cf2it DLR coefficients -> imaginary time grid
!! values transform matrix
!! @param[in] phi tensor taking DLR coefficients of g to
!! matrix of convolution by g.
!! @param[in] sigfun subroutine with calling sequence
!! sigfun(r,g,sig), which takes in
!! value of an imaginary time Green's
!! function at the imaginary time grid
!! points, and returns values of the
!! self-energy Sigma at those grid points
!! @param[in] w weighting parameter for fixed point
!! iteration
!! @param[in] fptol fixed point iteration tolerance
!! @param[in,out] numit on input: max number of fixed point
!! iterations; on output: number of
!! fixed point iterations taken
!! @param[in] g0 right hand side of Dyson equation, on
!! imaginary time grid
!! @param[in,out] g on input, initial guess for fixed
!! point iteration; on output, solution
!! of the Dyson equation on imaginary
!! time grid
!! @param[out] info =0 if iteration converged to tolerance
!! fptol; =-1 if iteration did not
!! converge
subroutine dlr_dyson_it(beta,r,dlrit,it2cf,it2cfp,&
phi,sigfun,w,fptol,numit,g0,g,info)
implicit none
integer r,numit,it2cfp(r),info
real *8 beta,dlrit(r),it2cf(r,r)
real *8 g0(r),g(r),w,fptol,phi(r*r,r)
integer i,info1
real *8 one
real *8, allocatable :: g0mat(:,:),sig(:),gnew(:)
one = 1.0d0
! Get matrix of convolution by G0
allocate(g0mat(r,r))
call dlr_convmat(r,it2cf,it2cfp,phi,g0,g0mat)
! Weighted fixed point iteration
allocate(sig(r),gnew(r))
do i=1,numit
! Evaluate self-energy
call sigfun(r,g,sig)
! Solve linear Dyson equation
call dyson_it_lin(r,it2cf,it2cfp,phi,g0,g0mat,sig,gnew)
! Check self-consistency
if (maxval(abs(gnew-g))<fptol) then
g = gnew
numit = i
info = 0
return
else
! Next G is weighted linear combination of previous and
! current iterates
g = w*gnew + (one-w)*g
endif
enddo
info = -1
end subroutine dlr_dyson_it
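! Example (sketch, hypothetical setup): for a self-energy Sigma = c^2 * G,
! after the standard libdlr initialization of dlrit, it2cf, it2cfp, and
! phi, a solve could look like
!
!   numit = 50
!   g = g0
!   call dlr_dyson_it(beta,r,dlrit,it2cf,it2cfp,phi,sigfun,&
!     0.5d0,1.0d-12,numit,g0,g,info)
!
! where sigfun(r,g,sig) sets sig = c*c*g at the imaginary time grid points.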
!> Solve the linear Dyson equation in imaginary time, with a fixed
!! self-energy.
!!
!! This solver forms the Dyson equation in imaginary time using
!! the DLR basis and solves it using Gaussian elimination.
!!
!! @param[in] r number of DLR basis functions
!! @param[in] it2cf imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] it2cfp imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] phi tensor taking DLR coefficients of g to
!! matrix of convolution by g.
!! @param[in] g0 values of the right hand side G0 on
!! the imaginary time grid
!! @param[in] g0mat matrix of convolution by G0
!! @param[in] sig values of the self-energy on the
!! imaginary time grid
!! @param[out] g solution of the linear Dyson equation
!! on imaginary time grid
subroutine dyson_it_lin(r,it2cf,it2cfp,phi,g0,g0mat,sig,g)
implicit none
integer r,it2cfp(r)
real *8 it2cf(r,r)
real *8 g0(r),g0mat(r,r),sig(r),g(r)
real *8 phi(r*r,r)
integer j,info1
integer, allocatable :: ipiv(:)
real *8 one
real *8, allocatable :: sigc(:),sigmat(:,:),sysmat(:,:)
one = 1.0d0
allocate(sigc(r),sigmat(r,r),sysmat(r,r),ipiv(r))
! Get matrix of convolution by self-energy
call dlr_convmat(r,it2cf,it2cfp,phi,sig,sigmat)
! Form system matrix for linear Dyson equation
call dgemm('N','N',r,r,r,-one,g0mat,r,sigmat,r,0*one,sysmat,r)
do j=1,r
sysmat(j,j) = one + sysmat(j,j)
enddo
! Solve linear equation by LU factorization + backsolve
call dgetrf(r,r,sysmat,r,ipiv,info1)
g = g0
call dgetrs('N',r,1,sysmat,r,ipiv,g,r,info1)
end subroutine dyson_it_lin
!> Solve the nonlinear Dyson equation in Matsubara frequency,
!! with a given expression for the self-energy in terms of the
!! Green's function, evaluated in imaginary time.
!!
!! This solver uses weighted fixed point iteration to achieve
!! self-consistency: at each step the linear Dyson equation is
!! solved with the self-energy evaluated at the current iterate
!! G_n, giving a proposed update Gt_{n+1}, and the next iterate is
!! G_{n+1} = w*Gt_{n+1} + (1-w)*G_n, where w is a given weight. The
!! iteration terminates when the difference between G_{n+1} and
!! G_n at all of the imaginary time grid points is less than a
!! given tolerance in absolute value, or when a maximum number of
!! iterations is reached.
!!
!! The solver transforms back and forth between the imaginary time
!! and Matsubara frequency domains, evaluating the self-energy in
!! imaginary time, and solving the Dyson equation in Matsubara
!! frequency.
!!
!! @param[in] beta inverse temperature
!! @param[in] r number of DLR basis functions
!! @param[in] dlrit DLR imaginary time nodes
!! @param[in] it2cf imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] it2cfp imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] cf2it DLR coefficients -> imaginary time grid
!! values transform matrix
!! @param[in] dlrmf DLR Matsubara frequency nodes
!! @param[in] mf2cf Matsubara frequency grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] mf2cfp Matsubara frequency grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] cf2mf DLR coeffs -> Matsubara freq grid
!! values transform matrix
!! @param[in] sigfun subroutine with calling sequence
!! sigfun(r,g,sig), which takes in
!! value of an imaginary time Green's
!! function at the imaginary time grid
!! points, and returns values of the
!! self-energy Sigma at those grid
!! points
!! @param[in] w weighting parameter for fixed point
!! iteration
!! @param[in] fptol fixed point iteration tolerance
!! @param[in,out] numit on input: max number of fixed point
!! iterations; on output: number of
!! fixed point iterations taken
!! @param[in] g0 right hand side of Dyson equation, on
!! Matsubara frequency grid
!! @param[in,out] g on input, initial guess for fixed
!! point iteration; on output, solution
!! of the Dyson equation on imaginary
!! time grid
!! @param[out] info =0 if iteration converged to tolerance
!! fptol; =-1 if iteration did not
!! converge
subroutine dlr_dyson_mf(beta,r,dlrit,it2cf,it2cfp,cf2it,&
dlrmf,mf2cf,mf2cfp,cf2mf,sigfun,w,fptol,numit,g0,g,&
info)
implicit none
integer r,numit,it2cfp(r),mf2cfp(r),dlrmf(r)
integer info
real *8 beta,dlrit(r),it2cf(r,r)
real *8 cf2it(r,r),g(r),w,fptol
complex *16 mf2cf(r,r),cf2mf(r,r),g0(r)
integer i
real *8 one
real *8, allocatable :: sig(:),gnew(:)
one = 1.0d0
! Weighted fixed point iteration
allocate(sig(r),gnew(r))
do i=1,numit
! Evaluate self-energy
call sigfun(r,g,sig)
! Solve linear Dyson equation
call dyson_mf_lin(beta,r,it2cf,it2cfp,cf2it,&
mf2cf,mf2cfp,cf2mf,g0,sig,gnew)
! Check self-consistency
if (maxval(abs(gnew-g))<fptol) then
g = gnew
numit = i
info = 0
return
else
! Next G is weighted linear combination of previous and
! current iterates
g = w*gnew + (one-w)*g
endif
enddo
info = -1
end subroutine dlr_dyson_mf
!> Solve the linear Dyson equation in Matsubara frequency, with a
!! fixed self-energy.
!!
!! This solver takes in the self-energy and returns the imaginary
!! time Green's function on the imaginary time grid, but performs
!! the solve in the Matsubara frequency domain by diagonal
!! inversion.
!!
!! @param[in] beta inverse temperature
!! @param[in] r number of DLR basis functions
!! @param[in] it2cf imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] it2cfp imaginary time grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] cf2it DLR coefficients -> imaginary time grid
!! values transform matrix
!! @param[in] mf2cf Matsubara frequency grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU factors
!! @param[in] mf2cfp Matsubara frequency grid values ->
!! DLR coefficients transform matrix,
!! stored in LAPACK LU factored format;
!! LU pivots
!! @param[in] cf2mf DLR coeffs -> Matsubara freq grid
!! values transform matrix
!! @param[in] g0 values of the right hand side G0 on
!! the Matsubara frequency grid
!! @param[in] sig values of the self-energy on the
!! imaginary time grid
!! @param[out] g solution of the linear Dyson equation
!! on imaginary time grid
subroutine dyson_mf_lin(beta,r,it2cf,it2cfp,cf2it,&
mf2cf,mf2cfp,cf2mf,g0,sig,g)
implicit none
integer r,it2cfp(r),mf2cfp(r)
real *8 beta,it2cf(r,r),cf2it(r,r)
real *8 sig(r),g(r)
complex *16 mf2cf(r,r),cf2mf(r,r),g0(r)
real *8 one
real *8, allocatable :: gc(:),sigc(:)
complex *16, allocatable :: gmf(:),sigmf(:),tmp(:)
one = 1.0d0
allocate(sigc(r),gmf(r),sigmf(r),tmp(r),gc(r))
! Get DLR coefficients of self-energy
call dlr_it2cf(r,it2cf,it2cfp,sig,sigc)
! Get self-energy on Matsubara frequency grid
call dlr_cf2mf(r,cf2mf,sigc,sigmf)
! Solve Dyson equation by diagonal inversion
gmf = g0/(one-beta**2*g0*sigmf)
! Get DLR coefficients of solution
call dlr_mf2cf(r,mf2cf,mf2cfp,gmf,gc)
! Evaluate solution on imaginary time grid
call dlr_cf2it(r,cf2it,gc,g)
end subroutine dyson_mf_lin
!> Solve the Dyson equation in Matsubara frequency.
!!
!! This solver takes in the self-energy and returns the imaginary
!! time Green's function on the imaginary time grid, but performs
!! the solve in the Matsubara frequency domain by diagonal
!! inversion.
!!
!! @param[in] beta inverse temperature
!! @param[in] r number of DLR basis functions
!! @param[in] g0 values of the right hand side G0 on
!! the Matsubara frequency grid
!! @param[in] sigmf values of the self-energy on the
!! Matsubara frequency grid
!! @param[out] gmf solution of the Dyson equation on the
!! Matsubara frequency grid
subroutine dyson_mf_solve(beta,r,g0,sigmf,gmf)
implicit none
integer r
real *8 beta
complex *16 g0(r),sigmf(r),gmf(r)
gmf = g0/(1.0d0-beta**2*g0*sigmf)
end subroutine dyson_mf_solve
|
State Before: ι : Type ?u.94924
α : Type u_1
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a < b
⊢ insert b (Ioo a b) = Ioc a b
State After: no goals
Tactic: rw [← coe_inj, coe_insert, coe_Ioo, coe_Ioc, Set.insert_eq, Set.union_comm, Set.Ioo_union_right h]
|
State Before: R : Type u_3
R₁ : Type u_4
R₂ : Type u_5
M : Type ?u.137024
M₁ : Type ?u.137027
M₂ : Type ?u.137030
M₁' : Type ?u.137033
M₂' : Type ?u.137036
n : Type u_1
m : Type u_2
n' : Type ?u.137045
m' : Type ?u.137048
ι : Type ?u.137051
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing R₁
inst✝⁸ : CommRing R₂
inst✝⁷ : AddCommMonoid M₁
inst✝⁶ : Module R₁ M₁
inst✝⁵ : AddCommMonoid M₂
inst✝⁴ : Module R₂ M₂
inst✝³ : Fintype n
inst✝² : Fintype m
inst✝¹ : DecidableEq n
inst✝ : DecidableEq m
σ₁ : R₁ →+* R
σ₂ : R₂ →+* R
f : Matrix n m R
⊢ ↑(toMatrix₂Aux (fun i => ↑(LinearMap.stdBasis R₁ (fun x => R₁) i) 1) fun j =>
↑(LinearMap.stdBasis R₂ (fun x => R₂) j) 1)
(toLinearMap₂'Aux σ₁ σ₂ f) =
f
State After: case a.h
R : Type u_3
R₁ : Type u_4
R₂ : Type u_5
M : Type ?u.137024
M₁ : Type ?u.137027
M₂ : Type ?u.137030
M₁' : Type ?u.137033
M₂' : Type ?u.137036
n : Type u_1
m : Type u_2
n' : Type ?u.137045
m' : Type ?u.137048
ι : Type ?u.137051
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing R₁
inst✝⁸ : CommRing R₂
inst✝⁷ : AddCommMonoid M₁
inst✝⁶ : Module R₁ M₁
inst✝⁵ : AddCommMonoid M₂
inst✝⁴ : Module R₂ M₂
inst✝³ : Fintype n
inst✝² : Fintype m
inst✝¹ : DecidableEq n
inst✝ : DecidableEq m
σ₁ : R₁ →+* R
σ₂ : R₂ →+* R
f : Matrix n m R
i : n
j : m
⊢ ↑(toMatrix₂Aux (fun i => ↑(LinearMap.stdBasis R₁ (fun x => R₁) i) 1) fun j =>
↑(LinearMap.stdBasis R₂ (fun x => R₂) j) 1)
(toLinearMap₂'Aux σ₁ σ₂ f) i j =
f i j
Tactic: ext (i j)
State Before: case a.h
R : Type u_3
R₁ : Type u_4
R₂ : Type u_5
M : Type ?u.137024
M₁ : Type ?u.137027
M₂ : Type ?u.137030
M₁' : Type ?u.137033
M₂' : Type ?u.137036
n : Type u_1
m : Type u_2
n' : Type ?u.137045
m' : Type ?u.137048
ι : Type ?u.137051
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing R₁
inst✝⁸ : CommRing R₂
inst✝⁷ : AddCommMonoid M₁
inst✝⁶ : Module R₁ M₁
inst✝⁵ : AddCommMonoid M₂
inst✝⁴ : Module R₂ M₂
inst✝³ : Fintype n
inst✝² : Fintype m
inst✝¹ : DecidableEq n
inst✝ : DecidableEq m
σ₁ : R₁ →+* R
σ₂ : R₂ →+* R
f : Matrix n m R
i : n
j : m
⊢ ↑(toMatrix₂Aux (fun i => ↑(LinearMap.stdBasis R₁ (fun x => R₁) i) 1) fun j =>
↑(LinearMap.stdBasis R₂ (fun x => R₂) j) 1)
(toLinearMap₂'Aux σ₁ σ₂ f) i j =
f i j
State After: no goals
Tactic: simp_rw [LinearMap.toMatrix₂Aux_apply, Matrix.toLinearMap₂'Aux_stdBasis]
|
module mediator_module
implicit none
private
public :: station_manager_t, passenger_train_t, freight_train_t
type, abstract :: train_t
contains
procedure(train_t_arrive), deferred :: arrive
procedure(train_t_depart), deferred :: depart
procedure(train_t_permit_arrival), deferred :: permit_arrival
end type train_t
type, abstract :: mediator_t
contains
procedure(mediator_t_can_arrive), deferred :: can_arrive
procedure(mediator_t_notify_about_departure), deferred :: notify_about_departure
end type mediator_t
abstract interface
subroutine train_t_arrive(self)
import train_t
class(train_t), intent(inout) :: self
end subroutine train_t_arrive
subroutine train_t_depart(self)
import train_t
class(train_t), intent(inout) :: self
end subroutine train_t_depart
subroutine train_t_permit_arrival(self)
import train_t
class(train_t), intent(inout) :: self
end subroutine train_t_permit_arrival
logical function mediator_t_can_arrive(self, train) result(can)
import mediator_t, train_t
class(mediator_t), intent(inout) :: self
class(train_t), intent(in), target :: train
end function mediator_t_can_arrive
subroutine mediator_t_notify_about_departure(self)
import mediator_t
class(mediator_t), intent(inout) :: self
end subroutine mediator_t_notify_about_departure
end interface
type, extends(train_t) :: passenger_train_t
class(mediator_t), pointer :: mediator
contains
procedure :: arrive => passenger_train_t_arrive
procedure :: depart => passenger_train_t_depart
procedure :: permit_arrival => passenger_train_t_permit_arrival
end type passenger_train_t
type, extends(train_t) :: freight_train_t
class(mediator_t), pointer :: mediator
contains
procedure :: arrive => freight_train_t_arrive
procedure :: depart => freight_train_t_depart
procedure :: permit_arrival => freight_train_t_permit_arrival
end type freight_train_t
type node_t
class(train_t), pointer :: train
end type node_t
type, extends(mediator_t) :: station_manager_t
logical :: is_platform_free = .true.
type(node_t), allocatable :: list(:)
contains
procedure :: can_arrive => station_manager_t_can_arrive
procedure :: notify_about_departure => station_manager_t_notify_about_departure
end type station_manager_t
contains
subroutine passenger_train_t_arrive(self)
class(passenger_train_t), intent(inout) :: self
if (.not. self%mediator%can_arrive(self)) then
print *, "Passenger train: arrival blocked, waiting"
return
end if
print *, "Passenger train: arrived"
end subroutine passenger_train_t_arrive
subroutine passenger_train_t_depart(self)
class(passenger_train_t), intent(inout) :: self
print *, "Passenger train: leaving"
call self%mediator%notify_about_departure()
end subroutine passenger_train_t_depart
subroutine passenger_train_t_permit_arrival(self)
class(passenger_train_t), intent(inout) :: self
print *, "Passenger train: arrival permitted, arriving"
call self%arrive()
end subroutine passenger_train_t_permit_arrival
subroutine freight_train_t_arrive(self)
class(freight_train_t), intent(inout) :: self
if (.not. self%mediator%can_arrive(self)) then
print *, "Freight train: arrival blocked, waiting"
return
end if
print *, "Freight train: arrived"
end subroutine freight_train_t_arrive
subroutine freight_train_t_depart(self)
class(freight_train_t), intent(inout) :: self
print *, "freight train: leaving"
call self%mediator%notify_about_departure()
end subroutine freight_train_t_depart
subroutine freight_train_t_permit_arrival(self)
class(freight_train_t), intent(inout) :: self
print *, "Freight train: arrival permitted, arriving"
call self%arrive()
end subroutine freight_train_t_permit_arrival
logical function station_manager_t_can_arrive(self, train) result(can)
class(station_manager_t), intent(inout) :: self
class(train_t), intent(in), target :: train
if (self%is_platform_free) then
self%is_platform_free = .false.
can = .true.
return
end if
if (allocated(self%list)) then
self%list = [self%list, node_t(train)]
else
self%list = [node_t(train)]
end if
can = .false.
end function station_manager_t_can_arrive
subroutine station_manager_t_notify_about_departure(self)
class(station_manager_t), intent(inout) :: self
class(train_t), pointer :: train
if (.not. self%is_platform_free) then
self%is_platform_free = .true.
end if
if (allocated(self%list)) then
if (size(self%list) > 0) then
train => self%list(1)%train
!> memory leak
self%list = self%list(2:)
call train%permit_arrival()
end if
end if
end subroutine station_manager_t_notify_about_departure
end module mediator_module
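! Usage sketch (hypothetical driver, not part of the original module):
!
! program demo
! use mediator_module
! type(station_manager_t), target :: manager
! type(passenger_train_t) :: p
! type(freight_train_t) :: f
! p%mediator => manager
! f%mediator => manager
! call p%arrive() ! platform free: passenger train arrives
! call f%arrive() ! platform busy: freight train is queued
! call p%depart() ! frees the platform; the queued train is permitted to arrive
! end program demo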
|
SUBROUTINE XERDMP
C
C ABSTRACT
C XERDMP PRINTS AN ERROR TABLE SHOWING ALL ERRORS WHICH
C HAVE OCCURRED DURING THE CURRENT EXECUTION, OR SINCE XERDMP
C WAS LAST CALLED. AFTER PRINTING, THE ERROR TABLE IS CLEARED,
C AND IF PROGRAM EXECUTION IS CONTINUED ACCUMULATION OF THE
C ERROR TABLE BEGINS AT ZERO.
C
C WRITTEN BY RON JONES, WITH SLATEC COMMON MATH LIBRARY SUBCOMMITTEE
C END OF ABSTRACT
C LATEST REVISION --- 7 JUNE 1978
C
CALL XERSAV(1H ,0,0,0,KOUNT)
RETURN
END
|
using IterativeSolvers, KrylovKit, LinearAlgebra, LinearMaps, Parameters, DocStringExtensions
# In this file, we provide linear solvers for the Package
abstract type AbstractLinearSolver end
# The function linsolve(J, x; kwargs...) must return whether the solve was successful and how many steps were required.
# The following function can be used to cache a factorization; see the DefaultLS() case for an example.
function (ls::AbstractLinearSolver)(J, rhs1, rhs2; kwargs...)
sol1, flag1, it1 = ls(J, rhs1; kwargs...)
sol2, flag2, it2 = ls(J, rhs2; kwargs...)
return sol1, sol2, flag1 & flag2, (it1, it2)
end
####################################################################################################
# The following functions are used for the Continuation of Hopf points and the computation of Floquet multipliers
# this function returns a₀ * I + a₁ .* J and ensures that we don't do unnecessary computations like 0*I + 1*J
function _axpy(J, a₀, a₁)
if a₀ == 0
if a₁ == 1
return J
else
return a₁ .* J
end
elseif a₀ == 1
if a₁ == 1
return I + J
else
return I + a₁ .* J
end
else
return a₀ * I + a₁ .* J
end
end
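# For example (illustrative values): _axpy(J, 0, 1) returns J itself with no
# copy, while _axpy(J, 1, 2) materializes I + 2 .* J.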
function _axpy_op(J, v::AbstractArray, a₀, a₁)
if a₀ == 0
if a₁ == 1
return apply(J, v)
else
return a₁ .* apply(J, v)
end
elseif a₀ == 1
if a₁ == 1
return v .+ apply(J, v)
else
return v .+ a₁ .* apply(J, v)
end
else
return a₀ .* v .+ a₁ .* apply(J, v)
end
end
####################################################################################################
# Solvers for default \ operator (backslash)
####################################################################################################
"""
$(TYPEDEF)
This struct is used to provide the backslash operator. Can be used to solve `(a₀ * I + a₁ * J) * x = rhs`.
$(TYPEDFIELDS)
"""
@with_kw struct DefaultLS <: AbstractLinearSolver
"Whether to catch a factorization for multiple solves. Some operators may not support LU (like ApproxFun.jl) or QR factorization so it is best to let the user decides. Some matrices do not have `factorize` like `StaticArrays.MMatrix`."
useFactorization::Bool = true
end
# this function is used to solve (a₀ * I + a₁ * J) * x = rhs
# the options a₀, a₁ are only used for the Hopf Newton / Continuation
function (l::DefaultLS)(J, rhs; a₀ = 0, a₁ = 1, kwargs...)
return _axpy(J, a₀, a₁) \ rhs, true, 1
end
# this function is used to solve (a₀ * I + a₁ * J) * x = rhs
# with multiple RHS. We can cache the factorization in this case
# the options a₀, a₁ are only used for the Hopf Newton / Continuation
function (l::DefaultLS)(J, rhs1, rhs2; a₀ = 0, a₁ = 1, kwargs...)
if l.useFactorization
Jfact = factorize(_axpy(J, a₀, a₁))
return Jfact \ rhs1, Jfact \ rhs2, true, (1, 1)
else
_J = _axpy(J, a₀, a₁)
return _J \ rhs1, _J \ rhs2, true, (1, 1)
end
end
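# Usage sketch (hypothetical values): solve a dense system with the backslash
# solver; the flag is always `true` and a single "iteration" is reported.
#   ls = DefaultLS()
#   A = rand(3, 3) + 3I
#   sol, success, its = ls(A, rand(3))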
####################################################################################################
# Solvers for IterativeSolvers
####################################################################################################
"""
$(TYPEDEF)
Linear solver based on gmres from `IterativeSolvers.jl`. Can be used to solve `(a₀ * I + a₁ * J) * x = rhs`.
$(TYPEDFIELDS)
"""
@with_kw mutable struct GMRESIterativeSolvers{T, Tl, Tr} <: AbstractLinearSolver
"Absolute tolerance for solver"
abstol::T = 0.0
"Relative tolerance for solver"
reltol::T = 1e-8
"Number of restarts"
restart::Int64 = 200
"Maximum number of iterations"
maxiter::Int64 = 100
"Dimension of the problem"
N::Int64 = 0
"Display information during iterations"
verbose::Bool = false
"Record information"
log::Bool = true
"Start with zero guess"
initially_zero::Bool = true
"Left preconditioner"
Pl::Tl = IterativeSolvers.Identity()
"Right preconditioner"
Pr::Tr = IterativeSolvers.Identity()
"Whether the linear operator is written inplace"
ismutating::Bool = false
end
# this function is used to solve (a₀ * I + a₁ * J) * x = rhs
# the optional shift is only used for the Hopf Newton / Continuation
function (l::GMRESIterativeSolvers{T, Tl, Tr})(J, rhs; a₀ = 0, a₁ = 1, kwargs...) where {T, Tl, Tr}
# no need to use fancy axpy! here because IterativeSolvers "only" handles AbstractArray
if l.ismutating == true
Jmap = LinearMap{T}((o, v) -> J(o, v), l.N, l.N ; ismutating = true)
@assert ((a₀ == 0) && (a₁ == 1)) "Perturbed inplace linear problem not done yet!"
else
J_map = v -> _axpy_op(J, v, a₀, a₁)
Jmap = LinearMap{T}(J_map, l.N, l.N ; ismutating = false)
end
res = IterativeSolvers.gmres(Jmap, rhs; abstol = l.abstol, reltol = l.reltol, log = l.log, verbose = l.verbose, restart = l.restart, maxiter = l.maxiter, initially_zero = l.initially_zero, Pl = l.Pl, Pr = l.Pr, kwargs...)
(res[2].iters >= l.maxiter) && (@warn "IterativeSolvers.gmres iterated maxiter = $(res[2].iters) times without achieving the desired tolerance.")
return res[1], length(res) > 1, res[2].iters
end
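# Usage sketch (hypothetical values): `N` must match the problem size, since
# the operator is wrapped in a LinearMap of that dimension.
#   ls = GMRESIterativeSolvers(N = 100, reltol = 1e-10)
#   sol, success, its = ls(A, rhs)   # A is a 100x100 matrix or linear operator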
####################################################################################################
# Solvers for KrylovKit
####################################################################################################
"""
$(TYPEDEF)
Create a linear solver based on GMRES from `KrylovKit.jl`. Can be used to solve `(a₀ * I + a₁ * J) * x = rhs`.
$(TYPEDFIELDS)
!!! tip "Different linear solvers"
By tuning the options, you can select CG, GMRES... see [here](https://jutho.github.io/KrylovKit.jl/stable/man/linear/#KrylovKit.linsolve)
"""
@with_kw mutable struct GMRESKrylovKit{T, Tl} <: AbstractLinearSolver
"Krylov Dimension"
dim::Int64 = KrylovDefaults.krylovdim
"Absolute tolerance for solver"
atol::T = KrylovDefaults.tol
"Relative tolerance for solver"
rtol::T = KrylovDefaults.tol
"Maximum number of iterations"
maxiter::Int64 = KrylovDefaults.maxiter
"Verbosity ∈ {0,1,2}"
verbose::Int64 = 0
"If the linear map is symmetric, only meaningful if T<:Real"
issymmetric::Bool = false
"If the linear map is hermitian"
ishermitian::Bool = false
"If the linear map is positive definite"
isposdef::Bool = false
"Left preconditioner"
Pl::Tl = nothing
end
# this function is used to solve (a₀ * I + a₁ * J) * x = rhs
# the optional shift is only used for the Hopf Newton / Continuation
function (l::GMRESKrylovKit{T, Tl})(J, rhs; a₀ = 0, a₁ = 1, kwargs...) where {T, Tl}
if Tl == Nothing
res, info = KrylovKit.linsolve(J, rhs, a₀, a₁; rtol = l.rtol, verbosity = l.verbose, krylovdim = l.dim, maxiter = l.maxiter, atol = l.atol, issymmetric = l.issymmetric, ishermitian = l.ishermitian, isposdef = l.isposdef, kwargs...)
else # use preconditioner
res, info = KrylovKit.linsolve(x -> (out = apply(J, x); ldiv!(l.Pl, out)), ldiv!(l.Pl, copy(rhs)), a₀, a₁; rtol = l.rtol, verbosity = l.verbose, krylovdim = l.dim, maxiter = l.maxiter, atol = l.atol, issymmetric = l.issymmetric, ishermitian = l.ishermitian, isposdef = l.isposdef, kwargs...)
end
info.converged == 0 && (@warn "KrylovKit.linsolve solver did not converge")
return res, true, info.numops
end
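# Usage sketch (hypothetical values): KrylovKit works on general vectors, so
# no problem dimension is required up front.
#   ls = GMRESKrylovKit(rtol = 1e-10, dim = 50)
#   sol, success, nops = ls(J, rhs)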
|
SUBROUTINE GDRGRD ( dellat, dellon, lblfrq, blat, blon,
+ ifrmat, iret )
C************************************************************************
C* GDRGRD *
C* *
C* This subroutine draws a uniform latitude/longitude grid. The map *
C* projection must be defined before GDRGRD is called. The current *
C* color, line and text attributes are used. *
C* *
C* GDRGRD ( DELLAT, DELLON, LBLFRQ, BLAT, BLON, IFRMAT, IRET ) *
C* *
C* Input parameters: *
C* DELLAT REAL Latitude interval in degrees *
C* DELLON REAL Longitude interval in degrees *
C* LBLFRQ (2) INTEGER Label frequency *
C* 0 = no grid labels *
C* 1 = every grid line *
C* 2 = every other grid line *
C* n = every n-th line *
C* BLAT REAL Longitude label locations *
C* BLON REAL Latitude label locations *
C* IFRMAT INTEGER Lat/lon format *
C* 1 = +/- value *
C* 2 = N,S,E,W added, no '-' *
C* Defaults to 1 if not set to 2 *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C** *
C* Log: *
C* R. Shah/RDS 8/81 *
C* J. M. Vilardo/RDS 1/84 GEMPLT Version 3.0 *
C* I. Graffman/RDS 5/85 GEMPLT Version 3.1 *
C* I. Graffman/RDS 6/88 Clean up *
C* M. desJardins/NMC 8/91 Separate lat/lon label frequencies *
C* L. Williams/EAi 3/94 Removed blank comments from header *
C* A. Hardy/GSC 6/98 Cleaned up prolog *
C* A. Hardy/GSC 12/00 Added lat/lon label locations, format *
C************************************************************************
INCLUDE 'FUNCCODE.PRM'
INCLUDE 'ERROR.PRM'
C*
INTEGER lblfrq (2)
C*
INTEGER isend (2)
REAL rsend (2)
C------------------------------------------------------------------------
C* Load input parameters into buffer and write them to the mailbox.
C
isend (1) = 9
isend (2) = FDRGRD
rsend (1) = dellat
rsend (2) = dellon
C
CALL GPUT ( isend, 2, iret )
IF ( iret .ne. NORMAL ) RETURN
C
CALL GPUTR ( rsend, 2, iret )
IF ( iret .ne. NORMAL ) RETURN
CALL GPUT ( lblfrq, 2, iret )
C
CALL GPUTR ( blat, 1, iret )
IF ( iret .ne. NORMAL ) RETURN
CALL GPUTR ( blon, 1, iret )
IF ( iret .ne. NORMAL ) RETURN
CALL GPUT ( ifrmat, 1, iret )
C
C* If successful write, get output parameters.
C
IF( iret .ne. NORMAL ) RETURN
CALL GGET ( iret, 1, ier )
IF ( ier .ne. NORMAL ) iret = ier
C
RETURN
END
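C* Example (sketch): draw a 10 x 10 degree grid, labelling every
C* other line along the equator and the Greenwich meridian:
C*
C* INTEGER LBLFRQ (2)
C* DATA LBLFRQ / 2, 2 /
C* CALL GDRGRD ( 10., 10., LBLFRQ, 0., 0., 2, IRET )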
|
lemma R_mult_left_mono: "R a b \<Longrightarrow> c \<ge> 0 \<Longrightarrow> R (c*a) (c*b)"
|
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Finset.Nonempty (Icc a b) ↔ a ≤ b
[PROOFSTEP]
rw [← coe_nonempty, coe_Icc, Set.nonempty_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Finset.Nonempty (Ico a b) ↔ a < b
[PROOFSTEP]
rw [← coe_nonempty, coe_Ico, Set.nonempty_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Finset.Nonempty (Ioc a b) ↔ a < b
[PROOFSTEP]
rw [← coe_nonempty, coe_Ioc, Set.nonempty_Ioc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : DenselyOrdered α
⊢ Finset.Nonempty (Ioo a b) ↔ a < b
[PROOFSTEP]
rw [← coe_nonempty, coe_Ioo, Set.nonempty_Ioo]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Icc a b = ∅ ↔ ¬a ≤ b
[PROOFSTEP]
rw [← coe_eq_empty, coe_Icc, Set.Icc_eq_empty_iff]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ico a b = ∅ ↔ ¬a < b
[PROOFSTEP]
rw [← coe_eq_empty, coe_Ico, Set.Ico_eq_empty_iff]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ioc a b = ∅ ↔ ¬a < b
[PROOFSTEP]
rw [← coe_eq_empty, coe_Ioc, Set.Ioc_eq_empty_iff]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : DenselyOrdered α
⊢ Ioo a b = ∅ ↔ ¬a < b
[PROOFSTEP]
rw [← coe_eq_empty, coe_Ioo, Set.Ioo_eq_empty_iff]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a ∈ Icc a b ↔ a ≤ b
[PROOFSTEP]
simp only [mem_Icc, true_and_iff, le_rfl]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a ∈ Ico a b ↔ a < b
[PROOFSTEP]
simp only [mem_Ico, true_and_iff, le_refl]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ b ∈ Icc a b ↔ a ≤ b
[PROOFSTEP]
simp only [mem_Icc, and_true_iff, le_rfl]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ b ∈ Ioc a b ↔ a < b
[PROOFSTEP]
simp only [mem_Ioc, and_true_iff, le_rfl]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₂ ≤ a₁
hb : b₁ ≤ b₂
⊢ Icc a₁ b₁ ⊆ Icc a₂ b₂
[PROOFSTEP]
simpa [← coe_subset] using Set.Icc_subset_Icc ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₂ ≤ a₁
hb : b₁ ≤ b₂
⊢ Ico a₁ b₁ ⊆ Ico a₂ b₂
[PROOFSTEP]
simpa [← coe_subset] using Set.Ico_subset_Ico ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₂ ≤ a₁
hb : b₁ ≤ b₂
⊢ Ioc a₁ b₁ ⊆ Ioc a₂ b₂
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioc_subset_Ioc ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₂ ≤ a₁
hb : b₁ ≤ b₂
⊢ Ioo a₁ b₁ ⊆ Ioo a₂ b₂
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioo_subset_Ioo ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : a₁ < a₂
⊢ Ico a₂ b ⊆ Ioo a₁ b
[PROOFSTEP]
rw [← coe_subset, coe_Ico, coe_Ioo]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : a₁ < a₂
⊢ Set.Ico a₂ b ⊆ Set.Ioo a₁ b
[PROOFSTEP]
exact Set.Ico_subset_Ioo_left h
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : b₁ < b₂
⊢ Ioc a b₁ ⊆ Ioo a b₂
[PROOFSTEP]
rw [← coe_subset, coe_Ioc, coe_Ioo]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : b₁ < b₂
⊢ Set.Ioc a b₁ ⊆ Set.Ioo a b₂
[PROOFSTEP]
exact Set.Ioc_subset_Ioo_right h
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : b₁ < b₂
⊢ Icc a b₁ ⊆ Ico a b₂
[PROOFSTEP]
rw [← coe_subset, coe_Icc, coe_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : b₁ < b₂
⊢ Set.Icc a b₁ ⊆ Set.Ico a b₂
[PROOFSTEP]
exact Set.Icc_subset_Ico_right h
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ioo a b ⊆ Ico a b
[PROOFSTEP]
rw [← coe_subset, coe_Ioo, coe_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.Ioo a b ⊆ Set.Ico a b
[PROOFSTEP]
exact Set.Ioo_subset_Ico_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ioo a b ⊆ Ioc a b
[PROOFSTEP]
rw [← coe_subset, coe_Ioo, coe_Ioc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.Ioo a b ⊆ Set.Ioc a b
[PROOFSTEP]
exact Set.Ioo_subset_Ioc_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ico a b ⊆ Icc a b
[PROOFSTEP]
rw [← coe_subset, coe_Ico, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.Ico a b ⊆ Set.Icc a b
[PROOFSTEP]
exact Set.Ico_subset_Icc_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Ioc a b ⊆ Icc a b
[PROOFSTEP]
rw [← coe_subset, coe_Ioc, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.Ioc a b ⊆ Set.Icc a b
[PROOFSTEP]
exact Set.Ioc_subset_Icc_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h₁ : a₁ ≤ b₁
⊢ Icc a₁ b₁ ⊆ Icc a₂ b₂ ↔ a₂ ≤ a₁ ∧ b₁ ≤ b₂
[PROOFSTEP]
rw [← coe_subset, coe_Icc, coe_Icc, Set.Icc_subset_Icc_iff h₁]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h₁ : a₁ ≤ b₁
⊢ Icc a₁ b₁ ⊆ Ioo a₂ b₂ ↔ a₂ < a₁ ∧ b₁ < b₂
[PROOFSTEP]
rw [← coe_subset, coe_Icc, coe_Ioo, Set.Icc_subset_Ioo_iff h₁]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h₁ : a₁ ≤ b₁
⊢ Icc a₁ b₁ ⊆ Ico a₂ b₂ ↔ a₂ ≤ a₁ ∧ b₁ < b₂
[PROOFSTEP]
rw [← coe_subset, coe_Icc, coe_Ico, Set.Icc_subset_Ico_iff h₁]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
hI : a₂ ≤ b₂
ha : a₂ < a₁
hb : b₁ ≤ b₂
⊢ Icc a₁ b₁ ⊂ Icc a₂ b₂
[PROOFSTEP]
rw [← coe_ssubset, coe_Icc, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
hI : a₂ ≤ b₂
ha : a₂ < a₁
hb : b₁ ≤ b₂
⊢ Set.Icc a₁ b₁ ⊂ Set.Icc a₂ b₂
[PROOFSTEP]
exact Set.Icc_ssubset_Icc_left hI ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
hI : a₂ ≤ b₂
ha : a₂ ≤ a₁
hb : b₁ < b₂
⊢ Icc a₁ b₁ ⊂ Icc a₂ b₂
[PROOFSTEP]
rw [← coe_ssubset, coe_Icc, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
hI : a₂ ≤ b₂
ha : a₂ ≤ a₁
hb : b₁ < b₂
⊢ Set.Icc a₁ b₁ ⊂ Set.Icc a₂ b₂
[PROOFSTEP]
exact Set.Icc_ssubset_Icc_right hI ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
s : Set α
h₀ : BddBelow s
h₁ : BddAbove s
⊢ Set.Finite s
[PROOFSTEP]
let ⟨a, ha⟩ := h₀
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b b₁ b₂ c x : α
s : Set α
h₀ : BddBelow s
h₁ : BddAbove s
a : α
ha : a ∈ lowerBounds s
⊢ Set.Finite s
[PROOFSTEP]
let ⟨b, hb⟩ := h₁
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c x : α
s : Set α
h₀ : BddBelow s
h₁ : BddAbove s
a : α
ha : a ∈ lowerBounds s
b : α
hb : b ∈ upperBounds s
⊢ Set.Finite s
[PROOFSTEP]
classical exact ⟨Set.fintypeOfMemBounds ha hb⟩
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c x : α
s : Set α
h₀ : BddBelow s
h₁ : BddAbove s
a : α
ha : a ∈ lowerBounds s
b : α
hb : b ∈ upperBounds s
⊢ Set.Finite s
[PROOFSTEP]
exact ⟨Set.fintypeOfMemBounds ha hb⟩
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : DecidablePred fun x => x < c
hcb : c ≤ b
⊢ filter (fun x => x < c) (Ico a b) = Ico a c
[PROOFSTEP]
ext x
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x✝ : α
inst✝ : DecidablePred fun x => x < c
hcb : c ≤ b
x : α
⊢ x ∈ filter (fun x => x < c) (Ico a b) ↔ x ∈ Ico a c
[PROOFSTEP]
rw [mem_filter, mem_Ico, mem_Ico, and_right_comm]
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x✝ : α
inst✝ : DecidablePred fun x => x < c
hcb : c ≤ b
x : α
⊢ (a ≤ x ∧ x < c) ∧ x < b ↔ a ≤ x ∧ x < c
[PROOFSTEP]
exact and_iff_left_of_imp fun h => h.2.trans_le hcb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c✝ x a b c : α
inst✝ : DecidablePred ((fun x x_1 => x ≤ x_1) c)
hac : a ≤ c
⊢ filter ((fun x x_1 => x ≤ x_1) c) (Ico a b) = Ico c b
[PROOFSTEP]
ext x
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c✝ x✝ a b c : α
inst✝ : DecidablePred ((fun x x_1 => x ≤ x_1) c)
hac : a ≤ c
x : α
⊢ x ∈ filter ((fun x x_1 => x ≤ x_1) c) (Ico a b) ↔ x ∈ Ico c b
[PROOFSTEP]
rw [mem_filter, mem_Ico, mem_Ico, and_comm, and_left_comm]
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c✝ x✝ a b c : α
inst✝ : DecidablePred ((fun x x_1 => x ≤ x_1) c)
hac : a ≤ c
x : α
⊢ a ≤ x ∧ (fun x x_1 => x ≤ x_1) c x ∧ x < b ↔ c ≤ x ∧ x < b
[PROOFSTEP]
exact and_iff_right_of_imp fun h => hac.trans h.1
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a < j ∧ j < b
⊢ filter (fun j => a < j ∧ j < b) univ = Ioo a b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a < j ∧ j < b
a✝ : α
⊢ a✝ ∈ filter (fun j => a < j ∧ j < b) univ ↔ a✝ ∈ Ioo a b
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a < j ∧ j ≤ b
⊢ filter (fun j => a < j ∧ j ≤ b) univ = Ioc a b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a < j ∧ j ≤ b
a✝ : α
⊢ a✝ ∈ filter (fun j => a < j ∧ j ≤ b) univ ↔ a✝ ∈ Ioc a b
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a ≤ j ∧ j < b
⊢ filter (fun j => a ≤ j ∧ j < b) univ = Ico a b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a ≤ j ∧ j < b
a✝ : α
⊢ a✝ ∈ filter (fun j => a ≤ j ∧ j < b) univ ↔ a✝ ∈ Ico a b
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a ≤ j ∧ j ≤ b
⊢ filter (fun j => a ≤ j ∧ j ≤ b) univ = Icc a b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun j => a ≤ j ∧ j ≤ b
a✝ : α
⊢ a✝ ∈ filter (fun j => a ≤ j ∧ j ≤ b) univ ↔ a✝ ∈ Icc a b
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderTop α
⊢ Icc a b ⊆ Ici a
[PROOFSTEP]
simpa [← coe_subset] using Set.Icc_subset_Ici_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderTop α
⊢ Ico a b ⊆ Ici a
[PROOFSTEP]
simpa [← coe_subset] using Set.Ico_subset_Ici_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderTop α
⊢ Ioc a b ⊆ Ioi a
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioc_subset_Ioi_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderTop α
⊢ Ioo a b ⊆ Ioi a
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioo_subset_Ioi_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderBot α
⊢ Icc a b ⊆ Iic b
[PROOFSTEP]
simpa [← coe_subset] using Set.Icc_subset_Iic_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderBot α
⊢ Ioc a b ⊆ Iic b
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioc_subset_Iic_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderBot α
⊢ Ico a b ⊆ Iio b
[PROOFSTEP]
simpa [← coe_subset] using Set.Ico_subset_Iio_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : Preorder α
inst✝¹ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
inst✝ : LocallyFiniteOrderBot α
⊢ Ioo a b ⊆ Iio b
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioo_subset_Iio_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrderTop α
a : α
⊢ Ioi a ⊆ Ici a
[PROOFSTEP]
simpa [← coe_subset] using Set.Ioi_subset_Ici_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderTop α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred ((fun x x_1 => x < x_1) a)
⊢ filter ((fun x x_1 => x < x_1) a) univ = Ioi a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderTop α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred ((fun x x_1 => x < x_1) a)
a✝ : α
⊢ a✝ ∈ filter ((fun x x_1 => x < x_1) a) univ ↔ a✝ ∈ Ioi a
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderTop α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred ((fun x x_1 => x ≤ x_1) a)
⊢ filter ((fun x x_1 => x ≤ x_1) a) univ = Ici a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderTop α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred ((fun x x_1 => x ≤ x_1) a)
a✝ : α
⊢ a✝ ∈ filter ((fun x x_1 => x ≤ x_1) a) univ ↔ a✝ ∈ Ici a
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Preorder α
inst✝ : LocallyFiniteOrderBot α
a : α
⊢ Iio a ⊆ Iic a
[PROOFSTEP]
simpa [← coe_subset] using Set.Iio_subset_Iic_self
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderBot α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun x => x < a
⊢ filter (fun x => x < a) univ = Iio a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderBot α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun x => x < a
a✝ : α
⊢ a✝ ∈ filter (fun x => x < a) univ ↔ a✝ ∈ Iio a
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderBot α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun x => x ≤ a
⊢ filter (fun x => x ≤ a) univ = Iic a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : Preorder α
inst✝² : LocallyFiniteOrderBot α
a : α
inst✝¹ : Fintype α
inst✝ : DecidablePred fun x => x ≤ a
a✝ : α
⊢ a✝ ∈ filter (fun x => x ≤ a) univ ↔ a✝ ∈ Iic a
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b c a : α
⊢ Icc a a = {a}
[PROOFSTEP]
rw [← coe_eq_singleton, coe_Icc, Set.Icc_self]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ Icc a b = {c} ↔ a = c ∧ b = c
[PROOFSTEP]
rw [← coe_eq_singleton, coe_Icc, Set.Icc_eq_singleton_iff]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c : α
inst✝ : DecidableEq α
a b : α
⊢ erase (Icc a b) a = Ioc a b
[PROOFSTEP]
simp [← coe_inj]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c : α
inst✝ : DecidableEq α
a b : α
⊢ erase (Icc a b) b = Ico a b
[PROOFSTEP]
simp [← coe_inj]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c : α
inst✝ : DecidableEq α
a b : α
⊢ erase (Ico a b) a = Ioo a b
[PROOFSTEP]
simp [← coe_inj]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c : α
inst✝ : DecidableEq α
a b : α
⊢ erase (Ioc a b) b = Ioo a b
[PROOFSTEP]
simp [← coe_inj]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c : α
inst✝ : DecidableEq α
a b : α
⊢ Icc a b \ {a, b} = Ioo a b
[PROOFSTEP]
simp [← coe_inj]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a ≤ b
⊢ insert b (Ico a b) = Icc a b
[PROOFSTEP]
rw [← coe_inj, coe_insert, coe_Icc, coe_Ico, Set.insert_eq, Set.union_comm, Set.Ico_union_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a ≤ b
⊢ insert a (Ioc a b) = Icc a b
[PROOFSTEP]
rw [← coe_inj, coe_insert, coe_Ioc, coe_Icc, Set.insert_eq, Set.union_comm, Set.Ioc_union_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a < b
⊢ insert a (Ioo a b) = Ico a b
[PROOFSTEP]
rw [← coe_inj, coe_insert, coe_Ioo, coe_Ico, Set.insert_eq, Set.union_comm, Set.Ioo_union_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a < b
⊢ insert b (Ioo a b) = Ioc a b
[PROOFSTEP]
rw [← coe_inj, coe_insert, coe_Ioo, coe_Ioc, Set.insert_eq, Set.union_comm, Set.Ioo_union_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a ≤ b
⊢ Icc a b \ Ico a b = {b}
[PROOFSTEP]
simp [← coe_inj, h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a ≤ b
⊢ Icc a b \ Ioc a b = {a}
[PROOFSTEP]
simp [← coe_inj, h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a ≤ b
⊢ Icc a b \ Ioo a b = {a, b}
[PROOFSTEP]
simp [← coe_inj, h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a < b
⊢ Ico a b \ Ioo a b = {a}
[PROOFSTEP]
simp [← coe_inj, h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a b c : α
inst✝ : DecidableEq α
h : a < b
⊢ Ioc a b \ Ioo a b = {b}
[PROOFSTEP]
simp [← coe_inj, h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a ≤ b
⊢ Icc a b = cons b (Ico a b) (_ : ¬b ∈ Ico a b)
[PROOFSTEP]
classical rw [cons_eq_insert, Ico_insert_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a ≤ b
⊢ Icc a b = cons b (Ico a b) (_ : ¬b ∈ Ico a b)
[PROOFSTEP]
rw [cons_eq_insert, Ico_insert_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a ≤ b
⊢ Icc a b = cons a (Ioc a b) (_ : ¬a ∈ Ioc a b)
[PROOFSTEP]
classical rw [cons_eq_insert, Ioc_insert_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a ≤ b
⊢ Icc a b = cons a (Ioc a b) (_ : ¬a ∈ Ioc a b)
[PROOFSTEP]
rw [cons_eq_insert, Ioc_insert_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a < b
⊢ Ioc a b = cons b (Ioo a b) (_ : ¬b ∈ Ioo a b)
[PROOFSTEP]
classical rw [cons_eq_insert, Ioo_insert_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a < b
⊢ Ioc a b = cons b (Ioo a b) (_ : ¬b ∈ Ioo a b)
[PROOFSTEP]
rw [cons_eq_insert, Ioo_insert_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a < b
⊢ Ico a b = cons a (Ioo a b) (_ : ¬a ∈ Ioo a b)
[PROOFSTEP]
classical rw [cons_eq_insert, Ioo_insert_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a b c : α
h : a < b
⊢ Ico a b = cons a (Ioo a b) (_ : ¬a ∈ Ioo a b)
[PROOFSTEP]
rw [cons_eq_insert, Ioo_insert_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c a b : α
inst✝ : DecidablePred fun x => x ≤ a
hab : a < b
⊢ filter (fun x => x ≤ a) (Ico a b) = {a}
[PROOFSTEP]
ext x
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c a b : α
inst✝ : DecidablePred fun x => x ≤ a
hab : a < b
x : α
⊢ x ∈ filter (fun x => x ≤ a) (Ico a b) ↔ x ∈ {a}
[PROOFSTEP]
rw [mem_filter, mem_Ico, mem_singleton, and_right_comm, ← le_antisymm_iff, eq_comm]
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrder α
a✝ b✝ c a b : α
inst✝ : DecidablePred fun x => x ≤ a
hab : a < b
x : α
⊢ x = a ∧ x < b ↔ x = a
[PROOFSTEP]
exact and_iff_left_of_imp fun h => h.le.trans_lt hab
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Ico a b) = card (Icc a b) - 1
[PROOFSTEP]
classical
by_cases h : a ≤ b
· rw [Icc_eq_cons_Ico h, card_cons]
exact (Nat.add_sub_cancel _ _).symm
· rw [Ico_eq_empty fun h' => h h'.le, Icc_eq_empty h, card_empty, zero_tsub]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Ico a b) = card (Icc a b) - 1
[PROOFSTEP]
by_cases h : a ≤ b
[GOAL]
case pos
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : a ≤ b
⊢ card (Ico a b) = card (Icc a b) - 1
[PROOFSTEP]
rw [Icc_eq_cons_Ico h, card_cons]
[GOAL]
case pos
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : a ≤ b
⊢ card (Ico a b) = card (Ico a b) + 1 - 1
[PROOFSTEP]
exact (Nat.add_sub_cancel _ _).symm
[GOAL]
case neg
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : ¬a ≤ b
⊢ card (Ico a b) = card (Icc a b) - 1
[PROOFSTEP]
rw [Ico_eq_empty fun h' => h h'.le, Icc_eq_empty h, card_empty, zero_tsub]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Ioo a b) = card (Ico a b) - 1
[PROOFSTEP]
classical
by_cases h : a < b
· rw [Ico_eq_cons_Ioo h, card_cons]
exact (Nat.add_sub_cancel _ _).symm
· rw [Ioo_eq_empty h, Ico_eq_empty h, card_empty, zero_tsub]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Ioo a b) = card (Ico a b) - 1
[PROOFSTEP]
by_cases h : a < b
[GOAL]
case pos
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : a < b
⊢ card (Ioo a b) = card (Ico a b) - 1
[PROOFSTEP]
rw [Ico_eq_cons_Ioo h, card_cons]
[GOAL]
case pos
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : a < b
⊢ card (Ioo a b) = card (Ioo a b) + 1 - 1
[PROOFSTEP]
exact (Nat.add_sub_cancel _ _).symm
[GOAL]
case neg
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
h : ¬a < b
⊢ card (Ioo a b) = card (Ico a b) - 1
[PROOFSTEP]
rw [Ioo_eq_empty h, Ico_eq_empty h, card_empty, zero_tsub]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Ioo a b) = card (Icc a b) - 2
[PROOFSTEP]
rw [card_Ioo_eq_card_Ico_sub_one, card_Ico_eq_card_Icc_sub_one]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ c a b : α
⊢ card (Icc a b) - 1 - 1 = card (Icc a b) - 2
[PROOFSTEP]
rfl
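-- A minimal check over ℕ of the cardinality identity just traced (a sketch
-- assuming Mathlib is imported; `decide` evaluates both finite sides):
example : (Finset.Ioo 1 5).card = (Finset.Icc 1 5).card - 2 := by decide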
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : DecidableEq α
a : α
⊢ erase (Ici a) a = Ioi a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : DecidableEq α
a a✝ : α
⊢ a✝ ∈ erase (Ici a) a ↔ a✝ ∈ Ioi a
[PROOFSTEP]
simp_rw [Finset.mem_erase, mem_Ici, mem_Ioi, lt_iff_le_and_ne, and_comm, ne_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : DecidableEq α
a : α
⊢ insert a (Ioi a) = Ici a
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : DecidableEq α
a a✝ : α
⊢ a✝ ∈ insert a (Ioi a) ↔ a✝ ∈ Ici a
[PROOFSTEP]
simp_rw [Finset.mem_insert, mem_Ici, mem_Ioi, le_iff_lt_or_eq, or_comm, eq_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderTop α
a : α
⊢ Ici a = cons a (Ioi a) (_ : ¬a ∈ Ioi a)
[PROOFSTEP]
classical rw [cons_eq_insert, Ioi_insert]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderTop α
a : α
⊢ Ici a = cons a (Ioi a) (_ : ¬a ∈ Ioi a)
[PROOFSTEP]
rw [cons_eq_insert, Ioi_insert]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderTop α
a : α
⊢ card (Ioi a) = card (Ici a) - 1
[PROOFSTEP]
rw [Ici_eq_cons_Ioi, card_cons, add_tsub_cancel_right]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderBot α
inst✝ : DecidableEq α
b : α
⊢ erase (Iic b) b = Iio b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderBot α
inst✝ : DecidableEq α
b a✝ : α
⊢ a✝ ∈ erase (Iic b) b ↔ a✝ ∈ Iio b
[PROOFSTEP]
simp_rw [Finset.mem_erase, mem_Iic, mem_Iio, lt_iff_le_and_ne, and_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderBot α
inst✝ : DecidableEq α
b : α
⊢ insert b (Iio b) = Iic b
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝² : PartialOrder α
inst✝¹ : LocallyFiniteOrderBot α
inst✝ : DecidableEq α
b a✝ : α
⊢ a✝ ∈ insert b (Iio b) ↔ a✝ ∈ Iic b
[PROOFSTEP]
simp_rw [Finset.mem_insert, mem_Iic, mem_Iio, le_iff_lt_or_eq, or_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderBot α
b : α
⊢ Iic b = cons b (Iio b) (_ : ¬b ∈ Iio b)
[PROOFSTEP]
classical rw [cons_eq_insert, Iio_insert]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderBot α
b : α
⊢ Iic b = cons b (Iio b) (_ : ¬b ∈ Iio b)
[PROOFSTEP]
rw [cons_eq_insert, Iio_insert]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : PartialOrder α
inst✝ : LocallyFiniteOrderBot α
a : α
⊢ card (Iio a) = card (Iic a) - 1
[PROOFSTEP]
rw [Iic_eq_cons_Iio, card_cons, add_tsub_cancel_right]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a b a₁ b₁ a₂ b₂ : α
h : a₁ < b₁
⊢ Ico a₁ b₁ ⊆ Ico a₂ b₂ ↔ a₂ ≤ a₁ ∧ b₁ ≤ b₂
[PROOFSTEP]
rw [← coe_subset, coe_Ico, coe_Ico, Set.Ico_subset_Ico_iff h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
hab : a ≤ b
hbc : b ≤ c
⊢ Ico a b ∪ Ico b c = Ico a c
[PROOFSTEP]
rw [← coe_inj, coe_union, coe_Ico, coe_Ico, coe_Ico, Set.Ico_union_Ico_eq_Ico hab hbc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h₁ : a ≤ b
h₂ : b ≤ c
⊢ Ioc a b ∪ Ioc b c = Ioc a c
[PROOFSTEP]
rw [← coe_inj, coe_union, coe_Ioc, coe_Ioc, coe_Ioc, Set.Ioc_union_Ioc_eq_Ioc h₁ h₂]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ Ico a c ⊆ Ico a b ∪ Ico b c
[PROOFSTEP]
rw [← coe_subset, coe_union, coe_Ico, coe_Ico, coe_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ Set.Ico a c ⊆ Set.Ico a b ∪ Set.Ico b c
[PROOFSTEP]
exact Set.Ico_subset_Ico_union_Ico
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c d : α
hcb : c ≤ b
had : a ≤ d
⊢ Ico a b ∪ Ico c d = Ico (min a c) (max b d)
[PROOFSTEP]
rw [← coe_inj, coe_union, coe_Ico, coe_Ico, coe_Ico, Set.Ico_union_Ico' hcb had]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c d : α
h₁ : min a b ≤ max c d
h₂ : min c d ≤ max a b
⊢ Ico a b ∪ Ico c d = Ico (min a c) (max b d)
[PROOFSTEP]
rw [← coe_inj, coe_union, coe_Ico, coe_Ico, coe_Ico, Set.Ico_union_Ico h₁ h₂]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c d : α
⊢ Ico a b ∩ Ico c d = Ico (max a c) (min b d)
[PROOFSTEP]
rw [← coe_inj, coe_inter, coe_Ico, coe_Ico, coe_Ico, ← inf_eq_min, ← sup_eq_max, Set.Ico_inter_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
cases le_total b c with
| inl h => rw [Ico_filter_lt_of_right_le h, min_eq_left h]
| inr h => rw [Ico_filter_lt_of_le_right h, min_eq_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
x✝ : b ≤ c ∨ c ≤ b
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
cases le_total b c with
| inl h => rw [Ico_filter_lt_of_right_le h, min_eq_left h]
| inr h => rw [Ico_filter_lt_of_le_right h, min_eq_right h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : b ≤ c
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
| inl h => rw [Ico_filter_lt_of_right_le h, min_eq_left h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : b ≤ c
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
rw [Ico_filter_lt_of_right_le h, min_eq_left h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
| inr h => rw [Ico_filter_lt_of_le_right h, min_eq_right h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
⊢ filter (fun x => x < c) (Ico a b) = Ico a (min b c)
[PROOFSTEP]
rw [Ico_filter_lt_of_le_right h, min_eq_right h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
cases le_total a c with
| inl h => rw [Ico_filter_le_of_left_le h, max_eq_right h]
| inr h => rw [Ico_filter_le_of_le_left h, max_eq_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
x✝ : a ≤ c ∨ c ≤ a
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
cases le_total a c with
| inl h => rw [Ico_filter_le_of_left_le h, max_eq_right h]
| inr h => rw [Ico_filter_le_of_le_left h, max_eq_left h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
| inl h => rw [Ico_filter_le_of_left_le h, max_eq_right h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
rw [Ico_filter_le_of_left_le h, max_eq_right h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ a
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
| inr h => rw [Ico_filter_le_of_le_left h, max_eq_left h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ a
⊢ filter (fun x => c ≤ x) (Ico a b) = Ico (max a c) b
[PROOFSTEP]
rw [Ico_filter_le_of_le_left h, max_eq_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ filter (fun x => x < c) (Ioo a b) = Ioo a (min b c)
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝¹ b✝ a b c a✝ : α
⊢ a✝ ∈ filter (fun x => x < c) (Ioo a b) ↔ a✝ ∈ Ioo a (min b c)
[PROOFSTEP]
simp [and_assoc]
[GOAL]
ι : Type u_1
α✝ : Type u_2
inst✝³ : LinearOrder α✝
inst✝² : LocallyFiniteOrder α✝
a✝ b✝ : α✝
α : Type u_3
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrderBot α
a b : α
⊢ filter (fun x => x < b) (Iio a) = Iio (min a b)
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α✝ : Type u_2
inst✝³ : LinearOrder α✝
inst✝² : LocallyFiniteOrder α✝
a✝¹ b✝ : α✝
α : Type u_3
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrderBot α
a b a✝ : α
⊢ a✝ ∈ filter (fun x => x < b) (Iio a) ↔ a✝ ∈ Iio (min a b)
[PROOFSTEP]
simp [and_assoc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
cases le_total a c with
| inl h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, max_eq_right h, and_right_comm, not_and, not_lt]
exact and_congr_left' ⟨fun hx => hx.2 hx.1, fun hx => ⟨h.trans hx, fun _ => hx⟩⟩
| inr h => rw [Ico_eq_empty_of_le h, sdiff_empty, max_eq_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
x✝ : a ≤ c ∨ c ≤ a
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
cases le_total a c with
| inl h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, max_eq_right h, and_right_comm, not_and, not_lt]
exact and_congr_left' ⟨fun hx => hx.2 hx.1, fun hx => ⟨h.trans hx, fun _ => hx⟩⟩
| inr h => rw [Ico_eq_empty_of_le h, sdiff_empty, max_eq_left h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
| inl h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, max_eq_right h, and_right_comm, not_and, not_lt]
exact and_congr_left' ⟨fun hx => hx.2 hx.1, fun hx => ⟨h.trans hx, fun _ => hx⟩⟩
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
ext x
[GOAL]
case inl.a
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
x : α
⊢ x ∈ Ico a b \ Ico a c ↔ x ∈ Ico (max a c) b
[PROOFSTEP]
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, max_eq_right h, and_right_comm, not_and, not_lt]
[GOAL]
case inl.a
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : a ≤ c
x : α
⊢ (a ≤ x ∧ (a ≤ x → c ≤ x)) ∧ x < b ↔ c ≤ x ∧ x < b
[PROOFSTEP]
exact and_congr_left' ⟨fun hx => hx.2 hx.1, fun hx => ⟨h.trans hx, fun _ => hx⟩⟩
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ a
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
| inr h => rw [Ico_eq_empty_of_le h, sdiff_empty, max_eq_left h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ a
⊢ Ico a b \ Ico a c = Ico (max a c) b
[PROOFSTEP]
rw [Ico_eq_empty_of_le h, sdiff_empty, max_eq_left h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
cases le_total b c with
| inl h => rw [Ico_eq_empty_of_le h, sdiff_empty, min_eq_left h]
| inr h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, min_eq_right h, and_assoc, not_and', not_le]
exact and_congr_right' ⟨fun hx => hx.2 hx.1, fun hx => ⟨hx.trans_le h, fun _ => hx⟩⟩
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
x✝ : b ≤ c ∨ c ≤ b
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
cases le_total b c with
| inl h => rw [Ico_eq_empty_of_le h, sdiff_empty, min_eq_left h]
| inr h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, min_eq_right h, and_assoc, not_and', not_le]
exact and_congr_right' ⟨fun hx => hx.2 hx.1, fun hx => ⟨hx.trans_le h, fun _ => hx⟩⟩
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : b ≤ c
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
| inl h => rw [Ico_eq_empty_of_le h, sdiff_empty, min_eq_left h]
[GOAL]
case inl
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : b ≤ c
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
rw [Ico_eq_empty_of_le h, sdiff_empty, min_eq_left h]
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
| inr h =>
ext x
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, min_eq_right h, and_assoc, not_and', not_le]
exact and_congr_right' ⟨fun hx => hx.2 hx.1, fun hx => ⟨hx.trans_le h, fun _ => hx⟩⟩
[GOAL]
case inr
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
⊢ Ico a b \ Ico c b = Ico a (min b c)
[PROOFSTEP]
ext x
[GOAL]
case inr.a
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
x : α
⊢ x ∈ Ico a b \ Ico c b ↔ x ∈ Ico a (min b c)
[PROOFSTEP]
rw [mem_sdiff, mem_Ico, mem_Ico, mem_Ico, min_eq_right h, and_assoc, not_and', not_le]
[GOAL]
case inr.a
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a✝ b✝ a b c : α
h : c ≤ b
x : α
⊢ a ≤ x ∧ x < b ∧ (x < b → x < c) ↔ a ≤ x ∧ x < c
[PROOFSTEP]
exact and_congr_right' ⟨fun hx => hx.2 hx.1, fun hx => ⟨hx.trans_le h, fun _ => hx⟩⟩
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : LinearOrder α
inst✝² : Fintype α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : LocallyFiniteOrderBot α
a : α
⊢ disjUnion (Ioi a) (Iio a) (_ : Disjoint (Ioi a) (Iio a)) = {a}ᶜ
[PROOFSTEP]
ext
[GOAL]
case a
ι : Type u_1
α : Type u_2
inst✝³ : LinearOrder α
inst✝² : Fintype α
inst✝¹ : LocallyFiniteOrderTop α
inst✝ : LocallyFiniteOrderBot α
a a✝ : α
⊢ a✝ ∈ disjUnion (Ioi a) (Iio a) (_ : Disjoint (Ioi a) (Iio a)) ↔ a✝ ∈ {a}ᶜ
[PROOFSTEP]
simp [eq_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : a ≤ b
⊢ [[a, b]] = Icc a b
[PROOFSTEP]
rw [uIcc, inf_eq_left.2 h, sup_eq_right.2 h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h : b ≤ a
⊢ [[a, b]] = Icc b a
[PROOFSTEP]
rw [uIcc, inf_eq_right.2 h, sup_eq_left.2 h]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c x a b : α
⊢ [[a, b]] = [[b, a]]
[PROOFSTEP]
rw [uIcc, uIcc, inf_comm, sup_comm]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ [[a, a]] = {a}
[PROOFSTEP]
simp [uIcc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h₁ : a₁ ∈ [[a₂, b₂]]
h₂ : b₁ ∈ [[a₂, b₂]]
⊢ [[a₁, b₁]] ⊆ [[a₂, b₂]]
[PROOFSTEP]
rw [mem_uIcc] at h₁ h₂
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
h₁ : a₂ ⊓ b₂ ≤ a₁ ∧ a₁ ≤ a₂ ⊔ b₂
h₂ : a₂ ⊓ b₂ ≤ b₁ ∧ b₁ ≤ a₂ ⊔ b₂
⊢ [[a₁, b₁]] ⊆ [[a₂, b₂]]
[PROOFSTEP]
exact Icc_subset_Icc (_root_.le_inf h₁.1 h₂.1) (_root_.sup_le h₁.2 h₂.2)
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₁ ∈ Icc a₂ b₂
hb : b₁ ∈ Icc a₂ b₂
⊢ [[a₁, b₁]] ⊆ Icc a₂ b₂
[PROOFSTEP]
rw [mem_Icc] at ha hb
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : Lattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
ha : a₂ ≤ a₁ ∧ a₁ ≤ b₂
hb : a₂ ≤ b₁ ∧ b₁ ≤ b₂
⊢ [[a₁, b₁]] ⊆ Icc a₂ b₂
[PROOFSTEP]
exact Icc_subset_Icc (_root_.le_inf ha.1 hb.1) (_root_.sup_le ha.2 hb.2)
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a ∈ [[b, c]] → b ∈ [[a, c]] → a = b
[PROOFSTEP]
simp_rw [mem_uIcc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ b ⊓ c ≤ a ∧ a ≤ b ⊔ c → a ⊓ c ≤ b ∧ b ≤ a ⊔ c → a = b
[PROOFSTEP]
exact Set.eq_of_mem_uIcc_of_mem_uIcc
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ b ∈ [[a, c]] → c ∈ [[a, b]] → b = c
[PROOFSTEP]
simp_rw [mem_uIcc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a ⊓ c ≤ b ∧ b ≤ a ⊔ c → a ⊓ b ≤ c ∧ c ≤ a ⊔ b → b = c
[PROOFSTEP]
exact Set.eq_of_mem_uIcc_of_mem_uIcc'
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c✝ x a b c : α
h : (fun b => [[b, a]]) b = (fun b => [[b, a]]) c
⊢ b = c
[PROOFSTEP]
rw [ext_iff] at h
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b✝ b₁ b₂ c✝ x a b c : α
h : ∀ (a_1 : α), a_1 ∈ (fun b => [[b, a]]) b ↔ a_1 ∈ (fun b => [[b, a]]) c
⊢ b = c
[PROOFSTEP]
exact eq_of_mem_uIcc_of_mem_uIcc ((h _).1 left_mem_uIcc) ((h _).2 left_mem_uIcc)
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : DistribLattice α
inst✝ : LocallyFiniteOrder α
a✝ a₁ a₂ b b₁ b₂ c x a : α
⊢ Injective (uIcc a)
[PROOFSTEP]
simpa only [uIcc_comm] using uIcc_injective_right a
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ ↑[[a, b]] = ↑(Icc a b ∪ Icc b a)
[PROOFSTEP]
push_cast
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.uIcc a b = Set.Icc a b ∪ Set.Icc b a
[PROOFSTEP]
exact Set.uIcc_eq_union
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a ∈ [[b, c]] ↔ b ≤ a ∧ a ≤ c ∨ c ≤ a ∧ a ≤ b
[PROOFSTEP]
simp [uIcc_eq_union]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ c < a → c < b → ¬c ∈ [[a, b]]
[PROOFSTEP]
rw [mem_uIcc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ c < a → c < b → ¬(a ⊓ b ≤ c ∧ c ≤ a ⊔ b)
[PROOFSTEP]
exact Set.not_mem_uIcc_of_lt
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a < c → b < c → ¬c ∈ [[a, b]]
[PROOFSTEP]
rw [mem_uIcc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ a < c → b < c → ¬(a ⊓ b ≤ c ∧ c ≤ a ⊔ b)
[PROOFSTEP]
exact Set.not_mem_uIcc_of_gt
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ ↑[[a, c]] ⊆ ↑([[a, b]] ∪ [[b, c]])
[PROOFSTEP]
push_cast
[GOAL]
ι : Type u_1
α : Type u_2
inst✝¹ : LinearOrder α
inst✝ : LocallyFiniteOrder α
a a₁ a₂ b b₁ b₂ c x : α
⊢ Set.uIcc a c ⊆ Set.uIcc a b ∪ Set.uIcc b c
[PROOFSTEP]
exact Set.uIcc_subset_uIcc_union_uIcc
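-- A minimal check over ℕ of the `uIcc` lemmas traced above (a sketch assuming
-- Mathlib is imported; `Finset.uIcc a b` unfolds to `Finset.Icc (a ⊓ b) (a ⊔ b)`):
example : Finset.uIcc 5 2 = Finset.Icc 2 5 := by decide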
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addLeftEmbedding c) (Icc a b) = Icc (c + a) (c + b)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Icc, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addLeftEmbedding c) '' Set.Icc a b = Set.Icc (c + a) (c + b)
[PROOFSTEP]
exact Set.image_const_add_Icc _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addRightEmbedding c) (Icc a b) = Icc (a + c) (b + c)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Icc, coe_Icc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addRightEmbedding c) '' Set.Icc a b = Set.Icc (a + c) (b + c)
[PROOFSTEP]
exact Set.image_add_const_Icc _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addLeftEmbedding c) (Ico a b) = Ico (c + a) (c + b)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ico, coe_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addLeftEmbedding c) '' Set.Ico a b = Set.Ico (c + a) (c + b)
[PROOFSTEP]
exact Set.image_const_add_Ico _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addRightEmbedding c) (Ico a b) = Ico (a + c) (b + c)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ico, coe_Ico]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addRightEmbedding c) '' Set.Ico a b = Set.Ico (a + c) (b + c)
[PROOFSTEP]
exact Set.image_add_const_Ico _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addLeftEmbedding c) (Ioc a b) = Ioc (c + a) (c + b)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ioc, coe_Ioc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addLeftEmbedding c) '' Set.Ioc a b = Set.Ioc (c + a) (c + b)
[PROOFSTEP]
exact Set.image_const_add_Ioc _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addRightEmbedding c) (Ioc a b) = Ioc (a + c) (b + c)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ioc, coe_Ioc]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addRightEmbedding c) '' Set.Ioc a b = Set.Ioc (a + c) (b + c)
[PROOFSTEP]
exact Set.image_add_const_Ioc _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addLeftEmbedding c) (Ioo a b) = Ioo (c + a) (c + b)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ioo, coe_Ioo]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addLeftEmbedding c) '' Set.Ioo a b = Set.Ioo (c + a) (c + b)
[PROOFSTEP]
exact Set.image_const_add_Ioo _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ map (addRightEmbedding c) (Ioo a b) = Ioo (a + c) (b + c)
[PROOFSTEP]
rw [← coe_inj, coe_map, coe_Ioo, coe_Ioo]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
a b c : α
⊢ ↑(addRightEmbedding c) '' Set.Ioo a b = Set.Ioo (a + c) (b + c)
[PROOFSTEP]
exact Set.image_add_const_Ioo _ _ _
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Icc a b) = Icc (c + a) (c + b)
[PROOFSTEP]
rw [← map_add_left_Icc, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Icc a b) = image (↑(addLeftEmbedding c)) (Icc a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ico a b) = Ico (c + a) (c + b)
[PROOFSTEP]
rw [← map_add_left_Ico, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ico a b) = image (↑(addLeftEmbedding c)) (Ico a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ioc a b) = Ioc (c + a) (c + b)
[PROOFSTEP]
rw [← map_add_left_Ioc, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ioc a b) = image (↑(addLeftEmbedding c)) (Ioc a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ioo a b) = Ioo (c + a) (c + b)
[PROOFSTEP]
rw [← map_add_left_Ioo, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image ((fun x x_1 => x + x_1) c) (Ioo a b) = image (↑(addLeftEmbedding c)) (Ioo a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Icc a b) = Icc (a + c) (b + c)
[PROOFSTEP]
rw [← map_add_right_Icc, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Icc a b) = image (↑(addRightEmbedding c)) (Icc a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ico a b) = Ico (a + c) (b + c)
[PROOFSTEP]
rw [← map_add_right_Ico, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ico a b) = image (↑(addRightEmbedding c)) (Ico a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ioc a b) = Ioc (a + c) (b + c)
[PROOFSTEP]
rw [← map_add_right_Ioc, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ioc a b) = image (↑(addRightEmbedding c)) (Ioc a b)
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ioo a b) = Ioo (a + c) (b + c)
[PROOFSTEP]
rw [← map_add_right_Ioo, map_eq_image]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝³ : OrderedCancelAddCommMonoid α
inst✝² : ExistsAddOfLE α
inst✝¹ : LocallyFiniteOrder α
inst✝ : DecidableEq α
a b c : α
⊢ image (fun x => x + c) (Ioo a b) = image (↑(addRightEmbedding c)) (Ioo a b)
[PROOFSTEP]
rfl
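-- A minimal check over ℕ of the translation lemmas traced above (a sketch
-- assuming Mathlib is imported; `image (· + c)` agrees pointwise with
-- `map (addRightEmbedding c)`):
example : (Finset.Icc 2 4).image (· + 3) = Finset.Icc 5 7 := by decide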
[GOAL]
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∏ i : ι, ∏ j in Ioi i, f j i * f i j = ∏ i : ι, ∏ j in {i}ᶜ, f j i
[PROOFSTEP]
simp_rw [← Ioi_disjUnion_Iio, prod_disjUnion, prod_mul_distrib]
[GOAL]
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ (∏ x : ι, ∏ x_1 in Ioi x, f x_1 x) * ∏ x : ι, ∏ x_1 in Ioi x, f x x_1 =
(∏ x : ι, ∏ x_1 in Ioi x, f x_1 x) * ∏ x : ι, ∏ x_1 in Iio x, f x_1 x
[PROOFSTEP]
congr 1
[GOAL]
case e_a
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∏ x : ι, ∏ x_1 in Ioi x, f x x_1 = ∏ x : ι, ∏ x_1 in Iio x, f x_1 x
[PROOFSTEP]
rw [prod_sigma', prod_sigma']
[GOAL]
case e_a
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∏ x in Finset.sigma univ fun x => Ioi x, f x.fst x.snd = ∏ x in Finset.sigma univ fun x => Iio x, f x.snd x.fst
[PROOFSTEP]
refine' prod_bij' (fun i _ => ⟨i.2, i.1⟩) _ _ (fun i _ => ⟨i.2, i.1⟩) _ _ _
[GOAL]
case e_a.refine'_1
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∀ (a : (_ : ι) × ι) (ha : a ∈ Finset.sigma univ fun x => Ioi x),
(fun i x => { fst := i.snd, snd := i.fst }) a ha ∈ Finset.sigma univ fun x => Iio x
[PROOFSTEP]
simp
[GOAL]
case e_a.refine'_2
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∀ (a : (_ : ι) × ι) (ha : a ∈ Finset.sigma univ fun x => Ioi x),
f a.fst a.snd =
f ((fun i x => { fst := i.snd, snd := i.fst }) a ha).snd ((fun i x => { fst := i.snd, snd := i.fst }) a ha).fst
[PROOFSTEP]
simp
[GOAL]
case e_a.refine'_3
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∀ (a : (_ : ι) × ι) (ha : a ∈ Finset.sigma univ fun x => Iio x),
(fun i x => { fst := i.snd, snd := i.fst }) a ha ∈ Finset.sigma univ fun x => Ioi x
[PROOFSTEP]
simp
[GOAL]
case e_a.refine'_4
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∀ (a : (_ : ι) × ι) (ha : a ∈ Finset.sigma univ fun x => Ioi x),
(fun i x => { fst := i.snd, snd := i.fst }) ((fun i x => { fst := i.snd, snd := i.fst }) a ha)
(_ : { fst := a.snd, snd := a.fst } ∈ Finset.sigma univ fun x => Iio x) =
a
[PROOFSTEP]
simp
[GOAL]
case e_a.refine'_5
ι : Type u_1
α : Type u_2
inst✝⁴ : Fintype ι
inst✝³ : LinearOrder ι
inst✝² : LocallyFiniteOrderTop ι
inst✝¹ : LocallyFiniteOrderBot ι
inst✝ : CommMonoid α
f : ι → ι → α
⊢ ∀ (a : (_ : ι) × ι) (ha : a ∈ Finset.sigma univ fun x => Iio x),
(fun i x => { fst := i.snd, snd := i.fst }) ((fun i x => { fst := i.snd, snd := i.fst }) a ha)
(_ : { fst := a.snd, snd := a.fst } ∈ Finset.sigma univ fun x => Ioi x) =
a
[PROOFSTEP]
simp
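-- Closing sketch: two of the simplest interval identities traced in this file,
-- checked over ℕ (assuming Mathlib is imported; lemma names follow Mathlib):
example : Finset.Icc (3 : ℕ) 3 = {3} := Finset.Icc_self 3
example : Finset.Ico (2 : ℕ) 5 = {2, 3, 4} := by decide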
|
theorem Cauchy_integral_formula_global:
  assumes S: "open S"
    and holf: "f holomorphic_on S"
    and z: "z \<in> S"
    and vpg: "valid_path \<gamma>"
    and pasz: "path_image \<gamma> \<subseteq> S - {z}"
    and loop: "pathfinish \<gamma> = pathstart \<gamma>"
    and zero: "\<And>w. w \<notin> S \<Longrightarrow> winding_number \<gamma> w = 0"
  shows "((\<lambda>w. f w / (w - z)) has_contour_integral (2*pi * \<i> * winding_number \<gamma> z * f z)) \<gamma>"
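(* In conventional notation, writing n(gamma, z) for winding_number gamma z,
   the conclusion reads: the contour integral of f(w)/(w - z) along gamma
   equals 2*pi*i * n(gamma, z) * f(z). *)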
|
from __future__ import division
import os
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
from configparser import ConfigParser
def extract_features_wotarget_14_dlstream(inifile):
"""Adapted version of extract_features_wotargat_14bp to work with fast feature extraction code within DLStream
Reduced feature selection based on recursive feature elimination for social behavior between 2 mice (anogenital approach, attack, etc.)
"""
configFile = str(inifile)
config = ConfigParser()
config.read(configFile)
csv_dir = config.get('General settings', 'csv_path')
csv_dir_in = os.path.join(csv_dir, 'outlier_corrected_movement_location')
csv_dir_out = os.path.join(csv_dir, 'features_extracted')
vidInfPath = config.get('General settings', 'project_path')
vidInfPath = os.path.join(vidInfPath, 'logs')
vidInfPath = os.path.join(vidInfPath, 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
    # cast video names to str so they match the file-name lookup below
vidinfDf.Video = vidinfDf.Video.astype('str')
if not os.path.exists(csv_dir_out):
os.makedirs(csv_dir_out)
def euclidean_distance(x1,x2,y1,y2):
result = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
return result
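    # euclidean_distance is vectorized over numpy arrays, e.g.
    #   euclidean_distance(np.array([0., 3.]), np.array([0., 0.]),
    #                      np.array([0., 4.]), np.array([0., 0.]))
    # returns array([0., 5.]) (a 3-4-5 triangle in the second slot).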
filesFound = []
roll_windows = []
roll_windows_values = [2, 5, 6, 7.5, 15]
loopy = 0
########### FIND CSV FILES ###########
for i in os.listdir(csv_dir_in):
        if ".csv" in i:
fname = os.path.join(csv_dir_in, i)
filesFound.append(fname)
print('Extracting features from ' + str(len(filesFound)) + ' files...')
########### CREATE PD FOR RAW DATA AND PD FOR MOVEMENT BETWEEN FRAMES ###########
for i in filesFound:
currentFile = i
currVidName = os.path.basename(currentFile)
currVidName = currVidName.replace('.csv', '')
# get current pixels/mm
currVideoSettings = vidinfDf.loc[vidinfDf['Video'] == currVidName]
        try:
            currPixPerMM = float(currVideoSettings['pixels/mm'])
        except TypeError:
            print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
            continue
        try:
            fps = float(currVideoSettings['fps'])
        except TypeError:
            print('Error: no fps entry found for "' + str(currVidName) + '" in video_info.csv.')
continue
print('Processing ' + '"' + str(currVidName) + '".' + ' Fps: ' + str(fps) + ". mm/ppx: " + str(currPixPerMM))
        roll_windows = []  # reset per video so window sizes do not accumulate across files
        for win in roll_windows_values:  # avoid shadowing the file-path loop variable i
            roll_windows.append(int(fps / win))
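        # e.g. at fps = 30 the divisors [2, 5, 6, 7.5, 15] yield rolling-window
        # sizes of [15, 6, 5, 4, 2] frames (integer truncation).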
loopy += 1
columnHeaders = ["Ear_left_1_x", "Ear_left_1_y", "Ear_left_1_p", "Ear_right_1_x", "Ear_right_1_y",
"Ear_right_1_p", "Nose_1_x", "Nose_1_y", "Nose_1_p", "Center_1_x", "Center_1_y", "Center_1_p",
"Lat_left_1_x", "Lat_left_1_y",
"Lat_left_1_p", "Lat_right_1_x", "Lat_right_1_y", "Lat_right_1_p", "Tail_base_1_x",
"Tail_base_1_y", "Tail_base_1_p",
"Ear_left_2_x",
"Ear_left_2_y", "Ear_left_2_p", "Ear_right_2_x", "Ear_right_2_y", "Ear_right_2_p",
"Nose_2_x", "Nose_2_y", "Nose_2_p", "Center_2_x", "Center_2_y", "Center_2_p", "Lat_left_2_x",
"Lat_left_2_y",
"Lat_left_2_p", "Lat_right_2_x", "Lat_right_2_y", "Lat_right_2_p", "Tail_base_2_x",
"Tail_base_2_y", "Tail_base_2_p"]
csv_df = pd.read_csv(currentFile, names=columnHeaders, low_memory=False)
csv_df = csv_df.fillna(0)
csv_df = csv_df.drop(csv_df.index[[0]])
csv_df = csv_df.apply(pd.to_numeric)
csv_df = csv_df.reset_index()
csv_df = csv_df.reset_index(drop=True)
        ########### CREATE LISTS FOR HULL SUMMARY VALUES ###########
M1_hull_mean_euclidean_list = []
M2_hull_mean_euclidean_list = []
#print('Creating shifted dataframes for distance calculations')
########### CREATE SHIFTED DATAFRAME FOR DISTANCE CALCULATIONS ###########################################
csv_df_shifted = csv_df.shift(periods=1)
        csv_df_shifted = csv_df_shifted.rename(
            columns={col: col + '_shifted' for col in columnHeaders})
csv_df_combined = pd.concat([csv_df,csv_df_shifted],axis=1,join='inner')
csv_df_combined = csv_df_combined.fillna(0)
csv_df_combined = csv_df_combined.reset_index(drop=True)
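        # shift(1) pairs every frame with its predecessor, so subtracting the
        # *_shifted coordinates below gives per-frame displacement: a nose at
        # x = 10 in frame t and x = 13 in frame t+1 contributes |13 - 10| = 3 px
        # before the mm conversion.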
#print('Calculating euclidean distances...')
########### EUCLIDEAN DISTANCES ###########################################
# within mice
eucl_distance_dict_wm = dict(
nose_to_tail=('Nose','Tail_base')
,width=('Lat_left','Lat_right')
,Ear_distance=('Ear_right','Ear_left')
)
mice = [1,2]
for mouse in mice:
for distance_measurement,bodyparts in eucl_distance_dict_wm.items():
# skip mouse 2 "ear_distance" measurement
if mouse == 2 and distance_measurement == 'Ear_distance':
continue
x1 = csv_df[f'{bodyparts[0]}_{mouse}_x'].to_numpy()
y1 = csv_df[f'{bodyparts[0]}_{mouse}_y'].to_numpy()
x2 = csv_df[f'{bodyparts[1]}_{mouse}_x'].to_numpy()
y2 = csv_df[f'{bodyparts[1]}_{mouse}_y'].to_numpy()
csv_df[f'Mouse_{mouse}_{distance_measurement}'] = euclidean_distance(x1,x2,y1,y2) / self._currPixPerMM
# between mice
eucl_distance_dict_bm = dict(
Centroid_distance=('Center_1','Center_2')
,Nose_to_nose_distance=('Nose_1','Nose_2')
,M1_Nose_to_M2_lat_left=('Nose_1','Lat_left_2')
,M1_Nose_to_M2_lat_right=('Nose_1','Lat_right_2')
,M2_Nose_to_M1_lat_left=('Nose_2','Lat_left_1')
,M2_Nose_to_M1_lat_right=('Nose_2','Lat_right_1')
,M1_Nose_to_M2_tail_base=('Nose_1','Tail_base_2')
,M2_Nose_to_M1_tail_base=('Nose_2','Tail_base_1')
)
for distance_measurement,bodyparts in eucl_distance_dict_bm.items():
x1 = csv_df[f'{bodyparts[0]}_x'].to_numpy()
y1 = csv_df[f'{bodyparts[0]}_y'].to_numpy()
x2 = csv_df[f'{bodyparts[1]}_x'].to_numpy()
y2 = csv_df[f'{bodyparts[1]}_y'].to_numpy()
csv_df[f'{distance_measurement}'] = euclidean_distance(x1,x2,y1,y2) / self._currPixPerMM
# Movement of each body part between consecutive frames
bp_list = ('Center','Nose','Lat_left','Lat_right','Tail_base','Ear_left','Ear_right')
# map tracked body-part names to the names used in the movement feature columns
bp_feature_names = {'Center': 'centroid', 'Ear_left': 'left_ear', 'Ear_right': 'right_ear',
                    'Lat_left': 'lateral_left', 'Lat_right': 'lateral_right',
                    'Nose': 'nose', 'Tail_base': 'tail_base'}
mice = [1,2]
for mouse in mice:
    for bp in bp_list:
        x1 = csv_df_combined[f'{bp}_{mouse}_x_shifted'].to_numpy()
        y1 = csv_df_combined[f'{bp}_{mouse}_y_shifted'].to_numpy()
        x2 = csv_df_combined[f'{bp}_{mouse}_x'].to_numpy()
        y2 = csv_df_combined[f'{bp}_{mouse}_y'].to_numpy()
        csv_df[f'Movement_mouse_{mouse}_{bp_feature_names[bp]}'] = \
            euclidean_distance(x1,x2,y1,y2) / self._currPixPerMM
# print('Calculating hull variables...')
########### HULL - EUCLIDEAN DISTANCES ###########################################
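# For each frame: mean pairwise euclidean distance between the seven tracked body parts
# of each mouse, as a proxy for animal hull size (all zero entries, in particular the
# diagonal self-distances, are removed before averaging)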
for index,row in csv_df.iterrows():
M1_np_array = np.array(
[[row['Ear_left_1_x'],row["Ear_left_1_y"]],[row['Ear_right_1_x'],row["Ear_right_1_y"]],
[row['Nose_1_x'],row["Nose_1_y"]],[row['Center_1_x'],row["Center_1_y"]],
[row['Lat_left_1_x'],row["Lat_left_1_y"]],[row['Lat_right_1_x'],row["Lat_right_1_y"]],
[row['Tail_base_1_x'],row["Tail_base_1_y"]]]).astype(int)
M2_np_array = np.array(
[[row['Ear_left_2_x'],row["Ear_left_2_y"]],[row['Ear_right_2_x'],row["Ear_right_2_y"]],
[row['Nose_2_x'],row["Nose_2_y"]],[row['Center_2_x'],row["Center_2_y"]],
[row['Lat_left_2_x'],row["Lat_left_2_y"]],[row['Lat_right_2_x'],row["Lat_right_2_y"]],
[row['Tail_base_2_x'],row["Tail_base_2_y"]]]).astype(int)
M1_dist_euclidean = cdist(M1_np_array,M1_np_array,metric='euclidean')
M1_dist_euclidean = M1_dist_euclidean[M1_dist_euclidean != 0]
M1_hull_mean_euclidean = np.mean(M1_dist_euclidean)
M1_hull_mean_euclidean_list.append(M1_hull_mean_euclidean)
M2_dist_euclidean = cdist(M2_np_array,M2_np_array,metric='euclidean')
M2_dist_euclidean = M2_dist_euclidean[M2_dist_euclidean != 0]
M2_hull_mean_euclidean = np.mean(M2_dist_euclidean)
M2_hull_mean_euclidean_list.append(M2_hull_mean_euclidean)
csv_df['M1_mean_euclidean_distance_hull'] = list(
map(lambda x: x / self._currPixPerMM,M1_hull_mean_euclidean_list))
csv_df['M2_mean_euclidean_distance_hull'] = list(
map(lambda x: x / self._currPixPerMM,M2_hull_mean_euclidean_list))
########### COLLAPSED MEASURES ###########################################
# print('Collapsed measures')
csv_df['Total_movement_centroids'] = (
    csv_df['Movement_mouse_1_centroid'] + csv_df['Movement_mouse_2_centroid'])
csv_df['Total_movement_all_bodyparts_M1'] = (
    csv_df['Movement_mouse_1_centroid'] + csv_df['Movement_mouse_1_nose']
    + csv_df['Movement_mouse_1_tail_base'] + csv_df['Movement_mouse_1_left_ear']
    + csv_df['Movement_mouse_1_right_ear'] + csv_df['Movement_mouse_1_lateral_left']
    + csv_df['Movement_mouse_1_lateral_right'])
csv_df['Total_movement_all_bodyparts_M2'] = (
    csv_df['Movement_mouse_2_centroid'] + csv_df['Movement_mouse_2_nose']
    + csv_df['Movement_mouse_2_tail_base'] + csv_df['Movement_mouse_2_left_ear']
    + csv_df['Movement_mouse_2_right_ear'] + csv_df['Movement_mouse_2_lateral_left']
    + csv_df['Movement_mouse_2_lateral_right'])
csv_df['Total_movement_all_bodyparts_both_mice'] = (
    csv_df['Total_movement_all_bodyparts_M1'] + csv_df['Total_movement_all_bodyparts_M2'])
########### CALC ROLLING WINDOWS MEDIANS AND MEANS ###########################################
#print('Calculating rolling windows: means, medians, and sums...')
# full version for distance (keeping style for future adaptation), reduced version for the rest
for num,roll_value in enumerate(roll_windows_values):
parameter_dict1 = dict(Distance='Centroid_distance'
,Mouse1_width='Mouse_1_width'
,Mouse2_width='Mouse_2_width'
,Movement='Total_movement_centroids'
)
# adapted from:
# csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i],
# min_periods=1).median()
for key,clm_name in parameter_dict1.items():
if roll_value != 2 and key != 'Distance':
continue
currentcolname = f'{key}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
currentcolname = f'{key}_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).median()
currentcolname = f'{key}_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
clm_name1 = 'euclidean_distance_hull'
clm_name2 = 'euclid_distances'
measure = 'mean'
if roll_value == 2:
for mouse in mice:
# keeping style for future adaptation
clm_name = f'M{mouse}_{measure}_{clm_name1}'
currentcolname = f'Mouse{mouse}_{measure}_{clm_name2}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
currentcolname = f'Mouse{mouse}_{measure}_{clm_name2}_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).median()
clm_list = (
'Total_movement_all_bodyparts_both_mice'
,'Total_movement_centroids'
)
if roll_value in [2,5]:
for clm_name in clm_list:
currentcolname = clm_name + '_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
currentcolname = clm_name + '_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
if roll_value == 2:
currentcolname = clm_name + '_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],
min_periods=1).median()
parameter_dict2 = dict(
Nose_movement='nose'
,Centroid_movement='centroid'
,Tail_base_movement='tail_base'
)
if roll_value == 2:
for mouse in mice:
for key,bp in parameter_dict2.items():
clm_name = f'Movement_mouse_{mouse}_{bp.lower()}'
currentcolname = f'{key}_M{mouse}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
currentcolname = f'{key}_M{mouse}_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],
min_periods=1).median()
currentcolname = f'{key}_M{mouse}_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
if roll_value == 2:
for mouse in mice:
# Tail_base_movement
clm_name = f'Movement_mouse_{mouse}_tail_base'
# median
currentcolname = f'Tail_base_movement_M{mouse}_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).median()
# only for 2nd mouse
if mouse == 2:
# mean
currentcolname = f'Tail_base_movement_M{mouse}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
# sum
currentcolname = f'Tail_base_movement_M{mouse}_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
# Centroid_movement
clm_name = f'Movement_mouse_{mouse}_centroid'
# mean
currentcolname = f'Centroid_movement_M{mouse}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
# sum
currentcolname = f'Centroid_movement_M{mouse}_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
# Nose_movement
clm_name = f'Movement_mouse_{mouse}_nose'
# mean
currentcolname = f'Nose_movement_M{mouse}_mean_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).mean()
# sum
currentcolname = f'Nose_movement_M{mouse}_sum_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).sum()
# only for 1st mouse
if mouse == 1:
# median
currentcolname = f'Nose_movement_M{mouse}_median_' + str(roll_value)
csv_df[currentcolname] = csv_df[clm_name].rolling(roll_windows[num],min_periods=1).median()
########### BODY PARTS RELATIVE TO EACH OTHER ##################
################# EMPTY #########################################
#
# ########### ANGLES ###########################################
# not used in this version
# ########### DEVIATIONS ###########################################
# not used in this version
# ########### PERCENTILE RANK ###########################################
# not used in this version
# ########### CALCULATE STRAIGHTNESS OF POLYLINE PATH: tortuosity ###########################################
# not used in this version
# ########### CALC THE NUMBER OF LOW PROBABILITY DETECTIONS & TOTAL PROBABILITY VALUE FOR ROW###########################################
# not used in this version
########### DROP CALCULATION COLUMNS THAT ARE NOT USED IN THE FINAL VERSION#########
clms_to_drop = ['Nose_1_x','Nose_1_y','Ear_left_1_x','Ear_left_1_y','Ear_right_1_x','Ear_right_1_y',
'Center_1_x','Center_1_y','Lat_left_1_x','Lat_left_1_y','Lat_right_1_x','Lat_right_1_y',
'Tail_base_1_x','Tail_base_1_y','Nose_2_x','Nose_2_y','Ear_left_2_x','Ear_left_2_y',
'Ear_right_2_x','Ear_right_2_y','Center_2_x','Center_2_y','Lat_left_2_x','Lat_left_2_y',
'Lat_right_2_x','Lat_right_2_y','Tail_base_2_x','Tail_base_2_y','Nose_1_p','Ear_left_1_p',
'Ear_right_1_p','Center_1_p','Lat_left_1_p','Lat_right_1_p','Tail_base_1_p','Nose_2_p',
'Ear_left_2_p','Ear_right_2_p','Center_2_p','Lat_left_2_p','Lat_right_2_p','Tail_base_2_p',
'Mouse_1_width','Mouse_2_width','Movement_mouse_1_centroid','Movement_mouse_1_nose',
'Movement_mouse_1_lateral_left','Movement_mouse_1_lateral_right','Movement_mouse_1_tail_base',
'Movement_mouse_1_left_ear','Movement_mouse_1_right_ear','Movement_mouse_2_centroid',
'Movement_mouse_2_nose','Movement_mouse_2_lateral_left','Movement_mouse_2_lateral_right',
'Movement_mouse_2_tail_base','Movement_mouse_2_left_ear','Movement_mouse_2_right_ear',
'M1_mean_euclidean_distance_hull','M2_mean_euclidean_distance_hull','Total_movement_centroids',
'Total_movement_all_bodyparts_M1','Total_movement_all_bodyparts_M2',
'Total_movement_all_bodyparts_both_mice','Movement_mean_2','Movement_median_2','Movement_sum_2',
'Total_movement_centroids_mean_2','Total_movement_centroids_sum_2',
'Total_movement_centroids_median_2','Centroid_movement_M1_median_2',
'Tail_base_movement_M1_mean_2','Tail_base_movement_M1_sum_2','Nose_movement_M2_median_2',
'Centroid_movement_M2_median_2','Total_movement_centroids_mean_5',
'Total_movement_centroids_sum_5']
csv_df = csv_df.drop(columns=clms_to_drop)
########### DROP COORDINATE COLUMNS ###########################################
csv_df = csv_df.reset_index(drop=True)
csv_df = csv_df.fillna(0)
csv_df = csv_df.drop(columns=['index'])
fileName = os.path.basename(currentFile)
fileName = fileName.split('.')
fileOut = str(fileName[0]) + str('.csv')
saveFN = os.path.join(csv_dir_out, fileOut)
csv_df.to_csv(saveFN)
print('Feature extraction complete for ' + '"' + str(currVidName) + '".')
print('All feature extraction complete.')
|
The real part of an integer is the integer itself.
|
module Issue2487-1 where
postulate get-out-of-jail : {A : Set} -> A
|
Require Export Coq.Arith.EqNat.
Require Export Coq.Relations.Relations.
Require Export compcert.lib.Axioms.
Require Export compcert.lib.Coqlib.
Require Export compcert.lib.Integers.
Require Export compcert.lib.Floats.
Require Export compcert.common.AST.
Require Export compcert.common.Values.
Require Export compcert.common.Memdata.
Require Export compcert.common.Memtype.
Require Export compcert.common.Memory.
Require Export compcert.common.Globalenvs.
Require Export VST.msl.Coqlib2.
Require Export VST.veric.coqlib4.
(* Lemmas about ident lists *)
Fixpoint id_in_list (id: ident) (ids: list ident) : bool :=
match ids with i::ids' => orb (Pos.eqb id i) (id_in_list id ids') | _ => false end.
Fixpoint compute_list_norepet (ids: list ident) : bool :=
match ids with
| id :: ids' => if id_in_list id ids' then false else compute_list_norepet ids'
| nil => true
end.
Lemma id_in_list_true: forall i ids, id_in_list i ids = true -> In i ids.
Proof.
induction ids; simpl; intros. inv H. apply orb_true_iff in H; destruct H; auto.
apply Peqb_true_eq in H. subst; auto.
Qed.
Lemma id_in_list_false: forall i ids, id_in_list i ids = false -> ~In i ids.
Proof.
induction ids; simpl; intros; auto.
apply orb_false_iff in H. destruct H.
intros [?|?]. subst.
rewrite Pos.eqb_refl in H; inv H.
apply IHids; auto.
Qed.
Lemma compute_list_norepet_e: forall ids,
compute_list_norepet ids = true -> list_norepet ids.
Proof.
induction ids; simpl; intros.
constructor.
revert H; case_eq (id_in_list a ids); intros.
inv H0.
constructor; auto.
apply id_in_list_false in H.
auto.
Qed.
Lemma compute_list_norepet_i: forall ids,
list_norepet ids -> compute_list_norepet ids = true.
Proof.
induction ids; simpl; intros; trivial.
inv H. rewrite (IHids H3).
case_eq (id_in_list a ids); intros; trivial.
apply id_in_list_true in H. congruence.
Qed.
Lemma list_norepet_rev:
forall A (l: list A), list_norepet (rev l) = list_norepet l.
Proof.
induction l; simpl; auto.
apply prop_ext; split; intros.
apply list_norepet_app in H.
destruct H as [? [? ?]].
rewrite IHl in H.
constructor; auto.
eapply list_disjoint_notin with (a::nil).
apply list_disjoint_sym; auto.
intros x y ? ? ?; subst.
contradiction (H1 y y); auto.
rewrite <- In_rev; auto.
simpl; auto.
rewrite list_norepet_app.
inv H.
split3; auto.
rewrite IHl; auto.
repeat constructor.
intro Hx. inv Hx.
intros x y ? ? ?; subst.
inv H0.
rewrite <- In_rev in H; contradiction.
auto.
Qed.
Lemma block_eq_dec: forall b1 b2: block, {b1 = b2} + {b1 <> b2}.
Proof. exact (Coqlib.peq). Qed.
Lemma rev_if_be_singleton:
forall x, rev_if_be (x::nil) = (x::nil).
Proof. intro. unfold rev_if_be; destruct Archi.big_endian; auto. Qed.
Lemma rev_if_be_1: forall i, rev_if_be (i::nil) = (i::nil).
Proof. unfold rev_if_be; intros. destruct Archi.big_endian; reflexivity.
Qed.
Lemma decode_byte_val:
forall m, decode_val Mint8unsigned (Byte m :: nil) =
Vint (Int.zero_ext 8 (Int.repr (Byte.unsigned m))).
Proof.
intros.
unfold decode_val. simpl.
f_equal.
unfold decode_int.
rewrite rev_if_be_singleton.
unfold int_of_bytes. f_equal. f_equal. apply Z.add_0_r.
Qed.
Lemma Vint_inj: forall x y, Vint x = Vint y -> x=y.
Proof. congruence. Qed.
Definition nullval : val :=
if Archi.ptr64 then Vlong Int64.zero else Vint Int.zero.
Definition val_to_bool (v: val) : option bool :=
match v with
| Vint n => Some (negb (Int.eq n Int.zero))
| Vptr _ _ => Some true
| _ => None
end.
Definition bool_of_valf (v: val): option bool :=
match v with
| Vint i => Some (negb (Int.eq i Int.zero))
| Vlong i => Some (negb (Int64.eq i Int64.zero))
| Vfloat _ => None
| Vsingle _ => None
| Vptr _ _ => Some true
| Vundef => None
end.
|
(**
Syntax of the simply typed lambda calculus as a
multisorted signature.
Written by: Anders Mörtberg, 2021 (adapted from STLC.v)
version for simplified notion of HSS by Ralph Matthes (2022, 2023)
the file is identical to the homonymous file in the parent directory, except for importing files from the present directory
*)
Require Import UniMath.Foundations.PartD.
Require Import UniMath.Foundations.Sets.
Require Import UniMath.MoreFoundations.Tactics.
Require Import UniMath.Combinatorics.Lists.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.NaturalTransformations.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.FunctorCategory.
Require Import UniMath.CategoryTheory.whiskering.
Require Import UniMath.CategoryTheory.limits.graphs.colimits.
Require Import UniMath.CategoryTheory.limits.binproducts.
Require Import UniMath.CategoryTheory.limits.products.
Require Import UniMath.CategoryTheory.limits.bincoproducts.
Require Import UniMath.CategoryTheory.limits.coproducts.
Require Import UniMath.CategoryTheory.limits.terminal.
Require Import UniMath.CategoryTheory.limits.initial.
Require Import UniMath.CategoryTheory.FunctorAlgebras.
Require Import UniMath.CategoryTheory.exponentials.
Require Import UniMath.CategoryTheory.Adjunctions.Core.
Require Import UniMath.CategoryTheory.Chains.All.
Require Import UniMath.CategoryTheory.Monads.Monads.
Require Import UniMath.CategoryTheory.categories.HSET.Core.
Require Import UniMath.CategoryTheory.categories.HSET.Colimits.
Require Import UniMath.CategoryTheory.categories.HSET.Limits.
Require Import UniMath.CategoryTheory.categories.HSET.Structures.
Require Import UniMath.CategoryTheory.categories.StandardCategories.
Require Import UniMath.CategoryTheory.Groupoids.
Require Import UniMath.SubstitutionSystems.Signatures.
Require Import UniMath.SubstitutionSystems.SumOfSignatures.
Require Import UniMath.SubstitutionSystems.BinProductOfSignatures.
Require Import UniMath.SubstitutionSystems.SimplifiedHSS.SubstitutionSystems.
Require Import UniMath.SubstitutionSystems.SimplifiedHSS.LiftingInitial_alt.
Require Import UniMath.SubstitutionSystems.SimplifiedHSS.MonadsFromSubstitutionSystems.
Require Import UniMath.SubstitutionSystems.SignatureExamples.
Require Import UniMath.SubstitutionSystems.MultiSorted_alt.
Require Import UniMath.SubstitutionSystems.SimplifiedHSS.MultiSortedMonadConstruction_alt.
Require Import UniMath.SubstitutionSystems.MonadsMultiSorted_alt.
Local Open Scope cat.
(** * The simply typed lambda calculus from a multisorted binding signature *)
Section Lam.
Variable (sort : hSet) (arr : sort → sort → sort).
Local Lemma hsort : isofhlevel 3 sort.
Proof.
exact (isofhlevelssnset 1 sort (setproperty sort)).
Defined.
Let sortToSet : category := [path_pregroupoid sort hsort,HSET].
Local Lemma TerminalSortToSet : Terminal sortToSet.
Proof.
apply Terminal_functor_precat, TerminalHSET.
Defined.
Local Lemma BinCoprodSortToSet : BinCoproducts sortToSet.
Proof.
apply BinCoproducts_functor_precat, BinCoproductsHSET.
Defined.
Local Lemma BinProd : BinProducts [sortToSet,HSET].
Proof.
apply BinProducts_functor_precat, BinProductsHSET.
Defined.
(** Some notations *)
Local Infix "::" := (@cons _).
Local Notation "[]" := (@nil _) (at level 0, format "[]").
Local Notation "a + b" := (setcoprod a b) : set.
Local Notation "s ⇒ t" := (arr s t).
Local Notation "'Id'" := (functor_identity _).
Local Notation "a ⊕ b" := (BinCoproductObject (BinCoprodSortToSet a b)).
Local Notation "'1'" := (TerminalObject TerminalSortToSet).
Local Notation "F ⊗ G" := (BinProduct_of_functors BinProd F G).
Let sortToSet2 := [sortToSet,sortToSet].
Local Lemma BinCoprodSortToSet2 : BinCoproducts sortToSet2.
Proof.
apply BinCoproducts_functor_precat, BinCoprodSortToSet.
Defined.
(** The signature of the simply typed lambda calculus *)
Definition STLC_Sig : MultiSortedSig sort.
Proof.
use make_MultiSortedSig.
- apply ((sort × sort) + (sort × sort))%set.
- intros H; induction H as [st|st]; induction st as [s t].
+ exact ((([],,(s ⇒ t)) :: ([],,s) :: nil),,t).
+ exact (((cons s [],,t) :: []),,(s ⇒ t)).
Defined.
(** The signature with strength for the simply typed lambda calculus *)
Definition STLC_Signature : Signature sortToSet _ _ :=
MultiSortedSigToSignatureSet sort hsort STLC_Sig.
Definition STLC_Functor : functor sortToSet2 sortToSet2 :=
Id_H _ BinCoprodSortToSet STLC_Signature.
Lemma STLC_Functor_Initial : Initial (FunctorAlg STLC_Functor).
Proof.
apply SignatureInitialAlgebra.
- apply InitialHSET.
- apply ColimsHSET_of_shape.
- apply is_omega_cocont_MultiSortedSigToSignature.
+ apply ProductsHSET.
+ apply Exponentials_functor_HSET.
+ apply ColimsHSET_of_shape.
Defined.
Definition STLC_Monad : Monad sortToSet :=
MultiSortedSigToMonadSet sort hsort STLC_Sig.
(** Extract the constructors of the STLC from the initial algebra *)
Definition STLC_M : sortToSet2 :=
alg_carrier _ (InitialObject STLC_Functor_Initial).
(* The functor parts coincide *)
Lemma STLC_Monad_ok : STLC_M = pr1 (pr1 (pr1 STLC_Monad)).
Proof.
apply idpath.
Qed.
Let STLC_M_mor : sortToSet2⟦STLC_Functor STLC_M,STLC_M⟧ :=
alg_map _ (InitialObject STLC_Functor_Initial).
Let STLC_M_alg : algebra_ob STLC_Functor :=
InitialObject STLC_Functor_Initial.
(** The variables *)
Definition var_map : sortToSet2⟦Id,STLC_M⟧ :=
BinCoproductIn1 (BinCoprodSortToSet2 _ _) · STLC_M_mor.
(** The source of the application constructor *)
Definition app_source (s t : sort) : functor sortToSet2 sortToSet2 :=
(post_comp_functor (projSortToSet sort hsort (s ⇒ t)) ⊗ post_comp_functor (projSortToSet sort hsort s))
∙ (post_comp_functor (hat_functorSet sort hsort t)).
(** The application constructor *)
Definition app_map (s t : sort) : sortToSet2⟦app_source s t STLC_M,STLC_M⟧ :=
CoproductIn _ _ (Coproducts_functor_precat _ _ _ _ (λ _, _)) (ii1 (s,,t))
· BinCoproductIn2 (BinCoprodSortToSet2 _ _)
· STLC_M_mor.
(** The source of the lambda constructor *)
Definition lam_source (s t : sort) : functor sortToSet2 sortToSet2 :=
pre_comp_functor (sorted_option_functorSet sort hsort s)
∙ post_comp_functor (projSortToC sort hsort _ t)
∙ post_comp_functor (hat_functorSet sort hsort (s ⇒ t)).
Definition lam_map (s t : sort) : sortToSet2⟦lam_source s t STLC_M,STLC_M⟧ :=
CoproductIn _ _ (Coproducts_functor_precat _ _ _ _ (λ _, _)) (ii2 (s,,t))
· BinCoproductIn2 (BinCoprodSortToSet2 _ _)
· STLC_M_mor.
Definition make_STLC_M_Algebra X (fvar : sortToSet2⟦Id,X⟧)
(fapp : ∏ s t, sortToSet2⟦app_source s t X,X⟧)
(flam : ∏ s t, sortToSet2⟦lam_source s t X,X⟧) :
algebra_ob STLC_Functor.
Proof.
apply (tpair _ X), (BinCoproductArrow _ fvar), CoproductArrow; intros b.
induction b as [st|st]; induction st as [s t].
- exact (fapp s t).
- exact (flam s t).
Defined.
(** The recursor for the stlc *)
Definition foldr_map X (fvar : sortToSet2⟦Id,X⟧)
(fapp : ∏ s t, sortToSet2⟦app_source s t X,X⟧)
(flam : ∏ s t, sortToSet2⟦lam_source s t X,X⟧) :
algebra_mor _ STLC_M_alg (make_STLC_M_Algebra X fvar fapp flam) :=
InitialArrow STLC_Functor_Initial (make_STLC_M_Algebra X fvar fapp flam).
(** The equation for variables *)
Lemma foldr_var X (fvar : sortToSet2⟦Id,X⟧)
(fapp : ∏ s t, sortToSet2⟦app_source s t X,X⟧)
(flam : ∏ s t, sortToSet2⟦lam_source s t X,X⟧) :
var_map · foldr_map X fvar fapp flam = fvar.
Proof.
unfold var_map.
rewrite <- assoc, (algebra_mor_commutes _ _ _ (foldr_map _ _ _ _)), assoc.
etrans; [eapply cancel_postcomposition, BinCoproductOfArrowsIn1|].
rewrite id_left.
apply BinCoproductIn1Commutes.
Qed.
Lemma foldr_app X (fvar : sortToSet2⟦Id,X⟧)
(fapp : ∏ s t, sortToSet2⟦app_source s t X,X⟧)
(flam : ∏ s t, sortToSet2⟦lam_source s t X,X⟧)
(s t : sort) :
app_map s t · foldr_map X fvar fapp flam =
# (pr1 (app_source s t)) (foldr_map X fvar fapp flam) · fapp s t.
Proof.
unfold app_map.
rewrite <- assoc.
etrans; [apply maponpaths, (algebra_mor_commutes _ _ _ (foldr_map X fvar fapp flam))|].
rewrite assoc.
etrans; [eapply cancel_postcomposition; rewrite <- assoc;
apply maponpaths, BinCoproductOfArrowsIn2|].
rewrite <- !assoc.
etrans; [apply maponpaths, maponpaths, BinCoproductIn2Commutes|].
rewrite assoc.
etrans; [apply cancel_postcomposition; use (CoproductOfArrowsIn _ _ (Coproducts_functor_precat _ _ _ _ (λ _, _)))|].
rewrite <- assoc.
apply maponpaths.
exact (CoproductInCommutes _ _ _ _ _ _ (inl (s,,t))).
Qed.
Lemma foldr_lam X (fvar : sortToSet2⟦Id,X⟧)
(fapp : ∏ s t, sortToSet2⟦app_source s t X,X⟧)
(flam : ∏ s t, sortToSet2⟦lam_source s t X,X⟧)
(s t : sort) :
lam_map s t · foldr_map X fvar fapp flam =
# (pr1 (lam_source s t)) (foldr_map X fvar fapp flam) · flam s t.
Proof.
unfold lam_map.
rewrite <- assoc.
etrans; [apply maponpaths, (algebra_mor_commutes _ _ _ (foldr_map X fvar fapp flam))|].
rewrite assoc.
etrans; [eapply cancel_postcomposition; rewrite <- assoc;
apply maponpaths, BinCoproductOfArrowsIn2|].
rewrite <- !assoc.
etrans; [apply maponpaths, maponpaths, BinCoproductIn2Commutes|].
rewrite assoc.
etrans; [apply cancel_postcomposition; use (CoproductOfArrowsIn _ _ (Coproducts_functor_precat _ _ _ _ (λ _, _)))|].
rewrite <- assoc.
apply maponpaths.
exact (CoproductInCommutes _ _ _ _ _ _ (inr (s,,t))).
Qed.
(* Now substitution *)
Let STLC := STLC_Monad.
(* Parallel substitution *)
Definition psubst {X Y : sortToSet} (f : sortToSet⟦X, STLC Y ⟧) :
sortToSet⟦ STLC (X ⊕ Y), STLC Y ⟧ := monadSubstGen_instantiated _ _ _ _ f.
(* Substitution of a single variable *)
Definition subst {X : sortToSet} (f : sortToSet⟦ 1, STLC X ⟧) :
sortToSet⟦ STLC (1 ⊕ X), STLC X ⟧ := monadSubstGen_instantiated _ _ _ _ f.
Definition weak {X Y : sortToSet} : sortToSet⟦STLC Y,STLC (X ⊕ Y)⟧ :=
mweak_instantiated sort hsort HSET BinCoproductsHSET.
Definition exch {X Y Z : sortToSet} : sortToSet⟦STLC (X ⊕ (Y ⊕ Z)), STLC (Y ⊕ (X ⊕ Z))⟧ :=
mexch_instantiated sort hsort HSET BinCoproductsHSET.
Lemma psubst_interchange {X Y Z : sortToSet}
(f : sortToSet⟦X,STLC (Y ⊕ Z)⟧) (g : sortToSet⟦Y, STLC Z⟧) :
psubst f · psubst g = exch · psubst (g · weak) · psubst (f · psubst g).
Proof.
apply subst_interchange_law_gen_instantiated.
Qed.
Lemma subst_interchange {X : sortToSet}
(f : sortToSet⟦1,STLC (1 ⊕ X)⟧) (g : sortToSet⟦1,STLC X⟧) :
subst f · subst g = exch · subst (g · weak) · subst (f · subst g).
Proof.
apply subst_interchange_law_gen_instantiated.
Qed.
(* We could also unfold these as statements about sort-indexed sets, but
this quickly gets very cumbersome: *)
(* Definition psubst {X Y : sort → hSet} (f : ∏ t, X t → STLC Y t) (t : sort) : *)
(* STLC (λ t, (X t + Y t)%set) t → STLC Y t. *)
(* Proof. *)
(* intros u. *)
(* transparent assert (X' : (sortToSet)). *)
(* { use (functor_path_pregroupoid _); apply X. } *)
(* transparent assert (Y' : (sortToSet)). *)
(* { use (functor_path_pregroupoid _); apply Y. } *)
(* transparent assert (f' : (sortToSet⟦ X' , STLC_Monad Y' ⟧)). *)
(* { use nat_trans_functor_path_pregroupoid; apply homset_property; use f. } *)
(* use (pr1 (@monadSubstGen_instantiated sort SET BinCoproductsHSET STLC_Monad X' Y' f') t). *)
End Lam.
|
r=0.54
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7bg69/media/images/d7bg69-009/svc:tesseract/full/full/0.54/default.jpg Accept:application/hocr+xml
|
# Realization of Non-Recursive Filters
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing.*
## Introduction
Computing the output $y[k] = \mathcal{H} \{ x[k] \}$ of a [linear time-invariant](https://en.wikipedia.org/wiki/LTI_system_theory) (LTI) system is of central importance in digital signal processing. This is often referred to as [*filtering*](https://en.wikipedia.org/wiki/Digital_filter) of the input signal $x[k]$. The methods for this purpose are typically classified into
* non-recursive and
* recursive
techniques. This section focuses on the realization of non-recursive filters.
### Non-Recursive Filters
An LTI system can be characterized completely by its impulse response $h[k]$.
The output signal $y[k]$ is given by (linear) convolution of the input signal $x[k]$ with the impulse response $h[k]$
\begin{equation}
y[k] = x[k] * h[k] = \sum_{\kappa = -\infty}^{\infty} x[\kappa] \; h[k-\kappa]
\end{equation}
Two aspects of this representation become evident when inspecting the above equation:
1. The output signal $y[k]$ is a linear combination of the input signal $x[k]$. There is no feedback of the output signal from past time-instants. Therefore, such filters are termed *non-recursive* filters.
2. In order to compute the output signal at one particular time-instant $k$, the input signal needs to be known for all past and future time-instants.
The second aspect prohibits a practical realization. In order to be able to realize a non-recursive filter by convolution, the output at time-instant $k$ should only depend on the input signal $x[k]$ up to time-index $k$
\begin{equation}
y[k] = \sum_{\kappa = -\infty}^{k} x[\kappa] \; h[k-\kappa]
\end{equation}
This is the case when the impulse response is causal, hence when $h[k] = 0$ for $k<0$. However, this still requires knowledge of the input signal for all past time-instants. If we further assume that the input signal is causal, $x[k] = 0$ for $k<0$, we get
\begin{equation}
y[k] = \sum_{\kappa = 0}^{k} x[\kappa] \; h[k-\kappa]
\end{equation}
### Finite Impulse Response
Many practical systems have an impulse response of finite length $N$ or can be approximated by an impulse response of finite length
\begin{equation}
h_N[k] = \begin{cases} h[k] & \text{ for } 0 \leq k < N \\ 0 & \text{ otherwise} \end{cases}
\end{equation}
Such an impulse response is termed a [*finite impulse response*](https://en.wikipedia.org/wiki/Finite_impulse_response) (FIR). Introducing $h_N[k]$ into the above sum and rearranging terms yields
\begin{equation}
y[k] = \sum_{\kappa = 0}^{k} x[\kappa] \; h_N[k-\kappa] = \sum_{\kappa = 0}^{N-1} h_N[\kappa] \; x[k-\kappa]
\end{equation}
Hence, for a causal input signal $x[k]$ and an FIR, the output of the system can be computed with a finite number of operations.
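The finite sum maps directly onto code. As a minimal sketch (assuming NumPy, with an arbitrary random impulse response and input signal chosen for illustration), the following evaluates the sum explicitly and verifies the result against `numpy.convolve`:

import numpy as np

N = 32  # length of the FIR
h = np.random.normal(size=N)  # exemplary finite impulse response h_N[k]
x = np.random.normal(size=128)  # exemplary causal input signal x[k]

# direct evaluation of y[k] = sum_{kappa=0}^{N-1} h_N[kappa] * x[k - kappa]
y = np.zeros(len(x))
for k in range(len(x)):
    for kappa in range(N):
        if k - kappa >= 0:
            y[k] += h[kappa] * x[k - kappa]

# the first len(x) samples of the full linear convolution coincide with y
assert np.allclose(y, np.convolve(x, h)[:len(x)])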
The evaluation of the convolution for an FIR of length $N$ requires $N$ multiplications and $N-1$ additions per time index $k$. For the real-time convolution of an audio signal sampled at $f_\text{S} = 48$ kHz with an FIR of length $N = 48000$, around $2 \times 2.3 \cdot 10^9$ numerical operations have to be computed per second. This is a considerable numerical complexity, especially on embedded or mobile platforms. Therefore, various techniques have been developed to lower the computational complexity.
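As a quick plausibility check of this figure (a rough estimate only, counting $N$ multiplications and $N-1$ additions per output sample):

fs = 48000  # sampling rate in Hz
N = 48000   # length of the FIR
ops_per_second = fs * (N + (N - 1))  # multiplications + additions per second
print(f'{ops_per_second:.1e} operations per second')  # approx. 4.6e9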
|
If $x$ is a limit point of $s \cup t$ and $s$ is finite, then $x$ is a limit point of $t$.
|
Require Import Coq.Strings.String.
Require Import Coq.ZArith.ZArith.
Require Import riscv.Utility.Monads.
Require Import riscv.Utility.MonadNotations.
Require Export riscv.Utility.FreeMonad.
Require Import riscv.Spec.Decode.
Require Import riscv.Spec.Machine.
Require Import riscv.Utility.Utility.
Require Import riscv.Spec.Primitives.
Require Import Coq.Lists.List. Import ListNotations.
Require Import coqutil.Datatypes.List.
Require Import coqutil.Datatypes.ListSet.
Require Export riscv.Platform.RiscvMachine.
Require Export riscv.Platform.MaterializeRiscvProgram.
Require Import coqutil.Z.Lia.
Require Import coqutil.Map.Interface.
Require Import coqutil.Map.Properties.
Require Import coqutil.Word.Properties.
Require Import coqutil.Datatypes.PropSet.
Require Import coqutil.Tactics.Tactics.
Require Import coqutil.Tactics.fwd.
Require Import riscv.Platform.Sane.
Local Open Scope Z_scope.
Local Open Scope bool_scope.
Section Riscv.
Import free.
Context {width: Z} {BW: Bitwidth width} {word: word width} {word_ok: word.ok word}.
Context {Mem: map.map word byte} {Registers: map.map Register word}.
Add Ring wring : (word.ring_theory (word := word))
(preprocess [autorewrite with rew_word_morphism],
morphism (word.ring_morph (word := word)),
constants [word_cst]).
Definition store(n: nat)(ctxid: SourceType) a v mach post :=
match Memory.store_bytes n mach.(getMem) a v with
| Some m => post (withXAddrs (invalidateWrittenXAddrs n a mach.(getXAddrs)) (withMem m mach))
| None => False
end.
Definition load(n: nat)(ctxid: SourceType) a mach post :=
(ctxid = Fetch -> isXAddr4 a mach.(getXAddrs)) /\
match Memory.load_bytes n mach.(getMem) a with
| Some v => post v mach
| None => False
end.
Definition updatePc(mach: RiscvMachine): RiscvMachine :=
withPc mach.(getNextPc) (withNextPc (word.add mach.(getNextPc) (word.of_Z 4)) mach).
Definition getReg(regs: Registers)(reg: Z): word :=
if ((0 <? reg) && (reg <? 32)) then
match map.get regs reg with
| Some x => x
| None => word.of_Z 0
end
else word.of_Z 0.
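(* Register 0 is hardwired to zero in RISC-V; getReg also returns 0 for
   out-of-range register numbers and for registers without an entry in the map. *)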
Definition setReg(reg: Z)(v: word)(regs: Registers): Registers :=
if ((0 <? reg) && (reg <? 32)) then map.put regs reg v else regs.
Definition interpret_action (a : riscv_primitive) (mach : RiscvMachine) :
(primitive_result a -> RiscvMachine -> Prop) -> (RiscvMachine -> Prop) -> Prop :=
match a with
| GetRegister reg => fun (postF: word -> RiscvMachine -> Prop) postA =>
let v := getReg mach.(getRegs) reg in
postF v mach
| SetRegister reg v => fun postF postA =>
let regs := setReg reg v mach.(getRegs) in
postF tt (withRegs regs mach)
| GetPC => fun postF postA => postF mach.(getPc) mach
| SetPC newPC => fun postF postA => postF tt (withNextPc newPC mach)
| LoadByte ctxid a => fun postF postA => load 1 ctxid a mach postF
| LoadHalf ctxid a => fun postF postA => load 2 ctxid a mach postF
| LoadWord ctxid a => fun postF postA => load 4 ctxid a mach postF
| LoadDouble ctxid a => fun postF postA => load 8 ctxid a mach postF
| StoreByte ctxid a v => fun postF postA => store 1 ctxid a v mach (postF tt)
| StoreHalf ctxid a v => fun postF postA => store 2 ctxid a v mach (postF tt)
| StoreWord ctxid a v => fun postF postA => store 4 ctxid a v mach (postF tt)
| StoreDouble ctxid a v => fun postF postA => store 8 ctxid a v mach (postF tt)
| StartCycle => fun postF postA =>
postF tt (withNextPc (word.add mach.(getPc) (word.of_Z 4)) mach)
| EndCycleNormal => fun postF postA => postF tt (updatePc mach)
| EndCycleEarly _ => fun postF postA => postA (updatePc mach) (* ignores postF containing the continuation *)
| MakeReservation _
| ClearReservation _
| CheckReservation _
| GetCSRField _
| SetCSRField _ _
| GetPrivMode
| SetPrivMode _
| Fence _ _
=> fun postF postA => False
end.
Definition no_M(mach: RiscvMachine): Prop :=
forall a v,
isXAddr4 a mach.(getXAddrs) ->
word.unsigned a mod 4 = 0 ->
Memory.load_bytes 4 mach.(getMem) a = Some v ->
forall minst, decode RV32IM (LittleEndian.combine 4 v) <> MInstruction minst.
Instance MinimalNoMulPrimitivesParams:
PrimitivesParams (free riscv_primitive primitive_result) RiscvMachine :=
{|
Primitives.mcomp_sat A m mach postF :=
@free.interpret _ _ _ interpret_action A m mach postF (fun _ => False);
Primitives.is_initial_register_value x := True;
Primitives.nonmem_load _ _ _ _ _ := False;
Primitives.nonmem_store _ _ _ _ _ _ := False;
Primitives.valid_machine := no_M;
|}.
Lemma load_weaken_post n c a m (post1 post2:_->_->Prop)
(H: forall r s, post1 r s -> post2 r s)
: load n c a m post1 -> load n c a m post2.
Proof.
cbv [load nonmem_load].
destruct (Memory.load_bytes n (getMem m) a); intuition eauto.
Qed.
Lemma store_weaken_post n c a v m (post1 post2:_->Prop)
(H: forall s, post1 s -> post2 s)
: store n c a v m post1 -> store n c a v m post2.
Proof.
cbv [store nonmem_store].
destruct (Memory.store_bytes n (getMem m) a); intuition eauto.
Qed.
Lemma interpret_action_weaken_post a (postF1 postF2: _ -> _ -> Prop) (postA1 postA2: _ -> Prop):
(forall r s, postF1 r s -> postF2 r s) ->
(forall s, postA1 s -> postA2 s) ->
forall s, interpret_action a s postF1 postA1 -> interpret_action a s postF2 postA2.
Proof.
destruct a; cbn; try solve [intuition eauto].
all : eauto using load_weaken_post, store_weaken_post.
Qed.
Global Instance MinimalNoMulSatisfies_mcomp_sat_spec: mcomp_sat_spec MinimalNoMulPrimitivesParams.
Proof.
split; cbv [mcomp_sat MinimalNoMulPrimitivesParams Bind Return Monad_free].
{ symmetry. eapply interpret_bind_ex_mid, interpret_action_weaken_post. }
{ symmetry; intros. rewrite interpret_ret; eapply iff_refl. }
Qed.
Lemma preserve_undef_on{memOk: map.ok Mem}: forall n (m m': Mem) a w s,
Memory.store_bytes n m a w = Some m' ->
map.undef_on m s ->
map.undef_on m' s.
Proof.
eauto using map.same_domain_preserves_undef_on, Memory.store_bytes_preserves_domain.
Qed.
Lemma removeXAddr_bw: forall (a1 a2: word) xaddrs,
isXAddr1 a1 (removeXAddr a2 xaddrs) ->
isXAddr1 a1 xaddrs.
Proof.
unfold isXAddr1, removeXAddr.
intros.
eapply filter_In.
eassumption.
Qed.
Lemma invalidateWrittenXAddrs_bw: forall n (a r: word) xa,
In a (invalidateWrittenXAddrs n r xa) ->
In a xa.
Proof.
induction n; cbn; intros.
- assumption.
- eapply IHn. eapply removeXAddr_bw. unfold isXAddr1. eassumption.
Qed.
Lemma put_preserves_getmany_of_tuple{memOk: map.ok Mem}:
forall n (t: HList.tuple word n) (m: Mem) (r: word) b,
~In r (HList.tuple.to_list t) ->
map.getmany_of_tuple m t =
map.getmany_of_tuple (map.put m r b) t.
Proof.
induction n; intros.
- destruct t. reflexivity.
- destruct t as (w & t). cbn in H|-*.
unfold map.getmany_of_tuple in IHn.
erewrite IHn.
2: {
intro C. eapply H. right. exact C.
}
rewrite ?map.get_put_dec.
destr (word.eqb r w). 2: reflexivity.
exfalso. apply H. auto.
Qed.
Lemma transfer_load4bytes_to_previous_mem{memOk: map.ok Mem}:
forall n (a: word) v m m' r w someSet,
Memory.store_bytes n m r w = Some m' ->
(* a notin r..r+n *)
isXAddr4 a (invalidateWrittenXAddrs n r someSet) ->
Memory.load_bytes 4 m' a = Some v ->
Memory.load_bytes 4 m a = Some v.
Proof.
induction n; intros.
- cbn in H. congruence.
- unfold Memory.store_bytes in *. fwd. cbn in H0. destruct w as [b w].
cbn -[HList.tuple Memory.load_bytes] in H1.
cbn in E. fwd.
unfold Memory.load_bytes at 1 in IHn.
unfold map.getmany_of_tuple, Memory.footprint in IHn.
specialize IHn with (m := m) (r := (word.add r (word.of_Z 1))).
rewrite E1 in IHn.
eapply IHn.
+ reflexivity.
+ instantiate (1 := someSet). clear -H0.
unfold isXAddr4 in *. fwd. eauto 10 using removeXAddr_bw.
+ unfold Memory.load_bytes in *.
etransitivity. 2: eassumption.
unfold Memory.unchecked_store_bytes, Memory.footprint.
eapply put_preserves_getmany_of_tuple.
cbn. clear -H0 word_ok. unfold isXAddr4, isXAddr1 in H0. fwd.
intro C.
unfold removeXAddr in *.
apply_in_hyps filter_In.
fwd.
apply_in_hyps Bool.negb_true_iff.
apply_in_hyps Properties.word.eqb_false.
repeat destruct C as [C | C]; try assumption;
match type of C with
| ?l = _ => ring_simplify l in C
end;
subst r;
congruence.
Qed.
Lemma isXAddr4_uninvalidate: forall (a: word) n r xa,
isXAddr4 a (invalidateWrittenXAddrs n r xa) ->
isXAddr4 a xa.
Proof.
unfold isXAddr4, isXAddr1. intros. fwd. eauto 10 using invalidateWrittenXAddrs_bw.
Qed.
Lemma interpret_action_total{memOk: map.ok Mem} a s postF postA :
no_M s ->
interpret_action a s postF postA ->
exists s', no_M s' /\ (postA s' \/ exists v', postF v' s').
Proof.
destruct s, a; cbn -[HList.tuple Memory.load_bytes invalidateWrittenXAddrs];
cbv [load store no_M]; cbn -[HList.tuple Memory.load_bytes invalidateWrittenXAddrs];
repeat destruct_one_match;
intuition idtac;
repeat lazymatch goal with
| H : postF _ ?mach |- exists _ : RiscvMachine, _ =>
exists mach; cbn [RiscvMachine.getMem RiscvMachine.getXAddrs]
| H : postA ?mach |- exists _ : RiscvMachine, _ =>
exists mach; cbn [RiscvMachine.getMem RiscvMachine.getXAddrs]
| Hexists : (exists v, ?P), Hforall : (forall v, ?P -> _) |- _ =>
let v := fresh "v" in
destruct Hexists as [v Hexists];
specialize (Hforall v Hexists)
end;
ssplit; eauto; cbn -[HList.tuple Memory.load_bytes invalidateWrittenXAddrs];
change removeXAddr with (@List.removeb word word.eqb);
rewrite ?ListSet.of_list_removeb;
intuition eauto 10 using transfer_load4bytes_to_previous_mem, isXAddr4_uninvalidate.
Qed.
Lemma interpret_action_total'{memOk: map.ok Mem} a s post :
no_M s ->
interpret_action a s post (fun _ : RiscvMachine => False) ->
exists v s', post v s' /\ no_M s'.
Proof.
intros. pose proof interpret_action_total as P.
specialize P with (postA := (fun _ : RiscvMachine => False)). simpl in P.
specialize (P _ _ _ H H0).
destruct P as (s' & ? & ?).
destruct H2 as [[] | (v' & ?)].
eauto.
Qed.
Import coqutil.Tactics.Tactics.
Lemma interpret_action_appendonly a s postF postA :
interpret_action a s postF postA ->
interpret_action a s (fun _ s' => endswith s'.(getLog) s.(getLog))
(fun s' => endswith s'.(getLog) s.(getLog)).
Proof.
destruct s, a; cbn; cbv [load store nonmem_load nonmem_store]; cbn;
repeat destruct_one_match;
intuition eauto using endswith_refl, endswith_cons_l.
Qed.
(* NOTE: maybe instead a generic lemma to push /\ into postcondition? *)
Lemma interpret_action_appendonly' a s postF postA :
interpret_action a s postF postA ->
interpret_action a s (fun v s' => postF v s' /\ endswith s'.(getLog) s.(getLog))
(fun s' => postA s' /\ endswith s'.(getLog) s.(getLog)).
Proof.
destruct s, a; cbn; cbv [load store nonmem_load nonmem_store]; cbn;
repeat destruct_one_match; intros; destruct_products; try split;
intuition eauto using endswith_refl, endswith_cons_l.
Qed.
Lemma interpret_action_appendonly'' a s post :
interpret_action a s post (fun _ : RiscvMachine => False) ->
interpret_action a s (fun v s' => post v s' /\ endswith s'.(getLog) s.(getLog))
(fun _ : RiscvMachine => False).
Proof.
intros. pose proof interpret_action_appendonly' as P.
specialize (P _ _ _ (fun _ : RiscvMachine => False) H). simpl in P.
eapply interpret_action_weaken_post. 3: exact P. all: simpl; intuition eauto.
Qed.
Lemma interpret_action_preserves_valid{memOk: map.ok Mem} a s postF postA :
no_M s ->
interpret_action a s postF postA ->
interpret_action a s (fun v s' => postF v s' /\ no_M s')
(fun s' => postA s' /\ no_M s').
Proof.
destruct s, a; cbn; cbv [load store no_M];
cbn -[HList.tuple Memory.load_bytes invalidateWrittenXAddrs];
repeat destruct_one_match; intros; destruct_products; try split;
change removeXAddr with (@List.removeb word word.eqb);
rewrite ?ListSet.of_list_removeb;
intuition eauto 10 using transfer_load4bytes_to_previous_mem, isXAddr4_uninvalidate.
Qed.
Lemma interpret_action_preserves_valid'{memOk: map.ok Mem} a s post :
no_M s ->
interpret_action a s post (fun _ : RiscvMachine => False) ->
interpret_action a s (fun v s' => post v s' /\ no_M s')
(fun _ : RiscvMachine => False).
Proof.
intros. pose proof interpret_action_preserves_valid as P.
specialize (P _ _ _ (fun _ : RiscvMachine => False) H H0). simpl in P.
eapply interpret_action_weaken_post. 3: exact P. all: simpl; intuition eauto.
Qed.
Global Instance MinimalNoMulPrimitivesSane{memOk: map.ok Mem} :
PrimitivesSane MinimalNoMulPrimitivesParams.
Proof.
split; cbv [mcomp_sane valid_machine MinimalNoMulPrimitivesParams]; intros *; intros D M;
(split; [ exact (interpret_action_total' _ st _ D M)
| eapply interpret_action_preserves_valid'; try eassumption;
eapply interpret_action_appendonly''; try eassumption ]).
Qed.
Global Instance MinimalNoMulSatisfiesPrimitives{memOk: map.ok Mem} :
Primitives MinimalNoMulPrimitivesParams.
Proof.
split; try exact _.
all : cbv [mcomp_sat spec_load spec_store MinimalNoMulPrimitivesParams invalidateWrittenXAddrs].
all: intros;
repeat match goal with
| _ => progress subst
| _ => Option.inversion_option
| _ => progress cbn -[Memory.load_bytes Memory.store_bytes HList.tuple]
| _ => progress cbv [valid_register is_initial_register_value load store Memory.loadByte Memory.loadHalf Memory.loadWord Memory.loadDouble Memory.storeByte Memory.storeHalf Memory.storeWord Memory.storeDouble] in *
| H : exists _, _ |- _ => destruct H
| H : _ /\ _ |- _ => destruct H
| |- _ => solve [ intuition (eauto || blia) ]
| H : _ \/ _ |- _ => destruct H
| |- context[match ?x with _ => _ end] => destruct x eqn:?
| |- _ => progress unfold getReg, setReg
| |-_ /\ _ => split
end.
(* setRegister *)
destruct initialL; eassumption.
Qed.
End Riscv.
|
library(magrittr)
#
# add all arguments
#
TestAdd <- function(...){
sum(...)
}
# countrycode <- function(sourcevar, origin, destination, warn = TRUE, nomatch = NA, custom_dict = NULL, custom_match = NULL, origin_regex = FALSE){
# countrycode::countrycode(sourcevar, origin, destination, warn, nomatch, custom_dict, custom_match, origin_regex)
# }
iso.expand <- function(code){
maps::iso.expand(code)
}
graph.plot <- function(data, label=NULL){
# link the graphics to the calling cell, so it won't get erased
# when another cell draws a plot
BERT.graphics.device(cell=T);
# flatten the data and make sure it's numeric
data = as.numeric( unlist( data ));
# draw the plot
plot(data, pch=21, bg="pink", col="red", main=label );
lines( lowess( data ));
# we're done with the graphics device, so shut it off for now
dev.off();
# this is a convenient return value for the calling cell
T;
}
kernel_smooth <- function(object, startyear, startmonth, endyear, endmonth, frequency, kernel_type, bandwidth){
object <- as.data.frame(object) # convert from list to df
object[object == ''] <- NA # NULL values to NAs
object <- zoo::na.locf(object) # last non-missing value carried forward
ts_object <- ts(object, start = c(startyear, startmonth), end = c(endyear, endmonth), frequency)
return(ksmooth(time(ts_object), ts_object, kernel_type, bandwidth, x.points = time(ts_object))$y)
}
StatsGini <- function(x, w = rep(1, length(x))){
# x and w are vectors
# w can be left blank when calling the fn (i.e. no weighting)
# Examples:
# x <- c(3, 1, 7, 2, 5)
# w <- c(1, 2, 3, 4, 5)
# StatsGini(x, w) should yield 0.2983050847
# StatsGini(c(0.25, 0.75), c(1, 1)) should yield 0.25
n <- length(x)
wxsum <- sum(w * x)
wsum <- sum(w)
sxw <- order(x, w) # Ascending order sort
sx <- w[sxw] * x[sxw]
sw <- w[sxw]
pxi <- vector(mode = "numeric", length = n)
pci <- vector(mode = "numeric", length = n)
pxi <- cumsum(sx) / wxsum
pci <- cumsum(sw) / wsum
G <- 0.0
for (i in 2:n){
G <- G - (pci[i] * pxi[i - 1] - pxi[i] * pci[i - 1] )
}
return(G)
}
getFX <- function(currency_1, currency_2){
url <- paste0('https://www.freeforexapi.com/api/live?pairs=', currency_1, currency_2)
rate <- rjson::fromJSON(readLines(url))$rates[[1]][[1]]
return(rate)
}
interpolate <- function(x, y, xout){
approx(x, y, n = length(x))$y[which(x %in% xout)]
}
|
import topologia
open topological_space
open set
variables (X : Type) [topological_space X]
def is_dense {X : Type} [topological_space X] (A : set X) : Prop := closure A = univ
lemma dense_iff (A : set X) : is_dense A ↔ (interior (A.compl) = ∅) := -- why not Aᶜ? then the final refl line would not be necessary
begin
rw is_dense,
rw closure_eq_compl_of_interior_compl,
rw compl_univ_iff,
refl,
end
lemma dense_iff' (A : set X) : is_dense A ↔
∀ x : X, ∀ U : set X, is_neighborhood U x → U ∩ A ≠ ∅ :=
begin
unfold is_dense,
split; intro h,
{
intros x U hUx,
have hx : x ∈ closure A,
{
rw h,
exact mem_univ x,
},
exact hx U hUx,
},
{
simp only [closure_eq_compl_of_interior_compl, compl_univ_iff,
set.eq_empty_iff_forall_not_mem, interior],
intro x,
intro hx,
refine h x Aᶜ hx _,
norm_num,
},
end
def boundary {X : Type} [topological_space X] (A : set X) := closure A ∩ closure Aᶜ
lemma boundary_def (A : set X) : boundary A = (closure A) \ (interior A) :=
begin
rw boundary,
rw closure_eq_compl_of_interior_compl Aᶜ,
rw compl_compl,
refl,
end
lemma mem_boundary_iff (A : set X) (x : X) :
x ∈ boundary A ↔ ∀ U : set X, is_neighborhood U x → (U ∩ A ≠ ∅ ∧ U ∩ A.compl ≠ ∅) :=
begin
split; intro h,
{
intros U hU,
exact ⟨h.1 U hU, h.2 U hU⟩,
},
{
have hx: (is_adherent_point A x) ∧ (is_adherent_point Aᶜ x),
{
split; intros U hU,
exact (h U hU).1,
exact (h U hU).2,
},
exact ⟨hx.1, hx.2⟩,
}
end
class kolmogorov_space : Prop :=
(t0 : ∀ (x y : X) (h : y ≠ x) , ∃ (U : set X) (hU : is_open U), ((x ∈ U) ∧ (y ∉ U)) ∨ ((x ∉ U) ∧ (y ∈ U)))
class frechet_space : Prop :=
(t1 : ∀ (x y : X) (h : y ≠ x), ∃ (U : set X) (hU : is_open U), (x ∈ U) ∧ (y ∉ U)) -- Marc : look up what's the best way to do this
namespace frechet_space
instance T1_is_T0 [frechet_space X] : kolmogorov_space X :=
{ t0 :=
begin
intros x y hxy,
obtain ⟨U, hU, hh⟩ := t1 x y hxy,
use U,
split,
{ exact hU },
{
left,
exact hh,
},
end
}
lemma T1_characterisation : frechet_space X ↔ (∀ (x : X), is_closed ({x} : set X)) :=
begin
split,
{
intros h x,
unfold is_closed,
let I := {U : set X | (x ∉ U) ∧ (is_open U)},
have p : ⋃₀ I = {x}ᶜ,
{
apply subset.antisymm; intros t ht,
{
rcases ht with ⟨A,⟨hxA, hA⟩, htA⟩,
simp,
intro htx,
rw htx at htA,
exact hxA htA,
},
{
have htx := (mem_compl_singleton_iff.mp ht).symm,
replace h := h.t1,
obtain ⟨U, hU, hh⟩ := h t x htx,
exact ⟨U, ⟨hh.2, hU⟩, hh.1⟩,
}
},
rw ← p,
have c : ∀ B ∈ I, is_open B,
finish,
exact topological_space.union I c,
},
{
intros h,
fconstructor,
intros x y hxy,
exact ⟨{y}ᶜ,h y, mem_compl_singleton_iff.mpr (ne.symm hxy), not_not.mpr rfl⟩,
}
end
end frechet_space
class hausdorff_space :=
(t2 : ∀ (x y : X) (h : y ≠ x), ∃ (U V: set X) (hU : is_open U) (hV : is_open V) (hUV : U ∩ V = ∅), (x ∈ U) ∧ (y ∈ V))
namespace hausdorff_space
instance T2_is_T1 [hausdorff_space X] : frechet_space X :=
{ t1 :=
begin
intros x y hxy,
obtain ⟨U, V, hU, hV, hUV, hh⟩ := t2 x y hxy,
rw inter_comm at hUV,
exact ⟨U, hU, ⟨hh.1, (inter_is_not_is_empty_intersection hh.2 hUV)⟩⟩,
end }
end hausdorff_space
class T2_5_space : Prop :=
(t2_5 : ∀ (x y : X) (h : y ≠ x), ∃ (U V: set X), is_open U ∧ is_open V
∧ (closure U) ∩ (closure V) = ∅ ∧ x ∈ U ∧ y ∈ V)
namespace T2_5_space
instance T2_5_is_T2 [T2_5_space X] : hausdorff_space X :=
{ t2 :=
begin
intros x y hxy,
obtain ⟨U, V, hU, hV, hUV, hh⟩ := t2_5 x y hxy,
have hUV₂ : U ∩ V = ∅,
{
apply subset.antisymm,
{
intros t h,
rw ← hUV,
exact ⟨(closure_supset_self U) h.1, (closure_supset_self V) h.2 ⟩,
},
{
exact (U ∩ V).empty_subset,
},
},
exact ⟨U, V, hU, hV, hUV₂, hh⟩,
end }
end T2_5_space
def topology_is_regular := ∀ (x : X) (F : set X) (hF : is_closed F) (hxF: x ∉ F),
∃ (U V : set X) (hU : is_open U) (hV : is_open V) (hUV : U ∩ V = ∅), (x ∈ U) ∧ (F ⊆ V)
class T3_space extends frechet_space X : Prop :=
(regular : topology_is_regular X)
namespace T3_space
open frechet_space
open hausdorff_space
instance T3_is_T2 [T3_space X] : hausdorff_space X :=
{ t2 :=
begin
intros x y hxy,
have H := (T1_characterisation X).1 _inst_2.to_frechet_space y,
have x_notin_y : x ∉ ({y} : set X), by tauto,
obtain ⟨U, V, hU, hV, hUV, hh⟩ := regular x ({y} : set X) H x_notin_y,
rw singleton_subset_iff at hh,
exact ⟨U, V, hU, ⟨hV, ⟨hUV, ⟨hh.1, hh.2⟩⟩⟩⟩,
end}
instance T3_is_T2_5 [T3_space X] : T2_5_space X :=
{ t2_5 :=
begin
intros x y hxy,
obtain ⟨U, V, hU, hV, hUV, hh⟩ := t2 x y hxy,
have hxcV : x ∉ closure V,
{
rw closure_eq_compl_of_interior_compl V,
have hxint := (interior_maximal Vᶜ U hU (subset_compl_iff_disjoint.mpr hUV)),
tauto,
},
obtain ⟨A, B, hA, hB, hAB, hh2 ⟩ := regular x (closure V) (closure_is_closed V) hxcV,
have t : closure A ∩ closure V = ∅,
{
have hBc : is_closed Bᶜ, by simp[hB],
have hcA := subset.trans (subset_closed_inclusion_closure' hBc (subset_compl_iff_disjoint.mpr hAB)) (compl_subset_compl.2 hh2.2),
apply subset.antisymm,
{
rw ← compl_inter_self (closure V),
exact (closure V).inter_subset_inter_left hcA,
},
exact (closure A ∩ closure V).empty_subset,
},
exact ⟨A, V, hA, hV, t, hh2.1, hh.2⟩,
end }
lemma T0_and_regular_is_T3 [kolmogorov_space X] (h: topology_is_regular X) :
T3_space X :=
{
t1 :=
begin
intros x y hxy,
obtain ⟨U, hU, hh⟩ := kolmogorov_space.t0 x y hxy,
cases hh,
exact ⟨U, hU, hh⟩,
{
have hUc : is_closed Uᶜ,
{
rw [is_closed, compl_compl],
exact hU,
},
have hy_not_Uc : y ∉ Uᶜ,
{
intro t,
exact (not_mem_of_mem_compl t) hh.2,
},
obtain ⟨A, B, hA, hB, hAB, hhAB⟩ := h y Uᶜ hUc hy_not_Uc,
exact ⟨B, hB, hhAB.2 hh.1, inter_is_not_is_empty_intersection hhAB.1 hAB⟩,
}
end,
regular := h,
}
lemma T0_and_regular_if_only_if_T3 : (kolmogorov_space X) ∧ (topology_is_regular X) ↔ T3_space X :=
begin
split; intro h,
exact @T0_and_regular_is_T3 X _inst_1 h.1 h.2,
exact ⟨@frechet_space.T1_is_T0 X _inst_1 (@hausdorff_space.T2_is_T1 X _inst_1 (@T3_space.T3_is_T2 X _inst_1 h)), h.regular⟩,
end
end T3_space
def is_normal (X : Type) [topological_space X] :=
∀ (F E : set X) (hF : is_closed F) (hE : is_closed E) (hEF : F ∩ E = ∅),
∃ (U V : set X) (hU : is_open U) (hV : is_open V) (hUV : U ∩ V = ∅), (F ⊆ U) ∧ (E ⊆ V)
class T4_space extends frechet_space X : Prop :=
(normal : is_normal X)
namespace T4_space
open frechet_space
instance T4_is_T3 [T4_space X] : T3_space X :=
{ regular :=
begin
intros x F hF hxF,
obtain ⟨U, V, hU, hV, hUV, hh ⟩ := normal F {x} hF ((T1_characterisation X).1 _inst_2.to_frechet_space x)
(inter_singleton_eq_empty.mpr hxF),
rw inter_comm U V at hUV,
exact ⟨V, U, hV, hU, hUV, hh.2 (mem_singleton x), hh.1⟩,
end
}
end T4_space
class T5_space extends frechet_space X : Prop :=
(t5 : ∀ (A B : set X) (hAB : A ∩ (closure B) = ∅) (hBA : (closure A) ∩ B = ∅), ∃ (U V : set X) (hU : is_open U) (hV : is_open V) (hUV : U ∩ V = ∅), A ⊆ U ∧ B ⊆ V)
namespace T5_space
open frechet_space
instance T5_is_T4 [T5_space X] : T4_space X :=
{ normal :=
begin
intros F E hF hE hFE,
have h₁ : (closure F) ∩ E = ∅,
rwa ← ((eq_closure_iff_is_closed F).2 hF),
have h₂ : F ∩ (closure E) = ∅,
rwa ← ((eq_closure_iff_is_closed E).2 hE),
exact t5 F E h₂ h₁,
end}
end T5_space
|
module Wavefront.Types where
import Numeric.Vector
data WavefrontLine = Comment String
| Vertex Double Double Double
| TexCoord Double Double
| Normal Double Double Double
| Face Int Int Int Int Int Int Int Int Int
deriving (Show)
isComment :: WavefrontLine -> Bool
isComment (Comment _) = True
isComment _ = False
isVertex :: WavefrontLine -> Bool
isVertex (Vertex _ _ _) = True
isVertex _ = False
isTexCoord :: WavefrontLine -> Bool
isTexCoord (TexCoord _ _) = True
isTexCoord _ = False
isNormal :: WavefrontLine -> Bool
isNormal (Normal _ _ _) = True
isNormal _ = False
isFace :: WavefrontLine -> Bool
isFace (Face _ _ _ _ _ _ _ _ _) = True
isFace _ = False
data Vertex_ = Vertex_ Double Double Double deriving (Show)
data TexCoord_ = TexCoord_ Double Double deriving (Show)
data Normal_ = Normal_ Double Double Double deriving (Show)
data Face_ = Face_ Int Int Int Int Int Int Int Int Int deriving (Show)
vertexFromLine :: WavefrontLine -> Vertex_
vertexFromLine (Vertex a b c) = Vertex_ a b c
vertexFromLine _ = error "vertexFromLine is a partial function: it only accepts vertex lines"
texCoordFromLine :: WavefrontLine -> TexCoord_
texCoordFromLine (TexCoord a b) = TexCoord_ a b
texCoordFromLine _ = error "texCoordFromLine is a partial function: it only accepts tex coord lines"
normalFromLine :: WavefrontLine -> Normal_
normalFromLine (Normal a b c) = Normal_ a b c
normalFromLine _ = error "normalFromLine is a partial function: it only accepts normal lines"
faceFromLine :: WavefrontLine -> Face_
faceFromLine (Face a b c d e f g h i) = Face_ a b c d e f g h i
faceFromLine _ = error "faceFromLine is a partial function: it only accepts face lines"
vertexToVec :: Vertex_ -> Vec3d
vertexToVec (Vertex_ a b c) = vec3 a b c
texCoordToVec :: TexCoord_ -> Vec2d
texCoordToVec (TexCoord_ a b) = vec2 a b
normalToVec :: Normal_ -> Vec3d
normalToVec (Normal_ a b c) = vec3 a b c
|
In a review of contemporary criticism of "Ode to a Nightingale" in 1998, James O'Rourke claimed that "To judge from the volume, the variety, and the polemical force of the modern critical responses engendered, there have been few moments in English poetic history as baffling as Keats's repetition of the word 'forlorn'". When referring to the reliance on the ideas of John Dryden and William Hazlitt within the poem, Poet Laureate Andrew Motion, in 1999, argued that it "perfectly illustrates" Hazlitt's "notion of poetry as a 'movement' from personal consciousness to an awareness of suffering humanity".
|
#include "eiscor.h"
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! z_urffact_deflationcheck
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! This routine checks for deflations in a unitary upper hessenberg matrix that
! is stored as a product of N Givens rotations, without computing square roots.
!
! | u1 -v1 |
! | v1 conj(u1) | | u2 -v2 |
! | v2 conj(u2) | | u3 -v3 | | 1 0 |
! | v3 conj(u3) | | 0 u4 |
!
! The square root free algorithm only requires the storage of the vi^2,
! so the arrays U and VV contain the following:
!
! U(i) = ui
! VV(i) = vi^2
!
! The input must satisfy the following:
!
! |U(i)|^2 + VV(i) = 1
! VV(N) = 0
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! INPUT VARIABLES:
!
! N INTEGER
! dimension of matrix
!
! U COMPLEX(8) array of dimension N
! array of complex generators for Givens rotations
!
! VV REAL(8) array of dimension N
! array of real generators (squared) for Givens rotations
!
! OUTPUT VARIABLES:
!
! ZERO INTEGER
! smallest index such that VV(i) < tol (0 if no such index exists)
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine z_urffact_deflationcheck(N,U,VV,ZERO)
implicit none
! input variables
integer, intent(in) :: N
integer, intent(inout) :: ZERO
complex(8), intent(inout) :: U(N)
real(8), intent(inout) :: VV(N)
! compute variables
integer :: ii
real(8), parameter :: tol = (EISCOR_DBL_EPS)**2
real(8) :: xx
  ! initialize ZERO
ZERO = 0
! check for deflation
do ii=1,N
! deflate if subdiagonal is small enough
if (VV(N+1-ii) < tol) then
! set ZERO
ZERO = N+1-ii
! set rotation to diagonal
VV(ZERO) = 0d0
! renormalize U
xx = dble(U(ZERO))**2 + aimag(U(ZERO))**2
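      ! one Newton step for the inverse square root: with xx = |U|^2 near 1,
      ! U*(3-xx)/2 has modulus even closer to 1, avoiding an explicit sqrt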
U(ZERO) = 5d-1*U(ZERO)*(3d0-xx)
end if
end do
end subroutine z_urffact_deflationcheck
|
-- import tactic
/-
Step 1:
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p)(h₂ : q ∧ r),
_
p q r : Prop,
h₁ : p,
h₂ : q ∧ r
⊢ p ∧ q
Step 2:
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p)(h₂ : q ∧ r),
have h₃ : q, from and.left h₂,
_
p q r : Prop,
h₁ : p,
h₂ : q ∧ r,
h₃ : q
⊢ p ∧ q
Step 3:
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p)(h₂ : q ∧ r),
have h₃ : q, from and.left h₂,
show _, from and.intro _ _
p q r : Prop,
h₁ : p,
h₂ : q ∧ r,
h₃ : q
⊢ p <=> h₁
⊢ q <=> h₃
-/
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p)(h₂ : q ∧ r),
have h₃ : q, from and.left h₂,
show p ∧ q, from and.intro h₁ h₃
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume : p,
assume : (q ∧ r),
have q, from and.left this,
show p ∧ q, from and.intro ‹p› this -- ‹ = \flq, › = \frq
-- ‹p› means (by assumption : p)
-- ‹_› means (by assumption : _) means (by assumption)
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume : p,
assume : (q ∧ r),
have q, from and.left this,
show p ∧ q, from and.intro (by assumption : p) this
/-
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p) (h₂ : q ∧ r),
suffices h₃ : q, from _
p q r : Prop,
h₁ : p,
h₂ : q ∧ r,
h₃ : q
⊢ p ∧ q
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p) (h₂ : q ∧ r),
suffices h₃ : q, from and.intro h₁ h₃,
_
p q r : Prop,
h₁ : p,
h₂ : q ∧ r
⊢ q <=> h₂.left
-/
example (p q r : Prop) : p → (q ∧ r) → p ∧ q :=
assume (h₁ : p) (h₂ : q ∧ r),
suffices h₃ : q, from and.intro h₁ h₃,
show q, from h₂.left
/-
Lean also supports calculational environment, which is
introduced with the keyword calc. The syntax is as follows:
calc
<expr>_0 'op_1' <expr>_1 ':' <proof>_1
'...' 'op_2' <expr>_2 ':' <proof>_2
...
'...' 'op_n' <expr>_n ':' <proof>_n
-/
variables (a b c d e : ℕ)
variable h1 : a = b
axiom h2 : b = c + 1
variable h3 : c = d
variable h4 : e = 1 + d
/-
Step 1:
theorem T : a = e :=
calc
a = b : _ -- ⊢ a = b
... = c + 1 : _ -- ⊢ b = c + 1
... = d + 1 : _ -- ⊢ c + 1 = d + 1
... = 1 + d : _ -- ⊢ d + 1 = 1 + d
... = e : _ -- ⊢ 1 + d = e
Step 2:
-/
-- Equivalent ways of writing proof:
-- 1. add_comm d _
-- 2. by exact add_comm d _
-- 3. by { exact add_comm d _, }
-- 4. begin exact add_comm d _, end
theorem T : a = e :=
calc
a = b : h1 -- ⊢ a = b
... = c + 1 : h2 b c -- ⊢ b = c + 1
... = d + 1 : congr_arg nat.succ h3 -- ⊢ c + 1 = d + 1
... = 1 + d : add_comm d _ -- ⊢ d + 1 = 1 + d, _ <=> (1 : ℕ)
... = e : h4.symm -- ⊢ 1 + d = e
-- congr_arg : ∀ {α β : Type} {a₁ a₂ : α}
-- (f : α → β), a₁ = a₂ → f a₁ = f a₂
-- add_comm : ∀ {α : Type} [_inst_1 : add_comm_semigroup α]
-- (a b : α), a + b = b + a
-- eq.symm : ∀ {α : Type} {a b : α},
-- a = b → b = a
/-
meta def f : ℕ → bool
| 0 := bor (f 1) (f 2)
| (nat.succ n) := f n
-/
constant f : nat → bool
-- Lean: invalid definition, it uses untrusted declaration 'f'
axiom f₀ : f 0 = bor (f 1) (f 2)
axiom f_ind : ∀ n, n > 0 → f (n + 1) = f n
-- let f 0 = false => f 1 ∨ f 2 = false => f 1 = false, f 2 = false
-- ∀ n, n > 0 → f (n + 1) = f n => f 3 = f 2 = false, f 4 = f 3 = false, ... , f (n : nat, n > 0) = false = f 0
-- let f 0 = true => f 1 ∨ f 2 = true
-- ∀ n, n > 0 → f (n + 1) = f n => f 2 = f 1 => f 1 ∨ f 1 = true => f 1 = true => f 2 = true => ... => f (n : nat, n > 0) = true = f 0
-- => lemma ∀ n, n > 0 → f n = f 0
set_option trace.simplify.rewrite true
lemma fn_eq_f0 : ∀ n, n > 0 → f n = f 0 :=
begin
intros n n_gt_0,
have h : n > 0 → n = 1 ∨ n > 1 := sorry,
cases (h n_gt_0) with h_n_eq_1 n_gt_1,
rw h_n_eq_1,
have h₀ : f 0 = tt ∨ f 0 = ff := sorry,
cases h₀,
rw f₀,
suffices f₂_true : f 2 = tt,
rw f₂_true,
-- simp, -- [simplify.rewrite] [bor_tt]: f 1 || tt ==> tt
rw bor_tt,
by_contradiction H,
-- simp at H, -- [simplify.rewrite] [eq_ff_eq_not_eq_tt]: ¬f 1 = tt ==> f 1 = ff
rw eq_ff_eq_not_eq_tt at H,
have ind₁ := f_ind 1,
change 1 > _ → f 2 = f 1 at ind₁,
have one_gt_zero : 1 > 0 := sorry,
have f₁_eq_f₂ := ind₁ one_gt_zero, clear ind₁,
rw f₁_eq_f₂ at f₂_true,
rw f₂_true at H,
contradiction,
all_goals { sorry, },
end
/-
kernel failed to type check declaration 'fn_eq_f0' this is usually due to a buggy tactic or a bug in the builtin elaborator
elaborated type:
∀ (n : ℕ), n > 0 → f n = f 0
elaborated value:
λ (n : ℕ) (n_gt_0 : n > 0), sorry
nested exception message:
invalid definition, it uses untrusted declaration 'f'
-/
-- ************************************************************************************************************************************
-- axiom f₀ : f 0 = bor (f 1) (f 2)
-- axiom f_ind : ∀ n, n > 0 → f (n + 1) = f n
-- let f 0 = false => f 1 ∨ f 2 = false => f 1 = false, f 2 = false
-- ∀ n, n > 0 → f (n + 1) = f n => f 3 = f 2 = false, f 4 = f 3 = false, ... , f (n : nat, n > 0) = false = f 0
-- let f 0 = true => f 1 ∨ f 2 = true
-- ∀ n, n > 0 → f (n + 1) = f n => f 2 = f 1 => f 1 ∨ f 1 = true => f 1 = true => f 2 = true => ... => f (n : nat, n > 0) = true = f 0
-- => lemma ∀ n, n > 0 → f n = f 0
-- ************************************************************************************************************************************
-- ih : n > 0 → f n = f 0
-- ⊢ n.succ > 0 → f n.succ = f 0
lemma induction_lemma_false_case (f₀_false : f 0 = ff) (f₁_false : f 1 = ff)
(n : ℕ): (n > 0 → f n = f 0) → n.succ > 0 → f n.succ = f 0 := sorry
lemma fn_eq_f0'' : ∀ n, n > 0 → f n = f 0 :=
begin
intro n,
have bool_f₀ : f 0 = ff ∨ f 0 = tt, from sorry,
cases bool_f₀ with f₀_false f₀_true,
{ -- f₀_false : f 0 = ff
have H : bor (f 1) (f 2) = ff, {
rw <-f₀_false,
apply f₀.symm,
},
have bool_lemma : ∀ (a b : bool), a || b = ff = (a = ff ∧ b = ff), {
intros a b,
-- simp, -- [bor_eq_false_eq_eq_ff_and_eq_ff]: a || b = ff ==> a = ff ∧ b = ff
rw bor_eq_false_eq_eq_ff_and_eq_ff,
-- library_search, -- not return a result, still calculating...
},
have h2 := bool_lemma (f 1) (f 2),
rw h2 at H,
type_check H.1, -- f 1 = ff
type_check H.2, -- f 2 = ff
have f₁_false := H.1, have f₂_false := H.2, clear H h2 bool_lemma,
induction n with n ih,
have h := eq.refl (f 0),
-- have false_imp : ∀ (a : Prop), false → a, {
-- intro a,
-- rw false_implies_iff, trivial,
-- },
have zero_ge_zero_is_false : 0 > 0 = false, {
-- apply _,
sorry,
},
rw zero_ge_zero_is_false,
intro, exfalso, exact a,
exact induction_lemma_false_case f₀_false f₁_false n ih,
},
{ -- f₀_true : f 0 = tt
sorry,
},
end
lemma fn_eq_f0' : ∀ n, n > 0 → f n = f 0 :=
begin
intro n,
induction n with n ih,
case nat.zero {
-- ⊢ 0 > 0 → f 0 = f 0
simp,
-- [nat.nat_zero_eq_zero]: 0 ==> 0
-- [eq_self_iff_true]: f 0 = f 0 ==> true
-- [implies_true_iff]: 0 > 0 → true ==> true
},
case nat.succ {
-- ⊢ n.succ > 0 → f n.succ = f 0
have bool_f₀ : f 0 = tt ∨ f 0 = ff, from sorry,
cases bool_f₀ with f₀_true f₀_false,
case or.inl {
rw f₀_true,
rw f₀_true at ih,
sorry,
},
case or.inr {
-- rw f₀_false,
rw f₀_false at ih,
intro h_nsucc,
-- have H: f n = ff → f n.succ = ff, sorry,
have H := f_ind n,
change n > 0 → f n.succ = f n at H,
have h_gt_nat : n = 0 ∨ n > 0, sorry,
cases h_gt_nat,
case or.inl {
rw h_gt_nat,
sorry,
},
case or.inr {
sorry,
},
},
},
end
|
theory Star imports MainRLT
begin
inductive
star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
for r where
refl: "star r x x" |
step: "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
hide_fact (open) refl step \<comment> \<open>names too generic\<close>
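text \<open>star r is the reflexive transitive closure of r: the least reflexive
and transitive relation containing r.\<close>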
lemma star_trans:
"star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
proof(induction rule: star.induct)
case refl thus ?case .
next
case step thus ?case by (metis star.step)
qed
lemmas star_induct =
star.induct[of "r:: 'a*'b \<Rightarrow> 'a*'b \<Rightarrow> bool", split_format(complete)]
declare star.refl[simp,intro]
lemma star_step1[simp, intro]: "r x y \<Longrightarrow> star r x y"
by(metis star.refl star.step)
code_pred star .
end
|
## Inverse Kinematics Calculations For The Serial Link Arm
```python
from sympy import *
import numpy as np
```
```python
# pass the dh parameters matrix in order to construct
# the A transformation for each frame to the one before
def construct_a_matrices(dh_matrix):
a_matrices = []
for i in range(0, dh_matrix.rows):
alpha = dh_matrix.row(i)[0]
a = dh_matrix.row(i)[1]
theta = dh_matrix.row(i)[2]
d = dh_matrix.row(i)[3]
        # following the default model of the A matrix
a_trans = Matrix([[cos(theta), -sin(theta)*cos(alpha), sin(theta)*sin(alpha) , a*cos(theta)],
[sin(theta), cos(theta)*cos(alpha) , -cos(theta)*sin(alpha), a*sin(theta)],
[0 , sin(alpha) , cos(alpha) , d ],
[0 , 0 , 0 , 1 ]])
a_matrices.append(a_trans)
return a_matrices
```
```python
# create transformation matrices from each frame to origin by
# multiplying each a matrix to one another iteratively
def create_frame_to_origin_transformations(a_matrices):
# start with an identity matrix because we need something to multiply the first matrix by
trans = eye(4)
# keep an array of each transformation matrix to construct jacobians later
transformation_matrices = [trans]
for matrix in a_matrices:
trans = trans*matrix
transformation_matrices.append(trans)
return transformation_matrices
```
```python
# construct the jacobian given the transformation matrices for each frame to the world frame
# grab the z and o vectors for the previous and current frames respectively
def construct_jacobian(transformation_matrices):
o_n = transformation_matrices[-1]*Matrix([0, 0, 0, 1])
o_n.row_del(3)
jacobian = Matrix()
    # start from index 1 and stop before the last frame so we get one
    # jacobian column per joint (six, for the six revolute joints);
    # index 0 holds the identity matrix and the final entry is the fixed
    # end-effector frame, which has no joint of its own
for i in range(1, len(transformation_matrices)-1):
# get the z_i-1 vector from the transformation matrix of the last frame
z = transformation_matrices[i-1]*Matrix([0, 0, 1, 0])
z.row_del(3)
o_i = transformation_matrices[i]*Matrix([0, 0, 0, 1])
o_i.row_del(3)
jacobian_column = z.cross(o_n - o_i).col_join(z)
jacobian = jacobian.row_join(jacobian_column)
return jacobian
```
```python
def generate_linear_slopes():
    # Generate the linear velocities that will be used in conjunction with the instantaneous jacobian
# to derive the q_dot values
dx = []
dy = []
dz = []
dth = []
dphi = []
drho = []
    # circular arc parametrised by phi(t) = pi/2 - pi/2*t:
    #   x = 0.48*cos(phi(t))  =>  dx =  0.24*pi*sin(phi(t))
    #   z = 0.48*sin(phi(t))  =>  dz = -0.24*pi*cos(phi(t))
for t in np.arange(0, 1, 0.02):
dx.append(0.24*pi*sin(pi/2 - pi/2*t))
dy.append(0)
dz.append(-0.24*pi*cos(pi/2 - pi/2*t))
dth.append(0)
dphi.append(0)
drho.append(0)
return dx, dy, dz, dth, dphi, drho
```
```python
def generate_poses(jacobian):
pose = Matrix([0.01, 0, 0, 0, 0, 0])
poses = [pose]
dx, dy, dz, dth, dphi, drho = generate_linear_slopes()
for i in range(len(dx)):
inst_jacobian = jacobian.subs(theta_1, pose[0]).subs(theta_2, pose[1]).subs(theta_3, pose[2]).subs(theta_4, pose[3]).subs(theta_5, pose[4]).subs(theta_6, pose[5])
inv = N(inst_jacobian.pinv())
linear_slopes = Matrix([dx[i], dy[i], dz[i], dth[i], dphi[i], drho[i]])
dq = N(inv*linear_slopes)
pose = pose + (dq*0.02)
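        # note: this wraps each joint angle into [0, pi/2); presumably a crude joint-limit scheme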
pose = pose % N(pi/2)
poses.append(pose)
print(pose)
return poses
```
```python
def get_end_affector_position(ee_frame, poses):
ee_positions = [[],[],[]]
for pose in poses:
ee_pos = ee_frame.subs(theta_1, pose[0]).subs(theta_2, pose[1]).subs(theta_3, pose[2]).subs(theta_4, pose[3]).subs(theta_5, pose[4]).subs(theta_6, pose[5])
ee_pos = ee_pos*Matrix([0, 0, 0, 1])
ee_positions[0].append(ee_pos[0])
ee_positions[1].append(ee_pos[1])
ee_positions[2].append(ee_pos[2])
print(ee_pos)
return ee_positions
```
```python
# d1 = symbols('d1')
# d3 = symbols('d3')
# d5 = symbols('d5')
# d7 = symbols('d7')
d1 = 0.045
d3 = 0.06
d5 = 0.06
d_e = 0.06
theta_1 = symbols('theta_1')
theta_2 = symbols('theta_2')
theta_3 = symbols('theta_3')
theta_4 = symbols('theta_4')
theta_5 = symbols('theta_5')
theta_6 = symbols('theta_6')
dh_matrix = Matrix([[-pi/2, 0,theta_1, d1],
[pi/2, 0, theta_2, 0],
[-pi/2, 0, theta_3, d3],
[pi/2, 0, theta_4, 0],
[pi/2, 0, theta_5, d5],
[-pi/2, 0, theta_6, 0],
[0, 0, 0, d_e]])
```
```python
# calculate the parametric jacobian based on the transformation matrices
a_matrices = construct_a_matrices(dh_matrix)
transformation_matrices = create_frame_to_origin_transformations(a_matrices)
j = construct_jacobian(transformation_matrices)
j.shape
```
(6, 6)
```python
# generate poses for each iteration to draw the circle
poses = generate_poses(j)
```
Matrix([[0.0100000000000000], [0.125657423010650], [0], [1.57079632679490], [0], [0.125657423010650]])
Matrix([[1.56075282374024], [0.375857122914374], [0.0198854698299375], [1.29694777426121], [1.56828433468810], [0.102008570380685]])
Matrix([[1.09391350354244], [0.493221589581401], [0.650222683133030], [1.31195143077325], [0.0855221894918294], [1.07715039721471]])
Matrix([[0.640763623813308], [0.610850376053435], [1.11035778669689], [1.31350555799317], [1.40678730086497], [1.04232234731442]])
Matrix([[0.847433484384496], [0.305745821270091], [0.281281873706792], [1.25101465635065], [0.319150309762524], [0.479664640790586]])
Matrix([[0.246210808920798], [0.476918058471577], [0.923541630001283], [1.12420116916009], [0.0873334741293456], [0.466390909274904]])
Matrix([[0.180458386234092], [0.725633736181526], [1.07835748480291], [0.922832257929055], [1.42112790219038], [0.390630055824352]])
Matrix([[0.939631619944492], [0.112190991099110], [0.249224810438225], [0.619529641220237], [0.00616796136767972], [0.959558449656254]])
Matrix([[0.608567950326463], [0.199508405942403], [0.888695840422153], [0.666697323703128], [1.18381874936172], [1.03882893282967]])
Matrix([[0.964285842359804], [0.474687698925925], [1.16244431151642], [0.727824805079314], [0.444224783037064], [1.16515918666748]])
Matrix([[0.515030885372887], [0.632802629318841], [0.252927697415419], [0.863720022135626], [0.0980560005675505], [1.17648765311034]])
Matrix([[0.348505959206894], [0.891273584945744], [0.514917993963746], [0.568328302687121], [1.46422541992667], [1.10636190666767]])
Matrix([[0.976944990583480], [0.290898462546116], [0.555585013221418], [0.0389867522446459], [1.56181898288686], [0.708561794651358]])
Matrix([[1.18986755157185], [1.06761663528122], [0.252190916607992], [1.44422371576618], [1.09059270137225], [0.0411724093788433]])
Matrix([[0.997766673207087], [1.19405188455814], [0.325748234672872], [1.34113417476271], [0.900040629342958], [1.56294582166674]])
Matrix([[0.894638399497665], [1.33115148174290], [0.300921967858428], [1.16909953598128], [0.783139935322721], [1.44577118171870]])
Matrix([[0.843242849955560], [1.49043386574326], [0.229868231185839], [0.917530426756795], [0.728349220092267], [1.28457130425739]])
Matrix([[0.850532821847018], [0.123853925193258], [0.0711710651528545], [0.549377590569730], [0.793115473230840], [1.05991874696815]])
Matrix([[0.764589595016072], [0.105926348921598], [1.21326370513764], [0.818430442000863], [1.23508746576673], [1.37749002173677]])
Matrix([[1.46328272900125], [0.687471338174700], [0.240267510190296], [1.18312464495649], [1.39838177030996], [0.324810790655836]])
Matrix([[0.176910541201835], [0.906369782003361], [0.680838068304085], [1.27313289833343], [0.231598985574685], [0.956461387722839]])
Matrix([[0.161314616758586], [1.15307747296134], [0.725817967575662], [1.02752852313354], [0.0636463346019121], [0.893141040914529]])
Matrix([[0.182081527896010], [1.42117736383739], [0.806738715580369], [0.686237850803555], [1.44807993671462], [0.764716256399802]])
Matrix([[0.137718846450654], [0.282140024059692], [1.38115413651318], [0.134222392462246], [0.588172491503530], [0.291200746670347]])
Matrix([[1.37596067291325], [0.905660065905527], [0.738044850119850], [0.253049967762163], [1.41148944048147], [1.32161563177526]])
Matrix([[0.521133044780267], [0.641396217707942], [0.447930797220415], [0.900615415875721], [0.0268315318479591], [0.260020769298386]])
Matrix([[0.342002087349728], [0.830326414855098], [0.735388890856346], [0.850144570559477], [1.36839397054643], [0.333449305016953]])
Matrix([[0.181614807636082], [1.11805316296643], [1.19038212229768], [0.731644747325902], [0.928551911841980], [0.410279366405293]])
Matrix([[0.262045849269241], [1.27881433210244], [1.12086834772089], [0.519504540144115], [0.872313086131278], [0.267887752737694]])
Matrix([[0.453599785436378], [1.39240357800790], [0.692808887652767], [0.133052850605262], [1.18486045383867], [0.000629789376371703]])
Matrix([[0.647949404743392], [1.03896114641278], [0.609417512719872], [0.153198571054534], [0.942193863736655], [1.24782839701373]])
Matrix([[0.441690777371832], [1.27397976400734], [1.45083468855319], [0.116862520242256], [0.189628155933800], [1.34071935510320]])
Matrix([[0.873388998222769], [1.47908656764929], [0.300100805794913], [0.787072075172467], [1.19984376704581], [0.866023599942868]])
Matrix([[1.00918236930511], [0.123735941283749], [1.47541437046691], [0.453735413763830], [1.51663064733385], [0.624519979898768]])
Matrix([[0.853377042360593], [1.14102080732104], [0.869951601543221], [1.45937071460924], [0.598641150354837], [0.0742613255892923]])
Matrix([[0.744210521299270], [1.38720929744223], [0.960996750141220], [1.40173099106571], [0.342982113306581], [0.104851049980273]])
Matrix([[0.683161972819005], [0.0472221131180499], [0.997119890963837], [1.28235006404299], [0.118383089267821], [0.0661997144110104]])
Matrix([[0.969428959124233], [0.122550857024590], [0.777492073837619], [0.0203946944103675], [1.53853337163988], [0.304383475172019]])
Matrix([[0.475897165974242], [0.531312262022006], [0.965860579890730], [0.170875268485940], [0.495911122002974], [1.16919441149506]])
Matrix([[1.39207938377324], [0.604001461883059], [0.619328644875219], [0.459624398323292], [1.17533241779454], [0.705165197043290]])
Matrix([[0.975842911715185], [1.03248223511935], [0.629910740532554], [1.19162744541077], [1.19488255968328], [0.239691535816463]])
Matrix([[0.652586474040967], [1.37545922055700], [1.22972548201983], [1.17484183829726], [0.638174031788457], [0.503571345028219]])
Matrix([[0.614886646767109], [0.0521982696625329], [1.33845455357956], [1.12530627220090], [0.372419452270456], [0.501593714969615]])
Matrix([[0.526149185076332], [0.185245236388114], [0.0487326485479382], [1.52752684280470], [0.135789990779183], [0.790222511557682]])
Matrix([[0.220657776544418], [0.262141833423202], [0.383782024046262], [0.118037505780306], [0.0743946443699479], [1.02778061102915]])
Matrix([[0.00869770336464892], [0.191868805012690], [0.188521151110145], [0.775002509701590], [0.364334176847529], [1.41385775844273]])
Matrix([[0.668164508814754], [0.0552644144322236], [1.41725705951355], [1.28298319222326], [0.0240927665871974], [0.208260864292887]])
Matrix([[0.638607301661883], [0.0825374726719250], [1.46287287060499], [0.0548511103962346], [1.55083049463255], [0.467813810113437]])
Matrix([[0.617001669979516], [1.07362158089160], [1.01873552763955], [1.28119269358469], [0.817737922416638], [0.140681848436481]])
Matrix([[0.503030515481456], [1.38784245521152], [1.30106729123832], [1.32080452974128], [0.445886765796359], [0.314852772433424]])
```python
ee_positions = get_end_affector_position(transformation_matrices[-1], poses)
```
Matrix([[0], [0], [0.225000000000000], [1]])
Matrix([[0.127040197050682], [0.00127044431893310], [0.0970073093284197], [1]])
Matrix([[0.00523530509117013], [0.141138032949372], [0.0886025870091373], [1]])
Matrix([[0.0177591895210178], [0.121516426251246], [0.134179551049453], [1]])
Matrix([[0.0648088676887354], [0.115129826646921], [0.0717017372993884], [1]])
Matrix([[0.0713209321513392], [0.109770540518599], [0.130542521490901], [1]])
Matrix([[0.0908440695998709], [0.0960894906089300], [0.137419211438939], [1]])
Matrix([[0.120095610035561], [0.0916853990439236], [0.102605152173677], [1]])
Matrix([[0.0166825167652608], [0.0285253517938070], [0.207756066045047], [1]])
Matrix([[0.0729910063287330], [0.0524663955292321], [0.172859456867096], [1]])
Matrix([[0.0490649754536558], [0.0840041778759271], [0.170091868691315], [1]])
Matrix([[0.0989586988684691], [0.0578332290458908], [0.154234410367544], [1]])
Matrix([[0.145520697451414], [0.0254596563987332], [0.0818142681924195], [1]])
Matrix([[0.0651585221795514], [0.0407779185226704], [0.196715001278094], [1]])
Matrix([[0.0200125311651150], [0.124029568577982], [-0.0195204282793890], [1]])
Matrix([[0.0918854423297801], [0.0900191796672607], [0.0271172371139123], [1]])
Matrix([[0.105182817610025], [0.0886037507305386], [0.0203078242418197], [1]])
Matrix([[0.117575686844585], [0.0879233415060563], [0.0136464994222533], [1]])
Matrix([[0.0515441985247222], [0.00399192044924517], [0.196958935191518], [1]])
Matrix([[0.0447490997171089], [0.0665407528387143], [0.160403218018485], [1]])
Matrix([[0.00920148413190823], [0.151678680493691], [0.0590977398978500], [1]])
Matrix([[0.136774099359891], [0.0644399814900791], [0.0748999797821916], [1]])
Matrix([[0.148292024586531], [0.0618568394570014], [0.0635654742828382], [1]])
Matrix([[0.146920702439021], [0.0432121181291394], [-0.00518477430145852], [1]])
Matrix([[0.0584253734525563], [0.00778346687748927], [0.213591216302808], [1]])
Matrix([[0.0612063521039920], [0.124829616380768], [0.0920511378783815], [1]])
Matrix([[0.109783322515429], [0.103985753869588], [0.116670662604806], [1]])
Matrix([[0.131278730426149], [0.0921759742907229], [0.0833320797583264], [1]])
Matrix([[0.145648301143304], [0.0819867611758608], [0.0747752757393518], [1]])
Matrix([[0.153660621105167], [0.0820227846985233], [0.0620147242022114], [1]])
Matrix([[0.155809177116022], [0.0872289203095977], [0.0646866775175616], [1]])
Matrix([[0.132141787762297], [0.0376203489938380], [0.109769864368519], [1]])
Matrix([[0.143376433569517], [0.0131960963140482], [0.0809921023306253], [1]])
Matrix([[0.112505596295642], [0.0977653696665312], [-0.0100702267912020], [1]])
Matrix([[-0.00471332397939569], [0.0720931152391357], [0.197455394294148], [1]])
Matrix([[0.000367128644904118], [0.135932504400927], [0.00575794799872472], [1]])
Matrix([[0.00727023966244284], [0.135398699086212], [-0.00687136664585357], [1]])
Matrix([[-0.00843847369366440], [0.116037931886342], [0.139833633055836], [1]])
Matrix([[0.0295761916603121], [0.0227105650393253], [0.219201125949866], [1]])
Matrix([[0.0892037797114630], [-0.00199845539008608], [0.173502400815395], [1]])
Matrix([[0.0329702728541783], [0.127385708975365], [0.150512073192005], [1]])
Matrix([[0.0357908757188002], [0.147239533314291], [0.0191041786234398], [1]])
Matrix([[0.0539320087779406], [0.146491381998034], [0.0221587384071794], [1]])
Matrix([[-0.0182473567391240], [0.0914213955300086], [0.175976821560096], [1]])
Matrix([[0.102809821244796], [0.0586917250908395], [0.131292568834296], [1]])
Matrix([[0.00987286777825334], [-0.0168221202321530], [0.205337924869988], [1]])
Matrix([[0.0409951695590224], [-0.0187029561477222], [0.188150707459317], [1]])
Matrix([[-0.0494732452078164], [0.0997582762697323], [0.149477203069639], [1]])
Matrix([[0.0318564609900912], [0.0270611919836883], [0.215554179343506], [1]])
Matrix([[0.0427507116778423], [0.144126632247794], [0.0359249702978986], [1]])
Matrix([[0.0467621242536236], [0.143497601799296], [0.0279154230642619], [1]])
```python
from matplotlib import pyplot as plt
plt.scatter(ee_positions[0], ee_positions[2])
# it's skewing to the left by about 11, not sure why
```
```python
```
|
function v = value( x, data )
global cvx___
if nargin == 1,
data = cvx___.x;
end
nx = size( data, 1 );
nb = size( x.basis_, 1 );
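% pad with NaNs, or truncate, so the data length matches the basis row count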
if nx < nb,
data( end + 1 : nb, : ) = NaN;
elseif nx > nb,
data( nb + 1 : end, : ) = [];
end
v = cvx_reshape( data.' * x.basis_, x.size_ );
if any( x.size_ == 1 ), v = full( v ); end
% Copyright 2010 Michael C. Grant and Stephen P. Boyd.
% See the file COPYING.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
# The Runge-Kutta Method, Higher-Order ODEs and Multistep Methods
By Thomas P Ogden (<[email protected]>)
_Here we introduce the classical Runge-Kutta method, go beyond first-order ODEs, and take a first look at multistep methods._
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
In Part 1 we looked at the Explicit Euler method, and checked that it had a global order of accuracy $\mathcal{O}(h^1)$. Now we'll again take an ODE $y' = f(t,y(t))$ with an initial condition $y(t_0) = y_0$ and look at more advanced numerical methods for solving the problem.
## The Runge-Kutta Method
For the Explicit Euler method we truncated the Taylor expansion after the linear term. It's clear that including more terms before truncation will give us a higher-order approximation. The classical Runge-Kutta method does just this, up to an order of accuracy $\mathcal{O}(h^4)$ — 3 orders higher than Explicit Euler! We're not going to derive the approximation here but you can look it up if you're interested. The finite difference step is given by
$$
y_{n+1} = y_n + \tfrac{h}{6} \left( k_1 + 2k_2 + 2k_3 + k_4 \right),
$$
where
$$
\begin{align}
k_1 &= f(t_n, y_n), \\
k_2 &= f(t_n + \tfrac{h}{2}, y_n + \tfrac{h}{2}k_1), \\
k_3 &= f(t_n + \tfrac{h}{2}, y_n + \tfrac{h}{2}k_2), \\
k_4 &= f(t_n + h, y_n + h k_3).
\end{align}
$$
### Implementing the Method in Python
Just like we did with the Explicit Euler method, we'll define a function to implement the Runge-Kutta method for a first-order ODE system.
```
def ode_int_rk(func, y_0, t, args={}):
""" Classical Runge-Kutta (RK4) approximation to a first-order ODE system
with initial conditions.
Args:
func: (callable) The first-order ODE system to be approximated.
y_0: (array) The initial condition.
t: (array) A sequence of time points for which to solve for y.
args: (dict) Extra arguments to pass to function.
Out:
y: (array) the approximated solution of the system at each time in t,
with the initial value y_0 in the first row.
"""
# Initialise the approximation array
y = np.zeros([len(t), len(y_0)])
y[0] = y_0
# Loop through the time steps, approximating this step from the prev step
for i, t_i in enumerate(t[:-1]):
h = t[i+1] - t_i # size of the step
k_1 = func(t_i, y[i], args)
k_2 = func(t_i+h/2., y[i]+h/2.*k_1, args)
k_3 = func(t_i+h/2., y[i]+h/2.*k_2, args)
k_4 = func(t_i+h, y[i]+h*k_3, args)
y[i+1] = y[i] + h/6.*(k_1 + 2.*k_2 + 2.*k_3 + k_4) # RK4 step
return y
```
### Checking Accuracy
We'll define our test function `exp` just as we did in Part 1.
```
from exp import exp # same function we defined in Part 1.
```
And just as we did for the Explicit Euler, we'll use this test function to check the order of accuracy of the method by looking over a wide range of stepsizes.
```
y_0 = np.array([1.]) # Initial condition
solve_args = {}
solve_args['a'] = 1.
t_max = 5.
# Range of stepsizes
max_N = 12
N = 2**np.arange(2, max_N) # N = 4, 8, 16, ..., 2^(max_N-1)
order_check = 4 # for visual check of the order of accuracy
y_end = np.zeros(len(N)) # array to fill with the final values
stepsize = np.zeros(len(N)) # array to fill with the stepsizes
for i, N_i in enumerate(N): # loop over different numbers of steps
t = np.linspace(0., t_max, N_i+1)
y_end[i] = ode_int_rk(exp, y_0, t, solve_args)[-1]
stepsize[i] = t_max/N_i
plt.loglog(stepsize, abs(y_end - np.exp(solve_args['a']*t_max)),
'b-o', label='Global error')
plt.loglog(stepsize, stepsize**order_check,'k--', label=r'$h^4$')
plt.xlabel(r'$h$')
plt.legend(loc=2)
```
So we've confirmed that the Runge-Kutta method is $\mathcal{O}(h^4)$: the global error shrinks much faster with an increasing number of steps than it does for the Explicit Euler method.
## Example 2: A Forced and Damped Pendulum
The next problem we're going to look at is a second-order ODE, but crucially _any higher-order ODE can be rewritten as a set of coupled first-order equations_, such that we can apply the solvers we've already designed. We'll see how to do that with this example.
### The Physical Problem
Take an idealised pendulum: a weightless string of length $\ell$, fixed at one end with a mass $m$ at the other. The pendulum is free to swing in a plane subject to gravity, friction proportional to its velocity $v$, and may be driven by an external periodic force $F_d \cos {\omega_d t}$.
We start with Newton's second law applied to the horizontal displacement $x$ for the unforced and undamped pendulum,
$$m\ddot{x} = -mg \sin{\theta}$$
where $g$ is the local acceleration due to gravity and $\theta{(t)}$ is the angle of displacement of the string from vertical at time $t$. We now apply a frictional force proportional to the translational velocity and rearrange to obtain a second-order homogeneous differential equation,
$$ m \ddot{x} + k \ell \dot{x} + mg \sin{\theta} = 0 $$
where $k$ is the coefficient of friction.
We want to consider angular displacement, so substitute $\dot{x} = \ell \dot{\theta}$ and $\ddot{x} = \ell \ddot{\theta}$, and now apply the driving force
$$ m \ell \ddot{\theta} + k \ell \dot{\theta} + mg \sin{\theta} = F_d \cos {\omega_d t} $$
The equation is nonlinear (due to the sine function), so finding an analytic solution is going to be difficult. Numerical methods are the next line of attack, so we'll try our Runge-Kutta method on the problem. First we must rewrite the second-order equation as a set of coupled first-order equations. Let $y_0 = \theta$, $y_1 = \dot{\theta}$ and $y_2 = \ddot{\theta}$. Then
$$
\begin{align*}
y_0' &= y_1 = \dot{\theta} \\
y_1' &= y_2 = \ddot{\theta} = -\frac{k}{m}\dot{\theta} - \frac{g}{\ell} \sin{\theta} + \frac{F_d}{m \ell} \cos{\omega_d t}
\end{align*}
$$
is the system we want. We make a final tidying of the parameters by letting $\alpha = g/\ell$, $\beta = k/m$ and $\gamma = F_d/m\ell$:
$$
\begin{align*}
y_0' &= y_1 \\
y_1' &= -\alpha \sin{y_0} -\beta y_1 + \gamma \cos{\omega t}
\end{align*}
$$
### Setting up the Problem in Python
We write this pair of equations as a Python function to be passed to our ODE integrator:
```
def pendulum(t, y, args):
""" A damped and forced pendulum, described as set of two first-order ODEs.
Args:
t: Time
y: Pendulum system vector [angle, angular velocity]
args['alpha']: gravity_acc/length_of_pendulum
args['beta']: friction_constant/mass_pendulum
args['gamma']: driving_force/mass_pendulum/length_pendulum
args['omega']: driving_freq
Returns:
dydt: ODE vector
"""
dydt = np.zeros(2)
dydt[0] = y[1]
dydt[1] = (-args['alpha']*np.sin(y[0]) - args['beta']*y[1] +
args['gamma']*np.cos(args['omega']*t))
return dydt
```
Now we're ready to select some parameters. First we'll remove friction ($k = 0$) and apply no driving force ($F_d = 0$), under which conditions we expect to see simple harmonic motion of the pendulum back and forth.
```
### Parameters
gravity_acc = 10. # [m /s2]
length_pendulum = 1. # [m]
mass_pendulum = 1. # [kg]
friction_constant = 0. # [kg /m /s]
driving_force = 0. # [N]
driving_freq = 0. # [2π /s]
N = 200
t = np.linspace(0., 10., N+1) # [s] an array of time steps
```
We're going to pass the arguments to the solver as a dict, so let's make that now.
```
solve_args = {}
solve_args['alpha'] = gravity_acc/length_pendulum
solve_args['beta'] = friction_constant/mass_pendulum
solve_args['gamma'] = driving_force/mass_pendulum/length_pendulum
solve_args['omega'] = driving_freq
```
Next we need some initial conditions. We'll pick $\theta_0 = \tfrac{\pi}{8}$ so that we can check our numerical result with the **small angle approximation**,
$$\sin \theta \approx \theta.$$
With this approximation, the undamped and unforced solution can be found analytically: $y = y_0 \cos (\sqrt{\alpha} t)$ (you can check this by substituting it into the original ODE).
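If you have SymPy available, here's a quick sketch (not needed for anything below) confirming that this expression satisfies the small-angle equation $\ddot{\theta} + \alpha \theta = 0$:
```
import sympy as sp

t_s, alpha_s, y0_s = sp.symbols('t alpha y_0', positive=True)
theta = y0_s*sp.cos(sp.sqrt(alpha_s)*t_s)

# theta'' + alpha*theta simplifies to zero, so the candidate solution works
print(sp.simplify(sp.diff(theta, t_s, 2) + alpha_s*theta))  # -> 0
```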
```
initial_ang = np.pi/8 # [rad]
initial_ang_vel = 0. # [rad /s]
initial_cond = np.array([initial_ang, initial_ang_vel])
```
### Solving the Problem
Now we can solve the ODE system using the Runge-Kutta method we made above and plot the angle $y_0$ over time.
```
from scipy.integrate import odeint
# y = odeint(pendulum, initial_cond, t, args=(solve_args,))
# Solve Pendulum ODE with RK4
y = ode_int_rk(pendulum, initial_cond, t, solve_args)
y_small_ang = initial_ang*np.cos(np.sqrt(solve_args['alpha'])*t)
plt.plot(t, y[:,0], c='b', label='Angle')
plt.plot(t, y_small_ang, 'r--', label='Small angle approx.')
plt.xlabel('Time (s)')
plt.ylabel('Angle (rad)')
plt.legend(loc=2)
```
So we see that the pendulum oscillates as expected and the frequency matches the known small angle result reasonably well for a few swings. (Subject to the error in the small angle approximation. Try making `initial_ang` bigger or smaller to see where the approximation works well.)
Other perspectives we might be interested in are the pendulum's trajectory in phase space, which we plot in a **phase diagram**, and its spectral properties, which we examine via a **power spectrum**. We'll plot those now.
```
def plot_pendulum(t,y):
""" Plot Angle, Phase Diagram, FFT. """
fig = plt.figure()
# Plot Angle
ax_1 = fig.add_subplot(211)
ax_1.plot(t, y[:,0], c='b')
ax_1.set_xlabel('Time (s)')
ax_1.set_ylabel('Angle (rad)')
# Plot Phase Diagram
ax_2 = fig.add_subplot(223)
ax_2.plot(y[:,0], y[:,1], c='g')
ax_2.set_xlabel('Angle (rad)')
ax_2.set_ylabel('Angular Velocity (rad /s)')
# Fourier Transform
f_fft = np.fft.fftfreq(len(t), t[1]-t[0])
y_fft = np.fft.fft(y[:,0])/np.sqrt(2*len(t))
# Plot Power Spectrum
ax_3 = fig.add_subplot(224)
    ax_3.plot(f_fft[:N//2]*2*np.pi, abs(y_fft[:N//2]), c='r')
ax_3.set_xlim([0, 30])
ax_3.set_xlabel('Ang Freq ($2 \pi$ Hz)')
ax_3.set_ylabel('Power')
```
```
plot_pendulum(t,y)
```
The phase diagram (green, bottom left) shows each possible physical state for the system. We see the undamped, unforced pendulum follows a regular orbit. The power spectrum (red, bottom right) is found by taking a discrete Fourier transform using NumPy's `fft` module. We see a single narrow peak at $\omega \approx \sqrt{\alpha}$, the pendulum's natural frequency.
### Adding Friction
To better model a physical pendulum we might add a friction constant $k$, and see what happens when we solve the system with this included.
```
friction_constant = .5 # [kg /m /s]
solve_args['beta'] = friction_constant/mass_pendulum
# Solve Pendulum ODE with RK4
y = ode_int_rk(pendulum, initial_cond, t, solve_args)
plot_pendulum(t,y)
```
We see that over time the friction attenuates the pendulum's swing. The phase space trajectory no longer orbits consistently but spirals in, and the spectral peak has been widened out to include a spread of lower frequency components.
### Adding a Driving Force
```
driving_force = 10. # [N]
driving_freq = 10. # [2π /s]
solve_args['gamma'] = driving_force/mass_pendulum/length_pendulum
solve_args['omega'] = driving_freq
# Solve Pendulum ODE with RK4
y = ode_int_rk(pendulum, initial_cond, t, solve_args)
plot_pendulum(t,y)
```
The driven system is of course more interesting! The phase space trajectory looks chaotic, but the pendulum's behaviour is clear in the power spectrum, where we now see an additional peak at the driving frequency $\omega_d$.
Anyway, there's plenty to explore in the damped, forced pendulum system, but as we're just interested in the integration methods here, we'll move on. The takeaway is: by converting our second-order ODE into a system of first-order ODEs, we were able to solve it using the methods we'd already written. And crucially, this can be done for any higher-order ODE.
## The Two-Step Adams-Bashforth Method
Remember that with these different methods, we're always looking to do the same thing: to choose a numerical approximation to the integral in
$$ y(t_{n+1}) = y(t_n) + \int_{t_n}^{t_{n+1}} \! f(t,y(t)) \, \mathrm{d}t. \qquad \color{grey}{[1]} $$
So far, we've looked at **single-step** methods where we only use one known point to estimate the next one. But one way to improve the accuracy of our method is to use more known points. Such approximations are called **multistep methods**, and we'll look at an example now.
Where the Explicit Euler method takes the slope $f$ to be a constant on the interval $[t_n, t_{n+1}]$, the idea behind _Adams-Bashforth_ methods is to approximate $f$ by a [Lagrange interpolating polynomial][lagrange]:
$$ P(t) = \sum_{j=1}^{m}{P_j(t)} $$
where
$$ P_j(t) = y_j \prod_{\substack{k=1 \\ k \ne j}}^{m}{ \frac{t - t_k}{t_j - t_k} }. $$
Here $P(t)$ is the polynomial of degree $\le (m-1)$ that passes through the $m$ points $(t_1, y_1 = f(t_1))$, $(t_2, y_2 = f(t_2))$ $\dots$ $(t_m, y_m = f(t_m))$. We'll take the linear $(m = 2)$ interpolant on the point $t_{n}$ and an earlier point $t_{n-1}$, so we have
$$ P(t) = f(t_n, y_n)\frac{t - t_{n-1}}{t_n - t_{n-1}} + f(t_{n-1}, y_{n-1})\frac{t - t_{n}}{t_{n-1} - t_n}. $$
Now if we put this approximating polynomial into the integral of $\color{grey}{[1]}$, we find
\begin{align}
\int_{t_n}^{t_{n+1}} \! f(t,y(t)) \, \mathrm{d}t \approx \int_{t_n}^{t_{n+1}} \! P(t) \, \mathrm{d}t &= \int_{t_n}^{t_{n+1}} \! \left[ f(t_n, y_n)\frac{t - t_{n-1}}{t_n - t_{n-1}} + f(t_{n-1}, y_{n-1})\frac{t - t_{n}}{t_{n-1} - t_n} \right] \mathrm{d}t \\
&= \frac{(t_{n+1} - t_n)}{2(t_n - t_{n-1})} \left[ f(t_n,y_n)(t_n + t_{n+1} - 2t_{n-1}) - f(t_{n-1},y_{n-1})(t_{n+1} - t_n) \right]
\end{align}
### Step Sizes
If we let $h_1 := t_n - t_{n-1}$ and $h_2 := t_{n+1} - t_n$ then
$$ \int_{t_n}^{t_{n+1}} \! P(t) \, \mathrm{d}t = \frac{h_2}{2 h_1} \left[ (2 h_1 + h_2) f(t_n,y_n) - h_2 f(t_{n-1},y_{n-1}) \right]. $$
Putting this back into $\color{grey}{[1]}$, we get
$$ y(t_{n+1}) \approx y(t_{n}) + \frac{h_2}{2 h_1} \left[ (2 h_1 + h_2) f(t_n,y_n) - h_2 f(t_{n-1},y_{n-1}) \right] $$
and our sequence of approximation points $y_n$ is calculated as
$$ y_{n+1} = y_n + \frac{h_2}{2 h_1} \left[ (2 h_1 + h_2) f(t_n,y_n) - h_2 f(t_{n-1},y_{n-1}) \right] $$
for $n = 1, 2, \dots N$.
**If the steps are of equal size**, i.e. $h := h_1 = h_2$ we find
$$ y_{n+1} = y_n + \frac{3}{2} h f(t_n,y_n) - \frac{1}{2} h f(t_{n-1}, y_{n-1}) $$
which is the [standard two-step Adams-Bashforth method][multistep].
[lagrange]: http://mathworld.wolfram.com/LagrangeInterpolatingPolynomial.html
[multistep]: http://en.wikipedia.org/wiki/Linear_multistep_method#Families_of_multistep_methods
### Accuracy
Replacing $f(t,y(t))$ with the interpolant $P(t)$ [incurs a global error][wiki_order_cite] of order $\mathcal{O}(h^m)$, so in the case of the two-step method we have $\mathcal{O}(h^2)$.
Note that if you follow the same derivation with $m = 1$ you get the Euler method — so the Euler method is also in fact the one-step Adams-Bashforth method.
[wiki_order_cite]: http://en.wikipedia.org/wiki/Linear_multistep_method#CITEREFIserles1996
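To make the method concrete, here's a minimal sketch of the equal-step case in the same style as `ode_int_rk` above. It assumes equally spaced time points and bootstraps with a single RK4 step, since a two-step method needs two starting values.
```
def ode_int_ab2(func, y_0, t, args={}):
    """ Two-step Adams-Bashforth approximation to a first-order ODE system
    with initial conditions. A sketch: assumes t is equally spaced and uses
    one RK4 step to generate the second starting value.
    """
    y = np.zeros([len(t), len(y_0)])
    y[0] = y_0
    h = t[1] - t[0]
    # Bootstrap: one RK4 step to get y[1]
    k_1 = func(t[0], y[0], args)
    k_2 = func(t[0]+h/2., y[0]+h/2.*k_1, args)
    k_3 = func(t[0]+h/2., y[0]+h/2.*k_2, args)
    k_4 = func(t[0]+h, y[0]+h*k_3, args)
    y[1] = y[0] + h/6.*(k_1 + 2.*k_2 + 2.*k_3 + k_4)
    # Adams-Bashforth steps: y_{n+1} = y_n + h*(3/2 f_n - 1/2 f_{n-1})
    for i in range(1, len(t)-1):
        y[i+1] = y[i] + h*(1.5*func(t[i], y[i], args)
                           - 0.5*func(t[i-1], y[i-1], args))
    return y
```
Re-running the stepsize study from earlier with `ode_int_ab2` in place of `ode_int_rk` should show the global error falling along the $h^2$ line.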
## Next
In [Part 3][part3] we'll look at an example of a **stiff** problem, which resists solution by the Explicit Euler method, and so discover another measure of a numerical method to go with its accuracy: the numerical **stability**. We'll then introduce **implicit methods**, which allow us to solve stiff problems.
[part3]: ./8_Stiff-Problems-Implicit-Methods-and-Computational-Cost.ipynb
|
The ball of radius $r$ centered at $c - a$ is the image of the ball of radius $r$ centered at $c$ under the translation $x \mapsto x - a$.
|
lemma clamp_continuous_on: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::metric_space" assumes f_cont: "continuous_on (cbox a b) f" shows "continuous_on S (\<lambda>x. f (clamp a b x))"
|
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
X✝ Y✝ Z✝ : Monad C
f : X✝ ⟶ Y✝
g : Y✝ ⟶ Z✝
X Y : C
h : X ⟶ Y
⊢ X✝.map h ≫ (fun X => NatTrans.app f.toNatTrans X ≫ NatTrans.app g.toNatTrans X) Y =
(fun X => NatTrans.app f.toNatTrans X ≫ NatTrans.app g.toNatTrans X) X ≫ Z✝.map h
[PROOFSTEP]
rw [assoc, f.1.naturality_assoc, g.1.naturality]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
X✝ Y✝ : Monad C
x✝ : X✝ ⟶ Y✝
⊢ (𝟙 X✝ ≫ x✝).app = x✝.app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
X✝ Y✝ : Monad C
x✝¹ : X✝ ⟶ Y✝
x✝ : C
⊢ NatTrans.app (𝟙 X✝ ≫ x✝¹).toNatTrans x✝ = NatTrans.app x✝¹.toNatTrans x✝
[PROOFSTEP]
simp only [NatTrans.id_app, id_comp]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
X✝ Y✝ : Monad C
x✝ : X✝ ⟶ Y✝
⊢ (x✝ ≫ 𝟙 Y✝).app = x✝.app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
X✝ Y✝ : Monad C
x✝¹ : X✝ ⟶ Y✝
x✝ : C
⊢ NatTrans.app (x✝¹ ≫ 𝟙 Y✝).toNatTrans x✝ = NatTrans.app x✝¹.toNatTrans x✝
[PROOFSTEP]
simp only [NatTrans.id_app, comp_id]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
W✝ X✝ Y✝ Z✝ : Monad C
x✝² : W✝ ⟶ X✝
x✝¹ : X✝ ⟶ Y✝
x✝ : Y✝ ⟶ Z✝
⊢ ((x✝² ≫ x✝¹) ≫ x✝).app = (x✝² ≫ x✝¹ ≫ x✝).app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
W✝ X✝ Y✝ Z✝ : Monad C
x✝³ : W✝ ⟶ X✝
x✝² : X✝ ⟶ Y✝
x✝¹ : Y✝ ⟶ Z✝
x✝ : C
⊢ NatTrans.app ((x✝³ ≫ x✝²) ≫ x✝¹).toNatTrans x✝ = NatTrans.app (x✝³ ≫ x✝² ≫ x✝¹).toNatTrans x✝
[PROOFSTEP]
simp only [assoc]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G X✝ Y✝ Z✝ : Comonad C
f : X✝ ⟶ Y✝
g : Y✝ ⟶ Z✝
X Y : C
h : X ⟶ Y
⊢ X✝.map h ≫ (fun X => NatTrans.app f.toNatTrans X ≫ NatTrans.app g.toNatTrans X) Y =
(fun X => NatTrans.app f.toNatTrans X ≫ NatTrans.app g.toNatTrans X) X ≫ Z✝.map h
[PROOFSTEP]
rw [assoc, f.1.naturality_assoc, g.1.naturality]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G X✝ Y✝ : Comonad C
x✝ : X✝ ⟶ Y✝
⊢ (𝟙 X✝ ≫ x✝).app = x✝.app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G X✝ Y✝ : Comonad C
x✝¹ : X✝ ⟶ Y✝
x✝ : C
⊢ NatTrans.app (𝟙 X✝ ≫ x✝¹).toNatTrans x✝ = NatTrans.app x✝¹.toNatTrans x✝
[PROOFSTEP]
simp only [NatTrans.id_app, id_comp]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G X✝ Y✝ : Comonad C
x✝ : X✝ ⟶ Y✝
⊢ (x✝ ≫ 𝟙 Y✝).app = x✝.app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G X✝ Y✝ : Comonad C
x✝¹ : X✝ ⟶ Y✝
x✝ : C
⊢ NatTrans.app (x✝¹ ≫ 𝟙 Y✝).toNatTrans x✝ = NatTrans.app x✝¹.toNatTrans x✝
[PROOFSTEP]
simp only [NatTrans.id_app, comp_id]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G W✝ X✝ Y✝ Z✝ : Comonad C
x✝² : W✝ ⟶ X✝
x✝¹ : X✝ ⟶ Y✝
x✝ : Y✝ ⟶ Z✝
⊢ ((x✝² ≫ x✝¹) ≫ x✝).app = (x✝² ≫ x✝¹ ≫ x✝).app
[PROOFSTEP]
funext
[GOAL]
case h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G W✝ X✝ Y✝ Z✝ : Comonad C
x✝³ : W✝ ⟶ X✝
x✝² : X✝ ⟶ Y✝
x✝¹ : Y✝ ⟶ Z✝
x✝ : C
⊢ NatTrans.app ((x✝³ ≫ x✝²) ≫ x✝¹).toNatTrans x✝ = NatTrans.app (x✝³ ≫ x✝² ≫ x✝¹).toNatTrans x✝
[PROOFSTEP]
simp only [assoc]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : autoParam (∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X) _auto✝
f_μ :
autoParam
(∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X)
_auto✝
X : C
⊢ NatTrans.app (Monad.η N) X ≫ NatTrans.app f.inv X = NatTrans.app (Monad.η M) X
[PROOFSTEP]
simp [← f_η]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : autoParam (∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X) _auto✝
f_μ :
autoParam
(∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X)
_auto✝
X : C
⊢ NatTrans.app (Monad.μ N) X ≫ NatTrans.app f.inv X =
(N.map (NatTrans.app f.inv X) ≫ NatTrans.app f.inv (M.obj X)) ≫ NatTrans.app (Monad.μ M) X
[PROOFSTEP]
rw [← NatIso.cancel_natIso_hom_right f]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : autoParam (∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X) _auto✝
f_μ :
autoParam
(∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X)
_auto✝
X : C
⊢ (NatTrans.app (Monad.μ N) X ≫ NatTrans.app f.inv X) ≫ NatTrans.app f.hom X =
((N.map (NatTrans.app f.inv X) ≫ NatTrans.app f.inv (M.obj X)) ≫ NatTrans.app (Monad.μ M) X) ≫ NatTrans.app f.hom X
[PROOFSTEP]
simp only [NatTrans.naturality, Iso.inv_hom_id_app, assoc, comp_id, f_μ, NatTrans.naturality_assoc,
Iso.inv_hom_id_app_assoc, ← Functor.map_comp_assoc]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : autoParam (∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X) _auto✝
f_μ :
autoParam
(∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X)
_auto✝
X : C
⊢ NatTrans.app (Monad.μ N) X = N.map (𝟙 (N.obj X)) ≫ NatTrans.app (Monad.μ N) X
[PROOFSTEP]
simp
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : autoParam (∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X) _auto✝
f_δ :
autoParam
(∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X))
_auto✝
X : C
⊢ NatTrans.app f.inv X ≫ NatTrans.app (Comonad.ε M) X = NatTrans.app (Comonad.ε N) X
[PROOFSTEP]
simp [← f_ε]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : autoParam (∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X) _auto✝
f_δ :
autoParam
(∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X))
_auto✝
X : C
⊢ NatTrans.app f.inv X ≫ NatTrans.app (Comonad.δ M) X =
NatTrans.app (Comonad.δ N) X ≫ NatTrans.app f.inv (N.obj X) ≫ M.map (NatTrans.app f.inv X)
[PROOFSTEP]
rw [← NatIso.cancel_natIso_hom_left f]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : autoParam (∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X) _auto✝
f_δ :
autoParam
(∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X))
_auto✝
X : C
⊢ NatTrans.app f.hom X ≫ NatTrans.app f.inv X ≫ NatTrans.app (Comonad.δ M) X =
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X ≫ NatTrans.app f.inv (N.obj X) ≫ M.map (NatTrans.app f.inv X)
[PROOFSTEP]
simp only [reassoc_of% (f_δ X), Iso.hom_inv_id_app_assoc, NatTrans.naturality_assoc]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : autoParam (∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X) _auto✝
f_δ :
autoParam
(∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X))
_auto✝
X : C
⊢ NatTrans.app (Comonad.δ M) X =
NatTrans.app (Comonad.δ M) X ≫ M.map (NatTrans.app f.hom X) ≫ M.map (NatTrans.app f.inv X)
[PROOFSTEP]
rw [← Functor.map_comp, Iso.hom_inv_id_app, Functor.map_id]
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : autoParam (∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X) _auto✝
f_δ :
autoParam
(∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X))
_auto✝
X : C
⊢ NatTrans.app (Comonad.δ M) X = NatTrans.app (Comonad.δ M) X ≫ 𝟙 (M.obj (M.obj X))
[PROOFSTEP]
apply (comp_id _).symm
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : ∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X
f_μ :
∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X
⊢ (monadToFunctor C).mapIso (MonadIso.mk f) = f
[PROOFSTEP]
ext
[GOAL]
case w.w.h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G : Comonad C
M N : Monad C
f : M.toFunctor ≅ N.toFunctor
f_η : ∀ (X : C), NatTrans.app (Monad.η M) X ≫ NatTrans.app f.hom X = NatTrans.app (Monad.η N) X
f_μ :
∀ (X : C),
NatTrans.app (Monad.μ M) X ≫ NatTrans.app f.hom X =
(M.map (NatTrans.app f.hom X) ≫ NatTrans.app f.hom (N.obj X)) ≫ NatTrans.app (Monad.μ N) X
x✝ : C
⊢ NatTrans.app ((monadToFunctor C).mapIso (MonadIso.mk f)).hom x✝ = NatTrans.app f.hom x✝
[PROOFSTEP]
rfl
[GOAL]
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : ∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X
f_δ :
∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X)
⊢ (comonadToFunctor C).mapIso (ComonadIso.mk f) = f
[PROOFSTEP]
ext
[GOAL]
case w.w.h
C : Type u₁
inst✝ : Category.{v₁, u₁} C
T : Monad C
G M N : Comonad C
f : M.toFunctor ≅ N.toFunctor
f_ε : ∀ (X : C), NatTrans.app f.hom X ≫ NatTrans.app (Comonad.ε N) X = NatTrans.app (Comonad.ε M) X
f_δ :
∀ (X : C),
NatTrans.app f.hom X ≫ NatTrans.app (Comonad.δ N) X =
NatTrans.app (Comonad.δ M) X ≫ NatTrans.app f.hom (M.obj X) ≫ N.map (NatTrans.app f.hom X)
x✝ : C
⊢ NatTrans.app ((comonadToFunctor C).mapIso (ComonadIso.mk f)).hom x✝ = NatTrans.app f.hom x✝
[PROOFSTEP]
rfl
|
[STATEMENT]
lemma enumerator_bound3: "ls \<in> set (enumerator m nmax) \<Longrightarrow> 1 < nmax \<Longrightarrow> last (butlast ls) < nmax - Suc 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax\<rbrakk> \<Longrightarrow> last (butlast ls) < nmax - Suc 0
[PROOF STEP]
apply (case_tac "ls" rule: rev_exhaust)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = []\<rbrakk> \<Longrightarrow> last (butlast ls) < nmax - Suc 0
2. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> last (butlast ls) < nmax - Suc 0
[PROOF STEP]
apply force
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> last (butlast ls) < nmax - Suc 0
[PROOF STEP]
apply (rule_tac enumerator_bound2)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> ?ls8 ys y \<in> set (enumerator (?m8 ys y) nmax)
2. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> 1 < nmax
3. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> last (butlast ls) \<in> set (butlast (?ls8 ys y))
[PROOF STEP]
apply assumption
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> 1 < nmax
2. \<And>ys y. \<lbrakk>ls \<in> set (enumerator m nmax); 1 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> last (butlast ls) \<in> set (butlast ls)
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ys y. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
apply (case_tac "ys" rule: rev_exhaust)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys y. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = []\<rbrakk> \<Longrightarrow> last ys \<in> set ys
2. \<And>ys y ysa ya. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = ysa @ [ya]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys y. \<lbrakk>[y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = [y]; ys = []\<rbrakk> \<Longrightarrow> False
2. \<And>ys y ysa ya. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = ysa @ [ya]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
apply (subgoal_tac "2 \<le> length (ys @ [y])")
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>ys y. \<lbrakk>[y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = [y]; ys = []; 2 \<le> |ys @ [y]|\<rbrakk> \<Longrightarrow> False
2. \<And>ys y. \<lbrakk>[y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = [y]; ys = []\<rbrakk> \<Longrightarrow> 2 \<le> |ys @ [y]|
3. \<And>ys y ysa ya. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = ysa @ [ya]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys y. \<lbrakk>[y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = [y]; ys = []\<rbrakk> \<Longrightarrow> 2 \<le> |ys @ [y]|
2. \<And>ys y ysa ya. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = ysa @ [ya]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
apply (rule_tac enumerator_length)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys y. \<lbrakk>[y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = [y]; ys = []\<rbrakk> \<Longrightarrow> ys @ [y] \<in> set (enumerator (?m11 ys y) (?n11 ys y))
2. \<And>ys y ysa ya. \<lbrakk>ys @ [y] \<in> set (enumerator m nmax); Suc 0 < nmax; ls = ys @ [y]; ys = ysa @ [ya]\<rbrakk> \<Longrightarrow> last ys \<in> set ys
[PROOF STEP]
by auto
|
module Idris.IDEMode.Commands
import Core.Core
import Core.Context
import Core.Context.Log
import Core.Name
import public Idris.REPL.Opts
import Protocol.Hex
import System.File
import public Protocol.IDE
import public Protocol.SExp
%default total
export
Cast (FileName, NonEmptyFC) FileContext where
cast (filename, _, (startLine, startCol), (endLine, endCol)) =
MkFileContext
{ file = filename
, range = MkBounds {startLine, startCol, endLine, endCol}
}
export
getMsg : SExp -> Maybe (IDECommand, Integer)
getMsg (SExpList [cmdexp, IntegerAtom num])
= do cmd <- fromSExp cmdexp
pure (cmd, num)
getMsg _ = Nothing
export
SExpable Name where
toSExp = SymbolAtom . show
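-- write a string to the handle, discarding the result of fPutStr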
sendStr : File -> String -> IO ()
sendStr f st =
map (const ()) (fPutStr f st)
export
send : {auto c : Ref Ctxt Defs} -> File -> Reply -> Core ()
send f resp
= do let r = show (toSExp resp) ++ "\n"
log "ide-mode.send" 20 r
coreLift $ sendStr f $ leftPad '0' 6 (asHex (cast (length r)))
coreLift $ sendStr f r
coreLift $ fflush f
|
function bnet = mk_alarm_bnet()
% Written by Qian Diao <[email protected]> on 11 Dec 01
N = 37;
dag = zeros(N,N);
dag(21,23) = 1 ;
dag(21,24) = 1 ;
dag(1,24) = 1 ;
dag(1,23) = 1 ;
dag(2,26) = 1 ;
dag(2,25) = 1 ;
dag(2,24) = 1 ;
dag(2,13) = 1 ;
dag(2,23) = 1 ;
dag(13,30) = 1 ;
dag(30,31) = 1 ;
dag(3,14) = 1 ;
dag(3,19) = 1 ;
dag(4,36) = 1 ;
dag(14,35) = 1 ;
dag(32,33) = 1 ;
dag(32,35) = 1 ;
dag(32,34) = 1 ;
dag(32,36) = 1 ;
dag(15,21) = 1 ;
dag(5,31) = 1 ;
dag(27,30) = 1 ;
dag(28,31) = 1 ;
dag(28,29) = 1 ;
dag(26,28) = 1 ;
dag(26,27) = 1 ;
dag(16,31) = 1 ;
dag(16,37) = 1 ;
dag(23,26) = 1 ;
dag(23,29) = 1 ;
dag(23,25) = 1 ;
dag(6,15) = 1 ;
dag(7,27) = 1 ;
dag(8,21) = 1 ;
dag(19,20) = 1 ;
dag(19,22) = 1 ;
dag(31,32) = 1 ;
dag(9,14) = 1 ;
dag(9,17) = 1 ;
dag(9,19) = 1 ;
dag(10,33) = 1 ;
dag(10,34) = 1 ;
dag(11,16) = 1 ;
dag(12,13) = 1 ;
dag(12,18) = 1 ;
dag(35,37) = 1 ;
node_sizes = 2*ones(1,N);
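% nodes are binary by default; the overrides below give selected nodes 3 or 4 states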
node_sizes(2) = 3;
node_sizes(6) = 3;
node_sizes(14) = 3;
node_sizes(15) = 4;
node_sizes(16) = 3;
node_sizes(18) = 3;
node_sizes(19) = 3;
node_sizes(20) = 3;
node_sizes(21) = 4;
node_sizes(22) = 3;
node_sizes(23) = 4;
node_sizes(24) = 4;
node_sizes(25) = 4;
node_sizes(26) = 4;
node_sizes(27) = 3;
node_sizes(28) = 3;
node_sizes(29) = 4;
node_sizes(30) = 3;
node_sizes(32) = 3;
node_sizes(33) = 3;
node_sizes(34) = 3;
node_sizes(35) = 3;
node_sizes(36) = 3;
node_sizes(37) = 3;
bnet = mk_bnet(dag, node_sizes);
bnet.CPD{1} = tabular_CPD(bnet, 1,[0.96 0.04 ]);
bnet.CPD{2} = tabular_CPD(bnet, 2,[0.92 0.03 0.05 ]);
bnet.CPD{3} = tabular_CPD(bnet, 3,[0.8 0.2 ]);
bnet.CPD{4} = tabular_CPD(bnet, 4,[0.95 0.05 ]);
bnet.CPD{5} = tabular_CPD(bnet, 5,[0.8 0.2 ]);
bnet.CPD{6} = tabular_CPD(bnet, 6,[0.01 0.98 0.01 ]);
bnet.CPD{7} = tabular_CPD(bnet, 7,[0.01 0.99 ]);
bnet.CPD{8} = tabular_CPD(bnet, 8,[0.95 0.05 ]);
bnet.CPD{9} = tabular_CPD(bnet, 9,[0.95 0.05 ]);
bnet.CPD{10} = tabular_CPD(bnet, 10,[0.9 0.1 ]);
bnet.CPD{11} = tabular_CPD(bnet, 11,[0.99 0.01 ]);
bnet.CPD{12} = tabular_CPD(bnet, 12,[0.99 0.01 ]);
bnet.CPD{13} = tabular_CPD(bnet, 13,[0.95 0.95 0.05 0.1 0.1 0.01 0.05 0.05 0.95 0.9 0.9 0.99 ]);
bnet.CPD{14} = tabular_CPD(bnet, 14,[0.05 0.95 0.5 0.98 0.9 0.04 0.49 0.01 0.05 0.01 0.01 0.01 ]);
bnet.CPD{15} = tabular_CPD(bnet, 15,[0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 ]);
bnet.CPD{16} = tabular_CPD(bnet, 16,[0.3 0.98 0.4 0.01 0.3 0.01 ]);
bnet.CPD{17} = tabular_CPD(bnet, 17,[0.99 0.1 0.01 0.9 ]);
bnet.CPD{18} = tabular_CPD(bnet, 18,[0.05 0.01 0.9 0.19 0.05 0.8 ]);
bnet.CPD{19} = tabular_CPD(bnet, 19,[0.05 0.98 0.01 0.95 0.9 0.01 0.09 0.04 0.05 0.01 0.9 0.01 ]);
bnet.CPD{20} = tabular_CPD(bnet, 20,[0.95 0.04 0.01 0.04 0.95 0.29 0.01 0.01 0.7 ]);
bnet.CPD{21} = tabular_CPD(bnet, 21,[0.97 0.97 0.01 0.97 0.01 0.97 0.01 0.97 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 ]);
bnet.CPD{22} = tabular_CPD(bnet, 22,[0.95 0.04 0.01 0.04 0.95 0.04 0.01 0.01 0.95 ]);
bnet.CPD{23} = tabular_CPD(bnet, 23,[0.97 0.97 0.97 0.97 0.97 0.97 0.01 0.95 0.97 0.97 0.01 0.95 0.01 0.4 0.97 0.97 0.01 0.5 0.01 0.3 0.97 0.97 0.01 0.3 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.03 0.01 0.01 0.97 0.03 0.01 0.58 0.01 0.01 0.01 0.48 0.01 0.68 0.01 0.01 0.01 0.68 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 ]);
bnet.CPD{24} = tabular_CPD(bnet, 24,[0.97 0.97 0.97 0.97 0.97 0.97 0.01 0.01 0.4 0.1 0.01 0.01 0.01 0.01 0.2 0.05 0.01 0.01 0.01 0.01 0.2 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.49 0.58 0.84 0.9 0.29 0.01 0.01 0.75 0.25 0.01 0.01 0.01 0.01 0.7 0.15 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.3 0.01 0.05 0.08 0.3 0.97 0.08 0.04 0.25 0.38 0.08 0.01 0.01 0.09 0.25 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.2 0.01 0.01 0.01 0.4 0.01 0.9 0.01 0.45 0.6 0.9 0.97 0.97 0.01 0.59 0.97 0.97 ]);
bnet.CPD{25} = tabular_CPD(bnet, 25,[0.97 0.97 0.97 0.01 0.6 0.01 0.01 0.5 0.01 0.01 0.5 0.01 0.01 0.01 0.01 0.97 0.38 0.97 0.01 0.48 0.01 0.01 0.48 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.97 ]);
bnet.CPD{26} = tabular_CPD(bnet, 26,[0.97 0.97 0.97 0.01 0.01 0.03 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.95 0.01 0.01 0.94 0.01 0.01 0.88 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.04 0.01 0.01 0.1 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.01 ]);
bnet.CPD{27} = tabular_CPD(bnet, 27,[0.98 0.98 0.98 0.98 0.95 0.01 0.95 0.01 0.01 0.01 0.01 0.01 0.04 0.95 0.04 0.01 0.01 0.01 0.01 0.01 0.01 0.04 0.01 0.98 ]);
bnet.CPD{28} = tabular_CPD(bnet, 28,[0.01 0.01 0.04 0.9 0.01 0.01 0.92 0.09 0.98 0.98 0.04 0.01 ]);
bnet.CPD{29} = tabular_CPD(bnet, 29,[0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.97 0.97 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.43 ]);
bnet.CPD{30} = tabular_CPD(bnet, 30,[0.98 0.98 0.01 0.98 0.01 0.69 0.01 0.01 0.98 0.01 0.01 0.3 0.01 0.01 0.01 0.01 0.98 0.01 ]);
bnet.CPD{31} = tabular_CPD(bnet, 31,[0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.1 0.01 0.95 0.01 0.95 0.05 0.1 0.01 0.95 0.01 0.95 0.05 0.1 0.01 0.3 0.01 0.3 0.01 0.95 0.01 0.99 0.05 0.95 0.05 0.95 0.01 0.99 0.05 0.99 0.05 0.3 0.01 0.99 0.01 0.3 0.01 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.99 0.99 0.99 0.99 0.99 0.99 0.9 0.99 0.05 0.99 0.05 0.95 0.9 0.99 0.05 0.99 0.05 0.95 0.9 0.99 0.7 0.99 0.7 0.99 0.05 0.99 0.00999999 0.95 0.05 0.95 0.05 0.99 0.01 0.95 0.01 0.95 0.7 0.99 0.01 0.99 0.7 0.99 ]);
bnet.CPD{32} = tabular_CPD(bnet, 32,[0.1 0.01 0.89 0.09 0.01 0.9 ]);
bnet.CPD{33} = tabular_CPD(bnet, 33,[0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 ]);
bnet.CPD{34} = tabular_CPD(bnet, 34,[0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 ]);
bnet.CPD{35} = tabular_CPD(bnet, 35,[0.98 0.95 0.3 0.95 0.04 0.01 0.8 0.01 0.01 0.01 0.04 0.69 0.04 0.95 0.3 0.19 0.04 0.01 0.01 0.01 0.01 0.01 0.01 0.69 0.01 0.95 0.98 ]);
bnet.CPD{36} = tabular_CPD(bnet, 36,[0.98 0.98 0.01 0.4 0.01 0.3 0.01 0.01 0.98 0.59 0.01 0.4 0.01 0.01 0.01 0.01 0.98 0.3 ]);
bnet.CPD{37} = tabular_CPD(bnet, 37,[0.98 0.98 0.3 0.98 0.1 0.05 0.9 0.05 0.01 0.01 0.01 0.6 0.01 0.85 0.4 0.09 0.2 0.09 0.01 0.01 0.1 0.01 0.05 0.55 0.01 0.75 0.9 ]);
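% Example usage (illustrative; assumes Kevin Murphy's Bayes Net Toolbox, which
% also provides the mk_bnet and tabular_CPD functions used above):
%   bnet = mk_alarm_bnet();
%   engine = jtree_inf_engine(bnet);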
|
function amd_install
%AMD_INSTALL compile and install amd2 for use in MATLAB
% Your current directory must be AMD/MATLAB for this function to work.
%
% Example:
% amd_install
%
% See also amd, amd2.
% Copyright 1994-2007, Tim Davis, Patrick R. Amestoy, and Iain S. Duff.
amd_make
addpath (pwd)
fprintf ('\nThe following path has been added. You may wish to add it\n') ;
fprintf ('permanently, using the MATLAB pathtool command.\n') ;
fprintf ('%s\n\n', pwd) ;
amd_demo
|
[STATEMENT]
lemma singular_subdivision_add:
"singular_subdivision p (a + b) = singular_subdivision p a + singular_subdivision p b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. singular_subdivision p (a + b) = singular_subdivision p a + singular_subdivision p b
[PROOF STEP]
by (simp add: singular_subdivision_def frag_extend_add)
|
(* Title: HOL/Auth/n_german_lemma_on_inv__6.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__6 imports n_german_base
begin
section{*All lemmas on causal relation between inv__6 and some rule r*}
lemma n_SendInvAckVsinv__6:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__6:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck)) (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv4)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') i) ''Cmd'')) (Const InvAck)) (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__6:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__6:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__6:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__6 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntE))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__6:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqESVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendGntSVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntS i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvSVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvS i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendInvEVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInvE i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReq N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__6:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__6:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__6 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj32synthconj4 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus (plus (mult lv0 lv1) lv1) (Succ lv0)) (plus lv1 (Succ (mult (Succ lv1) lv0)))).
Admitted.
QuickChick conj32synthconj4.
|
module Data.Real.Base where
open import Data.Float renaming (Float to ℝ) public
|
#include <NTL/GF2E.h>
#include <NTL/new.h>
NTL_START_IMPL
GF2EInfoT::GF2EInfoT(const GF2X& NewP)
{
ref_count = 1;
build(p, NewP);
if (p.size == 1) {
if (deg(p) <= NTL_BITS_PER_LONG/2)
KarCross = 4;
else
KarCross = 8;
}
else if (p.size == 2)
KarCross = 8;
else if (p.size <= 5)
KarCross = 4;
else if (p.size == 6)
KarCross = 3;
else
KarCross = 2;
if (p.size <= 1) {
if (deg(p) <= NTL_BITS_PER_LONG/2)
ModCross = 20;
else
ModCross = 40;
}
else if (p.size <= 2)
ModCross = 75;
else if (p.size <= 4)
ModCross = 50;
else
ModCross = 25;
if (p.size == 1) {
if (deg(p) <= NTL_BITS_PER_LONG/2)
DivCross = 100;
else
DivCross = 200;
}
else if (p.size == 2)
DivCross = 400;
else if (p.size <= 4)
DivCross = 200;
else if (p.size == 5)
DivCross = 150;
else if (p.size <= 13)
DivCross = 100;
else
DivCross = 75;
_card_init = 0;
_card_exp = p.n;
}
const ZZ& GF2E::cardinality()
{
if (!GF2EInfo) Error("GF2E::cardinality: undefined modulus");
if (!GF2EInfo->_card_init) {
power(GF2EInfo->_card, 2, GF2EInfo->_card_exp);
GF2EInfo->_card_init = 1;
}
return GF2EInfo->_card;
}
GF2EInfoT *GF2EInfo = 0;
typedef GF2EInfoT *GF2EInfoPtr;
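// Reference-counted pointer assignment: release the old target (deleting it
// once its count reaches zero) and retain the new one.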
static
void CopyPointer(GF2EInfoPtr& dst, GF2EInfoPtr src)
{
if (src == dst) return;
if (dst) {
dst->ref_count--;
if (dst->ref_count < 0)
Error("internal error: negative GF2EContext ref_count");
if (dst->ref_count == 0) delete dst;
}
if (src) {
if (src->ref_count == NTL_MAX_LONG)
Error("internal error: GF2EContext ref_count overflow");
src->ref_count++;
}
dst = src;
}
void GF2E::init(const GF2X& p)
{
GF2EContext c(p);
c.restore();
}
GF2EContext::GF2EContext(const GF2X& p)
{
ptr = NTL_NEW_OP GF2EInfoT(p);
}
GF2EContext::GF2EContext(const GF2EContext& a)
{
ptr = 0;
CopyPointer(ptr, a.ptr);
}
GF2EContext& GF2EContext::operator=(const GF2EContext& a)
{
CopyPointer(ptr, a.ptr);
return *this;
}
GF2EContext::~GF2EContext()
{
CopyPointer(ptr, 0);
}
void GF2EContext::save()
{
CopyPointer(ptr, GF2EInfo);
}
void GF2EContext::restore() const
{
CopyPointer(GF2EInfo, ptr);
}
GF2EBak::~GF2EBak()
{
if (MustRestore)
CopyPointer(GF2EInfo, ptr);
CopyPointer(ptr, 0);
}
void GF2EBak::save()
{
MustRestore = 1;
CopyPointer(ptr, GF2EInfo);
}
void GF2EBak::restore()
{
MustRestore = 0;
CopyPointer(GF2EInfo, ptr);
}
const GF2E& GF2E::zero()
{
static GF2E z(GF2E_NoAlloc);
return z;
}
istream& operator>>(istream& s, GF2E& x)
{
GF2X y;
s >> y;
conv(x, y);
return s;
}
void div(GF2E& x, const GF2E& a, const GF2E& b)
{
GF2E t;
inv(t, b);
mul(x, a, t);
}
void div(GF2E& x, GF2 a, const GF2E& b)
{
inv(x, b);
mul(x, x, a);
}
void div(GF2E& x, long a, const GF2E& b)
{
inv(x, b);
mul(x, x, a);
}
void inv(GF2E& x, const GF2E& a)
{
InvMod(x._GF2E__rep, a._GF2E__rep, GF2E::modulus());
}
NTL_END_IMPL
|
context("Test Ship Power Calculation for propeller law")
test_that("calcPropPwr works for shipSpeed == serviceSpeed",{
expect_equal( calcPropPwr(shipSpeed=10, refSpeed=10, totalInstalledPwr=100, n=3, serviceMargin=0),
100*0.94^3
)
})
test_that("calcPropPwr works for shipSpeed == maxSpeed",{
expect_equal( calcPropPwr(shipSpeed=10, refSpeed=10, totalInstalledPwr=100, n=3, serviceMargin=0, refSpeedType="maxSpeed"),
100
)
})
test_that("serviceMargin has the intended effect",{
expect_equal( calcPropPwr(shipSpeed=9, refSpeed=10, totalInstalledPwr=100, n=3, serviceMargin=15, refSpeedType="maxSpeed"),
100*(0.9^3)*1.15
)
})
test_that("maximum power output is 100% load",{
expect_equal( calcPropPwr(shipSpeed=20, refSpeed=10, totalInstalledPwr=100, n=3, serviceMargin=15),
100
)
})
test_that("minimum non-zero power is 2% load",{
expect_equal( calcPropPwr(shipSpeed=0.2, refSpeed=10, totalInstalledPwr=100, n=3, serviceMargin=0),
2
)
})
test_that("changing speed generates the correct power",{
shipSpeed <- 5
refSpeed <- 10
totalInstalledPwr <- 100
n <- 3
serviceMargin <- 0
  expect_equal( calcPropPwr(shipSpeed=shipSpeed, refSpeed=refSpeed, totalInstalledPwr=totalInstalledPwr, n=n, serviceMargin=serviceMargin),
                totalInstalledPwr*(0.94*shipSpeed/refSpeed)^n
  )
})
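# A minimal reference sketch of calcPropPwr, consistent with the expectations
# above. This is an illustrative assumption, not the package implementation;
# argument names are taken from the named calls in the tests.
calcPropPwrSketch <- function(shipSpeed, refSpeed, totalInstalledPwr, n,
                              serviceMargin, refSpeedType = "serviceSpeed") {
  # At service speed the engine is assumed to run at 94% of its maximum speed.
  speedRatio <- if (refSpeedType == "maxSpeed") {
    shipSpeed / refSpeed
  } else {
    0.94 * shipSpeed / refSpeed
  }
  # Propeller law: power scales with the n-th power of the speed ratio,
  # inflated by the service margin (given in percent).
  load <- speedRatio^n * (1 + serviceMargin / 100)
  # Clamp to the [2%, 100%] load window required by the boundary tests.
  load <- min(load, 1)
  if (load > 0 && load < 0.02) load <- 0.02
  totalInstalledPwr * load
}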
|
State Before: α : Type u_1
inst✝ : LinearOrderedAddCommGroup α
hα : Archimedean α
p : α
hp : 0 < p
a✝ b✝ c : α
n : ℤ
a b : α
⊢ toIcoMod hp a b - b = -toIcoDiv hp a b • p
State After: no goals
Tactic: rw [toIcoMod, sub_sub_cancel_left, neg_smul]
|
import Data.Vect
-- `minus` is saturating subtraction, so this works like we want it to
eq_max : (n, k : Nat) -> maximum k n = plus (n `minus` k) k
eq_max n Z = rewrite minusZeroRight n in rewrite plusZeroRightNeutral n in Refl
eq_max Z (S _) = Refl
eq_max (S n) (S k) = rewrite sym $ plusSuccRightSucc (n `minus` k) k in rewrite eq_max n k in Refl
leftPad : (x : a) -> (n : Nat) -> (xs : Vect k a) -> Vect (maximum k n) a
leftPad {k} x n xs = rewrite eq_max n k in replicate (n `minus` k) x ++ xs
leftPadProp : {xs : Vect k a} -> (m : Nat ** leftPad x n xs = {- rewrite somehow? `the Type (rewrite xxx in replicate ...)`? -} replicate m x ++ xs)
leftPadProp {n} {k} = (n `minus` k ** ?x)
|
using ArgParse
using Compat
import Compat: UTF8String, ASCIIString, view, readstring, foreach
include("CLI.jl")
include("commands.jl")
function main()
parsed_args = parse_args(build_arg_table())
command = parsed_args["%COMMAND%"]
if command == "help"
foreach(x -> println(x), help())
elseif command == "train_model"
train_model(parsed_args[command]["N"],
parsed_args[command]["a"],
parsed_args[command]["hl_size"],
parsed_args[command]["w"],
parsed_args[command]["b"],
parsed_args[command]["C"],
parsed_args[command]["pos_seq"],
parsed_args[command]["total_seq"],
parsed_args[command]["output_file"]
)
elseif command == "predict"
predict(parsed_args[command]["N"],
parsed_args[command]["a"],
parsed_args[command]["hl_size"],
parsed_args[command]["w"],
parsed_args[command]["b"],
parsed_args[command]["C"],
parsed_args[command]["pos_seq"],
parsed_args[command]["total_seq"],
parsed_args[command]["predict_seq"],
parsed_args[command]["output_file"]
)
elseif command == "simple_encoder"
simple_encoder(parsed_args[command]["N"],
parsed_args[command]["a"],
parsed_args[command]["output_file"])
else
println("Command not recognized")
end
end
# fire up simulation if run using command line
if !isinteractive()
main()
end
|
\section{Methods}\label{sec:methods}
%describe the methods, the data, the assumptions, the scenarios, a brief few
%sentences on Temoa itself.
This work collected data from multiple sources to populate a model of the
Illinois electric grid, including existing generation capacities, potential
generation technologies, the costs and wastes associated with each, and the
electricity demand profile. This simulated representation of the state of
Illinois relies on the Temoa framework, an open source tool built by
researchers at \gls{NCSU}, which enables energy system optimization and
techno-economic analysis
\cite{decarolis_temoa_2010,decarolis_modelling_2016,decarolis_formalizing_2017}.
The technology models in Temoa representing each energy source are configured
with data on fundamental techno-economic parameters such as capacity,
capacity factors, seasonal generation profiles, auxiliary products, waste
generation metrics, and costs (fixed, capital, variable, and otherwise).
Appendix \ref{sec:appendix} describes the key assumptions about electricity
generation and storage technologies in the Illinois model built for this
report.
\subsection{Optimization Analysis}
This work established optimal solutions for
various scenarios that illuminate the potential impact of nuclear plant
closures and other policy options on the cost of power in Illinois. These
simulations also explore Illinois' ability to meet aggressive proposed carbon
goals with and without the maintenance and expansion of nuclear power
capacity.
The scenarios are differentiated by their assumptions and constraints.
Each optimized scenario is the solution to a linear programming
problem composed of two key components. First, the \emph{objective function}
minimizes the total system cost of the energy grid in the state of
Illinois. It is stated as follows:
\begin{align}
\intertext{minimize}
&\sum_{g=1}^G\int_{2020}^{2050} c_g(t)\,dt\\
\intertext{where}
G&=\text{number of generation technologies}&\nonumber\\
x_g(t)&=\text{capacity of technology g in year t }[TW] &\nonumber\\
c_g(t)&=\text{total cost of technology g in year t }[\$]&\nonumber\\
&= \left(l_g(t) + f_g(t) + v_g(t)\,cf_g(t)\,t\right)x_g(t)\nonumber\\
l_g(t)&=\text{loan cost of technology g in year t }\left[\frac{\$}{TW}\right]&\nonumber\\
f_g(t)&=\text{fixed cost of technology g in year t }\left[\frac{\$}{TW}\right]&\nonumber\\
v_g(t)&=\text{variable cost of technology g in year t }\left[\frac{\$}{\text{TW-year}}\right]&\nonumber\\
cf_g(t)&=\text{capacity factor of technology g in year t }[\%].&\nonumber
\end{align}
Second, a set of constraints limits the model solutions. All scenarios begin
from the same initial condition, which reflects the present energy generation
infrastructure in Illinois. In this case, the constraints include balancing
electric supply with electric demand, reducing carbon emissions to zero by
2030, specifying renewable energy and energy storage deployment speeds, and
limiting land use based on availability.
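For illustration, the supply--demand balance constraint can be written
schematically as
\begin{align}
\sum_{g=1}^G x_g(t)\,cf_g(t) &\geq d(t) \quad \forall\, t,\nonumber
\end{align}
where $d(t)$, introduced here only for illustration, denotes the average power
demand in year $t$.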
Then, optimization
proceeds by varying all free parameters within the scope of the defined
constraints in order to meet the objective. Ultimately, the simulation solution
gives the energy generation mix, $\vec{x_g}$, for the Illinois electric grid that minimizes system cost.
In this case, Temoa varies the
deployed ratio of generation technologies on the Illinois electric grid, within
the constraints of various policies, to minimize cost. The simulations each
begin in the year 2020, with the true 2020 electricity generation mix in
Illinois as the initial condition, and proceed through 2050.
\FloatBarrier
\subsection{Data}
% paragraph on data sources
Robust data from a variety of national and regional databases populate the
model of Illinois' electric generation in the Temoa framework.
Primarily, this work relied on federal and international databases from the
Energy Information Administration
\cite{us_energy_information_administration_eia_preliminary_2021,energy_information_administration_state_2020,us_energy_information_administration_eia_electric_2021,us_energy_information_administration_eia_illinois_2020},
the U.S. Geological Survey \cite{hoen_united_2018},
International Energy Agency \cite{lorenczik_projected_2020},
the Nuclear Energy Agency \cite{crozat_full_2018},
the Nuclear Regulatory Commission
\cite{united_states_nuclear_regulatory_commission_illinois_2020},
the Intergovernmental Panel on Climate Change
\cite{intergovernmental_panel_on_climate_change_annex_2014,intergovernmental_panel_on_climate_change_climate_2014,intergovernmental_panel_on_climate_change_climate_2014-1,intergovernmental_panel_on_climate_change_climate_2014-2},
the Interstate Renewable Energy Council
\cite{sherwood_us_2009,sherwood_us_2010,sherwood_us_2011,brown_solid_1996,sherwood_us_2012,sherwood_us_2013,sherwood_us_2014},
the Department of Energy's EERE and NE offices
\cite{us_department_of_energy_capital_2016}, and the National Renewable Energy
Laboratory
\cite{nrel_2020_2020,sengupta_national_2018}.
Industry sources included the World Nuclear Association
\cite{world_nuclear_association_nuclear_2017},
the Nuclear Energy Institute
\cite{desai_nuclear_2018,desai_nuclear_2020,murphy_impacts_2019,tessum_air_2019},
Rockland Capital Generation \cite{rockland_capital_natural_2021},
Sargent \& Lundy \cite{sargent__lundy_capital_2020},
Lazard \cite{ray_lazards_2020},
and others
\cite{the_solar_foundation_national_2020,solar_energy_industries_association_illinois_2020,rutovitz_calculating_2015}.
In particular, the costs assumed in the model configuration for each technology
drove the cost optimization. Table \ref{tab:cost_table} shows the cost
assumptions in the models, while Table \ref{tab:emissions-data} shows the
emissions assumptions.
\input{cost_table}
% paragraph on reproducibility and location of data/analysis
This work was conducted in the open under a BSD-3 open-source license by the
Advanced Reactors and Fuel Cycles group at the University of Illinois. All
data, models, and assumptions used in this work can be found and explored in
the open-source repository at
\url{https://github.com/arfc/2021-04-nm-illinois}.
\FloatBarrier
\subsection{Scenarios Simulated}\label{sec:simulations}
Table \ref{tab:scenarios} describes the scenarios we simulated.
All share the same objective function, which seeks to minimize total system cost.
They are clustered into four major categories. First, the \gls{BAU} cases assume
no carbon limit, while the \gls{CC} cases assume a zero
carbon target in 2030. Comparing these two simulation categories reveals the
potential impact of carbon limits and premature nuclear energy closure on
the minimum achievable cost. These simulations make conservative assumptions
about the cost and availability of advanced nuclear power.
To explore the importance of these assumptions, two additional classes of
simulations were explored. In the \gls{XN} cases, advanced
nuclear reactors are assumed to be twice as expensive to build as the best
conservative estimates suggest. In the \gls{ZN} cases, advanced
nuclear power is not available in time to contribute to carbon reductions in
Illinois before 2050. These scenarios are summarized in Table
\ref{tab:scenarios}.
\input{scenarios-table}
%\begin{figure}
% \centering
%\includegraphics[width=0.3\linewidth]{../data/bau_illinois_input_graphviz/bau_illinois.png}
%\caption{The directed graph, implemented in Temoa, representing the electric grid in Illinois.} \label{fig:temoa_graph}
%\end{figure}
\FloatBarrier
\subsection{Constraints}
Some constraints are shared among all scenarios:
\begin{itemize}
\item The initial conditions reflect the true 2020 energy mix in Illinois.
\item Power supply must meet power demand in each time step.
\item Strategic planning reserve must be greater than 15\% of demand.
\item Technology models are identical across all simulations with the
exception of the capital cost of advanced nuclear, which is
altered for the \gls{XN} scenarios.
\end{itemize}
The simulations diverge due to their differing treatment of constraints related
to the timing of nuclear plant closures, inclusion of
carbon targets, and land-use limits for the growth of renewables.
\subsubsection{Byron and Dresden Closures}
In each family of scenarios, the impact of closing Byron \& Dresden was explored
under one of three assumptions. The two plants either:
\begin{itemize}
\item close prematurely, in 2021,
\item close as scheduled, when their current licenses expire in 20 and 10
years, or
\item receive license extensions and continue operating through 2050.
\end{itemize}
\subsubsection{Other Existing Nuclear}
In each family of scenarios, the other existing nuclear power plants in
Illinois were either:
\begin{itemize}
\item decommissioned as scheduled according to their current
licenses, or
\item awarded license extensions and continue operating through 2050.
\end{itemize}
\subsubsection{Zero Carbon Target}
In the business-as-usual cases (\gls{BAU}1-3), the simulations were not carbon limited. All
other simulations impose a linear reduction in carbon emissions beginning in 2020 and
reaching zero carbon emissions by 2030. This constrains energy
deployment options in those simulations.
\subsubsection{Renewable Growth Rate}
In the business-as-usual cases, the growth rate for renewable energy is limited
primarily by economics. In the carbon-constrained and expensive-nuclear scenarios, an
optimistic growth rate is enabled.
In those cases, utility-scale solar is allowed to grow to 10 GWe by 2030,
reflecting the aggressive and optimistic build-out proposed in the Illinois
Clean Energy Jobs Act. Similarly, wind turbine deployments grow to 13.8 GWe
by 2030. Finally, residential solar is allowed to
increase at a steady rate, but is capped at 75\% of the technical resource
availability to reflect deployment on 75\% of Illinois buildings
\cite{gagnon_rooftop_2016}.
Without preserving existing nuclear or deploying advanced reactors, the
required land use for solar and wind generation is infeasible, since the
Illinois land appropriate for wind and solar is already in use as vital
farmland. The southern and central regions of Illinois most suitable for solar
power installations are the same regions the nation currently relies on for
15\% of its corn and 14\% of its soybeans \cite{schleusener_illinois_2020}.
\begin{figure}[htbp!]
\begin{center}
\includegraphics[height=0.4\textheight]{solar-suitability.png}\\
\vspace{0.5cm}
\includegraphics[width=\textwidth]{corn-and-soy.png}
\end{center}
\caption{Corn (bottom left) and soybean (bottom right) crops in Illinois lie predominantly in the
same portion of the state corresponding to the
region of highest solar panel suitability (top)
\cite{schleusener_illinois_2020,eispc_energy_2021,sengupta_national_2018}.}
\label{fig:corn-and-soy}
\end{figure}
Specifically, strategies that allow nuclear plants to close before 2050 require 10,000 $km^2$ of this land to be dedicated to solar, as well as 4\% of Illinois' land area in use for rooftop solar. Keeping the nuclear plants open through 2050 halves this requirement.
Even when the growth constraints on utility-scale wind and solar are lifted, it is not possible to achieve zero carbon without advanced nuclear under the land-use constraints above.
\FloatBarrier
\subsection{Demand Model}
Illinois electricity demand has remained steady at approximately 140.7 TWh per
year for the last decade
\cite{us_energy_information_administration_eia_illinois_2020}. All scenarios
simulated in this report assume that this demand remains steady annually.
If Illinois transportation is fully electrified by 2050, this assumption will
not be valid. However, postulating such growth scenarios is beyond the scope
of this report.
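For scale, a steady annual demand of 140.7 TWh corresponds to an average load of
\begin{align}
\frac{140.7~\text{TWh}}{8760~\text{h}} \approx 16.1~\text{GW}.\nonumber
\end{align}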
As part of model configuration, the Temoa framework accepts demand profiles
capturing seasonal and daily fluctuations. The typical Illinois hourly demand profile and seasonal variation in hourly demand were both retrieved from the \gls{EIA}
\cite{us_energy_information_administration_eia_illinois_2020}. Figure
\ref{fig:seasonal_hourly_demand} shows the variation in hourly demand.
In our simulations, the demand is seasonally modulated by this information.
\begin{figure}[htbp!]
\begin{center}
\includegraphics[width=0.8\textwidth]{seasonal_hourly_demand.png}
\caption{The seasonal variation in hourly demand in Illinois was retrieved from the \gls{EIA}
\cite{us_energy_information_administration_eia_illinois_2020} and
loaded into Temoa \cite{decarolis_modelling_2016}.}
\label{fig:seasonal_hourly_demand}
\end{center}
\end{figure}
|
// Copyright 2018-2019 Hans Dembinski
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_HISTOGRAM_ALGORITHM_REDUCE_HPP
#define BOOST_HISTOGRAM_ALGORITHM_REDUCE_HPP
#include <boost/assert.hpp>
#include <boost/histogram/axis/traits.hpp>
#include <boost/histogram/detail/axes.hpp>
#include <boost/histogram/detail/cat.hpp>
#include <boost/histogram/detail/make_default.hpp>
#include <boost/histogram/detail/static_if.hpp>
#include <boost/histogram/detail/type_name.hpp>
#include <boost/histogram/fwd.hpp>
#include <boost/histogram/indexed.hpp>
#include <boost/histogram/unsafe_access.hpp>
#include <boost/throw_exception.hpp>
#include <cmath>
#include <initializer_list>
#include <stdexcept>
namespace boost {
namespace histogram {
namespace detail {
struct reduce_option {
unsigned iaxis = 0;
bool indices_set = false;
axis::index_type begin = 0, end = 0;
bool values_set = false;
double lower = 0.0, upper = 0.0;
unsigned merge = 0;
};
} // namespace detail
namespace algorithm {
using reduce_option = detail::reduce_option;
/**
Shrink and rebin option to be used in reduce().
To shrink and rebin in one command. Equivalent to passing both the shrink() and the
rebin() option for the same axis to reduce.
@param iaxis which axis to operate on.
@param lower lowest bound that should be kept.
@param upper highest bound that should be kept. If upper is inside bin interval, the
whole interval is removed.
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option shrink_and_rebin(unsigned iaxis, double lower, double upper,
unsigned merge) {
if (lower == upper)
BOOST_THROW_EXCEPTION(std::invalid_argument("lower != upper required"));
if (merge == 0) BOOST_THROW_EXCEPTION(std::invalid_argument("merge > 0 required"));
return {iaxis, false, 0, 0, true, lower, upper, merge};
}
/**
Slice and rebin option to be used in reduce().
To slice and rebin in one command. Equivalent to passing both the slice() and the
rebin() option for the same axis to reduce.
@param iaxis which axis to operate on.
@param begin first index that should be kept.
@param end one past the last index that should be kept.
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option slice_and_rebin(unsigned iaxis, axis::index_type begin,
axis::index_type end, unsigned merge) {
if (!(begin < end))
BOOST_THROW_EXCEPTION(std::invalid_argument("begin < end required"));
if (merge == 0) BOOST_THROW_EXCEPTION(std::invalid_argument("merge > 0 required"));
return {iaxis, true, begin, end, false, 0.0, 0.0, merge};
}
/**
Shrink option to be used in reduce().
@param iaxis which axis to operate on.
@param lower lowest bound that should be kept.
@param upper highest bound that should be kept. If upper is inside bin interval, the
whole interval is removed.
*/
inline reduce_option shrink(unsigned iaxis, double lower, double upper) {
return shrink_and_rebin(iaxis, lower, upper, 1);
}
/**
Slice option to be used in reduce().
@param iaxis which axis to operate on.
@param begin first index that should be kept.
@param end one past the last index that should be kept.
*/
inline reduce_option slice(unsigned iaxis, axis::index_type begin, axis::index_type end) {
return slice_and_rebin(iaxis, begin, end, 1);
}
/**
Rebin option to be used in reduce().
@param iaxis which axis to operate on.
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option rebin(unsigned iaxis, unsigned merge) {
if (merge == 0) BOOST_THROW_EXCEPTION(std::invalid_argument("merge > 0 required"));
return reduce_option{iaxis, false, 0, 0, false, 0.0, 0.0, merge};
}
/**
  Shrink and rebin option to be used in reduce() (convenience for 1D histograms).
@param lower lowest bound that should be kept.
@param upper highest bound that should be kept. If upper is inside bin interval, the
whole interval is removed.
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option shrink_and_rebin(double lower, double upper, unsigned merge) {
return shrink_and_rebin(0, lower, upper, merge);
}
/**
Slice and rebin option to be used in reduce() (convenience for 1D histograms).
@param begin first index that should be kept.
@param end one past the last index that should be kept.
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option slice_and_rebin(axis::index_type begin, axis::index_type end,
unsigned merge) {
return slice_and_rebin(0, begin, end, merge);
}
/**
Shrink option to be used in reduce() (convenience for 1D histograms).
@param lower lowest bound that should be kept.
@param upper highest bound that should be kept. If upper is inside bin interval, the
whole interval is removed.
*/
inline reduce_option shrink(double lower, double upper) {
return shrink(0, lower, upper);
}
/**
Slice option to be used in reduce() (convenience for 1D histograms).
@param begin first index that should be kept.
@param end one past the last index that should be kept.
*/
inline reduce_option slice(axis::index_type begin, axis::index_type end) {
return slice(0, begin, end);
}
/**
Rebin option to be used in reduce() (convenience for 1D histograms).
@param merge how many adjacent bins to merge into one.
*/
inline reduce_option rebin(unsigned merge) { return rebin(0, merge); }
/**
Shrink, slice, and/or rebin axes of a histogram.
Returns the reduced copy of the histogram.
Shrinking only works with axes that accept double values. Some axis types do not support
the reduce operation, for example, the builtin category axis, which is not ordered.
Custom axis types must implement a special constructor (see concepts) to be reducible.
@param hist original histogram.
@param options iterable sequence of reduce options, generated by shrink_and_rebin(),
slice_and_rebin(), shrink(), slice(), and rebin().
*/
template <class Histogram, class Iterable, class = detail::requires_iterable<Iterable>>
decltype(auto) reduce(const Histogram& hist, const Iterable& options) {
const auto& old_axes = unsafe_access::axes(hist);
auto opts = detail::make_stack_buffer<reduce_option>(old_axes);
for (const reduce_option& o_in : options) {
BOOST_ASSERT(o_in.merge > 0);
if (o_in.iaxis >= hist.rank())
BOOST_THROW_EXCEPTION(std::invalid_argument("invalid axis index"));
reduce_option& o_out = opts[o_in.iaxis];
if (o_out.merge > 0) {
// some option was already set for this axis, see if we can merge requests
if (o_in.merge > 1 && o_out.merge > 1)
BOOST_THROW_EXCEPTION(std::invalid_argument("conflicting merge requests"));
if ((o_in.indices_set || o_in.values_set) &&
(o_out.indices_set || o_out.values_set))
BOOST_THROW_EXCEPTION(
std::invalid_argument("conflicting slice or shrink requests"));
}
if (o_in.values_set) {
o_out.values_set = true;
o_out.lower = o_in.lower;
o_out.upper = o_in.upper;
} else if (o_in.indices_set) {
o_out.indices_set = true;
o_out.begin = o_in.begin;
o_out.end = o_in.end;
}
o_out.merge = std::max(o_in.merge, o_out.merge);
}
// make new axes container with default-constructed axis instances
auto axes = detail::make_default(old_axes);
detail::static_if<detail::is_tuple<decltype(axes)>>(
[](auto&, const auto&) {},
[](auto& axes, const auto& old_axes) {
axes.reserve(old_axes.size());
detail::for_each_axis(old_axes, [&axes](const auto& a) {
axes.emplace_back(detail::make_default(a));
});
},
axes, old_axes);
// override default-constructed axis instances with modified instances
unsigned iaxis = 0;
hist.for_each_axis([&](const auto& a) {
using A = std::decay_t<decltype(a)>;
auto& o = opts[iaxis];
if (o.merge > 0) { // option is set?
detail::static_if_c<axis::traits::is_reducible<A>::value>(
[&o](auto&& aout, const auto& ain) {
using A = std::decay_t<decltype(ain)>;
if (o.indices_set) {
o.begin = std::max(0, o.begin);
o.end = std::min(o.end, ain.size());
} else {
o.begin = 0;
o.end = ain.size();
if (o.values_set) {
if (o.lower < o.upper) {
while (o.begin != o.end && ain.value(o.begin) < o.lower) ++o.begin;
while (o.end != o.begin && ain.value(o.end - 1) >= o.upper) --o.end;
} else if (o.lower > o.upper) {
// for inverted axis::regular
while (o.begin != o.end && ain.value(o.begin) > o.lower) ++o.begin;
while (o.end != o.begin && ain.value(o.end - 1) <= o.upper) --o.end;
}
}
}
o.end -= (o.end - o.begin) % o.merge;
aout = A(ain, o.begin, o.end, o.merge);
},
[](auto&&, const auto& ain) {
using A = std::decay_t<decltype(ain)>;
BOOST_THROW_EXCEPTION(std::invalid_argument(
detail::cat(detail::type_name<A>(), " is not reducible")));
},
axis::get<A>(detail::axis_get(axes, iaxis)), a);
} else {
o.merge = 1;
o.begin = 0;
o.end = a.size();
axis::get<A>(detail::axis_get(axes, iaxis)) = a;
}
++iaxis;
});
auto storage = detail::make_default(unsafe_access::storage(hist));
auto result = Histogram(std::move(axes), std::move(storage));
auto idx = detail::make_stack_buffer<int>(unsafe_access::axes(result));
for (auto&& x : indexed(hist, coverage::all)) {
auto i = idx.begin();
auto o = opts.begin();
for (auto j : x.indices()) {
*i = (j - o->begin);
if (*i <= -1)
*i = -1;
else {
*i /= o->merge;
const int end = (o->end - o->begin) / o->merge;
if (*i > end) *i = end;
}
++i;
++o;
}
result.at(idx) += *x;
}
return result;
}
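// Example (a sketch, assuming a 1D histogram `h` over doubles):
//   using namespace boost::histogram::algorithm;
//   auto h2 = reduce(h, shrink_and_rebin(0, 0.0, 5.0, 2)); // keep [0, 5), merge bin pairs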
/**
Shrink, slice, and/or rebin axes of a histogram.
Returns the reduced copy of the histogram.
Shrinking only works with axes that accept double values. Some axis types do not support
the reduce operation, for example, the builtin category axis, which is not ordered.
Custom axis types must implement a special constructor (see concepts) to be reducible.
@param hist original histogram.
@param opt reduce option generated by shrink_and_rebin(), shrink(), and rebin().
@param opts more reduce options.
*/
template <class Histogram, class... Ts>
decltype(auto) reduce(const Histogram& hist, const reduce_option& opt,
const Ts&... opts) {
// this must be in one line, because any of the ts could be a temporary
return reduce(hist, std::initializer_list<reduce_option>{opt, opts...});
}
} // namespace algorithm
} // namespace histogram
} // namespace boost
#endif
|
[STATEMENT]
lemma subset_remdups': "set (remdups' f xs) \<subseteq> set xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (remdups' f xs) \<subseteq> set xs
[PROOF STEP]
by (induct xs) (auto split: option.splits)
|
lemma tendsto_sgn [tendsto_intros]: "(f \<longlongrightarrow> l) F \<Longrightarrow> l \<noteq> 0 \<Longrightarrow> ((\<lambda>x. sgn (f x)) \<longlongrightarrow> sgn l) F" for l :: "'a::real_normed_vector"
|
[STATEMENT]
lemma wordintervalt_eq_symp:
"symp wordinterval_eq"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. symp wordinterval_eq
[PROOF STEP]
apply(rule sympI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b. wordinterval_eq a b \<Longrightarrow> wordinterval_eq b a
[PROOF STEP]
by(simp add: wordinterval_eq_comm)
|
import numpy as np
import pandas as pd
from pathlib import Path
import datatable as dt
from sklearn.metrics import pairwise_distances
from os.path import basename
def count_marker_positive_neighborhood(path, marker, cutoff, distance=30):
# Generata a list with all the files on the folder
files = Path(path).rglob("*.csv")
# Iterate over all files
di = dict()
for f in files:
# Read in data using datatable
data = dt.fread(f, columns={marker:marker, "X_centroid":"X", "Y_centroid":"Y", ... : None}).to_pandas()
# Call marker positive cells by applying the cutoff value
data = data[data[marker] >= cutoff]
# If less than 2 cells are present a distance can't be calculated
if len(data)>1:
# Calculate pairwise distances between cells
dists = pairwise_distances(data[["X", "Y"]], metric="euclidean", n_jobs=-1)
# Get marker positive cells in neighborhood
neighborhood = np.where(dists <= distance, dists, 0)
# Count cells in neighborhood
neighborhood = np.count_nonzero(neighborhood, axis=0)
# Get average number of marker positive cells in neighborhood
neighborhood = neighborhood.mean()
else:
neighborhood = np.nan
# Aggregate the results together
di[basename(f)] = neighborhood
# Convert dictionary to Series
neighborhood = pd.Series(di)
return neighborhood
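# Example usage (a sketch; the path, marker name, and cutoff are illustrative):
# counts = count_marker_positive_neighborhood(
#     "quantification/", marker="CD45", cutoff=0.5, distance=30)
# print(counts.head())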
|
{-# OPTIONS --allow-unsolved-metas #-}
------------------------------------------------------------------------------
------------------------------------------------------------------------------
-- CS410 2017/18 Exercise 1 VECTORS AND FRIENDS (worth 25%)
------------------------------------------------------------------------------
------------------------------------------------------------------------------
-- NOTE (19/9/17) This file is currently incomplete: more will arrive on
-- GitHub.
-- MARK SCHEME (transcribed from paper): the (m) numbers add up to slightly
-- more than 25, so should be taken as the maximum number of marks losable on
-- the exercise. In fact, I did mark it negatively, but mostly because it was
-- done so well (with Agda's help) that it was easier to find the errors.
------------------------------------------------------------------------------
-- Dependencies
------------------------------------------------------------------------------
open import CS410-Prelude
------------------------------------------------------------------------------
-- Vectors
------------------------------------------------------------------------------
data Vec (X : Set) : Nat -> Set where -- like lists, but length-indexed
[] : Vec X zero
_,-_ : {n : Nat} -> X -> Vec X n -> Vec X (suc n)
infixr 4 _,-_ -- the "cons" operator associates to the right
-- I like to use the asymmetric ,- to remind myself that the element is to
-- the left and the rest of the list is to the right.
-- Vectors are useful when there are important length-related safety
-- properties.
------------------------------------------------------------------------------
-- Heads and Tails
------------------------------------------------------------------------------
-- We can rule out nasty head and tail errors by insisting on nonemptiness!
--??--1.1-(2)-----------------------------------------------------------------
vHead : {X : Set}{n : Nat} -> Vec X (suc n) -> X
vHead (x ,- xs) = x
vTail : {X : Set}{n : Nat} -> Vec X (suc n) -> Vec X n
vTail (x ,- xs) = xs
vHeadTailFact : {X : Set}{n : Nat}(xs : Vec X (suc n)) ->
(vHead xs ,- vTail xs) == xs
vHeadTailFact (x ,- xs) = refl (x ,- xs)
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Concatenation and its Inverse
------------------------------------------------------------------------------
--??--1.2-(2)-----------------------------------------------------------------
_+V_ : {X : Set}{m n : Nat} -> Vec X m -> Vec X n -> Vec X (m +N n)
[] +V ys = ys
(x ,- xs) +V ys = x ,- xs +V ys
infixr 4 _+V_
vChop : {X : Set}(m : Nat){n : Nat} -> Vec X (m +N n) -> Vec X m * Vec X n
vChop zero xs = [] , xs
vChop (suc m) (x ,- xs) with vChop m xs
vChop (suc m) (x ,- xs) | fst₁ , snd₁ = (x ,- fst₁) , snd₁
vChopAppendFact : {X : Set}{m n : Nat}(xs : Vec X m)(ys : Vec X n) ->
vChop m (xs +V ys) == (xs , ys)
vChopAppendFact [] ys = refl ([] , ys)
vChopAppendFact (x ,- xs) ys rewrite vChopAppendFact xs ys = refl ((x ,- xs) , ys)
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Map, take I
------------------------------------------------------------------------------
-- Implement the higher-order function that takes an operation on
-- elements and does it to each element of a vector. Use recursion
-- on the vector.
-- Note that the type tells you the size remains the same.
-- Show that if the elementwise function "does nothing", neither does
-- its vMap. "map of identity is identity"
-- Show that two vMaps in a row can be collapsed to just one, or
-- "composition of maps is map of compositions"
--??--1.3-(2)-----------------------------------------------------------------
vMap : {X Y : Set} -> (X -> Y) -> {n : Nat} -> Vec X n -> Vec Y n
vMap f [] = []
vMap f (x ,- xs) = f x ,- vMap f xs
vMapIdFact : {X : Set}{f : X -> X}(feq : (x : X) -> f x == x) ->
{n : Nat}(xs : Vec X n) -> vMap f xs == xs
vMapIdFact feq [] = refl []
vMapIdFact feq (x ,- xs) rewrite feq x | vMapIdFact feq xs = refl (x ,- xs)
vMapCpFact : (X Y Z : Set)(f : Y -> Z)(g : X -> Y)(h : X -> Z)
(heq : (x : X) -> f (g x) == h x) ->
{n : Nat}(xs : Vec X n) ->
vMap f (vMap g xs) == vMap h xs
vMapCpFact _ _ _ _ _ _ heq [] = refl []
vMapCpFact X Y Z f g h heq (x ,- xs) rewrite heq x | vMapCpFact X Y Z f g h heq xs = refl (h x ,- vMap h xs)
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- vMap and +V
------------------------------------------------------------------------------
-- Show that if you've got two vectors of Xs and a function from X to Y,
-- and you want to concatenate and map, it doesn't matter which you do
-- first.
--??--1.4-(1)-----------------------------------------------------------------
vMap+VFact : {X Y : Set}(f : X -> Y) ->
{m n : Nat}(xs : Vec X m)(xs' : Vec X n) ->
vMap f (xs +V xs') == (vMap f xs +V vMap f xs')
vMap+VFact f [] xs' = refl (vMap f xs')
vMap+VFact f (x ,- xs) xs' rewrite vMap+VFact f xs xs' = refl _
--??--------------------------------------------------------------------------
-- Think about what you could prove, relating vMap with vHead, vTail, vChop...
-- Now google "Philip Wadler" "Theorems for Free"
------------------------------------------------------------------------------
-- Applicative Structure (giving mapping and zipping cheaply)
------------------------------------------------------------------------------
--??--1.5-(2)-----------------------------------------------------------------
-- HINT: you will need to override the default invisibility of n to do this.
vPure : {X : Set} -> X -> {n : Nat} -> Vec X n
vPure x {zero} = []
vPure x {suc n} = x ,- vPure x
_$V_ : {X Y : Set}{n : Nat} -> Vec (X -> Y) n -> Vec X n -> Vec Y n
[] $V [] = []
x ,- fs $V x₁ ,- xs = x x₁ ,- (fs $V xs)
infixl 3 _$V_ -- "Application associates to the left,
-- rather as we all did in the sixties." (Roger Hindley)
-- Pattern matching and recursion are forbidden for the next two tasks.
-- implement vMap again, but as a one-liner
vec : {X Y : Set} -> (X -> Y) -> {n : Nat} -> Vec X n -> Vec Y n
vec f xs = vPure f $V xs
-- implement the operation which pairs up corresponding elements
vZip : {X Y : Set}{n : Nat} -> Vec X n -> Vec Y n -> Vec (X * Y) n
vZip [] [] = []
vZip (x ,- xs) (x₁ ,- ys) = (x , x₁) ,- vZip xs ys
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Applicative Laws
------------------------------------------------------------------------------
-- According to "Applicative programming with effects" by
-- Conor McBride and Ross Paterson
-- some laws should hold for applicative functors.
-- Check that this is the case.
--??--1.6-(2)-----------------------------------------------------------------
vIdentity : {X : Set}{f : X -> X}(feq : (x : X) -> f x == x) ->
{n : Nat}(xs : Vec X n) -> (vPure f $V xs) == xs
vIdentity feq [] = refl []
vIdentity feq (x ,- xs) rewrite feq x | vIdentity feq xs = refl (x ,- xs)
vHomomorphism : {X Y : Set}(f : X -> Y)(x : X) ->
{n : Nat} -> (vPure f $V vPure x) == vPure (f x) {n}
vHomomorphism f x {zero} = refl []
vHomomorphism f x {suc n} rewrite vHomomorphism f x {n} = refl (f x ,- vPure (f x))
vInterchange : {X Y : Set}{n : Nat}(fs : Vec (X -> Y) n)(x : X) ->
(fs $V vPure x) == (vPure (_$ x) $V fs)
vInterchange [] x = refl []
vInterchange (x₁ ,- fs) x rewrite vInterchange fs x = refl _
vComposition : {X Y Z : Set}{n : Nat}
(fs : Vec (Y -> Z) n)(gs : Vec (X -> Y) n)(xs : Vec X n) ->
(vPure _<<_ $V fs $V gs $V xs) == (fs $V (gs $V xs))
vComposition [] [] [] = refl []
vComposition (x₁ ,- fs) (x₂ ,- gs) (x ,- xs) rewrite vComposition fs gs xs = refl _
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Order-Preserving Embeddings (also known in the business as "thinnings")
------------------------------------------------------------------------------
-- What have these to do with Pascal's Triangle?
data _<=_ : Nat -> Nat -> Set where
oz : zero <= zero
os : {n m : Nat} -> n <= m -> suc n <= suc m
o' : {n m : Nat} -> n <= m -> n <= suc m
-- Find all the values in each of the following <= types.
-- This is a good opportunity to learn to use C-c C-a with the -l option
-- (a.k.a. "google the type" without "I feel lucky")
-- The -s n option also helps.
--??--1.7-(1)-----------------------------------------------------------------
all0<=4 : Vec (0 <= 4) 3
all0<=4 = o' (o' (o' (o' oz))) ,- o' (o' (o' (o' oz))) ,- o' (o' (o' (o' oz))) ,- []
all1<=4 : Vec (1 <= 4) 2
all1<=4 = os (o' (o' (o' oz))) ,- os (o' (o' (o' oz))) ,- []
all2<=4 : Vec (2 <= 4) _
all2<=4 = os (o' (o' (os oz))) ,- os (os (o' (o' oz))) ,- []
all3<=4 : Vec (3 <= 4) zero
all3<=4 = []
all4<=4 : Vec (4 <= 4) zero
all4<=4 = []
-- Prove the following. A massive case analysis "rant" is fine.
no5<=4 : 5 <= 4 -> Zero
no5<=4 (os (os (os (os ()))))
no5<=4 (os (os (os (o' ()))))
no5<=4 (os (os (o' (os ()))))
no5<=4 (os (os (o' (o' ()))))
no5<=4 (os (o' (os (os ()))))
no5<=4 (os (o' (os (o' ()))))
no5<=4 (os (o' (o' (os ()))))
no5<=4 (os (o' (o' (o' ()))))
no5<=4 (o' (os (os (os ()))))
no5<=4 (o' (os (os (o' ()))))
no5<=4 (o' (os (o' (os ()))))
no5<=4 (o' (os (o' (o' ()))))
no5<=4 (o' (o' (os (os ()))))
no5<=4 (o' (o' (os (o' ()))))
no5<=4 (o' (o' (o' (os ()))))
no5<=4 (o' (o' (o' (o' ()))))
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Order-Preserving Embeddings Select From Vectors
------------------------------------------------------------------------------
-- Use n <= m to encode the choice of n elements from an m-Vector.
-- The os constructor tells you to take the next element of the vector;
-- the o' constructor tells you to omit the next element of the vector.
--??--1.8-(2)-----------------------------------------------------------------
_<?=_ : {X : Set}{n m : Nat} -> n <= m -> Vec X m
-> Vec X n
oz <?= [] = []
os th <?= (x ,- xs) = x ,- (th <?= xs)
o' th <?= (x ,- xs) = th <?= xs
-- it shouldn't matter whether you map then select or select then map
vMap<?=Fact : {X Y : Set}(f : X -> Y)
{n m : Nat}(th : n <= m)(xs : Vec X m) ->
vMap f (th <?= xs) == (th <?= vMap f xs)
vMap<?=Fact f oz [] = refl []
vMap<?=Fact f (os th) (x ,- xs) rewrite vMap<?=Fact f th xs = refl _
vMap<?=Fact f (o' th) (x ,- xs) = vMap<?=Fact f th xs
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Our Favourite Thinnings
------------------------------------------------------------------------------
-- Construct the identity thinning and the empty thinning.
--??--1.9-(1)-----------------------------------------------------------------
oi : {n : Nat} -> n <= n
oi {zero} = oz
oi {suc n} = os oi
oe : {n : Nat} -> 0 <= n
oe {zero} = oz
oe {suc n} = o' oe
--??--------------------------------------------------------------------------
-- Show that all empty thinnings are equal to yours.
--??--1.10-(1)----------------------------------------------------------------
oeUnique : {n : Nat}(th : 0 <= n) -> th == oe
oeUnique oz = refl oz
oeUnique (o' i) rewrite oeUnique i = refl (o' oe)
--??--------------------------------------------------------------------------
-- Show that there are no thinnings of form big <= small (TRICKY)
-- Then show that all the identity thinnings are equal to yours.
-- Note that you can try the second even if you haven't finished the first.
-- HINT: you WILL need to expose the invisible numbers.
-- HINT: check CS410-Prelude for a reminder of >=
--??--1.11-(3)----------------------------------------------------------------
thinMore : {n m : Nat} -> suc n <= m -> n <= m
thinMore (os n<=m) = o' n<=m
thinMore (o' n<=m) = o' (thinMore n<=m)
oTooBig : {n m : Nat} -> n >= m -> suc n <= m -> Zero
oTooBig {zero} {.(suc _)} n>=m (os th) = n>=m
oTooBig {zero} {.(suc _)} n>=m (o' th) = n>=m
oTooBig {suc n} {.(suc _)} n>=m (os th) = oTooBig n>=m th
oTooBig {suc n} {.(suc _)} n>=m (o' th) = oTooBig n>=m (thinMore th)
oiUnique : {n : Nat}(th : n <= n) -> th == oi
oiUnique oz = refl oz
oiUnique (os th) rewrite oiUnique th = refl (os oi)
oiUnique {m} (o' th) with oTooBig (refl->= m) th
oiUnique {.(suc _)} (o' th) | ()
--??--------------------------------------------------------------------------
-- Show that the identity thinning selects the whole vector
--??--1.12-(1)----------------------------------------------------------------
id-<?= : {X : Set}{n : Nat}(xs : Vec X n) -> (oi <?= xs) == xs
id-<?= [] = refl []
id-<?= (x ,- xs) rewrite id-<?= xs = refl (x ,- xs)
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Composition of Thinnings
------------------------------------------------------------------------------
-- Define the composition of thinnings and show that selecting by a
-- composite thinning is like selecting then selecting again.
-- A small bonus applies to minimizing the length of the proof.
-- To collect the bonus, you will need to think carefully about
-- how to make the composition as *lazy* as possible.
--??--1.13-(3)----------------------------------------------------------------
_o>>_ : {p n m : Nat} -> p <= n -> n <= m -> p <= m
th o>> oz = th
os th o>> os th' = os (th o>> th')
o' th o>> os th' = o' (th o>> th')
th o>> o' th' = o' (th o>> th')
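-- For example (selecting through a selection): th' = os (o' (os oz))
-- keeps elements 1 and 3 of a 3-vector, and th = os (o' oz) keeps
-- element 1 of the result, so
--   (os (o' oz)) o>> (os (o' (os oz)))  =  os (o' (o' oz))
-- which keeps element 1 of the original 3-vector.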
cp-<?= : {p n m : Nat}(th : p <= n)(th' : n <= m) ->
{X : Set}(xs : Vec X m) ->
((th o>> th') <?= xs) == (th <?= (th' <?= xs))
cp-<?= th oz [] = refl (th <?= [])
cp-<?= oz (o' th') (x ,- xs) = cp-<?= oz th' xs
cp-<?= (os th) (os th') (x ,- xs) rewrite cp-<?= th th' xs = refl (x ,- (th <?= (th' <?= xs)))
cp-<?= (os th) (o' th') (x ,- xs) = cp-<?= (os th) th' xs
cp-<?= (o' th) (os th') (x ,- xs) = cp-<?= th th' xs
cp-<?= (o' th) (o' th') (x ,- xs) = cp-<?= (o' th) th' xs
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Thinning Dominoes
------------------------------------------------------------------------------
--??--1.14-(3)----------------------------------------------------------------
idThen-o>> : {n m : Nat}(th : n <= m) -> (oi o>> th) == th
idThen-o>> oz = refl oz
idThen-o>> (os th) rewrite idThen-o>> th = refl (os th)
idThen-o>> (o' th) rewrite idThen-o>> th = refl (o' th)
idAfter-o>> : {n m : Nat}(th : n <= m) -> (th o>> oi) == th
idAfter-o>> oz = refl oz
idAfter-o>> (os th) rewrite idAfter-o>> th = refl (os th)
idAfter-o>> (o' th) rewrite idAfter-o>> th = refl (o' th)
assoc-o>> : {q p n m : Nat}(th0 : q <= p)(th1 : p <= n)(th2 : n <= m) ->
((th0 o>> th1) o>> th2) == (th0 o>> (th1 o>> th2))
assoc-o>> th0 th1 oz = refl (th0 o>> th1)
assoc-o>> th0 (o' th1) (os th2) rewrite assoc-o>> th0 th1 th2 = refl (o' (th0 o>> (th1 o>> th2)))
assoc-o>> (os th0) (os th1) (os th2) rewrite assoc-o>> th0 th1 th2 = refl (os (th0 o>> (th1 o>> th2)))
assoc-o>> (o' th0) (os th1) (os th2) rewrite assoc-o>> th0 th1 th2 = refl (o' (th0 o>> (th1 o>> th2)))
assoc-o>> th0 th1 (o' th2) rewrite assoc-o>> th0 th1 th2 = refl (o' (th0 o>> (th1 o>> th2)))
--??--------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Vectors as Arrays
------------------------------------------------------------------------------
-- We can use 1 <= n as the type of bounded indices into a vector and do
-- a kind of "array projection". First we select a 1-element vector from
-- the n-element vector, then we take its head to get the element out.
vProject : {n : Nat}{X : Set} -> Vec X n -> 1 <= n -> X
vProject xs i = vHead (i <?= xs)
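-- For example, vProject (a ,- b ,- c ,- []) (o' (os oe)) == b :
-- the thinning skips a, takes b, and omits the rest.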
-- Your (TRICKY) mission is to reverse the process, tabulating a function
-- from indices as a vector. Then show that these operations are inverses.
--??--1.15-(3)----------------------------------------------------------------
selectHead : (n : Nat) -> 1 <= (suc n)
selectHead n = os oe
-- HINT: composition of functions
vTabulate : {n : Nat}{X : Set} -> (1 <= n -> X) -> Vec X n
vTabulate {zero} f = []
vTabulate {suc n} f = (f (selectHead n)) ,- (vTabulate (\p -> f (o' p)))
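-- For example, vTabulate {3} f unfolds to
--   f (os oe) ,- f (o' (os oe)) ,- f (o' (o' (os oe))) ,- []
-- i.e. f applied to each index, head first.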
-- This should be easy if vTabulate is correct.
vTabulateProjections : {n : Nat}{X : Set}(xs : Vec X n) ->
vTabulate (vProject xs) == xs
vTabulateProjections [] = refl []
vTabulateProjections (x ,- xs) rewrite vTabulateProjections xs = refl (x ,- xs)
-- HINT: oeUnique
vProjectFromTable : {n : Nat}{X : Set}(f : 1 <= n -> X)(i : 1 <= n) ->
vProject (vTabulate f) i == f i
vProjectFromTable {.(suc _)} {X} f (os i) with oeUnique i
vProjectFromTable {.(suc _)} {X} f (os .oe) | refl .oe = refl (f (os oe))
vProjectFromTable {.(suc _)} {X} f (o' i) = vProjectFromTable (λ z → f (o' z)) i
--??--------------------------------------------------------------------------
|
c----67--1---------2---------3---------4---------5---------6---------712
c
c File: plate7.f latest modification Nov 29, 1997
c
c----67--1---------2---------3---------4---------5---------6---------712
c
subroutine sort_out_nodes(elnum,no_of_nodes,etyp,prog,
1 nstk_s,nstk_a,nstk_b, nods,ns1,ns2,ns3,
2 noda,na1,na2,na3, nodb,nb1,nb2,nb3, iws)
c
c     The routine sorts out the unique nodes and removes the excessive nodes.
c
implicit none
c
include 'plate_common_eln.f'
include 'plate_common_nod.f'
c
integer ns1,ns2,ns3,nods(ns1,ns2,ns3),
2 na1,na2,na3,noda(na1,na2,na3),
3 nb1,nb2,nb3,nodb(nb1,nb2,nb3),iws
c
integer elnum,no_of_nodes, etyp,
1 i,j,k,nn, el_nodes,n, nstart,
2 nstk_s(3,*),nstk_a(3,*),nstk_b(3,*)
c integer fcn
integer numb_of_el_nodes
c
character prog*20
logical basic_plate
c
integer mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
common /nod/ mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
integer ims,jms,kms,ima,jma,kma,imb,jmb
common /max/ ims,jms,kms,ima,jma,kma,imb,jmb
c
c     Change the local node numbering order in the elements if the
c     input deck is generated for ABAQUS or WARP3D (the local node
c     numbering scheme in the default element follows that of ADINA).
c
if (prog.eq.'ABAQUS') then
do i=1, elnum
call change_elnum_to_abaqus(i)
enddo
elseif ( (prog.eq.'WARP3D').or.(prog.eq.'PATRAN') ) then
do i=1, elnum
call change_elnum_to_abaqus(i)
enddo
do i=1, elnum
call change_elnum_to_warp3d(i)
enddo
endif
c
c If etyp < 27 remove excessive nodes
c
do i=1, elnum
do j=etyp+1, 27
eln(i,j)=0
enddo
enddo
c
c Sort out the unique nodes
c
do i=0, inm
nnr(i)=0
enddo
do i=1, elnum
el_nodes = numb_of_el_nodes(i)
do j=1, el_nodes
nnr(eln(i,j))=1
enddo
enddo
c
c Remove jumps in node-numbers - ALL FE CODES
c
c     (Note that the pointer to nnr(i) and npos(i,k) is lost.)
c
c if ( (prog.eq.'WARP3D') .or. (prog.eq.'PATRAN') ) then
nn=0
c Change the node numbers
do i=1, inm
if (nnr(i).gt.0) then
nn=nn+1
nnr(i)=nn
endif
enddo
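c
c            Example: if nodes 3, 7 and 9 are the only ones flagged,
c            this loop sets nnr(3)=1, nnr(7)=2, nnr(9)=3 and leaves
c            every other entry at zero.
c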
c
c modified 11/29/97 eln(i,j) is pointing to nnr(k) containing
c the node number.
c
c Change the node numbers in the elements
c do i=1, elnum
c do j=1, 27
c if (eln(i,j).gt.0) then
c eln(i,j)=nnr(eln(i,j))
c endif
c enddo
c enddo
c else
c nn=0
c do i=1, inm
c if (nnr(i).gt.0) then
c nn=nn+1
c nnr(i)=i
c endif
c enddo
c endif
c
write(*,'(t15,a,i5,a)') '=> # of nodes = ',nn
write(iws,'(t15,a,i5,a)') '=> # of nodes = ',nn
no_of_nodes = nn
c
c Generate graphics file to view the basic plate model (X0,Y0,Z0)
c
c basic_plate = .true.
basic_plate = .false.
if (basic_plate) then
call plot_xz_y0(nods,ns1,ns2,ns3, noda,na1,na2,na3,
& nodb,nb1,nb2,nb3)
call plot_yz_x0(nods,ns1,ns2,ns3, noda,na1,na2,na3)
call plot_xz_ylt(noda,na1,na2,na3, nodb,nb1,nb2,nb3)
endif
c
c Node statistics
c
c Zone S:
c
do j=1, jms
if (nnr(nods(1,j,2)).gt.0) then
nstk_s(1,j) = nnr(nods(1,j,2))
else
nstk_s(1,j) = nnr(nods(1,j,3))
endif
nstk_s(2,j) = nnr(nods(ims,j,kms))
n = 0
do k=2, kms
do i=1, ims
if (nnr(nods(i,j,k)).gt.0) n=n+1
enddo
enddo
nstk_s(3,j) = n
c write(34,'(a,i3,3i7)') ' C: j=',j, (nstk_s(i,j),i=1,3)
enddo
c
c Zone A:
c
nstart = nnr(nods(ims,jms,kms))
do k=1, kma
nstk_a(1,k) = nnr(noda(1,1,k))
nstk_a(2,k) = nnr(noda(ima,jma,k))
n = 0
do j=1, jma
do i=1, ima
if (nnr(noda(i,j,k)).gt.nstart) n=n+1
enddo
enddo
nstk_a(3,k) = n
c write(34,'(a,i3,3i7)') ' A: k=',k, (nstk_a(i,k),i=1,3)
enddo
c
c Zone B:
c
do k=1, kma
if (nnr(noda(2,1,k)).gt.nstart) then
nstk_b(1,k) = nnr(nodb(2,1,k))
else
nstk_b(1,k) = nnr(nodb(3,1,k))
endif
nstk_b(2,k) = nnr(nodb(imb,jmb,k))
n = 0
do j=1, jmb
do i=1, imb
if (nnr(nodb(i,j,k)).gt.nstart) n=n+1
enddo
enddo
nstk_b(3,k) = n
c write(34,'(a,i3,3i7)') ' B: k=',k, (nstk_b(i,k),i=1,3)
enddo
c
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
subroutine change_elnum_to_abaqus(i)
c
c     The routine changes the local element numbering from ADINA to ABAQUS.
c
implicit none
c
include 'plate_common_eln.f'
c
integer i,j,n(27)
do j=1, 27
n(j)=eln(i,j)
enddo
do j=1, 4
eln(i,j)=n(j+4)
eln(i,j+8)=n(j+12)
eln(i,j+4)=n(j)
eln(i,j+12)=n(j+8)
eln(i,j+23)=n(j+21)
enddo
eln(i,22)=n(27)
eln(i,23)=n(26)
c The local nodes 17, 18, 19, 20 and 21 remain unchanged
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
subroutine change_elnum_to_warp3d(i)
c
c     The routine changes the local element numbering from ABAQUS to WARP3D.
c
implicit none
c
include 'plate_common_eln.f'
c
integer i,j,n(27)
do j=1, 20
n(j)=eln(i,j)
enddo
do j=1, 12
eln(i,j)=n(j)
enddo
eln(i,13) = n(17)
eln(i,14) = n(18)
eln(i,15) = n(19)
eln(i,16) = n(20)
eln(i,17) = n(13)
eln(i,18) = n(14)
eln(i,19) = n(15)
eln(i,20) = n(16)
c
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
integer function numb_of_el_nodes(i)
implicit none
c
include 'plate_common_eln.f'
c
integer i,j
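c
c     Scan backwards from local node 27 for the last nonzero entry;
c     note this assumes each element row of eln has at least one
c     nonzero entry (an all-zero row would drive j below 1).
c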
j=27
10 if (eln(i,j).eq.0) then
j=j-1
goto 10
endif
numb_of_el_nodes = j
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
subroutine plot_xz_y0(nods,ns1,ns2,ns3, noda,na1,na2,na3,
& nodb,nb1,nb2,nb3)
c
c     Generate an input file for tecplot; the plane of the crack
c
implicit none
include 'plate_common_nod.f'
c
integer ns1,ns2,ns3,nods(ns1,ns2,ns3),
1 na1,na2,na3,noda(na1,na2,na3),
2 nb1,nb2,nb3,nodb(nb1,nb2,nb3)
c
integer io
parameter (io=31)
integer i,j,k, js,jb, ia1,ia2,i1
c
character tecfile*40
c
logical left
c
integer mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
common /nod/ mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
integer ims,jms,kms,ima,jma,kma,imb,jmb
common /max/ ims,jms,kms,ima,jma,kma,imb,jmb
integer ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
common /reduce/ ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
c
double precision t,w,c,a,alfa
common /geom/ t,w,c,a,alfa
c
tecfile = 'plate_crp.plt'
open( unit=io, file=tecfile, status = 'unknown' )
101 format(t1,2g16.8)
102 format(t1,a)
c
c Boundary
c
write(io,102) 'ZONE'
write(io,101) 0.0, 0.0
write(io,101) 0.0, t
write(io,101) w , t
write(io,101) w , 0.0
write(io,101) 0.0, 0.0
c
c Zone A - Zone S - Zone B; rho-direction
c
ia1=2*m1+1
ia2=2*(m1+mh+mh)+1
do j=3, jma-2, 2
write(io,102) 'ZONE'
c Zone A
if (mod(j,4).eq.1) then
i1=1
else
i1=3
endif
do i=i1, ia1, 2
if (noda(i,j,1).gt.0) then
write(io,101) npos(noda(i,j,1),1),npos(noda(i,j,1),3)
endif
enddo
c Zone S
js = j*sjred_type - 1
do k=kms-2, 1, -2
write(io,101) npos(nods(ims,j,k),1),npos(nods(ims,j,k),3)
enddo
do k=3, kms-2, 2
write(io,101) npos(nods(1,j,k),1),npos(nods(1,j,k),3)
enddo
c Zone A
do i=ia2, ima, 2
write(io,101) npos(noda(i,j,1),1), npos(noda(i,j,1),3)
enddo
c Zone B
jb = j - (jma-jmb)
if (jb .gt. 1) then
do i=3, imb, 2
write(io,101) npos(nodb(i,jb,1),1), npos(nodb(i,jb,1),3)
enddo
endif
enddo
c
c Zone A; phi-direction
c
write(io,102) 'ZONE'
if (mod(na,4).ne.0) then
left=.true.
write(io,101) npos(noda(3,1,3),1), npos(noda(3,1,3),3)
else
left=.false.
write(io,101) npos(noda(1,1,3),1), npos(noda(1,1,3),3)
endif
do j=3, jma-4, 4
if (left) then
write(io,101) npos(noda(3,j,3),1), npos(noda(3,j,3),3)
write(io,101) npos(noda(1,j+2,3),1), npos(noda(1,j+2,3),3)
left=.false.
else
write(io,101) npos(noda(3,j,3),1), npos(noda(3,j,3),3)
write(io,101) npos(noda(3,j+2,3),1), npos(noda(3,j+2,3),3)
left=.true.
endif
enddo
do i=5, ima-2, 2
if ( noda(i,1,1) .gt. 0 ) then
write(io,102) 'ZONE'
do j=1, jma, 2
if ( noda(i,j,1) .gt. 0 ) then
write(io,101) npos(noda(i,j,1),1), npos(noda(i,j,1),3)
endif
enddo
endif
enddo
c
c Zone S; phi-direction
c
do k=1, kms-4, 2
write(io,102) 'ZONE'
i = ims
do j=1, jms, 2
write(io,101) npos(nods(i,j,k),1), npos(nods(i,j,k),3)
enddo
i = 1
write(io,102) 'ZONE'
do j=1, jms, 2
write(io,101) npos(nods(i,j,k),1), npos(nods(i,j,k),3)
enddo
enddo
c
if (sjred_type.eq.1) then
k = kms - 2
i = ims
write(io,102) 'ZONE'
do j=1, jms, 2
write(io,101) npos(nods(i,j,k),1), npos(nods(i,j,k),3)
enddo
i = 1
write(io,102) 'ZONE'
do j=1, jms, 2
write(io,101) npos(nods(i,j,k),1), npos(nods(i,j,k),3)
enddo
elseif (sjred_type.eq.2) then
k = kms - 2
i = ims
write(io,102) 'ZONE'
write(io,101) npos(nods(i,1,k),1), npos(nods(i,1,k),3)
do j=5, jms, 8
write(io,101) npos(nods(i,j-2,k),1), npos(nods(i,j-2,k),3)
write(io,101) npos(nods(i,j,k+2),1), npos(nods(i,j,k+2),3)
if (j.le.(jms-4)) then
write(io,101) npos(nods(i,j+2,k),1),npos(nods(i,j+2,k),3)
write(io,101) npos(nods(i,j+4,k),1),npos(nods(i,j+4,k),3)
endif
enddo
c
i = 1
write(io,102) 'ZONE'
write(io,101) npos(nods(i,1,k),1), npos(nods(i,1,k),3)
do j=5, jms, 8
write(io,101) npos(nods(i,j-2,k),1), npos(nods(i,j-2,k),3)
write(io,101) npos(nods(i,j,k+2),1), npos(nods(i,j,k+2),3)
if (j.le.(jms-4)) then
write(io,101) npos(nods(i,j+2,k),1),npos(nods(i,j+2,k),3)
write(io,101) npos(nods(i,j+4,k),1),npos(nods(i,j+4,k),3)
endif
enddo
elseif (sjred_type.eq.3) then
k = kms - 2
i = ims
write(io,102) 'ZONE'
write(io,101) npos(nods(i,1,k+2),1), npos(nods(i,1,k+2),3)
do j=7, jms, 6
write(io,101) npos(nods(i,j-4,k),1), npos(nods(i,j-4,k),3)
write(io,101) npos(nods(i,j-2,k),1), npos(nods(i,j-2,k),3)
write(io,101) npos(nods(i,j,k+2),1), npos(nods(i,j,k+2),3)
enddo
c
k = kms - 2
i = 1
write(io,102) 'ZONE'
write(io,101) npos(nods(i,1,k+2),1), npos(nods(i,1,k+2),3)
do j=7, jms, 6
write(io,101) npos(nods(i,j-4,k),1), npos(nods(i,j-4,k),3)
write(io,101) npos(nods(i,j-2,k),1), npos(nods(i,j-2,k),3)
write(io,101) npos(nods(i,j,k+2),1), npos(nods(i,j,k+2),3)
enddo
endif
c
c Zone B; Z-direction
c
do i=1, imb-2, 2
write(io,102) 'ZONE'
do j=1, jmb, 2
write(io,101) npos(nodb(i,j,1),1), npos(nodb(i,j,1),3)
enddo
enddo
c
close(io)
c
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
subroutine plot_yz_x0(nods,ns1,ns2,ns3, noda,na1,na2,na3)
c
c     Generate an input file for tecplot; the symmetry plane
c
implicit none
include 'plate_common_nod.f'
c
integer ns1,ns2,ns3,nods(ns1,ns2,ns3),
1 na1,na2,na3,noda(na1,na2,na3)
c
integer io
parameter (io=31)
integer i,j,k, ia1,ia2,i1,ksr2,kend
c
character tecfile*40
c
integer mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
common /nod/ mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
integer ims,jms,kms,ima,jma,kma,imb,jmb
common /max/ ims,jms,kms,ima,jma,kma,imb,jmb
integer ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
common /reduce/ ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
c
double precision t,w,c,a,alfa
common /geom/ t,w,c,a,alfa
c
tecfile = 'plate_sym.plt'
open( unit=io, file=tecfile, status = 'unknown' )
101 format(t1,2g16.8)
102 format(t1,a)
c
c...Zone S:
c
ksr2 = ksr1 + 2
j = 1
c
if (sfred_type.eq.1) then
c R-direction
do i=1, ims, 2
write(io,102) 'ZONE'
do k=1, kms, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
c Phi-direction
do k=1, kms, 2
write(io,102) 'ZONE'
do i=1, ims, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
c
elseif (sfred_type.eq.2) then
c
do i=3, ims-2, 2
write(io,102) 'ZONE'
if (mod(i,4).eq.1) then
do k=1, kms, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
else
do k=1, ksr1, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
endif
enddo
c
do k=1, ksr1-2, 2
write(io,102) 'ZONE'
do i=1, ims, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
k = ksr1
write(io,102) 'ZONE'
write(io,101) npos(nods(1,j,k),2),npos(nods(1,j,k),3)
do i=5, ims-4, 8
write(io,101) npos(nods(i-2,j,k),2),npos(nods(i-2,j,k),3)
write(io,101) npos(nods(i,j,k+2),2),npos(nods(i,j,k+2),3)
write(io,101) npos(nods(i+2,j,k),2),npos(nods(i+2,j,k),3)
write(io,101) npos(nods(i+4,j,k),2),npos(nods(i+4,j,k),3)
enddo
do k=ksr2, kms, 2
write(io,102) 'ZONE'
do i=1, ims, 4
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
c
elseif (sfred_type.eq.3) then
c
do i=1, ims, 2
write(io,102) 'ZONE'
if (mod(i,6).eq.1) then
do k=1, kms, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
else
do k=1, ksr1, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
endif
enddo
c
do k=1, ksr1-2, 2
write(io,102) 'ZONE'
do i=1, ims, 2
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
c
k = ksr1
write(io,102) 'ZONE'
write(io,101) npos(nods(1,j,k+2),2),npos(nods(1,j,k+2),3)
do i=4, ims, 6
write(io,101) npos(nods(i-1,j,k),2),npos(nods(i-1,j,k),3)
write(io,101) npos(nods(i+1,j,k),2),npos(nods(i+1,j,k),3)
write(io,101) npos(nods(i+3,j,k+2),2),npos(nods(i+3,j,k+2),3)
enddo
do k=ksr2, kms, 2
write(io,102) 'ZONE'
do i=1, ims, 3
write(io,101) npos(nods(i,j,k),2),npos(nods(i,j,k),3)
enddo
enddo
endif
c
c Zone A
c
ia1=2*m1+1
ia2=2*(m1+mh+mh)+1
c Z-direction
do k=1, 2*mv+1, 2
write(io,102) 'ZONE'
do i=1, ia1, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
write(io,102) 'ZONE'
do i=ia2, ima, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
do k=2*mv+3, kar1-2, 2
write(io,102) 'ZONE'
do i=1, ima, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
if (rtype.eq.0) then
do k=kar1, kma, 2
write(io,102) 'ZONE'
do i=1, ima, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
else
if (mod(ma,4).eq.0) then
write(io,102) 'ZONE'
write(io,101) npos(noda(1,1,kar1),2),npos(noda(1,1,kar1),3)
write(io,101) npos(noda(5,1,kar1),2),npos(noda(5,1,kar1),3)
do i=9, ima, 8
write(io,101) npos(noda(i-2,1,kar1),2),
& npos(noda(i-2,1,kar1),3)
write(io,101) npos(noda(i,1,kar1+2),2),
& npos(noda(i,1,kar1+2),3)
if ((i+4).lt.na1) then
if (noda(i+2,1,kar1).gt.0) then
write(io,101) npos(noda(i+2,1,kar1),2),
& npos(noda(i+2,1,kar1),3)
write(io,101) npos(noda(i+4,1,kar1),2),
& npos(noda(i+4,1,kar1),3)
endif
endif
enddo
else
write(io,102) 'ZONE'
write(io,101) npos(noda(5,1,kar1+2),2),
& npos(noda(5,1,kar1+2),3)
do i=9, ima, 8
write(io,101) npos(noda(i-2,1,kar1),2),
& npos(noda(i-2,1,kar1),3)
write(io,101) npos(noda(i,1,kar1),2),
& npos(noda(i,1,kar1),3)
write(io,101) npos(noda(i+2,1,kar1),2),
& npos(noda(i+2,1,kar1),3)
write(io,101) npos(noda(i+4,1,kar1+2),2),
& npos(noda(i+4,1,kar1+2),3)
enddo
endif
do k=kar1+2, kma, 2
if((rtype.eq.2).and.(mod(na,4).ne.0).and.(k.eq.kar2))then
i1=3
elseif ( (rtype.eq.2).and.(k.eq.kar2) ) then
i1=ima
else
i1=1
endif
write(io,102) 'ZONE'
do i=i1, ima, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
endif
c Y-direction
write(io,102) 'ZONE'
do k=1, kma, 2
if (noda(1,1,k).gt.0) then
write(io,101) npos(noda(1,1,k),2),npos(noda(1,1,k),3)
endif
enddo
if (rtype.lt.2) then
if (mod(na,4).ne.0) then
write(io,102) 'ZONE'
do k=1, kma, 2
if (noda(3,1,k).gt.0) then
write(io,101) npos(noda(3,1,k),2),npos(noda(3,1,k),3)
endif
enddo
endif
else
if (mod(na,4).ne.0) then
write(io,102) 'ZONE'
do k=1, kar2, 2
if (noda(3,1,k).gt.0) then
write(io,101) npos(noda(3,1,k),2),npos(noda(3,1,k),3)
endif
enddo
write(io,101) npos(noda(1,1,kar2+2),2),
& npos(noda(1,1,kar2+2),3)
endif
endif
if (rtype.eq.0) then
do i=5, ima, 2
write(io,102) 'ZONE'
do k=1, kma, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
else
do i=5, ima, 2
write(io,102) 'ZONE'
if (mod(i,4).eq.1) then
kend=kma
else
kend=kar1
endif
do k=1, kend, 2
if (noda(i,1,k).gt.0) then
write(io,101) npos(noda(i,1,k),2),npos(noda(i,1,k),3)
endif
enddo
enddo
endif
c
close(io)
c
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
subroutine plot_xz_ylt(noda,na1,na2,na3, nodb,nb1,nb2,nb3)
c
c     Generate an input file for tecplot; the remote plane opposite the crack
c
implicit none
include 'plate_common_nod.f'
c
integer na1,na2,na3,noda(na1,na2,na3),
1 nb1,nb2,nb3,nodb(nb1,nb2,nb3)
c
integer io
parameter (io=31)
integer i,j, jb, di,dj,i1
double precision z1,z2
c
character tecfile*40
c
logical left
c
integer mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
common /nod/ mf,mr,mv,mh,m1,m2,ma,na,mb,nb,lt,lred
integer ims,jms,kms,ima,jma,kma,imb,jmb
common /max/ ims,jms,kms,ima,jma,kma,imb,jmb
integer ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
common /reduce/ ksr1,kar1,kar2,rtype,sfred,sfred_type,sjred_type
c
double precision t,w,c,a,alfa
common /geom/ t,w,c,a,alfa
c
tecfile = 'plate_bak.plt'
open( unit=io, file=tecfile, status = 'unknown' )
101 format(t1,2g16.8)
102 format(t1,a)
c
c Boundary
c
z2 = npos(noda(ima,1,kma),3)
z1 = npos(noda(1,1,kma),3)
write(io,102) 'ZONE'
write(io,101) 0.0, z1
write(io,101) 0.0, z2
write(io,101) w , z2
write(io,101) w , z1
write(io,101) 0.0, z1
c
c Zone A - Zone S - Zone B; rho-direction
c
if (rtype.le.1) then
dj=2
else
dj=4
endif
do j=1+dj, jma, dj
write(io,102) 'ZONE'
c Zone A
if ( (rtype.le.1).and.(mod(j,4).eq.3) ) then
i1=3
else
i1=1
endif
do i=i1, ima
if (noda(i,j,kma).gt.0) then
write(io,101) npos(noda(i,j,kma),1),npos(noda(i,j,kma),3)
endif
enddo
c Zone B
jb = j - (jma-jmb)
if (jb .gt. 1) then
do i=3, imb, 2
write(io,101) npos(nodb(i,jb,1),1), npos(nodb(i,jb,1),3)
enddo
endif
enddo
c
c Zone A; phi-direction
c
if (rtype.le.1) then
write(io,102) 'ZONE'
if (mod(na,4).ne.0) then
left=.true.
write(io,101) npos(noda(3,1,kma),1), npos(noda(3,1,kma),3)
else
left=.false.
write(io,101) npos(noda(1,1,kma),1), npos(noda(1,1,kma),3)
endif
do j=3, jma-4, 4
if (left) then
write(io,101) npos(noda(3,j,kma),1),npos(noda(3,j,kma),3)
write(io,101) npos(noda(1,j+2,kma),1),
& npos(noda(1,j+2,kma),3)
left=.false.
else
write(io,101) npos(noda(3,j,kma),1),npos(noda(3,j,kma),3)
write(io,101) npos(noda(3,j+2,kma),1),
& npos(noda(3,j+2,kma),3)
left=.true.
endif
enddo
endif
if (rtype.eq.0) then
di=2
else
di=4
endif
do i=5, ima, di
write(io,102) 'ZONE'
do j=1, jma
if (noda(i,j,kma).gt.0) write(io,101)
& npos(noda(i,j,kma),1),npos(noda(i,j,kma),3)
enddo
enddo
c
c Zone B; Z-direction
c
do i=3, imb-2, 2
write(io,102) 'ZONE'
do j=1, jmb
if (nodb(i,j,kma).gt.0) write(io,101)
& npos(nodb(i,j,kma),1), npos(nodb(i,j,kma),3)
enddo
enddo
c
close(io)
c
return
end
c
c----67--1---------2---------3---------4---------5---------6---------7-!
c
|
function pick = nmsMe(boxes, overlap)
% Non-maximum suppression. (FAST VERSION)
% Greedily select high-scoring detections and skip detections
% that are significantly covered by a previously selected
% detection.
% NOTE: This is adapted from Pedro Felzenszwalb's version (nms.m),
% but an inner loop has been eliminated to significantly speed it
% up in the case of a large number of boxes
% modified based on Tomasz Malisiewicz's esvm code
if isempty(boxes)
pick = [];
return;
end
x1 = boxes(:,1);
y1 = boxes(:,2);
x2 = boxes(:,3);
y2 = boxes(:,4);
s = boxes(:,end);
area = (x2-x1+1) .* (y2-y1+1);
[~, I] = sort(s);
pick = s*0;
counter = 1;
while ~isempty(I)
last = length(I);
i = I(last);
pick(counter) = i;
counter = counter + 1;
xx1 = max(x1(i), x1(I(1:last-1)));
yy1 = max(y1(i), y1(I(1:last-1)));
xx2 = min(x2(i), x2(I(1:last-1)));
yy2 = min(y2(i), y2(I(1:last-1)));
w = max(0.0, xx2-xx1+1);
h = max(0.0, yy2-yy1+1);
inter = w.*h;
o = inter ./ (area(i) + area(I(1:last-1)) - inter);
I([last; find(o>overlap)]) = [];
end
pick = pick(1:(counter-1));
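% Example usage (a sketch; each row of boxes is [x1 y1 x2 y2 score]):
%   boxes = [10 10 50 50 0.9; 12 12 52 52 0.8; 100 100 140 140 0.7];
%   keep  = nmsMe(boxes, 0.5);    % the second box overlaps the first
%   boxes = boxes(keep, :);       % -> rows 1 and 3 survive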
|
You guys are great! I knew when I got stuck, that I would get lots of suggestions if I posted my work-in-progress. And you all really came through. Thanks for all the great ideas. This weekend I changed my approach completely – dropping the binding and doing a pillow-case finish on the covers.
It helped speed things up, because I’m really not that great at bindings. Actually I don’t consider myself very good at precision sewing, so I have to design around that. For these two books, I played around with some extreme cropping to keep PaMdora’s face the size I normally make it.
Designing these books is a completely different way of thinking – it’s not just how it looks from the front, but the whole experience as you turn the pages, right to the very end. I love the way the backside of the cover looks after quilting her face. Sometimes I think the backsides of my quilts are more interesting than the front, and here’s a chance to take advantage of that.
On each side, I’m using different fabrics and threads, for a little surprise and to make the viewer want to turn it over and examine every surface. Without the bindings, the books (which by the way, are 7″ x 9″) have a very nice feel to them. The covers are simple and soft. I’m using acrylic felt as the middle layer, because it has the same appearance as batting, but it’s a little bit stiffer.
The funny thing was, after I finished these two books, I showed all of them to Russ and he said, well, I still like the one with the bird on the cover (photo in the previous post). He said it reminds me of one of those things you set pans on. I told him that being called a potholder is not considered a compliment in the art quilt world. He said he thought that didn’t matter, that they were appealing that way.
So what do you think? Go back to putting bindings around all the edges? Today I started three more books with different designs, but none of them have bindings either – except for the spine. Hopefully tomorrow I’ll get them done.
No No No – no bindings! I very much like the ‘cropped images’ with less background around them and no bindings. Lots of background and binding reminds me of children’s rag books, these look much more grown up. I think they are terrific. What a great idea!
Love the face. the graphic says it all.
for Russ: Make a larger bird- so we see only 80% of it on the cover–and facing toward the book opening on the right. PaMdora is facing the right way–inviting us into your world.
I have a lot of thoughts about these – not necessarily in terms of the artist books but in terms of what makes a successful small piece.
I believe the pamdora face is extremely successful because it has such great tension – the figure ground relationship between pamdora and the background is lovely. Well done!
The bird is nice but I feel it has too much background – the bird seems so static just sitting in the middle of the picture.
As for bindings – I don’t do them but I think Russ’s comments are interesting in that we in the art quilt world tend to be so darn defensive (at least some of us – and I’m certainly guilty of it) about “I don’t like bindings – they look too ‘quilty’ ” that it is interesting to hear from an outsider that they don’t have such a stigma attached to them.
Well, I don’t really like bindings. I love the ones with Pamdora’s face. They are perfect. No potholders!!
I like the new design. To me, when I think of your work, I think of the glasses and the exasperated look of Pam…trying to make sense of our insane world. Pam’s face says it all…and I know would be memorable to gallery folks. Technically, the pillow case technique works just fine in my opinion!
Pamdora is the star of all your quilts, so it is only fitting that she would be the cover girl. It is perfect. Potholder? Potholder!!! Never!!!! Anyway… love these books. A total experience.
The new book design is so much more graphic, really capturing the look and *feel* of your large quilts. I think it looks more like a professional presentation than the first effort. Fabulous!
I like the face. Your first rendition made me immediately think of children’s books, which, I’m assuming isn’t your goal. Plus, no binding will really allow you to make more if need be.
These look great! I think they will present your work in a professional manner.
No bindings is the way to go; the scale is too small for the bindings.
And the face makes a good “logo” that is easily recognizable. Whoever looks at these books will remember the face.
The s/s face is best I think, because it does exactly what the gallery wants: shows what the work is like. I prefer no binding, because it also illustrates the quilts. Binding and the bird make a separate object, which may well be attractive in itself, but it is not really a taster for the quilts as much as the face. That latter is brilliant – and I agree about the back of the work looking fascinating too.
Her website does not show her work to best advantage unfortunately, but the reason why I thought of her is that she works from the back, and puts the outline of the face on last. It was the back of your PR face which spurred my memory.
Pam, these are perfect!! I thought the other day (but was in a rush so didn’t say) that Pamdora needed to be on the front cover – she really announces what’s inside in a very immediate way. The bird was cute, but maybe a bit too cute (in a pot-holderish sort of way). And you should leave the bindings off – the cleaner lines work.
These are fabulous, Pam. I like the large, cropped face best… It invites you right into the book. The smaller images on front with bindings look good too, but look a tad more like stylish fabric baby books on the outside. The face conveys a much different thing, to me anyway, and these are so immediate. Also, I much prefer the look without bindings…Russ is right, they do look vaguely pot-holderish that way.
|
% =========================================================================
% FUNCTION
% scd_scheme_display.m
%
% INPUT
% gradient_vectors    nx3
%   OR scheme         nx9
% display_voronoi     (optional) if true, also plot a Voronoi diagram
%
% OUTPUT
% (-)
%
% EXAMPLE
% scd_scheme_display(scheme)
%
% EXAMPLE 2
% scd_scheme_display(scd_schemefile_read('qspace.scheme'))
%
% COMMENTS
% Julien Cohen-Adad 2009-10-02
% =========================================================================
%
% See also scd_schemefile_read
function scd_scheme_display(gradient_vectors,display_voronoi)
%% Check if gradient_vectors is a schemefile
if isstr(gradient_vectors)
error('Input should be a matrix not a filename. Use scd_schemefile_read')
end
if size(gradient_vectors,2)>3
gradient_vectors = gradient_vectors(:,1:3).*repmat(gradient_vectors(:,4),[1 3]);
end
% gradient_vectors=gradient_vectors/max(max(gradient_vectors));
gradient_norm = sqrt(gradient_vectors(:,1).^2+gradient_vectors(:,2).^2+gradient_vectors(:,3).^2);
display_3d = 0;
colorindex = jet(1000);
colorprct = max(1,round(gradient_norm/max(gradient_norm)*1000));
color = colorindex(colorprct,:);
% display gradients
if display_3d
for i = 1:size(gradient_vectors,1)
plot3(gradient_vectors(i,1),gradient_vectors(i,2),gradient_vectors(i,3),'.','MarkerSize',10,'Color',color(i,:))
hold on
end
xlabel('X')
ylabel('Y')
zlabel('Z')
axis vis3d;
view(3), axis equal
axis on, grid
rotate3d on;
end
% display gradients
subplot(2,2,1)
for i = 1:size(gradient_vectors,1)
plot3(gradient_vectors(i,1),gradient_vectors(i,2),gradient_vectors(i,3),'.','MarkerSize',10,'Color',color(i,:))
hold on
end
xlabel('X')
ylabel('Y')
zlabel('Z')
axis vis3d;
view(3), axis equal
axis on, grid
rotate3d on;
view(0,0)
lim = max(max(abs([xlim; ylim; zlim])));
xlim([-lim lim]); ylim([-lim lim]); zlim([-lim lim]);
subplot(2,2,2)
for i = 1:size(gradient_vectors,1)
plot3(gradient_vectors(i,1),gradient_vectors(i,2),gradient_vectors(i,3),'.','MarkerSize',10,'Color',color(i,:))
hold on
end
xlabel('X')
ylabel('Y')
zlabel('Z')
axis vis3d;
view(3), axis equal
axis on, grid
rotate3d on;
view(90,0)
lim = max(max(abs([xlim; ylim; zlim])));
xlim([-lim lim]); ylim([-lim lim]); zlim([-lim lim]);
subplot(2,2,3)
for i = 1:size(gradient_vectors,1)
plot3(gradient_vectors(i,1),gradient_vectors(i,2),gradient_vectors(i,3),'.','MarkerSize',10,'Color',color(i,:))
hold on
end
xlabel('X')
ylabel('Y')
zlabel('Z')
axis vis3d;
view(3), axis equal
axis on, grid
rotate3d on;
view(0,90)
lim = max(max(abs([xlim; ylim; zlim])));
xlim([-lim lim]); ylim([-lim lim]); zlim([-lim lim]);
% % Voronoi transformation
if exist('display_voronoi','var') && display_voronoi
X = gradient_vectors;
h_fig = figure('name','Voronoi');
[V,C] = voronoin(X);
K = convhulln(X);
d = [1 2 3 1]; % Index into K
for i = 1:size(K,1)
j = K(i,d);
h(i) = patch(X(j,1),X(j,2),X(j,3),i,'FaceColor','white','FaceLighting','phong','EdgeColor','black');
end
hold off
view(2)
axis off
axis equal
colormap(gray);
% title('One cell of a Voronoi diagram')
axis vis3d;
rotate3d on;
print(h_fig,'-dpng',strcat(['fig_voronoi.png']));
end
|
lemma in_components_maximal: "c \<in> components s \<longleftrightarrow> c \<noteq> {} \<and> c \<subseteq> s \<and> connected c \<and> (\<forall>d. d \<noteq> {} \<and> c \<subseteq> d \<and> d \<subseteq> s \<and> connected d \<longrightarrow> d = c)"
|
import Control.Linear.LIO
import Network.Socket
data SocketState = Ready | Bound | Listening | Open | Closed
data Socket : SocketState -> Type where
MkSocket : Socket.Data.Socket -> Socket st
newSocket : LinearIO io
=> (fam : SocketFamily)
-> (ty : SocketType)
-> (pnum : ProtocolNumber)
-> (success : (1 _ : Socket Ready) -> L io ())
-> (fail : SocketError -> L io ())
-> L io ()
newSocket fam ty pnum success fail
= do Right rawsock <- socket fam ty pnum
| Left err => fail err
success (MkSocket rawsock)
bind : LinearIO io =>
(1 _ : Socket Ready) ->
(addr : Maybe SocketAddress) ->
(port : Port) ->
L io {use=1} (Res Bool (\res => Socket (case res of
False => Closed
True => Bound)))
bind (MkSocket sock) addr port
    = do ok <- Socket.bind sock addr port
         if ok == 0
            then pure1 (True # MkSocket sock)
            else pure1 (False # MkSocket sock)
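-- A sketch of how a caller might consume the dependent result: matching
-- on the Bool witness fixes the socket's state in each branch, so the
-- True branch holds a (1 _ : Socket Bound) and the False branch a
-- (1 _ : Socket Closed). 'serve' and 'release' are hypothetical
-- continuations that consume those states:
--
-- startServer : LinearIO io => (1 _ : Socket Ready) -> L io ()
-- startServer sock
--     = do (True # sock) <- bind sock Nothing 4000
--            | (False # sock) => release sock
--          serve sock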
|
# Load package
library('readr')
# Read the data from the file
dados_cliente <- read.csv("./clientes.csv",sep=";")
head(dados_cliente)
# Treat the Grau.Instrução (education level) variable as ordinal
levels(dados_cliente$Grau.Instrução)
dados_cliente$Grau.Instrução <- factor(dados_cliente$Grau.Instrução,
levels = c('Fundamental','Médio','Superior','Mestrado','Doutorado'))
levels(dados_cliente$Grau.Instrução)
# Absolute frequencies
fa <- with(dados_cliente,table(Estado.civil,Grau.Instrução))
fa
# Relative frequencies
# Relative to the grand total
# Relative to the row totals (`margin = 1`)
# Relative to the column totals (`margin = 2`)
frg <- prop.table(fa)
round(frg,digit=2)
frl <- prop.table(fa,margin=1)
round(frl,digit=2)
frc <- prop.table(fa,margin=2)
round(frc,digit=2)
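# Sanity check: with margin = 1 each row sums to 1, and with margin = 2
# each column sums to 1:
# rowSums(frl); colSums(frc)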
barplot(fa, legend = TRUE)
barplot(t(fa), legend = TRUE)
barplot(fa, beside = TRUE, legend = TRUE)
barplot(t(prop.table(fa)), beside = TRUE, legend = TRUE)
## Salary quartiles
quantile(dados_cliente$Salário)
## Classification according to the quartiles
grupo_sal <- cut(dados_cliente$Salário, breaks = quantile(dados_cliente$Salário),include.lowest = TRUE)
## Table of absolute frequencies
fa <- table(dados_cliente$Grau.Instrução, grupo_sal)
fa
frg <- prop.table(fa)
frg
frl <- prop.table(fa,margin=1)
frl
frc <- prop.table(fa,margin=2)
frc
boxplot(Salário ~ Grau.Instrução, data = dados_cliente)
print('Mean')
with(dados_cliente, tapply(Salário, Grau.Instrução, mean))
print('Standard deviation')
with(dados_cliente, tapply(Salário, Grau.Instrução, sd))
print('Quartiles')
with(dados_cliente, tapply(Salário, Grau.Instrução, quantile))
head(dados_cliente)
## Age classes (the Anos column, in years)
grupo_idade <- with(dados_cliente, cut(Anos, breaks = quantile(Anos),include.lowest = TRUE))
fa_idade <- table(grupo_idade)
fa_idade
## Salary quartiles
quantile(dados_cliente$Salário)
## Classification according to the quartiles
grupo_sal <- cut(dados_cliente$Salário, breaks = quantile(dados_cliente$Salário),include.lowest = TRUE)
fa_sal <- table(grupo_sal)
fa_sal
## Combined cross-tabulation
fa <- table(grupo_idade, grupo_sal)
fa
frg <- prop.table(fa)
frg
frl <- prop.table(fa,margin=1)
frl
frc <- prop.table(fa,margin=2)
frc
head(dados_cliente)
# Scatter plot of the two variables
#plot(x = dados_cliente$Anos, y = dados_cliente$Salário)
plot(Salário ~ Anos, data = dados_cliente)
# Correlation coefficients
print('Pearson correlation coefficient')
with(dados_cliente, cor(Anos, Salário)) # Pearson (the default)
print('Kendall correlation coefficient')
with(dados_cliente, cor(Anos, Salário, method = "kendall"))
print('Spearman correlation coefficient')
with(dados_cliente, cor(Anos, Salário, method = "spearman"))
|
Clay Corner Inn is a charming B&B next to Virginia Tech in downtown Blacksburg with the comforts of home, the amenities of a fine hotel and the friendly atmosphere of a small inn. The Clay Corner Inn is just like a hotel but with more of a personal touch, geared towards each guest's special needs. Across the street from our front door is Virginia Tech, and only 2 blocks' walk away you'll find yourself in the heart of downtown Blacksburg's restaurants, shops and nightlife. We are proud to offer homemade traditional breakfasts as well as European gourmet dishes. Visitors from all over the world enjoy casual, comfortable, clean accommodations, a perfect location and an always helpful staff. WiFi, DVD players, expanded cable TV, CD clock radios, online reservations. 2 houses complete with full kitchens & living areas. 8 regional-theme guest rooms, all with PRIVATE BATHROOMS, hair dryers, microfiber robes, writing tables, an iron and ironing board, a high-quality Simmons Beautyrest World Class queen or king bed, luxury Comfy micro-twill sheets and absorbent fine towels. The Yoga Deck & Zen Garden make this the most relaxing place to be!
|
(* Title: HOL/MicroJava/BV/JType.thy
Author: Tobias Nipkow, Gerwin Klein
Copyright 2000 TUM
*)
section \<open>The Java Type System as Semilattice\<close>
theory JType
imports "../DFA/Semilattices" "../J/WellForm"
begin
definition super :: "'a prog \<Rightarrow> cname \<Rightarrow> cname" where
"super G C == fst (the (class G C))"
lemma superI:
"G \<turnstile> C \<prec>C1 D \<Longrightarrow> super G C = D"
by (unfold super_def) (auto dest: subcls1D)
definition is_ref :: "ty \<Rightarrow> bool" where
"is_ref T == case T of PrimT t \<Rightarrow> False | RefT r \<Rightarrow> True"
definition sup :: "'c prog \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty err" where
"sup G T1 T2 ==
case T1 of PrimT P1 \<Rightarrow> (case T2 of PrimT P2 \<Rightarrow>
(if P1 = P2 then OK (PrimT P1) else Err) | RefT R \<Rightarrow> Err)
| RefT R1 \<Rightarrow> (case T2 of PrimT P \<Rightarrow> Err | RefT R2 \<Rightarrow>
(case R1 of NullT \<Rightarrow> (case R2 of NullT \<Rightarrow> OK NT | ClassT C \<Rightarrow> OK (Class C))
| ClassT C \<Rightarrow> (case R2 of NullT \<Rightarrow> OK (Class C)
| ClassT D \<Rightarrow> OK (Class (exec_lub (subcls1 G) (super G) C D)))))"
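(* For example, sup G (Class C) (Class D) is OK (Class E), where E is the
   least common superclass computed by exec_lub; combining a primitive
   type with a reference type, or two distinct primitive types, gives Err. *)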
definition subtype :: "'c prog \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool" where
"subtype G T1 T2 == G \<turnstile> T1 \<preceq> T2"
definition is_ty :: "'c prog \<Rightarrow> ty \<Rightarrow> bool" where
"is_ty G T == case T of PrimT P \<Rightarrow> True | RefT R \<Rightarrow>
(case R of NullT \<Rightarrow> True | ClassT C \<Rightarrow> (C, Object) \<in> (subcls1 G)^*)"
abbreviation "types G == Collect (is_type G)"
definition esl :: "'c prog \<Rightarrow> ty esl" where
"esl G == (types G, subtype G, sup G)"
lemma PrimT_PrimT: "(G \<turnstile> xb \<preceq> PrimT p) = (xb = PrimT p)"
by (auto elim: widen.cases)
lemma PrimT_PrimT2: "(G \<turnstile> PrimT p \<preceq> xb) = (xb = PrimT p)"
by (auto elim: widen.cases)
lemma is_tyI:
"\<lbrakk> is_type G T; ws_prog G \<rbrakk> \<Longrightarrow> is_ty G T"
by (auto simp add: is_ty_def intro: subcls_C_Object
split: ty.splits ref_ty.splits)
lemma is_type_conv:
"ws_prog G \<Longrightarrow> is_type G T = is_ty G T"
proof
assume "is_type G T" "ws_prog G"
thus "is_ty G T"
by (rule is_tyI)
next
assume wf: "ws_prog G" and
ty: "is_ty G T"
show "is_type G T"
proof (cases T)
case PrimT
thus ?thesis by simp
next
fix R assume R: "T = RefT R"
with wf
have "R = ClassT Object \<Longrightarrow> ?thesis" by simp
moreover
from R wf ty
have "R \<noteq> ClassT Object \<Longrightarrow> ?thesis"
by (auto simp add: is_ty_def is_class_def split_tupled_all
elim!: subcls1.cases
elim: converse_rtranclE
split: ref_ty.splits)
ultimately
show ?thesis by blast
qed
qed
lemma order_widen:
"acyclic (subcls1 G) \<Longrightarrow> order (subtype G)"
apply (unfold Semilat.order_def lesub_def subtype_def)
apply (auto intro: widen_trans)
apply (case_tac x)
apply (case_tac y)
apply (auto simp add: PrimT_PrimT)
apply (case_tac y)
apply simp
apply simp
apply (rename_tac ref_ty ref_tya, case_tac ref_ty)
apply (case_tac ref_tya)
apply simp
apply simp
apply (case_tac ref_tya)
apply simp
apply simp
apply (auto dest: acyclic_impl_antisym_rtrancl antisymD)
done
lemma wf_converse_subcls1_impl_acc_subtype:
"wf ((subcls1 G)^-1) \<Longrightarrow> acc (subtype G)"
apply (unfold Semilat.acc_def lesssub_def)
apply (drule_tac p = "((subcls1 G)^-1) - Id" in wf_subset)
apply auto
apply (drule wf_trancl)
apply (simp add: wf_eq_minimal)
apply clarify
apply (unfold lesub_def subtype_def)
apply (rename_tac M T)
apply (case_tac "EX C. Class C : M")
prefer 2
apply (case_tac T)
apply (fastforce simp add: PrimT_PrimT2)
apply simp
apply (rename_tac ref_ty)
apply (subgoal_tac "ref_ty = NullT")
apply simp
apply (rule_tac x = NT in bexI)
apply (rule allI)
apply (rule impI, erule conjE)
apply (drule widen_RefT)
apply clarsimp
apply (case_tac t)
apply simp
apply simp
apply simp
apply (case_tac ref_ty)
apply simp
apply simp
apply (erule_tac x = "{C. Class C : M}" in allE)
apply auto
apply (rename_tac D)
apply (rule_tac x = "Class D" in bexI)
prefer 2
apply assumption
apply clarify
apply (frule widen_RefT)
apply (erule exE)
apply (case_tac t)
apply simp
apply simp
apply (insert rtrancl_r_diff_Id [symmetric, of "subcls1 G"])
apply simp
apply (erule rtrancl.cases)
apply blast
apply (drule rtrancl_converseI)
apply (subgoal_tac "(subcls1 G - Id)^-1 = (subcls1 G)^-1 - Id")
prefer 2
apply (simp add: converse_Int) apply safe[1]
apply simp
apply (blast intro: rtrancl_into_trancl2)
done
lemma closed_err_types:
"\<lbrakk> ws_prog G; single_valued (subcls1 G); acyclic (subcls1 G) \<rbrakk>
\<Longrightarrow> closed (err (types G)) (lift2 (sup G))"
apply (unfold closed_def plussub_def lift2_def sup_def)
apply (auto split: err.split)
apply (drule is_tyI, assumption)
apply (auto simp add: is_ty_def is_type_conv simp del: is_type.simps
split: ty.split ref_ty.split)
apply (blast dest!: is_lub_exec_lub is_lubD is_ubD intro!: is_ubI superI)
done
lemma sup_subtype_greater:
"\<lbrakk> ws_prog G; single_valued (subcls1 G); acyclic (subcls1 G);
is_type G t1; is_type G t2; sup G t1 t2 = OK s \<rbrakk>
\<Longrightarrow> subtype G t1 s \<and> subtype G t2 s"
proof -
assume ws_prog: "ws_prog G"
assume single_valued: "single_valued (subcls1 G)"
assume acyclic: "acyclic (subcls1 G)"
{ fix c1 c2
assume is_class: "is_class G c1" "is_class G c2"
with ws_prog
obtain
"G \<turnstile> c1 \<preceq>C Object"
"G \<turnstile> c2 \<preceq>C Object"
by (blast intro: subcls_C_Object)
with ws_prog single_valued
obtain u where
"is_lub ((subcls1 G)^* ) c1 c2 u"
by (blast dest: single_valued_has_lubs)
moreover
note acyclic
moreover
have "\<forall>x y. G \<turnstile> x \<prec>C1 y \<longrightarrow> super G x = y"
by (blast intro: superI)
ultimately
have "G \<turnstile> c1 \<preceq>C exec_lub (subcls1 G) (super G) c1 c2 \<and>
G \<turnstile> c2 \<preceq>C exec_lub (subcls1 G) (super G) c1 c2"
by (simp add: exec_lub_conv) (blast dest: is_lubD is_ubD)
} note this [simp]
assume "is_type G t1" "is_type G t2" "sup G t1 t2 = OK s"
thus ?thesis
apply (unfold sup_def subtype_def)
apply (cases s)
apply (auto split: ty.split_asm ref_ty.split_asm if_split_asm)
done
qed
lemma sup_subtype_smallest:
"\<lbrakk> ws_prog G; single_valued (subcls1 G); acyclic (subcls1 G);
is_type G a; is_type G b; is_type G c;
subtype G a c; subtype G b c; sup G a b = OK d \<rbrakk>
\<Longrightarrow> subtype G d c"
proof -
assume ws_prog: "ws_prog G"
assume single_valued: "single_valued (subcls1 G)"
assume acyclic: "acyclic (subcls1 G)"
{ fix c1 c2 D
assume is_class: "is_class G c1" "is_class G c2"
assume le: "G \<turnstile> c1 \<preceq>C D" "G \<turnstile> c2 \<preceq>C D"
from ws_prog is_class
obtain
"G \<turnstile> c1 \<preceq>C Object"
"G \<turnstile> c2 \<preceq>C Object"
by (blast intro: subcls_C_Object)
with ws_prog single_valued
obtain u where
lub: "is_lub ((subcls1 G)^*) c1 c2 u"
by (blast dest: single_valued_has_lubs)
with acyclic
have "exec_lub (subcls1 G) (super G) c1 c2 = u"
by (blast intro: superI exec_lub_conv)
moreover
from lub le
have "G \<turnstile> u \<preceq>C D"
by (simp add: is_lub_def is_ub_def)
ultimately
have "G \<turnstile> exec_lub (subcls1 G) (super G) c1 c2 \<preceq>C D"
by blast
} note this [intro]
have [dest!]:
"\<And>C T. G \<turnstile> Class C \<preceq> T \<Longrightarrow> \<exists>D. T=Class D \<and> G \<turnstile> C \<preceq>C D"
by (frule widen_Class, auto)
assume "is_type G a" "is_type G b" "is_type G c"
"subtype G a c" "subtype G b c" "sup G a b = OK d"
thus ?thesis
by (auto simp add: subtype_def sup_def
split: ty.split_asm ref_ty.split_asm if_split_asm)
qed
lemma sup_exists:
"\<lbrakk> subtype G a c; subtype G b c; sup G a b = Err \<rbrakk> \<Longrightarrow> False"
by (auto simp add: PrimT_PrimT PrimT_PrimT2 sup_def subtype_def
split: ty.splits ref_ty.splits)
lemma err_semilat_JType_esl_lemma:
"\<lbrakk> ws_prog G; single_valued (subcls1 G); acyclic (subcls1 G) \<rbrakk>
\<Longrightarrow> err_semilat (esl G)"
proof -
assume ws_prog: "ws_prog G"
assume single_valued: "single_valued (subcls1 G)"
assume acyclic: "acyclic (subcls1 G)"
hence "order (subtype G)"
by (rule order_widen)
moreover
from ws_prog single_valued acyclic
have "closed (err (types G)) (lift2 (sup G))"
by (rule closed_err_types)
moreover
from ws_prog single_valued acyclic
have
"(\<forall>x\<in>err (types G). \<forall>y\<in>err (types G). x <=_(Err.le (subtype G)) x +_(lift2 (sup G)) y) \<and>
(\<forall>x\<in>err (types G). \<forall>y\<in>err (types G). y <=_(Err.le (subtype G)) x +_(lift2 (sup G)) y)"
by (auto simp add: lesub_def plussub_def Err.le_def lift2_def sup_subtype_greater split: err.split)
moreover
from ws_prog single_valued acyclic
have
"\<forall>x\<in>err (types G). \<forall>y\<in>err (types G). \<forall>z\<in>err (types G).
x <=_(Err.le (subtype G)) z \<and> y <=_(Err.le (subtype G)) z \<longrightarrow> x +_(lift2 (sup G)) y <=_(Err.le (subtype G)) z"
by (unfold lift2_def plussub_def lesub_def Err.le_def)
(auto intro: sup_subtype_smallest sup_exists split: err.split)
ultimately
show ?thesis
by (unfold esl_def semilat_def Err.sl_def) auto
qed
lemma single_valued_subcls1:
"ws_prog G \<Longrightarrow> single_valued (subcls1 G)"
by (auto simp add: ws_prog_def unique_def single_valued_def
intro: subcls1I elim!: subcls1.cases)
theorem err_semilat_JType_esl:
"ws_prog G \<Longrightarrow> err_semilat (esl G)"
by (frule acyclic_subcls1, frule single_valued_subcls1, rule err_semilat_JType_esl_lemma)
end
|
module RecordUpdateSyntax where
open import Common.Prelude
open import Common.Equality
data Param : Nat → Set where
param : ∀ n → Param (suc n)
record R : Set where
field
{i} : Nat
p : Param i
s : Nat
old : R
old = record { p = param 0; s = 1 }
-- Simple update, it should be able to infer the type and the implicit.
new : _
new = record old { p = param 1 }
new′ : R
new′ = record { i = 2; p = param 1; s = 1 }
-- Here's a needlessly complex update.
upd-p-s : _ → _ → _ → R
upd-p-s zero s r = record r { p = param zero; s = s }
upd-p-s (suc n) s r = record (upd-p-s n 0 r) { p = param n; s = s }
eq₁ : new ≡ new′
eq₁ = refl
eq₂ : upd-p-s zero 1 (record new { s = 0 }) ≡ old
eq₂ = refl
-- Check that instance arguments are handled properly
postulate
T : Nat → Set
instance
t0 : T 0
t1 : T 1
record Instance : Set where
field
n : Nat
{{t}} : T n
r0 : Instance
r0 = record { n = 0 }
r1 : Instance
r1 = record r0 { n = 1 }
check : Instance.t r1 ≡ t1
check = refl
|
#!/usr/bin/env python2.7
"""
Module: cse6040utils
Some utility functions created for Georgia Tech's CSE 6040: Computing for Data Analysis.
"""
#============================================================
import itertools
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
#============================================================
# From the early labs...
#============================================================
def keys_geq_threshold (Dict, threshold):
"""
(Generator) Given a dictionary, yields the keys whose values
are at or above (greater than or equal to) a given threshold.
"""
for key, value in Dict.items ():
if value >= threshold:
yield key
def alpha_chars (text):
"""
(Generator) Yields each of the alphabetic characters in a string.
"""
for letter in text:
if letter.isalpha ():
yield letter
def alpha_chars_pairs (text):
"""
(Generator) Yields every one of the 4-choose-2 pairs of
'positionally distinct' alphabetic characters in a string.
That is, each position of the string is regarded as distinct,
but the pair of characters coming from positions (i, j),
where i != j, are considered the "same" as the paired
positions (j, i). Non-alphabetic characters should be
ignored.
For instance, `alpha_chars_pairs ("te3x_t")` should produce
has just 4 positionally distinct characters, so this routine
should return the 4 choose 2 == 6 pairs:
('t', 'e') <-- from positions (0, 1)
('t', 'x') <-- from positions (0, 3)
('t', 't') <-- from positions (0, 5)
('e', 'x') <-- from positions (1, 3)
('e', 't') <-- from positions (1, 5)
('x', 't') <-- from positions (3, 5)
"""
alpha_text = list (alpha_chars (text))
    return itertools.combinations (alpha_text, 2)
from collections import defaultdict
def sparse_vector (base_type=float):
return defaultdict (base_type)
def print_sparse_vector (x):
for key, value in x.items ():
print ("%s: %d" % (key, value))
def sparse_matrix (base_type=float):
"""
Returns an empty sparse matrix that can hold integer counts
of pairs of elements.
"""
return defaultdict (lambda: sparse_vector (base_type))
def print_sparse_matrix (x):
for i, row_i in x.items ():
for j, value in row_i.items ():
print ("[%s, %s]: %d" % (i, j, value))
def dense_vector (n, init_val=0.0):
"""
[Lab 14] Returns a dense vector of length `n`, with all
entries set to `init_val`.
"""
return [init_val] * n
def spmv (n, A, x):
"""
[Lab 14] Returns a dense vector y of length n, where
y = A*x.
"""
y = dense_vector (n)
for (i, A_i) in A.items ():
s = 0
for (j, a_ij) in A_i.items ():
s += a_ij * x[j]
y[i] = s
return y
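# A quick sanity check for spmv (a sketch, using the defaultdict-based
# sparse_matrix above):
#
#   A = sparse_matrix ()
#   A[0][0] = 2.0; A[0][1] = 1.0; A[1][1] = 3.0
#   spmv (2, A, [1.0, 1.0])   # => [3.0, 3.0]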
import math
def vec_scale (x, alpha):
"""[Lab 14] Scales the vector x by a constant alpha."""
return [x_i*alpha for x_i in x]
def vec_add_scalar (x, c):
"""[Lab 14] Adds the scalar value c to every element of x."""
return [x_i+c for x_i in x]
def vec_sub (x, y):
"""[Lab 14] Returns x - y"""
return [x_i - y_i for (x_i, y_i) in zip (x, y)]
def vec_2norm (x):
"""[Lab 14] Returns ||x||_2"""
return math.sqrt (sum ([x_i**2 for x_i in x]))
import pandas as pd
import sys
def pandas2sqlite (df_reader, sql_writer, table_name, capitalize=False):
"""
Given a text file reader for a Pandas data frame, creates an SQLite
table. Returns the number of rows read.
"""
index_start = 0
for df in df_reader:
if capitalize:
df.columns = [x.capitalize () for x in df.columns.values]
        action = 'replace' if (index_start == 0) else 'append'
df.to_sql (table_name, sql_writer, if_exists=action)
index_start += len (df)
print ("(Processed %d records.)" % index_start)
sys.stdout.flush ()
return index_start
def peek_table (db, name):
"""
[Lab 14] Given a database connection (`db`), prints both the number of
records in the table as well as its first few entries.
"""
count = '''SELECT COUNT (*) FROM {table}'''.format (table=name)
    display (pd.read_sql_query (count, db))
    peek = '''SELECT * FROM {table} LIMIT 5'''.format (table=name)
    display (pd.read_sql_query (peek, db))
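# Typical use of the two helpers above (a sketch; `conn` is an open
# SQLite connection and 'big.csv' a hypothetical input file):
#
#   reader = pd.read_csv ('big.csv', chunksize=10000)
#   pandas2sqlite (reader, conn, 'mytable')
#   peek_table (conn, 'mytable')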
# [Lab 21] Floating-point utilities
# See also: https://docs.python.org/2/tutorial/floatingpoint.html
import re
from decimal import Decimal
import numpy as np
RE_FLOAT_HEX_PARTS = re.compile (r'''^(?P<sign>-)?0x[01]\.(?P<mantissa>[0-9a-f]+)p(?P<signexp>[+-])(?P<exp>\d+)''')
def float_to_bin (x):
"""Given a `float`, returns its binary form as a string."""
assert type (x) is float
s_hex = float.hex (x)
hex_parts = RE_FLOAT_HEX_PARTS.match (s_hex)
assert hex_parts
s = hex_parts.group ('sign')
m = hex_parts.group ('mantissa')
se = hex_parts.group ('signexp')
e = hex_parts.group ('exp')
# Mantissa, including sign bit
# See also: http://stackoverflow.com/questions/1425493/convert-hex-to-binary
s_bin = '['
if s:
s_bin += s
if (int (m, 16) == 0) and (int (e) == 0):
s_bin += "0"
else:
s_bin += "1"
s_bin += \
"." \
+ bin (int (m, 16))[2:].zfill (4 * len (m)) \
+ "]_{2}"
# Sign of exponent
s_bin += "e" + se
# Exponent
s_bin += e
return s_bin
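# For example: float.hex (0.5) is '0x1.0000000000000p-1', so
# float_to_bin (0.5) returns '[1.' + 52 zero bits + ']_{2}e-1',
# i.e. 0.5 == 1.0 * 2**-1.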
# Copied here from Lab 21, for use starting in Lab 22 and beyond
def print_float_bin (x, prefix="", ret=False):
s = ("%s: %s\n%s %s" % (prefix,
Decimal (x),
' ' * len (prefix),
float_to_bin (x)))
print (s)
if ret:
return s
EPS_S = np.finfo (np.float32).eps
EPS_D = np.finfo (float).eps
# ======================================================================
# [Lab 24] Linear regression
def generate_model (d):
"""Returns a set of (random) d+1 linear model coefficients."""
return np.random.rand (d+1, 1)
def generate_data (m, x, sigma=1.0/(2**0.5)):
"""
Generates 'm' noisy observations for a linear model whose
predictor (non-intercept) coefficients are given in 'x'.
Decrease 'sigma' to decrease the amount of noise.
"""
assert (type (x) is np.ndarray) and (x.ndim == 2) and (x.shape[1] == 1)
n = len (x)
A = np.random.rand (m, n)
A[:, 0] = 1.0
b = A.dot (x) + sigma*np.random.randn (m, 1)
return (A, b)
def linreg_fit_lstsq (A, b):
"""
Solves Ax=b by a linear least squares method.
"""
result = np.linalg.lstsq (A, b)
x = result[0]
return x
def rel_diff (x, y, ord=2):
"""
Computes ||x-y|| / ||y||. Uses 2-norm by default;
override by setting 'ord'.
"""
return np.linalg.norm (x - y, ord=ord) / np.linalg.norm (y, ord=ord)
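# End-to-end sketch: generate a model, sample noisy observations from it,
# refit by least squares, and measure the relative error:
#
#   x_true = generate_model (2)                 # intercept + 2 predictors
#   (A, b) = generate_data (100, x_true, sigma=0.1)
#   x_fit  = linreg_fit_lstsq (A, b)
#   rel_diff (x_fit, x_true)                    # small for low noise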
# ======================================================================
# [Lab 25] Logistic regression
import plotly.plotly as py
from plotly.graph_objs import *
def assert_points_2d (points):
"""Checks the dimensions of a given point set."""
assert type (points) is np.ndarray
assert points.ndim == 2
assert points.shape[1] == 3
def assert_labels (labels):
"""Checks the type of a given set of labels (must be integral)."""
assert labels is not None
assert (type (labels) is np.ndarray) or (type (labels) is list)
def extract_clusters (points, labels):
"""
Given a list or array of labeled augmented points, this
routine returns a pair of lists, (C[0:k], L[0:k]), where
C[i] is an array of all points whose labels are L[i].
"""
assert_points_2d (points)
assert_labels (labels)
id_label_pairs = list (enumerate (set (labels.flatten ())))
labels_map = dict ([(v, i) for (i, v) in id_label_pairs])
# Count how many points belong to each cluster
counts = [0] * len (labels_map)
for l in labels.flatten ():
counts[labels_map[l]] += 1
# Allocate space for each cluster
clusters = [np.zeros ((k, 3)) for k in counts]
# Separate the points by cluster
counts = [0] * len (labels_map)
for (x, l) in zip (points, labels.flatten ()):
l_id = labels_map[l]
k = counts[l_id]
clusters[l_id][k, :] = x
counts[l_id] += 1
# Generate cluster labels
cluster_labels = [None] * len (labels_map)
for (l, i) in labels_map.items ():
cluster_labels[i] = l
return (clusters, cluster_labels)
def make_2d_scatter_traces (points, labels=None):
"""
Given an augmented point set, possibly labeled,
returns a list Plotly-compatible marker traces.
"""
assert_points_2d (points)
traces = []
if labels is None:
traces.append (Scatter (x=points[:, 1:2], y=points[:, 2:3], mode='markers'))
else:
assert_labels (labels)
(clusters, cluster_labels) = extract_clusters (points, labels)
for (c, l) in zip (clusters, cluster_labels):
traces.append (Scatter (x=c[:, 1:2], y=c[:, 2:3],
mode='markers',
name="%s" % str (l)))
return traces
def heaviside_int (Y):
"""Evaluates the heaviside function, but returns integer values."""
return heaviside (Y).astype (dtype=int)
def assert_discriminant (theta, d=2):
"""
Verifies that the given coefficients correspond to a
d-dimensional linear discriminant ($\theta$).
"""
assert len (theta) == (d+1)
def lin_discr (X, theta):
return X.dot (theta)
def heaviside (Y):
return 1.0*(Y > 0.0)
def gen_lin_discr_labels (points, theta, fun=heaviside_int):
"""
Given a set of points and the coefficients of a linear
discriminant, this function returns a set of labels for
the points with respect to this discriminant.
"""
assert_points_2d (points)
assert_discriminant (theta)
score = lin_discr (points, theta)
labels = fun (score)
return labels
def gen_lin_discr_trace (points, theta, name='Discriminant'):
"""
    Given a set of points and the coefficients of a linear
    discriminant, this function returns a single Plotly trace
    that draws the discriminant boundary across the points'
    horizontal extent.
"""
assert_points_2d (points)
assert_discriminant (theta)
x1 = [min (points[:, 1]), max (points[:, 1])]
m = -theta[1] / theta[2]
b = -theta[0] / theta[2]
x2 = [(b + m*x) for x in x1]
return Scatter (x=x1, y=x2, mode='lines', name=name)
def np_row_vec (init_list):
"""Generates a Numpy-compatible row vector."""
return np.array (init_list, order='F', ndmin=2)
def np_col_vec (init_list):
"""Generates a Numpy-compatible column vector."""
return np_row_vec (init_list).T
def check_labels (points, labels, fun):
"""
Given a set of points and their labels, determines whether
a given function produces matching labels.
"""
your_labels = fun (points)
return (labels == your_labels)
def logistic (Y):
return 1.0 / (1.0 + np.exp (-Y))
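# A self-contained usage sketch (an illustration, not part of the labs):
# label random augmented 2-D points against a fixed discriminant and build
# the Plotly traces for a scatter plot of the result, including the
# boundary line. The points and theta below are arbitrary demo values.
def demo_lin_discr (m=200):
    points = np.random.rand (m, 3)
    points[:, 0] = 1.0  # augmented coordinate (constant 1)
    theta = [-1.0, 1.0, 1.0]  # boundary: x1 + x2 = 1
    labels = gen_lin_discr_labels (points, theta)
    traces = make_2d_scatter_traces (points, labels)
    traces.append (gen_lin_discr_trace (points, theta))
    return traces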
#============================================================
# Adapted from: http://g.sweyla.com/blog/2012/mnist-numpy/
#============================================================
import os, struct
from array import array as pyarray
from numpy import append, array, int8, uint8, zeros
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import gzip
def download_mnist (data='training'):
"""
Downloads gzip'd MNIST image + label data files into temporary local files,
returning their filenames as a pair.
"""
assert data in ['training', 'testing']
if data == 'training':
images_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
labels_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
else:
images_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
labels_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
    (images_fn_gz, _) = urlretrieve (images_url)
    (labels_fn_gz, _) = urlretrieve (labels_url)
return (images_fn_gz, labels_fn_gz)
def load_mnist (images_fn_gz, labels_fn_gz, digits=None, path=None, asbytes=False, selection=None, return_labels=True, return_indices=False):
"""
Loads MNIST files into a 3D numpy array.
    You have to download the data separately; see ``download_mnist()``.
Parameters
----------
images_fn_gz, labels_fn_gz : str
Gzip'd filenames corresponding to MNIST-formatted files containing
handwritten images and labels, respectively.
    digits : list
        Integer list of digits to load. The entire database is loaded if set to
        ``None``. Default is ``None``.
    path : str
        Unused; kept only for compatibility with the interface of the
        original code this was adapted from.
asbytes : bool
If True, returns data as ``numpy.uint8`` in [0, 255] as opposed to
``numpy.float64`` in [0.0, 1.0].
selection : slice
Using a `slice` object, specify what subset of the dataset to load. An
example is ``slice(0, 20, 2)``, which would load every other digit
until--but not including--the twentieth.
    return_labels : bool
        Specify whether or not labels should be returned. Skipping the labels
        also improves speed when digits are not specified, since then the
        labels file does not need to be read at all.
    return_indices : bool
        Specify whether or not to return the MNIST indices that were fetched.
        This is useful mainly when digits is specified, since then it tells
        you how far into the database the load reached.
Returns
-------
images : ndarray
        Image data of shape ``(N, rows, cols)``, where ``N`` is the number of images. If neither labels nor indices are returned, then this is returned directly, and not inside a 1-sized tuple.
labels : ndarray
Array of size ``N`` describing the labels. Returned only if ``return_labels`` is `True`, which is default.
indices : ndarray
The indices in the database that were returned.
Examples
--------
This will load all images and labels from the training set:
>>> im_gz, lab_gz = download_mnist ('training') # doctest: +SKIP
>>> images, labels = load_mnist (im_gz, lab_gz) # doctest: +SKIP
    Load 100 sevens from the same training set:
>>> sevens = load_mnist (im_gz, lab_gz, digits=[7], selection=slice(0, 100), return_labels=False) # doctest: +SKIP
"""
# We can skip the labels file only if digits aren't specified and labels aren't asked for
if return_labels or digits is not None:
flbl = gzip.open (labels_fn_gz, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
labels_raw = pyarray("b", flbl.read())
flbl.close()
fimg = gzip.open(images_fn_gz, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
images_raw = pyarray("B", fimg.read())
fimg.close()
if digits:
indices = [k for k in range(size) if labels_raw[k] in digits]
else:
indices = range(size)
if selection:
indices = indices[selection]
N = len(indices)
images = zeros((N, rows, cols), dtype=uint8)
if return_labels:
labels = zeros((N), dtype=int8)
    for i, index in enumerate(indices):
        images[i] = array(images_raw[ index*rows*cols : (index+1)*rows*cols ]).reshape((rows, cols))
        if return_labels:
            labels[i] = labels_raw[index]
if not asbytes:
images = images.astype(float)/255.0
ret = (images,)
if return_labels:
ret += (labels,)
if return_indices:
ret += (indices,)
if len(ret) == 1:
return ret[0] # Don't return a tuple of one
else:
return ret
#============================================================
# [Labs 29 & 30] PCA + SVD
#============================================================
def peek_Sigma (Sigma, ret_df=False):
k = len (Sigma)
df_Sigma = pd.DataFrame (np.arange (len (Sigma)), columns=['i'])
df_Sigma['sigma_i'] = Sigma
Sigma_sq = np.power (Sigma, 2)
Err_sq = np.sum (Sigma_sq) - np.cumsum (Sigma_sq)
Err_sq[Err_sq < 0] = 0
Err = np.sqrt (Err_sq)
Relerr = Err / (Sigma[0] + Err[0])
df_Sigma['sigma_i^2'] = Sigma_sq
df_Sigma['err_i^2'] = Err_sq
df_Sigma['err_i'] = Err
df_Sigma['relerr_i'] = Relerr
print "Singular values:"
display (df_Sigma.head ())
print " ..."
display (df_Sigma.tail ())
f, ax = plt.subplots (figsize=(7, 7))
ax.set (yscale="log")
sns.regplot ("i", "sigma_i", df_Sigma, ax=ax, fit_reg=False)
if ret_df:
return df_Sigma
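# A small sketch (synthetic data, not from the labs): inspect the singular
# value decay of a random rank-k matrix perturbed by noise. Assumes numpy,
# pandas, seaborn, and IPython's `display` are imported above, as
# `peek_Sigma` itself requires.
def demo_peek_Sigma (m=100, n=50, k=5):
    A = (np.random.randn (m, k).dot (np.random.randn (k, n))
         + 0.01*np.random.randn (m, n))
    Sigma = np.linalg.svd (A, compute_uv=False)
    return peek_Sigma (Sigma, ret_df=True)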
def rgb2gray (rgb):
    # Standard luma weights (ITU-R BT.601); the blue weight is 0.114,
    # so the three weights sum to 1.
    return np.dot (rgb[..., :3], [0.299, 0.587, 0.114])
def imshow_gray (im):
plt.imshow (im, interpolation='nearest', cmap=plt.get_cmap ('gray'))
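# Usage sketch (hypothetical `img`: an RGB array with values in [0, 1]):
#
#   imshow_gray (rgb2gray (img))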
#============================================================
if __name__ == "__main__":
    print (__doc__)
#============================================================
# eof
|
{-# OPTIONS --erased-cubical --safe #-}
module Pitch where
open import Cubical.Core.Everything using (_≡_; Level; Type; Σ; _,_; fst; snd; _≃_; ~_)
open import Cubical.Foundations.Prelude using (refl; sym; _∙_; cong; transport; subst; funExt; transp)
--open import Cubical.Foundations.Function using (_∘_)
open import Cubical.Foundations.Univalence using (ua)
open import Cubical.Foundations.Isomorphism using (iso; Iso; isoToPath; section; retract; isoToEquiv)
open import Data.Bool using (Bool; false; true)
open import Data.Integer using (ℤ; +_; -[1+_])
open import Data.Fin using (Fin; toℕ; #_; _≟_; fromℕ<) renaming (zero to fz; suc to fs)
open import Data.List using (List; []; _∷_; foldr; map)
open import Data.Maybe using (Maybe; just; nothing) renaming (map to mmap)
open import Data.Nat using (ℕ; zero; suc; _+_; _*_; _∸_; _≡ᵇ_; _>_)
open import Data.Nat.DivMod using (_mod_; _div_)
open import Data.Product using (_×_; _,_; proj₁)
open import Data.String using (String; intersperse) renaming (_++_ to _++s_)
open import Data.Vec using (Vec; []; _∷_; lookup; replicate; _[_]%=_; toList) renaming (map to vmap)
open import Relation.Nullary using (yes; no)
open import BitVec using (BitVec; insert; empty; show)
open import Util using (n∸k<n; _+N_; opposite; _∘_)
-- Position of a pitch on an absolute scale
-- 0 is C(-1) on the international scale (where C4 is middle C)
-- or C0 on the Midi scale (where C5 is middle C)
-- Pitch is intentionally set to match Midi pitch.
-- However it is fine to let 0 represent some other note and
-- translate appropriately at the end.
Pitch : Type
Pitch = ℕ
-- Number of notes in the chromatic scale.
s12 : ℕ
s12 = 12
-- Number of notes in the diatonic scale.
s7 : ℕ
s7 = 7
-- Pitch Class: position of a pitch within an octave, in the range [0..s12-1].
-- Pitch class 0 corresponds to C (assuming pitch 0 is Midi C0), which is standard.
PC : Type
PC = Fin s12
showPC : PC → String
showPC fz = "0"
showPC (fs fz) = "1"
showPC (fs (fs fz)) = "2"
showPC (fs (fs (fs fz))) = "3"
showPC (fs (fs (fs (fs fz)))) = "4"
showPC (fs (fs (fs (fs (fs fz))))) = "5"
showPC (fs (fs (fs (fs (fs (fs fz)))))) = "6"
showPC (fs (fs (fs (fs (fs (fs (fs fz))))))) = "7"
showPC (fs (fs (fs (fs (fs (fs (fs (fs fz)))))))) = "8"
showPC (fs (fs (fs (fs (fs (fs (fs (fs (fs fz))))))))) = "9"
showPC (fs (fs (fs (fs (fs (fs (fs (fs (fs (fs fz)))))))))) = "a"
showPC (fs (fs (fs (fs (fs (fs (fs (fs (fs (fs (fs fz))))))))))) = "b"
showPCs : List PC → String
showPCs pcs = intersperse " " (map showPC pcs)
toPC : Pitch → PC
toPC = _mod s12
Scale : ℕ → Type
Scale = Vec PC
-- Which octave one is in.
Octave : Type
Octave = ℕ
PitchOctave : Type
PitchOctave = PC × Octave
relativeToAbsolute : PitchOctave → Pitch
relativeToAbsolute (n , o) = (o * s12 + (toℕ n))
absoluteToRelative : Pitch → PitchOctave
absoluteToRelative n = (toPC n , n div s12)
pitchToClass : Pitch → PC
pitchToClass = proj₁ ∘ absoluteToRelative
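-- A small usage sketch: Midi pitch 61 (C♯5) decomposes as pitch class 1
-- in octave 5.
examplePO : PitchOctave
examplePO = absoluteToRelative 61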
majorScale harmonicMinorScale : Scale s7
majorScale = # 0 ∷ # 2 ∷ # 4 ∷ # 5 ∷ # 7 ∷ # 9 ∷ # 11 ∷ []
harmonicMinorScale = # 0 ∷ # 2 ∷ # 3 ∷ # 5 ∷ # 7 ∷ # 8 ∷ # 11 ∷ []
wholeTone0Scale wholeTone1Scale : Scale 6
wholeTone0Scale = # 0 ∷ # 2 ∷ # 4 ∷ # 6 ∷ # 8 ∷ # 10 ∷ []
wholeTone1Scale = # 1 ∷ # 3 ∷ # 5 ∷ # 7 ∷ # 9 ∷ # 11 ∷ []
octatonic01Scale octatonic02Scale octatonic12Scale : Scale 8
octatonic01Scale = # 0 ∷ # 1 ∷ # 3 ∷ # 4 ∷ # 6 ∷ # 7 ∷ # 9 ∷ # 10 ∷ []
octatonic02Scale = # 0 ∷ # 2 ∷ # 3 ∷ # 5 ∷ # 6 ∷ # 8 ∷ # 9 ∷ # 11 ∷ []
octatonic12Scale = # 1 ∷ # 2 ∷ # 4 ∷ # 5 ∷ # 7 ∷ # 8 ∷ # 10 ∷ # 11 ∷ []
majorPentatonicScale minorPentatonicScale ryukyuScale : Scale 5
majorPentatonicScale = # 0 ∷ # 2 ∷ # 4 ∷ # 7 ∷ # 9 ∷ []
minorPentatonicScale = # 0 ∷ # 3 ∷ # 5 ∷ # 7 ∷ # 10 ∷ []
ryukyuScale = # 0 ∷ # 4 ∷ # 5 ∷ # 7 ∷ # 11 ∷ []
indexInScale : {n : ℕ} → Vec PC n → Fin s12 → Maybe (Fin n)
indexInScale [] p = nothing
indexInScale (pc ∷ pcs) p with pc ≟ p
... | yes _ = just fz
... | no _ = mmap fs (indexInScale pcs p)
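-- Usage sketch: pitch class 4 (E) sits at index 2 of the major scale;
-- a pitch class outside the scale yields nothing.
exampleIndex : Maybe (Fin s7)
exampleIndex = indexInScale majorScale (# 4)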
scaleSize : {n : ℕ} → Scale n → ℕ
scaleSize {n} _ = n
transposePitch : ℤ → Pitch → Pitch
transposePitch (+_ k) n = n + k
transposePitch (-[1+_] k) n = n ∸ suc k
-- transpose pitch class
Tp : ℕ → PC → PC
Tp n pc = pc +N n
-- invert pitch class
Ip : PC → PC
Ip = opposite
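-- Usage sketches: transposing pitch class 0 (C) up two semitones gives
-- pitch class 2 (D); Ip inverts a pitch class (with the convention fixed
-- by Util.opposite).
exampleTp : PC
exampleTp = Tp 2 (# 0)
exampleIp : PC
exampleIp = Ip (# 4)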
-- Set of pitch classes represented as a bit vector.
PCSet : Type
PCSet = BitVec s12
toPCSet : List PC → PCSet
toPCSet = foldr insert empty
fromPCSet : PCSet → List PC
fromPCSet pcs = fromPCS s12 pcs
where fromPCS : (n : ℕ) → BitVec n → List PC
fromPCS zero [] = []
fromPCS (suc n) (false ∷ xs) = fromPCS n xs
fromPCS (suc n) (true ∷ xs) = fromℕ< (n∸k<n 11 n) ∷ fromPCS n xs
-- transpose pitch class set
T : ℕ → PCSet → PCSet
T n = toPCSet ∘ map (Tp n) ∘ fromPCSet
-- invert pitch class set
I : PCSet → PCSet
I = toPCSet ∘ map Ip ∘ fromPCSet
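-- Usage sketch: the C major triad {0, 4, 7} as a pitch-class set, and its
-- transposition up two semitones, the D major triad {2, 6, 9}.
cMajorTriad dMajorTriad : PCSet
cMajorTriad = toPCSet (# 0 ∷ # 4 ∷ # 7 ∷ [])
dMajorTriad = T 2 cMajorTriad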
-- Standard Midi pitches
-- first argument is relative pitch within octave
-- second argument is octave (C5 = middle C for Midi)
standardMidiPitch : Fin s12 → ℕ → Pitch
standardMidiPitch p o = relativeToAbsolute (p , o)
c c♯ d d♯ e f f♯ g g♯ a b♭ b : ℕ → Pitch
c = standardMidiPitch (# 0)
c♯ = standardMidiPitch (# 1)
d = standardMidiPitch (# 2)
d♯ = standardMidiPitch (# 3)
e = standardMidiPitch (# 4)
f = standardMidiPitch (# 5)
f♯ = standardMidiPitch (# 6)
g = standardMidiPitch (# 7)
g♯ = standardMidiPitch (# 8)
a = standardMidiPitch (# 9)
b♭ = standardMidiPitch (# 10)
b = standardMidiPitch (# 11)
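-- Usage sketch: middle C on the Midi scale (C5) is pitch 60.
middleC : Pitch
middleC = c 5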
-- Equivalences
rel→abs : PitchOctave → Pitch
rel→abs = relativeToAbsolute
abs→rel : Pitch → PitchOctave
abs→rel = absoluteToRelative
{-
rel→abs→rel : (p : PitchOctave) → (abs→rel ∘ rel→abs) p ≡ p
rel→abs→rel (pitchClass p , octave o) i =
let a = cong pitchClass (modUnique s12 o p)
b = cong octave (divUnique s12 o p)
in a i , b i
abs→rel→abs : (p : Pitch) → (rel→abs ∘ abs→rel) p ≡ p
abs→rel→abs (pitch p) = cong pitch (sym (n≡divmod p s12))
abs≃rel : Iso Pitch PitchOctave
abs≃rel = iso abs→rel rel→abs rel→abs→rel abs→rel→abs
abs≡rel : Pitch ≡ PitchOctave
abs≡rel = isoToPath abs≃rel
-}
|