/**
* Verify that ME instances injected by the container can be shut down
*/
public void shutdownContainerInstance() throws Exception {
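// shutdown() lets already-submitted tasks complete; shutdownNow() (used below) also attempts to cancel running tasks.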
throwAway1.shutdown();
assertTrue(throwAway1.awaitTermination(MAX_WAIT_SEC, TimeUnit.SECONDS));
assertTrue(throwAway1.isShutdown());
throwAway2.shutdownNow();
assertTrue(throwAway2.awaitTermination(MAX_WAIT_SEC, TimeUnit.SECONDS));
assertTrue(throwAway2.isShutdown());
} |
// Package schema provides helpers for validating values against openapi3 schemas.
package schema
import (
"encoding/json"
"fmt"
"github.com/getkin/kin-openapi/openapi3"
)
// Validator describes openapi3 schema validator.
type Validator struct {
swagger *openapi3.Swagger
}
// Swagger is an alias for openapi3.Swagger.
type Swagger = openapi3.Swagger
// NewValidatorFromSwagger returns a new Validator from Swagger.
func NewValidatorFromSwagger(s *Swagger) *Validator {
return &Validator{swagger: s}
}
// NewValidatorFromPath returns a new Validator from spec path.
func NewValidatorFromPath(path string) (*Validator, error) {
schema, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(path)
if err != nil {
return nil, err
}
return NewValidatorFromSwagger(schema), nil
}
// Validate validates value by schema name.
func (v *Validator) Validate(schemaName string, value interface{}) error {
vv, err := toI(value)
if err != nil {
return fmt.Errorf("value is not a valid json: %w", err)
}
schema, ok := v.swagger.Components.Schemas[schemaName]
if !ok {
return fmt.Errorf("schema %s does not exists", schemaName)
}
if schema.Value == nil {
return fmt.Errorf("schema %s does not have Value", schemaName)
}
if err := schema.Value.VisitJSON(vv); err != nil {
return fmt.Errorf("validation error: %w", err)
}
return nil
}
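// toI round-trips value through JSON so the validator sees generic
// interface{} data (maps, slices, primitives) rather than concrete Go types.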
func toI(value interface{}) (interface{}, error) {
var vv interface{}
b, err := json.Marshal(value)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &vv); err != nil {
return nil, err
}
return vv, nil
}
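// Example usage (a minimal sketch; the spec path "openapi.yaml" and the
// schema name "Pet" are hypothetical stand-ins):
//
//	v, err := NewValidatorFromPath("openapi.yaml")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := v.Validate("Pet", map[string]interface{}{"name": "Rex"}); err != nil {
//		log.Fatalf("validation failed: %v", err)
//	}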
|
package org.i3xx.util.rna.core;
public interface ITimeSliceIterator {
/**
* Returns the TimeSlice valid today.
* The iterator is positioned on this element.
* @return the TimeSlice valid today,
* or null if none was found
*/
IBrick getActualTimeSlice();
/**
* Returns the TimeSlice valid for the given date string.
* The iterator is positioned on this element.
* @param date the date for the validity check
* @return the TimeSlice valid for the given date,
* or null if none was found
*/
IBrick getTimeSlice(String date);
/**
* Returns the earliest TimeSlice.
* The iterator is positioned on this element.
* @return the earliest TimeSlice
*/
IBrick getEarliestTimeSlice();
/**
* Returns the latest TimeSlice.
* The iterator is positioned on this element.
* @return the latest TimeSlice
*/
IBrick getLatestTimeSlice();
/**
* Checks whether there is a further, later time slice.
* @return true if there is a later time slice,
* false otherwise
*/
boolean hasNextTimeSlice();
/**
* Returns the next, later time slice.
* If there is none left, null is returned.
* The iterator is positioned on the result element.
* @return the next, later time slice
*/
IBrick nextTimeSlice();
/**
* Checks whether there is an earlier time slice.
* @return true if there is an earlier time slice,
* false otherwise
*/
boolean hasPrevTimeSlice();
/**
* Returns the next, earlier time slice.
* If there is none left, null is returned.
* The iterator is positioned on the result element.
* @return the next, earlier time slice
*/
IBrick prevTimeSlice();
/**
* Checks whether there is a time slice equal to the current one
* that is valid later.
* @return true if there is a time slice equal to the current one
* that is valid later,
* false otherwise
*/
boolean hasNextChange();
/**
* Returns the time slice equal to the current one that is valid later.
* @return the later, equal time slice
*/
IBrick nextChange();
/**
* Checks whether there is a time slice equal to the current one
* that is valid earlier.
* @return true if there is a time slice equal to the current one
* that is valid earlier,
* false otherwise
*/
boolean hasPrevChange();
/**
* Returns the time slice equal to the current one that is valid earlier.
* @return the earlier, equal time slice
*/
IBrick prevChange();
}
|
Designing public spaces for democratic stories
We argue that civic discourse can also be public storytelling and propose three reasons to consider this relationship: stories' relational nature - their ability to represent uniquely human perspectives and emotions - may ameliorate aspects of citizens' disinterest in civic life; the ability of stories to represent both individual perspectives and cultural norms may offer a form of public opinion that is relevant on both personal and collective scales; and the inherent transparency of familiar narrative forms may offer new ways to explicate unfamiliar aspects of civic discourse. We propose a relationship between civic discourse and public storytelling and review one system called TexTales in relation to a developing model of "democratic stories." |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.sisgeproinv.modelos;
/**
* Data model for a supplier (proveedor) record.
*
* @author <NAME>
*/
public class Proveedor {
private int IDPROVEEDOR;
private String PROVEEDOR;
private String CONTACTO;
private String TELF;
private String EMAIL;
public Proveedor(int IDPROVEEDOR, String PROVEEDOR, String CONTACTO, String TELF, String EMAIL) {
this.IDPROVEEDOR = IDPROVEEDOR;
this.PROVEEDOR = PROVEEDOR;
this.CONTACTO = CONTACTO;
this.TELF = TELF;
this.EMAIL = EMAIL;
}
public int getIDPROVEEDOR() {
return IDPROVEEDOR;
}
public void setIDPROVEEDOR(int IDPROVEEDOR) {
this.IDPROVEEDOR = IDPROVEEDOR;
}
public String getPROVEEDOR() {
return PROVEEDOR;
}
public void setPROVEEDOR(String PROVEEDOR) {
this.PROVEEDOR = PROVEEDOR;
}
public String getCONTACTO() {
return CONTACTO;
}
public void setCONTACTO(String CONTACTO) {
this.CONTACTO = CONTACTO;
}
public String getTELF() {
return TELF;
}
public void setTELF(String TELF) {
this.TELF = TELF;
}
public String getEMAIL() {
return EMAIL;
}
public void setEMAIL(String EMAIL) {
this.EMAIL = EMAIL;
}
}
|
#include "soundManager.h"
SoundManager::SoundManager() {
// nothing to initialize; sounds are loaded on demand via loadSound()
}
SoundManager::~SoundManager() {
for (list<soundListing>::iterator sl = sounds.begin(); sl != sounds.end(); sl++) {
Mix_FreeChunk(sl->sound);
}
}
void SoundManager::loadSound(string name, string file) {
soundListing listing;
listing.name = name;
listing.sound = Mix_LoadWAV(file.c_str());
if (listing.sound == NULL) {
// Mix_LoadWAV returns NULL on failure (see Mix_GetError()); skip the bad listing
return;
}
sounds.push_back(listing);
}
void SoundManager::playSound(string name) {
for (list<soundListing>::iterator sl = sounds.begin(); sl != sounds.end(); sl++) {
if (sl->name == name) {
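// channel -1 picks the first free mixing channel; loops=0 plays the chunk once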
Mix_PlayChannel(-1, sl->sound, 0);
break;
}
}
}
SoundManager SoundManager::soundManager; |
// Package logger provides a single logging facility/interface that can be used with
// Appengine-contained code (e.g. the GCP Standard Environment for Go) or inside 'normal' Go runtime
// environments (GCP Flexible and most others).
//
// This package was written specifically to enable same-logging-package usage in both types of environments,
// and to facilitate porting code from the Standard Environment to the Flexible Environment.
//
// Appengine logging differs in two key ways from plain-jane go log:
//
// 1) It uses level-tied methods for its logging, e.g.:
// - Debugf
// - Infof
// - Warningf
// - Errorf
//
// 2) Appengine logging always requires a context as the first argument. The GCP logging infrastructure uses
// the context to tie a log line to a particular request, and does not accept any logs that are not
// tied to a particular request via a context. (You can see context-less logs in development, but not in GCP
// infrastructure)
//
// This logger uses the appengine-style interface, but lets you use it whether your code is appengine-hosted or not.
// It uses the 'appengine' build tag to conditionally build the right logger for the environment.
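//
// Example usage of the standard logger (a minimal sketch):
//
//	logger.Std.Infof("listening on %s", ":8080")
//	logger.Std.Warningf("cache miss for key %q", "user:42")
//	logger.Std.Errorf("connect failed: %s", "timeout")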
package logger
import (
"fmt"
"io"
"log"
"os"
"strings"
"github.com/efixler/config"
)
const LogPrefixConfigKey = "LOG_PREFIX"
const LogLevelConfKey = "LOG_LEVEL"
type LogLevel int
const (
Debug LogLevel = iota
Info
Warning
Error
)
var (
Request *RequestLogger
Context *ContextLogger
Std *StdLogger
// NB: Package-wide log-level setting is ignored when using appengine logging providers
Level LogLevel
headers = [4]string{"DEBUG: ","INFO: ","WARNING: ","ERROR: "}
)
type StdLogger struct {
dlog *log.Logger
elog *log.Logger
}
func newStdLogger() *StdLogger {
prefix := config.Default().GetOrDefault(LogPrefixConfigKey, "")
if len(prefix) > 0 {
prefix = prefix + " "
}
d := log.New(os.Stderr, prefix, log.Ldate|log.Ltime|log.LUTC)
e := log.New(os.Stderr, prefix, log.Ldate|log.Ltime|log.LUTC|log.Lshortfile)
sl := &StdLogger{ dlog: d, elog: e }
return sl
}
func (l *StdLogger) Debugf(format string, args ...interface{}) {
l.output(Debug, 1, format, args...)
}
func (l *StdLogger) Infof(format string, args ...interface{}) {
l.output(Info, 1, format, args...)
}
func (l *StdLogger) Warningf(format string, args ...interface{}) {
l.output(Warning, 1, format, args...)
}
func (l *StdLogger) Errorf(format string, args ...interface{}) {
l.output(Error, 1, format, args...)
}
func (l *StdLogger) SetOutput(w io.Writer) {
l.dlog.SetOutput(w)
l.elog.SetOutput(w)
}
func (l *StdLogger) setFlags(flag int) {
l.dlog.SetFlags(flag)
l.elog.SetFlags(flag)
}
// This function is provided so in-package callers can adjust the stack depth passed to log.Output
func (l *StdLogger) output(level LogLevel, stackAdjust int, format string, args ...interface{}) {
if level < Level {
return
}
ll := l.dlog
switch level {
case Error: fallthrough
case Warning: ll = l.elog
}
if int(level) < len(headers) && int(level) >= 0 {
format = headers[int(level)] + format
}
ll.Output(stackAdjust + 2, fmt.Sprintf(format, args...))
}
func levelStringToLogLevel(ls string) LogLevel {
ls = strings.ToUpper(strings.TrimSpace(ls))
switch ls {
case "ERROR": return Error
case "WARNING": return Warning
case "INFO": return Info
case "DEBUG": fallthrough
default: return Debug
}
}
func init() {
Level = levelStringToLogLevel(config.Default().GetOrDefault(LogLevelConfKey, "DEBUG"))
Std = newStdLogger()
}
|
Cutaneous manifestations associated with malignancy of the head and neck.
Most cutaneous malignancies of the head and neck (HN) are non-melanoma skin cancers, predominantly basal cell carcinomas (BCCs) and squamous cell carcinomas (SCCs). Less common entities include Merkel cell carcinoma (MCC), sebaceous carcinoma (SC), and angiosarcoma. Treatment is based on histology subtype, stage, and extent of involvement. Surgery is the primary means of treatment and includes wide local excision, Mohs micrographic surgery, sentinel lymph node biopsy, and cervical lymphadenectomy. Multidisciplinary management including radiation and targeted chemotherapy are critical adjuncts to surgery. Surgical planning must balance oncologic, functional, and cosmetic considerations. This review addresses cutaneous manifestations of primary malignancies of the HN and dermatologic complications of small molecule inhibitors used for targeted therapy. A working knowledge of both the cutaneous malignancies (CM) in the head and neck as well as the secondary dermatologic manifestations is relevant to multiple disciplines including dermatology, medical oncology, radiation oncology, and surgical oncology. |
/**
* Represents a Real16 number
*/
private class Real16 extends AbstractNumericOther {
private Real16(PdbByteReader reader) throws PdbException {
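// a size of 2 bytes corresponds to a 16-bit (half-precision) real value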
super(reader, 2);
}
} |
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <pybind11/operators.h>
#include <stack>
#include "kernel/oplib/oplib.h"
#include "pipeline/jit/pipeline.h"
#include "frontend/operator/composite/composite.h"
#include "pipeline/pynative/pynative_execute.h"
#include "utils/symbolic.h"
#include "include/common/pybind_api/api_register.h"
#include "include/common/utils/python_adapter.h"
#ifndef ENABLE_SECURITY
#include "include/common/utils/summary/event_writer.h"
#endif
#include "include/common/utils/config_manager.h"
#include "include/common/utils/mpi/mpi_config.h"
#include "utils/ms_utils.h"
#include "include/common/utils/parallel_context.h"
#include "frontend/parallel/costmodel_context.h"
#include "frontend/optimizer/ad/dfunctor.h"
#ifdef ENABLE_GPU_COLLECTIVE
#include "plugin/device/gpu/hal/device/distribution/collective_init.h"
#else
#include "plugin/device/gpu/hal/device/distribution/collective_fake_init.h"
#endif
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#include "ps/util.h"
#endif
#include "ps/ps_context.h"
#include "distributed/init.h"
#include "distributed/recovery/recovery_context.h"
#include "distributed/collective/collective_manager.h"
#include "pybind_api/gil_scoped_long_running.h"
namespace py = pybind11;
using GraphExecutorPy = mindspore::pipeline::GraphExecutorPy;
using Pipeline = mindspore::pipeline::Pipeline;
using PrimitivePy = mindspore::PrimitivePy;
using MetaFuncGraph = mindspore::MetaFuncGraph;
#ifndef ENABLE_SECURITY
using EventWriter = mindspore::summary::EventWriter;
#endif // ENABLE_SECURITY
using OpLib = mindspore::kernel::OpLib;
using ParallelContext = mindspore::parallel::ParallelContext;
using CostModelContext = mindspore::parallel::CostModelContext;
using mindspore::MsCtxParam;
using PSContext = mindspore::ps::PSContext;
using CollectiveManager = mindspore::distributed::collective::CollectiveManager;
using RecoveryContext = mindspore::distributed::recovery::RecoveryContext;
// Interface with python
PYBIND11_MODULE(_c_expression, m) {
// The OMP_NUM_THREADS has no effect when set in backend, so set it here in advance.
mindspore::common::SetOMPThreadNum();
m.doc() = "MindSpore c plugin";
auto fns = mindspore::PybindDefineRegister::AllFuncs();
auto inheritance_map = mindspore::PybindDefineRegister::GetInheritanceMap();
std::set<std::string> has_inited = {""};
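// Pre-seeding with the empty string presumably keeps an empty parent entry from ever being "initialized" below.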
auto get_inherit_stack = [&inheritance_map](const string &class_name) -> std::vector<std::string> {
std::vector<string> parent_names;
for (auto parent_name = inheritance_map.find(class_name); parent_name != inheritance_map.end();
parent_name = inheritance_map.find(parent_name->second)) {
parent_names.emplace_back(parent_name->second);
}
return parent_names;
};
for (auto &item : fns) {
if (has_inited.find(item.first) != has_inited.end()) {
continue;
}
auto parent_names = get_inherit_stack(item.first);
// Init parent class
std::for_each(parent_names.rbegin(), parent_names.rend(), [&fns, &has_inited, &m](const std::string &parent_name) {
if (has_inited.find(parent_name) == has_inited.end()) {
fns[parent_name](&m);
has_inited.emplace(parent_name);
}
});
// Init current class
item.second(&m);
has_inited.emplace(item.first);
}
mindspore::ScopedLongRunning::SetHook(std::make_unique<mindspore::GilScopedLongRunningHook>());
// Class Pipeline interface
(void)py::class_<GraphExecutorPy, std::shared_ptr<GraphExecutorPy>>(m, "GraphExecutor_")
.def_static("get_instance", &GraphExecutorPy::GetInstance, "Executor get_instance.")
.def("__call__", &GraphExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.")
.def("del_net_res", &GraphExecutorPy::DelNetRes, py::arg("network_id") = py::set(), "Delete network resource.")
.def("get_func_graph", &GraphExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.")
.def("get_func_graph_proto", &GraphExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""),
py::arg("type") = py::str("onnx_ir"), "Get graph proto string by specifying ir type.")
.def("compile", &GraphExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""),
py::arg("use_vm") = py::bool_(false), "Compile obj by executor.")
.def("updata_param_node_default_input", &GraphExecutorPy::UpdataParamNodeDefaultInput, py::arg("phase"),
py::arg("params"), "Fetch the inputs of Conv or Matmul for quant export.")
.def("get_parameter_layout", &GraphExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"),
"Get Parameter Tensor Layout Dictionary.")
.def("get_parallel_graph_info", &GraphExecutorPy::GetParallelGraphInfo, py::arg("phase") = py::str("train"),
"Get graph info in step_parallel stage.")
.def("get_parallel_parameter_name_list", &GraphExecutorPy::GetParallelParameterNameList,
py::arg("phase") = py::str("train"), "Get Parallel Parameter Name List.")
.def("get_strategy", &GraphExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"),
"Get CNode Strategy Dictionary.")
.def("get_num_parallel_ops", &GraphExecutorPy::GetNumOpsInfo, py::arg("phase") = py::str("train"),
"Get the number of parallel operators.")
.def("get_allreduce_fusion", &GraphExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"),
"Get Allreduce Fusion Dictionary.")
.def("fetch_info_for_quant_export", &GraphExecutorPy::FetchInfoForQuantExport, py::arg("phase") = py::str("train"),
"Fetch the inputs of Conv or Matmul for quant export.")
.def("build_data_graph", &GraphExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"),
py::arg("broadcast_params") = py::dict(), "Build data graph.")
.def("has_compiled", &GraphExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "Get if cell compiled.")
.def("run_init_graph", &GraphExecutorPy::RunInitGraph, "Run init Graph.")
.def("set_py_exe_path", &GraphExecutorPy::PyExePath, py::arg("py_exe_path") = py::str(""),
"Set python executable path.")
.def("set_kernel_build_server_dir", &GraphExecutorPy::KernelBuildServerDir,
py::arg("kernel_build_server_dir") = py::str(""), "Set kernel build server directory path.")
.def("set_queue_name", &GraphExecutorPy::set_queue_name, py::arg("queue_name") = py::str(""),
"Set queue name for the graph loaded from compile cache.")
.def("set_enable_tuple_broaden", &GraphExecutorPy::set_enable_tuple_broaden,
py::arg("enable_tuple_broaden") = py::bool_(false), "Set tuple broaden enable.")
.def("set_compile_cache_dep_files", &GraphExecutorPy::set_compile_cache_dep_files,
py::arg("compile_cache_dep_files") = py::list(), "Set the compilation cache dependent files.")
.def("set_weights_values", &GraphExecutorPy::set_weights_values, py::arg("weights") = py::dict(),
"Set values of weights.")
.def("get_optimize_graph_proto", &GraphExecutorPy::GetOptimizeGraphProto, py::arg("phase") = py::str(""),
"Get the optimize graph proto string.")
.def("set_jit_config", &GraphExecutorPy::SetJitConfig, py::arg("jit_config") = py::dict(), "Set the jit config.")
.def("generate_arguments_key", &GraphExecutorPy::GenerateArgumentsKey, "Generate unique key of argument.");
(void)m.def("real_run_op", &mindspore::pynative::RealRunOp, "Run op pynatively.");
(void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id");
(void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl");
(void)m.def("finalize_hccl", &mindspore::pipeline::FinalizeHccl, "Finalize Hccl");
(void)m.def("get_hccl_rank_id", &mindspore::pipeline::GetHcclRankId, "Get Hccl Rank Id");
(void)m.def("get_hccl_rank_size", &mindspore::pipeline::GetHcclRankSize, "Get Hccl Rank Size");
(void)m.def("verify_inputs_signature", &mindspore::pipeline::VerifyInputSignature, "Verify input signature.");
(void)m.def("init_exec_dataset", &mindspore::pipeline::InitExecDataset, py::arg("queue_name"), py::arg("size"),
py::arg("batch_size"), py::arg("types"), py::arg("shapes"), py::arg("input_indexs"),
py::arg("phase") = py::str("dataset"), py::arg("need_run") = py::bool_(true), "Init and exec dataset.");
(void)m.def("_set_dataset_mode_config", &mindspore::ConfigManager::SetDatasetModeConfig, "API for set dataset mode.");
(void)m.def("init_pipeline", &mindspore::pipeline::InitPipeline, "Init Pipeline.");
(void)m.def("export_graph", &mindspore::pipeline::ExportGraph, "Export Graph.");
(void)m.def("load_mindir", &mindspore::pipeline::LoadMindIR, py::arg("file_name"), py::arg("dec_key") = nullptr,
py::arg("key_len") = py::int_(0), py::arg("dec_mode") = py::str("AES-GCM"), "Load model as Graph.");
(void)m.def("init_cluster", &mindspore::distributed::Initialize, "Init Cluster");
(void)py::class_<mindspore::MpiConfig, std::shared_ptr<mindspore::MpiConfig>>(m, "MpiConfig")
.def_static("get_instance", &mindspore::MpiConfig::GetInstance, "Get mpi config instance.")
.def("get_enable_mpi", &mindspore::MpiConfig::enable_mpi, "Get whether enable mpi.")
.def("set_enable_mpi", &mindspore::MpiConfig::set_enable_mpi, "Set whether to enable mpi.");
(void)py::class_<ParallelContext, std::shared_ptr<ParallelContext>>(m, "AutoParallelContext")
.def_static("get_instance", &ParallelContext::GetInstance, "Get auto parallel context instance.")
.def("get_device_num", &ParallelContext::device_num, "Get device num.")
.def("set_hccl_test_avaible", &ParallelContext::set_hccl_test_available, "Set hccl test available.")
.def("set_device_num", &ParallelContext::set_device_num, "Set device num.")
.def("get_device_num_is_set", &ParallelContext::device_num_is_set, "Get device num is set.")
.def("set_fusion_threshold_mb", &ParallelContext::set_fusion_threshold_mb, "Set fusion threshold.")
.def("set_allgather_fusion_threshold_mb", &ParallelContext::set_allgather_fusion_threshold_mb,
"Set allgather fusion threshold.")
.def("set_reducescatter_fusion_threshold_mb", &ParallelContext::set_reducescatter_fusion_threshold_mb,
"Set reducescatter fusion threshold.")
.def("fusion_threshold_mb", &ParallelContext::fusion_threshold_mb, "Get allreduce fusion threshold.")
.def("allgather_fusion_threshold_mb", &ParallelContext::allgather_fusion_threshold_mb,
"Get allgather fusion threshold.")
.def("reducescatter_fusion_threshold_mb", &ParallelContext::reducescatter_fusion_threshold_mb,
"Get reduce_scatter fusion threshold.")
.def("set_fusion_mode", &ParallelContext::set_fusion_mode, "Get fusion mode.")
.def("get_fusion_mode", &ParallelContext::get_fusion_mode, "Get fusion mode.")
.def("get_global_rank", &ParallelContext::global_rank, "Get global rank.")
.def("set_global_rank", &ParallelContext::set_global_rank, "Set global rank.")
.def("get_grad_accumulation_shard", &ParallelContext::grad_accumulation_shard, "Get grad_accumulation_shard.")
.def("set_grad_accumulation_shard", &ParallelContext::set_grad_accumulation_shard, "Set grad_accumulation_shard.")
.def("get_parallel_optimizer_threshold", &ParallelContext::get_parallel_optimizer_threshold, "Get opt threshold.")
.def("set_parallel_optimizer_threshold", &ParallelContext::set_parallel_optimizer_threshold, "Set opt threshold.")
.def("get_global_rank_is_set", &ParallelContext::global_rank_is_set, "Get global rank is set.")
.def("get_gradients_mean", &ParallelContext::gradients_mean, "Get mirror mean.")
.def("set_gradients_mean", &ParallelContext::set_gradients_mean, "Set mirror mean.")
.def("get_gradient_fp32_sync", &ParallelContext::gradient_fp32_sync, "Get cast before mirror.")
.def("set_gradient_fp32_sync", &ParallelContext::set_gradient_fp32_sync, "Set cast before mirror.")
.def("get_loss_repeated_mean", &ParallelContext::loss_repeated_mean, "Get loss repeated mean.")
.def("set_loss_repeated_mean", &ParallelContext::set_loss_repeated_mean, "Set loss repeated mean.")
.def("get_parallel_mode", &ParallelContext::parallel_mode, "Get parallel mode.")
.def("set_parallel_mode", &ParallelContext::set_parallel_mode, "Set parallel mode.")
.def("get_grad_accumulation_step", &ParallelContext::grad_accumulation_step, "Get grad accumulation step.")
.def("set_grad_accumulation_step", &ParallelContext::set_grad_accumulation_step, "Set grad accumulation step.")
.def("get_strategy_search_mode", &ParallelContext::strategy_search_mode, "Get strategy search mode.")
.def("set_strategy_search_mode", &ParallelContext::set_strategy_search_mode, "Set strategy search mode.")
.def("set_all_reduce_fusion_split_indices", &ParallelContext::SetAllReduceFusionSplitIndices,
"Set all reduce fusion split indices.")
.def("get_all_reduce_fusion_split_indices", &ParallelContext::GetAllReduceFusionSplitIndices,
"Get all reduce fusion split indices.")
.def("set_all_reduce_fusion_split_sizes", &ParallelContext::SetAllReduceFusionSplitSizes,
"Set all reduce fusion split sizes.")
.def("get_all_reduce_fusion_split_sizes", &ParallelContext::GetAllReduceFusionSplitSizes,
"Get all reduce fusion split sizes.")
.def("set_enable_all_reduce_fusion", &ParallelContext::set_enable_all_reduce_fusion,
"Set enable/disable all reduce fusion.")
.def("get_enable_all_reduce_fusion", &ParallelContext::enable_all_reduce_fusion,
"Get enable/disable all reduce fusion.")
.def("set_enable_all_gather_fusion", &ParallelContext::set_enable_all_gather_fusion,
"Set enable/disable all gather fusion.")
.def("get_enable_all_gather_fusion", &ParallelContext::enable_all_gather_fusion,
"Get enable/disable all gather fusion.")
.def("set_enable_reduce_scatter_fusion", &ParallelContext::set_enable_reduce_scatter_fusion,
"Set enable/disable reduce scatter fusion.")
.def("get_enable_reduce_scatter_fusion", &ParallelContext::enable_reduce_scatter_fusion,
"Get enable/disable reduce scatter fusion.")
.def("get_parameter_broadcast", &ParallelContext::parameter_broadcast, "Get parameter broadcast.")
.def("get_parameter_broadcast_is_set", &ParallelContext::parameter_broadcast_is_set,
"Get parameter broadcast is set.")
.def("set_parameter_broadcast", &ParallelContext::set_parameter_broadcast, "Set parameter broadcast.")
.def("set_strategy_ckpt_load_file", &ParallelContext::set_strategy_ckpt_load_file,
"Set strategy checkpoint load file.")
.def("set_strategy_ckpt_save_file", &ParallelContext::set_strategy_ckpt_save_file,
"Set strategy checkpoint save file.")
.def("get_strategy_ckpt_load_file", &ParallelContext::strategy_ckpt_load_file, "Get strategy checkpoint load file.")
.def("get_strategy_ckpt_save_file", &ParallelContext::strategy_ckpt_save_file, "Get strategy checkpoint save file.")
.def("set_group_ckpt_save_file", &ParallelContext::set_group_ckpt_save_file, "Set group checkpoint save file.")
.def("set_pipeline_stage_split_num", &ParallelContext::set_pipeline_stage_split_num,
"Set pipeline stage split num.")
.def("get_pipeline_stage_split_num", &ParallelContext::pipeline_stage_split_num, "Get pipeline stage split num.")
.def("set_full_batch", &ParallelContext::set_full_batch, "Set whether load full batch on each device.")
.def("get_full_batch", &ParallelContext::full_batch, "Get whether load full batch on each device.")
.def("set_dataset_strategy", &ParallelContext::set_dataset_strategy, "Set dataset sharding strategy.")
.def("get_dataset_strategy", &ParallelContext::dataset_strategy, "Get dataset sharding strategy.")
.def("set_enable_parallel_optimizer", &ParallelContext::set_enable_parallel_optimizer,
"Set enable/disable parallel optimizer.")
.def("get_enable_parallel_optimizer", &ParallelContext::enable_parallel_optimizer,
"Get enable/disable parallel optimizer.")
.def("set_communi_parallel_mode", &ParallelContext::set_communi_parallel_mode, "Set communication parallel mode.")
.def("get_communi_parallel_mode", &ParallelContext::communi_parallel_mode, "Get communication parallel mode.")
.def("set_optimizer_weight_shard_size", &ParallelContext::set_optimizer_weight_shard_size,
"Set opt shard group size when not fully use parallel optimizer.")
.def("get_optimizer_weight_shard_size", &ParallelContext::optimizer_weight_shard_size,
"Get opt shard group size when not fully use parallel optimizer.")
.def("set_optimizer_weight_shard_aggregated_save", &ParallelContext::set_optimizer_weight_shard_aggregated_save,
"Set whether to integrated save weight shard when enable parallel optimizer.")
.def("get_optimizer_weight_shard_aggregated_save", &ParallelContext::optimizer_weight_shard_aggregated_save,
"Get whether to integrated save weight shard when enable parallel optimizer.")
.def("set_enable_alltoall", &ParallelContext::set_enable_all2all, "Set the enabling AllToAll value.")
.def("get_enable_alltoall", &ParallelContext::enable_all2all, "Get the enabling AllToAll value.")
.def("set_sharding_propagation", &ParallelContext::set_sharding_propagation,
"Set sharding strategy propagation value.")
.def("get_sharding_propagation", &ParallelContext::sharding_propagation, "Get sharding strategy propagation value.")
.def("reset", &ParallelContext::Reset, "Reset auto parallel context.");
(void)py::class_<CostModelContext, std::shared_ptr<CostModelContext>>(m, "CostModelContext")
.def_static("get_instance", &CostModelContext::GetInstance, "Get cost_model context instance.")
.def("set_device_memory_capacity", &CostModelContext::set_device_memory_capacity,
"Set the capacity of device memory.")
.def("get_device_memory_capacity", &CostModelContext::device_memory_capacity, "Get the capacity of device memory.")
.def("set_costmodel_alpha", &CostModelContext::set_costmodel_alpha,
"Set the parameter cost_model_alpha of the DP algorithm.")
.def("get_costmodel_alpha", &CostModelContext::costmodel_alpha,
"Get the parameter cost_model_alpha of the DP algorithm.")
.def("set_costmodel_beta", &CostModelContext::set_costmodel_beta,
"Set the parameter cost_model_beta of the DP algorithm.")
.def("get_costmodel_beta", &CostModelContext::costmodel_beta,
"Get the parameter cost_model_beta of the DP algorithm.")
.def("set_costmodel_gamma", &CostModelContext::set_costmodel_gamma,
"Set the parameter cost_model_gamma of the DP algorithm")
.def("get_costmodel_gamma", &CostModelContext::costmodel_gamma,
"Get the parameter cost_model_gamma of the DP algorithm.")
.def("set_costmodel_communi_threshold", &CostModelContext::set_costmodel_communi_threshold,
"Set the parameter cost_model_communi_threshold of the DP algorithm.")
.def("get_costmodel_communi_threshold", &CostModelContext::costmodel_communi_threshold,
"Get the parameter cost_model_communi_threshold of the DP algorithm.")
.def("set_costmodel_communi_const", &CostModelContext::set_costmodel_communi_const,
"Set the parameter cost_model_communi_const of the DP algorithm.")
.def("get_costmodel_communi_const", &CostModelContext::costmodel_communi_const,
"Get the parameter cost_model_communi_const of the DP algorithm.")
.def("set_costmodel_communi_bias", &CostModelContext::set_costmodel_communi_bias,
"Set the parameter cost_model_communi_bias of the DP algorithm.")
.def("get_costmodel_communi_bias", &CostModelContext::costmodel_communi_bias,
"Get the parameter cost_model_communi_bias of the DP algorithm.")
.def("set_multi_subgraphs", &CostModelContext::set_multi_subgraphs, "Set the parameter is_multi_subgraphs.")
.def("get_multi_subgraphs", &CostModelContext::is_multi_subgraphs, "Get the parameter is_multi_subgraphs.")
.def("set_run_phase", &CostModelContext::set_run_phase, "Set the flag run_phase.")
.def("get_run_phase", &CostModelContext::run_phase, "Get the flag run_phase.")
.def("set_costmodel_allreduce_fusion_algorithm", &CostModelContext::set_costmodel_allreduce_fusion_algorithm,
"Set the parameter gradient AllReduce fusion algorithm.")
.def("get_costmodel_allreduce_fusion_algorithm", &CostModelContext::costmodel_allreduce_fusion_algorithm,
"Get the parameter gradient AllReduce fusion algorithm.")
.def("set_costmodel_allreduce_fusion_times", &CostModelContext::set_costmodel_allreduce_fusion_times,
"Set the parameter gradient AllReduce times.")
.def("get_costmodel_allreduce_fusion_times", &CostModelContext::costmodel_allreduce_fusion_times,
"Get the parameter gradient AllReduce times.")
.def("set_costmodel_allreduce_fusion_tail_percent", &CostModelContext::set_costmodel_allreduce_fusion_tail_percent,
"Set the parameter gradient AllReduce fusion tail percent.")
.def("get_costmodel_allreduce_fusion_tail_percent", &CostModelContext::costmodel_allreduce_fusion_tail_percent,
"Get the parameter gradient AllReduce fusion tail percent.")
.def("set_costmodel_allreduce_fusion_tail_time", &CostModelContext::set_costmodel_allreduce_fusion_tail_time,
"Set the parameter gradient AllReduce fusion tail time.")
.def("get_costmodel_allreduce_fusion_tail_time", &CostModelContext::costmodel_allreduce_fusion_tail_time,
"Get the parameter gradient AllReduce fusion tail time.")
.def("set_costmodel_allreduce_fusion_allreduce_inherent_time",
&CostModelContext::set_costmodel_allreduce_fusion_allreduce_inherent_time,
"Set the parameter gradient AllReduce fusion allreduce inherent time.")
.def("get_costmodel_allreduce_fusion_allreduce_inherent_time",
&CostModelContext::costmodel_allreduce_fusion_allreduce_inherent_time,
"Get the parameter gradient AllReduce fusion allreduce inherent time.")
.def("set_costmodel_allreduce_fusion_allreduce_bandwidth",
&CostModelContext::set_costmodel_allreduce_fusion_allreduce_bandwidth,
"Set the parameter gradient AllReduce fusion allreduce bandwidth.")
.def("get_costmodel_allreduce_fusion_allreduce_bandwidth",
&CostModelContext::costmodel_allreduce_fusion_allreduce_bandwidth,
"Get the parameter gradient AllReduce fusion allreduce bandwidth.")
.def("set_costmodel_allreduce_fusion_computation_time_parameter",
&CostModelContext::set_costmodel_allreduce_fusion_computation_time_parameter,
"Set the parameter gradient AllReduce fusion computation time parameter.")
.def("get_costmodel_allreduce_fusion_computation_time_parameter",
&CostModelContext::costmodel_allreduce_fusion_computation_time_parameter,
"Get the parameter gradient AllReduce fusion computation time parameter.")
.def("set_tensor_slice_align_enable", &CostModelContext::set_tensor_slice_alignment_enable,
"Set the parameter tensor_slice_align_enable in strategy generation.")
.def("get_tensor_slice_align_enable", &CostModelContext::tensor_slice_alignment_enable,
"Get the parameter tensor_slice_align_enable in strategy generation.")
.def("set_tensor_slice_align_size", &CostModelContext::set_tensor_slice_alignment_size,
"Set the parameter tensor_slice_size in strategy generation.")
.def("get_tensor_slice_align_size", &CostModelContext::tensor_slice_alignment_size,
"Get the parameter tensor_slice_size in strategy generation.")
.def("set_fully_use_devices", &CostModelContext::set_fully_use_device,
"Set the parameter fully_use_devices in the DP algorithm.")
.def("get_fully_use_devices", &CostModelContext::fully_use_device,
"Get the parameter fully_use_devices in the DP algorithm.")
.def("set_elementwise_op_strategy_follow", &CostModelContext::set_elementwise_stra_follow,
"Set the parameter elementwise_op_strategy_follow in the DP algorithm.")
.def("get_elementwise_op_strategy_follow", &CostModelContext::elementwise_stra_follow,
"Get the parameter elementwise_op_strategy_follow in the DP algorithm.")
.def("set_dp_algo_enable_approxi", &CostModelContext::set_dp_algo_enable_approxi,
"Set the flag whether enabling approximation in the DP algorithm.")
.def("get_dp_algo_enable_approxi", &CostModelContext::dp_algo_enable_approxi,
"Get the flag whether enabling approximation in the DP algorithm.")
.def("set_dp_algo_approxi_epsilon", &CostModelContext::set_dp_algo_approxi_epsilon,
"Set the epsilon which is used in the approximation of DP algorithm.")
.def("get_dp_algo_approxi_epsilon", &CostModelContext::dp_algo_approxi_epsilon,
"Get the epsilon which is used in the approximation of DP algorithm.")
.def("set_dp_algo_single_loop", &CostModelContext::set_dp_algo_single_loop,
"Set the flag of generating a single suite of OperatorInfos in for-loop.")
.def("get_dp_algo_single_loop", &CostModelContext::dp_algo_single_loop,
"Get the flag of whether or not generating a single suite of OperatorInfos in for-loop.")
.def("reset_cost_model", &CostModelContext::ResetCostModel, "Reset the CostModelContext.")
.def("reset_algo_parameters", &CostModelContext::ResetAlgoParameters, "Reset the AlgoParameters.");
(void)py::module::import("atexit").attr("register")(py::cpp_function{[&]() -> void {
#ifdef ENABLE_MINDDATA
MS_LOG(INFO) << "Start releasing dataset handles...";
py::module iterators = py::module::import("mindspore.dataset.engine.iterators");
(void)iterators.attr("_cleanup")();
MS_LOG(INFO) << "End release dataset handles.";
#endif
// ClearResAtexit should be called only in the case that C++ is calling the Python interface.
if (mindspore::python_adapter::IsPythonEnv()) {
mindspore::pipeline::ClearResAtexit();
}
}});
#ifndef ENABLE_SECURITY
(void)py::class_<EventWriter, std::shared_ptr<EventWriter>>(m, "EventWriter_")
.def(py::init<const std::string &>())
.def("GetFileName", &EventWriter::GetFileName, "Get the file name.")
.def("Open", &EventWriter::Open, "Open the write file.")
.def("Write", &EventWriter::Write, "Write the serialize event.")
.def("EventCount", &EventWriter::GetWriteEventCount, "Write event count.")
.def("Flush", &EventWriter::Flush, "Flush the event.")
.def("Close", &EventWriter::Close, "Close the write.")
.def("Shut", &EventWriter::Shut, "Final close the write.");
#endif // ENABLE_SECURITY
(void)py::class_<OpLib, std::shared_ptr<OpLib>>(m, "Oplib")
.def(py::init())
.def_static("reg_op", &OpLib::RegOp, "Register op info.");
#ifdef ENABLE_GPU_COLLECTIVE
(void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::InitCollective,
"Init gpu collective communication mode.");
(void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::FinalizeCollective,
"Finalize gpu collective communication mode.");
(void)m.def("get_rank_id", &mindspore::device::gpu::CollectiveInitializer::GetRankID,
"Finalize gpu collective communication mode.");
(void)m.def("get_rank_size", &mindspore::device::gpu::CollectiveInitializer::GetRankSize,
"Finalize gpu collective communication mode.");
#else
(void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::InitCollective,
"Init gpu collective communication mode.");
(void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::FinalizeCollective,
"Finalize gpu collective communication mode.");
(void)m.def("get_rank_id", &mindspore::device::gpu::CollectiveFakeInitializer::GetRankID,
"Finalize gpu collective communication mode.");
(void)m.def("get_rank_size", &mindspore::device::gpu::CollectiveFakeInitializer::GetRankSize,
"Finalize gpu collective communication mode.");
#endif
(void)py::class_<CollectiveManager, std::shared_ptr<CollectiveManager>>(m, "CollectiveManager")
.def_static("get_instance", &CollectiveManager::instance, "Get collective manager instance.")
.def("get_rank_id", &CollectiveManager::GetRankId, "Get the node rank id.")
.def("get_group_size", &CollectiveManager::GetGroupSize, "Get the nodes number in the collective communication.");
(void)py::class_<PSContext, std::shared_ptr<PSContext>>(m, "PSContext")
.def_static("get_instance", &PSContext::instance, "Get PS context instance.")
.def("set_ps_enable", &PSContext::SetPSEnable, "Set PS mode enabled or disabled.")
.def("is_ps_mode", &PSContext::is_ps_mode, "Get PS mode enable-disable status.")
.def("reset", &PSContext::Reset, "Reset PS context attributes.")
.def("is_worker", &PSContext::is_worker, "Get whether the role of this process is Worker.")
.def("is_server", &PSContext::is_server, "Get whether the role of this process is PServer.")
.def("is_scheduler", &PSContext::is_scheduler, "Get whether the role of this process is Scheduler.")
.def("ps_rank_id", &PSContext::ps_rank_id, "Get Worker and PServer rank id.")
.def("insert_hash_table_size", &PSContext::InsertHashTableSize, "Insert hash table size.")
.def("reinsert_hash_table_size", &PSContext::ReInsertHashTableSize,
"Insert hash table size with new parameter name.")
.def("insert_weight_init_info", &PSContext::InsertWeightInitInfo, "Insert embedding table initialization seed.")
.def("insert_accumu_init_info", &PSContext::InsertAccumuInitInfo, "Insert accumulation initialization value.")
.def("clone_hash_table", &PSContext::CloneHashTable, "Clone a hash table.")
.def("set_cache_enable", &PSContext::set_cache_enable, "Set ps mode cache enable or not.")
.def("cache_enable", &PSContext::cache_enable, "Get ps mode cache enable or not.")
.def("set_rank_id", &PSContext::set_rank_id, "Set rank id for worker on ps mode.")
.def("set_server_mode", &PSContext::set_server_mode, "Set server mode.")
.def("server_mode", &PSContext::server_mode, "Get server mode.")
.def("set_ms_role", &PSContext::set_ms_role, "Set role for this process.")
.def("ms_role", &PSContext::ms_role, "Get role for this process.")
.def("set_worker_num", &PSContext::set_worker_num, "Set worker number.")
.def("worker_num", &PSContext::worker_num, "Get worker number.")
.def("set_server_num", &PSContext::set_server_num, "Set server number.")
.def("server_num", &PSContext::server_num, "Get server number.")
.def("set_scheduler_ip", &PSContext::set_scheduler_ip, "Set scheduler ip.")
.def("scheduler_ip", &PSContext::scheduler_ip, "Get scheduler ip.")
.def("set_scheduler_port", &PSContext::set_scheduler_port, "Set scheduler port.")
.def("scheduler_port", &PSContext::scheduler_port, "Get scheduler port.")
.def("set_fl_server_port", &PSContext::set_fl_server_port, "Set federated learning server port.")
.def("fl_server_port", &PSContext::fl_server_port, "Get federated learning server port.")
.def("set_fl_client_enable", &PSContext::set_fl_client_enable, "Set federated learning client.")
.def("fl_client_enable", &PSContext::fl_client_enable, "Get federated learning client.")
.def("set_start_fl_job_threshold", &PSContext::set_start_fl_job_threshold,
"Set threshold count for startFLJob round.")
.def("start_fl_job_threshold", &PSContext::start_fl_job_threshold, "Get threshold count for startFLJob round.")
.def("set_start_fl_job_time_window", &PSContext::set_start_fl_job_time_window,
"Set time window for startFLJob round.")
.def("start_fl_job_time_window", &PSContext::start_fl_job_time_window, "Get time window for startFLJob round.")
.def("set_update_model_ratio", &PSContext::set_update_model_ratio,
"Set threshold count ratio for updateModel round.")
.def("update_model_ratio", &PSContext::update_model_ratio, "Get threshold count ratio for updateModel round.")
.def("set_update_model_time_window", &PSContext::set_update_model_time_window,
"Set time window for updateModel round.")
.def("update_model_time_window", &PSContext::update_model_time_window, "Get time window for updateModel round.")
.def("set_share_secrets_ratio", &PSContext::set_share_secrets_ratio,
"Set threshold count ratio for share secrets round.")
.def("share_secrets_ratio", &PSContext::share_secrets_ratio, "Get threshold count ratio for share secrets round.")
.def("set_cipher_time_window", &PSContext::set_cipher_time_window, "Set time window for each cipher round.")
.def("cipher_time_window", &PSContext::cipher_time_window, "Get time window for cipher rounds.")
.def("set_reconstruct_secrets_threshold", &PSContext::set_reconstruct_secrets_threshold,
"Set threshold count for reconstruct secrets round.")
.def("reconstruct_secrets_threshold", &PSContext::reconstruct_secrets_threshold,
"Get threshold count for reconstruct secrets round.")
.def("set_fl_name", &PSContext::set_fl_name, "Set federated learning name.")
.def("fl_name", &PSContext::fl_name, "Get federated learning name.")
.def("set_fl_iteration_num", &PSContext::set_fl_iteration_num, "Set federated learning iteration number.")
.def("fl_iteration_num", &PSContext::fl_iteration_num, "Get federated learning iteration number.")
.def("set_client_epoch_num", &PSContext::set_client_epoch_num, "Set federated learning client epoch number.")
.def("client_epoch_num", &PSContext::client_epoch_num, "Get federated learning client epoch number.")
.def("set_client_batch_size", &PSContext::set_client_batch_size, "Set federated learning client batch size.")
.def("client_batch_size", &PSContext::client_batch_size, "Get federated learning client batch size.")
.def("set_client_learning_rate", &PSContext::set_client_learning_rate,
"Set federated learning client learning rate.")
.def("client_learning_rate", &PSContext::client_learning_rate,
"Get worker's standalone training step number before communicating with server.")
.def("set_worker_step_num_per_iteration", &PSContext::set_worker_step_num_per_iteration,
"Set worker's standalone training step number before communicating with server..")
.def("worker_step_num_per_iteration", &PSContext::worker_step_num_per_iteration,
"Get federated learning client learning rate.")
.def("set_secure_aggregation", &PSContext::set_secure_aggregation,
"Set federated learning client using secure aggregation.")
.def("set_dp_eps", &PSContext::set_dp_eps, "Set dp epsilon for federated learning secure aggregation.")
.def("dp_eps", &PSContext::dp_eps, "Get dp epsilon for federated learning secure aggregation.")
.def("set_dp_delta", &PSContext::set_dp_delta, "Set dp delta for federated learning secure aggregation.")
.def("dp_delta", &PSContext::dp_delta, "Get dp delta for federated learning secure aggregation.")
.def("set_dp_norm_clip", &PSContext::set_dp_norm_clip,
"Set dp norm clip for federated learning secure aggregation.")
.def("dp_norm_clip", &PSContext::dp_norm_clip, "Get dp norm clip for federated learning secure aggregation.")
.def("set_encrypt_type", &PSContext::set_encrypt_type,
"Set encrypt type for federated learning secure aggregation.")
.def("encrypt_type", &PSContext::encrypt_type, "Get encrypt type for federated learning secure aggregation.")
.def("set_root_first_ca_path", &PSContext::set_root_first_ca_path, "Set root first ca path.")
.def("root_first_ca_path", &PSContext::root_first_ca_path, "Get root first ca path.")
.def("set_root_second_ca_path", &PSContext::set_root_second_ca_path, "Set root second ca path.")
.def("root_second_ca_path", &PSContext::root_second_ca_path, "Get root second ca path.")
.def("set_pki_verify", &PSContext::set_pki_verify, "Set pki verify.")
.def("pki_verify", &PSContext::pki_verify, "Get pki verify.")
.def("set_scheduler_manage_port", &PSContext::set_scheduler_manage_port,
"Set scheduler manage port used to scale out/in.")
.def("scheduler_manage_port", &PSContext::scheduler_manage_port, "Get scheduler manage port used to scale out/in.")
.def("set_equip_crl_path", &PSContext::set_equip_crl_path, "Set root second crl path.")
.def("set_replay_attack_time_diff", &PSContext::set_replay_attack_time_diff, "Set replay attack time diff.")
.def("equip_crl_path", &PSContext::equip_crl_path, "Get root second crl path.")
.def("replay_attack_time_diff", &PSContext::replay_attack_time_diff, "Get replay attack time diff.")
.def("set_enable_ssl", &PSContext::set_enable_ssl, "Set PS SSL mode enabled or disabled.")
.def("enable_ssl", &PSContext::enable_ssl, "Get PS SSL mode enabled or disabled.")
.def("set_client_password", &PSContext::set_client_password, "Set the client password to decode the p12 file.")
.def("client_password", &PSContext::client_password, "Get the client password to decode the p12 file.")
.def("set_server_password", &PSContext::set_server_password, "Set the server password to decode the p12 file.")
.def("server_password", &PSContext::server_password, "Get the server password to decode the p12 file.")
.def("set_config_file_path", &PSContext::set_config_file_path,
"Set configuration files required by the communication layer.")
.def("config_file_path", &PSContext::config_file_path,
"Get configuration files required by the communication layer.")
.def("set_encrypt_type", &PSContext::set_encrypt_type,
"Set encrypt type for federated learning secure aggregation.")
.def("set_sign_k", &PSContext::set_sign_k, "Set sign k for federated learning SignDS.")
.def("sign_k", &PSContext::sign_k, "Get sign k for federated learning SignDS.")
.def("set_sign_eps", &PSContext::set_sign_eps, "Set sign eps for federated learning SignDS.")
.def("sign_eps", &PSContext::sign_eps, "Get sign eps for federated learning SignDS.")
.def("set_sign_thr_ratio", &PSContext::set_sign_thr_ratio, "Set sign thr ratio for federated learning SignDS.")
.def("sign_thr_ratio", &PSContext::sign_thr_ratio, "Get sign thr ratio for federated learning SignDS.")
.def("set_sign_global_lr", &PSContext::set_sign_global_lr, "Set sign global lr for federated learning SignDS.")
.def("sign_global_lr", &PSContext::sign_global_lr, "Get sign global lr for federated learning SignDS.")
.def("set_sign_dim_out", &PSContext::set_sign_dim_out, "Set sign dim out for federated learning SignDS.")
.def("sign_dim_out", &PSContext::sign_dim_out, "Get sign dim out for federated learning SignDS.")
.def("set_http_url_prefix", &PSContext::set_http_url_prefix, "Set http url prefix for http communication.")
.def("http_url_prefix", &PSContext::http_url_prefix, "http url prefix for http communication.")
.def("set_global_iteration_time_window", &PSContext::set_global_iteration_time_window,
"Set global iteration time window.")
.def("global_iteration_time_window", &PSContext::global_iteration_time_window, "Get global iteration time window.")
.def("set_upload_compress_type", &PSContext::set_upload_compress_type, "Set upload compress type.")
.def("upload_compress_type", &PSContext::upload_compress_type, "Get upload compress type.")
.def("set_upload_sparse_rate", &PSContext::set_upload_sparse_rate, "Set upload sparse rate.")
.def("upload_sparse_rate", &PSContext::upload_sparse_rate, "Get upload sparse rate.")
.def("set_download_compress_type", &PSContext::set_download_compress_type, "Set download compress type.")
.def("download_compress_type", &PSContext::download_compress_type, "Get download compress type.")
.def("set_checkpoint_dir", &PSContext::set_checkpoint_dir, "Set server checkpoint directory.")
.def("checkpoint_dir", &PSContext::checkpoint_dir, "Server checkpoint directory.")
.def("set_instance_name", &PSContext::set_instance_name, "Set instance name.")
.def("instance_name", &PSContext::instance_name, "Get instance name.")
.def("set_participation_time_level", &PSContext::set_participation_time_level, "Set participation time level.")
.def("participation_time_level", &PSContext::participation_time_level, "Get participation time level.")
.def("set_continuous_failure_times", &PSContext::set_continuous_failure_times, "Set continuous failure times")
.def("continuous_failure_times", &PSContext::continuous_failure_times, "Get continuous failure times.");
(void)m.def("_encrypt", &mindspore::pipeline::PyEncrypt, "Encrypt the data.");
(void)m.def("_decrypt", &mindspore::pipeline::PyDecrypt, "Decrypt the data.");
(void)m.def("_is_cipher_file", &mindspore::pipeline::PyIsCipherFile, "Determine whether the file is encrypted");
(void)py::class_<RecoveryContext, std::shared_ptr<RecoveryContext>>(m, "RecoveryContext")
.def_static("get_instance", &RecoveryContext::GetInstance, "Get recovery context instance.")
.def("enable_recovery", &RecoveryContext::enable_recovery, "Get whether enable recovery.")
.def("latest_ckpt_file", &RecoveryContext::latest_ckpt_file, "Get latest checkpoint file path.")
.def("latest_ckpt_epoch", &RecoveryContext::latest_ckpt_epoch, "Get the epoch of latest checkpoint.")
.def("latest_ckpt_step", &RecoveryContext::latest_ckpt_step, "Get the step of latest checkpoint.")
.def("set_need_reset", &RecoveryContext::set_need_reset,
"Set whether should call reset minddata and load ckpt for disaster recovery.")
.def("need_reset", &RecoveryContext::need_reset,
"Get whether should call reset minddata and load ckpt for disaster recovery.")
.def("recovery_path", &RecoveryContext::recovery_path,
"Get the recovery path used to save that need to be persisted.")
.def("ckpt_path", &RecoveryContext::GetCkptPath, "Get the recovery path used to save checkpoint.")
.def("set_ckpt_path", &RecoveryContext::SetCkptPath, "Set the recovery path used to save checkpoint.");
#ifndef _WIN32
(void)m.def("_export_bprop_mindir", &mindspore::ad::KPrim::ExportBpropMindir,
"Export the backpropagation function to mindir file.");
#endif
(void)m.def("_ms_memory_recycle", &mindspore::pipeline::MemoryRecycle, "Recycle memory used by mindspore.");
}
|
/**
* find_first_and_bit - find the first set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number of the first bit set in both regions;
* if no common bits are set, returns @size.
*/
static inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
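/* Fast path: for a small compile-time-constant size the bitmaps fit in one word and can be tested inline. */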
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_and_bit(addr1, addr2, size);
} |
// substitute any punctuation with special code as necessary
public static String replacePunc(String s, int tType, int oType)
{
int i, n;
String s1, s2, snew;
ArrayList<String> to, from;
switch (oType) {
case ECHO: to = puncReplaceTo_ECHO; from = puncReplaceFrom_ECHO; break;
case HTML: to = puncReplaceTo_HTML; from = puncReplaceFrom_HTML; break;
case LATEX: if (tType == COMMENT) {
to = puncReplaceTo_LATEX; from = puncReplaceFrom_LATEX;
} else {
to = puncReplaceTo_LATEXMATHS; from = puncReplaceFrom_LATEXMATHS;
} break;
case PRISMGUI: to = puncReplaceTo_PRISMGUI; from = puncReplaceFrom_PRISMGUI; break;
default: to = puncReplaceTo_ECHO; from = puncReplaceFrom_ECHO; break;
}
snew = s;
n = from.size();
for (i = 0; i < n; i++) {
s1 = from.get(i);
s2 = to.get(i);
snew = snew.replaceAll(s1, s2);
}
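// For LaTeX output, alternate plain double quotes between opening (``) and closing ('') marks;
// note the alternation is per call, so every quote in one string gets the same mark.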
if (oType == LATEX && s.contains("\"")) {
snew = snew.replaceAll("\"", (quoteAlternator==1)?"\\\\mbox{``}":"\\\\mbox{''}");
quoteAlternator = 3 - quoteAlternator;
}
return snew;
} |
#include "p2p.h"
#include <stdio.h>
#include <string.h> // memset()
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <netdb.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include "logger.h"
#include "../protobuf/client.pb.h"
namespace DBC {
P2P::P2P(){
mRunning = false;
}
P2P::~P2P(){
stop();
}
bool P2P::start(){
std::unique_lock<std::mutex> lock(mThreadMutex);
std::unique_lock<std::mutex> lock2(mDataMutex);
Logger::log(LogType::P2P, DEBUG, "Starting P2P network");
if(mRunning){
Logger::log(LogType::P2P, INFO, "P2P network already running");
//stop();
return true; //It's ok
}
// We first do the initialization of socket in the main thread
// This makes it easy to report errors
// If everything in the setup succeeds, then start the thread loop
struct addrinfo hints, *res;
int reuseaddr = 1;
// Get the address info
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
if (getaddrinfo(NULL, mPort, &hints, &res) != 0) {
Logger::log(LogType::P2P, ERROR, "getaddrinfo %d", errno);
return false;
}
// Create the socket
mMainSocket = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (mMainSocket == -1) {
Logger::log(LogType::P2P, ERROR, "socket");
return false;
}
// Enable the socket to reuse the address
if (setsockopt(mMainSocket, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(int)) == -1) {
Logger::log(LogType::P2P, ERROR, "setsockopt %d", errno);
return false;
}
// Bind to the address
if (bind(mMainSocket, res->ai_addr, res->ai_addrlen) == -1) {
Logger::log(LogType::P2P, ERROR, "bind %d", errno);
return false;
}
// Listen
if (listen(mMainSocket, mListenQueue) == -1) {
Logger::log(LogType::P2P, ERROR, "listen %d", errno);
return false;
}
freeaddrinfo(res);
Logger::log(LogType::P2P, INFO, "opened listen socket on port %s", mPort);
mEventFd = eventfd(0,0);
if(mEventFd == -1){
Logger::log(LogType::P2P, ERROR, "Eventfd not set up %d", errno);
return false;
}
//Launch thread
mClientSocket.clear();
mThread = std::thread(&P2P::thread_loop, this);
mRunning = true;
return true;
}
bool P2P::stop(){
std::unique_lock<std::mutex> lock(mThreadMutex);
if(mRunning){
//Send signal to thread via the eventfd
uint64_t u64 = 1;
write(mEventFd, &u64, sizeof(uint64_t));
mThread.join();
close(mEventFd);
mEventFd = -1;
}
mRunning = false;
return true;
}
int P2P::thread_loop(void) {
Logger::log(LogType::P2P, INFO, "thread created");
//Initialize epoll
int epfd = epoll_create(1);
struct epoll_event event;
event.events = EPOLLIN;
event.data.fd = -1; //-1 is the main socket, not a client socket
epoll_ctl(epfd, EPOLL_CTL_ADD, mMainSocket, &event);
event.data.fd = -2; //-2 is the event socket
epoll_ctl(epfd, EPOLL_CTL_ADD, mEventFd, &event);
// Main loop
while (mRunning) {
if(epoll_wait(epfd, &event, 1, -1) == -1){
Logger::log(LogType::P2P, ERROR, "epoll error %d", errno);
break;
}
//Exit signal
if(event.data.fd == -2){
break;
}
std::unique_lock<std::mutex> lock(mDataMutex);
//Process the socket that has data
if(event.data.fd == -1){
//New connection in Main socket
socklen_t size = sizeof(struct sockaddr_in);
struct sockaddr_in their_addr;
int newsock = accept(mMainSocket, (struct sockaddr*)&their_addr, &size);
if (newsock == -1) {
Logger::log(LogType::P2P, ERROR, "accept error %d", errno);
}else{
Logger::log(LogType::P2P, INFO, "Got a connection (id %d) from %s on port %d",
newsock, inet_ntoa(their_addr.sin_addr), htons(their_addr.sin_port));
//Add it to the client socket list & epoll
mClientSocket[newsock] = {their_addr,{}};
event.events = EPOLLIN;
event.data.fd = newsock;
epoll_ctl(epfd, EPOLL_CTL_ADD, newsock, &event);
}
}else{
//Attend this socket
ClientData& data = mClientSocket[event.data.fd];
data.rxBuf.resize(data.rxBuf.size()+4096); //Extend it by 4096
int valread = read(event.data.fd, data.rxBuf.data()+data.rxBuf.size()-4096, 4096);
if (valread <= 0){ //0 means the peer disconnected; -1 means read() failed
//Somebody disconnected (or a read error occurred)
Logger::log(LogType::P2P, INFO, "Disconnected (id %d) from %s on port %d",
event.data.fd, inet_ntoa(data.addr.sin_addr), htons(data.addr.sin_port));
//Remove from epoll first (a closed fd can no longer be passed to epoll_ctl), then close it
epoll_ctl(epfd, EPOLL_CTL_DEL, event.data.fd, NULL);
close(event.data.fd);
mClientSocket.erase(event.data.fd);
}else{
//Packet on already connected client
Logger::log(LogType::P2P, DEBUG, "Packet on connected client (size %d)", valread);
data.rxBuf.resize(data.rxBuf.size()-4096+valread); //Shrink it again
//Process it
//TODO
//For the time being just print it
Logger::log(LogType::P2P, DEBUG, "Client: %s", data.rxBuf.data());
}
}
}
//Stop epoll
close(epfd);
std::unique_lock<std::mutex> lock(mDataMutex);
//Stop all sockets
for(auto& i : mClientSocket){
close(i.first);
}
mClientSocket.clear();
close(mMainSocket);
mMainSocket = -1;
Logger::log(LogType::P2P, INFO, "thread stopped");
return 0;
}
int P2P::getNumClients(){
std::unique_lock<std::mutex> lock(mDataMutex);
int num = 0;
for(auto& i : mClientSocket){
num += i.second.mHandshake;
}
return num;
}
} //namespace DBC
|
LEGAL AWARENESS ABOUT WOMEN'S RIGHTS: TEACHERS' PERSPECTIVE
Legal awareness can be a significant tool for attaining equality for women in all spheres of social and political life. Legal awareness, as the name suggests, is the knowledge of the laws, acts, rights and legal provisions offered to a country's citizens for their security, affluence and equal access to opportunities. This creates a respectable social environment where all are equal in the eyes of the law, leading towards the peace and prosperity of mankind. This study presents a comprehensive meta-analytical review of how legal awareness of one's rights provides greater stability in the day-to-day activities of any social structure. Its major objective is to interpret the knowledge teachers have about the laws made for the well-being of women in the country, and to what extent they perceive those laws to give way to major changes affecting women's position in society.
class APIParam(object):
    """Metadata describing a single request parameter of an API endpoint."""

    def __init__(
        self,
        name, param_help,
        data_type=None, choices=None, action=None, required=False, default=None, location=None, example=None
    ):
        self.name = name
        self.data_type = data_type
        self.param_help = param_help
        self.choices = choices
        self.action = action
        self.required = required
        self.default = default
        self.location = location
        self.example = example
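
# Example usage (illustrative values only; real endpoints define their own parameters):
# dag_id = APIParam(
#     name="dag_id", param_help="ID of the target DAG",
#     data_type=str, required=True, location="path", example="my_dag",
# )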
|
Why do These Match? Explaining the Behavior of Image Similarity Models
Explaining a deep learning model can help users understand its behavior and allow researchers to discern its shortcomings. Recent work has primarily focused on explaining models for tasks like image classification or visual question answering. In this paper, we introduce an explanation approach for image similarity models, where a model's output is a semantic feature representation rather than a classification. In this task, an explanation depends on both of the input images, so standard methods do not apply. We propose an explanation method that pairs a saliency map identifying important image regions with an attribute that best explains the match. We find that our explanations are more human-interpretable than saliency maps alone, and can also improve performance on the classic task of attribute recognition. The ability of our approach to generalize is demonstrated on two datasets from very different domains, Polyvore Outfits and Animals with Attributes 2.
Introduction
Many problems in artificial intelligence that reason about complex relationships can be solved by learning some feature embedding to measure similarity between images and/or other modalities such as text. Examples of these tasks include scoring fashion compatibility, image retrieval, or zero-shot recognition. Reasoning about the behavior of similarity models can aid researchers in identifying potential improvements, or help users understand the model's predictions, which can build trust. However, prior work on producing explanations for neural networks has primarily focused on explaining classification models and does not directly apply to similarity models. Given a single input image, such methods produce a saliency map which identifies pixels that played a significant role towards a particular class prediction (see Figure 1a for an example). On the other hand, a similarity model requires at least two images to produce a score. The interaction between both images defines which features are more important, so replacing just one of the images can result in identifying different salient traits.
For image pairs where similarity is determined by the presence or absence of an object, a saliency map may be sufficient to understand model behavior. However, when we consider the image pair in Figure 1b, highlighting the necklace as the region that contributes most to the similarity score is reasonable, but uninformative given that there are no other objects in the image. Instead, what is important is the fact that it shares a similar color with the ring. Whether these image properties or saliency maps are a better fit as an explanation is not determined by the image domain (i.e. attributes for e-commerce imagery vs. saliency for natural imagery), but instead by the images themselves. For example, an image can be matched as formal-wear with an explanation pointing to a shirt's collar, while two images of animals can match because both have stripes.

Figure 1: Existing explanation methods focus on image classification problems (left), whereas we explore explanations for image similarity models (right). While saliency maps which identify important image regions in both prior work and ours may explain similarity, for some examples they provide little useful information. To compensate, we also pair our saliency map with an attribute (e.g., golden) for a more informative explanation.
Guided by this intuition, we introduce Salient Attributes for Network Explanation (SANE). Our approach generates a saliency map to explain a model's similarity score, paired with an attribute explanation which identifies important image properties. SANE is a "black box" method, meaning it can explain any network architecture and only needs to measure changes to a similarity score when provided with different inputs. Unlike a standard classifier, which simply predicts the most likely attributes for a given image, our explanation method predicts which attributes are important for the similarity score predicted by a model. Predictions are made for each image in a pair, and allowed to be non-symmetric, e.g., the explanation for why the ring in Figure 1b matches the necklace may be that it contains "black", even though the explanation for why the necklace matches the ring could be that it is "golden." A different similarity model may also result in different attributes being deemed important for the same pair of images.
Our SANE model combines two major components: an attribute predictor and a saliency map generator. Given an input image, the attribute predictor outputs a confidence score for each attribute, in addition to an attribute activation map that indicates regions within the image associated with that attribute. We rank attributes as explanations for an image pair by how well their attribute activation map matches the saliency map produced by the generator. Our underlying assumption is that at least one of the attributes present in the image should be able to explain the similarity score assigned to the pair. Although we evaluate only the top-ranked attribute in our experiments, in practice more than one attribute could be used to explain a similarity score. We find that using saliency maps as supervision for the attribute activation maps during training not only improves the attribute-saliency matching, resulting in better attribute explanations, but also boosts attribute recognition performance using standard metrics like average precision.
We evaluate several candidate saliency map generation methods which are primarily adaptations of "black box" approaches that do not rely on a particular model architecture or require access to network parameters to produce a saliency map. These methods generally identify important regions by measuring a change in the output class score resulting from some perturbation of the input image. Similarity models, however, typically rely on a learned embedding space to reason about relationships between images, where proximity between points or the lack thereof indicates some degree of correspondence. An explanation system for embedding models must therefore consider how the distances between embedded points, and thus their similarity, change based on perturbing one or both of the pair of input images. We explore two strategies for adapting these approaches to our task. First, we manipulate just a single image (the one we wish to produce an explanation for) while keeping the other image fixed. Second, we manipulate both images to allow for more complex interactions between the pair. Additional discussion on the ramifications of this choice and details of the saliency methods can be found in Section 3.2.
Our paper makes the following contributions: 1) we provide the first study of explaining the behavior of image similarity models; 2) we propose a novel explanation approach that combines saliency maps and attributes, which to our knowledge is the first explanation work to use attributes; 3) we validate our method with both automatic metrics and a user study on two diverse datasets, and find that it produces more informative explanations and also improves attribute recognition performance.

Figure 2: Attribute Model Overview. We use the saliency maps used to explain why two items are considered similar to provide supervision for the attribute heatmaps. During training, each saliency map produced by the generator is encouraged to match at least one ground truth attribute's heatmap.
Related Work
Saliency-based Explanations. Saliency methods can generally be split into "white box" and "black box" approaches. "White box" methods assume access to internal components of a neural network, either in the form of gradients or activations of specific layers. Most of them produce a saliency map by using some version of backpropagation from class probability to an input image. In contrast, "black box" approaches require no knowledge of the internals (e.g. weights, gradients) of the models. These methods obtain saliency maps by perturbing the input in a predefined way and measuring the effect of it on the model output, such as class score. We adapt and compare three "black box" and one "white box" methods for our saliency map generator in Figure 2. "Black box" approaches include a Sliding Window, which masks image regions sequentially, and Randomized Input Sampling for Explanations (RISE), which masks random sets of regions. Both measure the effect removing these regions have on the class score. LIME first obtains a super-pixel representation of an image. Super-pixel regions are randomly deleted, and their importance is estimated using Lasso. "White box" Mask learns a saliency map directly by using different perturbation operators and propagating the error to a low resolution mask. Note that all the methods discussed above (including the four we adapt) do not operate directly on similarity models, which we will discuss further in Section 3.2.
Natural Language-based Explanations. Instead of producing saliency maps, which can sometimes be difficult to interpret, researchers have explored methods of producing text-based explanations. These include methods which justify a model's answer in the visual question answering task, rationalize the behavior of a self-driving vehicle, or describe why a category was selected in fine-grained object classification. Recently, Hendricks et al. leveraged attributes to correct mistakes in text-based explanations for fine-grained object classification. However, their goal is to justify a model's decision by pointing to evidence rather than capturing a model's behavior. Lad et al. used human-generated attribute explanations describing why two images are similar or dissimilar as guidance for image clustering. Our approach could be used to automatically generate these explanations rather than relying on human feedback.
Salient Attributes for Network Explanations (SANE)
We are given a fixed model that predicts the similarity between two images, and must explain why a query image is similar to a reference image. While typical models for predicting similarity are learned from data, using an embedding method and a triplet loss, our approach is agnostic as to how the model being explained is built. Our method consists of two components: the attribute explanation model (Section 3.1), and the saliency map generator (Section 3.2). Although we train a CNN to produce attribute annotations, the image similarity model we wish to produce explanations for is kept fixed. At test time, one recovers a saliency map for the match from the query image in a pair, then uses the attribute explanation model to determine which attribute explains the map (Section 3.3).
Attribute Explanation Model
Suppose we have access to pairs of images $(I_r, I_q)$. Here, $I_r$ denotes a reference image and $I_q$ a query image. We wish to obtain an explanation for the match between $I_r$ and $I_q$. Associated with each pair is a saliency map $m_q$ produced by a saliency map generator as described in Section 3.2. Note that saliency is a relation that is not symmetric: if we were to produce an analogous $m_r$ by swapping the query and the reference image, $m_r$ would almost surely differ from $m_q$. Finally, assume we have access to binary attribute annotations $a_i$, $i = 1, \ldots, A$, and let $a^{gt} \in \{0, 1\}^A$ be the set of ground truth attribute annotations for a given query image. If no attribute annotations are provided, an attribute discovery method could be employed; we explore using an attribute discovery method in the appendix.
Our attribute explanation model produces confidence scores $\hat{a} \in \mathbb{R}^A$ for $I_q$. Unlike a standard attribute classifier, however, our goal is not just to predict the most likely attributes in $I_q$, but rather to identify which attributes contribute the most to the similarity score $s(I_r, I_q)$ produced by the similarity model we wish to obtain explanations for. To accomplish this, we associate with each attribute $a_i$ an attribute activation map $n_i$ representing a downsampled mask of an image that identifies prominent regions in $I_q$ for that attribute. The attribute activation maps are learned by encouraging the saliency map $m_q$ to match one of the attribute activation maps corresponding to the ground truth attributes $a^{gt}$ in $I_q$ (see Figure 2 for an overview). Our underlying assumption is that at least one of the ground truth attributes of $I_q$ should be able to explain why $I_q$ is similar to $I_r$. Thus, at least one of the attribute activation maps $n_i$ should closely resemble the saliency map for the match, $m_q$.
Each attribute confidence score is obtained using a global average pooling layer on its attribute activation map followed by a softmax activation function. The attribute explanation network is trained using a Huber loss, sometimes referred to as a smooth $\ell_1$ loss, which helps encourage sparsity in the predictions. More formally, given a set of confidence scores $\hat{a}$ and attribute labels $a^{gt}$, our loss is

$$\mathcal{L}_{cls}(\hat{a}, a^{gt}) = \sum_{i=1}^{A} z_i, \qquad z_i = \begin{cases} \frac{1}{2}\left(\hat{a}_i - a^{gt}_i\right)^2 & \text{if } \left|\hat{a}_i - a^{gt}_i\right| < 1, \\ \left|\hat{a}_i - a^{gt}_i\right| - \frac{1}{2} & \text{otherwise.} \end{cases}$$

Note that multiple attributes can be present in the image; note also that this loss operates on attributes, not attribute activation maps. Since the confidence scores sum to one (due to the softmax function), we scale a binary label vector by the number of ground truth attributes $A_{gt}$ (e.g., if there are four attributes for an image, its label would be 0.25 for each ground truth attribute, and zero for all others).
Leveraging saliency maps during training. Rather than simply hoping our attribute activation maps match a saliency map, we explicitly encourage attributes which are useful in explaining the predictions of an image similarity model. Some ground truth attributes may be irrelevant, however, and the rankings of likely attributes for an image may change depending on what it is compared to. We obtain a set of regions that may be important to the decisions of an image similarity model by generating a set $M_q$ of up to $K$ saliency maps, one for each of up to $K$ similar reference images. For the image under consideration, we also construct a set of attribute activation maps $N^{gt}$ corresponding to each ground truth attribute. Then, for each saliency map we find its best match in $N^{gt}$. We match saliency maps to attributes rather than the other way around since not all annotated attributes are necessarily relevant to the explanation of $s(I_r, I_q)$. We use an $\ell_2$ loss between each saliency map and its selected attribute activation map, i.e.,

$$\mathcal{L}_{sal} = \sum_{m \in M_q} \min_{n \in N^{gt}} \left\| m - n \right\|_2^2.$$

Combined with the attribute classification loss, our model's complete loss function is

$$\mathcal{L} = \mathcal{L}_{cls} + \lambda \mathcal{L}_{sal},$$

where $\lambda$ is a scalar parameter. See appendix for implementation details and parameter values.
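To make the objective concrete, a minimal PyTorch-style sketch of both terms is given below; the tensor shapes and the helper name sane_loss are illustrative assumptions rather than our exact implementation.

import torch.nn.functional as F

def sane_loss(a_hat, a_gt, saliency_maps, gt_attr_maps, lam=5e-3):
    # a_hat: (A,) predicted confidences; a_gt: (A,) scaled binary labels
    # saliency_maps: (K, H, W); gt_attr_maps: (G, H, W) for the ground truth attributes
    cls = F.smooth_l1_loss(a_hat, a_gt, reduction="sum")            # Huber / smooth-L1 term
    # Match each saliency map to its closest ground truth attribute activation map
    diffs = saliency_maps.unsqueeze(1) - gt_attr_maps.unsqueeze(0)  # (K, G, H, W)
    sq = diffs.pow(2).flatten(2).sum(-1)                            # (K, G) squared L2 distances
    sal = sq.min(dim=1).values.sum()                                # best match per saliency map
    return cls + lam * sal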
Saliency Map Generator
A straightforward approach to producing a saliency map is to manipulate the input image by removing image regions and measuring the effect this has on the similarity score. If a large drop in similarity is measured, then the region must be of significance to the score. If almost no change was measured, then the model considers the image region irrelevant. The saliency map is generated from this approach by averaging the similarity scores for each pixel location over all instances where it was removed from the input. The challenge then is to determine how to manipulate the input image to discover these important regions. We adapt and compare four saliency methods: Sliding Window, RISE, LIME, and Mask. We now describe how we adapt these models for our task; additional details on each method can be found in the appendix.
Computing similarity scores. Each saliency method we compare was designed to operate on a single image and to measure the effect manipulating the image has on the prediction of a specific object class. However, an image similarity model's predictions are made for two or more images. Let us consider the case described in Section 3.1 where we are just comparing two images, a query image (i.e. the image we want to produce an explanation for), and a reference image, although our approach extends to consider multiple reference images. Even though we do not have access to a class label, we can measure the effect manipulating an image has on the similarity score between the query and reference images. Two approaches are possible: manipulate both images, or manipulate only the query image.
Manipulating both images would result in NM forward passes through the image similarity model (for N, M the number of query and reference image manipulations, respectively), which is prohibitively expensive unless M << N. But we need only an accurate saliency map for the query image, and so we set M << N in our experiments. There is another danger: for example, consider two images of clothing items that are similar if either they both contain or do not contain a special button. Masking out the button in one image and not the other would cause a drop in similarity score, but masking out the button in both images would result in high image similarity. These conflicting results could make accurately identifying the correct image regions contributing to a decision difficult.
The alternative is to manipulate the query image alone, i.e. keep a fixed reference image.
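As an illustration, a minimal NumPy sketch of this fixed-reference strategy follows; the sim callback, window size, and stride are placeholder assumptions, not the exact settings used in our experiments.

import numpy as np

def occlusion_saliency(sim, query, reference, window=32, stride=16):
    # sim(q, r) is assumed to return the similarity model's score for an image pair
    h, w = query.shape[:2]
    base = sim(query, reference)
    heat = np.zeros((h, w))
    counts = np.zeros((h, w))
    for y in range(0, h - window + 1, stride):
        for x in range(0, w - window + 1, stride):
            occluded = query.copy()
            occluded[y:y + window, x:x + window] = 0  # remove this region
            drop = base - sim(occluded, reference)    # a large drop marks an important region
            heat[y:y + window, x:x + window] += drop
            counts[y:y + window, x:x + window] += 1
    return heat / np.maximum(counts, 1)               # average score change per pixel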
Selecting Informative Attributes
At test time, given a similarity model and a pair of inputs, SANE generates a saliency map and selects an attribute to show to the user. We suspect that not all attributes annotated for a dataset may prove to be useful in explaining the decisions of every image similarity model. We take into account how useful each attribute is at explaining predictions made by a similarity model using held out data. First we count how often an attribute was the best explanation for a pair of images in the validation set. Then, we rank potential attribute explanations using a weighted combination of the attribute confidence score $\hat{a}$, how well the attribute activation map $n$ matches the generated saliency map $m_q$, and the prior probability $p$ that each attribute is the best explanation for an image pair. The explanation score of attribute $i$ is given by

$$e_i = \phi_1 \hat{a}_i + \phi_2\, d_{cos}(n_i, m_q) + \phi_3\, p_i,$$

where $d_{cos}$ denotes cosine similarity, and $\phi_{1-3}$ are scalar parameters estimated using grid search on held out data.
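A small sketch of this selection step is shown below; the array shapes and function name are assumptions for illustration only.

import numpy as np

def explanation_scores(a_hat, attr_maps, saliency, prior, phis):
    # a_hat: (A,) confidences; attr_maps: (A, H, W); saliency: (H, W); prior: (A,)
    m = saliency.ravel()
    scores = np.empty(len(a_hat))
    for i, attr_map in enumerate(attr_maps):
        n = attr_map.ravel()
        d_cos = n @ m / (np.linalg.norm(n) * np.linalg.norm(m) + 1e-8)  # cosine similarity
        scores[i] = phis[0] * a_hat[i] + phis[1] * d_cos + phis[2] * prior[i]
    return scores  # the argmax is the attribute shown to the user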
Experiments
Datasets. We evaluate our approach using two datasets from different domains to demonstrate its ability to generalize. The Polyvore Outfits dataset consists of 365,054 fashion product images annotated with 205 attributes and composed into 53,306/10,000/5,000 train/test/validation outfits. The Animals with Attributes 2 (AwA) dataset consists of 37,322 natural images of 50 animal classes annotated with 85 attributes, and is split into 40 animal classes for training, and 10 used at test time. To evaluate our explanations we randomly sample 10,000 ground-truth (query, reference) pairs of similar images for each dataset from the test set.
Image Similarity Models. For the Polyvore Outfits dataset we use the type-aware embedding model released by Vasileva et al. This model captures item compatibility (i.e. how well two pieces of clothing go together) using a set of learned projections on top of a general embedding, each of which compares a specific pair of item types (i.e. a different projection is used when comparing a top-bottom pair than when comparing a top-shoe pair). For AwA we train a feature representation
Saliency Map Evaluation
Metrics. Following Petsiuk et al., we evaluate the generated saliency maps using insertion and deletion metrics which measure the change in performance of the model being explained as pixels are inserted into a blank image, or deleted from the original image. For our task, we generate saliency maps for all query images, and insert or delete pixels in that image only. If a saliency map correctly captures the most important image regions, we should expect a sharp drop in performance as pixels are deleted (or a sharp increase as they are inserted). We report the area under the curve (AUC) created as we insert/delete pixels at a rate of 1% per step for both metrics. We normalize the similarity scores for each image pair across these thresholds so they fall in a [0, 1] interval.
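As a sketch, the deletion variant of this metric can be computed as follows; sim is again an assumed callback, and the normalization mirrors the description above.

import numpy as np

def deletion_auc(sim, query, reference, saliency, step=0.01):
    order = np.argsort(saliency.ravel())[::-1]        # most salient pixels first
    flat = query.reshape(-1, query.shape[-1]).copy()
    n = len(order)
    scores = []
    for k in range(0, n + 1, max(1, int(step * n))):  # delete 1% of pixels per step
        flat[order[:k]] = 0
        scores.append(sim(flat.reshape(query.shape), reference))
    scores = np.asarray(scores, dtype=np.float64)
    scores = (scores - scores.min()) / (np.ptp(scores) + 1e-8)  # normalize to [0, 1]
    return float(np.trapz(scores, dx=1.0 / (len(scores) - 1)))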
Results. Table 1 compares the different saliency map generation methods on the insertion and deletion tasks. We found no consistent winner between the two datasets, with RISE performing best on the Polyvore Outfits dataset and LIME obtaining best performance on the AwA dataset. This is not surprising, since LIME learns which super-pixels contribute to a similarity score. For AwA this means that parts of the animals could be segmented out and deleted or inserted in their entirety before moving onto the next super-pixel. On Polyvore Outfits, however, the important components may be along the boundaries of objects (e.g. the cut of a dress), something not well represented by super-pixel segmentation. Although Mask does not perform as well as other approaches, it tends to produce the most compact regions of salient pixels as it searches for a saliency map with minimal support (see our qualitative comparison of the different methods provided in Figure 3). Notably, we generally obtained better performance when the reference image was kept fixed and only the query image was manipulated. This may be due to the issues from noisy similarity scores as discussed in Section 3.2 and suggests extra care must be taken when manipulating both images.
Attribute Prediction Evaluation
Metrics. To begin, we report the overall performance of our attribute model using mean average precision (mAP) on the standard task of attribute recognition computed over all images in the test set. Two additional metrics are used to evaluate our attribute explanations using the (query, reference) image pairs used in the saliency map experiments. First, we measure the accuracy of the top scoring attribute explanation for each image (i.e. is the returned attribute among the ground truth annotations?) Second, we simulate the effect that removing the attribute from the image would have on the similarity score. After generating the attribute explanation for the query image, we find the most similar image to the query in the test set that does not contain that attribute. For AwA we use the ground truth attribute annotations to identify if an image has an attribute. For Polyvore Outfits, whose attributes are sparsely labeled, we also ensure that the retrieved image has low confidence in the attribute used for an explanation. After retrieving this new image, we compute its similarity with the reference image and return the difference in similarity compared with the original (query, reference) pair. Intuitively, if an attribute was critical for an explanation, then the similarity score should drop more than if a different attribute was selected. Examples of this process can be found in the appendix.
Figure 3: Comparison of saliency maps for different methods (Sliding Window, LIME, Mask, RISE) on a reference image and query image pair, when using a fixed reference image.

Compared methods. We provide three baseline approaches: a random baseline, a simple attribute classifier (i.e. no attribute activation maps), and a modified version of FashionSearchNet, an attribute recognition model which also creates a weakly-supervised attribute activation map for comparison. Additional details on these models can be found in the appendix.
Results. Table 2 compares the performance of the attribute models on our metrics. Our attribute removal metrics demonstrate the effectiveness of our attribute explanations, with our model which matches saliency maps obtaining the best performance on both datasets. This shows that when we "remove" the attribute predicted by SANE from the image, it produces the largest drop in similarity score compared to baselines. We also see that training our attribute model so it can produce explanations performs best even on the standard attribute recognition task measured with mAP. The top ranked attribute also becomes significantly more accurate when matching it to the saliency map produced for the query image, increasing top-1 accuracy by almost 2% for Polyvore Outfits and 8.5% for AwA. We provide qualitative examples of our explanations in Figure 4. Generally, attributes tend to "shift" as the categories of items change: for instance, "bulbous" is often seen in examples like the hippopotamus example in the right column, but this became notably less common for categories like chimpanzee or Persian cat. Examples demonstrate that our explanations pass important sanity checks. Firstly, the explanation attribute is well-correlated with the localization of important pixels in the saliency map for each pair. Notice that "striped", "knitted" and "embroidered" on the left of Figure 4 are sensibly localized, and are also reasonable explanations for the match, while a more abstract explanation like "feminine" is linked to the open toe of the heel, the curve of the sole, and the ankle strap. Secondly, the similarity scores are lower for pairs that are more dissimilar: the second row on the right achieves the lowest similarity score, with the explanation for the model's decision being that the pig is a ground animal (while the whale is clearly not).
Note further that the explanations are non-trivial: they often differ from the most likely attribute in the query image, as predicted by a standard attribute classifier. In other words, our explanation model is indeed utilizing information from each pair of images and the saliency map characterizing the match to produce a sensible interpretable explanation. Lastly, it is a sensible sanity check to ask, does the same query image matched with different reference images result in different explanations? Our qualitative results demonstrate our explanation system indeed has this desirable property: note that in the bottom two rows on the right of Figure 4, the property that makes the hippopotamus similar to the leopard is that it is brown, but the property that makes it similar to the seal is that it is bulbous. We include more examples to support these observations in the appendix.
In Figure 5 we show an example of how directly removing the attribute predicted as the explanation can affect similarity (possible here because the attribute is a color). Here we see that when we modify the white dress to be a different color, the similarity score drops significantly. The only exception is when we make the dress the same color (black) as the attribute explanation of the pants it is being compared to. This demonstrates in a causal way how our predicted attributes can play a significant role in the similarity scores.

Figure 5: Example of the effect replacing the attribute used as an explanation of the model's behavior has on image similarity score (higher score means items are more compatible).
User Study
To evaluate the quality of our explanations, we perform a user study with 20 subjects in the age range 14-50. Using a web form, we present 10 unique questions per subject per dataset of the type "What property of item B better explains why it matches item A?" for randomly selected pairs of similar images, and ask participants to select between the attribute provided by our explanation model, or a random one. We report the percentage of users that favored our explanations vs. random in Table 3. On both datasets, subjects prefer our explanations to random by a significant margin, with a prominent difference between the male and the female user pool on the Polyvore Outfits dataset.
Conclusion
In this paper we introduced SANE, a method of explaining an image similarity model's behavior by identifying attributes which were important to the similarity score, paired with saliency maps indicating important image regions. A user study confirmed that humans commonly agree with the attributes selected by SANE, supplementing our comparison using machine-generated metrics. In future work we believe closely integrating the saliency generator and attribute explanation model, enabling each component to take advantage of the predictions of the other, would help improve performance.
A Candidate Saliency Map Generator Descriptions
In this section we provide additional details about each of the candidate saliency map generation methods used in our paper. We split these approaches into two groups: methods which analyze behavior solely through input manipulation (described in Section A.1) and those which use an optimization procedure to learn some parameters in combination with input manipulation (described in Section A.2). Please see Section 3.2 for a description of how these methods are adapted to our task. We also provide a runtime comparison of each approach in Table 4.
A.1 Saliency Maps by Input Manipulation
A straightforward approach to producing a saliency map is to manipulate the input image by removing image regions and measuring the effect this has on the similarity score. If a large drop in similarity is measured, then the region must be important to this decision. If almost no change was measured, then the model considers the image region irrelevant. The saliency map is generated from this approach by averaging the similarity scores for each pixel location over all instances where it was removed from the input. The challenge then is to determine how to manipulate the input image to discover these important regions.
Sliding Window. The first approach to removing regions of an image we shall discuss is a sliding window, where regions are sampled regularly across an image. There is a direct tradeoff, however, between how densely windows are sampled and the computational time it takes to do a forward pass through the network for each manipulated image. If windows are sampled too sparsely in the name of efficiency, important regions cannot be localized accurately. If regions are too densely sampled, then removing them might not make enough of a difference in the similarity score to take measurements accurately.
RISE. This method uses a Monte Carlo approach to generate saliency maps. A set of N random binary masks of size h × w is sampled where each element is independently set to 1 with probability p, and all other elements are set to 0. Typically these masks are much smaller than the input image, so they are upsampled using bilinear interpolation. This produces small continuous regions within the upsampled mask that can be used to manipulate the input image. To remove the fixed grid structure the masks are upsampled to larger than image size and then cropped randomly. Although this approach does require a significant number of random masks (we found 2,000 to be sufficient in our experiments), we found it significantly outperforms using a sliding window that samples a similar number of masks on our task.
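A sketch of this mask-generation step, using the parameters reported in Section B.2, is shown below; the exact upsampling and cropping details are assumptions.

import numpy as np
from scipy.ndimage import zoom

def rise_masks(n_masks=2000, grid=8, p=0.5, size=224):
    cell = int(np.ceil(size / grid))
    masks = np.empty((n_masks, size, size), dtype=np.float32)
    for i in range(n_masks):
        small = (np.random.rand(grid, grid) < p).astype(np.float32)
        big = zoom(small, (cell * (grid + 1)) / grid, order=1)  # bilinear upsample past image size
        dy, dx = np.random.randint(0, cell, size=2)             # random crop removes the fixed grid
        masks[i] = big[dy:dy + size, dx:dx + size]
    return masks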
A.2 Learned Saliency Maps
We shall now discuss methods which combine input manipulation with an optimization procedure used to directly learn a saliency map. As in Section A.1, we compare generating saliency maps for a single query image at a time using a fixed reference image as well as generating a saliency map by manipulating both the query and reference images.
LIME. Rather than masking regions without any concern over the continuity of a region, this approach to generating saliency maps operates over a superpixel segmentation of an image. Images are manipulated by randomly deleting superpixels in the image. After sampling N manipulated inputs, the importance of each superpixel is estimated using Lasso. Finally, important regions are selected using submodular optimization.
Mask. In this approach a low resolution saliency map is directly learned using stochastic gradient descent and upsampled to the image size. Instead of manipulating an image by just deleting regions as in other methods, two additional perturbation operators are defined: adding Gaussian noise and image blurring. To help avoid artifacts when learning the mask, a total-variation norm is used in addition to an L1 regularization to promote sparsity. This approach removes the reliance on superpixels and tends to converge in fewer iterations than LIME, although it is considerably slower in practice than other approaches (see Table 4). That said, one advantage it does have over other approaches is the ability to learn the saliency map for both the query and reference image jointly (which we take advantage of when we are not using a fixed reference image).
B.1 Compared Methods
In addition to a random baseline, we provide two models for comparison in our attribute experiments in Section 4.2 of the paper. First, we train a simple attribute classifier (i.e. no attribute activation map). Second, we use a modified version of FashionSearchNet, which was designed for fashion search using attribute information. This network uses an attribute activation map to identify and extract a region of interest for each attribute. These extracted regions are fed into two branches consisting of three fully connected layers which are trained for both attribute classification and image retrieval. We remove the image retrieval components, and use the same 18-layer ResNet base image encoder used for our other methods (replacing AlexNet, which was used for the image encoder in the original paper). This provides a simple baseline and a model with a generic weakly-supervised attribute activation map for comparison.
B.2 Saliency Map Generator Details
Sliding Window. When manipulating the inputs of the reference image, we apply 625 occlusion windows each covering a square region of about 12% of image area. When manipulating both images we apply 36 occlusion windows to the reference image.
RISE. For both datasets we randomly sample 2,000 random masks upsampled from an 8 × 8 mask, with the probability of preserving a region set to 0.5. When manipulating the inputs of the reference image, we generate 30 random masks.
LIME. We generate LIME saliency maps using 1,000 samples.
Mask. We learn a 14 × 14 perturbation mask for both datasets. We train the mask for 500 iterations using Adam with a learning rate of 0.1.
B.3 SANE Details
Due to its efficiency (see Table 4) and overall good performance (see Table 1 in the paper), we selected the fixed-reference RISE as our saliency map generator. For each training image, we sample up to five similar images using the ground truth annotations of each dataset and generate saliency maps using each sampled image as the reference image. We train our attribute model for 300 epochs using Adam with a learning rate of 5e-4 and set λ = 5e-3 in Eq. (2) from the paper. After each epoch, we computed mAP on the validation set and kept the best performing model according to this metric. At test time φ1-3 are set to (0.1, 0.9, 0.05) for Polyvore Outfits and (0.4, 0.6, 0.05) for AwA. Effectively, map matching obtained the largest weight on both datasets, followed by attribute confidence, with the prior only taking a small weight.
We provide an example of the attribute removal process in Figure 6. After identifying an attribute to remove in an image, we search for the most similar image to the input from a database that doesn't contain the input attribute. On the left side of Figure 6 we see that properties like color are largely retained when the attribute being removed is not color-based. In the returned AwA images on the right side of Figure 6 we see how some attributes can lead to significant changes in the images or almost none at all, depending on the attribute selected for removal.
In Section 3.3 we discuss how we estimate how likely each attribute is a "good" explanation in held-out data. This is used as a prior to bias our attribute selections towards attributes that are known to be good attribute explanations. In Figure 7 we show the prior for the AwA dataset. Note, however, that this prior would change for a different image similarity model. For example, if the image similarity model was more biased towards colors, then we would expect to see the likelihood for "black," "brown," and "gray" to increase.
B.4 User Study Examples
Users were tasked with selecting an attribute which best describes why two items are similar. One attribute was selected by our model, and the other was selected at random. An example of the questions presented to users for each dataset is provided in Figure 8. The results are found in Section 4.2.

Figure 6: Examples of the attribute removal process used to evaluate how good an attribute is as an explanation (panels show an "Attribute to Remove", an "Input Image", and a "Returned Image" for attributes such as "studded", "lace", "silver", "walks", "active", and "fast"). We measure the similarity of the input image and some reference image as well as between the returned image and the reference image. If a large drop in similarity is measured then the attribute is considered a "good" explanation. If similarity stays about the same or increases, the attribute is considered a "poor" explanation, e.g., trying to remove "active" from the pandas on the right.

Figure 7: The likelihood each attribute in the AwA dataset was identified as the best attribute for an image pair on held-out data. We use this prior in Section 3.3 as a bias in our attribute selection procedure.
C Discovering Useful Attributes
For datasets without attribute annotations, or those where the annotated attributes don't cover the extent of the visual attributes present in the dataset (i.e. there are many unannotated attributes), we propose a method of discovering attributes that are useful for providing model explanations. An attribute that is useful for explanations would commonly appear in the high importance regions of saliency maps. When generating saliency maps for a query image, if many reference images attend to the same region of the query image then it is likely they are all matching to it for similar reasons (i.e. there may be some attribute that they share which matches the query). Given this observation, we discover attributes using the following saliency-based procedure (a minimal sketch of the final clustering step appears after this list):
1. Obtain K similar images for query image q using k-NN.
2. Generate a saliency map over q for each of the similar (reference) images.
3. Keep only those reference images which have their saliency peaks in the most common location (such as a unit square in a 7 × 7 grid) and pick the top N of them that have the highest similarity.
4. For each reference image, generate its saliency map with q and crop a 30 × 30 patch around the peak saliency region in the reference image.
5. Upsample all the generated patches to full image resolution and get their embeddings.
6. Cluster the patches produced for multiple queries q. Each cluster represents an attribute. If multiple patches were extracted from an image and they got assigned to different clusters, this image would be labeled with multiple attributes.
Figure 9a illustrates the clustering produced by this procedure for a set of queries from the Polyvore Outfits dataset.
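As an illustration, the clustering in step 6 could be implemented as follows; the embedding extractor is assumed to exist already, and the cluster count is a placeholder assumption.

import numpy as np
from sklearn.cluster import KMeans

def discover_attributes(patch_embeddings, num_attributes=20):
    # patch_embeddings: (P, D) array of embeddings for the upsampled saliency-peak patches
    km = KMeans(n_clusters=num_attributes, n_init=10).fit(patch_embeddings)
    return km.labels_  # patches sharing a cluster label define one discovered attribute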
To evaluate this approach we compare it to randomly assigning images to clusters and to clustering based on their own embeddings, disregarding the saliency of image regions (Figure 9b). Saliency-based attribute discovery works best among the three unsupervised methods for Polyvore Outfits data, but full-frame clustering outperforms it for the AwA dataset (Table 5). We suspect the full-frame clustering works better for AwA since it considers the background more than the patch-based method (Polyvore Outfits images typically have white backgrounds). In addition, our discovered attributes would likely be noisier due to the similarity model focusing on the background patches in some images as well. Although our initial results are promising, attempting to discover attributes useful for explanations warrants additional investigation.
Evidence-based dentistry: a clinician's perspective.
Evidence-based dentistry is a discipline that provides the best explicit evidence to dentists and their patients to support shared decision-making. Currently, dentists are being trained and directed to adopt the role of translational researchers in developing evidence-based dental practices. Practically, evidence-based dentistry is not usable in its current form for the provision of the labor-intensive services that characterize current dental practice. The purpose of this article is to introduce a model of evidence-based dental practice. This model conceptualizes a team approach to explaining problems and solutions to change current dental practice. These changes constitute an evidence-based dental practice that involves the electronic chart, a centralized database, knowledge management software, and personnel in optimizing effective oral health care for dental patients.
// Return the next non-blank line in the slice of strings, trimmed.
// This line and any preceding blank lines are removed from the slice.
func NextNBLine(lines *[]string) (s string, err error) {
	if lines == nil {
		// Treat a nil slice pointer the same as an exhausted one, so
		// callers always get an error rather than a silent empty string.
		return "", ExhaustedStringArray
	}
	for len(*lines) > 0 {
		s = strings.TrimSpace((*lines)[0])
		*lines = (*lines)[1:]
		if s != "" {
			return s, nil
		}
	}
	return "", ExhaustedStringArray
}
/**
* Retrieve the RdSpanningCombinatorialIterator Instance associated with the Underlying Vector Space
*
* @param aR1CV Array of R^1 Combinatorial Vectors
*
* @return The RdSpanningCombinatorialIterator Instance associated with the Underlying Vector Space
*/
public static final RdSpanningCombinatorialIterator Standard (
	final org.drip.spaces.tensor.R1CombinatorialVector[] aR1CV)
{
	if (null == aR1CV) return null;

	int iDimension = aR1CV.length;

	if (0 == iDimension) return null;

	int[] aiMax = new int[iDimension];

	for (int i = 0; i < iDimension; ++i) {
		if (null == aR1CV[i]) return null; // Guard against null entries before dereferencing

		aiMax[i] = (int) aR1CV[i].cardinality().number();
	}

	try {
		return new RdSpanningCombinatorialIterator (aR1CV, aiMax);
	} catch (java.lang.Exception e) {
		e.printStackTrace();
	}

	return null;
}
/**
* This API is used to recover password via selected recovery option.
*
* @param recoveryRequest Recovery request. (required)
* @param tenantDomain Tenant Domain which user belongs. Default `carbon.super` (optional)
* @param headers Any additional headers to be embedded. (optional)
* @return Recovery response.
* @throws ApiException If fails to make API call.
*/
public RecoveryResponse recoverPassword(RecoveryRequest recoveryRequest, String tenantDomain,
Map<String, String> headers) throws ApiException {
String localVarPath = "/password/recover".replaceAll("\\{format\\}", "json");
return recover(recoveryRequest, tenantDomain, headers, localVarPath);
} |
// Infer upstream RST_STREAM status code from downstream HTTP/2
// error code.
uint32_t infer_upstream_rst_stream_status_code(uint32_t downstream_error_code) {
if (downstream_error_code == NGHTTP2_REFUSED_STREAM) {
return SPDYLAY_REFUSED_STREAM;
} else {
return SPDYLAY_INTERNAL_ERROR;
}
} |
One of the easiest-to-use email encryption apps ever built can trace its roots right back to the laboratories of the National Security Agency.
Exactly one year to the day since Edward Snowden leaked tens of thousands of documents that shed light on the vast scope of cybersurveillance that the agency conducts around the world, former NSA security architect Will Ackerly is using the Snowden-fueled boom in privacy awareness to build a multimillion dollar business.
He sells strong, usable encryption to everyone from moms and pops to big business—and he’s using technology he invented at the NSA to do it.
Virtru, Ackerly’s Washington-based security firm that launched in January, already has one of the best encryption apps available to the public. It’s easy to use, requires no complicated keys, and takes less than a minute to get going. Unlike Google’s forthcoming End-To-End encryption app, you can start using Virtru right now. The program, which has 10,000 regular users after six months on the market, is simpler to use and comes with more features than Google’s extension probably ever will.
Now, to mark the Snowden anniversary, Virtru is releasing its Android app, expanding its service into the a mobile market. Virtru is also available for Apple iOS devices, as a plugin for Firefox and Chrome, and for Outlook and OS X Mac Mail.
Is it secure enough to trust your most sensitive data to? Because Virtru produces encryption keys on its cloud servers, it is undeniably not as secure as locally running PGP encryption on your own computer. To be sure, however, Virtru is exponentially better than plain old email.
Furthermore, on the day that the Reset the Net campaign is telling millions of activists that a mass encryption movement is key to fighting global surveillance, the singularly convenient Virtru app has the potential to attract large swaths of users who will likely never learn or use relatively complicated PGP encryption. Virtru can introduce thousands to the world of securely encrypted communications.
Virtru’s most significant limitation—that keys can only be stored in the cloud—is not a permanent pitfall. Ackerly says local storage is coming, so the program is likely only going to get more secure over the coming months, as development continues.
Above all else, Virtru deftly solves the single most pressing issue in encryption today: Ease of use.
Encryption powerful enough to stonewall cybercriminals and intelligence agencies has been freely available to the public for a long time. But, as far as the general public goes, few want to use it because such tools are often too complicated for most people to bother with.
“One of the biggest insights I had [at the NSA],” Ackerly said, “was that really good fundamental encryption technology is out there, but if you don’t deploy it in a way that is really easy for people to get their jobs done and so people don’t have to change the way they do their job, then you haven’t really deployed it at all. I want to built it in an easy-to-use tool used every day in your life.”
Based on our tests, the Virtru user experience is so easy and smooth—in that respect, it’s unparalleled in the world of strong encryption—that it really does have mass adoption potential.
In 2008, while at the NSA, Ackerly invented the Trusted Data Format, an open-source file format that enables features that other encryption protocols can’t match. As a result, encrypted attachments, forward protection, access control, expiration dates, message revoking, and message tracking all come with the free Virtru package.
Anyone who has ever sent a sensitive document or photo over email will love the efficient protection offered here.
Instead of PGP (a popular encryption technology), Virtru uses the Advanced Encryption Standard (AES-256), a protocol used by the United States government that is believed to be unbreakable. It's faster and simpler than PGP but, combined with Virtru's cloud-based key management, less secure for personal communications.
Ackerly, who serves as chief technology officer at Virtru, cofounded the company right after an eight-year stint on both the offensive and defensive side of the NSA’s cyberwars. His last two years on duty were spent as a cloud security architect for the spy agency, a job that entails collecting, analyzing, and widely sharing enormous amounts of data within the U.S. government while simultaneously protecting it from global adversaries.
Ackerly runs the company with his brother, a former Bush White House policy maker. Most of Virtru’s employees and contractors have NSA backgrounds as well. These strong connections to the feds understandably raise eyebrows. And while he used the Snowden anniversary as a launchpad for Virtru’s Android app, Ackerly’s feelings on the leaker are less clear cut.
“I’m torn,” he told the Daily Dot when asked about Snowden’s leaks and activism. “I think the NSA has a very important mission. I experienced this particularly working overseas with the army.”
Signals intelligence, the sort gathered by the terabyte by the NSA, is “the biggest weapon to help protect our soldiers,” Ackerly said. “If [Snowden] was trying to help the U.S.A., I think he made the wrong choice. There are some things in the system—the framework of laws and regulations need fixing, but I don’t think he made the right move.”
Virtru is meant to secure your communications most of all against cybercriminals and foreign threats. And the tool’s development began even before Snowden ever spoke up. Ackerly says he saw a rising “trillion-dollar problem” of identity and intellectual property theft that he was uniquely suited to solve.
If the U.S. government, rather than hackers, wants to read your Virtru-encrypted emails without you knowing, the current build of Virtru won’t completely protect you. While a court order to both Virtru and your email provider will not reveal emails you sent or received in the past (because Virtru’s encryption keys are ephemeral and unretrievable), it would open up future communications to eavesdropping. And if gag orders are in place, you’ll never know that your supposedly secure communications have been compromised. That’s the inherent danger of encryption in the cloud.
Virtru has already set up a clever "canary in the coal mine," Ackerly insists, in order to warn against any potential gagged court orders they receive. If law enforcement forces them to give up a user's keys without notifying the user, Virtru says it will stop publishing its quarterly transparency reports. If police fight that subtle signal to users, Ackerly says he has the assurance of the American Civil Liberties Union and Electronic Frontier Foundation that the two organizations will support them.
“At that point, we would fight it all the way up,” Ackerly said.
While the current state of Virtru leaves something to be desired in terms of security, the user experience is unmatched. And Ackerly promises big things for the future of the program, including developments that are essential for the acceptance of this incredibly convenient tool into the world of information security.
Crucially, the code will be open sourced and the ability to store keys on your own computer will be given to all users, potentially creating the easy-to-use, decentralized security tool that so many encryption-advocates have been waiting for. When that happens, Virtru’s vast potential may finally be realized.
In just four months, Virtru has smartly set about solving the problem of getting the mainstream to adopt encryption. The cryptography is strong, the user experience is unmatched, and the legal foundation is solid.
At 10,000 users and growing, it’s clear that the program has caught on. As Ackerly targets businesses big and small, the market potential is clearly there.
However, only when Virtru goes open source and allows for local key storage can the combination of security, convenience, and growing global privacy awareness allow the program to be one of the possible sparks that leads to a real privacy renaissance.
Photo via .Bala/Flickr (CC BY-SA 2.0) |
<filename>plotly/examples/subplots.rs
use plotly::common::{Font, Side, Title};
use plotly::layout::{Axis, GridPattern, Layout, LayoutGrid, Legend, RowOrder};
use plotly::{Plot, Rgb, Scatter};
// Subplots
fn simple_subplot(show: bool) {
let trace1 = Scatter::new(vec![1, 2, 3], vec![4, 5, 6]).name("trace1");
let trace2 = Scatter::new(vec![20, 30, 40], vec![50, 60, 70])
.name("trace2")
.x_axis("x2")
.y_axis("y2");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
let layout = Layout::new().grid(
LayoutGrid::new()
.rows(1)
.columns(2)
.pattern(GridPattern::Independent),
);
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("simple_subplot")));
}
fn custom_sized_subplot(show: bool) {
let trace1 = Scatter::new(vec![1, 2, 3], vec![4, 5, 6]).name("trace1");
let trace2 = Scatter::new(vec![20, 30, 40], vec![50, 60, 70])
.name("trace2")
.x_axis("x2")
.y_axis("y2");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
let layout = Layout::new()
.x_axis(Axis::new().domain(&[0., 0.7]))
.y_axis2(Axis::new().anchor("x2"))
.x_axis2(Axis::new().domain(&[0.8, 1.]));
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("custom_sized_subplot")));
}
fn multiple_subplots(show: bool) {
let trace1 = Scatter::new(vec![1, 2, 3], vec![4, 5, 6]).name("trace1");
let trace2 = Scatter::new(vec![20, 30, 40], vec![50, 60, 70])
.name("trace2")
.x_axis("x2")
.y_axis("y2");
let trace3 = Scatter::new(vec![300, 400, 500], vec![600, 700, 800])
.x_axis("x3")
.y_axis("y3");
let trace4 = Scatter::new(vec![4000, 5000, 6000], vec![7000, 8000, 9000])
.x_axis("x4")
.y_axis("y4");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
plot.add_trace(trace3);
plot.add_trace(trace4);
let layout = Layout::new().grid(
LayoutGrid::new()
.rows(2)
.columns(2)
.pattern(GridPattern::Independent),
);
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("multiple_subplots")));
}
fn stacked_subplots(show: bool) {
let trace1 = Scatter::new(vec![0, 1, 2], vec![10, 11, 12]).name("trace1");
let trace2 = Scatter::new(vec![2, 3, 4], vec![100, 110, 120])
.name("trace2")
.x_axis("x2")
.y_axis("y2");
let trace3 = Scatter::new(vec![3, 4, 5], vec![1000, 1100, 1200])
.x_axis("x3")
.y_axis("y3");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
plot.add_trace(trace3);
let layout = Layout::new().grid(
LayoutGrid::new()
.rows(3)
.columns(1)
.pattern(GridPattern::Independent)
.row_order(RowOrder::BottomToTop),
);
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("stacked_subplots")));
}
fn stacked_subplots_with_shared_x_axis(show: bool) {
let trace1 = Scatter::new(vec![0, 1, 2], vec![10, 11, 12]).name("trace1");
let trace2 = Scatter::new(vec![2, 3, 4], vec![100, 110, 120])
.name("trace2")
.y_axis("y2");
let trace3 = Scatter::new(vec![3, 4, 5], vec![1000, 1100, 1200]).y_axis("y3");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
plot.add_trace(trace3);
let layout = Layout::new()
.y_axis(Axis::new().domain(&[0., 0.33]))
.legend(Legend::new().trace_order("reversed"))
.y_axis2(Axis::new().domain(&[0.33, 0.66]))
.y_axis3(Axis::new().domain(&[0.66, 1.]));
plot.set_layout(layout);
if show {
plot.show();
}
println!(
"{}",
plot.to_inline_html(Some("stacked_subplots_with_shared_x_axis"))
);
}
fn multiple_custom_sized_subplots(show: bool) {
let trace1 = Scatter::new(vec![1, 2], vec![1, 2]).name("(1,1)");
let trace2 = Scatter::new(vec![1, 2], vec![1, 2])
.name("(1,2,1)")
.x_axis("x2")
.y_axis("y2");
let trace3 = Scatter::new(vec![1, 2], vec![1, 2])
.name("(1,2,2)")
.x_axis("x3")
.y_axis("y3");
let trace4 = Scatter::new(vec![1, 2], vec![1, 2])
.name("{(2,1), (2,2)}")
.x_axis("x4")
.y_axis("y4");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
plot.add_trace(trace3);
plot.add_trace(trace4);
let layout = Layout::new()
.title(Title::new("Multiple Custom Sized Subplots"))
.x_axis(Axis::new().domain(&[0., 0.45]).anchor("y1"))
.y_axis(Axis::new().domain(&[0.5, 1.]).anchor("x1"))
.x_axis2(Axis::new().domain(&[0.55, 1.]).anchor("y2"))
.y_axis2(Axis::new().domain(&[0.8, 1.]).anchor("x2"))
.x_axis3(Axis::new().domain(&[0.55, 1.]).anchor("y3"))
.y_axis3(Axis::new().domain(&[0.5, 0.75]).anchor("x3"))
.x_axis4(Axis::new().domain(&[0., 1.]).anchor("y4"))
.y_axis4(Axis::new().domain(&[0., 0.45]).anchor("x4"));
plot.set_layout(layout);
if show {
plot.show();
}
println!(
"{}",
plot.to_inline_html(Some("multiple_custom_sized_subplots"))
);
}
// Multiple Axes
fn two_y_axes(show: bool) {
let trace1 = Scatter::new(vec![1, 2, 3], vec![40, 50, 60]).name("trace1");
let trace2 = Scatter::new(vec![2, 3, 4], vec![4, 5, 6])
.name("trace2")
.y_axis("y2");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
let layout = Layout::new()
.title(Title::new("Double Y Axis Example"))
.y_axis(Axis::new().title(Title::new("yaxis title")))
.y_axis2(
Axis::new()
.title(Title::new("yaxis2 title").font(Font::new().color(Rgb::new(148, 103, 189))))
.tick_font(Font::new().color(Rgb::new(148, 103, 189)))
.overlaying("y")
.side(Side::Right),
);
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("two_y_axes")));
}
fn multiple_axes(show: bool) {
let trace1 = Scatter::new(vec![1, 2, 3], vec![4, 5, 6]).name("trace1");
let trace2 = Scatter::new(vec![2, 3, 4], vec![40, 50, 60])
.name("trace2")
.y_axis("y2");
let trace3 = Scatter::new(vec![4, 5, 6], vec![40_000, 50_000, 60_000]).y_axis("y3");
let trace4 = Scatter::new(vec![5, 6, 7], vec![400_000, 500_000, 600_000]).y_axis("y4");
let mut plot = Plot::new();
plot.add_trace(trace1);
plot.add_trace(trace2);
plot.add_trace(trace3);
plot.add_trace(trace4);
let layout = Layout::new()
.title(Title::new("multiple y-axes example"))
.width(800)
.x_axis(Axis::new().domain(&[0.3, 0.7]))
.y_axis(
Axis::new()
.title(Title::new("yaxis title").font(Font::new().color("#1f77b4")))
.tick_font(Font::new().color("#1f77b4")),
)
.y_axis2(
Axis::new()
.title(Title::new("yaxis2 title").font(Font::new().color("#ff7f0e")))
.tick_font(Font::new().color("#ff7f0e"))
.anchor("free")
.overlaying("y")
.side(Side::Left)
.position(0.15),
)
.y_axis3(
Axis::new()
.title(Title::new("yaxis3 title").font(Font::new().color("#d62728")))
.tick_font(Font::new().color("#d62728"))
.anchor("x")
.overlaying("y")
.side(Side::Right),
)
.y_axis4(
Axis::new()
.title(Title::new("yaxis4 title").font(Font::new().color("#9467bd")))
.tick_font(Font::new().color("#9467bd"))
.anchor("free")
.overlaying("y")
.side(Side::Right)
.position(0.85),
);
plot.set_layout(layout);
if show {
plot.show();
}
println!("{}", plot.to_inline_html(Some("multiple_axes")));
}
fn main() -> std::io::Result<()> {
// Subplots
simple_subplot(true);
custom_sized_subplot(true);
multiple_subplots(true);
stacked_subplots(true);
stacked_subplots_with_shared_x_axis(true);
multiple_custom_sized_subplots(true);
// Multiple Axes
two_y_axes(true);
multiple_axes(true);
Ok(())
}
|
/**
* A DependencyKey is used as the value of setLocatorMap {@link #setLocatorMap} such that given a task we can
* quickly find the set of preceding tasks on which it depends. The id {@link DependencyKey#id} is then used to
* search the dependencyMap {@link #dependencyMap} in order to locate the rest of the tasks that rely on the
* completion of taskIds {@link DependencyKey#taskIds}.
*/
private static final class DependencyKey {
private static AtomicInteger counter = new AtomicInteger(0);
private Set<Integer> taskIds;
private int id;
public DependencyKey(Set<Integer> taskIds) {
this.taskIds = taskIds;
this.id = counter.getAndIncrement();
}
public Set<Integer> getSet() {
return taskIds;
}
public int getId() {
return id;
}
} |
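A minimal wiring sketch of how such a key might be used (hypothetical: the enclosing class, `taskId`, and the map initializations are not shown in the snippet): each unique predecessor set gets one DependencyKey, indexed both ways.
// Hypothetical sketch: register that taskId depends on tasks 3 and 7.
Set<Integer> predecessors = new HashSet<>(Arrays.asList(3, 7));
DependencyKey key = new DependencyKey(predecessors);
setLocatorMap.put(taskId, key);                        // task -> its dependency key
dependencyMap.computeIfAbsent(key.getId(), k -> new HashSet<>())
        .add(taskId);                                  // key id -> dependent tasks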
# Appears to split n into two composite summands: 8 + (n - 8) when n is even,
# 9 + (n - 9) when n is odd (the second summand is even in both cases).
i = int(input())
d = i % 2 + 8
print(d, i - d)
######################## |
For the first time since September’s Emmy Awards, the cast of “Mr. Robot” reunited for a special SAG panel at the NeueHouse Hollywood on Wednesday evening to discuss season two of the buzzy cyber series. Stars Rami Malek and Christian Slater opened up about the joys and challenges of their roles on USA Network’s acclaimed psychological thriller, before joining show creator Sam Esmail onstage for a Q&A session.
“I was just shooting a movie in Serbia and so many young kids came out and waited outside my hotel to talk to me about [‘Mr. Robot’]. They had nothing but good things to say about the show and how the characters have affected them. I’ve never had that experience before,” said Malek, who stars as lonely vigilante hacker Elliot Alderson. “I’m very proud of what Sam has created and that the show is bringing attention to people suffering with mental illness. To help people around the world is very satisfying.”
Malek earned an Emmy Award for best actor in September, and the series won two Golden Globes — best TV drama and supporting actor for Slater — earlier this year.
“It’s certainly one of the best jobs I’ve had,” said Slater, who has acted in nearly 100 film and television projects. “I’ve learned to appreciate every moment and be grateful for it. Being a part of this show and having other experiences that didn’t necessarily go in the direction that I would have liked to have gone in has certainly made this a much deeper and gratifying experience.”
For Malek, portraying a character who is uncensored is one of his favorite aspects of the role. “The things that he gets to say are on all of our minds and to actually get to say it day in and day out on camera is a fun and surreal experience that you don’t have as a human being,” he explained. “It’s something that we all wish we could have in one moment or another and that’s one thing that I love about him.”
The hardest part for Malek is to authentically portray his character’s anti-social disorder. “There are so many people in the world who are suffering silently like Elliot. So it’s important that I do it as accurately as possible. It’s a difficult thing to do in one sense and it’s invigorating in another because it brings the reality to so many people at home watching. These kinds of trials for people exist.”
As for Slater, “the challenge is really in the writing and getting all the dialogue in my head,” he said. “When you get involved in a television show, it’s a very fast moving train, but something that I learned from Lars von Trier is not to rush through and to take your time and appreciate the writing, which will help your performance feel more real in the end.”
Esmail emphasized that casting the right actors is more important than a project’s storyline. “A TV show is only as great as its characters. I don’t really give a shit about plot,” he said. “We’ve seen all the plots and who cares. It’s about the characters, how they make those choices, how interesting they are, what their worldview is like. That, to me, is the thing that really is fascinating.”
Some highlights from the panel included a blooper reel and Carly Chaikin‘s audition tape – in which she acted out the scene in season one where Elliot first meets her while taking a shower. Other cast members present included Portia Doubleday, Grace Gummer and Stephanie Corneliussen.
The premiere date for season three has yet to be announced by the network, and the cast gave few hints about the upcoming season. Esmail remained coy about any plot details, but gave a one-word description for season three: “Disintegration.” |
/**
* @brief Initializes a new object to represent workload parameters for a
* generic workload.
* @param[in] _n Number of inputs to the operation.
* @param[in] _m Number of outputs for the operation.
* @details
* This constructor will generate workload parameters according to the specification
* for generic workload. Since `ResultComponent` and `InputParam` depend on `m` and `n`
* respectively, clients should utilize accessor methods provided in this class. Accessing
* and changing workload parameters through the base `Generic` interface is discouraged
* and should only be done for advanced processing: incorrectly modifying the workload
* parameters may cause undefined results.
*/
Generic(std::uint64_t _n = 1, std::uint64_t _m = 1) :
Common(_n + _m + 2)
{
this->set<std::uint64_t>(Index_N, _n, "n");
this->set<std::uint64_t>(Index_M, _m, "m");
for (std::size_t i = 0; i < _n; ++i)
set<std::uint64_t>(2 + i, 1, "length_InputParam" + std::to_string(i));
for (std::size_t i = 0; i < _m; ++i)
set<std::uint64_t>(2 + _n + i, 1, "length_ResultComponent" + std::to_string(i));
} |
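A quick illustration of the layout this constructor produces (a sketch based on the loop bounds above, assuming Index_N and Index_M are 0 and 1): for n = 2 inputs and m = 1 output the backing store holds 2 + 1 + 2 = 5 entries.
// Sketch: Generic params(2, 1) yields the following parameter slots:
//   index 0: n = 2                      ("n")
//   index 1: m = 1                      ("m")
//   index 2: length_InputParam0 = 1
//   index 3: length_InputParam1 = 1
//   index 4: length_ResultComponent0 = 1
Generic params(2, 1);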
// POO/aulas/aula01/h02.cpp
#include <stdio.h>
float proximaParcela(float s, int n, float j, int *corrente, float *p) {
    if (*corrente == 0) // first installment
*p = s;
else if (*corrente <= n)
*p = *p + (*p * (j / 100));
else
        *p = 0; // no more installments
(*corrente)++;
return *p;
}
int main() {
float s = 200;
int n = 5;
float j = 1;
    int corrente = 0; // tracks the current installment
    float p = s;      // tracks the value of the current installment
for (int i = 0; i <= n; i++) {
printf("O valor da parcela %d eh %3.2f \n", i, p);
p = proximaParcela(s, n, j, &corrente, &p);
}
return 0;
}
|
VisualV is a graphic overhaul modification for Grand Theft Auto V, bringing you completely redone weather effects, edited modifiers for areas/interiors, improved color correction, and much more to add some life to Los Santos and Blaine County, as well as fixed rendering code, so your playing experience will be smoother and nicer.
What was changed in VisualV?
- All weathers rewritten from scratch, based on hundreds of Los Angeles photos and movies.
- Draw distance of lights was increased and they are more visible.
- Moon movement is corrected (no more moonrise in the west!).
- Moon size was matched to the real world.
- Volumetric fog effect was improved, now it's stunning as it should be.
- Clouds colors, intensity and movement were built from scratch.
- Shadows under vehicles are in higher quality, so they aren't blocky anymore.
- Chromatic aberration and vignetting were removed from normal gameplay.
- Lens flares are all the same for three lead characters.
- Dithering effect from the grass was removed for good; rainy weather will look great!
- And many, many minor fixes.
VisualV has its own ReShade and ENB presets, with a heat haze effect available only for VisualV.
VisualV introduces several optional changes to improve your game experience; here is the list:
- Advanced motion blur - enables motion blur; the effect grows stronger with the vehicle's speed.
- Better positioning of pause menu - slightly changed Options tab, added First Person menu; Graphics and Advanced Graphics tabs are sorted better.
- Dynamic shadows from pedestrian vehicles - renders shadows from the headlights of NPC-driven vehicles.
- Earth's Atmosphere - a script that darkens the sky when flying at high altitude.
- Enabled blur during weapon/radio station switching - self-explanatory.
- ENB settings - ENB config, more information above.
- Removed grass dithering + trees reflections - removes grass dithering, especially visible in Los Santos; trees are now visible in reflections.
- No coronas - removes vehicle coronas.
- No lens flares - removes lens flares from the game.
- Original corona lights - restores vanilla light settings such as strength and the intensity of distant lights.
- Procedural shadows - enables shadows cast by small objects like cans and rubbish on the tarmac (may reduce game performance).
- RadianceV compatibility - adds compatibility with the RadianceV mod.
- ReShade preset - ReShade settings exclusive to VisualV, with the MXAO shading technique, ambient light, and a heat haze effect.
================CHANGELOG=============
1.0.420 version:
- Compatibility with Arena War update (1.0.1604.0).
A more detailed description of the changes is available in the ReadMe file.
An OpenIV mod package is included; you can install VisualV in a few clicks!
===============INSTALLATION===============
For OpenIV modPackage:
1. Install OpenIV - http://openiv.com/WebIV/guest.php?get=1
2. Run OpenIV, click Tools -> ASI Manager and install ASI Loader and OpenIV.asi.
3. Click Tools -> Package Installer and choose VisualV.oiv.
4. Choose Install to mods folder.
If you have ANY problem with GTA V modding, go here:
https://steamcommunity.com/sharedfiles/filedetails/?id=558079253
Previous versions of VisualV are available here:
https://www.mediafire.com/folder/vrv4z3navwafa/VisualV
=====================KNOWN BUGS=========================
- Advanced motion blur is not compatible with NaturalVision Remastered scripts (the game doesn't allow using more than one timecyc modifier via natives).
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedulers
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
listerv1 "k8s.io/client-go/listers/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
v1alpha1 "github.com/v6d-io/v6d/k8s/api/k8s/v1alpha1"
clientset "github.com/v6d-io/v6d/k8s/generated/clientset/versioned"
clientsetv1alpha1 "github.com/v6d-io/v6d/k8s/generated/clientset/versioned/typed/k8s/v1alpha1"
_ "github.com/v6d-io/v6d/k8s/generated/informers/externalversions/k8s/v1alpha1"
_ "github.com/v6d-io/v6d/k8s/generated/listers/k8s/v1alpha1"
)
// SchedulerState records the status of current scheduling
type SchedulerState struct {
state map[string]map[string]string // { jobname: { pod: nodename }}
localctl clientsetv1alpha1.LocalObjectInterface
globalctl clientsetv1alpha1.GlobalObjectInterface
}
// Append records the action of appending a new pod in a job to a given node.
func (ss *SchedulerState) Append(job string, pod string, nodeName string) error {
klog.V(5).Infof("assign job %v pod %v to node %v", job, pod, nodeName)
if s, ok := ss.state[job]; ok {
if _, ok := s[pod]; ok {
return fmt.Errorf("The pod has already been scheduled")
}
s[pod] = nodeName
return nil
}
ss.state[job] = make(map[string]string)
ss.state[job][pod] = nodeName
return nil
}
// Compute the placement of a pod in a job, given the usable nodes and the known objects pool.
//
// Use a deterministic strategy.
func (ss *SchedulerState) Compute(ctx context.Context, job string, replica int64, rank int64, requires []string, nodeName string) (int64, error) {
// if requires no vineyard object, raise
if len(requires) == 0 {
return 0, fmt.Errorf("No nodes available")
}
// if no replica, raise
if replica == 0 {
return 0, fmt.Errorf("No replica information in the job spec")
}
// accumulates all local required objects
globalObjects, err := ss.getGlobalObjectsByID(ctx, requires)
if err != nil {
return 0, err
}
klog.V(5).Infof("job %v requires objects %v", job, globalObjects)
localsigs := make([]string, 0)
for _, globalObject := range globalObjects {
for _, sig := range globalObject.Spec.Members {
localsigs = append(localsigs, sig)
}
}
localObjects, err := ss.getLocalObjectsBySignatures(ctx, localsigs)
if err != nil {
return 0, err
}
if len(localObjects) == 0 {
return 0, fmt.Errorf("No local chunks found")
}
klog.V(5).Infof("job %v requires local chunks %v", job, localObjects)
locations := make(map[string][]string)
for _, localObject := range localObjects {
host := localObject.Spec.Hostname
if _, ok := locations[host]; !ok {
locations[host] = make([]string, 0)
}
locations[host] = append(locations[host], localObject.Spec.ObjectID)
}
// total frags
totalfrags := int64(len(localObjects))
// frags for per pod
nchunks := totalfrags / replica
if totalfrags%replica != 0 {
nchunks++
}
// find the node
nodes := make([]string, 0)
for k := range locations {
nodes = append(nodes, k)
}
sort.Strings(nodes)
var cnt int64 = 0
target := ""
for _, node := range nodes {
localfrags := int64(len(locations[node]))
if cnt+localfrags >= (nchunks*rank + (nchunks+1)/2) {
target = node
break
}
cnt += localfrags
}
if target == "" {
klog.V(5).Infof("Unable to find a target: replica = %v, rank = %v, locations = %v", replica, rank, locations)
return 0, fmt.Errorf("Unable to find a pod: internal error")
}
if target == nodeName {
return 100, nil
} else {
return 1, nil
}
}
func (ss *SchedulerState) getGlobalObjectsByID(ctx context.Context, objectIds []string) ([]*v1alpha1.GlobalObject, error) {
objects := make([]*v1alpha1.GlobalObject, 0)
for _, globalObjectID := range objectIds {
if globalObject, err := ss.globalctl.Get(ctx, globalObjectID, metav1.GetOptions{}); err != nil {
return nil, err
} else {
objects = append(objects, globalObject)
}
}
return objects, nil
}
func (ss *SchedulerState) getLocalObjectsBySignatures(ctx context.Context, signatures []string) ([]*v1alpha1.LocalObject, error) {
objects := make([]*v1alpha1.LocalObject, 0)
for _, sig := range signatures {
options := metav1.ListOptions{
LabelSelector: fmt.Sprintf("k8s.v6d.io/signature=%v", sig),
}
if localObjects, err := ss.localctl.List(ctx, options); err != nil {
return nil, err
} else {
for _, localObject := range localObjects.Items {
objects = append(objects, &localObject)
}
}
}
return objects, nil
}
// VineyardScheduling is a plugin that schedules pods that requires vineyard objects as inputs.
type VineyardScheduling struct {
handle framework.FrameworkHandle
podLister listerv1.PodLister
scheduleTimeout *time.Duration
state map[string]*SchedulerState
client *clientset.Clientset
}
var _ framework.ScorePlugin = &VineyardScheduling{}
var _ framework.PreFilterPlugin = &VineyardScheduling{}
// var _ framework.PermitPlugin = &VineyardScheduling{}
var _ framework.PostBindPlugin = &VineyardScheduling{}
const (
// Name is the name of the plugin used in Registry and configurations.
Name = "Vineyard"
// Timeout is the default timeout for the scheduler plugin.
Timeout = 60
// VineyardJobName is the pod group name
VineyardJobName = "scheduling.k8s.v6d.io/job"
// VineyardJobRequired is the object ids that required by this job
VineyardJobRequired = "scheduling.k8s.v6d.io/required"
// VineyardJobReplica is the replication of pods in this job.
VineyardJobReplica = "scheduling.k8s.v6d.io/replica"
)
// New initializes a vineyard scheduler
// func New(obj runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
func New(configuration runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
klog.Info("Initializing the vineyard scheduler plugin ...")
timeout := Timeout * time.Second
state := make(map[string]*SchedulerState)
client := clientset.NewForConfigOrDie(ctrl.GetConfigOrDie())
scheduling := &VineyardScheduling{
handle: handle,
podLister: handle.SharedInformerFactory().Core().V1().Pods().Lister(),
scheduleTimeout: &timeout,
state: state,
client: client,
}
return scheduling, nil
}
// Name returns name of the plugin. It is used in logs, etc.
func (vs *VineyardScheduling) Name() string {
return Name
}
// Less compares the priority of two pods.
// func (vs *VineyardScheduling) Less(pod1, pod2 *framework.QueuedPodInfo) bool {
func (vs *VineyardScheduling) Less(pod1, pod2 *framework.PodInfo) bool {
prio1 := podutil.GetPodPriority(pod1.Pod)
prio2 := podutil.GetPodPriority(pod2.Pod)
return prio1 > prio2
}
// PreFilter for a pod
func (vs *VineyardScheduling) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) *framework.Status {
return framework.NewStatus(framework.Success, "")
}
// PreFilterExtensions is None
func (vs *VineyardScheduling) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
// Score compute the score for a pod based on the status of required vineyard objects.
//
func (vs *VineyardScheduling) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
// nodeInfo, err := ps.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
// if err != nil {
// return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Faild to get node %q: %v", nodeName, err))
// }
klog.V(5).Infof("scoring for pod %v on node %v", GetNamespacedName(pod), nodeName)
job, replica, requires, err := vs.GetVineyardLabels(pod)
if err != nil {
return 0, framework.NewStatus(framework.Unschedulable, err.Error())
}
rank, err := vs.GetPodRank(pod)
if err != nil || rank == -1 {
rank = replica - 1
}
klog.V(5).Infof("scoring for pod of job %v, with %v replicas (rank %v), and requires %v", job, replica, rank, requires)
namespace := pod.GetNamespace()
schedulerState := vs.MakeSchedulerStateForNamespace(namespace)
score, err := schedulerState.Compute(ctx, job, replica, rank, requires, nodeName)
if err != nil {
return 0, framework.NewStatus(framework.Unschedulable, err.Error())
}
klog.Infof("score for pod of job %v on node %v is: %v", job, nodeName, score)
return score, framework.NewStatus(framework.Success, "")
}
// ScoreExtensions of the Score plugin.
func (vs *VineyardScheduling) ScoreExtensions() framework.ScoreExtensions {
return vs
}
// NormalizeScore normalizes the score of all nodes for a pod.
func (vs *VineyardScheduling) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
// Find highest and lowest scores.
return framework.NewStatus(framework.Success, "")
}
// Permit only permit runs on the node that has vineyard installed.
func (vs *VineyardScheduling) Permit(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
return framework.NewStatus(framework.Success, ""), 0
}
// PostBind do nothing
func (vs *VineyardScheduling) PostBind(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeName string) {
klog.V(5).Infof("bind pod %v on node %v", GetNamespacedName(pod), nodeName)
job, replica, requires, err := vs.GetVineyardLabels(pod)
if err != nil {
// ignore: might not be a vineyard job
return
}
klog.V(5).Infof("bind pod of job %v, with %v replicas, and requires %v", job, replica, requires)
// ignore
//
// namespace := pod.GetNamespace()
// schedulerState := vs.MakeSchedulerStateForNamespace(namespace)
// schedulerState.Append(job, GetNamespacedName(pod), nodeName)
}
// MakeSchedulerStateForNamespace initializes a state for the given namespace, if not exists.
func (vs *VineyardScheduling) MakeSchedulerStateForNamespace(namespace string) *SchedulerState {
if _, ok := vs.state[namespace]; !ok {
state := make(map[string]map[string]string)
localctl := vs.client.K8sV1alpha1().LocalObjects(namespace)
globalctl := vs.client.K8sV1alpha1().GlobalObjects(namespace)
vs.state[namespace] = &SchedulerState{
state: state,
localctl: localctl,
globalctl: globalctl,
}
}
return vs.state[namespace]
}
func (vs *VineyardScheduling) getJobName(pod *v1.Pod) (string, error) {
jobName, exist := pod.Labels[VineyardJobName]
if !exist || jobName == "" {
return "", fmt.Errorf("Failed to get vineyard job name for %v", GetNamespacedName(pod))
}
klog.V(5).Infof("job name is: %v", jobName)
return jobName, nil
}
func (vs *VineyardScheduling) getJobReplica(pod *v1.Pod) (int64, error) {
jobReplica, exist := pod.Labels[VineyardJobReplica]
if !exist || jobReplica == "" {
return -1, fmt.Errorf("Failed to get vineyard job name for %v", GetNamespacedName(pod))
}
klog.V(5).Infof("job replica is: %v", jobReplica)
if val, err := strconv.Atoi(jobReplica); err != nil {
return -1, err
} else {
return int64(val), nil
}
}
func (vs *VineyardScheduling) getJobRequired(pod *v1.Pod) ([]string, error) {
objects, exist := pod.Labels[VineyardJobRequired]
if !exist || objects == "" {
return nil, fmt.Errorf("Failed to get vineyard job name for %v", GetNamespacedName(pod))
}
klog.V(5).Infof("job requires: %v", objects)
return strings.Split(objects, "-"), nil
}
// GetVineyardLabels retrieves the (job, replica, requires) information of a pod.
func (vs *VineyardScheduling) GetVineyardLabels(pod *v1.Pod) (string, int64, []string, error) {
job, err := vs.getJobName(pod)
if err != nil {
return "", 0, nil, err
}
replica, err := vs.getJobReplica(pod)
if err != nil {
return "", 0, nil, err
}
requires, err := vs.getJobRequired(pod)
if err != nil {
return "", 0, nil, err
}
return job, replica, requires, nil
}
// GetPodRank returns the rank of this pod
func (vs *VineyardScheduling) GetPodRank(pod *v1.Pod) (int64, error) {
names := strings.Split(pod.GetName(), "-")
if rank, err := strconv.Atoi(names[len(names)-1]); err != nil {
return -1, err
} else {
return int64(rank), nil
}
}
// GetNamespacedName returns the namespaced name of an kubernetes object.
func GetNamespacedName(object metav1.Object) string {
return fmt.Sprintf("%v/%v", object.GetNamespace(), object.GetName())
}
|
def RankedCompletions(suggestions, invocation):
def _FlagAlreadyUsed(flag):
return flag in [token.value for token in invocation.flags]
def _ShouldPrioritizeUnusedRequiredFlag(token_name):
return (IsFlag(token_name) and
(FlagIsRequired(suggestions[token_name]) or
FlagBelongsToRequiredGroup(suggestions[token_name],
invocation.GetPossibleFlagGroups())) and
not _FlagAlreadyUsed(token_name) and
not _FlagFromMutexGroupUsed(token_name))
def _FlagFromMutexGroupUsed(flag):
flag_group = invocation.GetPossibleFlags().get(flag, {}).get('group', None)
return (flag_group in invocation.flag_groups and
invocation.flag_groups[flag_group].get('is_mutex', False))
def _FlagFromGroupAlreadyUsed(flag_group):
return any(_FlagAlreadyUsed(flag) for flag in flag_group)
def _ShouldPrioritizeUnusedLocationFlag(token_name):
return (IsFlag(token_name) and token_name in _LOCATION_FLAGS and
not _FlagFromGroupAlreadyUsed(_LOCATION_FLAGS))
def _PrioritizedUnusedRequiredFlags(keys):
res = sorted(keys, key=_ShouldPrioritizeUnusedLocationFlag, reverse=True)
return sorted(res, key=_ShouldPrioritizeUnusedRequiredFlag, reverse=True)
return _PrioritizedUnusedRequiredFlags(sorted(suggestions)) |
// src/common/constants/index.ts
export * from './coin-icons';
export * from './coin-links';
|
def print_groups(groups):
    for tag, grp in groups.items():
        print("-", tag, "-" * 70)
        for task in grp:
            print('- [%s]: %s %s' % (task.done, task.text, task.tags))
def mu_stats(self, aper, band, to_mujy=True, rerun='s18a', sigma=3.5,
kde=False, bw=None, prefix=None):
u_factor = self.CGS_TO_MUJY if to_mujy else 1.0
assert band in self.FILTER_SHORT, "# Wrong filter name: {}".format(band)
flux_col = aper.flux(rerun=rerun, band=band)
try:
mu = self.skyobjs[flux_col] * u_factor / aper.area_arcsec
        except (KeyError, ValueError):
raise Exception("# Wrong flux column name: {0}".format(flux_col))
return utils.stats_summary(mu, sigma=sigma, n_min=self.n_min,
kde=kde, bw=bw, prefix=prefix) |
// -*- C++ -*-
/*!
\file composite_compare.h
  \brief Implements functions and classes for comparing composite numbers.
*/
#if !defined(__ads_composite_compare_h__)
#define __ads_composite_compare_h__
#include "../array/FixedArray.h"
#include <functional>
namespace ads {
//-----------------------------------------------------------------------------
/*! \defgroup functor_composite_compare Functor: Composite Compare */
// @{
//
// Comparisons for N-D numbers.
//
//! Compare the \c n coordinate of an N-dimensional composite number.
template <int N, typename PointType>
bool
less_composite_fcn(const int n, const PointType& a, const PointType& b);
/* Defined in the .ipp file. */
//! Compare the \c i coordinate of an N-dimensional composite number.
/* CONTINUE
template <int N, typename T>
bool
less_composite_fcn( int i, const FixedArray<N,T>* a,
const FixedArray<N,T>* b );
*/
//! Composite comparison of points.
template <int N, typename PointType>
class less_composite :
public std::binary_function<PointType, PointType, bool> {
private:
int _n;
public:
//! Default constructor. Starting coordinate is invalid.
less_composite() :
_n(-1) {}
//! Set the coordinate to start comparing.
void
set(const int n) {
_n = n;
}
//! Composite comparison of points.
bool
operator()(const PointType& a, const PointType& b) const {
return less_composite_fcn<N>(_n, a, b);
}
};
/* CONTINUE: I don't know if I need this.
template <class PointType>
class less_composite<PointType*> :
public std::binary_function<const PointType*, const PointType*, bool>
{
private:
int _n;
public:
void
set( int n )
{
_n = n;
}
bool
operator()( const PointType* a, const PointType* b )
{
return less_composite_fcn( _n, *a, *b );
}
};
*/
//
// Comparisons for 3-D numbers.
//
//! Compare x coordinate using the composite number (x,y,z).
template <typename Pt3D>
bool
xless_composite_compare(const Pt3D& a, const Pt3D& b);
//! Compare y coordinate using the composite number (y,z,x).
template <typename Pt3D>
bool
yless_composite_compare(const Pt3D& a, const Pt3D& b);
//! Compare z coordinate using the composite number (z,x,y).
template <typename Pt3D>
bool
zless_composite_compare(const Pt3D& a, const Pt3D& b);
//! Compare the \c i coordinate.
template <typename Pt3D>
bool
less_composite_compare(int i, const Pt3D& a, const Pt3D& b);
//! Compare x coordinate using the composite number (x,y,z).
template <class PointType>
struct xless_composite :
public std::binary_function<PointType, PointType, bool> {
//! Compare x coordinate using the composite number (x,y,z).
bool
operator()(const PointType& a, const PointType& b) {
return xless_composite_compare(a, b);
}
};
//! Compare y coordinate using the composite number (y,z,x).
template <class PointType>
struct yless_composite :
public std::binary_function<PointType, PointType, bool> {
//! Compare y coordinate using the composite number (y,z,x).
bool
operator()(const PointType& a, const PointType& b) {
return yless_composite_compare(a, b);
}
};
//! Compare z coordinate using the composite number (z,x,y).
template <class PointType>
struct zless_composite :
public std::binary_function<PointType, PointType, bool> {
//! Compare z coordinate using the composite number (z,x,y).
bool
operator()(const PointType& a, const PointType& b) {
return zless_composite_compare(a, b);
}
};
// CONTINUE
//--------------------------------------------------------------------------
#if 0
//! Compare x coordinate using the composite number (x,y,z).
template <typename T>
bool
xless_composite_compare(const FixedArray<3, T>& a,
const FixedArray<3, T>& b);
//! Compare y coordinate using the composite number (y,z,x).
template <typename T>
bool
yless_composite_compare(const FixedArray<3, T>& a,
const FixedArray<3, T>& b);
//! Compare z coordinate using the composite number (z,x,y).
template <typename T>
bool
zless_composite_compare(const FixedArray<3, T>& a,
const FixedArray<3, T>& b);
//! Compare x coordinate using the composite number (x,y,z).
template <typename T>
bool
xless_composite_compare(const FixedArray<3, T>* a,
const FixedArray<3, T>* b);
//! Compare y coordinate using the composite number (y,z,x).
template <typename T>
bool
yless_composite_compare(const FixedArray<3, T>* a,
const FixedArray<3, T>* b);
//! Compare z coordinate using the composite number (z,x,y).
template <typename T>
bool
zless_composite_compare(const FixedArray<3, T>* a,
const FixedArray<3, T>* b);
//! Compare \c i coordinate.
template <typename T>
bool
less_composite_compare(int i, const FixedArray<3, T>& a,
const FixedArray<3, T>& b);
//! Compare \c i coordinate.
template <typename T>
bool
less_composite_compare(int i, const FixedArray<3, T>* a,
const FixedArray<3, T>* b);
template <class PointType>
struct xless_composite :
public std::binary_function<PointType, PointType, bool> {
bool operator()(const PointType& a, const PointType& b) {
return xless_composite_compare(a, b);
}
};
template <class PointType>
struct xless_composite<PointType*> :
public std::binary_function < const PointType*,
const PointType*, bool > {
bool operator()(const PointType* a, const PointType* b) {
return xless_composite_compare(a, b);
}
};
template <class PointType>
struct yless_composite :
public std::binary_function<PointType, PointType, bool> {
bool operator()(const PointType& a, const PointType& b) {
return yless_composite_compare(a, b);
}
};
template <class PointType>
struct yless_composite<PointType*> :
public std::binary_function<const PointType*, const PointType*, bool> {
bool operator()(const PointType* a, const PointType* b) {
return yless_composite_compare(a, b);
}
};
template <class PointType>
struct zless_composite :
public std::binary_function<PointType, PointType, bool> {
bool operator()(const PointType& a, const PointType& b) {
return zless_composite_compare(a, b);
}
};
template <class PointType>
struct zless_composite<PointType*> :
public std::binary_function < const PointType*,
const PointType*, bool > {
bool operator()(const PointType* a, const PointType* b) {
return zless_composite_compare(a, b);
}
};
//--------------------------------------------------------------------------
#endif
// @}
} // namespace ads
#define __ads_composite_compare_ipp__
#include "composite_compare.ipp"
#undef __ads_composite_compare_ipp__
#endif
|
package com.zrkworld.cinema.Config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.util.concurrent.TimeUnit;
@Component
public class RedisUtil {
@Autowired
StringRedisTemplate stringRedisTemplate;
@Autowired
RedisTemplate<Object, Object> redisTemplate;
@Resource(name = "stringRedisTemplate")
ValueOperations<String, String> valOpsStr;
@Resource(name = "redisTemplate")
ValueOperations<Object, Object> valOpsObj;
    /**
     * Get a String value by the given key.
     *
     * @param key
     * @return
     */
    public String getStr(String key) {
        return valOpsStr.get(key);
    }
    /**
     * Set a String cache entry.
     *
     * @param key
     * @param val
     */
    public void setStr(String key, String val) {
        valOpsStr.set(key, val);
    }
    /**
     * Set a String cache entry with an expiration time.
     *
     * @param key
     * @param val
     */
    public void setStr(String key, String val, long timeout, TimeUnit timeUnit) {
        valOpsStr.set(key, val, timeout, timeUnit);
    }
    /*
     * Set the expiration time of a String key.
     * */
    public void expireStr(String key) {
        // use the String template so the key serialization matches getStr/setStr
        valOpsStr.getOperations().expire(key, ConstantKit.TOKEN_EXPIRE_TIME, TimeUnit.SECONDS);
    }
    /**
     * Delete the given key.
     *
     * @param key
     */
    public void del(String key) {
        stringRedisTemplate.delete(key);
    }
    /**
     * Get an Object by the given key.
     *
     * @param o
     * @return
     */
    public Object getObj(Object o) {
        return valOpsObj.get(o);
    }
    /**
     * Set an Object cache entry.
     *
     * @param o1
     * @param o2
     */
    public void setObj(Object o1, Object o2) {
        valOpsObj.set(o1, o2);
    }
    /*
     * Set the expiration time of an Object key.
     * */
    public void expireObj(Object o1){
        valOpsObj.getOperations().expire(o1, ConstantKit.TOKEN_EXPIRE_TIME, TimeUnit.SECONDS);
    }
    /**
     * Delete an Object cache entry.
     *
     * @param o
     */
    public void delObj(Object o) {
        redisTemplate.delete(o);
    }
}
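A minimal usage sketch (assuming Spring injects the component; the key name and values are illustrative):
@Autowired
RedisUtil redisUtil;

void cacheToken() {
    // cache a session token for 30 minutes, read it back, then drop it
    redisUtil.setStr("token:42", "abc123", 30, TimeUnit.MINUTES);
    String token = redisUtil.getStr("token:42");
    System.out.println(token);
    redisUtil.del("token:42");
}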
|
Exclusive: Rodney Croome, Kerryn Phelps and Ivan Hinton-Teoh blame flaws in the process after forms sent to old postboxes
Prominent LGBTI advocates Rodney Croome, Kerryn Phelps and Ivan Hinton-Teoh have all complained that their marriage law survey forms went to the wrong addresses and have raised fears some Australians will miss out on a vote.
The trio each told Guardian Australia that their forms went to old post office boxes, despite all of them updating or checking their enrolments with the Australian Electoral Commission before the 24 August deadline.
Phelps, the former Australian Medical Association president who appeared in the first Equality Campaign ad, said she was “not calling it a conspiracy” but “indicative of some flaws in the process we always knew would be there”.
Croome, a long-time marriage equality advocate, said despite living in the same place for 26 years and checking his enrolment “just in case”, he discovered his postal survey had been sent “to a postbox I haven’t used for a decade”.
“That postal address wasn’t visible in my online AEC record and I had no idea the AEC would use it as my preferred address,” he said.
“I fear this glitch has the potential to disenfranchise many voters who may not have my very high level of motivation to participate.”
After the deadline for receiving a form to vote on same-sex marriage elapsed on Monday, eligible voters have been encouraged to check their enrolment with the AEC and apply to the Australian Bureau of Statistics for a replacement form online or by phone.
Phelps, a silent elector whose form was mailed by the AEC, discovered the error when her 18-year old daughter’s survey arrived but hers and her wife’s did not.
Phelps said she couldn’t figure out why the forms went to an old post-office box, since AEC communications had previously come to her residential address.
“We haven’t had a post box address for seven years,” she said. “If your survey form has not arrived and you want to vote yes, then check with the AEC because you should have received it.”
Hinton-Teoh, the Just Equal spokesman, said he and his partner Chris had updated their details online but their surveys went to a postbox they hadn’t used for 15 years.
“It is a clear sign that neither the electoral roll nor the update mechanisms the AEC provided were up to the task for this unprecedented and entirely unnecessary process,” he said.
“More than ever, it is clear that this survey result will reflect the motivated and persistent on both sides of this issue, considering the barriers we’ve experienced.”
Hinton-Teoh suggested the ABS should allow all Australians to use an online mechanism made available to Australians currently overseas for voting, rather than the “inefficient and unreliable postal service”.
In September the interim chief executive of Australia Post, Christine Corbett, told a Senate committee inquiry that 99% of all letters arrived on time or one day late but took on notice what proportion failed to arrive at all.
Phelps said that although about one million people had updated their details before the cut-off, her experience was indicative of many people who are “finding their surveys going to old, defunct addresses”.
Guardian Australia has received several reader complaints about forms going to old addresses, post office boxes, and in one case a commercial address registered for the Sydney local council election.
An AEC spokesman said it had provided the ABS with the electoral roll as at 24 August including voters’ residential address (the enrolled address) and “if provided by that person as part of their roll record, their postal address”.
In some cases the errors may have resulted from voters not updating both addresses, but Croome, Phelps and Hinton-Teoh all said they checked that their postal address matched their residential address. The AEC spokesman stressed it is voters’ responsibility to keep their details up to date.
Guardian Australia has also discovered enrolled Australians seeking replacement postal survey forms are being rejected by the ABS website because it cannot “verify” them due to tiny errors in the electoral roll.
Several readers who applied for replacements using the website received automated ABS responses informing them “unfortunately we have not been able to verify your details against the commonwealth electoral roll so cannot process your request at this time”.
An ABS spokesman told Guardian Australia that the online process for requesting a replacement form “looks for exact matches between a person’s electoral roll details and those provided to the ABS”.
He said if the details aren’t verified, people are told to check their details and call the information line, allowing staff to “identify minor discrepancies and support eligible Australians to obtain their survey form”.
Call centre staff ask questions such as alternative names or addresses, and if a person’s identity and enrolment is confirmed they get a replacement form. |
// EnsureDiscoFinalizerRemoved ensure the finalizer is removed from an existing Object.
func (k8s *K8sFramework) EnsureDiscoFinalizerRemoved(obj runtime.Object) error {
isHasDeletionTimestamp, err := HasDeletionTimestamp(obj)
if err != nil {
return err
}
if hasDiscoFinalizer(obj, k8s.finalizer) && isHasDeletionTimestamp {
newObj := obj.DeepCopyObject()
objMeta, err := meta.Accessor(newObj)
if err != nil {
return err
}
finalizers := objMeta.GetFinalizers()
for i, fin := range finalizers {
if fin == k8s.finalizer {
finalizers = append(finalizers[:i], finalizers[i+1:]...)
}
}
		// The typed case variable already aliases newObj, so mutating it in
		// place is enough; no re-assertion or re-assignment is needed.
		switch t := newObj.(type) {
		case *extensionsv1beta1.Ingress:
			t.Finalizers = finalizers
		case *coreV1.Service:
			t.Finalizers = finalizers
		case *discov1.Record:
			t.Finalizers = finalizers
		default:
			return fmt.Errorf("unknown type: %T", t)
		}
k8s.logger.LogDebug("removing finalizer", "key", fmt.Sprintf("%s/%s/%s", obj.GetObjectKind(), objMeta.GetNamespace(), objMeta.GetName()), "finalizer", k8s.finalizer)
return k8s.UpdateObjectAndWait(
obj, newObj,
func(event apimachineryWatch.Event) (bool, error) {
switch event.Type {
case apimachineryWatch.Deleted:
return false, apiErrors.NewNotFound(schema.GroupResource{Resource: obj.GetObjectKind().GroupVersionKind().Kind}, objMeta.GetName())
}
switch ing := event.Object.(type) {
case *extensionsv1beta1.Ingress, *coreV1.Service, *discov1.Record:
return !hasDiscoFinalizer(ing, k8s.finalizer), nil
}
return false, nil
},
)
}
return nil
} |
// NewCustCode creates a new CustCode object
func NewCustCode(cs CustCodeStore) *CustCode {
return &CustCode{
store: cs,
}
} |
/**
* restart the server
*
* @return {@link MServer} instance
*/
public static MServer restart() {
if (isRunning())
stop();
return start();
} |
# Palindrome_Check.py
def isPalindrome(s):
    # A string is a palindrome if it equals its own reverse.
    rev = ''.join(reversed(s))
    return s == rev

ans = isPalindrome("")  # the empty string is trivially a palindrome
if ans:
    print("Yes")
else:
    print("No")
package de.prokyo.network.common.packet;
import de.prokyo.network.common.buffer.PacketBuffer;
import de.prokyo.network.common.event.Event;
/**
* Represents a packet containing a bunch of information.
*/
public interface Packet extends Event {
/**
* Encodes the information and writes it to the given buffer.<br>
*
* @param buffer A packet buffer which will contain the encoded data
*/
void encode(PacketBuffer buffer);
/**
* Decodes the information of the given buffer and sets it to the variables of the implementation.<br>
*
* @param buffer A packet buffer containing the encoded data
*/
void decode(PacketBuffer buffer);
}
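A minimal implementation sketch (hypothetical: the packet type and PacketBuffer's writeInt/readInt accessors are assumed for illustration and are not part of the interface above):
public class KeepAlivePacket implements Packet {

	private int id;

	@Override
	public void encode(PacketBuffer buffer) {
		buffer.writeInt(this.id); // assumed accessor
	}

	@Override
	public void decode(PacketBuffer buffer) {
		this.id = buffer.readInt(); // assumed accessor
	}
}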
|
Zipf and Type-Token rules for the English and Irish languages
The Zipf curve of log of frequency against log of rank for a large English corpus of 500 million word tokens and 689,000 word types is shown to have the usual slope close to –1 for rank less than 5,000, but then for a higher rank it turns to give a slope close to –2. This is apparently mainly due to foreign words and place names. The Zipf curve for a highly-inflected language (the Indo-European Celtic language, Irish) is also given. Because of the larger number of word types per lemma, it remains flatter than the English curve maintaining a slope of –1 until a turning point of about rank 30,000. A formula which calculates the number of tokens given the number of types is derived in terms of the rank at the turning point, 5,000 for English and 30,000 for Irish.
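The abstract states this token-type formula only in words; a plausible reconstruction, assuming an idealized Zipf curve of slope $-1$ up to the turning rank $b$, slope $-2$ beyond it, and the rarest type occurring exactly once, is
$$f(r) \approx \begin{cases} f_1/r, & r \le b,\\ f_1 b/r^2, & b < r \le V, \end{cases} \qquad f(V) = 1 \;\Rightarrow\; f_1 = V^2/b,$$
so the number of tokens $N$ for $V$ types is approximately
$$N \approx \sum_{r \le b} \frac{f_1}{r} + \sum_{b < r \le V} \frac{f_1 b}{r^2} \approx \frac{V^2}{b}\left(\ln b + \gamma + 1 - \frac{b}{V}\right),$$
where $\gamma$ is the Euler–Mascheroni constant and $b \approx 5{,}000$ for English, $30{,}000$ for Irish.
|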
/*
* Calculate the keybits and highside/lowside of the freemap node the
* caller is creating.
*
* This routine will specify the next higher-level freemap key/radix
* representing the lowest-ordered set. By doing so, eventually all
* low-ordered sets will be moved one level down.
*
* We have to be careful here because the freemap reserves a limited
* number of blocks for a limited number of levels. So we can't just
* push indiscriminately.
*/
int
hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
int keybits, hammer2_blockref_t *base, int count)
{
hammer2_chain_core_t *above;
hammer2_chain_t *child;
hammer2_blockref_t *bref;
hammer2_key_t key;
int locount;
int hicount;
int i;
key = *keyp;
above = parent->core;
locount = 0;
hicount = 0;
keybits = 64;
spin_lock(&above->cst.spin);
for (i = 0; i < count; ++i) {
child = hammer2_chain_find_locked(parent, i);
if (child) {
if (child->flags & HAMMER2_CHAIN_DELETED)
continue;
bref = &child->bref;
} else if (base && base[i].type) {
bref = &base[i];
} else {
continue;
}
if (keybits > bref->keybits) {
key = bref->key;
keybits = bref->keybits;
} else if (keybits == bref->keybits && bref->key < key) {
key = bref->key;
}
}
spin_unlock(&above->cst.spin);
switch(keybits) {
case HAMMER2_FREEMAP_LEVEL0_RADIX:
keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
break;
case HAMMER2_FREEMAP_LEVEL1_RADIX:
keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
break;
case HAMMER2_FREEMAP_LEVEL2_RADIX:
keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
break;
case HAMMER2_FREEMAP_LEVEL3_RADIX:
keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
break;
case HAMMER2_FREEMAP_LEVEL4_RADIX:
panic("hammer2_chain_indkey_freemap: level too high");
break;
default:
panic("hammer2_chain_indkey_freemap: bad radix");
break;
}
*keyp = key;
return (keybits);
} |
Deaths and tumours among rotogravure printers exposed to toluene.
A cohort of 1020 rotogravure printers exposed to toluene and employed for a minimum period of three months in eight plants during 1925-85 was studied. Air levels of toluene were available since 1943 in one plant and since 1969 in most. Based on these measurements and on present concentrations of toluene in blood and subcutaneous fat, the yearly average air levels in each plant were estimated. They reached a maximum of about 450 ppm in the 1940s and 1950s but were only about 30 ppm by the mid-1980s. Exposure to benzene had occurred up to the beginning of the 1960s. Compared with regional rates, total mortality did not increase during the observation period 1952-86 (129 observed deaths v 125 expected; SMR = 1.03). There was no increase in mortality from non-malignant diseases of the lungs, nervous system, or gastrointestinal and urinary tracts. There was no overall excess of tumours 1958-85 (68 v 54, SMR = 1.26; 95% confidence interval, CI = 0.95-1.7). Among the specific cancers, only those of the respiratory tract were significantly increased (16 v 9; SMR = 1.76, CI = 1.03-2.9). Statistical significance was not attained, however, when only subjects with an exposure period of at least five years and a latency period of at least 10 years were considered. Further, there were no dose response relations with cumulated toluene dose (ppm years). There were no significant increases of tumours at other sites, including leukaemias/lymphomas/myelomas. |
import typing

DELIMITER = "|"  # assumed field delimiter; the original snippet does not define it


def parse_message(data: typing.AnyStr) -> tuple:
    """Parse a message of the form CMD|LLLL|msg, where LLLL is a 4-character,
    space-padded length field. Returns (cmd, msg), or (None, None) if the
    message is malformed."""
    split_by_delimiter = data.split(DELIMITER)
    cmd = split_by_delimiter[0]
    msg = split_by_delimiter[-1]
    if len(split_by_delimiter) != 3:
        return None, None
    if len(split_by_delimiter[1]) != 4:
        return None, None
    try:
        if int(split_by_delimiter[1]) != len(msg):
            return None, None
    except ValueError:
        return None, None
    for char in split_by_delimiter[1]:
        if char != " ":
            if not char.isalnum():
                return None, None
    cmd = cmd.strip()
    return cmd, msg
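A quick usage example for the parser above (assuming DELIMITER is "|" as defined in the sketch):
cmd, msg = parse_message("LOGIN|   5|hello")
print(cmd, msg)   # -> LOGIN hello

cmd, msg = parse_message("LOGIN|  99|hello")
print(cmd, msg)   # -> None None (length field does not match the payload)
|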
// run executes the migrations found in the repository.
func run(db *sql.DB, repo repository.Source, toVersion int) error {
err := db.Ping()
if err != nil {
return err
}
var currentVersion int
	err = db.QueryRow("SELECT version FROM migration ORDER BY version DESC").Scan(&currentVersion)
if err != nil {
currentVersion = -1
}
migrations, err := repo.Load()
if err != nil {
return err
}
var versions []int
for v := range migrations {
versions = append(versions, v)
}
sort.Ints(versions)
for _, v := range versions {
if currentVersion != -1 && currentVersion >= v {
continue
}
if err := executeQuery(db, migrations[v]); err != nil {
return err
}
if err := executeQuery(db, fmt.Sprintf("INSERT INTO migration VALUES(%d);", v)); err != nil {
return err
}
if toVersion > 0 && v == toVersion {
break
}
}
return nil
} |
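The code above assumes a migration bookkeeping table already exists; a minimal compatible schema (an assumption, since the source does not show it) would be:
CREATE TABLE IF NOT EXISTS migration (
    version INT NOT NULL PRIMARY KEY
);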
import * as dom from "@typeup/dom"
import { Source } from "../Source"
import * as block from "./block"
function parse(source: Source): dom.block.Block[] | undefined {
let peeked = ""
let p: string | undefined
while (p = source.peekIs(peeked + "\t"))
peeked = p
let result: dom.block.Block[] | undefined
if (source.readIf(peeked + "-")) {
while ((source.peek() || "").match(/\s/))
source.read()
const current = new dom.block.ListItem(block.parseAll(source.requirePrefix("\t")) || [], source.mark())
const next = block.parse(source)
let index = 0
while (next && next.length > 0 && next[index] instanceof dom.block.EmptyLine)
index++
if (next && next.length > 0 && next[index] instanceof dom.block.UnorderedList) {
while (index-- > 0)
next.shift()
next[0] = new dom.block.UnorderedList([current].concat((next[0] as dom.block.UnorderedList).content))
result = next
} else {
result = [new dom.block.UnorderedList([current])]
if (next && next.length > 0)
result = result.concat(next)
}
}
return result
}
block.addParser(parse)
|
#pragma once
#include <wrtstat/aggregator/api/reduced_data.hpp>
#include <wrtstat/aggregator/api/aggregated_perc.hpp>
#include <memory>
#include <functional>
namespace wrtstat {
struct aggregated_data
: reduced_data
, aggregated_perc
{
typedef std::unique_ptr<aggregated_data> ptr;
typedef std::function< void(ptr) > handler;
};
}
|
/**
 * Restores the heap property by re-arranging the elements in the backing
 * array as necessary following any heap modifications. Iterating from
 * data.size() / 2 down to the root sifts each internal node down (leaves
 * are trivially valid heaps), so this bottom-up pass runs in O(n) overall.
 */
public void heapify() {
for (int i = data.size() / 2; i >= 0; i--) {
heapify(i);
}
} |
Predictors of the feasibility of primary endoscopic management of biliary strictures after adult living donor liver transplantation
Biliary strictures are a major cause of morbidity and mortality for liver transplant recipients. The endoscopic management of biliary strictures is not well established after living donor liver transplantation (LDLT) in comparison with deceased donor liver transplantation. The aims of this study were to assess the initial success rate of primary endoscopic treatment of biliary strictures after LDLT and to identify predictors of the feasibility of endoscopic management. One hundred thirty‐seven adult patients who underwent LDLT and were confirmed to have biliary strictures by endoscopic retrograde cholangiopancreatography (ERCP) were enrolled. The biliary strictures were primarily managed endoscopically with internal drainage or nasobiliary catheterization. The initial success rate for the primary endoscopic management of biliary strictures after LDLT was 46.7% (64 of 137 patients), and the feasibility of endoscopic management was associated with the stricture‐to‐ERCP interval (the interval between the development of the total bilirubin, aspartate aminotransferase, or alanine aminotransferase level to >2 times the upper limit of normal and the performance of ERCP) as well as cholangiographic findings (eg, the stricture morphology and the tip shape of the distal duct). In conclusion, when biliary strictures are noticed after LDLT, prompt endoscopic interventions may improve the initial success rate of primary endoscopic management. In addition, the feasibility of primary endoscopic management can be predicted by the cholangiographic findings, which may help with the choice of the therapeutic modality. Liver Transpl, 2011. © 2011 AASLD. |
// /src/action/arrow_circle_right/materialiconstwotone/24px.svg
import { createSvgIcon } from './createSvgIcon';
export const SvgArrowCircleRightTwotone = createSvgIcon(
`<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24">
<g>
<rect fill="none" height="24" width="24"/>
</g>
<g>
<g>
<path d="M4,12c0-4.41,3.59-8,8-8s8,3.59,8,8s-3.59,8-8,8S4,16.41,4,12 M12,11l-4,0v2l4,0v3l4-4l-4-4V11z" opacity=".3"/>
<path d="M4,12c0-4.41,3.59-8,8-8s8,3.59,8,8s-3.59,8-8,8S4,16.41,4,12 M2,12c0,5.52,4.48,10,10,10c5.52,0,10-4.48,10-10 c0-5.52-4.48-10-10-10C6.48,2,2,6.48,2,12L2,12z M12,11l-4,0v2l4,0v3l4-4l-4-4V11z"/>
</g>
</g>
</svg>`
);
|
// web/router.go
package web
import (
"fmt"
"log"
"net/http"
"github.com/deeper-x/weblog/messages"
"github.com/deeper-x/weblog/settings"
)
// Run the web server
func Run() {
log.Println(messages.StartServer)
http.HandleFunc("/save", save)
http.HandleFunc("/load", load)
crt := fmt.Sprintf("%s/tls/%s", settings.RootDir, "server.crt")
key := fmt.Sprintf("%s/tls/%s", settings.RootDir, "server.key")
err := http.ListenAndServeTLS(":443", crt, key, nil)
if err != nil {
log.Panic(err)
}
}
|
If you've got a Samsung Gear VR, you can soon connect your Facebook account to "Like" and share 360-degree videos from inside it. There are already Facebook 360-degree clips in the Gear VR's video app, but next week, Oculus Video will start letting users sign into Facebook and get a personalized feed based on pages and people they follow. In the "coming weeks," they'll also be able to add reactions to the videos or share them while inside VR.
Aside from adding 360-degree Facebook videos at all, this is the first big tie-in between Oculus' Gear VR platform and the social network. Facebook bought Oculus in 2014, but it was slow about making its influence seen. Last month, though, it announced that a dedicated social VR team was figuring out how to use virtual reality to "connect and share." It also integrated new streaming technology to improve performance of 360-degree video.
Outside the Facebook integration, the Gear VR has some other new social components this week. As of tomorrow, users will be able to create profiles through Oculus' Gear VR app, where they can find friends or leave app reviews. Users can create rooms to watch Twitch or Vimeo streams with friends, and there's a group trivia app, along with a cooperative multiplayer version of the Gear VR's third-person hack-n-slash game Herobound.
We're still a long way from "Facebook in VR," but adding actual social features makes the Facebook 360-degree feed less like just another video channel. It's plausible that Facebook is working on dedicated apps as well, for either the Gear VR or the high-end Oculus Rift that's being shipped on March 28th. And Samsung, for its part, is pushing the Gear VR hard — the headset will ship free with all orders of Samsung's Galaxy S7 and S7 Edge until March 18th. |
/**
* @file script-engine.cpp
* @author Sina Karvandi ([email protected])
* @brief Interpret script engine affairs
* @details
* @version 0.1
* @date 2021-09-23
*
* @copyright This project is released under the GNU Public License v3.
*
*/
#include "pch.h"
//
// Global Variables
//
extern UINT64 g_ResultOfEvaluatedExpression;
extern UINT32 g_ErrorStateOfResultOfEvaluatedExpression;
extern BOOLEAN g_IsSerialConnectedToRemoteDebuggee;
/**
* @brief Get the value from the evaluation of single expression
* from local debuggee and remote debuggee
*
* @param Expr
* @param HasError
* @return UINT64
*/
UINT64
ScriptEngineEvalSingleExpression(string Expr, PBOOLEAN HasError)
{
PVOID CodeBuffer;
UINT64 BufferAddress;
UINT32 BufferLength;
UINT32 Pointer;
UINT64 Result = NULL;
//
// Prepend and append 'formats(' and ')'
//
Expr.insert(0, "formats(");
Expr.append(");");
//
// Run script engine handler
//
CodeBuffer = ScriptEngineParseWrapper((char *)Expr.c_str(), FALSE);
if (CodeBuffer == NULL)
{
//
    // return to indicate that parsing the expression failed
//
*HasError = TRUE;
return NULL;
}
//
// Print symbols (test)
//
// PrintSymbolBufferWrapper(CodeBuffer);
//
// Set the buffer and length
//
BufferAddress = ScriptEngineWrapperGetHead(CodeBuffer);
BufferLength = ScriptEngineWrapperGetSize(CodeBuffer);
Pointer = ScriptEngineWrapperGetPointer(CodeBuffer);
//
// Check if it's connected over remote debuggee (in the Debugger Mode)
//
if (g_IsSerialConnectedToRemoteDebuggee)
{
        //
        // Send it to the remote debuggee over serial
        //
KdSendScriptPacketToDebuggee(BufferAddress, BufferLength, Pointer, TRUE);
//
// Check whether there was an error in evaluation or not
//
if (g_ErrorStateOfResultOfEvaluatedExpression == DEBUGGER_OPERATION_WAS_SUCCESSFUL)
{
//
// Everything was fine, return the result of the evaluated
// expression and null the global holders
//
Result = g_ResultOfEvaluatedExpression;
g_ErrorStateOfResultOfEvaluatedExpression = NULL;
g_ResultOfEvaluatedExpression = NULL;
*HasError = FALSE;
}
else
{
//
// There was an error evaluating the expression from the kernel (debuggee)
//
g_ErrorStateOfResultOfEvaluatedExpression = NULL;
g_ResultOfEvaluatedExpression = NULL;
*HasError = TRUE;
Result = NULL;
}
}
else
{
//
// It's in vmi-mode,
// execute it locally with regs set to ZERO
//
Result = ScriptEngineEvalUInt64StyleExpressionWrapper(Expr, HasError);
}
//
// Remove the buffer of script engine interpreted code
//
ScriptEngineWrapperRemoveSymbolBuffer(CodeBuffer);
return Result;
}
|
/**
* Init props for storage policy.
*
* @param props properties for storage policy
*/
public void init(final Map<String, String> props) throws AnalysisException {
if (props == null) {
throw new AnalysisException("properties config is required");
}
checkRequiredProperty(props, STORAGE_RESOURCE);
this.storageResource = props.get(STORAGE_RESOURCE);
boolean hasCooldownDatetime = false;
boolean hasCooldownTtl = false;
if (props.containsKey(COOLDOWN_DATETIME)) {
hasCooldownDatetime = true;
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
try {
this.cooldownDatetime = df.parse(props.get(COOLDOWN_DATETIME));
} catch (ParseException e) {
throw new AnalysisException(String.format("cooldown_datetime format error: %s",
props.get(COOLDOWN_DATETIME)), e);
}
}
if (props.containsKey(COOLDOWN_TTL)) {
hasCooldownTtl = true;
this.cooldownTtl = props.get(COOLDOWN_TTL);
}
if (hasCooldownDatetime && hasCooldownTtl) {
throw new AnalysisException(COOLDOWN_DATETIME + " and " + COOLDOWN_TTL + " can't be set together.");
}
if (!hasCooldownDatetime && !hasCooldownTtl) {
throw new AnalysisException(COOLDOWN_DATETIME + " or " + COOLDOWN_TTL + " must be set");
}
if (!Catalog.getCurrentCatalog().getResourceMgr().containsResource(this.storageResource)) {
throw new AnalysisException("storage resource doesn't exist: " + this.storageResource);
}
} |
{-# LANGUAGE QuasiQuotes #-}
module Language.Thesaurus.RogetLite.Vindication where
import Language.Thesaurus
vindication :: ThProg
vindication = [thesaurus|
# Sentiment
## Vindication
noun:
* vindication
justification, warrant, exoneration, exculpation, acquittal, whitewashing
*extenuation
palliation, palliative, softening, mitigation
*reply
defense, recrimination
*apology
gloss, varnish, plea , salvo, excuse, extenuating circumstances, allowance, allowance to be made, locus paenitentiae
*apologist
vindicator, justifier, defendant
*justifiable charge
true bill
*justify
warrant, be an excuse for, lend a color, furnish a handle, vindicate, exculpate, disculpate, acquit , clear, set right, exonerate, whitewash, clear the skirts of
*extenuate
palliate, excuse, soften, apologize, varnish, slur, gloze, put a gloss, put a good face upon, mince, gloss over, bolster up, help a lame dog over a stile
*advocate
defend, plead one's cause, stand up for, stick up for, speak up for, contend for, speak for, bear out, keep in countenance, support, plead, , say in defense, plead ignorance, confess and avoid, propugn, put in a good word for
*take the will for the deed
make allowance for, do justice to, give one his due, give the Devil his due
*make good
prove the truth of, prove one's case, be justified by the event
adj:
* vindicated
vindicating, exculpatory, apologetic
*excusable
defensible, pardonable, venial, veniable, specious, plausible, justifiable
|]
|
module Chapter4.Chapter4Spec (spec) where
import Test.Hspec
import Chapter4.Chapter4
main :: IO ()
main = hspec spec
spec :: Spec
spec = do
describe "4.8 練習問題" $ do
it "1" $
halve [1, 2, 3, 4, 5, 6] `shouldBe` ([1, 2, 3], [4, 5, 6])
it "2" $
safetail [1, 2, 3] `shouldBe` [2, 3]
-- it "2 - 2" $
-- safetail [] `shouldBe` []
it "3" $
True `orElse` True `shouldBe` True
it "3 - 2" $
True `orElse2` False `shouldBe` True
it "3 - 3" $
False `orElse3` True `shouldBe` True
it "3 - 4" $
False `orElse` False `shouldBe` False
it "4" $
True `andAlso` True `shouldBe` True
it "4 - 2" $
True `andAlso` False `shouldBe` False
it "6" $
mult 2 3 4 `shouldBe` 24
|
package libgin
// Common utilities for the GIN services
import (
"fmt"
"net/http"
"os"
)
// ReadConfDefault returns the value of a configuration env variable.
// If the variable is not set, the default is returned.
func ReadConfDefault(key, defval string) string {
value, ok := os.LookupEnv(key)
if !ok {
return defval
}
return value
}
// ReadConf returns the value of a configuration env variable.
// If the variable is not set, an empty string is returned (ignores any errors).
func ReadConf(key string) string {
value, _ := os.LookupEnv(key)
return value
}
// GetArchiveSize returns the size of the archive at the given URL.
// If the URL is invalid or unreachable, an error is returned.
func GetArchiveSize(archiveURL string) (uint64, error) {
resp, err := http.Get(archiveURL)
if err != nil {
return 0, fmt.Errorf("Request for archive %q failed: %s\n", archiveURL, err.Error())
	}
	defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return 0, fmt.Errorf("Request for archive %q failed: %s\n", archiveURL, resp.Status)
}
if resp.ContentLength < 0 {
// returns -1 when size is unknown; let's turn it into an error
return 0, fmt.Errorf("Unable to determine size of %q", archiveURL)
}
return uint64(resp.ContentLength), nil
}
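// Illustrative usage sketch (not part of the original source; the URL is hypothetical):
//
//	size, err := GetArchiveSize("https://example.org/archives/repo.zip")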
|
/**
*
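 * Populates the given cell with a {@code Date} value.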
*
* @author Yin, Jack
* @since 1.0
*/
public class DateCellValuePopulator implements CellValuePopulator<Date> {
@Override
public void accept(Date input, Cell cell) {
cell.setCellValue(input);
}
} |
use std::io::Read;
fn main() {
let mut buf = String::new();
std::io::stdin().read_to_string(&mut buf).unwrap();
let mut iter = buf.split_whitespace();
let n: usize = iter.next().unwrap().parse().unwrap();
let a: i32 = iter.next().unwrap().parse().unwrap();
let b: i32 = iter.next().unwrap().parse().unwrap();
let c: i32 = iter.next().unwrap().parse().unwrap();
let l: Vec<i32> = (0..n)
.map(|_| iter.next().unwrap().parse().unwrap())
.collect();
let x = 4u32.pow(n as u32);
let mut ans = std::i32::MAX;
for bit in 0..x {
        let mut sum_a = 0i32;
        let mut sum_b = 0i32;
        let mut sum_c = 0i32;
        let mut cost = 0i32;
        let mut t = bit;
        let mut cursor = 0;
        while t > 0 {
            let m = t % 4;
            if m == 1 {
                if sum_a > 0 {
                    cost += 10;
                }
                sum_a += l[cursor];
            } else if m == 2 {
                if sum_b > 0 {
                    cost += 10;
                }
                sum_b += l[cursor];
            } else if m == 3 {
                if sum_c > 0 {
                    cost += 10;
                }
                sum_c += l[cursor];
            }
            t /= 4;
            cursor += 1;
        }
        if sum_a == 0 || sum_b == 0 || sum_c == 0 {
            continue;
        }
        ans = std::cmp::min(ans, cost + (a - sum_a).abs() + (b - sum_b).abs() + (c - sum_c).abs());
}
println!("{}", ans);
}
|
import smartbot.plugin
from smartbot.utils.web import requests_session, sprunge
from smartbot.exceptions import StopCommand, StopCommandWithHelp
from smartbot.formatting import Style
class Plugin(smartbot.plugin.Plugin):
"""A plugin for searching Freebase."""
names = ["define", "freebase"]
def __init__(self, key):
self.key = key
def _search_mid(self, query):
url = "https://www.googleapis.com/freebase/v1/search"
payload = {
"query": query,
"key": self.key,
"limit": 1
}
session = requests_session()
res = session.get(url, params=payload).json()
if res["result"]:
return res["result"][0]["mid"]
def _topic(self, mid):
url = "https://www.googleapis.com/freebase/v1/topic{}".format(mid)
session = requests_session()
return session.get(url).json()
def _look_for_text(self, topic):
description = topic["property"].get("/common/topic/description")
if description:
return description["values"][0]["text"], \
description["values"][0]["value"]
else:
return None, None
def on_command(self, msg, stdin, stdout):
query = " ".join(msg["args"][1:])
if not query:
query = stdin.read().strip()
if query:
mid = self._search_mid(query)
if mid:
topic = self._topic(mid)
short_text, long_text = self._look_for_text(topic)
if short_text and long_text:
url = sprunge(long_text)
print("{} {}".format(short_text, url), file=stdout)
else:
raise StopCommand(
"There isn't much information about this.")
else:
raise StopCommand("I don't know what you're on about.")
else:
raise StopCommandWithHelp(self)
def on_help(self):
return "{}|{} {}".format(
self.bot.format("define", Style.bold),
self.bot.format("freebase", Style.bold),
self.bot.format("topic", Style.underline),
)
|
/*
* Solution: the idea is put all the gate coordinates in a queue and then perform breadth-first search
* from each of the gates to calculate the distance to all empty rooms.
*
* Time complexity: O((m * n) + (m * n))
* Space complexity: O(n * m)
*/
#include <array>
#include <limits>
#include <queue>
#include <utility>
#include <vector>

class Solution
{
public:
bool onGrid(int m, int n, int row, int column)
{
return ((row >= 0 && row < m) && (column >= 0 && column < n));
}
void wallsAndGates(std::vector<std::vector<int>> & rooms)
{
if(rooms.empty())
{
return;
}
int m=int(rooms.size());
int n=int(rooms[0].size());
std::queue<std::pair<int, int>> Q;
for(int row=0;row<m;row++)
{
for(int column=0;column<n;column++)
{
if(rooms[row][column]==0)
{
Q.emplace(std::make_pair(row, column));
}
}
}
int level=1;
while(!Q.empty())
{
int qSize=int(Q.size());
for(int count=0;count<qSize;count++)
{
auto current=Q.front();
Q.pop();
int currentRow=current.first;
int currentColumn=current.second;
std::array<std::pair<int, int>, 4> directions={{{-1, 0}, {1, 0}, {0, -1}, {0, 1}}};
for(const auto & direction : directions)
{
int nextRow=currentRow + direction.first;
int nextColumn=currentColumn + direction.second;
if(onGrid(m, n, nextRow, nextColumn))
{
if(rooms[nextRow][nextColumn]==std::numeric_limits<int>::max())
{
rooms[nextRow][nextColumn]=level;
Q.emplace(std::make_pair(nextRow, nextColumn));
}
}
}
}
level++;
}
}
};
Childhood neglect: The role of the paediatrician.
Neglect is a pervasive form of child maltreatment. Health care practitioners often struggle with deciding when an action (or lack of action) by a caregiver constitutes inadequate care and is neglectful. The present article discusses the epidemiology, risk factors and outcomes of neglect. In addition, assessment using objective markers, such as harm and potential harm, in the identification of neglect is described, and unique factors that impact assessing and addressing issues of neglect in the clinical setting are discussed. Practical strategies for intervening in cases of neglect are discussed, including how to engage families in which there are concerns for neglect, mandated reporting, working collaboratively with children's services, ongoing monitoring of families, and how health care professionals can effectively engage in neglect prevention and advocacy. |
import os
from pages.service_page.fixtures.helpers.create_fixture import create_fixture
import pages.service_page.fixtures.helpers.components as components
# A Service Page that has a step with a location
def step_with_1_location():
steps = components.step_with_1_location()
home = components.home()
page_data = {
"imported_revision_id": None,
"live": False,
"parent": home,
"coa_global": False,
"title": "Service Page with 1 location step",
"slug": "service-page-with-1-location-step",
"add_topics": {
"topics": []
},
"short_description": "This is a very short description",
"additional_content": components.additional_content,
"steps": steps,
}
return create_fixture(page_data, os.path.basename(__file__))
|
def runner():
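    # NOTE: `dag` and `name` are assumed to be supplied by the enclosing scope
    # (e.g. a closure over a DAG of named sessions). Registering `done` as the
    # node's run handler lets it raise StopIteration to end the session loop.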
def done():
raise StopIteration()
dag.nodes[name]['run'] = done
return dag.nodes[name]['session'].run() |
/// Send a fire-and-forget direct-send message to remote peer `recipient`.
///
/// The returned Future simply resolves when the message has been enqueued on
/// the network actor's event queue. It therefore makes no reliable delivery
/// guarantees.
///
/// The Future will resolve to an `Err` if the event queue is unexpectedly
/// shutdown.
pub async fn send_to(
&mut self,
recipient: PeerId,
protocol: ProtocolId,
message: TMessage,
) -> Result<(), NetworkError> {
self.inner
.send(NetworkRequest::SendMessage(
recipient,
direct_send::Message {
protocol,
mdata: message.to_bytes().unwrap(),
},
))
.await?;
Ok(())
} |
def file_decrypt(self, local_input_file, local_output_file):
original_md5, decrypted_md5 = crypto.decrypt_file(local_input_file, local_output_file, self.crypto_key)
        self.logger.info(f'Decrypted local file "{local_input_file}" with md5 "{original_md5}" '
                         f'to output file "{local_output_file}" with md5 "{decrypted_md5}"')
return {'original_md5': original_md5, 'decrypted_md5': decrypted_md5} |
It is a bitter indictment of an army in trouble. It was written by the American soldier Bowe Bergdahl in his last email to his parents sent just before he walked off his base in eastern Afghanistan on 30 June 2009.
Within hours, he was picked up by the Taliban, who held him for five years until his exchange for five senior Taliban leaders held in the US prison at Guantanamo Bay.
“The US army is the biggest joke the world has to laugh at,” wrote Sergeant Bergdahl in an email later published by Rolling Stone magazine. “It is the army of liars, backstabbers, fools, and bullies. The few good SGTs [sergeants] are getting out as soon as they can, and they are telling us privates to do the same.”
Sgt Bergdahl had joined the army when it was short of soldiers to send to Afghanistan as part of the “surge” in the number of combat brigades there. With too few men, it had started to issue “waivers” to recruits facing felony charges or drugs problems who previously would have been turned down for the army. For Sgt Bergdahl, a crack shot, well-educated and with a romantic vision of what professional soldiering involved, disillusionment set in fast.
His company was understrength and demoralised. He complained that three good sergeants had been forced to move to another company and “one of the biggest shit bags is being put in charge of the team”. The commander of his battalion was a “conceited old fool” and other officers were as bad: “In the US army you are cut down for being honest… but if you are a conceited brown-nosing shit bag you will be allowed to do whatever you want, and you will be handed your higher rank.”
Sgt Bergdahl had taken seriously the counter-insurgency strategy supposedly aimed at winning the “hearts and minds” of Afghans. Instead, he found that US soldiers regarded Afghans with aggressive contempt: “I am sorry for everything here. These people need help, yet what they get is the most conceited country in the world telling them that they are nothing and that they are stupid, that they have no idea how to live.”
He spoke of seeing an Afghan child run over by an American heavy-armoured truck, an event which his parents believe may have led him to leave his base. His father responded to his last message with an email in which the subject line was titled: “Obey Your Conscience.”
The life stories of the six men – five Afghans and one American – exchanged this weekend shows how quickly the mood of armies in Afghanistan can switch from full confidence in victory to frustration and defeat. In the summer of 2001 the Taliban rightly believed they were close to taking over the whole of Afghanistan as their enemies were penned into the mountains of the north-east.
But 9/11 changed all that and by November the Americans were cock-a-hoop that they had won an easy success. Eight years later Sgt Bergdahl’s reasons for going Awol illustrate how far Afghanistan turned into a demoralising and unwinnable war for the US.
The Taliban had also seen hopes of victory turn sour in a much shorter period. Mullah Mohammed Fazl, also known as Mullah Fazel Mazloom, was the leader of 10,000 Taliban fighters held responsible for massacres of Hazara and Tajiks in northern Afghanistan.
He surrendered to the opposition Northern Alliance in 2001. With him was the governor of Balkh province, Mullah Norullah Noori. They were taken to the battleship USS Bataan and then to Guantanamo.
Ever since exploratory talks started between the US and the Taliban, the latter's first demand was for these two men to be released. Other prisoners include Khirullah Said Wali Khairkhwa, who was a founding member of the Taliban in 1994. In those early days after the fall of the Taliban, an over-confident US saw no reason why former Taliban leaders should be conciliated. The two remaining detainees are among the senior Taliban security officials reported to have vainly reached out to the Americans.
Who could have imagined at the end of 2001 that 13 years later the US would be exchanging prisoners with the Taliban? For the US, getting back their only prisoner detaches them further from Afghanistan, the handover of the five leaders is a sign of their legitimacy and strength. |
import { suite, test, slow, timeout } from 'mocha-typescript';
import { mockFetch } from '../mock/fetch';
import store from '@/store';
import 'chai';
@suite('Comment Test')
class CommentTest {
@test 'comment getList'() {
    let newsId = 123; // arbitrary value; the actual id does not matter
let comments = [
{
id: 1000001,
content: 'comments 1',
author: {
id: 1003,
name: '<NAME>',
email: '<EMAIL>'
}
},
{
id: 1000002,
content: 'comments 2',
author: {
id: 1003,
name: '<NAME>',
email: '<EMAIL>'
}
}
];
mockFetch(comments);
return store
.dispatch('comment/getList', newsId)
.then(() => {
let commentIds = [1000001, 1000002];
let checkState = store.state.comment.map[newsId].should.deep.equal(commentIds);
let checkEntities = store.getters['comment/list'](newsId).should.deep.equal(comments);
return checkState && checkEntities;
});
}
@test async 'comment delete'() {
let newsId = Date.now();
let deleteCommentId = 1000001;
const comments = [
{
id: deleteCommentId,
content: 'comments 1',
author: {
id: 1006,
name: '<NAME>',
email: '<EMAIL>'
}
},
{
id: 1000002,
content: 'comments 2',
author: {
id: 1006,
name: '<NAME>',
email: '<EMAIL>'
}
}
];
// mock get commemts
mockFetch(comments);
// mock delete comment
mockFetch({
id: deleteCommentId
});
await store.dispatch('comment/getList', newsId);
return store
.dispatch('comment/delete', {newsId, commentId: deleteCommentId})
.then(() => {
let commentIds = [1000001, 1000002];
let checkState = store.state.comment.map[newsId].should.deep.equal([1000002]);
let checkEntities = store.getters['comment/list'](newsId).should.deep.equal([comments[1]]);
return checkState && checkEntities;
});
}
} |
// Copyright 2021 D2iQ, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package archive
import (
"fmt"
"os"
"path/filepath"
"github.com/mholt/archiver/v3"
)
func ArchiveDirectory(dir, outputFile string) error {
fi, err := os.ReadDir(dir)
if err != nil {
return fmt.Errorf("failed to read directory: %w", err)
}
filesToArchive := make([]string, 0, len(fi))
for _, f := range fi {
filesToArchive = append(filesToArchive, filepath.Join(dir, f.Name()))
}
tempTarArchive := filepath.Join(filepath.Dir(outputFile), "."+filepath.Base(outputFile))
defer os.Remove(tempTarArchive)
if err = archiver.Archive(filesToArchive, tempTarArchive); err != nil {
return fmt.Errorf("failed to create archive: %w", err)
}
if err := os.Rename(tempTarArchive, outputFile); err != nil {
return fmt.Errorf("failed to rename temporary tar bundle to output file: %w", err)
}
return nil
}
|
/**
 * Class used to indicate the sub-tokens, i.e. the value inside
 * the braces; called by the BlockParser.
*/
public class Indicator {
/**
     * Just a textual representation showing that this
     * is an indicator class for the tokens
*/
@Override
public String toString() {
return "<block>";
}
} |
package com.sudoplay.mc.kormetals.module.metal.item;
import com.sudoplay.mc.kor.core.config.text.TextConfigData;
import com.sudoplay.mc.kor.core.generation.annotation.*;
import com.sudoplay.mc.kor.spi.Kor;
import com.sudoplay.mc.kor.spi.registry.dependency.KorRegistrationTextConfigDependency;
import com.sudoplay.mc.kor.spi.registry.dependency.KorTextConfigDependency;
import com.sudoplay.mc.kor.spi.registry.injection.KorInject;
import com.sudoplay.mc.kor.spi.registry.injection.KorTextConfig;
import com.sudoplay.mc.kormetals.KorMetals;
import com.sudoplay.mc.kormetals.KorMetalsCreativeTab;
import com.sudoplay.mc.kormetals.module.metal.ModuleMetal;
import com.sudoplay.mc.kormetals.shared.MetalType;
import static com.sudoplay.mc.kormetals.module.metal.ModuleMetal.Config.*;
/**
* Created by sk3lls on 11/12/2016.
*/
@KorRegistrationTextConfigDependency(dependsOnAtLeastOneOf = {
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "aluminum"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "copper"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "lead"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "nickel"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "platinum"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "silver"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "tin"),
@KorTextConfigDependency(filename = FILENAME, category = CATEGORY_ITEM_DUST, key = "zinc")
})
@KorGenerateItemSubTypedAssets(
name = ItemDust.NAME,
modId = KorMetals.MOD_ID,
subTypes = {
"aluminum",
"copper",
"lead",
"nickel",
"platinum",
"silver",
"tin",
"zinc"
}
)
@KorGenerateLangEntries(entries = {
@KorLangEntry(key = "item." + ItemDust.NAME + "_aluminum.name", value = "Alumina Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_copper.name", value = "Copper Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_lead.name", value = "Lead Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_nickel.name", value = "Nickel Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_platinum.name", value = "Platinum Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_silver.name", value = "Silver Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_tin.name", value = "Tin Dust"),
@KorLangEntry(key = "item." + ItemDust.NAME + "_zinc.name", value = "Zinc Dust")
})
@KorGenerateImageSlices(slices = {
@KorImageSliceEntry(col = 17, row = 3, target = "items/" + ItemDust.NAME + "_aluminum", source = "KorMetals.png"),
@KorImageSliceEntry(col = 1, row = 3, target = "items/" + ItemDust.NAME + "_copper", source = "KorMetals.png"),
@KorImageSliceEntry(col = 4, row = 3, target = "items/" + ItemDust.NAME + "_lead", source = "KorMetals.png"),
@KorImageSliceEntry(col = 5, row = 3, target = "items/" + ItemDust.NAME + "_nickel", source = "KorMetals.png"),
@KorImageSliceEntry(col = 6, row = 3, target = "items/" + ItemDust.NAME + "_platinum", source = "KorMetals.png"),
@KorImageSliceEntry(col = 3, row = 3, target = "items/" + ItemDust.NAME + "_silver", source = "KorMetals.png"),
@KorImageSliceEntry(col = 2, row = 3, target = "items/" + ItemDust.NAME + "_tin", source = "KorMetals.png"),
@KorImageSliceEntry(col = 16, row = 3, target = "items/" + ItemDust.NAME + "_zinc", source = "KorMetals.png")
})
public class ItemDust extends
AbstractItemMetal {
/* package */ static final String NAME = "dust";
@KorInject
public ItemDust(
Kor kor,
@KorTextConfig(path = ModuleMetal.MODULE_ID, file = ModuleMetal.MODULE_ID + ".cfg") TextConfigData configData
) {
super(
kor.getModId(),
NAME,
"dust",
MetalType.values(),
configData
);
this.setCreativeTab(kor.get(KorMetalsCreativeTab.class));
}
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package interviews.intelligrape;
import java.io.*;
import java.util.*;
import java.text.*;
import java.math.*;
import java.util.regex.*;
/**
*
* @author bharat
*/
public class QuestionOne {
static int minimumDenominations(int totalAmount) {
int result = 0;
int denom[] = {20, 10, 5, 2, 1};
for (int i = 0; i < denom.length; i++) {
result += totalAmount / denom[i];
totalAmount = totalAmount % denom[i];
}
return result;
}
static boolean validate(String identifier) {
char[] id = identifier.toCharArray();
if (identifier.startsWith("$") || identifier.startsWith("_") || (id[0] >= 'A' && id[0] <= 'Z') || (id[0] >= 'a' && id[0] <= 'z')) {
for (int i = 1; i < id.length; i++) {
if (!(id[i] == '$' || id[i] == '_' || (id[i] >= 'A' && id[i] <= 'Z') || (id[i] >= 'a' && id[i] <= 'z') || (id[i] >= '0' && id[i] <= '9'))) {
return false;
}
}
return true;
}
return false;
}
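    // Illustration (derived from the rules above): validate("$foo") and
    // validate("_bar1") return true; validate("9baz") and validate("qu-ux") return false.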
/*
* Complete the function below.
*/
static int[] searchMinSumPair(int[] array) {
int min1, min2;
if (array[0] <= array[1]) {
min1 = array[0];
min2 = array[1];
} else {
min1 = array[1];
min2 = array[0];
}
for (int i = 2; i < array.length; i++) {
if (array[i] < min1) {
min2 = min1;
min1 = array[i];
} else if (array[i] < min2) {
min2 = array[i];
}
}
return new int[]{min1, min2};
}
static int convert(int n) {
int b = 5;
int q = n, r;
        if (n == 0) {
            return 0; // Integer.parseInt("") below would throw for n == 0
        }
        String ans = "";
while (q != 0) {
r = q % b;
ans = r + ans;
q = q / b;
}
return Integer.parseInt(ans);
}
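    // Illustration: convert(7) == 12, since 7 written in base 5 is "12".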
static int[] sortArray(int[] arr) {
int res[] = new int[arr.length];
        Map<Integer, Integer> mapping = new HashMap<>();
mapping.put(0, 9);
mapping.put(1, 4);
mapping.put(2, 8);
mapping.put(3, 7);
mapping.put(4, 2);
mapping.put(5, 1);
mapping.put(6, 6);
mapping.put(7, 5);
mapping.put(8, 0);
mapping.put(9, 3);
        Map<Integer, Integer> rmapping = new HashMap<>();
rmapping.put(9, 0);
rmapping.put(4, 1);
rmapping.put(8, 2);
rmapping.put(7, 3);
rmapping.put(2, 4);
rmapping.put(1, 5);
rmapping.put(6, 6);
rmapping.put(5, 7);
rmapping.put(0, 8);
rmapping.put(3, 9);
int[] newArr = new int[arr.length];
for (int i = 0; i < arr.length; i++) {
            newArr[i] = mapping.get(arr[i]);
}
Arrays.sort(newArr);
for (int i = 0; i < newArr.length; i++) {
            res[i] = rmapping.get(newArr[i]);
}
return res;
}
public static void main(String[] args) throws IOException {
//int toBeSorted[] = {1, 2, 3, 5, 1};
int toBeSorted[] = {7, 3, 2, 8, 4};
int[] res;
res = sortArray(toBeSorted);
for (int res_i = 0; res_i < res.length; res_i++) {
//System.out.println(String.valueOf(res[res_i]));
}
System.err.println("Minimum Denominations for 99 : "+minimumDenominations(99));
}
}
|
/// Generates a set of writes necessary to "fix" the bids, ie.:
/// - set the bids of the new validators to their desired stakes,
/// - remove the bids of the old validators that are no longer validators,
/// - remove all the bids that are larger than the smallest bid among the new validators
/// (necessary, because such bidders would outbid the validators decided by the social consensus).
pub fn generate_entries_removing_bids(
builder: &mut LmdbWasmTestBuilder,
validators_diff: &ValidatorsDiff,
new_snapshot: &SeigniorageRecipientsSnapshot,
) -> BTreeMap<Key, StoredValue> {
let large_bids = find_large_bids(builder, new_snapshot);
let to_unbid = validators_diff.removed.union(&large_bids);
validators_diff
.added
.iter()
.map(|pkey| {
let amount = *new_snapshot
.values()
.next()
.unwrap()
.get(pkey)
.unwrap()
.stake();
let account_hash = pkey.to_account_hash();
let account = builder.get_account(account_hash).unwrap();
(
Key::Bid(account_hash),
Bid::unlocked(
pkey.clone(),
account.main_purse(),
amount,
Default::default(),
)
.into(),
)
})
.chain(to_unbid.into_iter().map(|pkey| {
let account_hash = pkey.to_account_hash();
let account = builder.get_account(account_hash).unwrap();
(
Key::Bid(account_hash),
Bid::empty(pkey.clone(), account.main_purse()).into(),
)
}))
.collect()
} |
def rstrip(self, chars=None):
new_text = self.text.rstrip() if chars is None else self.text.rstrip(chars)
pattern = TextPattern(new_text)
return pattern |
/**
 * JUnit test extension that provides a configurable PostgreSQL instance.
 * An additional layer of abstraction that allows easy switching between
 * embedded PostgreSQL implementations (otj-opentable, zonkyio, testcontainers).
*
* @author Nikolay Kondratyev
*/
public class PostgresDbExtension implements BeforeAllCallback, AfterAllCallback {
private volatile DataSource dataSource;
private volatile PreparedDbProvider provider;
private volatile ConnectionInfo connectionInfo;
private final List<Consumer<EmbeddedPostgres.Builder>> builderCustomizers = new CopyOnWriteArrayList<>();
@Override
public void beforeAll(ExtensionContext extensionContext) throws Exception {
provider = PreparedDbProvider.forPreparer(dataSource -> {}, builderCustomizers);
connectionInfo = provider.createNewDatabase();
dataSource = provider.createDataSourceFromConnectionInfo(connectionInfo);
}
@Override
public void afterAll(ExtensionContext extensionContext) {
dataSource = null;
connectionInfo = null;
provider = null;
}
public DataSource getTestDatabase() {
if (dataSource == null) {
throw new AssertionError("not initialized");
}
return dataSource;
}
public int getPort() {
if (connectionInfo == null) {
throw new AssertionError("not initialized");
}
return connectionInfo.getPort();
}
public PostgresDbExtension withAdditionalStartupParameter(String key, String value) {
builderCustomizers.add(builder -> builder.setServerConfig(key, value));
return this;
}
} |
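// Illustrative JUnit 5 usage sketch (not part of the original source; names and
// the startup parameter are hypothetical):
//
//   @RegisterExtension
//   static final PostgresDbExtension POSTGRES = new PostgresDbExtension()
//           .withAdditionalStartupParameter("shared_buffers", "128MB");
//
//   // POSTGRES.getTestDatabase() then yields a ready DataSource in each test.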
package com.pureon.pur_wallet.view.webview;
import com.pureon.pur_wallet.view.webview.item.Message;
import com.pureon.pur_wallet.view.webview.item.Transaction;
public interface OnSignMessageListener {
void onSignMessage(Message<Transaction> message);
}
|
package backends
import (
"strings"
"time"
"github.com/jmoiron/sqlx"
)
type Backend interface {
// Setup does the initial configuration of the backend.
Setup(db *sqlx.DB, table string, tableSchema string)
	// InsertRecord inserts a migration record into the DB.
InsertRecord(tx *sqlx.Tx, name string, hash string, comment string) error
// HasMigrationTable returns true if the migration table exists.
HasMigrationTable() (bool, error)
// QueryPrevious queries and sets the records of all previous migrations.
QueryPrevious() (map[string]string, error)
	// CreateMigrationTable creates the migrations table and returns the query used to
// do it.
CreateMigrationTable() (string, error)
RepairHashes(tx *sqlx.Tx, hashes map[string]string) error
}
type MigrationRecord struct {
ID int `db:"id"`
Name string `db:"name"`
Hash string `db:"hash"`
Date time.Time `db:"date"`
Comment string `db:"comment"`
}
// nameTable takes a query and replaces all instances of "??" with the tableName
func nameTable(query string, tableName string) string {
return strings.Replace(query, "??", tableName, -1)
}
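// Illustration: nameTable("create table ?? (id int)", "migrations")
// returns "create table migrations (id int)".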
func InsertRecord(tx *sqlx.Tx, query string, args ...interface{}) error {
_, err := tx.Exec(query, args...)
return err
}
func HasMigrationTable(db *sqlx.DB, query string) (bool, error) {
exists := false
err := db.Get(&exists, query)
// If this query fails something has gone terribly wrong.
if err != nil {
return false, err
}
return exists, nil
}
// QueryPrevious runs the query from the Backend.QueryPrevious and returns the
// results.
func QueryPrevious(db *sqlx.DB, query string) (map[string]string, error) {
mr := make([]MigrationRecord, 0, 10)
err := db.Select(&mr, query)
if err != nil {
return nil, err
}
prev := make(map[string]string)
for _, r := range mr {
prev[r.Name] = r.Hash
}
return prev, nil
}
func CreateMigrationTable(db *sqlx.DB, query string) (string, error) {
_, err := db.Exec(query)
return query, err
}
func RepairHashes(tx *sqlx.Tx, query string, hashes map[string]string) error {
for name, hash := range hashes {
if hash == "" {
continue
}
_, err := tx.Exec(query, hash, name)
if err != nil {
return err
}
}
return nil
}
|
/**
* An implementation of {@link INeighborhood} interface which provides <i>Gaussian</i>
* neighborhood.
*/
public class DoubleArrayNormNeighborhood implements INeighborhood<DoubleArraySolution> {
private static final Random rand = new Random();
private double[] deltas;
/**
* Constructor.
*
* @param deltas deltas.
*/
public DoubleArrayNormNeighborhood(double[] deltas) {
this.deltas = deltas;
}
@Override
public DoubleArraySolution randomNeighbor(DoubleArraySolution solution) {
DoubleArraySolution neighbor = solution.duplicate();
double[] bounds = Arrays.stream(deltas).map(d -> rand.nextGaussian() * d).toArray();
neighbor.randomize(rand, Arrays.stream(bounds).map(d -> d * -1).toArray(), bounds);
return neighbor;
}
} |
import { after, before, first, insertAfter, insertBefore, last, remove } from "rx-drag/utils/ArrayHelper";
import { IRect } from "rx-drag/models/IRect";
import { cloneObject } from "rx-drag/utils/cloneObject";
import { ID } from "rx-drag/models/baseTypes";
import { makeAutoObservable } from "mobx";
import { getDomByRxid } from "rx-drag/utils/getDomByRxid";
export const DADA_RXID_CONST = "data-rxid"
export class RxNode<T>{
static idSeed:number = 1;
id: ID = '';
meta!: T;
children: Array<RxNode<T>>;
parent?:RxNode<T>;
static make<T>(meta:T){
let node = new RxNode<T>();
node.seedId();
node.meta = meta;
let metaAny = meta as any
const metaChildren = metaAny.children as Array<T>|undefined;
node.children = [];
metaChildren?.forEach(child=>{
let childNode = RxNode.make<T>(child);
childNode.parent = node;
node.children.push(childNode);
})
    // Remove children from the meta to avoid polluting the data later
metaAny.children = undefined;
return node;
}
constructor(){
this.children = [];
makeAutoObservable(this);
}
setMeta(meta:T){
this.meta = meta;
}
setChildren(children:Array<RxNode<T>>|undefined){
this.children = children || [];
}
seedId(){
this.id = RxNode.idSeed.toString();
RxNode.idSeed ++
}
get rxid(){
return 'rx-' + this.id;
}
get rect():IRect|undefined{
return this.dom?.getBoundingClientRect();
}
  // Full copy, including copying the ID
copy(){
let copy = new RxNode<T>();
copy.meta = cloneObject(this.meta);
copy.id = this.id;
copy.children = [];
this.children.forEach(child=>{
let childCopy = child.copy();
childCopy.parent = copy;
copy.children.push(childCopy);
})
return copy;
}
  // Clone a copy with a new ID; do not insert it into the parent node
clone(){
let metaCopy = cloneObject(this.getMeta());
let newNode = RxNode.make<T>(metaCopy);
return newNode;
}
duplicate(){
const newNode = this.clone();
newNode.parent = this.parent;
newNode.moveAfter(this);
return newNode;
}
getNode(id?:ID):RxNode<T>|undefined{
if(id === this.id){
return this;
}
for(var i = 0; i < this.children.length; i ++){
const child = this.children[i];
let childOfChild = child.getNode(id);
if(childOfChild){
return childOfChild
}
}
return undefined;
}
get dom(){
return getDomByRxid(this.rxid);
}
remove(){
this.parent && remove(this, this.parent?.children);
this.parent = undefined;
}
moveBefore(target:RxNode<T>){
this.remove();
insertBefore(this, target, target.parent?.children);
this.parent = target.parent;
}
moveAfter(target:RxNode<T>){
this.remove();
insertAfter(this, target, target.parent?.children);
this.parent = target.parent;
}
moveIn(target:RxNode<T>){
this.remove();
target.children.push(this);
this.parent = target;
}
moveInTop(target:RxNode<T>){
this.remove();
target.children = [this, ...target.children];
this.parent = target;
}
firstChild(){
return first(this.children);
}
lastChild(){
return last(this.children);
}
previousSibling() : RxNode<T>|undefined{
if(this.parent?.children){
      // Avoid [mobx] out-of-bounds read by converting with map first
return before(this, this.parent?.children.map(child=>child))
}
}
nextSibling() : RxNode<T>|undefined{
if(this.parent?.children){
      // Avoid [mobx] out-of-bounds read by converting with map first
return after(this, this.parent?.children.map(child=>child));
}
}
getMeta(){
let metaAny = cloneObject(this.meta);
metaAny.children = [];
this.children.forEach(child=>{
metaAny.children.push(child.getMeta());
})
return metaAny;
}
  // Check whether this node is an ancestor of the given node
isAncestorOf(targetId:ID):boolean{
if(!this.children){
return false;
}
for(var i = 0; i < this.children.length; i++){
if(this.children[i].id === targetId){
return true;
}
if(this.children[i].isAncestorOf(targetId)){
return true;
}
}
return false;
}
  // Check whether this node is a descendant of the given node
isPosterityOf(targetId:ID):boolean{
if(this.parent){
if(this.parent.id === targetId){
return true;
}
return this.parent.isPosterityOf(targetId)
}
return false;
}
exchangeTo(target:RxNode<T>){
let targetMeta = target.meta;
let targetChildren = target.children;
let targetId = target.id;
target.meta = this.meta;
target.children = this.children;
target.id = this.id;
this.meta = targetMeta;
this.children = targetChildren;
this.id = targetId;
}
getChildrenMetas(){
let metas:Array<T> = [];
this.children.forEach(child=>{
metas.push(child.getMeta());
})
return metas;
}
parse(metas?:Array<T>){
this.children = [];
metas && metas.forEach((meta: any)=>{
let node = RxNode.make<T>(meta);
node.parent = this;
this.children.push(node);
})
}
} |
package com.cyc.model.generator;
import com.cyc.model.generator.TemplateGenerator;
import com.cyc.model.generator.WebGenerator;
import com.cyc.model.templates.BaseTemplateTest;
import com.cyc.model.xml.XmlProjectLoader;
import com.cyc.model.xml.XmlProjectLoaderTest;
import com.cyc.model.objects.ProjectObj;
import com.cyc.kb.exception.KbException;
import java.util.List;
import javax.xml.bind.JAXBException;
import org.junit.Test;
import static org.junit.Assert.*;
import org.xml.sax.SAXException;
/**
*
* @author nwinant
*/
public class WebGeneratorTest extends BaseTemplateTest {
@Test
public void test() throws JAXBException, SAXException, KbException, Exception {
final XmlProjectLoader loader = new XmlProjectLoader(XmlProjectLoaderTest.WORKSPACE);
final ProjectObj project = loader.loadProject(XmlProjectLoaderTest.CMD_VALID);
final List<String> directives = WebGenerator.getDefaultDirectives();
directives.add(TemplateGenerator.GENERATE_STRINGS_ONLY_DIRECTIVE);
final WebGenerator generator = new WebGenerator(directives);
generator.generate(project);
//fail("Requires testing of results.");
org.junit.Assume.assumeTrue("Requires testing of results.", false); // TODO
}
}
|
import * as core from '@actions/core';
import * as path from 'path';
import * as util from 'util';
import * as fs from 'fs';
import * as toolCache from '@actions/tool-cache';
import * as os from 'os';
const clusterctlToolName = 'clusterctl';
function getkubectlDownloadURL(version: string): string {
    switch (os.type()) {
        case 'Linux':
            return util.format('https://github.com/kubernetes-sigs/cluster-api/releases/download/%s/clusterctl-linux-amd64', version);
        case 'Darwin':
            return util.format('https://github.com/kubernetes-sigs/cluster-api/releases/download/%s/clusterctl-darwin-amd64', version);
        default:
            throw new Error(`Unsupported OS type: ${os.type()}`);
    }
}
async function downloadClusterctl(version: string): Promise<string> {
let cachedToolpath = toolCache.find(clusterctlToolName, version);
let clusterctlDownloadPath = '';
if (!cachedToolpath) {
try {
clusterctlDownloadPath = await toolCache.downloadTool(getkubectlDownloadURL(version));
} catch (exception) {
throw new Error('DownloadclusterctlFailed');
}
cachedToolpath = await toolCache.cacheFile(clusterctlDownloadPath, clusterctlToolName + getExecutableExtension(), clusterctlToolName, version);
}
const clusterPath = path.join(cachedToolpath, clusterctlToolName + getExecutableExtension());
fs.chmodSync(clusterPath, '777');
return clusterPath;
}
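// Illustrative usage sketch (not part of the original source; the version tag is hypothetical):
//
//   const clusterctlPath = await downloadClusterctl('v0.3.14');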
function getExecutableExtension(): string {
if (os.type().match(/^Win/)) {
return '.exe';
}
return '';
}
async function run() {
let version = core.getInput('clusterctl_version', { 'required': true });
let cachedPath = await downloadClusterctl(version);
console.log(`clusterctl tool version: '${version}' has been cached at ${cachedPath}`);
core.setOutput('kubectl-path', cachedPath);
}
run().catch(core.setFailed); |
// Appends the |debugDatabase| to the end of |baseFile| and writes the footer
// so the runtime can find it.
static LogicalResult appendDebugDatabase(std::vector<int8_t> &baseFile,
Artifact &debugFileArtifact) {
auto debugFileOr = debugFileArtifact.read();
if (!debugFileOr.hasValue()) {
return failure();
}
auto debugFile = std::move(debugFileOr).getValue();
auto baseFileSize = IREE::Util::align(baseFile.size(), 16);
auto debugFileSize = IREE::Util::align(debugFile.size(), 16);
struct Footer {
uint8_t magic[8];
uint32_t version;
uint32_t flags;
uint64_t libraryOffset;
uint64_t librarySize;
uint64_t debugOffset;
uint64_t debugSize;
} footer = {{0}};
std::memcpy(footer.magic, "IREEDBG\0", sizeof(footer.magic));
footer.version = 0;
footer.librarySize = baseFile.size();
footer.debugOffset = baseFileSize;
footer.debugSize = debugFile.size();
baseFile.resize(baseFileSize + debugFileSize + sizeof(footer));
std::memcpy(baseFile.data() + baseFileSize, debugFile.data(),
debugFile.size());
std::memcpy(baseFile.data() + baseFileSize + debugFileSize, &footer,
sizeof(footer));
return success();
} |
/// <reference path="enums.ts"/>
interface JQuery {
datepicker(param): JQuery
}
interface HTMLElement {
value: string
}
interface FrequencyData {
mode: App.Enums.FrequencyMode,
count?: number,
month?: number,
date?: Date,
amount: number
}
interface TermData {
from: Date,
to: Date
}
interface ZogenData {
mode: App.Enums.FrequencyMode,
count?: number,
month?: number,
date?: Date,
amount: number
}
interface ItemData {
selected: boolean,
name: string,
spendingIncome: boolean, // true->spending, false->income
frequency: FrequencyData,
term: TermData,
zogen: ZogenData
}
interface GraphSettingData {
    vMax: number, // maximum of the vertical axis (if undefined, defer to the library's auto setting)
    vMin: number // minimum of the vertical axis (if undefined, defer to the library's auto setting)
}
interface SettingData {
graphSetting: GraphSettingData
}
interface Window {
File,
FileReader,
FileList
}
declare class AutoNumeric {
constructor(selector: string, options: object);
getNumber(): number;
set(n: number, options?: object): void
}
|
/**
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.location.impl.campus;
import org.kuali.rice.krad.bo.PersistableBusinessObjectBase;
import org.kuali.rice.krad.data.jpa.converters.BooleanYNConverter;
import org.kuali.rice.location.framework.campus.CampusEbo;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.OneToOne;
import javax.persistence.Table;
@Entity
@Table(name = "KRLC_CMP_T")
public class CampusBo extends PersistableBusinessObjectBase implements CampusEbo {
private static final long serialVersionUID = 787567094298971223L;
@Id
@Column(name = "CAMPUS_CD")
private String code;
@Column(name = "CAMPUS_NM")
private String name;
@Column(name = "CAMPUS_SHRT_NM")
private String shortName;
@Column(name = "CAMPUS_TYP_CD")
private String campusTypeCode;
@Column(name = "ACTV_IND")
@Convert(converter = BooleanYNConverter.class)
private boolean active;
@OneToOne(fetch = FetchType.EAGER)
@JoinColumn(name = "CAMPUS_TYP_CD", insertable = false, updatable = false)
private CampusTypeBo campusType;
@Override
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
@Override
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String getShortName() {
return shortName;
}
public void setShortName(String shortName) {
this.shortName = shortName;
}
public String getCampusTypeCode() {
return campusTypeCode;
}
public void setCampusTypeCode(String campusTypeCode) {
this.campusTypeCode = campusTypeCode;
}
@Override
public boolean isActive() {
return active;
}
@Override
public void setActive(boolean active) {
this.active = active;
}
@Override
public CampusTypeBo getCampusType() {
return campusType;
}
public void setCampusType(CampusTypeBo campusType) {
this.campusType = campusType;
}
/**
* Converts a mutable bo to its immutable counterpart
* @param bo the mutable business object
* @return the immutable object
*/
public static org.kuali.rice.location.api.campus.Campus to(CampusBo bo) {
if (bo == null) {
return null;
}
return org.kuali.rice.location.api.campus.Campus.Builder.create(bo).build();
}
/**
* Converts a immutable object to its mutable counterpart
* @param im immutable object
* @return the mutable bo
*/
public static CampusBo from(org.kuali.rice.location.api.campus.Campus im) {
if (im == null) {
return null;
}
CampusBo bo = new CampusBo();
bo.code = im.getCode();
bo.name = im.getName();
bo.shortName = im.getShortName();
bo.active = im.isActive();
if (im.getCampusType() != null) {
bo.campusTypeCode = im.getCampusType().getCode();
}
bo.campusType = CampusTypeBo.from(im.getCampusType());
bo.versionNumber = im.getVersionNumber();
bo.objectId = im.getObjectId();
return bo;
}
} |
Lightroom for mobile Android 1.2 now available
Tonight we’re announcing the immediate availability of Lightroom version 1.2 for Android phones and tablets.
Edit images faster by copying image adjustments and pasting them onto another photo
Crop the perfect photo with a re-designed experience on your Android device that enables you to quickly adjust, align and auto-straighten.
Easily find your favorite images! The new Segmented view in Collections give you a different way to view and engage with your photos.
Raw file support for the Panasonic Lumix CM1 Communication Camera
Version 1.2 also includes fixes for bugs and other issues identified by customers, including:
Screen occasionally went blank after changing the orientation from landscape to portrait.
Lightroom could crash when customers tap the sign-in button.
Fixed issue that could cause crashes when scrolling through a large collection and changing the device orientation from portrait to landscape (or vice versa).
Hardware back button was not closing any contextual menus.
Slow performance when swiping from image to image.
Copying or moving an image into a different collection was not working correctly. Please note that this issue only occurred on certain Samsung tablets.
Lightroom sometimes caused automated logouts.
Edits not visible when sharing photos to certain 3rd party apps and services.
Crash occurred when occasionally navigating from grid view to collections view.
How to get started:
Download Lightroom mobile
Visit the Google Play store and download Lightroom mobile. Once you login with the same Creative Cloud account, you’ll see all of your synced Collections.
Sign In
Lightroom mobile utilizes cloud services to sync Smart Previews and changes between Lightroom desktop and Lightroom mobile. Lightroom mobile requires a qualifying Creative Cloud or Photoshop Photography Plan subscription:
Creative Cloud Photography plan
Creative Cloud complete plan
Creative Cloud Student and Teacher Edition
Creative Cloud for teams complete plan
A free 30-day trial of Lightroom is available
Download the latest version of Lightroom CC
Lightroom for Android is designed to work together seamlessly with Lightroom CC on the desktop. Check out Lightroom CC here
Sync a collection
Lightroom mobile is organized around Collections. Images within Collections will be synced and be available in Lightroom mobile.
Check out your photos at lightroom.adobe.com
In addition to Lightroom mobile, we’ve also launched Lightroom web, a new way to view and share your images from any web browser. Available at http://lightroom.adobe.com.
System Requirements
Processor : Quad Core CPU with 1.7 GHZ ARMv7 architecture
RAM : 1 GB
Internal Storage: 8 GB
Android Jellybean, KitKat, or Lollipop
Give us feedback!
Once you’ve gotten started with Lightroom mobile, don’t forget to leave us feedback about your experiences. Lightroom wouldn’t be what it is today without the loyal community of regular customers who help us find and fix common issues. If you keep talking to us, we’ll keep listening.
Here are a few ways that you can send us feedback and learn more:
Lightroom videos on YouTube
Give feedback on the Lightroom user-to-user forums
Feature requests on GetSatisfaction
Engage with us on our social channels on Facebook and Twitter
Thanks! |
/**
* This class adds a way for nodes inside a group to be piped to the group node itself.
*/
public class NodeGroupBridge extends JOIComponentNode {
public NodeGroupBridge(int width, int height, int componentId, JOIComponent component, EditorWindow editorWindow) {
super(150, 0, componentId, component, editorWindow);
setUserData("group_bridge");
setId("NodeGroupBridge");
if(((GroupBridge) getJoiComponent()).isInputBridge()) {
createNewOutputConnectionPoint("group input", "normal_output");
} else {
createNewInputConnectionPoint("group output");
}
}
@Override
public void focusState(boolean value) {
if (value) {
setStyle("-fx-background-color: #5a5a5a;" +
"-fx-background-radius: 10;" +
"-fx-background-insets: 8;" +
"-fx-effect: dropshadow(three-pass-box, deepskyblue, 10, 0, 0, 1);" +
"-fx-opacity: 1;"
);
} else {
setStyle("-fx-background-color: #5a5a5a;" +
"-fx-background-radius: 10;" +
"-fx-background-insets: 8;" +
"-fx-effect: dropshadow(three-pass-box, black, 10, 0, 0, 1);" +
"-fx-opacity: 1;"
);
}
}
@Override
protected boolean openDialog() {
return true;
}
} |
package mil.dds.anet.beans.search;
import io.leangen.graphql.annotations.GraphQLInputField;
import io.leangen.graphql.annotations.GraphQLQuery;
import java.util.Objects;
import mil.dds.anet.beans.Organization.OrganizationStatus;
import mil.dds.anet.beans.Organization.OrganizationType;
public class OrganizationSearchQuery extends AbstractSearchQuery<OrganizationSearchSortBy> {
@GraphQLQuery
@GraphQLInputField
private OrganizationStatus status;
@GraphQLQuery
@GraphQLInputField
private OrganizationType type;
// Search for organizations with a specific parent Org.
@GraphQLQuery
@GraphQLInputField
private String parentOrgUuid;
// Include descendants recursively from the specified parent.
// If true will include all orgs in the tree of the parentOrg
// Including the parent Org.
@GraphQLQuery
@GraphQLInputField
private Boolean parentOrgRecursively;
public OrganizationSearchQuery() {
super(OrganizationSearchSortBy.NAME);
}
public OrganizationStatus getStatus() {
return status;
}
public void setStatus(OrganizationStatus status) {
this.status = status;
}
public OrganizationType getType() {
return type;
}
public void setType(OrganizationType type) {
this.type = type;
}
public String getParentOrgUuid() {
return parentOrgUuid;
}
public void setParentOrgUuid(String parentOrgUuid) {
this.parentOrgUuid = parentOrgUuid;
}
public boolean getParentOrgRecursively() {
return Boolean.TRUE.equals(parentOrgRecursively);
}
public void setParentOrgRecursively(Boolean parentOrgRecursively) {
this.parentOrgRecursively = parentOrgRecursively;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), status, type, parentOrgUuid, parentOrgRecursively);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OrganizationSearchQuery)) {
return false;
}
final OrganizationSearchQuery other = (OrganizationSearchQuery) obj;
return super.equals(obj) && Objects.equals(getStatus(), other.getStatus())
&& Objects.equals(getType(), other.getType())
&& Objects.equals(getParentOrgUuid(), other.getParentOrgUuid())
&& Objects.equals(getParentOrgRecursively(), other.getParentOrgRecursively());
}
}
|
/**
 * A striped lock that uses hashCode on the key to hand back a hopefully
 * uncontended lock.
*/
public class StripedLock {
/**
* We use a number here based on our typical concurrency profile. So 10
* indexing threads and 2048 locks seems OK.
*/
public static final int DEFAULT_NUMBER_OF_MUTEXES = 2048;
private static final int DOUG_LEA_BLACK_MAGIC_OPERAND_1 = 20;
private static final int DOUG_LEA_BLACK_MAGIC_OPERAND_2 = 12;
private static final int DOUG_LEA_BLACK_MAGIC_OPERAND_3 = 7;
private static final int DOUG_LEA_BLACK_MAGIC_OPERAND_4 = 4;
private Lock[] mutexes = null;
public StripedLock() {
mutexes = new Lock[DEFAULT_NUMBER_OF_MUTEXES];
for (int i = 0; i < mutexes.length; i++) {
mutexes[i] = new ReentrantLock();
}
}
public Lock getLockForKey(final Object key) {
int lockNumber = selectLock(key, DEFAULT_NUMBER_OF_MUTEXES);
return mutexes[lockNumber];
}
/**
* Returns a hash code for non-null Object x.
* <p/>
* This function ensures that hashCodes that differ only by constant
* multiples at each bit position have a bounded number of collisions. (Doug
* Lea)
*/
private int hash(Object object) {
int h = object.hashCode();
h ^= (h >>> DOUG_LEA_BLACK_MAGIC_OPERAND_1) ^ (h >>> DOUG_LEA_BLACK_MAGIC_OPERAND_2);
return h ^ (h >>> DOUG_LEA_BLACK_MAGIC_OPERAND_3) ^ (h >>> DOUG_LEA_BLACK_MAGIC_OPERAND_4);
}
/**
* Selects a lock for a key. The same lock is always used for a given key.
*/
private int selectLock(final Object key, int numberOfLocks) {
int number = numberOfLocks & (numberOfLocks - 1);
if (number != 0) {
throw new RuntimeException("Lock number must be a power of two: " + numberOfLocks);
}
if (key == null) {
return 0;
} else {
int hash = hash(key) & (numberOfLocks - 1);
return hash;
}
}
} |
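// Illustrative usage sketch (not part of the original source; cacheKey is hypothetical):
//
//   StripedLock stripedLock = new StripedLock();
//   Lock lock = stripedLock.getLockForKey(cacheKey);
//   lock.lock();
//   try {
//       // ... mutate the entry for cacheKey ...
//   } finally {
//       lock.unlock();
//   }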
package servicebindingrequest
import (
"fmt"
"strings"
)
// BindingInfo represents the pieces of a binding as parsed from an annotation.
type BindingInfo struct {
// FieldPath is the field in the Backing Service CR referring to a bindable property, either
	// embedded or a reference to a related object.
FieldPath string
// Path is the field that will be collected from the Backing Service CR or a related object.
Path string
// Descriptor is the field reference to another manifest
Descriptor string
}
// NewBindingInfo parses the encoded in the annotation name, returning its parts.
func NewBindingInfo(name string, value string) (*BindingInfo, error) {
cleanName := strings.TrimPrefix(name, ServiceBindingOperatorAnnotationPrefix)
parts := strings.SplitN(cleanName, "-", 2)
// if there is only one part, it means the value of the referenced field itself will be used
if len(parts) == 1 {
return &BindingInfo{
FieldPath: parts[0],
Path: parts[0],
Descriptor: strings.Join([]string{value, parts[0]}, ":"),
}, nil
}
// the annotation is a reference to another manifest
if len(parts) == 2 {
return &BindingInfo{
FieldPath: parts[0],
Path: parts[1],
Descriptor: strings.Join([]string{value, parts[1]}, ":"),
}, nil
}
return nil, fmt.Errorf("should have two parts")
}
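// Illustration (the annotation prefix is elided; values are hypothetical): for a
// name ending in "status-dbCredentials" and value "binding:env:object:secret",
// NewBindingInfo yields FieldPath "status", Path "dbCredentials" and
// Descriptor "binding:env:object:secret:dbCredentials".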
|
import {SchemaRepositoryState, SchemaRepositoryStateInit, reduce as schemaRepository} from "./schema-repository/schema-repository.state";
import {combineReducers} from "@ngrx/store";
export interface ConfigurationState {
schemaRepository:SchemaRepositoryState
}
export const ConfigurationStateInit:ConfigurationState = {
schemaRepository: SchemaRepositoryStateInit
};
export const reduce = combineReducers({
schemaRepository
}); |
<filename>parse.go
// Package mergeips provides a way to convert lists of IP range definitions,
// like individual IPs, CIDR subnets and begin-end ranges, to the minimal list of net.IPNet
package mergeips
import (
"bytes"
"errors"
"fmt"
"net"
"strings"
"github.com/Djarvur/go-mergeips/ipnet"
"github.com/Djarvur/go-mergeips/iprange"
)
// Errors
var (
ErrInputInvalid = errors.New("invalid input")
)
// Scanner is a simple interface to support Scan() function.
// Intentionally compatible with bufio.Scanner
type Scanner interface {
Scan() bool
Text() string
Err() error
}
// Scan is used to parse source to the list of net.IPNet
func Scan(s Scanner) (res []*net.IPNet, err error) {
for s.Scan() {
subnets, err := Parse(s.Text(), false) // nolint: govet
if err != nil {
return nil, err
}
res = append(res, subnets...)
}
if err = s.Err(); err != nil {
return nil, err
}
return res, nil
}
// Parse parses a string to net.IPNet.
// The string might be in 3 forms:
//   an IP address itself, in v4 or v6 notation,
//   a CIDR subnet address, v4 or v6,
//   an IP address range, v4 or v6, in the form begin-end.
// If strict is false, a CIDR subnet may be defined with an address that is not the first
// address of the subnet. Otherwise an error will be returned.
func Parse(s string, strict bool) ([]*net.IPNet, error) {
fields := strings.Split(s, "/")
if len(fields) > 2 {
return nil, fmt.Errorf("%q: %w", s, ErrInputInvalid)
}
if len(fields) == 2 {
return parseCIDR(s, strict)
}
fields = strings.Split(s, "-")
if len(fields) > 2 {
return nil, fmt.Errorf("%q: %w", s, ErrInputInvalid)
}
if len(fields) == 2 {
return parseRange(fields[0], fields[1])
}
return parseIP(s)
}
// Merge merges list of net.IPNet to the smallest possible set
func Merge(nets []*net.IPNet) []*net.IPNet {
return ipnet.MergeSorted(ipnet.DedupSorted(ipnet.Sort(nets)))
}
func parseCIDR(s string, strict bool) ([]*net.IPNet, error) {
ip, n, err := net.ParseCIDR(s)
if err != nil || (strict && !ip.Equal(n.IP)) {
return nil, fmt.Errorf("%q: %w", s, ErrInputInvalid)
}
return []*net.IPNet{n}, nil
}
func parseRange(beginString string, endString string) ([]*net.IPNet, error) {
var (
begin = net.ParseIP(beginString)
end = net.ParseIP(endString)
)
if begin == nil || end == nil || (begin.To4() == nil) != (end.To4() == nil) || bytes.Compare(begin, end) > 0 {
return nil, fmt.Errorf("%q-%q: %w", beginString, endString, ErrInputInvalid)
}
return iprange.Merge(begin, end), nil
}
func parseIP(s string) ([]*net.IPNet, error) {
begin := net.ParseIP(s)
if begin == nil {
return nil, fmt.Errorf("%q: %w", s, ErrInputInvalid)
}
bits := 128
if ipV4 := begin.To4(); ipV4 != nil {
bits = 32
begin = ipV4
}
return []*net.IPNet{{IP: begin, Mask: net.CIDRMask(bits, bits)}}, nil
}
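A minimal end-to-end sketch (illustrative; the expectation that the first two inputs collapse into 10.0.0.0/24 is an assumption about Merge's behavior on adjacent ranges):

package main

import (
	"fmt"
	"net"

	mergeips "github.com/Djarvur/go-mergeips"
)

func main() {
	inputs := []string{"10.0.0.0/25", "10.0.0.128-10.0.0.255", "10.0.1.1"}
	var nets []*net.IPNet
	for _, in := range inputs {
		parsed, err := mergeips.Parse(in, false)
		if err != nil {
			panic(err)
		}
		nets = append(nets, parsed...)
	}
	// Expect the first two inputs to collapse into 10.0.0.0/24.
	for _, n := range mergeips.Merge(nets) {
		fmt.Println(n)
	}
}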
|
import numpy as np

# get_analysis_stats, get_histdep_data and get_auto_MI_data are defined
# elsewhere in this module.
def create_CSV_files(f_analysis,
                     f_csv_stats,
                     f_csv_histdep_data,
                     f_csv_auto_MI_data,
                     analysis_num,
                     **kwargs):
    # Summary statistics: one commented header line (keys), one line of values.
    stats = get_analysis_stats(f_analysis,
                               analysis_num,
                               **kwargs)
    f_csv_stats.write("#{}\n".format(",".join(stats.keys())))
    f_csv_stats.write("{}\n".format(",".join(stats.values())))
    # History-dependence data: keys become the header; values are stacked into
    # a matrix and written column-wise, one CSV row per entry.
    histdep_data = get_histdep_data(f_analysis,
                                    analysis_num,
                                    **kwargs)
    f_csv_histdep_data.write("#{}\n".format(",".join(histdep_data.keys())))
    histdep_data_m = np.array([vals for vals in histdep_data.values()])
    for line_num in range(np.size(histdep_data_m, axis=1)):
        f_csv_histdep_data.write("{}\n".format(",".join(histdep_data_m[:, line_num])))
    # Auto mutual information data, written the same way.
    auto_MI_data = get_auto_MI_data(f_analysis,
                                    analysis_num,
                                    **kwargs)
    f_csv_auto_MI_data.write("#{}\n".format(",".join(auto_MI_data.keys())))
    auto_MI_data_m = np.array([vals for vals in auto_MI_data.values()])
    for line_num in range(np.size(auto_MI_data_m, axis=1)):
        f_csv_auto_MI_data.write("{}\n".format(",".join(auto_MI_data_m[:, line_num])))
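A hypothetical call site (the file names, the f_analysis handle and analysis_num are illustrative; the real values come from the surrounding analysis pipeline):

with open("statistics.csv", "w") as f_stats, \
     open("histdep_data.csv", "w") as f_histdep, \
     open("auto_MI_data.csv", "w") as f_auto_mi:
    create_CSV_files(f_analysis, f_stats, f_histdep, f_auto_mi, analysis_num=0)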
Microvascularization of the pineal gland in the freshwater turtle, Pseudemys scripta elegans (Reptilia): A scanning electron microscopic study of vascular corrosion casts
Abstract: Gross supply, microvascular patterns, and drainage routes of the pineal gland and its vascular relations with associated structures (dorsal sac, paraphysis, choroid plexus of the third ventricle) were studied by scanning electron microscopy of microvascular corrosion casts in 10 specimens of the freshwater turtle, Pseudemys scripta elegans. Light microscopy of tissue sections (one transverse and one longitudinal series) served to attribute cast vascular territories to anatomical structures. The tubular pineal gland body is supplied bilaterally by small branches of the saccular artery, a branch of the lateral choroidal artery. Branches of the diencephalic artery supply the pineal stalk. The pineal gland microvascular bed is a two‐dimensional network that embraces the tubular gland. The network is made up mainly of venules with few true capillaries. Venules draining the ventral surface of the pineal gland body join those coming from the choroid plexus of the third ventricle and drain into the sagittal sinus. The less dense vascular network embracing the dorsal surface of the pineal body drains directly into the sagittal sinus. The pineal stalk drains into the diencephalic vein or directly into the sagittal sinus. No efferent (venous) vascular connections capable as transport route for pineal secretions toward surrounding telencephalic, diencephalic, or mesencephalic areas were found. |
import sys

# Skip the first line (a count/header), then for every remaining line
# sort the whitespace-separated integers and print them space-separated,
# with no trailing newline.
for index, line in enumerate(sys.stdin):
    if index == 0:
        continue
    all_nums = sorted(int(word) for word in line.split())
    sys.stdout.write(" ".join(str(num) for num in all_nums))
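For example (the script name is invented; the input follows the expected shape of a count line followed by the numbers):

$ printf '3\n5 1 4\n' | python sort_numbers.py
1 4 5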
package main
import (
"fmt"
"github.com/koesie10/smartmeter/serialinput"
"os"
"text/tabwriter"
"time"
"github.com/koesie10/smartmeter/smartmeter"
"github.com/spf13/cobra"
)
var readCmd = &cobra.Command{
Use: "read",
Short: "read a single P1 packet to stdout",
RunE: func(cmd *cobra.Command, args []string) error {
port, err := serialinput.Open(&config.Options)
if err != nil {
return fmt.Errorf("failed to open port: %v", err)
}
defer port.Close()
sm, err := smartmeter.New(port)
if err != nil {
return fmt.Errorf("failed to open smart meter: %v", err)
}
packet, err := sm.Read()
if err != nil {
return fmt.Errorf("failed to read packet: %v", err)
}
tw := tabwriter.NewWriter(os.Stdout, 10, 0, 2, ' ', tabwriter.AlignRight)
fmt.Fprintln(tw, "Time\tTotal kWh Tariff 1 Consumed\tTotal kWh Tariff 2 consumed\tTotal gas consumed m^3\tCurrent consumption kW\tGas Measured At")
fmt.Fprintf(tw, "%s\t%.3f\t%.3f\t%.3f\t%.3f\t%s", time.Now(), packet.Electricity.Tariffs[0].Consumed, packet.Electricity.Tariffs[1].Consumed, packet.Gas.Consumed, packet.Electricity.CurrentConsumed-packet.Electricity.CurrentProduced, packet.Gas.MeasuredAt)
return tw.Flush()
},
}
func init() {
rootCmd.AddCommand(readCmd)
}
|
Immunosuppression in human peripheral blood T lymphocytes by fluvastatin.
To investigate the immunosuppressive effect of fluvastatin on PHA-activated T lymphocytes, T lymphocytes were isolated from the blood of healthy volunteers; cell proliferation and the expression of activation markers were examined by flow cytometric analysis. Cytokine secretion was assayed by ELISA. An LDH-release assay was used to detect the activity of killer cells. NFAT activation was evaluated with a TransAM ELISA kit. Results were as follows. (1) Whereas no modification in CD25 expression was seen, fluvastatin at 5 microM caused a lower level of CD69 expression, accompanied by a marked suppression of proliferation, IL-2 production and cytotoxicity development in PHA-stimulated T cells. However, the level of secreted IL-10 did not change, and the level of IL-4 even increased significantly. (2) Combined with cyclosporine A (CsA), fluvastatin further repressed CD69 expression, cell proliferation and killer-cell activity, while significantly inducing the secretion of IL-4 and IL-10. (3) Fluvastatin treatment also resulted in a strong inhibition of NFAT activation. In conclusion, fluvastatin exhibited an immunosuppressive effect in vitro, partly through blockade of NFAT activation.
By Hunter Wallace
In the aftermath of the Dylann Roof shooting in Charleston, I calmed everyone and made a prediction: within the next two months, I predicted that there would be another mass shooter in the US with a different set of motives, and that it would have nothing to do with the Confederate Battle Flag. A month later, the jihadist Muhammad Abdulazeez went on his rampage shooting in Chattanooga.
Since then, a number of events have happened that are a cause for serious alarm: dozens, perhaps hundreds, of Confederate monuments have been vandalized, a black shooter opened fire on a Confederate flag rally in Ocala, FL, the #NoFlaggingChallenge has taken the vandalism of Confederate flags onto private property, there have been cases of rocks and bottles being thrown at Confederate flag parades or guns pointed at pro-Confederate demonstrators, and we know of at least three black-on-white mob attacks over the Confederate flag in Columbia, SC and Texas.
In the midst of this Southern Kristallnacht, the murder of Anthony Hervey near Oxford (which we expect to be confirmed soon), the Atlanta City Council’s vote to vandalize Stone Mountain (more on that later), and now the actions of this group of blacks in Memphis, who “symbolically” attempted to dig up Nathan Bedford Forrest from his grave, threatening to bring “a back hoe” and “tractors” to “raise Bedford Forrest from the soil of Memphis” should the State of Tennessee (likely) quash their iconoclasm this fall, are provocations on a whole new level.
Let me go on record here with my next prediction: if this isn’t brought to an end soon and continues to escalate, someone out there is going to decide to fight back and turn to violence. I can’t predict when or where it will happen. I can’t predict what will happen, or how it will happen, but the atmosphere is increasingly ripe for violence. To draw an analogy here, it’s like when you look outside in North Alabama and can tell that a tornado is coming without having to turn on The Weather Channel.
To put it mildly, there are countless people out there like Dylann Roof who lack self-control and who are probably even more heavily armed than he was. The leftwing media, political opportunists on the Left, gutless Republicans, and bloodthirsty minority groups have done everything in their power since Charleston to polarize and ensure that new Dylann Roofs are created. In the aftermath of Charleston, the entire country, with the exception of the most extreme, sociopathic fringe of White Nationalists, condemned what Roof did in Charleston and sympathized with the victims. It could and should have ended there.
Dylann Roof’s stated goal in Charleston was to ignite a race war. There are obviously many in the black community like the New Black Panthers who have long wanted a race war themselves for their own reasons and who are going out of their way in Memphis to provoke one.
I can see it happening in several ways:
1.) It could start with a simple rock or bottle thrown, or maybe a gunshot, at a truck with a Confederate flag, at which point the owner of the truck could return fire with his AR-15.
2.) It could easily start in a fight at a political demonstration in which a White man is assaulted like in Columbia, but turns out to be carrying and opens fire.
3.) It could start when a group of thugs are caught vandalizing a Confederate monument and it ends in a gun fight.
4.) It could start when another lone wolf like Dylann Roof decides he has had enough and decides to go out in a blaze of glory.
5.) It could start at a demonstration in which blacks open fire on pro-Confederate demonstrators in a place like Birmingham.
There are any number of plausible, realistic scenarios in which that shoe could drop. It wouldn’t be a bolt out of the blue either. We’re powerless to forestall such a chain of events, but this atmosphere of anti-Southern hate has become so frightening that we are issuing a warning.
Black mob violence, whether it assumes the color of law (Memphis, Birmingham, Atlanta) or takes place outside the boundaries of law (Ferguson, Baltimore, Columbia) or is tolerated by authorities (the Obama administration), must be reined in by a higher authority to defuse this explosive atmosphere before it escalates any further.
Note: Once again, let me reiterate the C of CC deplores violence and would like to see this animosity abate before any further damage is inflicted on historic monuments. Outrageous provocations of this sort though have us worried that politicians are allowing racial tensions to spiral out of control. |
import requests

def register_label(server_url, phone_label):
    post_msg = {
        "user": phone_label,
    }
    # POST to the profile-creation endpoint and return the uuid it assigns.
    print("About to create a profile using %s" % post_msg)
    response = requests.post(server_url + "/profile/create", json=post_msg)
    print("response = %s" % response)
    response.raise_for_status()
    local_uuid = response.json()["uuid"]
    print("Found local uuid %s" % local_uuid)
    return local_uuid
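A hypothetical call (the server URL and label are illustrative; the /profile/create endpoint and the "uuid" response field come from the function above):

local_uuid = register_label("http://localhost:8080", "test-phone-1")
print("registered as %s" % local_uuid)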