/**
 * A wrapper encapsulating outbound REST requests to the map service.
* <p>
 * The URL for the map service and the API key are injected via CDI: {@code
 * <jndiEntry />} elements defined in server.xml map the environment variables
 * to JNDI values.
* </p>
* <p>
 * CDI will create this bean (the {@code MapClient}) with application scope.
 * The bean is created when the application starts, and can be injected
 * into other CDI-managed beans for as long as the application is running.
* </p>
*
* @see ApplicationScoped
*/
@ApplicationScoped
public class MapClient {
public static final String DEFAULT_MAP_URL = "https://game-on.org/map/v1/sites";
/**
* The URL for the target map service.
* This is set via the environment variable MAP_URL. This value is read
* in server.xml.
*/
@Resource(lookup = "mapUrl")
private String mapLocation;
/**
* The root target used to define the root path and common query parameters
 * for all outbound requests to the map service.
*
* @see WebTarget
*/
private WebTarget queryRoot;
/**
* The {@code @PostConstruct} annotation indicates that this method should
* be called immediately after the {@code MapClient} is instantiated
* with the default no-argument constructor.
*
* @see PostConstruct
* @see ApplicationScoped
*/
@PostConstruct
public void initClient() {
try {
// The Map URL is an optional value.
if (mapLocation == null || mapLocation.contains("MAP_URL") ) {
MapClientLog.log(Level.FINER, this, "No MAP_URL environment variable provided. Will use default.");
mapLocation = DEFAULT_MAP_URL;
}
Client queryClient = ClientBuilder.newBuilder()
.property("com.ibm.ws.jaxrs.client.ssl.config", "DefaultSSLSettings")
.property("com.ibm.ws.jaxrs.client.disableCNCheck", true)
.build();
queryClient.register(MapResponseReader.class);
// create the jax-rs 2.0 client
this.queryRoot = queryClient.target(mapLocation);
MapClientLog.log(Level.INFO, this, "Map client initialized. Map URL set to {0}", mapLocation);
} catch ( Exception ex ) {
            MapClientLog.log(Level.SEVERE, this, "Unable to initialize map service", ex);
}
}
public boolean ok() {
return queryRoot != null;
}
public void updateRoom(String roomId, RoomDescription roomDescription) {
MapData data = getMapData(roomId);
if ( data != null ) {
roomDescription.updateData(data);
}
}
public MapData getMapData(String siteId) {
WebTarget target = this.queryRoot.path(siteId);
MapClientLog.log(Level.FINER, this, "making request to {0} for room", target.getUri().toString());
Response r = null;
try {
r = target.request(MediaType.APPLICATION_JSON).get();
if (r.getStatusInfo().getFamily().equals(Response.Status.Family.SUCCESSFUL)) {
MapData data = r.readEntity(MapData.class);
return data;
}
return null;
} catch (ResponseProcessingException rpe) {
Response response = rpe.getResponse();
MapClientLog.log(Level.FINER, this, "Exception fetching room list uri: {0} resp code: {1} ",
target.getUri().toString(),
response.getStatusInfo().getStatusCode() + " " + response.getStatusInfo().getReasonPhrase());
MapClientLog.log(Level.FINEST, this, "Exception fetching room list", rpe);
} catch (ProcessingException e) {
MapClientLog.log(Level.FINEST, this, "Exception fetching room list (" + target.getUri().toString() + ")", e);
} catch (WebApplicationException ex) {
MapClientLog.log(Level.FINEST, this, "Exception fetching room list (" + target.getUri().toString() + ")", ex);
}
// Sadly, badness happened while trying to get the endpoints
return null;
}
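    // Illustrative usage from another CDI-managed bean (the injecting class
    // and its fields are hypothetical, not part of this file):
    //
    //   @Inject
    //   MapClient mapClient;
    //
    //   if (mapClient.ok()) {
    //       mapClient.updateRoom(roomId, roomDescription);
    //   }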
} |
import { SetupServer } from '@src/server'
let server: SetupServer
beforeAll(async () => {
server = new SetupServer()
server.init()
await server.startConnection()
})
afterAll(async () => await server.closeConnection())
|
<reponame>ArtiomTr/morfix
export interface CodeProps {
meta: Record<string, boolean | string>;
language?: string;
code: string;
metastring: string;
}
|
/*
Run a GraphQL request from the server with the proper context
*/
import { graphql, GraphQLError } from 'graphql';
import { localeSetting } from '../../lib/publicSettings';
import { getExecutableSchema } from './apollo-server/initGraphQL';
import { getCollectionsByName, generateDataLoaders } from './apollo-server/context';
function writeGraphQLErrorToStderr(errors: readonly GraphQLError[])
{
// eslint-disable-next-line no-console
console.error(`runQuery error: ${errors[0].message}`);
// eslint-disable-next-line no-console
console.error(errors);
}
let onGraphQLError = writeGraphQLErrorToStderr;
export function setOnGraphQLError(fn: ((errors: readonly GraphQLError[])=>void)|null)
{
if (fn)
onGraphQLError = fn;
else
onGraphQLError = writeGraphQLErrorToStderr;
}
// note: if no context is passed, default to running requests with full admin privileges
export const runQuery = async (query: string, variables: any = {}, context?: Partial<ResolverContext>) => {
const executableSchema = getExecutableSchema();
const queryContext = createAdminContext(context);
// see http://graphql.org/graphql-js/graphql/#graphql
const result = await graphql(executableSchema, query, {}, queryContext, variables);
if (result.errors) {
onGraphQLError(result.errors);
throw new Error(result.errors[0].message);
}
return result;
};
export const createAnonymousContext = (options?: Partial<ResolverContext>): ResolverContext => {
const queryContext = {
userId: null,
currentUser: null,
headers: null,
locale: localeSetting.get(),
isGreaterWrong: false,
...getCollectionsByName(),
...generateDataLoaders(),
...options,
};
return queryContext;
}
export const createAdminContext = (options?: Partial<ResolverContext>): ResolverContext => {
return {
...createAnonymousContext(),
// HACK: Instead of a full user object, this is just a mostly-empty object with isAdmin set to true
currentUser: {isAdmin: true} as DbUser,
...options,
};
}
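// Illustrative usage (the query document below is a hypothetical placeholder):
//
//   const result = await runQuery(`{ currentUser { _id } }`);  // admin context by default
//   await runQuery(`{ currentUser { _id } }`, {}, createAnonymousContext());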
|
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
mod vm;
use vm::RustVM;
use vm::defs::Register;
use vm::instruction::Instruction;
pub struct Config {
program_filename: String,
}
impl Config {
pub fn new(args: &[String]) -> Result<Config, &'static str> {
if args.len() != 2 {
return Err("Usage: rustyvm <program_filename>");
}
let program_filename = args[1].clone();
Ok(Config { program_filename })
}
}
pub fn run(config: Config) -> Result<(), Box<dyn Error>> {
let mut f = File::open(config.program_filename)?;
let mut contents = String::new();
f.read_to_string(&mut contents)?;
let mut vm = RustVM::new(parse(&contents));
    while vm.running {
// vm.print_status();
let instr = vm.fetch();
vm.decode(&instr);
vm.step();
}
Ok(())
}
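// Example program text accepted by `parse` (one instruction per line;
// registers A and C also appear in the tests below):
//
//   set A 3
//   push 2
//   push 2
//   add
//   pop C
//   log C
//   halt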
pub fn parse(contents: &str) -> Vec<Instruction> {
let mut program = Vec::new();
for line in contents.lines() {
// let line = line.trim();
if line.contains("push") {
let v: Vec<&str> = line.split(' ').collect();
let operand = v[1].parse::<i32>().unwrap();
program.push(Instruction::push(operand));
}
else if line.contains("add") {
program.push(Instruction::add());
}
else if line.contains("pop") {
let v: Vec<&str> = line.split(' ').collect();
let register = v[1].parse::<Register>().unwrap();
program.push(Instruction::pop(register));
}
else if line.contains("set") {
let v: Vec<&str> = line.split(' ').collect();
let register = v[1].parse::<Register>().unwrap();
let operand = v[2].parse::<i32>().unwrap();
program.push(Instruction::set(register, operand));
}
else if line.contains("mov") {
let v: Vec<&str> = line.split(' ').collect();
let register_a = v[1].parse::<Register>().unwrap();
let register_b = v[2].parse::<Register>().unwrap();
program.push(Instruction::mov(register_a, register_b));
}
else if line.contains("halt") {
program.push(Instruction::halt());
}
else if line.contains("log") {
let v: Vec<&str> = line.split(' ').collect();
let register = v[1].parse::<Register>().unwrap();
program.push(Instruction::log(register));
}
else {
panic!("Illegal Instruction");
}
}
program
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn push_instruction() {
let contents = "push 3";
let golden = vec![Instruction::push(3)];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn add_instruction() {
let contents = "add";
let golden = vec![Instruction::add()];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn pop_instruction() {
let contents = "pop A";
let golden = vec![Instruction::pop(Register::A)];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn set_instruction() {
let contents = "set A 3";
        let golden = vec![Instruction::set(Register::A, 3)];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn mov_instruction() {
let contents = "mov A C";
let golden = vec![Instruction::mov(Register::A, Register::C)];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn halt_instruction() {
let contents = "halt";
let golden = vec![Instruction::halt()];
assert_eq!(
golden,
parse(contents)
);
}
#[test]
fn log_instruction() {
let contents = "log A";
let golden = vec![Instruction::log(Register::A)];
assert_eq!(
golden,
parse(contents)
);
}
}
|
// Parse returns values from an HTML element.
func (si *SelectorInfo) Parse(e *colly.HTMLElement) []string {
text := e.Text
if si.Attribute != "" {
text = e.Attr(si.Attribute)
}
matches := si.runRegexIfExists(text)
if matches == nil {
return []string{text}
}
return matches[1:]
} |
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from SevenLema.utils import require_login, require_post_param, require_image
from cmdb.models.dish import Dish
from cmdb.models.shop import Shop
@require_POST
@require_login
@require_post_param('shop_id', int)
@require_post_param('name')
@require_image()
@require_post_param('desc')
@require_post_param('price', float)
def create(request, user, shop_id, name, image, desc, price):
try:
shop = Shop.objects.get(id=shop_id)
except Shop.DoesNotExist:
        return JsonResponse({'code': 102, 'msg': 'Shop does not exist'})
if shop.user_id != user.id:
        return JsonResponse({'code': 105, 'msg': 'Shop must belong to the current user'})
    # objects.create() already persists the row, so no extra save() is needed
    dish = Dish.objects.create(
        shop_id=shop_id,
        name=name,
        image=image,
        desc=desc,
        price=int(100 * price),
        sales=0,
        serving=True)
return JsonResponse({'code': 0, 'msg': '', 'data': {'dish_id': dish.id}})
@require_POST
@require_login
@require_post_param('dish_id', int)
@require_post_param('name', None, False)
@require_image(False)
@require_post_param('desc', None, False)
@require_post_param('price', float, False)
@require_post_param('serving', bool, False)
def edit(request, user, dish_id, name, image, desc, price, serving):
    try:
        dish = Dish.objects.get(id=dish_id)
    except Dish.DoesNotExist:
        return JsonResponse({'code': 102, 'msg': 'Dish does not exist'})
    if dish.shop.user_id != user.id:
        return JsonResponse({'code': 105, 'msg': 'Shop must belong to the current user'})
if name is not None:
dish.name = name
if image is not None:
dish.image = image
if desc is not None:
dish.desc = desc
if price is not None:
dish.set_actual_price(price)
if serving is not None:
dish.serving = serving
dish.save()
return JsonResponse({'code': 0, 'msg': '', 'data': None})
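# Illustrative request for the create endpoint above (the URL route and the
# session cookie are hypothetical; routing is defined elsewhere):
#
#   curl -b 'sessionid=...' -X POST http://localhost:8000/dish/create \
#        -F shop_id=1 -F name=Noodles -F desc='House special' \
#        -F price=12.5 -F image=@dish.jpg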
|
// DeviceCode/GHI/Drivers/SoftwareI2C/SoftwareI2C.cpp
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) GHI Electronics, LLC.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <GHI\Include\GHI_OSHW_HAL.h>
#define TIMEOUT_STICK 1000000 // timeout in microseconds (1 s, matching the managed side)
#define I2CDELAY(x) HAL_Time_Sleep_MicroSeconds_InterruptEnabled(1) // fixed 1 us delay; the argument is ignored, so bus timing does not depend on the requested speed
#define ClearSCL() MakePinOutput(_i2c->scl)
#define ClearSDA() MakePinOutput(_i2c->sda)
BOOL _start;
GHI_OSHW_HAL_SoftwareI2C *_i2c;
void MakePinOutput(GPIO_PIN pin)
{
CPU_GPIO_EnableOutputPin(pin, FALSE);
}
void MakePinInput(GPIO_PIN pin)
{
CPU_GPIO_EnableInputPin(pin, FALSE, NULL, GPIO_INT_NONE, RESISTOR_PULLUP);
}
BOOL ReadPinState(GPIO_PIN pin)
{
CPU_GPIO_EnableInputPin(pin, FALSE, NULL, GPIO_INT_NONE, RESISTOR_PULLUP);
return CPU_GPIO_GetPinState(pin);
}
BOOL ReadBit()
{
// "ReadSDA" makes SDA an input - processor lets go of pin and internal
// pull-up resistor makes it high. Now slave can drive the pin.
ReadPinState(_i2c->sda);
I2CDELAY(i2cSpeed);
// Clock stretching - Makes SCL an input and pull-up resistor makes
// the pin high. Slave device can pull SCL low to extend clock cycle.
long endStretch = 0;//Utility.GetMachineTime().Ticks + timeOutTicks;
while (ReadPinState(_i2c->scl)==0 && endStretch <TIMEOUT_STICK)
{
// How long have we been stuck in the while loop?
//if (Utility.GetMachineTime().Ticks >= endStretch)
// throw new TimeOutException(); // Too long, so bail out by throwing an exception.
HAL_Time_Sleep_MicroSeconds_InterruptEnabled(1); // 1 microsecond;
endStretch++;
}
// At this point, SCL is high and SDA is valid - so read the bit.
BYTE bit = ReadPinState(_i2c->sda);
I2CDELAY(i2cSpeed);
ClearSCL(); // Pull the serial clock line low ...
return bit; // and return.
}
BOOL WriteBit(BOOL bit)
{
if (bit)
{
ReadPinState(_i2c->sda); // Make SDA an input ... so pin is pulled up.
}
else
{
ClearSDA(); // Make SDA an output ... so pin is pulled low.
}
I2CDELAY(i2cSpeed);
// Clock stretching - Makes SCL an input and pull-up resistor makes
// the pin high. Slave device can pull SCL low to extend clock cycle.
long endStretch = 0;//Utility.GetMachineTime().Ticks + TIMEOUT_STICK;
while (!ReadPinState(_i2c->scl)&& endStretch <TIMEOUT_STICK)
{
// How long have we been stuck in the while loop?
//if (Utility.GetMachineTime().Ticks >= endStretch)
// throw new TimeOutException(); // Too long, so bail out by throwing an exception.
HAL_Time_Sleep_MicroSeconds_InterruptEnabled(1); // 1 microsecond;
endStretch++;
}
// SCL is high and SDA is valid ...
// Check that nobody else is driving SDA
if (bit && !ReadPinState(_i2c->sda))
{
return FALSE;// Lost arbitration
}
I2CDELAY(i2cSpeed);
ClearSCL();
return TRUE; // Success!
}
BOOL SendStartCondition()
{
if (_start)
{
// set SDA to 1
ReadPinState(_i2c->sda);
I2CDELAY(i2cSpeed);
//
// Clock stretching - Makes SCL an input and pull-up resistor makes
// the pin high. Slave device can pull SCL low to extend clock cycle.
long endStretch = 0;//Utility.GetMachineTime().Ticks + TIMEOUT_STICK;
while (!ReadPinState(_i2c->scl) && endStretch <TIMEOUT_STICK )
{
// How long have we been stuck in the while loop?
//if (Utility.GetMachineTime().Ticks >= endStretch)
//throw new TimeOutException(); // Too long, so bail out by throwing an exception.
HAL_Time_Sleep_MicroSeconds_InterruptEnabled(1); // 1 microsecond;
endStretch++;
}
}
if (!ReadPinState(_i2c->sda))
{
return FALSE;
}
// SCL is high, set SDA from 1 to 0
ClearSDA();
I2CDELAY(i2cSpeed);
ClearSCL();
_start = TRUE;
return TRUE;
}
BOOL SendStopCondition()
{
// set SDA to 0
ClearSDA();
I2CDELAY(i2cSpeed);
//
// Clock stretching - Makes SCL an input and pull-up resistor makes
// the pin high. Slave device can pull SCL low to extend clock cycle.
long endStretch = 0;//Utility.GetMachineTime().Ticks + TIMEOUT_STICK;
while (!ReadPinState(_i2c->scl)&& endStretch <TIMEOUT_STICK)
{
// How long have we been stuck in the while loop?
//if (Utility.GetMachineTime().Ticks >= endStretch)
// throw new TimeOutException(); // Too long, so bail out by throwing an exception.
HAL_Time_Sleep_MicroSeconds_InterruptEnabled(1); // 1 microsecond;
endStretch++;
}
//
// SCL is high, set SDA from 0 to 1
if (!ReadPinState(_i2c->sda))
{
return FALSE;
}
I2CDELAY(i2cSpeed);
_start = FALSE;
return TRUE;
}
BOOL Transmit(BOOL sendStartCondition, BOOL sendStopCondition, BYTE ByteToSend)
{
UINT32 bit;
BOOL nack;
if (sendStartCondition)
{
SendStartCondition();
}
for (bit = 0; bit < 8; bit++)
{
WriteBit((ByteToSend & 0x80) != 0);
ByteToSend <<= 1;
}
nack = ReadBit();
//
if (sendStopCondition)
{
SendStopCondition();
}
// Return value is "TRUE" for NAK
// "FALSE" for ACK.
return nack;
}
BYTE Receive(BOOL sendAcknowledgeBit, BOOL sendStopCondition)
{
BYTE d = 0;
//BOOL b;
UINT32 bit=0;
for (bit = 0; bit < 8; bit++)
{
d <<= 1;
//b = ReadBit();
if ( ReadBit())
d |= 1;
}
//
WriteBit(!sendAcknowledgeBit);
//
if (sendStopCondition)
{
SendStopCondition();
}
//
return d;
}
UINT32 WriteBytes( BYTE *data, UINT32 length)
{
if (length == 0) return 0;
UINT32 numWrite=0;
UINT32 i = 0;
if (!Transmit(TRUE, FALSE, (BYTE)((_i2c->address) << 1)))
{
for (i = 0; i < length - 1; i++)
{
if (!Transmit(FALSE, FALSE, data[i]))
{
numWrite++;
}
}
}
if (!Transmit(FALSE, TRUE, data[i])) numWrite++;
return numWrite;
}
UINT32 ReadBytes(BYTE *data, UINT32 length)
{
if (length == 0) return 0;
UINT32 numRead = 0;
UINT32 i = 0;
if (Transmit(TRUE, FALSE, (BYTE)(((_i2c->address) << 1) | 1)))
{
for (i = 0; i < length - 1; i++)
{
data[i] = Receive(TRUE, FALSE);
numRead++;
}
}
data[i] = Receive(FALSE, TRUE);
numRead++;
return numRead;
}
BOOL SoftwareI2C_Initialize(GHI_OSHW_HAL_SoftwareI2C *i2c)
{
_i2c = i2c;
_start = FALSE;
MakePinInput(_i2c->sda);
MakePinInput(_i2c->scl);
return TRUE;
}
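/* Illustrative usage sketch (the pin macros and the 7-bit address are made-up
 * example values, not part of this driver):
 *
 *   GHI_OSHW_HAL_SoftwareI2C bus = { .scl = SCL_PIN, .sda = SDA_PIN, .address = 0x48 };
 *   UINT32 written = 0, read = 0;
 *   BYTE reg = 0x00, value = 0;
 *   GHI_OSHW_HAL_SoftwareI2C_WriteRead(&bus, &reg, 1, &value, 1, &written, &read);
 */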
BOOL GHI_OSHW_HAL_SoftwareI2C_WriteRead(GHI_OSHW_HAL_SoftwareI2C *i2c, BYTE *writeBuffer, UINT32 writeLength, BYTE *readBuffer, UINT32 readLength, UINT32 *numWritten, UINT32 *numRead)
{
*numWritten = *numRead = 0;
SoftwareI2C_Initialize(i2c);
UINT32 i = 0;
BYTE write =0;
BYTE read =0;
// For write
if (writeLength >0)
{
if (!Transmit(TRUE, FALSE, (BYTE)((_i2c->address) << 1))) // Write address for write
{
for (i = 0; i < writeLength - 1; i++)
{
if (!Transmit(FALSE, FALSE, writeBuffer[i])) // Write data
{
(write)++;
}
}
}
// if (!Transmit(FALSE, TRUE, writeBuffer[i])) (write)++; // Write last byte - send stop condition
// *numWritten = write;
// 26-07-2012
// Fix issue: Remove Stop condition if user want to Write-Read in one transaction
if (!Transmit(FALSE, (readLength==0), writeBuffer[i])) (write)++;
*numWritten = write;
}
// For Read
if (readLength > 0)
{
if (!Transmit(TRUE, FALSE, (BYTE)(((_i2c->address) << 1) | 1))) // Write address for Read
{
for (i = 0; i < readLength - 1; i++)
{
readBuffer[i] = Receive(TRUE, FALSE);
(read)++;
}
}
readBuffer[i] = Receive(FALSE, TRUE); // Read last byte
(read)++;
*numRead = read;
}
return (write+read) == (writeLength+readLength);
} |
// PSME/application/src/rest/model/handlers/file_database.cpp
/*!
* @brief General database interface
*
* @header{License}
* @copyright Copyright (c) 2016-2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @header{Filesystem}
 * @file file_database.cpp
*/
#include "psme/rest/model/handlers/file_database.hpp"
#include <logger/logger.hpp>
#include <logger/logger_factory.hpp>
#include <safe-string/safe_lib.hpp>
#include <cassert>
#include <cstring>
#include <mutex>
#include <memory>
extern "C" {
#include <dirent.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
}
namespace psme {
namespace rest {
namespace model {
namespace handler {
const std::string FileDatabase::EXT{".db"};
const std::string FileDatabase::EMPTY{""};
const unsigned FileDatabase::VALUE_LEN = 512;
/*! FAIL condition for assert. Raise assertion when given condition is met. */
constexpr bool FAIL(const char*, bool when = true) { return !when; }
std::recursive_mutex FileDatabase::mutex{};
std::string FileDatabase::PATH{""};
FileDatabase::FileDatabase(const std::string& _name) :
Database(_name),
iterating_state(IteratingState::NOT_STARTED) {
if (PATH.empty()) {
PATH = get_directory();
}
assert (!PATH.empty());
}
FileDatabase::~FileDatabase() {
if (iterating_state != IteratingState::NOT_STARTED) {
FileDatabase::end();
}
}
bool FileDatabase::start() {
/*! @todo Double locking on iterating state */
std::lock_guard<std::recursive_mutex> lock(mutex);
if (iterating_state != IteratingState::NOT_STARTED) {
log_error(GET_LOGGER("db"), "Iterating in progress");
assert (FAIL("In progress"));
return false;
}
iterating_state = IteratingState::STARTED;
return true;
}
bool FileDatabase::next(Serializable& key, Serializable& value) {
std::lock_guard<std::recursive_mutex> lock(mutex);
switch (iterating_state) {
case IteratingState::NOT_STARTED:
log_error(GET_LOGGER("db"), "Iterating not started");
assert (FAIL("Not iterating"));
return false;
case IteratingState::STARTED:
iterated_files.clear();
foreach(key, [this](const std::string& stripped_name) -> bool {
iterated_files.push_back(stripped_name);
return true;
});
current_file = iterated_files.begin();
iterating_state = IteratingState::ITERATE;
break;
case IteratingState::ITERATE:
break;
case IteratingState::NO_MORE_DATA:
return false;
default:
assert (FAIL("Unreachable code"));
return false;
}
while (iterated_files.end() != current_file) {
const std::string& stripped_name = *current_file;
current_file++;
/* parse key, it was checked just before, so it is proper */
key.unserialize(stripped_name);
/* parse value, if not valid, then log error and check next one */
std::string data{};
if (!read_file(full_name(stripped_name), data)) {
log_error(GET_LOGGER("db"), "Cannot read data for " << stripped_name);
continue;
} else if (!value.unserialize(data)) {
log_error(GET_LOGGER("db"), "Incorrect data for " << stripped_name << ":: " << data);
continue;
}
/* key/value are filled without any issue */
return true;
}
/* no more data */
return false;
}
void FileDatabase::end() {
std::lock_guard<std::recursive_mutex> lock(mutex);
switch (iterating_state) {
case IteratingState::NOT_STARTED:
log_error(GET_LOGGER("db"), "Not iterating");
assert (FAIL("Not iterating"));
return;
case IteratingState::STARTED:
log_error(GET_LOGGER("db"), "Iterating started but not proceeded");
assert (FAIL("Not iterating"));
break;
case IteratingState::ITERATE:
log_warning(GET_LOGGER("db"), "Not all files were checked while iterating");
break;
case IteratingState::NO_MORE_DATA:
break;
default:
assert (FAIL("Unreachable code"));
return;
}
iterating_state = IteratingState::NOT_STARTED;
}
bool FileDatabase::get(const Serializable& key, Serializable& value) {
std::lock_guard<std::recursive_mutex> lock(mutex);
std::string data{};
if (!read_file(full_name(key.serialize()), data)) {
return false;
}
return value.unserialize(data);
}
bool FileDatabase::put(const Serializable& key, const Serializable& value) {
std::lock_guard<std::recursive_mutex> lock(mutex);
return save_file(full_name(key.serialize()), value.serialize());
}
bool FileDatabase::remove(const Serializable& key) {
std::lock_guard<std::recursive_mutex> lock(mutex);
return remove_file(full_name(key.serialize()));
}
bool FileDatabase::invalidate(const Serializable& key) {
std::lock_guard<std::recursive_mutex> lock(mutex);
return invalidate_file(full_name(key.serialize()));
}
Database::EntityValidity FileDatabase::get_validity(const Serializable& key, std::chrono::seconds interval) {
std::lock_guard<std::recursive_mutex> lock(mutex);
return file_validity(full_name(key.serialize()), interval);
}
unsigned FileDatabase::cleanup(Serializable& key, std::chrono::seconds interval) {
return foreach(key, [this, interval](const std::string& stripped_name) -> bool {
std::string file_name = full_name(stripped_name);
switch (file_validity(file_name, interval)) {
case EntityValidity::ERROR:
return false;
case EntityValidity::VALID:
return invalidate_file(file_name);
case EntityValidity::INVALID:
return false;
case EntityValidity::OUTDATED:
return remove_file(file_name);
default:
assert (FAIL("Unreachable code"));
return false;
}
});
}
unsigned FileDatabase::wipe_outdated(Serializable& key, std::chrono::seconds interval) {
return foreach(key, [this, interval](const std::string& stripped_name) -> bool {
std::string file_name = full_name(stripped_name);
switch (file_validity(file_name, interval)) {
case EntityValidity::ERROR:
case EntityValidity::VALID:
case EntityValidity::INVALID:
return false;
case EntityValidity::OUTDATED:
return remove_file(file_name);
default:
assert (FAIL("Unreachable code"));
return false;
}
});
}
unsigned FileDatabase::drop(Serializable& key) {
return foreach(key, [this](const std::string& stripped_name) -> bool {
std::string file_name = full_name(stripped_name);
return remove_file(file_name);
});
}
unsigned FileDatabase::foreach(Serializable& key, ForeachFunction function) {
std::lock_guard<std::recursive_mutex> lock(mutex);
DIR* directory = opendir(PATH.c_str());
if (nullptr == directory) {
log_error(GET_LOGGER("db"), "Cannot open directory " << PATH
<< " for reading:: " << strerror(errno));
return 0;
}
/* appoint all names to be processed */
IteratedFiles names;
while (nullptr != directory) {
errno = 0;
struct dirent* dir_entry = readdir(directory);
/* no more entries in the directory */
if (nullptr == dir_entry) {
if (errno) {
log_warning(GET_LOGGER("db"), "Reading directory " << PATH
<< ":: " << strerror(errno));
}
closedir(directory);
directory = nullptr;
break;
}
        /* only plain files contain DB entries */
if (DT_REG != dir_entry->d_type) {
continue;
}
std::string stripped_name = strip_name(dir_entry->d_name);
/* file from other db.. or even non-db file */
if (stripped_name.empty()) {
continue;
}
/* parse key */
if (!key.unserialize(stripped_name)) {
continue;
}
names.push_back(stripped_name);
}
/* process all names */
unsigned num = 0;
for (IteratedFiles::const_iterator it = names.begin(); it != names.end(); it++) {
if (function(*it)) {
num++;
}
}
return num;
}
std::string FileDatabase::full_name(const std::string& stripped_name) const {
if (stripped_name.empty()) {
return EMPTY;
}
std::string ret = PATH + "/";
if (!get_name().empty()) {
ret.append(get_name() + ".");
}
ret.append(stripped_name);
ret.append(EXT);
return ret;
}
std::string FileDatabase::strip_name(const std::string& d_name) const {
/* there's no PATH in the name to strip */
std::string stripped = d_name;
if (!get_name().empty()) {
/* strip database name */
if (stripped.substr(0, get_name().length()) != get_name()) {
return EMPTY;
}
stripped.erase(0, get_name().length());
/* strip '.' */
if (stripped.substr(0, 1) != ".") {
return EMPTY;
}
stripped.erase(0, 1);
}
/* strip extension */
if ((stripped.length() < EXT.length()) || (stripped.substr(stripped.length() - EXT.length()) != EXT)) {
return EMPTY;
}
stripped.erase(stripped.length() - EXT.length());
return stripped;
}
bool FileDatabase::read_file(const std::string& file_name, std::string& data) {
if (file_name.empty()) {
log_error(GET_LOGGER("db"), "No file name given");
return false;
}
char buf[VALUE_LEN];
FILE* file = fopen(file_name.c_str(), "rb");
if (!file) {
log_debug(GET_LOGGER("db"), "Cannot read file " << file_name << ":: " << strerror(errno));
return false;
}
size_t bytes = fread(&buf, sizeof(char), sizeof(buf), file);
bool ok;
if (ferror(file)) {
log_error(GET_LOGGER("db"), "Cannot read file " << file_name << ":: " << strerror(errno));
ok = false;
}
else if (!feof(file)) {
log_error(GET_LOGGER("db"), "File " << file_name << " is longer than allowed");
ok = false;
} else {
data = std::string(buf, bytes);
ok = true;
}
fclose(file);
return ok;
}
bool FileDatabase::save_file(const std::string& file_name, const std::string& data) {
if (file_name.empty()) {
log_error(GET_LOGGER("db"), "No file name given");
return false;
}
if (data.empty()) {
log_error(GET_LOGGER("db"), "Writing empty data forbidden, file " << file_name);
return false;
}
    FILE* raw = fopen(file_name.c_str(), "wb");
    if (!raw) {
        log_error(GET_LOGGER("db"), "Cannot write file " << file_name);
        return false;
    }
    /* wrap in shared_ptr only after the null check: fclose(nullptr) is undefined behavior */
    std::shared_ptr<FILE> file{raw, fclose};
int file_descriptor = fileno(file.get());
size_t bytes = fwrite(data.data(), sizeof(char), data.length(), file.get());
if (data.length() != bytes) {
log_error(GET_LOGGER("db"), "Cannot write whole file " << file_name << ", only " << bytes << " written");
return false;
}
/* check current mode of the file */
struct stat stats;
if (0 != fstat(file_descriptor, &stats)) {
log_error(GET_LOGGER("db"), "Cannot set mode for " << file_name << ":: " << strerror(errno));
return false;
}
/* already marked as valid */
if (S_ISVTX == (stats.st_mode & S_ISVTX)) {
return true;
}
    /* set the sticky bit on the file to mark it valid; this alters stats.st_ctim as well */
return 0 == fchmod(file_descriptor, stats.st_mode | S_ISVTX);
}
bool FileDatabase::remove_file(const std::string& file_name) {
if (file_name.empty()) {
log_error(GET_LOGGER("db"), "No file name given");
return false;
}
/* POSIX compatible.. */
return (::remove(file_name.c_str()) == 0);
}
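/* Design note (as implemented above and below): the sticky bit (S_ISVTX) on a
 * database file marks the entry as valid. invalidate_file() clears the bit,
 * which also updates st_ctim; file_validity() then reports an entry whose bit
 * has been cleared for longer than `interval` as OUTDATED. */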
Database::EntityValidity FileDatabase::file_validity(const std::string& file_name, std::chrono::seconds interval) {
struct stat stats;
errno = 0;
if (0 != stat(file_name.c_str(), &stats)) {
log_error(GET_LOGGER("db"), "Cannot check mode for " << file_name << ":: " << strerror(errno));
return EntityValidity::ERROR;
}
/* still marked as valid */
if (S_ISVTX == (stats.st_mode & S_ISVTX)) {
return EntityValidity::VALID;
}
    /* invalidated long enough ago: the file is outdated */
if (stats.st_ctim.tv_sec + interval.count() <= time(nullptr)) {
return EntityValidity::OUTDATED;
}
return EntityValidity::INVALID;
}
bool FileDatabase::invalidate_file(const std::string& file_name) {
if (file_name.empty()) {
return false;
}
    FILE* raw = fopen(file_name.c_str(), "r+b");
    if (!raw) {
        log_error(GET_LOGGER("db"), "Cannot read file " << file_name);
        return false;
    }
    /* wrap in shared_ptr only after the null check: fclose(nullptr) is undefined behavior */
    std::shared_ptr<FILE> file{raw, fclose};
int file_descriptor = fileno(file.get());
/* check current mode of the file */
struct stat stats;
errno = 0;
if (0 != fstat(file_descriptor, &stats)) {
log_error(GET_LOGGER("db"), "Cannot check mode for " << file_name << ":: " << strerror(errno));
return false;
}
/* already marked as invalid */
if (S_ISVTX != (stats.st_mode & S_ISVTX)) {
return false;
}
/* remove sticky bit from the file, this alter stats.st_ctim as well */
return 0 == fchmod(file_descriptor, stats.st_mode & (~S_ISVTX));
}
std::string FileDatabase::get_directory() {
char file_name[128] = "/var/opt/psme/check_dir_XXXXXX";
int fd;
errno = 0;
fd = mkstemp(file_name);
if (fd < 0) {
log_warning(GET_LOGGER("db"), "Cannot create tmp file " << file_name << ":: " << strerror(errno));
::strcpy_s(file_name, sizeof(file_name), "/tmp/database_XXXXXX");
fd = mkstemp(file_name);
if (fd < 0) {
log_error(GET_LOGGER("db"), "Cannot get temporary directory name:: " << strerror(errno));
assert (FAIL("Cannot get temporary directory name"));
return "/tmp";
}
/* replace temp file with directory writable for all */
close(fd);
::remove(file_name);
log_info(GET_LOGGER("db"), "Database created in temporary " << file_name);
mkdir(file_name, 01777);
::strcat_s(file_name, sizeof(file_name), "/check_dir_XXXXXX");
fd = mkstemp(file_name);
}
if (fd < 0) {
log_error(GET_LOGGER("db"), "Cannot create database checker " << file_name << ":: " << strerror(errno));
assert (FAIL("Cannot create database file"));
return "/tmp";
}
close(fd);
::remove(file_name);
std::string path = file_name;
path.erase(path.rfind('/'));
return path;
}
} // @i{handler}
} // @i{model}
} // @i{rest}
} // @i{psme}
|
/*
 * Copyright (c) 2020. A self-learning project applying advanced concepts throughout.
*/
package com.dhiren.springboot.mongodb.repository;
import com.dhiren.springboot.mongodb.entity.FlightDetails;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
@Repository
public interface FlightDetailsRepository extends MongoRepository<FlightDetails,String> {
}
|
def atomicscatterer_fromSite(self, site):
position = site.xyz_cartn
print("cartesian coordinates of atom:", position)
atom = site
import periodictable
elem = getattr(periodictable, atom.element)
mass = elem.mass
coh_xs = elem.neutron.coherent
inc_xs = elem.neutron.incoherent
abs_xs = elem.neutron.absorption
    from math import sqrt, pi
    # sigma = 4*pi*b^2; convert barns to fm^2 (1 barn = 100 fm^2) so b is in fm
    coh_b = sqrt( coh_xs * 100 /4/pi )
return self.atomicscatterer(
position, mass,
coh_b, coh_xs, inc_xs, abs_xs,
) |
// Create a new Oid from a Sha1 string of length 40.
func NewOidFromString(sha1 string) (*Oid, error) {
b, err := hex.DecodeString(sha1)
if err != nil {
return nil, err
}
	// Guard against inputs that decode to anything other than 20 bytes,
	// which would otherwise index past the decoded slice (needs the "errors" import).
	if len(b) != 20 {
		return nil, errors.New("sha1 string must decode to exactly 20 bytes")
	}
	o := new(Oid)
	copy(o.Bytes[:], b)
return o, nil
} |
package vaccinations
const bioNTechPfizerVaccination = "Corona-Erstimpfung (BioNTech/Pfizer)"
const astraZenecaVaccination = "Corona-Erstimpfung (AstraZeneca)"
const johnsonAndJohnsonVaccination = "Corona-Impfung (Johnson & Johnson)"
const errorMessageTemplate = "ERROR for doctor '%d' and service '%s'" |
/**
* Adds the event into the queue for subsequent batch processing.
*
* @param record a record from the MongoDB oplog
* @throws InterruptedException if the thread is interrupted while waiting to enqueue the record
*/
public void enqueue(SourceRecord record) throws InterruptedException {
    if (record != null) {
records.put(record);
}
} |
import numpy as np
import torch
import gym
from mujoco_py import GlfwContext
from transformers import DecisionTransformerModel
GlfwContext(offscreen=True)  # create an offscreen GLFW context to initialize rendering
def get_action(model, states, actions, rewards, returns_to_go, timesteps):
# we don't care about the past rewards in this model
states = states.reshape(1, -1, model.config.state_dim)
actions = actions.reshape(1, -1, model.config.act_dim)
returns_to_go = returns_to_go.reshape(1, -1, 1)
timesteps = timesteps.reshape(1, -1)
if model.config.max_length is not None:
states = states[:, -model.config.max_length :]
actions = actions[:, -model.config.max_length :]
returns_to_go = returns_to_go[:, -model.config.max_length :]
timesteps = timesteps[:, -model.config.max_length :]
# pad all tokens to sequence length
attention_mask = torch.cat(
[torch.zeros(model.config.max_length - states.shape[1]), torch.ones(states.shape[1])]
)
attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1)
states = torch.cat(
[
torch.zeros(
(states.shape[0], model.config.max_length - states.shape[1], model.config.state_dim),
device=states.device,
),
states,
],
dim=1,
).to(dtype=torch.float32)
actions = torch.cat(
[
torch.zeros(
(actions.shape[0], model.config.max_length - actions.shape[1], model.config.act_dim),
device=actions.device,
),
actions,
],
dim=1,
).to(dtype=torch.float32)
returns_to_go = torch.cat(
[
torch.zeros(
(returns_to_go.shape[0], model.config.max_length - returns_to_go.shape[1], 1),
device=returns_to_go.device,
),
returns_to_go,
],
dim=1,
).to(dtype=torch.float32)
timesteps = torch.cat(
[
torch.zeros(
(timesteps.shape[0], model.config.max_length - timesteps.shape[1]), device=timesteps.device
),
timesteps,
],
dim=1,
).to(dtype=torch.long)
else:
attention_mask = None
_, action_preds, _ = model(
states=states,
actions=actions,
rewards=rewards,
returns_to_go=returns_to_go,
timesteps=timesteps,
attention_mask=attention_mask,
return_dict=False,
)
return action_preds[0, -1]
# build the environment
env = gym.make("Hopper-v3")
state_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
max_ep_len = 1000
device = "cuda"
scale = 1000.0 # normalization for rewards/returns
TARGET_RETURN = 3600 / scale # evaluation conditioning targets, 3600 is reasonable from the paper LINK
state_mean = np.array(
[
1.311279,
-0.08469521,
-0.5382719,
-0.07201576,
0.04932366,
2.1066856,
-0.15017354,
0.00878345,
-0.2848186,
-0.18540096,
-0.28461286,
]
)
state_std = np.array(
[
0.17790751,
0.05444621,
0.21297139,
0.14530419,
0.6124444,
0.85174465,
1.4515252,
0.6751696,
1.536239,
1.6160746,
5.6072536,
]
)
state_mean = torch.from_numpy(state_mean).to(device=device)
state_std = torch.from_numpy(state_std).to(device=device)
# Create the decision transformer model
model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
model = model.to(device)
model.eval()
for ep in range(10):
episode_return, episode_length = 0, 0
state = env.reset()
target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1)
states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32)
actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32)
rewards = torch.zeros(0, device=device, dtype=torch.float32)
timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
for t in range(max_ep_len):
env.render()
# add padding
actions = torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0)
rewards = torch.cat([rewards, torch.zeros(1, device=device)])
action = get_action(
model,
(states.to(dtype=torch.float32) - state_mean) / state_std,
actions.to(dtype=torch.float32),
rewards.to(dtype=torch.float32),
target_return.to(dtype=torch.float32),
timesteps.to(dtype=torch.long),
)
actions[-1] = action
action = action.detach().cpu().numpy()
state, reward, done, _ = env.step(action)
cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim)
states = torch.cat([states, cur_state], dim=0)
rewards[-1] = reward
pred_return = target_return[0, -1] - (reward / scale)
target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1)
timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1)
episode_return += reward
episode_length += 1
if done:
break
|
def cluster_keyslot(self, key):
    """Return the hash slot that the given key maps to."""
return self.execute_command("CLUSTER KEYSLOT", key) |
/**
* Basic KBase driver for narrative interaction.
*/
public class App {
private static GUIForm gui;
/**
* @param args [-t <jobType = FBA (default) | FUTURE_TYPE>, -f <path to config file for job inputs>]
*/
public static void main(String[] args) throws ClassNotFoundException,
UnsupportedLookAndFeelException, InstantiationException, IllegalAccessException {
parseProgramArgsAndRunCorrectMode(args);
}
private static void parseProgramArgsAndRunCorrectMode(String[] args)
throws ClassNotFoundException, InstantiationException,
IllegalAccessException, UnsupportedLookAndFeelException {
boolean fbaType = true;
boolean fromConfigFile = false;
boolean remoteWebDriver = false;
String inputFileName = "";
String configFileName = "app.config";
String remoteWebDriverURL = "";
try {
CommandLine commandLine = configureAndParseOptions(args);
if (commandLine.hasOption("f")) {
fromConfigFile = true;
inputFileName = commandLine.getOptionValue("f");
if (!commandLine.hasOption("c")) {
                    throw new IllegalArgumentException("Configuration file is required when reading from an input file");
}
}
if (commandLine.hasOption("c")) {
configFileName = commandLine.getOptionValue("c");
}
if (commandLine.hasOption("r")) {
remoteWebDriver = true;
remoteWebDriverURL = commandLine.getOptionValue("r");
}
if (commandLine.hasOption("t") && !commandLine.getOptionValue("t").equals("FBA")) {
fbaType = false;
}
} catch (ParseException e) {
e.printStackTrace();
return;
}
// Load configuration files
Properties appProps;
Properties envProps;
try {
ConfigurationLoader configuration;
if (fromConfigFile && remoteWebDriver) {
System.out.println("From remote web driver: " + remoteWebDriverURL);
configuration = new ConfigurationLoader(configFileName, "chrome_remote", remoteWebDriverURL);
} else {
configuration = new ConfigurationLoader();
}
configuration.loadConfiguration();
appProps = configuration.getApplicationProperties();
envProps = configuration.getEnvironmentProperties();
} catch (IOException e) {
e.printStackTrace();
return;
}
initializeConfigurationsAndStart(fbaType, fromConfigFile, inputFileName, appProps, envProps);
}
private static void initializeConfigurationsAndStart(boolean fbaType,
boolean fromConfigFile,
String inputFileName,
Properties appProps,
Properties envProps)
throws ClassNotFoundException, InstantiationException,
IllegalAccessException, UnsupportedLookAndFeelException {
// Pull specific configuration values from the files
String globusUser;
String globusPass;
String driverPath;
SeleniumDrivers driverType;
String narrativeIdentifier;
boolean seleniumHeadless;
try {
globusUser = appProps.getProperty("kbase.auth.globus.user");
globusPass = appProps.getProperty("kbase.auth.globus.pass");
driverType = SeleniumDriverUtilities.getDriverFromString(envProps.getProperty("selenium.driver.type"));
driverPath = envProps.getProperty("selenium.driver.path");
narrativeIdentifier = appProps.getProperty("kbase.narrative_identifier");
seleniumHeadless = Boolean.parseBoolean(appProps.getProperty("selenium.headless"));
} catch (InvalidSeleniumDriverException e) {
System.err.println("Invalid Selenium driver specified in configuration.");
e.printStackTrace();
return;
}
// Initialize the job manager
JobManager manager = new JobManager();
new Thread(manager).start();
// Initialize and add a Selenium runner to the manager
manager.initializeRunners(
jobManager -> new SeleniumRunner(
manager,
new SeleniumConfiguration(globusUser, globusPass, driverType,
driverPath, narrativeIdentifier, seleniumHeadless)
),
1 // Let's leave it at a single runner for now
);
// Input config file
if (fromConfigFile && fbaType) {
runHeadlessMode(manager, inputFileName);
} else {
runGUIMode(manager);
}
}
private static CommandLine configureAndParseOptions(String[] args) throws ParseException {
Options options = new Options();
options.addOption("c", true, "path to app config file");
options.addOption("f", true, "path to input file");
options.addOption("t", true, "kbase test type (ex. FBA)");
options.addOption("r", true, "path to a remote web driver url - for docker use");
return new DefaultParser().parse(options, args);
}
private static void runGUIMode(JobManager manager) throws ClassNotFoundException,
InstantiationException, IllegalAccessException, UnsupportedLookAndFeelException {
        //Give the GUI a more authentic feel according to the user's OS
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
gui = new GUIForm(manager);
SwingUtilities.invokeLater(() -> {
//Generate our GUI, this has control of the web driver.
gui.setVisible(true);
});
}
private static void runHeadlessMode(JobManager manager, String inputFileName) {
FileInputReader<FBAParameters> fbaFileInputReader = new JSONFileInputReader();
try {
for (FBAParameters jobParams : fbaFileInputReader.parseFromFile(inputFileName)) {
manager.scheduleJob(new Job(jobParams));
}
} catch (FileNotFoundException e) {
e.printStackTrace();
System.out.println("Config file not found.");
System.exit(-1);
} catch (JobManagerStoppedException e) {
e.printStackTrace();
}
}
} |
/// System console operations.
pub mod console {
/// Console write functions.
///
/// `core::fmt::Write` is exactly what we need for now. Re-export it here because
/// implementing `console::Write` gives a better hint to the reader about the
/// intention.
pub use core::fmt::Write;
/// Console read functions.
pub trait Read {
/// Read a single character.
fn read_char(&self) -> char {
' '
}
}
} |
def pql_get_db_type():
require_access(AccessLevels.EVALUATE)
return pyvalue_inst(get_db().target, T.string) |
from lxml import etree
from ocd_backend.extractors import BaseExtractor, HttpRequestMixin
from ocd_backend.extractors import log
class WikimediaCommonsExtractor(BaseExtractor, HttpRequestMixin):
"""This extractor fetches metadata of 'File' pages in a particular
Wikimedia Commons category.
    The MediaWiki API is used first to query for File pages in a specific
    category. For each page found, the metadata is retrieved by using the
    `Commons API <http://tools.wmflabs.org/magnus-toolserver/commonsapi.php>`_.
"""
commons_api_url = 'http://tools.wmflabs.org/magnus-toolserver/commonsapi.php'
def __init__(self, *args, **kwargs):
super(WikimediaCommonsExtractor, self).__init__(*args, **kwargs)
self.base_url = self.source_definition['wikimedia_base_url']
self.wikimedia_category = self.source_definition['wikimedia_category']
    def wikimedia_api_call(self, params=None):
        """Calls the MediaWiki API and returns the response as a string.

        :type params: dict
        :param params: a dictionary sent as arguments in the query string
        """
req_params = {
'action': 'query',
'list': 'categorymembers',
'cmtype': 'file',
'cmtitle': self.wikimedia_category,
'cmlimit': 250,
'format': 'xml'
}
        req_params.update(params or {})
log.debug('Getting %s (params: %s)' % (self.base_url, params))
r = self.http_session.get(self.base_url, params=req_params)
r.raise_for_status()
return r.content
def commons_api_call(self, image_name):
"""Use the Wikimedia Commons API to retrieve media metadata from
Commons as XML. The response is returned as a string.
:type image_name: str
:param image_name: the title of the Commons page containing the
image (e.g. ``File:Studioportretten.jpg``)
"""
params = {
'image': image_name,
'forcehtml': '',
}
log.debug('Getting %s (params: %s)' % (self.commons_api_url, params))
r = self.http_session.get(self.commons_api_url, params=params)
r.raise_for_status()
return r.content
def get_all_records(self):
cmcontinue = None
while True:
req_params = {}
if cmcontinue:
req_params['cmcontinue'] = cmcontinue
# Get the file pages in the specified Wiki category
file_pages = etree.fromstring(self.wikimedia_api_call(req_params))
# Request the metadata of each page
for file_page in file_pages.findall('.//cm'):
page_title = file_page.attrib['title']
page_meta = self.commons_api_call(page_title)
page_meta_tree = etree.fromstring(page_meta)
# Skip this page if the response contains errors (the Commons
# API doesn't return proper HTTP status codes)
page_meta_error = page_meta_tree.find('.//error')
                if page_meta_error is not None:
log.warning('Skipping "%s" because of Commons API error: %s'
% (page_title, page_meta_error.text))
continue
yield 'application/xml', page_meta
try:
cmcontinue = file_pages.xpath('.//query-continue/categorymembers/@cmcontinue')[0]
except IndexError:
cmcontinue = None
# When cmcontinue is empty or None, we've reached the last page
if not cmcontinue:
log.debug('cmcontinue empty, done fetching category pages')
break
def run(self):
for record in self.get_all_records():
yield record
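# Illustrative source definition for this extractor (the values are examples;
# only the two keys below are read by __init__):
#
#   source_definition = {
#       'wikimedia_base_url': 'https://commons.wikimedia.org/w/api.php',
#       'wikimedia_category': 'Category:Images_from_the_Nationaal_Archief',
#   }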
|
// integrate with respect to the (k - 1)st variable and divide by k
mpq_t *integrate_and_divide(mpq_t *B, int n, int k, cycle_types cs, cycle_types new_cs) {
mpq_t *result = malloc(new_cs.count * sizeof(mpq_t));
int index = 0;
mpq_t divisor;
mpq_init(divisor);
for (int new_index = 0; new_index < new_cs.count; new_index++) {
cycle_type c = cs.cycle_types[index];
c.vals[k - 1]++;
mpq_set_ui(divisor, 1, c.vals[k - 1] * k);
mpq_init(result[new_index]);
if (!compare_cycle_types(&c, &new_cs.cycle_types[new_index])) {
mpq_mul(result[new_index], B[index], divisor);
index++;
} else {
mpq_set_ui(result[new_index], 0, 1);
}
}
mpq_clear(divisor);
return result;
} |
// TestFrontendHostDeleteDestination tests that a destination can be deleted
func (s *FrontendHostSuite) TestFrontendHostDeleteDestination() {
testPath := s.generateKey("/foo/bar")
frontendHost, ctx := s.utilGetContextAndFrontend()
s.mockController.On("DeleteDestination", mock.Anything, mock.Anything).Return(nil)
req := c.NewDeleteDestinationRequest()
req.Path = common.StringPtr(testPath)
err := frontendHost.DeleteDestination(ctx, req)
s.NoError(err)
} |
The future of computer vision and machine learning can be seen trundling at about 1 mile per hour through a lettuce field in the Salinas Valley of California. In certain fields, a tractor is pulling a highly specialized robot called the “lettuce bot.” The robot, made by Blue River Technology, contains enough smarts to differentiate the weeds from the budding lettuce plants and then kill those weeds with an injection of fertilizer.
The result is a weed-free field without the use of expensive and harmful pesticides — making Blue River’s robot a threat to the $31-billion pesticide business and a friend of organic farmers. The startup, founded in 2011, on Monday said it has raised $3.1 million in a Series A round led by Khosla Ventures. Steve Blank, Ulu Ventures, and Stanford Angels and Entrepreneurs also joined the round. In addition, Ryan Kottenstette of Khosla Ventures has joined the company’s board of directors.
Jorge Heraud, co-founder and CEO of Blue River Technology, said the funding, which comes after a $400,000 angel round and an early grant from the National Science Foundation, will help Blue River commercialize its prototype. The goal is to speed the robot up to 3 miles per hour while maintaining its accuracy at both classifying plants and then killing them. And while today’s model uses an injection of fertilizer to kill weeds, he hopes to eventually make the process mechanical so as to save on the cost of buying fertilizer.
How does it work?
To create a robot that can replace human weed pickers is no easy task. Heraud explains that there are three different algorithms at work inside the robot. The first simply takes the readings of a camera that faces the ground and identifies whether or not it is seeing a plant. This means more than just figuring out that green is a plant. The algorithm also has to be able to tell where one plant ends or begins so it can later identify it.
The second, called the Classify algorithm, determines whether the plant is a weed or not. Heraud says the algorithm is about 98 or 99 percent accurate and was trained using a wooden cart they ran along participating farmers' fields. “The farmers have been very cooperative,” says Heraud. He notes that it can reduce their spending on pesticides as well as their reliance on labor.
The final algorithm is simply called the Kill algorithm, and its purpose is to determine the correct moment to inject the deadly dose of fertilizer. Because the killing mechanism isn't in the robot's field of view, it must determine where to inject the fertilizer based on how fast it is moving and where the plant was when it classified it. It's accurate to one-quarter of an inch at 1 mile per hour, and the goal is to keep it accurate to one-half of an inch while moving at 3 miles per hour.
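To make that timing concrete, here is a minimal sketch (not Blue River's code; the camera-to-nozzle distance is a made-up example value) of the delay the Kill algorithm has to derive from ground speed:

import math

CAMERA_TO_NOZZLE_M = 0.5   # hypothetical offset between camera and injector
MPH_TO_MPS = 0.44704       # miles per hour -> meters per second

def injection_delay_s(speed_mph: float) -> float:
    """Seconds between classifying a plant and firing the injector."""
    return CAMERA_TO_NOZZLE_M / (speed_mph * MPH_TO_MPS)

print(round(injection_delay_s(1.0), 2))  # ~1.12 s at today's 1 mph
print(round(injection_delay_s(3.0), 2))  # ~0.37 s at the targeted 3 mph

The faster the robot moves, the smaller this window gets, which is why holding accuracy at 3 miles per hour is the hard part.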
Toward a machine learning utopia
Heraud’s lettuce bot is one example of a way we can use machine learning and computer vision to adapt machines to do more and more tasks. We have to program a computer to do something, but efforts like the lettuce bot are a way of letting computers understand their environments and then take pre-programmed actions based on the data they have. It’s not artificial intelligence, but it’s getting us closer.
While having a weed-whacking robot might seem like a step away from machines that take over the planet, in reality they are highly specialized and limited pieces of equipment. For example, the lettuce bot works on iceberg and romaine lettuces, but it wouldn’t work for corn or broccoli crops. The algorithms would have to be trained for each type of crop that Blue River wants to design an implement for.
But with the growing number of people to feed, the threat pesticides pose to the environment, and the growing number of pesticide-resistant weeds, figuring out a cheaper and less chemically intensive method of farming is an area where the brains behind machine learning can make the world a better place using some fancy math and decent compute power. This five-person startup may be working in the Salinas Valley, but its roots are in Silicon Valley. |
#include "list_view.h"
#include <QDebug>
ListView::ListView(QWidget *parent, CBHash settingsSet, QJsonObject *hash)
: View(dynamic_cast<Model *>(new ListModel(hash)), parent, settingsSet) {
}
ListView::~ListView() {
}
|
An IDS scheme against Black hole Attack to Secure AOMDV Routing in MANET
In a Mobile Ad hoc Network (MANET), all nodes move freely in the absence of any centralized coordination system. As a result, attackers or malicious nodes can easily compromise such a network and cause routing misbehavior. Routing is essential for delivering data between source and destination. This research addresses the security of MANETs and proposes a novel scheme against routing misbehavior caused by the black hole attack. The Ad hoc On-demand Multipath Distance Vector (AOMDV) protocol is used for routing; it also improves routing quality compared to single-path routing protocols. The attacker infects all the possible paths selected by the sender for delivering data, and malicious nodes send optimistic replies during route discovery, which makes their identification a complex procedure. The proposed Intrusion Detection System (IDS) scheme identifies the attacker through a hop-count mechanism: the IDS confirms which intermediate node the routing information for the actual data has reached and whether valid next-hop information exists at that node. The identity (ID) of the black hole attacker node is then broadcast in the network, so that the attacker cannot participate in future routing procedures. The proposed security scheme detects routing misbehavior caused by the malicious attack and provides deterrence against it. We compare the routing performance of AOMDV, AOMDV under attack, and the IDS scheme. The performance of normal multipath routing and the proposed IDS scheme is almost equal. The attacker degrades the whole routing performance, but we observe that in the presence of the attacker, routing misbehavior is completely blocked by the proposed IDS scheme, which recovers 95% of the data compared to normal routing. Keywords - AOMDV, MANET, IDS, Black hole attack, Routing misbehavior.
I. Introduction
A mobile ad hoc network (MANET) is a network consisting of wireless mobile nodes that communicate with each other without centralized control or established infrastructure. Nodes within each other's radio range communicate directly, while distant nodes rely on neighboring nodes to forward packets. In a MANET, every node can act as a host or a router. Mobility, an advantage of wireless communication, allows nodes to move around freely while remaining connected to the network. Ad hoc networks are so adaptable that nodes can join and leave a network far more easily than in a wired network. Such networks can be used on the battlefield, in disaster management, and in remote areas where the establishment and management of a fixed network is not possible. They can also be used in areas where establishing fixed infrastructure is very difficult, for example to deploy and coordinate drones in the battlefield.
However, this flexibility of mobile nodes results in an ever-changing topology that makes it very hard to establish secure ad hoc routing protocols. The radio channel employed by ad hoc networks is broadcast in nature and is shared by all nodes in the network. Data transmitted by a node is received by all nodes within its direct transmission range, so attackers can easily interfere with the data being transmitted. If the ad hoc network lacks network-level or link-layer security, a MANET routing protocol is vulnerable to many forms of malicious attack, including snooping on network traffic, replaying transmissions, manipulating packet headers, and redirecting routing messages. In a black hole attack, malicious nodes exploit the route discovery process: a malicious node uses the routing protocol to claim that it has the shortest path to the destination node, then drops routing packets instead of forwarding them to its neighbors. A single black hole node can easily attack a mobile ad hoc network. There are various schemes for detecting a single black hole, but they fail when a cooperative black hole attack occurs, that is, when malicious nodes act as a group: one malicious node receives data and forwards it to another malicious node instead of towards the destination. To provide comprehensive security, this paper proposes a secure IDS scheme for the AOMDV routing protocol in MANETs. A secure routing procedure is required to increase the likelihood that actual data is delivered rather than corrupted by the attacker, since the nodes involved in routing cannot by themselves ensure the secure and uninterrupted delivery of transmitted data.
The absence of a centralized authority is the major enabler of attacks in a MANET. Malicious nodes can easily degrade routing performance by becoming part of a route. Because malicious nodes are already present in the network and perform the same functions as normal nodes, identifying them is not easy.
Through routing misbehavior, attacker or malicious nodes drop the data routed through them or within their range. Misbehaving nodes can disrupt route discovery by impersonating the destination, by replying with stale or corrupted routing information, or by propagating forged control traffic. With secure routing in the mobile ad hoc network as the essential concern, we propose a new algorithm and evaluate the network's behavior under attack in terms of throughput, routing load and delay.
Security in a Mobile Ad-Hoc Network (MANET) is the most essential requirement for the basic functioning of the network. Availability of network services, and confidentiality and integrity of the data, can be accomplished by ensuring that security requirements have been met. MANETs frequently suffer from security attacks due to their features: an open medium, a dynamically changing topology, absence of central monitoring and management, cooperative algorithms, and no clear defense mechanism. These aspects have turned the MANET into a battlefield against security threats.
The remainder of this paper is organized as follows. Section II presents the classification of attacks in MANET. Section III reviews the previous work done in this field. Section IV states the problem, and Section V describes the proposed IDS security scheme in depth together with the simulation environment. Section VI presents the simulation results obtained using ns-2, and Section VII concludes our work with future extensions.
II. Types of attacks in MANET
The attacks in MANETs are classified into two major categories, namely:
Passive Attacks - Passive attacks are those established by adversaries merely to eavesdrop on the data exchanged in the network. These attackers in no way disturb the operation of the network. Detecting such attacks is very difficult, since the network itself is not impacted, and they can be mitigated by using strong encryption techniques.
Active Attacks - An active attack attempts to alter or destroy the data being exchanged, thereby disrupting the normal operation of the network. In MANET, malicious and unwanted nodes interrupt the natural operation of the network.
Some of the common attacks in MANET are as follows:
1) Black Hole Attack - The aim of this attack is to increase the congestion in the network. In this attack the adversary node does not forward any packets transmitted to it, but rather drops them all. Due to this attack, the packets sent by the nodes do not reach their intended destination, and the congestion in the network grows due to retransmissions.
2) Wormhole Attack - In the wormhole attack, two colluding nodes are involved: one node tunnels packets to another node in the same network over a high-speed private wired or wireless link, and these packets are then replayed from that location into the network. This tunnel between two selfish nodes is known as a wormhole.
3) Denial of Service (DoS) Attack and Flooding -
The purpose of this attack is to exhaust the normal working of the network. It is executed by constantly injecting packets into the network, forcing the targeted node to process them and keeping it occupied, ultimately resulting in the collapse of that node. By executing this attack, the attacker keeps the targeted node busy processing the induced packets, causing legitimate RREQs to be dropped. This attack can bring the network infrastructure to a breakdown.
4) Sybil Attack -
In the Sybil attack, an attacker pretends to have multiple identities. A malicious node can behave as if it were a large number of nodes, either by impersonating other nodes or simply by claiming false identities. Sybil attacks are divided along three dimensions: direct or indirect communication, fabricated or stolen identity, and simultaneity. In direct communication, Sybil nodes communicate directly with legitimate nodes, whereas in indirect communication, messages sent to Sybil nodes are routed through malicious nodes. An attacker can fabricate a new identity, or it can simply steal one after destroying or temporarily disabling the impersonated node. All Sybil identities can act together in the network, or they may be cycled through.
5) Byzantine Attack - A compromised intermediate node acting by itself, or a set of compromised intermediate nodes acting in collusion, carries out attacks such as generating routing loops, forwarding packets through non-optimal routes, or selectively dropping packets, which results in disruption or degradation of the routing services.
6) Rushing Attack - In the rushing attack, an attacker that receives an RREQ packet from the source node disperses the packet quickly to all other nodes in the network before they receive the same packet from the source. When the original RREQ packet later arrives at these nodes, they consider it a duplicate and discard it, since they previously received the packet from the adversary.
III. Work Accomplished in this field
Secure routing schemes for MANETs have received increasing attention in recent years, with the main focus on data forwarding.
In this paper, an approach has been proposed to diminish the black hole attack using the AOMDV (Ad hoc On-demand Multipath Distance Vector) routing protocol, with some improvements made to the AOMDV protocol. The proposed approach is based on AOMDV route discovery and introduces new logic for avoiding the black hole attack using the legitimacy, or trust factor, of a node. This approach detects the black hole node and discovers node-disjoint multiple paths, which reduces the overhead on any specific node. These improvements make the protocol robust against the black hole attack along the multipath route discovery process.
In this paper they proposed the SAODV protocol, an extension of AODV. This protocol is a secure and efficient MANET routing protocol which aims to address the weak security of the AODV protocol and is able to withstand the black hole attack. The paper proposed a secured message security scheme for MANETs (the so-called T-AOMDV) that uses trust-based multipath AOMDV routing combined with a soft-encryption methodology to transfer messages securely. More precisely, the approach consists of three steps: (1) message encryption, where at the source node the message is segmented into three parts and these parts are encrypted with one another using XOR operations; (2) message routing, where the message parts are routed separately through different trust-based multiple paths using a novel node-disjoint AOMDV protocol; and (3) message decryption, where the destination node decrypts the message parts to recover the original message.
In this paper, they proposed a method that uses Intrusion Detection using Anomaly Detection (IDAD) to defend against black hole attacks launched by both single and multiple black hole nodes. The results show that the approach increases network performance by reducing the generation of control (routing) packets while effectively defending mobile ad hoc networks against black hole attacks.
In this paper, they proposed a method that uses promiscuous mode to find a malicious node and broadcast information about the malicious node to all other nodes in the network. The efficiency of the suggested mechanism is shown by the fact that the throughput of the network does not decay in the presence of black holes.
In this paper, they proposed two possible solutions to counter the black hole attack. The first solution is to exploit several routes to the destination. The second is to use the packet sequence number contained in every packet header. Compared with the AODV routing scheme, the second solution is superior: the choice of the route to the destination depends on the pause time, at a minimal cost of additional delay in the network.
In this paper, they proposed a solution in which the requesting node waits and checks the replies from all neighboring nodes to find a safe route. It provides better performance than conventional AODV in the presence of black holes, with minimal additional delay and overhead.
In this paper, they use a reactive routing protocol called Ad hoc On-demand Distance Vector (AODV) routing to examine, via simulation, the outcome of the black hole attack when the destination sequence number is altered. They then determine features that distinguish the normal state from the behavior under a black hole attack. They propose a training scheme for high-accuracy detection that updates the training data at given time intervals and adaptively re-defines the normal state according to the changing network environment.
IV. Problem Statement
In the MANET environment the problem is that nodes are supposed to collaborate dynamically with each other to provide routing service and forward packets. This requirement represents a security challenge when malicious nodes are present in the network. Indeed, the presence of such nodes may not only disrupt normal network operations but also cause serious message security concerns. The security concern is critical in the absence of centralized administration, because no system in the network monitors the routing information and malicious activities. The routing misbehavior degrades the network performance by dropping or capturing data packets in the network. In particular:
1. The attacker replies with false route information.
2. All data forwarded through malicious nodes is dropped by the attacker.
3. The sender keeps trying to send data after a number of failed iterations.
4. More routing packets are delivered to send only a small amount of data.
The proposed scheme mitigates the routing misbehavior caused by malicious nodes.
V. Proposed IDS Scheme to secure MANET
Route discovery is a vulnerability of on-demand multipath ad hoc routing protocols, especially AOMDV (Ad hoc On-demand Multipath Distance Vector), which an adversary can exploit to mount a black hole attack on mobile ad hoc networks. A malicious node in the network, on receiving an RREQ message, responds to source nodes by forwarding a fake RREP message that includes parameters valuable enough for it to be preferred for packet delivery to destination nodes. After reassuring the source node that it will transmit data (by sending a fake RREP to affirm it has a route to a destination node), the malicious node begins to drop all the network traffic sent to it, even though the source believes a secure connection has been established between source and destination nodes. The proposed IDS algorithm finds the multiple routes from source to destination using the AOMDV routing algorithm. After finding multiple routes, all the routes are classified based on a conviction factor indicating the existence of an attacker on the route. The algorithm then prefers the best route, i.e., one with no sign of a malicious or black hole attack. In this method the data is secured in the presence of the IDS algorithm, which routes the data through the best single route among the multiple paths established in the MANET.
In this research we consider three modules of routing: a) the first is the normal AOMDV routing module; b) the second is the attack module, in which black hole nodes are present in the network; c) the third is the proposed IDS module, which provides security in the presence of the black hole attack so that the attackers can perform no routing misbehavior at all and reliable routing is provided. The whole procedure of the IDS algorithm is given below.
Proposed Algorithm to Identify and Prevent the Black Hole Attack
Step 2: Add the next hop to the routing table if a route to the destination exists; otherwise rebroadcast the request while maintaining the hop-count information.
Step 3: If the destination is found, select the route with the minimum hop count and deliver data through that minimum-hop-count path h.
Step 4: Compare the AOMDV routing table, through the IDS system, with the next-hop routing table; if the tables match, there is no attack in the network and the route is true, so forward all data packets.
Step 5: If the next node is false and the next-hop information does not match (M denotes a data entry), i.e., if next hop h1, h2, h3, ..., hn-1 ≠ M, it means no previous data was delivered through that hop; insert into the table a new entry that has the shortest path to the destination.
Step 6: If the next hop is true but sending data through that hop fails, then send the data packet to check its reliability through the proposed IDS security scheme.
Step 7: The IDS (Intrusion Detection System) verifies the routing table; if the routing table information does not match the actual hop count, some misbehavior activity is occurring in the network through malicious nodes.
Step 8: Apply the prevention scheme: block that hop, change the path, and forward the data packet. Also forward the node's ID (identification) in the network so that the attacker is not selected in future routing procedures.
Step 9: If the attacker is present on the path selected for data delivery, then avoid that path and prefer another suitable path from the multiple paths established by AOMDV.
Select the route of hop count h1, h2, h3, ..., hn ≥ Min.
Step 10: If the routing is matched, then keep forwarding data packets until all data packets reach the destination.
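To make the detection core of Steps 4-10 concrete, a minimal runnable sketch is given below. It is only an illustration of the hop-count comparison; the table structures and all names (route, data_next_hop, ids_next_hop) are ours, not the paper's implementation.

def ids_check(route, data_next_hop, ids_next_hop):
    # route: ordered list of intermediate node IDs on the minimum-hop-count path
    # data_next_hop: next-hop entry recorded in the AOMDV routing table, per node
    # ids_next_hop: next hop the IDS actually observed the data reach, per node
    for hop in route:
        if data_next_hop.get(hop) != ids_next_hop.get(hop):
            return hop        # mismatch (Step 7): this hop is the suspected black hole
    return None               # tables match (Step 10): keep forwarding data

# Example: node 'C' claims its next hop is 'D', but the data never reached 'D',
# so 'C' is flagged, blocked, and its ID is flooded in the network (Step 8).
suspect = ids_check(['A', 'B', 'C'],
                    {'A': 'B', 'B': 'C', 'C': 'D'},
                    {'A': 'B', 'B': 'C', 'C': None})
assert suspect == 'C'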
Flow Chart of Proposed IDS Security Scheme against Malicious Attack
The flow of the proposed security scheme represents the steps taken to identify the attacker and obstruct the attacker's activities so that secure communication is possible. Hop count - The hop count represents the total number of devices a data packet passes through between source and destination. The more hops data must traverse to reach its destination, the greater the transmission delay incurred.
Routing Table - A routing table contains the data essential to forward a packet along the best route toward its destination. Each packet includes information about its source and destination. When a packet is received, a network device examines it and matches it to the routing table entry providing the best match for its destination. The table then supplies the device with instructions for forwarding the packet to the next hop on its path across the network.
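As a toy illustration of such a lookup (exact-match only, ignoring longest-prefix matching; the names are ours, not the paper's):

# A routing table maps a destination to the next hop toward it.
routing_table = {'10.0.0.0/24': 'node_B', '10.0.1.0/24': 'node_C'}

def next_hop_for(destination_prefix):
    return routing_table.get(destination_prefix)  # best-matching entry -> next hop

assert next_hop_for('10.0.1.0/24') == 'node_C'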
IDS - An Intrusion Detection System (IDS) is the process of detecting an adversary and preventing its subsequent action; it monitors network traffic for anomalous activities and compares them against normal activities.
Simulation Environment - A network was created for the simulation and then observed for a number of parameters. The TCL (Tool Command Language) scripts of the modules were simulated for 20, 40, 60, 80 and 100 nodes. The simulation time is 100 s. Each node moves randomly and has a transmission range of 250 m. The minimum speed for the simulations is 3 m/s, while the maximum speed is 30 m/s. Each mobile node in the MANET is allotted an initial location within the simulation dimensions of 800×800 meters and joins the network at an arbitrary time. Packets are generated using FTP and CBR at a rate of 3 packets per second. Nodes are generally allocated when initiated, and the initial location of each node is defined in a movement scenario file generated for the simulation using a tool inside ns-2. The two-ray ground propagation model is used, and the 802.11 MAC layer technology is considered for wireless communication. Four attacker nodes are created, and against them two IDS nodes are placed in the network.
Performance Metrics
The following performance metrics are used for comparing the performance of the three modules: i. Packet delivery ratio - The ratio of the data delivered to the destination to the data sent out by the source. The PDR shows how successfully a protocol delivers packets from origin to destination.
ii. Average end-to-end delay - The average delay between the sending of a packet by the source and its receipt by the receiver. This includes all possible delay causes: buffering during route discovery, processing at intermediate nodes, retransmission delays, and propagation time. It is measured in milliseconds.
iii. Normalized routing overhead - The number of routing packets transmitted per data packet delivered at the destination. A lower routing overhead indicates better performance.
iv. Throughput - The average rate of successful message delivery over a communication channel. A high-throughput network is desirable.
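As a minimal illustration of how these four metrics can be computed from simulation trace counters, consider the following sketch (the variable names are ours, not taken from the ns-2 trace format):

def compute_metrics(sent, received, delays_ms, routing_pkts, duration_s):
    # sent, received: counts of data packets sent and delivered
    # delays_ms: list of per-packet end-to-end delays in milliseconds
    # routing_pkts: number of control/routing packets transmitted
    # duration_s: total simulation time in seconds
    pdr = 100.0 * received / sent if sent else 0.0                      # i. PDR (%)
    avg_delay = sum(delays_ms) / len(delays_ms) if delays_ms else 0.0  # ii. delay (ms)
    nrl = routing_pkts / received if received else float("inf")        # iii. routing load
    throughput = received / duration_s                                 # iv. packets/second
    return pdr, avg_delay, nrl, throughput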
VI. Evaluated Results
The results evaluated on the basis of the considered simulation parameters are presented in this section.
a). Packet Delivery Ratio Analysis in case of AOMDV, Attack and IDS
Successful packet transmission improves the performance of the network, while packet dropping degrades it. The routing misbehavior caused by the black hole attack degrades the percentage of data received in the node densities of 20, 40, 60, 80 and 100. The attacker consumes whole data packets, which are not forwarded to the destination after a positive route reply. The percentage of packets successfully received for normal AOMDV, Attack and the IDS scheme is illustrated in this graph. The attacker's performance is about 2% up to a simulation time of 50 seconds. The attacker drops most of the data packets, which degrades the performance of AOMDV routing. The proposed IDS security scheme improves the PDR performance in the presence of the attacker. The PDR of the IDS scheme is about 95% and is almost equal to the normal AOMDV routing performance.
c). Routing Overhead Analysis in case of AOMDV, Attack and IDS
The routing overhead is counted as the number of routing packets delivered in the network. Routing packets are flooded in the network to establish a connection between sender and receiver through intermediate nodes. The nodes form a dynamic topology, which makes link establishment a challenging issue in MANET. This graph illustrates the routing overhead for AOMDV, Attack and the IDS scheme; we observe that the IDS scheme recovers the performance in the presence of the attacker in the 20-, 40-, 60-, 80- and 100-node scenarios. The routing overhead in the 20-node scenario is about 315, and it is higher in the remaining scenarios except the 40-node one. With IDS, the number of packets received is high relative to the routing packets delivered in the network, whereas with the attacker it is negligible in comparison. The overhead curves of normal routing and IDS overlap, so they are not clearly distinguishable, and both are far lower than the attacker's.
Fig. 3 Routing Load Analysis
d). Throughput Analysis in case of AOMDV, Attack and IDS
Packet reception in a MANET is not under any supervision or administrator, so data delivery in that kind of network is not safe. In this graph we illustrate the throughput analysis for normal AOMDV, Attack and the proposed IDS scheme. The packets per unit time under attack are almost negligible, but with the proposed IDS scheme the throughput is much better than under attack in the 20-, 40-, 60-, 80- and 100-node scenarios. The throughput of normal AOMDV routing reaches more than 3500 packets/second and does not fall below 600 packets/second. The throughput of the IDS scheme is even higher than that of normal AOMDV routing. The reason is that if the attacker is present on an established path, that path is not selected for data delivery, in order to maintain reliability; the next alternative path, more reliable and strong, is chosen instead, which minimizes packet dropping and improves data delivery in the presence of the attacker.
e). Packets Received Analysis in case of AOMDV, Attack and IDS
Multipath routing enhances the possibility of successful reception by providing an alternative path in the network if the first one fails. UDP, the transport layer protocol, provides end-to-end delivery in the network. In this graph the UDP packets received are examined for normal AOMDV, AOMDV under attack and the proposed IDS scheme. Packet reception under attack is negligible due to the routing misbehavior induced in the AOMDV protocol. Packet reception with the proposed IDS scheme is much higher in the 20-, 40-, 60-, 80- and 100-node densities, and AOMDV provides almost the same performance, which yields better reception overall.
VII. Conclusion & Future Work
In the absence of a central coordination system, security is the major issue in MANET. Data packets in the network are delivered between sender and receiver through the routing mechanism of connection establishment. The performance is illustrated in the 20-, 40-, 60-, 80- and 100-node scenarios. The attackers drop all the data packets, which is the cause of routing misbehavior in MANET. The actions of the malicious attacker are blocked by the proposed IDS security scheme, which provides an attacker-free network. The AOMDV protocol provides an alternative path if a problem occurs on the accessible path. The routing performance is measured by the performance metrics for normal AOMDV routing, the malicious attack and the proposed IDS scheme. The proposed IDS scheme identifies the attacker through the next-hop information of data delivery and also forwards the node ID of the attacker in the network. If that ID appears during route establishment, an alternative route is selected for data delivery.
The routing performance of the AOMDV protocol and of the IDS scheme on AOMDV is almost equal, which means the network provides nearly equivalent performance. The attacker module degrades the whole performance of the network, but in the presence of the attacker its activities are completely blocked by the IDS scheme after it is identified in the network. Moreover, after the attacker dumps the network performance, the proposed IDS scheme recovers 95% of the data loss compared with normal AOMDV.
In future work we will also apply this IDS scheme to other routing attacks such as the wormhole and grayhole attacks, and analyze the effect of attacks on the energy consumption of mobile nodes, energy being their major or only resource for communication. Without energy, nodes in a MANET cannot survive for long.
Ruptured rudimentary horn at 22 weeks
A rudimentary horn is a developmental anomaly of the uterus. Pregnancy in a non-communicating rudimentary horn is very difficult to diagnose before it ruptures. A case of undiagnosed rudimentary horn pregnancy at 22 weeks presented to Nizwa regional referral hospital in shock with features of an acute abdomen. The chance of rupture in the first or second trimester is high, with catastrophic haemorrhage leading to increased maternal and perinatal morbidity and mortality. Management of such cases remains a challenge to this day owing to the diagnostic dilemma. Expertise in ultrasonography and early resort to surgical management is life-saving in such cases.
// Write implements io.Writer. It encodes p into c.OutMsg (possibly in
// several chunks) and sends each chunk as a binary websocket message.
func (c *Conn) Write(p []byte) (int, error) {
	c.writeLock.Lock() // serialize concurrent Write calls on this Conn
	defer c.writeLock.Unlock()
	total := len(p)
	for {
		// Encode as much of p as fits into one outgoing message.
		n, err := c.Encode(c.OutMsg, p)
		if err != nil {
			return 0, err
		}
		// If an external WriteLock is configured, hold it around the
		// stream write (the stream may be shared with other writers).
		if c.WriteLock != nil {
			c.WriteLock.Lock()
		}
		err = c.Stream.WriteMessage(websocket.BinaryMessage, c.OutMsg.(*pbx.Packet).Data)
		if c.WriteLock != nil {
			c.WriteLock.Unlock()
		}
		if err != nil {
			return 0, err
		}
		if n == len(p) {
			return total, nil // everything written
		}
		p = p[n:] // continue with the remaining bytes
	}
}
President Obama’s recent moves to address the United States’ singular problem with mass incarceration culminated Thursday when he became the first sitting president to visit one of his government’s many federal prisons. The visit follows the president’s announcement on Monday that he’d granted the commutation of 46 Americans’ prison sentences—all drug-related—levied under draconian Drug War laws, and Tuesday’s speech before the NAACP’s annual conference, in which the president proposed reforms and remarked on the “inequities” of the criminal justice system.
But something looms over and haunts the president’s talk about those inequities: the astounding racial and economic disproportionality in the system. An even bigger monster lurked behind the rhetoric, something it felt like Obama might have been trying to keep caged with vague description amidst an otherwise data-heavy and precise address. It wasn't until late in the speech that Obama alluded to the problem, speaking of the vast prison apparatus of recent decades as “containing and controlling problems that the rest of us are not facing up to and willing to do something about.”
He then repeats the frightening notion with the same word: "contain." Police and prisons have been used, he said, to “contain the hopelessness when we are not willing to make the investments to help lift those communities out of hopelessness.” He continues: “That's not just a police problem; that's a societal problem.”
Yeah, that’s a pretty big problem. It means that we’ve effectively warehoused a problem that we don’t know what else to do with: the problem of poor people, whether it be the still-uncorrected problem of black poverty or that of general, cross-racial poverty that comes as a side effect of contemporary global capitalism, which so enriches others. The problem of the former is now compounded by the intensification of the latter. Obama addressed both dilemmas in March, when he invited “The Wire” creator David Simon to the White House for a discussion of mass incarceration. Simon has been clear that his landmark show, Obama’s favorite, was about the “triumph of capitalism,” about those who get left behind and what to do with them. Obama and Simon spoke frankly about the effects of globalization and deindustrialization and the misery left in its wake, especially in the inner-city.
The two spoke about the drug economy that came into places like Baltimore after the above-ground economy disintegrated and white capital fled. The two spoke of the crisis of deindustrialization and the evaporation of well-paying jobs, or really jobs of any kind in some of the forgotten corners of America. Obama concluded the discussion by saying that it’s “ultimately jobs and re-industrialization” that would preclude the need for mass incarceration and, as he’d say on Tuesday, “lift those communities out of hopelessness,” that hopelessness and desperation that the prison system has grown to “contain.”
The drug problem is a poverty problem, says Obama. “What we understand,” he said to Simon, is that “if kids are left so far behind that they don’t have recourse, they’re going to see what else is available to survive.” Or to put more starkly: Survival becomes a crime.
So while we congratulate ourselves on not having political prisoners like China or Cuba, we do have what we might call prisoners of politics. Again, Obama described the incarceration crisis as “containing and controlling problems that the rest of us are not facing up to and willing to do something about.” Politicians have not been willing to face up to and do something about the underlying problems and all too willing to seek means of “containing” them—i.e., warehousing the people left behind. The political decisions made in the age of neoliberalism and globalization, concurrent with the War on Drugs, have resulted in a surplus population that cannot be absorbed by the sort of economy advocated by Washington and a severe criminalization of the one economy that does work in communities left behind.
How much worse are political prisoners than prisoners of politics? How meaningful a distinction can be made when political decisions can be seen to create both the circumstances for the “hopelessness” and the means of obsessively “containing” it? Black Americans, it is well argued, have never been admitted into the white world of economic well-being despite the civil rights reluctantly and slowly granted; and the racial wealth gap is an unmitigated societal disaster. Meanwhile, since Reagan, the police and prison apparatus has become a quasi-militarized leviathan with a clear preference for poor black men at every level.
Since emancipation, black Americans have been a “problem,” as W.E.B. Du Bois described it in 1900, that white America didn’t know what to do with. Eldridge Cleaver, in his prison writings in 1965, saw the prison structure as a way to shunt aside the “problem” of black Americans, and he found himself and his fellow black inmates “in vociferous rebellion against what we perceived as a continuation of slavery on a higher plane.”
He continued:
“All respect we may have had for politicians, lawyers, governors, presidents, senators, congressmen was utterly destroyed as we watched them temporizing and compromising over right and wrong, over legality and illegality, over constitutionality and unconstitutionality. We knew that in the end what they were clashing over was us, what to do with the blacks...”
What to do with the blacks. Did that question get answered in the five intervening decades? “You’ve got entire generations of men being locked up,” said Obama in his conversation with Simon. Is that what we did with black Americans, warehoused “entire generations” of the ones we didn’t know what to do with, the ones whose hopelessness we saw as a danger?
But as income inequality widens and the economy sorts more of us to the bottom—black, white, Latino and otherwise—Cleaver's concern might need an update: What to do with the poor? In the new normal of low-paying jobs, even many of those lucky enough to work are in poverty. Studies find that poverty itself is being criminalized. Poverty, debt, desperation and hopelessness grow in America without regard to race, and our plan since around 1980 has been to simply warehouse those left behind. Too much poverty and desperation is dangerous, for governments and the social order, so it's not unwise, at least in the short term, for governments to decide to lock that danger away. “But prisons do not disappear problems,” writes prison abolitionist Angela Davis, “they disappear human beings.” Have we just been hiding a problem we’re unwilling to solve? If so, it cannot be disappeared by locking it out of the discourse. |
How can we take into account student conceptions of the facial angle in a palaeontology laboratory work?
This study investigates student conceptions of the facial angle as a way to attain understanding of elements of the theory of human evolution. The chosen laboratory work involved determining the species of a human cranium, and students had to design and write down their own experimental procedure. Three versions of the laboratory work were carried out, leading to different student productions. Three aspects are presented in this paper, related to the three conceptual difficulties that appeared in our a priori analysis: the importance of students' everyday knowledge of angles and of the anatomy of human crania, the problem of knowing how many points make up an angle, and finally, the way in which students determined a reference system to construct an angle.
/* Print matched file sets
* This file is part of jdupes; see jdupes.c for license information */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "jdupes.h"
#include "jody_win_unicode.h"
#include "act_printmatches.h"
extern void printmatches(file_t * restrict files)
{
file_t * restrict tmpfile;
int printed = 0;
int cr = 1;
LOUD(fprintf(stderr, "printmatches: %p\n", files));
if (ISFLAG(a_flags, FA_PRINTNULL)) cr = 2;
while (files != NULL) {
if (ISFLAG(files->flags, FF_HAS_DUPES)) {
printed = 1;
if (!ISFLAG(a_flags, FA_OMITFIRST)) {
if (ISFLAG(a_flags, FA_SHOWSIZE)) printf("%" PRIdMAX " byte%c each:\n", (intmax_t)files->size,
(files->size != 1) ? 's' : ' ');
fwprint(stdout, files->d_name, cr);
}
tmpfile = files->duplicates;
while (tmpfile != NULL) {
fwprint(stdout, tmpfile->d_name, cr);
tmpfile = tmpfile->duplicates;
}
if (files->next != NULL) fwprint(stdout, "", cr);
}
files = files->next;
}
if (printed == 0) fwprint(stderr, "No duplicates found.", 1);
return;
}
/* Print files that have no duplicates (unique files) */
extern void printunique(file_t *files)
{
file_t *chain, *scan;
int printed = 0;
int cr = 1;
LOUD(fprintf(stderr, "print_uniques: %p\n", files));
if (ISFLAG(a_flags, FA_PRINTNULL)) cr = 2;
scan = files;
while (scan != NULL) {
if (ISFLAG(scan->flags, FF_HAS_DUPES)) {
chain = scan;
while (chain != NULL) {
SETFLAG(chain->flags, FF_NOT_UNIQUE);
chain = chain->duplicates;
}
}
scan = scan->next;
}
while (files != NULL) {
if (!ISFLAG(files->flags, FF_NOT_UNIQUE)) {
printed = 1;
if (ISFLAG(a_flags, FA_SHOWSIZE)) printf("%" PRIdMAX " byte%c each:\n", (intmax_t)files->size,
(files->size != 1) ? 's' : ' ');
fwprint(stdout, files->d_name, cr);
}
files = files->next;
}
if (printed == 0) fwprint(stderr, "No unique files found.", 1);
return;
}
|
def datautility(self) -> DataUtility:
return self._datautility |
Even though Kurt Rambis counts eight NBA championship rings in his collection from his Lakers days, the Knicks’ interim coach realizes the albatross of his two-season, 32-132 stint with Minnesota is how he often is judged.
According to a former Wolves staffer familiar with his tenure, Rambis’ 15-67 record in 2009-10 and 17-65 mark in 2010-11 was lowlighted by a bad relationship with their young star, Kevin Love, and owner Glen Taylor, a mixed-bag coaching staff and a push to install a triangle-like offense on a young crew not sophisticated enough to grasp it.
Also keep in mind, according to the staffer, the Wolves had been one of the least successful teams in the NBA since being established in 1989-90 and were coming off four straight losing seasons. That’s when Pacers general manager David Kahn left Indiana to take over the Wolves and hired Rambis.
“Whoever the coach, Kurt or whoever else, would’ve been under severe stress and pressure,” the staffer said. “The team was terrible.”
Rambis continued to struggle in the win-loss department as Knicks interim coach (9-19), but his longtime cohort, Phil Jackson, still is leaning toward keeping him despite a coaching search that, by the Zen Master’s own admission, is narrow.
Jackson has contacted David Blatt and spoken with Luke Walton, who could be bound for the Lakers after Byron Scott was fired. Jackson, though, has recommitted to keeping the triangle in place — a system with which Rambis has shown expertise.
The problem for Rambis in Minnesota was youth and a failure to get point guard Ricky Rubio to leave Spain, which left the roster ill-equipped. Rambis eventually scaled back the triangle to a one-guard front — not the traditional two-guard triangle front Jackson employed obsessively with the Bulls and Lakers.
Point guard Jonny Flynn, whom Kahn drafted before he knew he was hiring Rambis, proved to be a bad fit for the triangle. Flynn also turned out to be damaged goods — a major hip condition he suffered at Syracuse went undetected by NBA doctors before the draft.
“He didn’t have the veteran, experienced team that runs the system he wanted to run,” the Minnesota staffer said. “The style of system is not conducive to total rebuilding situations, as the Knicks are. Total rebuilding situations should emphasize maniacal, in-your-face defense and a simplistic offense and try to grind games out.”
Rambis also became too obsessed with the triangle.
“He wanted the triangle to be perfect,” the former Wolves staffer added. “One night, after we gave up 120 points, he’s walking off with the coaching staff and he says, ‘We got to work on our offense.’ It sucks so much time in preparation. It required more of a veteran team like the ones Jackson had [with the Lakers and Bulls].”
When Kahn was hired in May 2009, Kevin McHale was acting coach after his demotion from club president. It was an awkward situation, so McHale was let go.
Kahn interviewed Rambis, Mark Jackson, Monty Williams and Elston Turner. Rambis had been a longtime Lakers assistant and even had success as their interim head coach. None of the other candidates brought head-coaching experience. Rambis also got a big recommendation from Pat Riley, who coached Rambis in Los Angeles.
“Kurt was next man up as far as assistants went at the time,” the Minnesota staffer said. “He worked for Phil a number of years, was an interim. It was his turn, so to speak.”
Rambis said after becoming Knicks interim coach in February he didn’t have autonomy in Minnesota and complained to insiders he was not allowed to pick his own coaching staff.
However, Rambis didn’t have a huge array of candidates he wanted to add, his network modest.
“Kurt and Phil, guys from that ilk, don’t have a reservoir of guys,” the former Wolves staffer said. “They’re in an insulated, cloistered type of environment. When you become a head coach, a coach gets 30 calls, feeling you owe them or just from past relationships. Kurt heard from one guy — David Wohl.”
Rambis’ other assistants were picked by Wolves brass — Bill Laimbeer and Reggie Theus. It was not considered a particularly esteemed staff.
Rambis also was done in by a cool rapport with Love and team owner Glen Taylor. The staffer said Rambis tried to get Love traded after his first season to no avail. And apparently he mistreated a couple of Taylor’s trusted employees, leading the owner to hold a grudge.
“Kurt kind of got sideways with the owner,” the staffer said.
Rambis had a four-year deal, but he ended up getting dismissed after two seasons, with the former Lakers coach pleading his triangle would pay off if given the full four seasons. Rambis never got a chance to prove it. |
export declare var Vue: any;
export declare var WjPivotGrid: any;
export declare var WjPivotChart: any;
export declare var WjPivotPanel: any;
|
def detectOne(
    self, image: VLImage, detectArea: Optional[Rect] = None, detectLandmarks: bool = True
) -> Union[None, HumanDetection]:
    """Detect a single human body on an image.

    Args:
        image: image to detect on
        detectArea: rectangle on the image to restrict detection to;
            defaults to the whole image
        detectLandmarks: whether to estimate body landmarks as well
    Returns:
        the detection, or None if no human body was found
    """
    assertImageForDetection(image)
    if detectArea is None:
        forDetection = ImageForDetection(image=image, detectArea=image.rect)
    else:
        forDetection = ImageForDetection(image=image, detectArea=detectArea)
    imgs, detectAreas = getArgsForCoreDetectorForImages([forDetection])
    error, detectRes = self._detector.detect(
        [imgs[0]], [detectAreas[0]], 1, self._getDetectionType(detectLandmarks)
    )
    assertError(error)
    return HumanDetection(detectRes[0][0], image) if detectRes[0] else None
/**
 * Validates the getForecastWeatherMetrics method for the scenario where the underlying service is down
*/
@Test
public void testGetForecastWeatherMetricsWithError() throws Exception {
City city = new City();
city.setId(3647637L);
when(restTemplate.getForObject(anyString(), any())).thenReturn(new WeatherData());
ForecastWeatherMetrics forecastWeatherMetrics = systemUnderTest.getForecastWeatherMetrics(city);
assertNotNull(forecastWeatherMetrics);
assertFalse(forecastWeatherMetrics.isValid());
} |
import { Box, BoxProps, Heading, HStack, Stack } from '@chakra-ui/react';
import React from 'react';
export interface SectionHeadingProps extends BoxProps {
title: string;
description?: string;
isCentered?: boolean;
colorMode?: 'dark' | 'white';
}
export const SectionHeading = (props: SectionHeadingProps) => {
const {
title,
description,
colorMode = 'dark',
isCentered = false,
...restOfProps
} = props;
return (
<Box maxW={600} my={4} {...restOfProps}>
<Stack
alignItems={isCentered ? 'center' : 'left'}
textAlign={isCentered ? 'center' : 'left'}
spacing={3}
color={colorMode === 'white' ? 'white' : 'black'}
>
<Heading
fontSize="xx-large"
color={colorMode === 'white' ? 'white' : 'primary'}
>
{title}
</Heading>
<Box
w={20}
h={1}
borderRadius={4}
background={colorMode === 'white' ? 'white' : 'primary'}
/>
{description && (
<Box mb={{ base: -0.5, md: -1.5 }} mt={10}>
{description}
</Box>
)}
</Stack>
</Box>
);
};
|
class Solution {
public:
    string XXX(vector<string>& strs) {
        int n = strs.size();
        if (n == 0)
            return "";
        if (n == 1)
            return strs[0];
        int j = 0;
        bool matched = true;
        // Grow the prefix while every string shares the character at index j,
        // stopping before any string runs out of characters.
        while (matched && j < (int)strs[0].length()) {
            for (int i = 1; i < n; i++) {
                if (j >= (int)strs[i].length() || strs[0][j] != strs[i][j]) {
                    matched = false;
                    break;
                }
            }
            if (matched)
                j++;
        }
        // j now counts the characters common to all strings.
        return strs[0].substr(0, j);
    }
};
|
WASHINGTON (Reuters) - The U.S. economy grew faster than initially estimated in the third quarter as businesses aggressively accumulated stock, but underlying domestic demand remained sluggish.
A tug boat passes in front of a freighter at the Port of Oakland in Oakland, California November 12, 2013. REUTERS/Robert Galbraith
Gross domestic product grew at a 3.6 percent annual rate instead of the 2.8 percent pace reported earlier, the Commerce Department said on Thursday. Economists polled by Reuters had expected output would be revised up to only a 3.0 percent rate.
The third-quarter pace is the fastest since the first quarter of 2012 and marked an acceleration from the April-June period’s 2.5 percent rate.
Businesses accumulated $116.5 billion worth of inventories, the largest increase since the first quarter of 1998. That compared to prior estimates of only $86 billion.
Inventories accounted for a massive 1.68 percentage points of the advance made in the July-September quarter, the largest contribution since the fourth quarter of 2011.
The contribution from inventories had previously been estimated at 0.8 percentage point. Stripping out inventories, the economy grew at a 1.9 percent rate rather than the 2.0 percent pace estimated last month.
A gauge of domestic demand rose at just a 1.8 percent rate.
The strong inventory accumulation in the face of a slowdown in domestic demand means businesses will need to draw down on stocks, which will weigh on GDP growth this quarter.
Fourth quarter growth estimates are already on the low side, with a 16-day shutdown of the government in October expected to shave off as much as half a percentage point from GDP.
Consumer spending, which accounts for more than two-thirds of U.S. economic activity, was revised down to a 1.4 percent rate, the lowest since the fourth quarter of 2009. Spending had previously been estimated to have increased at 1.5 percent pace.
Consumer spending grew at a 1.8 percent rate in the April-June period.
There were upward revisions to business spending, but estimates for residential construction were lowered. The trade deficit was larger than previously estimated, resulting in trade being neutral to growth in the third quarter. |
/**
* Created by IntelliJ IDEA.
* User: mortenkjetland
* Date: 3/2/11
* Time: 9:02 PM
* To change this template use File | Settings | File Templates.
*/
public class ConfigurablePluginDisablingPluginTest {
@Before
public void before(){
//each test must begin with empty memory..
ConfigurablePluginDisablingPlugin.previousDisabledPlugins.clear();
}
@Test
public void verify_without_config() throws Exception {
Properties config = new Properties();
config.put("some.setting", "some value");
PluginCollection pc = new PluginCollection();
TestPlugin p = new TestPlugin();
pc.addPlugin( p );
internalTest(config, pc, Arrays.asList(p));
}
private void internalTest(Properties config, PluginCollection pc, List<? extends PlayPlugin> correctPluginListAfter) {
Play.configuration = config;
Play.pluginCollection = pc;
ConfigurablePluginDisablingPlugin plugin = new ConfigurablePluginDisablingPlugin();
plugin.onConfigurationRead();
assertThat(pc.getEnabledPlugins()).containsOnly(correctPluginListAfter.toArray());
}
@Test
public void verify_disabling_plugins() throws Exception {
PluginCollection pc = new PluginCollection();
TestPlugin p = new TestPlugin();
pc.addPlugin( p );
TestPlugin2 p2 = new TestPlugin2();
pc.addPlugin( p2 );
Properties config = new Properties();
config.put("some.setting", "some value");
config.put("plugins.disable", "play.plugins.TestPlugin");
internalTest(config, pc, Arrays.asList(p2));
}
@Test
public void verify_disabling_many_plugins() throws Exception {
PluginCollection pc = new PluginCollection();
TestPlugin p = new TestPlugin();
pc.addPlugin( p );
TestPlugin2 p2 = new TestPlugin2();
pc.addPlugin( p2 );
Properties config = new Properties();
config.put("some.setting", "some value");
config.put("plugins.disable", "play.plugins.TestPlugin");
config.put("plugins.disable.2", "play.plugins.TestPlugin2");
internalTest(config, pc, new ArrayList<PlayPlugin>());
}
@Test
public void verify_disabling_missing_plugins() throws Exception {
PluginCollection pc = new PluginCollection();
TestPlugin p = new TestPlugin();
pc.addPlugin( p );
TestPlugin2 p2 = new TestPlugin2();
pc.addPlugin( p2 );
Properties config = new Properties();
config.put("some.setting", "some value");
config.put("plugins.disable", "play.plugins.TestPlugin_XX");
internalTest(config, pc, Arrays.asList(p,p2));
}
@Test
public void verify_reenabling_disabled_plugins() throws Exception {
PluginCollection pc = new PluginCollection();
TestPlugin p = new TestPlugin();
pc.addPlugin( p );
TestPlugin2 p2 = new TestPlugin2();
pc.addPlugin( p2 );
Properties config = new Properties();
config.put("some.setting", "some value");
config.put("plugins.disable", "play.plugins.TestPlugin");
internalTest(config, pc, Arrays.asList(p2));
//remove the disabling from config
config = new Properties();
config.put("some.setting", "some value");
internalTest(config, pc, Arrays.asList(p,p2));
}
@Test
public void verify_that_the_plugin_gets_loaded(){
PluginCollection pc = new PluginCollection();
new PlayBuilder().build();
pc.loadPlugins();
PlayPlugin pi = pc.getPluginInstance(ConfigurablePluginDisablingPlugin.class);
assertThat(pi).isInstanceOf(ConfigurablePluginDisablingPlugin.class);
assertThat(pc.getEnabledPlugins()).contains( pi );
}
} |
Suburban office space : an exploration of continuity and difference
Anonymous boxes whose primary architectural features are stacked floor plates and horizontal bands of reflective glass dominate the suburban office landscape. These objects, scattered across fields and along freeways, are outdated images of the industrial age, when capital was the most important business resource. As architecture, they represent the values of this past age: their homogeneous and hermetic character depicts an efficient, regimented workplace where the office is isolated from the community and work is separated from the rest of life. In today's information society and global economy, people have replaced capital as the key business resource, and thus the values of the workplace are shifting. Efficiency is being replaced by effectiveness (doing the right things with the right people in the right way), and separation is being countered by corporate participation in the community and by company support of their employees' lives both within and beyond the workplace. The thesis is that suburban office space ought to reflect these new values, and it proposes the exploration of two relationships, continuity and difference, as the means to this end. Continuity is chosen for its possibilities in defining relationships of connection, and difference is chosen as a way of promoting the varied activities of the new workplace. Thesis Supervisor: William Porter Title: Professor of Architecture
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *File) DeepCopyInto(out *File) {
*out = *in
if in.ContentFrom != nil {
in, out := &in.ContentFrom, &out.ContentFrom
*out = new(FileSource)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new File.
func (in *File) DeepCopy() *File {
if in == nil {
return nil
}
out := new(File)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileSource) DeepCopyInto(out *FileSource) {
*out = *in
out.Secret = in.Secret
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSource.
func (in *FileSource) DeepCopy() *FileSource {
if in == nil {
return nil
}
out := new(FileSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesAgentConfig) DeepCopyInto(out *KThreesAgentConfig) {
*out = *in
if in.NodeLabels != nil {
in, out := &in.NodeLabels, &out.NodeLabels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NodeTaints != nil {
in, out := &in.NodeTaints, &out.NodeTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeletArgs != nil {
in, out := &in.KubeletArgs, &out.KubeletArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeProxyArgs != nil {
in, out := &in.KubeProxyArgs, &out.KubeProxyArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesAgentConfig.
func (in *KThreesAgentConfig) DeepCopy() *KThreesAgentConfig {
if in == nil {
return nil
}
out := new(KThreesAgentConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfig) DeepCopyInto(out *KThreesConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfig.
func (in *KThreesConfig) DeepCopy() *KThreesConfig {
if in == nil {
return nil
}
out := new(KThreesConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KThreesConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigList) DeepCopyInto(out *KThreesConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KThreesConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigList.
func (in *KThreesConfigList) DeepCopy() *KThreesConfigList {
if in == nil {
return nil
}
out := new(KThreesConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KThreesConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigSpec) DeepCopyInto(out *KThreesConfigSpec) {
*out = *in
if in.Files != nil {
in, out := &in.Files, &out.Files
*out = make([]File, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PreK3sCommands != nil {
in, out := &in.PreK3sCommands, &out.PreK3sCommands
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PostK3sCommands != nil {
in, out := &in.PostK3sCommands, &out.PostK3sCommands
*out = make([]string, len(*in))
copy(*out, *in)
}
in.AgentConfig.DeepCopyInto(&out.AgentConfig)
in.ServerConfig.DeepCopyInto(&out.ServerConfig)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigSpec.
func (in *KThreesConfigSpec) DeepCopy() *KThreesConfigSpec {
if in == nil {
return nil
}
out := new(KThreesConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigStatus) DeepCopyInto(out *KThreesConfigStatus) {
*out = *in
if in.BootstrapData != nil {
in, out := &in.BootstrapData, &out.BootstrapData
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.DataSecretName != nil {
in, out := &in.DataSecretName, &out.DataSecretName
*out = new(string)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigStatus.
func (in *KThreesConfigStatus) DeepCopy() *KThreesConfigStatus {
if in == nil {
return nil
}
out := new(KThreesConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigTemplate) DeepCopyInto(out *KThreesConfigTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigTemplate.
func (in *KThreesConfigTemplate) DeepCopy() *KThreesConfigTemplate {
if in == nil {
return nil
}
out := new(KThreesConfigTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KThreesConfigTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigTemplateList) DeepCopyInto(out *KThreesConfigTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KThreesConfigTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigTemplateList.
func (in *KThreesConfigTemplateList) DeepCopy() *KThreesConfigTemplateList {
if in == nil {
return nil
}
out := new(KThreesConfigTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KThreesConfigTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigTemplateResource) DeepCopyInto(out *KThreesConfigTemplateResource) {
*out = *in
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigTemplateResource.
func (in *KThreesConfigTemplateResource) DeepCopy() *KThreesConfigTemplateResource {
if in == nil {
return nil
}
out := new(KThreesConfigTemplateResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesConfigTemplateSpec) DeepCopyInto(out *KThreesConfigTemplateSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesConfigTemplateSpec.
func (in *KThreesConfigTemplateSpec) DeepCopy() *KThreesConfigTemplateSpec {
if in == nil {
return nil
}
out := new(KThreesConfigTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KThreesServerConfig) DeepCopyInto(out *KThreesServerConfig) {
*out = *in
if in.KubeAPIServerArgs != nil {
in, out := &in.KubeAPIServerArgs, &out.KubeAPIServerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeControllerManagerArgs != nil {
in, out := &in.KubeControllerManagerArgs, &out.KubeControllerManagerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLSSan != nil {
in, out := &in.TLSSan, &out.TLSSan
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DisableComponents != nil {
in, out := &in.DisableComponents, &out.DisableComponents
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KThreesServerConfig.
func (in *KThreesServerConfig) DeepCopy() *KThreesServerConfig {
if in == nil {
return nil
}
out := new(KThreesServerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretFileSource) DeepCopyInto(out *SecretFileSource) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretFileSource.
func (in *SecretFileSource) DeepCopy() *SecretFileSource {
if in == nil {
return nil
}
out := new(SecretFileSource)
in.DeepCopyInto(out)
return out
}
|
import torch

def decode_covariance_roll_pitch_yaw(radii, rotations, invert=False):
    # DIV_EPSILON and roll_pitch_yaw_to_rotation_matrices are assumed to be defined
    # elsewhere in the module; with invert=True the diagonal holds 1/(radii + eps).
    d = 1.0 / (radii + DIV_EPSILON) if invert else radii
    diag = torch.diag_embed(d)
    rotation = roll_pitch_yaw_to_rotation_matrices(rotations)
return torch.matmul(torch.matmul(rotation, diag), torch.transpose(rotation, -2, -1)) |
interface State {}
enum InstructionType
{
    CpyValue,
    CpyRegister,
    Inc,
    Dec,
    JnzValue,
    JnzRegister
}
interface Instruction {
    type: InstructionType;
    arg1: any;
    arg2: any;
    process(state: State): State;
}
export default "need an export so this file isn't seen as global. Nothing to see here"
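// Illustrative sketch of a value satisfying the merged Instruction interface
// (the concrete field values are hypothetical):
//
//   const inc: Instruction = {
//     type: InstructionType.Inc,
//     arg1: "a",
//     arg2: undefined,
//     process: (state) => state,
//   };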
|
/**
* Try the same get at a few border points and see if anything blows up
*/
@Test
public void checkSafeGetAlongBorder() {
T img = createImage(width, height);
GImageMiscOps.fillUniform(img, rand, 0, 100);
InterpolatePixel<T> interp = wrap(img, 0, 100);
interp.get(0,0);
interp.get(width/2,0);
interp.get(0,height/2);
interp.get(width-1,height-1);
interp.get(width/2,height-1);
interp.get(width-1,height/2);
} |
import {
FilterIcon,
GlobeIcon,
GraphIcon,
MoonIcon,
SunIcon,
TerminalIcon,
} from "@primer/octicons-react";
import { StyledOcticon } from "@primer/react";
import { FunctionComponent } from "react";
import { Navbar, NavbarLink, NavbarLinks } from "../components/Navbar";
interface Props {
selected: string;
handleSetSelected: (view: string) => void;
colorMode: string;
}
export const MobileNavbar: FunctionComponent<Props> = ({
selected,
handleSetSelected,
colorMode,
}) => {
return (
<Navbar aria-label="Main">
<NavbarLinks>
<NavbarLink
selected={selected === "map"}
onClick={() => handleSetSelected("map")}
>
<StyledOcticon size={20} icon={GlobeIcon} />
</NavbarLink>
<NavbarLink
selected={selected === "filters"}
onClick={() => handleSetSelected("filters")}
>
<StyledOcticon size={20} icon={FilterIcon} />
</NavbarLink>
<NavbarLink
selected={selected === "charts"}
onClick={() => handleSetSelected("charts")}
>
<StyledOcticon size={20} icon={GraphIcon} />
</NavbarLink>
<NavbarLink
selected={selected === "logs"}
onClick={() => handleSetSelected("logs")}
>
<StyledOcticon size={20} icon={TerminalIcon} />
</NavbarLink>
<NavbarLink
selected={selected === "preferences"}
onClick={() => handleSetSelected("preferences")}
>
<StyledOcticon
size={20}
icon={colorMode === "night" ? MoonIcon : SunIcon}
/>
</NavbarLink>
</NavbarLinks>
</Navbar>
);
};
|
/*--------------------------------------------------------------*
Copyright (C) 2006-2015 OpenSim Ltd.
This file is distributed WITHOUT ANY WARRANTY. See the file
'License' for details on this and other legal matters.
*--------------------------------------------------------------*/
package org.omnetpp.ned.editor.graph.misc;
import org.eclipse.gef.requests.CreationFactory;
import org.omnetpp.common.util.StringUtils;
import org.omnetpp.ned.core.NedResourcesPlugin;
import org.omnetpp.ned.model.INedElement;
import org.omnetpp.ned.model.ex.NedElementFactoryEx;
import org.omnetpp.ned.model.interfaces.IHasName;
import org.omnetpp.ned.model.interfaces.INedTypeResolver;
import org.omnetpp.ned.model.interfaces.ISubmoduleOrConnection;
import org.omnetpp.ned.model.pojo.CommentElement;
import org.omnetpp.ned.model.pojo.NedElementTags;
/**
* A factory used to create new model elements corresponding to graphical editor elements.
*
* @author rhornig
*/
public class ModelFactory implements CreationFactory {
protected int tagCode;
protected String name;
protected String type;
protected boolean useLike = false;
protected String bannerComment;
/**
     * @param tagCode The class identifier of the INedElement passed to NedElementFactoryEx.getInstance().createElement()
*/
public ModelFactory(int tagCode) {
this(tagCode, null);
}
/**
     * @param tagCode The class identifier of the INedElement
* @param name The optional name of the new element
*/
public ModelFactory(int tagCode, String name) {
this(tagCode, name, null, false);
}
/**
     * @param tagCode The class identifier of the INedElement
* @param name The optional name of the new element
* @param type The optional type of the new element (for submodules and connections)
*/
public ModelFactory(int tagCode, String name, String type) {
this(tagCode, name, type, false);
}
/**
     * @param tagCode The class identifier of the INedElement
     * @param name The optional name of the new element
     * @param type The optional type of the new element (for submodules and connections)
     * @param useLike If true, the provided type will be used as an interface type (the element is created with a "like" expression)
*/
public ModelFactory(int tagCode, String name, String type, boolean useLike) {
this.tagCode = tagCode;
this.name = name;
this.type = type;
this.useLike = useLike;
}
/**
     * Sets the banner comment added to the element. Set it to "" or null to avoid comment creation.
*/
void setBannerComment(String comment) {
bannerComment = comment;
}
public Object getNewObject() {
INedTypeResolver resolver = NedResourcesPlugin.getNedResources();
INedElement element = NedElementFactoryEx.getInstance().createElement(resolver, tagCode);
if (!StringUtils.isEmpty(bannerComment)) {
// add trailing empty lines
CommentElement ceTrail = (CommentElement)NedElementFactoryEx.getInstance().createElement(NedElementTags.NED_COMMENT);
ceTrail.setLocid("trailing");
ceTrail.setContent("\n\n");
element.insertChildBefore(element.getFirstChild(), ceTrail);
CommentElement ce = (CommentElement)NedElementFactoryEx.getInstance().createElement(NedElementTags.NED_COMMENT);
ce.setLocid("banner");
ce.setContent(bannerComment);
element.insertChildBefore(ceTrail, ce);
}
if (element instanceof IHasName)
((IHasName)element).setName(name);
if (element instanceof ISubmoduleOrConnection) {
if (!useLike)
((ISubmoduleOrConnection)element).setType(type);
else {
((ISubmoduleOrConnection)element).setLikeType(type);
// TODO maybe we could ask the user to specify the param name in a dialog box
((ISubmoduleOrConnection)element).setLikeExpr("paramName");
}
}
return element;
}
public Object getObjectType() {
return tagCode;
}
}
|
package com.thinkgem.jeesite.modules.cheat.entity;
import com.thinkgem.jeesite.common.persistence.DataEntity;
/**
 * Fraudulent traffic assessment for free services
* @author yanghai
*
*/
public class CheatFreeBusiness extends DataEntity<CheatFreeBusiness> {
private static final long serialVersionUID = 1843200383018112893L;
private String recordtime;
private String ratingGroup;
private String ratingGroupName;
private String dataUp;
private String dataDown;
private String total;
	private String netId;
	private String netName;
	private int timeH;
	private int timeD;
	private int timeM;
	private String gran;
	public String getNetId() {
		return netId;
	}
	public void setNetId(String netId) {
		this.netId = netId;
	}
	public String getNetName() {
		return netName;
	}
	public void setNetName(String netName) {
		this.netName = netName;
	}
public String getGran() {
return gran;
}
public void setGran(String gran) {
this.gran = gran;
}
public String getRecordtime() {
return recordtime;
}
public void setRecordtime(String recordtime) {
this.recordtime = recordtime;
}
public String getRatingGroup() {
return ratingGroup;
}
public void setRatingGroup(String ratingGroup) {
this.ratingGroup = ratingGroup;
}
public String getRatingGroupName() {
return ratingGroupName;
}
public void setRatingGroupName(String ratingGroupName) {
this.ratingGroupName = ratingGroupName;
}
public String getDataUp() {
return dataUp;
}
public void setDataUp(String dataUp) {
this.dataUp = dataUp;
}
public String getDataDown() {
return dataDown;
}
public void setDataDown(String dataDown) {
this.dataDown = dataDown;
}
public String getTotal() {
return total;
}
public void setTotal(String total) {
this.total = total;
}
public int getTimeH() {
return timeH;
}
public void setTimeH(int timeH) {
this.timeH = timeH;
}
public int getTimeD() {
return timeD;
}
public void setTimeD(int timeD) {
this.timeD = timeD;
}
public int getTimeM() {
return timeM;
}
public void setTimeM(int timeM) {
this.timeM = timeM;
}
}
|
use futures::executor::block_on;
use futures::prelude::*;
use libp2p::ping::{Ping, PingConfig};
use libp2p::swarm::{Swarm, SwarmEvent};
use libp2p::{identity, PeerId};
use std::error::Error;
use std::task::Poll;
fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {:?}", local_peer_id);
let transport = block_on(libp2p::development_transport(local_key))?;
// Create a ping network behaviour.
//
// For illustrative purposes, the ping protocol is configured to
// keep the connection alive, so a continuous sequence of pings
// can be observed.
let behaviour = Ping::new(PingConfig::new().with_keep_alive(true));
let mut swarm = Swarm::new(transport, behaviour, local_peer_id);
// Tell the swarm to listen on all interfaces and a random, OS-assigned
// port.
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
// Dial the peer identified by the multi-address given as the second
// command-line argument, if any.
if let Some(addr) = std::env::args().nth(1) {
let remote = addr.parse()?;
swarm.dial_addr(remote)?;
println!("Dialed {}", addr)
}
block_on(future::poll_fn(move |cx| loop {
match swarm.poll_next_unpin(cx) {
Poll::Ready(Some(event)) => {
                if let SwarmEvent::NewListenAddr { address, .. } = event {
println!("Listening on {:?}", address);
}
},
Poll::Ready(None) => return Poll::Ready(()),
Poll::Pending => return Poll::Pending
}
}));
Ok(())
}
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Test hoomd.update.RemoveDrift."""
import hoomd
from hoomd.conftest import operation_pickling_check
import pytest
import numpy as np
# note: The parameterized tests validate parameters so we can't pass in values
# here that require preprocessing
valid_constructor_args = [
dict(trigger=hoomd.trigger.Periodic(10),
reference_positions=[(0, 0, 0), (1, 0, 1)]),
dict(trigger=hoomd.trigger.After(10),
reference_positions=[(0, 0, 0), (1, 0, 1)]),
dict(trigger=hoomd.trigger.Before(10),
reference_positions=[(0, 0, 0), (1, 0, 1)])
]
valid_attrs = [('trigger', hoomd.trigger.Periodic(10000)),
('trigger', hoomd.trigger.After(100)),
('trigger', hoomd.trigger.Before(12345)),
('reference_positions', [(0, 0, 0), (1, 0, 1)])]
@pytest.mark.parametrize("constructor_args", valid_constructor_args)
def test_valid_construction(constructor_args):
"""Test that RemoveDrift can be constructed with valid arguments."""
remove_drift = hoomd.update.RemoveDrift(**constructor_args)
# validate the params were set properly
for attr, value in constructor_args.items():
assert np.all(getattr(remove_drift, attr) == value)
@pytest.mark.parametrize("constructor_args", valid_constructor_args)
def test_valid_construction_and_attach(simulation_factory,
two_particle_snapshot_factory,
constructor_args):
"""Test that RemoveDrift can be attached with valid arguments."""
remove_drift = hoomd.update.RemoveDrift(**constructor_args)
sim = simulation_factory(
two_particle_snapshot_factory(particle_types=['A', 'B'], d=2, L=50))
sim.operations.updaters.append(remove_drift)
sim.run(0)
# validate the params were set properly
for attr, value in constructor_args.items():
assert np.all(getattr(remove_drift, attr) == value)
@pytest.mark.parametrize("attr,value", valid_attrs)
def test_valid_setattr(attr, value):
"""Test that RemoveDrift can get and set attributes."""
remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(10),
reference_positions=[(0, 0, 1),
(-1, 0, 1)])
setattr(remove_drift, attr, value)
assert np.all(getattr(remove_drift, attr) == value)
@pytest.mark.parametrize("attr,value", valid_attrs)
def test_valid_setattr_attached(attr, value, simulation_factory,
two_particle_snapshot_factory):
"""Test that RemoveDrift can get and set attributes while attached."""
remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(10),
reference_positions=[(0, 0, 1),
(-1, 0, 1)])
sim = simulation_factory(
two_particle_snapshot_factory(particle_types=['A', 'B'], L=50))
sim.operations.updaters.append(remove_drift)
sim.run(0)
setattr(remove_drift, attr, value)
assert np.all(getattr(remove_drift, attr) == value)
def test_remove_drift(simulation_factory, lattice_snapshot_factory):
"""Test that RemoveDrift modifies positions correctly."""
# reference positions in a simple cubic lattice with a=1
reference_positions = [[-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5],
[-0.5, 0.5, -0.5], [-0.5, 0.5,
0.5], [0.5, -0.5, -0.5],
[0.5, -0.5, 0.5], [0.5, 0.5, -0.5], [0.5, 0.5, 0.5]]
# initialize simulation with randomized positions (off lattice)
snap = lattice_snapshot_factory(dimensions=3, n=2, a=1, r=0.1)
sim = simulation_factory(snap)
# add remove drift updater and run
remove_drift = hoomd.update.RemoveDrift(
trigger=hoomd.trigger.Periodic(1),
reference_positions=reference_positions)
sim.operations.updaters.append(remove_drift)
sim.run(1)
# ensure the drift is close to zero after the updater has been executed
s = sim.state.get_snapshot()
if s.communicator.rank == 0:
new_positions = s.particles.position
drift = np.mean(new_positions - reference_positions, axis=0)
assert np.allclose(drift, [0, 0, 0])
def test_pickling(simulation_factory, two_particle_snapshot_factory):
"""Test that RemoveDrift objects are picklable."""
sim = simulation_factory(two_particle_snapshot_factory())
remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(5),
reference_positions=[(0, 0, 1),
(-1, 0, 1)])
operation_pickling_check(remove_drift, sim)
|
Recently, when I was sitting in Carter Notch Hut in – well, Carter Notch – I noticed a sign bearing the ubiquitous logos of trip advisor and yelp, and I realized that people actually must ‘yelp’ about these iconic landmarks in the White Mountains. I started to wonder what else people yelp about – what other natural, uncontrollable environments do tourists rate as though they were a restaurant or a hotel?
I saw the one-star National Park reviews online and loved it, so I thought: why not a one-star trip review for the Appalachian Trail? This way, hikers can be warned ahead of time about the true peril of the AT: poor service.
All spelling and grammatical errors have been retained to set the tone of the review, and there might be a few 2 and 3 star reviews mixed in here that I couldn’t bring myself to pass up. I would like to thank yelpers everywhere for writing this article for me. I would also like to thank the internet for the following pictures, because when I try to photograph a beautiful view my depth perception fails and my hands turn into flippers, leaving me to flop against my touch screen like a manic seal.
Without further ado, here are the one star reviews of the Appalachian Trail:
Amicalola Falls State Park – Don’t expect the beginning of your thru-hike to be anything special.
Great Smoky Mountains National Park – ‘Not the Rockies’ by someone very confused about America’s geography:
Great Smoky Mountains National Park – ‘not fulfilling the promis’ by an anonymous caveman with internet access:
Clingmans Dome – On the subject of the steep, 0.5-mile paved trail from the parking lot:
Max Patch – bring your livestock here!
Roan Mountain – Don’t waste all that energy driving up there when you could go to the Rockies right next door.
Grayson Highlands State Park – That’s strange… where is this coming from??
Shenandoah National Park, from Skyline Drive – on the National Park which has move paved road than trail:
Bear Mountain State Park – Apparently written by someone right after encountering thru-hikers:
Bear Mountain, NY (elevation: 1,283 feet) – Experienced Mountaineer Gets Lost in Wilderness
Delaware Water Gap National Recreation Area – Seriously though…
Mount Greylock – From the mouth of a Nutmegger:
Lake of the Clouds Hut, Mt. Washington – I think this also might have been inspired by thru-hikers:
The Mount Washington Cog Railroad – Just a bunch of ugly mountains.
Mount Washington – Regarding the fine dining found at the Cafeteria on the summit:
I hope you’ve enjoyed this edition of the Appalachian Trail’s one-star reviews! Don’t forget to put in your own two cents the next time you find your shelter doesn’t have wifi, or your hostel owner forgot to leave you a mint. We can change the service of the AT, one review at a time! |
// Build returns an instance of KeycloakService ready to be used.
// If a custom realm is configured (WithRealmConfig called), then always Keycloak provider is used
// irrespective of the `builder.config.SelectSSOProvider` value
func (builder *keycloakServiceBuilder) Build() KeycloakService {
notNilPredicate := func(x interface{}) bool {
return x.(*keycloak.KeycloakRealmConfig) != nil
}
if builder.config.SelectSSOProvider == keycloak.MAS_SSO ||
builder.realmConfig != nil {
_, realmConfig := arrays.FindFirst(notNilPredicate, builder.realmConfig, builder.config.KafkaRealm)
return newKeycloakService(builder.config, realmConfig.(*keycloak.KeycloakRealmConfig))
} else {
_, realmConfig := arrays.FindFirst(notNilPredicate, builder.realmConfig, builder.config.RedhatSSORealm)
client := redhatsso.NewSSOClient(builder.config, realmConfig.(*keycloak.KeycloakRealmConfig))
return &keycloakServiceProxy{
accessTokenProvider: client,
service: &redhatssoService{
client: client,
},
}
}
} |
/**
* Common list utilities
*/
public class ListUtils {
/**
* Merges the 2 lists and returns a new list without repetitions
* @param list1 First list to merge
* @param list2 Second list to merge
* @return a new list which contains all elements in both lists without repetitions
*/
public static List<String> mergeLists(List<String> list1, List<String> list2) {
if (list1 == null || list2 == null) {
throw new IllegalArgumentException("Arguments cannot be null");
}
Set<String> asSet = new HashSet<>(list1);
asSet.addAll(list2);
return new ArrayList<>(asSet);
}
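    // Usage sketch (illustrative): the result contains each element exactly once,
    // but in no guaranteed order, since the merge goes through a HashSet:
    //
    //   List<String> merged = ListUtils.mergeLists(List.of("a", "b"), List.of("b", "c"));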
} |
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <xok/sys_ucall.h>
#include <xok/pctr.h>
#include "test-server.h"
arg_t *s_state; // the shared state
unsigned int initialized;
arg_t private_area; // the private state
arg_t *p_state = &private_area;
#ifdef IPC
extern int fd[2];
int S_empty (void)
{
return 0;
}
int S_init (int unused)
{
unsigned int pte;
if (read (fd[0], &pte, sizeof(pte)) != sizeof(pte)) {
printf ("S_init: read from pipe failed\n");
return (-1);
}
if (pte == 0)
return 0;
s_state = (arg_t *) malloc (sizeof(arg_t));
if (s_state == NULL || sys_self_insert_pte (0, pte, (uint) s_state) != 0) {
printf ("SERVER: Error in S_init\n");
return -1;
}
p_state->nbytes=0;
initialized = 1;
return 0;
}
int S_push (void)
{
if (initialized == 0 || p_state->nbytes + s_state->nbytes > MAX_LEN) {
printf ("SERVER: Error in S_push\n");
return -1;
}
memcpy (p_state->buf + p_state->nbytes, s_state->buf, s_state->nbytes);
p_state->nbytes += s_state->nbytes;
return 0;
}
int S_pop (void)
{
if (initialized == 0 || p_state->nbytes - s_state->nbytes < 0) {
printf ("SERVER: Error in S_pop\n");
return -1;
}
memcpy (p_state->buf + p_state->nbytes - 1, s_state->buf, s_state->nbytes);
p_state->nbytes -= s_state->nbytes;
return 0;
}
#define DELTA (200 * 1000 * 100)
int S_compute (void)
{
pctrval t0, t=0;
for (t0=rdtsc() ; t < t0 + DELTA ; t=rdtsc());
return 0;
}
int dispatch (int unused0, int meth_num, int optional, int unused, uint caller)
{
switch (meth_num) {
case INIT: return (S_init (optional));
case PUSH: return (S_push ());
case POP: return (S_pop ());
case EMPTY: return (S_empty());
case COMPUTE: return (S_compute());
}
return -1;
}
#else // meaning IPC is turned off
int S_empty (void)
{
return 0;
}
int S_init (int pte)
{
if (pte == 0)
return 0;
s_state = (arg_t *) malloc (sizeof(arg_t));
if (s_state == NULL || sys_self_insert_pte (0, pte, (uint) s_state) != 0) {
printf ("SERVER: Error in S_init\n");
return -1;
}
p_state->nbytes=0;
initialized = 1;
return 0;
}
int S_push (void)
{
if (initialized == 0 || p_state->nbytes + s_state->nbytes > MAX_LEN) {
printf ("SERVER: Error in S_push\n");
return -1;
}
memcpy (p_state->buf + p_state->nbytes, s_state->buf, s_state->nbytes);
p_state->nbytes += s_state->nbytes;
return 0;
}
int S_pop (void)
{
if (initialized == 0 || p_state->nbytes - s_state->nbytes < 0) {
printf ("SERVER: Error in S_pop\n");
return -1;
}
memcpy (p_state->buf + p_state->nbytes - 1, s_state->buf, s_state->nbytes);
p_state->nbytes -= s_state->nbytes;
return 0;
}
#define DELTA (200 * 1000 * 100)
int S_compute (void)
{
pctrval t0, t=0;
for (t0=rdtsc() ; t < t0 + DELTA ; t=rdtsc());
return 0;
}
int dispatch (int unused0, int meth_num, int optional, int unused, uint caller)
{
switch (meth_num) {
case INIT: return (S_init (optional));
case PUSH: return (S_push ());
case POP: return (S_pop ());
case EMPTY: return (S_empty());
case COMPUTE: return (S_compute());
}
return -1;
}
#endif // IPC
|
/// Attempts to generate a `UserId` for the given origin server with a localpart consisting of
/// 12 random ASCII characters.
///
/// Fails if the given homeserver cannot be parsed as a valid host.
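///
/// A usage sketch (illustrative; assumes the surrounding type and this crate's `Error`):
///
/// ```ignore
/// let user_id = UserId::new("matrix.example.org")?;
/// ```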
pub fn new(homeserver_host: &str) -> Result<Self, Error> {
let user_id = format!(
"@{}:{}",
generate_localpart(12).to_lowercase(),
homeserver_host
);
let (localpart, host, port) = parse_id('@', &user_id)?;
Ok(Self {
hostname: host,
localpart: localpart.to_string(),
port,
is_historical: false,
})
} |
/*
* This function is meant explicitly to grab the definition string from an already generated query
*/
static inline int
extract_defstring(const char * restrict sql, char * restrict defstr) {
register char *i, *j, *defstart;
register int_fast8_t retc;
i = NULL; j = NULL; defstart = NULL;
retc = 0;
if (dbg) {
NOMDBG("Entering with sql = %s, defstr = %p\n", sql, (void *)defstr);
}
if (sql == NULL || defstr == NULL) {
NOMERR("%s","This should not be possible, passed invalid arguments!");
return(NOM_INVALID);
}
for (i = (char *)&sql[33], defstart = defstr; *i != 0; i++) {
if (*i == ',') {
j = i+2;
i = j;
} else if (j != NULL) {
			if (*i == 0x27) { break; } /* 0x27 is the single-quote character; it ends the definition */
*defstr = *i; defstr++;
retc++;
}
}
*defstr = 0;
defstr = defstart;
if (retc > 0 && dbg) {
NOMDBG("Copied %d characters: %s\n", retc, defstart);
} else {
NOMINF("Extracted definition as: %s\n", defstr);
retc ^= retc;
}
return(retc);
} |
Practitioner review: schizophrenia spectrum disorders and the at-risk mental state for psychosis in children and adolescents--evidence-based management approaches.
BACKGROUND
Schizophrenia spectrum disorders are severe mental illnesses which often result in significant distress and disability. Attempts have been made to prospectively identify and treat young people viewed as at high risk of impending nonaffective psychosis. Once a schizophrenia spectrum disorder has developed, prompt identification and management is required.
METHODS
This article reviews the literature relating to the assessment and management of 'at-risk mental states' (ARMS) and the treatment of schizophrenia spectrum disorders in children and adolescents. A systematic search of the literature was undertaken using the EMBASE, MEDLINE, and PsycINFO databases for the period January 1970-December 2012.
RESULTS
Evidence suggests that young people fulfilling the ARMS criteria are at high risk of adverse mental health outcomes but that the majority do not develop nonaffective psychosis over the medium term. Although clinical trial findings have been inconsistent, psychosocial approaches, such as cognitive behaviour therapy, may reduce the risk of transition to psychosis and improve some symptoms, at least over the short term. The effectiveness of psychotropic medication for the ARMS is uncertain although there is accumulating evidence for potential adverse effects of antipsychotic medication, even at low dose, in this population. For the schizophrenias, clinical trial findings suggest that, as in adults, antipsychotics should be selected on the basis of side-effect profile although clozapine may be helpful in treatment refractory illness. There are almost no studies of psychosocial treatments for schizophrenia in young people under 18, and some caution must be exercised when extrapolating the findings of adult studies to younger individuals.
CONCLUSIONS
A stepped care approach to the ARMS in young people represents a plausible potential management approach for those at high risk of serious mental health problems. However, predictive models currently lack precision and should focus on accurately identifying those at high risk for a variety of poor outcomes who may benefit most from intervention. There is also an urgent need for age-specific research in the area of psychosocial treatments for children and adolescents with schizophrenia. |
It would be important to evaluate whether this variable remains significantly associated with adenoma in a multivariate analysis. Based on the results shown in Table 1, 367 patients had chronic active gastritis; of those, 305 were H. pylori positive and 62 were H. pylori negative. It would be interesting to specifically analyze the association of H. pylori-positive chronic active gastritis with colonic lesions.
4 - References
The references are relevant to the article; however, they should be updated, since several papers, including meta-analyses, have recently been published about the
/**
* Called into by the commands infrastructure.
*/
void execCommandClient(OperationContext* opCtx,
Command* c,
int queryOptions,
StringData dbname,
BSONObj& cmdObj,
BSONObjBuilder& result) {
ON_BLOCK_EXIT([opCtx, &result] { appendRequiredFieldsToResponse(opCtx, &result); });
dassert(dbname == nsToDatabase(dbname));
StringMap<int> topLevelFields;
for (auto&& element : cmdObj) {
StringData fieldName = element.fieldNameStringData();
if (fieldName == "help" && element.type() == Bool && element.Bool()) {
std::stringstream help;
help << "help for: " << c->getName() << " ";
c->help(help);
result.append("help", help.str());
Command::appendCommandStatus(result, true, "");
return;
}
uassert(ErrorCodes::FailedToParse,
str::stream() << "Parsed command object contains duplicate top level key: "
<< fieldName,
topLevelFields[fieldName]++ == 0);
}
Status status = Command::checkAuthorization(c, opCtx, dbname.toString(), cmdObj);
if (!status.isOK()) {
Command::appendCommandStatus(result, status);
return;
}
c->_commandsExecuted.increment();
if (c->shouldAffectCommandCounter()) {
globalOpCounters.gotCommand();
}
StatusWith<WriteConcernOptions> wcResult =
WriteConcernOptions::extractWCFromCommand(cmdObj, dbname.toString());
if (!wcResult.isOK()) {
Command::appendCommandStatus(result, wcResult.getStatus());
return;
}
bool supportsWriteConcern = c->supportsWriteConcern(cmdObj);
if (!supportsWriteConcern && !wcResult.getValue().usedDefault) {
Command::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "Command does not support writeConcern"));
return;
}
rpc::TrackingMetadata trackingMetadata;
trackingMetadata.initWithOperName(c->getName());
rpc::TrackingMetadata::get(opCtx) = trackingMetadata;
auto metadataStatus = processCommandMetadata(opCtx, cmdObj);
if (!metadataStatus.isOK()) {
Command::appendCommandStatus(result, metadataStatus);
return;
}
std::string errmsg;
bool ok = false;
try {
if (!supportsWriteConcern) {
ok = c->run(opCtx, dbname.toString(), cmdObj, errmsg, result);
} else {
const auto oldWC = opCtx->getWriteConcern();
ON_BLOCK_EXIT([&] { opCtx->setWriteConcern(oldWC); });
opCtx->setWriteConcern(wcResult.getValue());
ok = c->run(opCtx, dbname.toString(), cmdObj, errmsg, result);
}
} catch (const DBException& e) {
result.resetToEmpty();
const int code = e.getCode();
if (code == ErrorCodes::RecvStaleConfig || code == ErrorCodes::SendStaleConfig) {
throw;
}
errmsg = e.what();
result.append("code", code);
}
if (!ok) {
c->_commandsFailed.increment();
}
Command::appendCommandStatus(result, ok, errmsg);
} |
/**
* The type TL abs input photo.
*/
public abstract class TLAbsInputPhoto extends TLObject {
/**
* Instantiates a new TL abs input photo.
*/
protected TLAbsInputPhoto() {
super();
}
} |
import math
def solve(a, b, c, d):
    # x: hits needed to bring health c to 0 at b damage per hit;
    # y: hits the opponent needs to bring health a to 0 at d damage per hit.
    # The side moving first wins ties, hence the x <= y comparison.
    x = math.ceil(c / b)
    y = math.ceil(a / d)
    return "Yes" if x <= y else "No"
a,b,c,d = map(int, input().split())
ans = solve(a,b,c,d)
print(ans) |
package org.doremus.diaboloConverter.musResource;
import org.apache.jena.vocabulary.RDF;
import org.doremus.diaboloConverter.files.DiaboloRecord;
import org.doremus.ontology.CIDOC;
import org.doremus.ontology.FRBROO;
public class F24_PublicationExpression extends DoremusResource {
public F24_PublicationExpression(DiaboloRecord record) {
super(record);
this.resource.addProperty(RDF.type, FRBROO.F24_Publication_Expression);
}
public F24_PublicationExpression add(F22_SelfContainedExpression expression) {
this.resource.addProperty(CIDOC.P165_incorporates, expression.asResource());
return this;
}
}
|
// Deleted returns whether the dxfile has been deleted
func (df *DxFile) Deleted() bool {
df.lock.RLock()
defer df.lock.RUnlock()
return df.deleted
} |
package cli_test
import (
"testing"
clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
"github.com/stretchr/testify/require"
"github.com/commercionetwork/commercionetwork/x/documents/client/cli"
"github.com/commercionetwork/commercionetwork/x/documents/types"
)
func testCmdShowDocument(t *testing.T) {
for _, tt := range []struct {
name string
args []string
expected *types.Document
wantErr bool
}{
{
name: "ok",
args: []string{types.ValidDocument.UUID},
expected: &types.ValidDocument,
},
{
name: "document not in store",
args: []string{types.ValidDocumentReceiptRecipient1.UUID},
wantErr: true,
},
{
name: "no args",
args: []string{},
wantErr: true,
},
} {
t.Run(tt.name, func(t *testing.T) {
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowDocument(), tt.args)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
var response types.QueryGetDocumentResponse
require.NoError(t, ctx.JSONMarshaler.UnmarshalJSON(out.Bytes(), &response))
require.Equal(t, tt.expected, response.Document)
}
})
}
}
func testCmdSentDocuments(t *testing.T) {
for _, tt := range []struct {
name string
args []string
flags []string
expected []*types.Document
wantErr bool
}{
{
name: "ok",
args: []string{types.ValidDocument.Sender},
expected: documentsGenesisState.Documents,
},
{
name: "invalid address",
args: []string{"abc"},
wantErr: true,
},
{
name: "no documents expected",
args: []string{types.ValidDocumentReceiptRecipient1.Sender},
},
// unknown flag: --page
// missing AddPaginationFlagsToCmd in CmdSentDocuments
// {
// name: "invalid pagination flags",
// args: []string{types.ValidDocumentReceiptRecipient1.Sender},
// flags: []string{
// fmt.Sprintf("--%s=2", flags.FlagPage),
// fmt.Sprintf("--%s=true", flags.FlagOffset),
// },
// },
{
name: "no args",
args: []string{},
wantErr: true,
},
} {
t.Run(tt.name, func(t *testing.T) {
commandArgs := append(tt.args, tt.flags...)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdSentDocuments(), commandArgs)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
var response types.QueryGetSentDocumentsResponse
require.NoError(t, ctx.JSONMarshaler.UnmarshalJSON(out.Bytes(), &response))
require.ElementsMatch(t, tt.expected, response.Document)
}
})
}
}
func testCmdReceivedDocuments(t *testing.T) {
for _, tt := range []struct {
name string
args []string
flags []string
expected []*types.Document
wantErr bool
}{
{
name: "ok",
args: []string{types.ValidDocumentReceiptRecipient1.Sender},
expected: documentsGenesisState.Documents,
},
{
name: "invalid address",
args: []string{"abc"},
wantErr: true,
},
{
name: "no documents expected",
args: []string{types.ValidDocument.Sender},
},
// unknown flag: --page
// missing AddPaginationFlagsToCmd in CmdReceivedDocuments
// {
// name: "invalid pagination flags",
// args: []string{types.ValidDocumentReceiptRecipient1.Sender},
// flags: []string{
// fmt.Sprintf("--%s=2", flags.FlagPage),
// fmt.Sprintf("--%s=true", flags.FlagOffset),
// },
// },
{
name: "no args",
args: []string{},
wantErr: true,
},
} {
t.Run(tt.name, func(t *testing.T) {
commandArgs := append(tt.args, tt.flags...)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdReceivedDocuments(), commandArgs)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
var response types.QueryGetReceivedDocumentResponse
require.NoError(t, ctx.JSONMarshaler.UnmarshalJSON(out.Bytes(), &response))
require.ElementsMatch(t, tt.expected, response.ReceivedDocument)
}
})
}
}
|
#include<bits/stdc++.h>
using namespace std;
int fr;
string st;
int hh, mm;
int main(){
cin>>fr;
cin>>st;
hh = 10*(st[0]-'0')+(st[1]-'0');
mm = 10*(st[3]-'0')+(st[4]-'0');
if(mm>59){
st[3]='0';
}
if(fr==24){
if(hh>23){
st[0]='0';
}
}
else{
if(hh==0){
st[0]='1';
}
else if(hh>12){
if(st[1]!='0'){
st[0]='0';
}else st[0]='1';
}
}
cout<<st<<endl;
}
|
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.state;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.spongepowered.api.Sponge;
import org.spongepowered.api.block.BlockState;
import org.spongepowered.api.block.BlockType;
import org.spongepowered.api.data.KeyValueMatcher;
import org.spongepowered.api.fluid.FluidState;
import org.spongepowered.api.fluid.FluidType;
import org.spongepowered.api.util.CopyableBuilder;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
* A {@link StateMatcher} that will match various {@link State}s
* according to a pre-built list of {@link StateProperty}s and their
* values, such that not all {@link StateProperty}s contained in a
* {@link State} must be matched.
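 *
 * <p>A usage sketch (illustrative; {@code someBlockType} and {@code someBlockState}
 * are hypothetical values):</p>
 *
 * <pre>{@code
 * StateMatcher<BlockState> matcher = StateMatcher.blockStateMatcherBuilder()
 *     .type(someBlockType)
 *     .build();
 * boolean matched = matcher.matches(someBlockState);
 * }</pre>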
*/
public interface StateMatcher<S extends State<S>> extends Predicate<S> {
/**
* Creates a new {@link Builder} for matching {@link BlockState}s.
*
* @return The builder
*/
static Builder<BlockState, BlockType> blockStateMatcherBuilder() {
return Sponge.game().factoryProvider().provide(StateMatcher.Factory.class).blockStateMatcherBuilder();
}
/**
* Creates a new {@link Builder} for matching {@link FluidState}s.
*
* @return The builder
*/
static Builder<FluidState, FluidType> fluidStateMatcherBuilder() {
return Sponge.game().factoryProvider().provide(StateMatcher.Factory.class).fluidStateMatcherBuilder();
}
/**
* Gets a {@code true} return value if the provided {@link State}
* sufficiently matches the pre-defined {@link StateProperty} values
* and {@link KeyValueMatcher}s.
*
* @param state The state in question
* @return True if the state sufficiently matches
*/
boolean matches(S state);
/**
* Gets a {@link List} of compatible {@link State}s.
*
* @return The list of compatible states
*/
List<S> compatibleStates();
@Override
default boolean test(final S state) {
return this.matches(state);
}
/**
* Factories for generating builders.
*/
interface Factory {
/**
* Gets a {@link Builder} for {@link BlockState} matching.
*
* @return The builder
*/
Builder<BlockState, BlockType> blockStateMatcherBuilder();
/**
* Gets a {@link Builder} for {@link FluidState} matching.
*
* @return The builder
*/
Builder<FluidState, FluidType> fluidStateMatcherBuilder();
}
/**
* A builder for {@link StateMatcher}s.
*/
interface Builder<S extends State<S>, T extends StateContainer<S>> extends org.spongepowered.api.util.Builder<StateMatcher<S>, Builder<S, T>>,
CopyableBuilder<StateMatcher<S>, Builder<S, T>> {
/**
* Sets the root {@link StateContainer} for the {@link StateMatcher}.
*
* @param type The {@link StateContainer} to use
* @return This builder, for chaining
*/
default Builder<S, T> type(final Supplier<? extends T> type) {
return this.type(type.get());
}
/**
* Sets the root {@link StateContainer} for the {@link StateMatcher}.
*
* @param type The {@link StateContainer} to use
* @return This builder, for chaining
*/
Builder<S, T> type(T type);
/**
* Adds a {@link StateProperty} that needs to be present
* on a {@link State} to match.
*
* <p>{@link #type(StateContainer)} or {@link #type(Supplier)}
* <strong>must</strong> be called before this is called as supported
* {@link StateProperty state properties} are specific to the type</p>
*
* @param stateProperty The state property
* @return This builder, for chaining
*/
Builder<S, T> supportsStateProperty(StateProperty<@NonNull ?> stateProperty);
/**
* Adds a {@link StateProperty} that needs to be present
* on a {@link State} to match.
*
* <p>{@link #type(StateContainer)} or {@link #type(Supplier)}
* <strong>must</strong> be called before this is called as supported
* {@link StateProperty state properties} are specific to the type</p>
*
* @param stateProperty The state property
* @return This builder, for chaining
*/
default Builder<S, T> supportsStateProperty(final Supplier<? extends StateProperty<@NonNull ?>> stateProperty) {
return this.supportsStateProperty(stateProperty.get());
}
/**
* Adds a {@link StateProperty} and value that needs to
* match on a {@link State} to match.
*
* <p>{@link #type(StateContainer)} or {@link #type(Supplier)}
* <strong>must</strong> be called before this is called as supported
* {@link StateProperty state properties} are specific to the type</p>
*
* @param stateProperty The state property
* @param value The value to match
* @param <V> The value type
* @return This builder, for chaining
*/
<V extends Comparable<V>> Builder<S, T> stateProperty(StateProperty<V> stateProperty, V value);
/**
* Adds a {@link StateProperty} and value that needs to
* match on a {@link State} to match.
*
* @param stateProperty The state property
* @param value The value to match
* @param <V> The value type
* @return This builder, for chaining
*/
default <V extends Comparable<V>> Builder<S, T> stateProperty(final Supplier<? extends StateProperty<V>> stateProperty, final V value) {
return this.stateProperty(stateProperty.get(), value);
}
/**
* Adds a {@link KeyValueMatcher} that the {@link State}
* needs to match with.
*
* @param matcher The matcher
* @return This builder, for chaining
*/
Builder<S, T> matcher(KeyValueMatcher<?> matcher);
/**
* Builds a {@link StateMatcher}.
*
* @return The built state matcher
*/
@Override
StateMatcher<S> build();
}
}
|
On August 22, Sixpoint Brewery announced that it hired Eric Bachli as the chief product officer. Bachli was previously the head brewer at Trillium Brewing Company, where he scaled production and helped turn Trillium into a destination brewery known for big, hazy IPAs and special releases.
In the press release announcing the hire, Sixpoint stated that Bachli and the other new hires for the product development team will work on Sixpoint’s current beers, “devise new limited beers, and will also imminently launch a new wave of small batch, innovative beers starting this fall through a first-of-its kind sales channel.”
Which made many in the beer community wonder: Will Sixpoint build their own brewery with the help of Bachli? Is a Sixpoint taproom finally in the works? And, perhaps most pressingly for a certain subset of beer lovers, does this mean Sixpoint will come out with a New England style IPA similar to what Trillium puts out?
To the last point, BeerAdvocate user MJS08 writes, “if they try the haze craze, I’ll play.” But Sixpoint only vaguely addressed questions with the answers of “plenty of new innovations coming,” and “lots more updates to follow.”
VinePair caught up with Bachli about the new hire to learn more about his motivations for joining Sixpoint and what’s next for the brewery.
What inspired the move from Trillium to Sixpoint?
Sixpoint is one of the breweries that inspired me to turn professional. I had been a home brewer for quite some time and always dreamed about taking it to the next level. While working part time at Craft Beer Cellar in 2012 I had the opportunity to meet many brewers, Shane Welch being one of them. The encouragement from those interactions motivated me to work even harder and learn as much as possible about brewing. So naturally, this year when Shane reached out to me I was totally tuned in on hearing about the opportunity he had at Sixpoint. I think it’s fascinating how people’s paths converge in life, and after meeting with Shane and the Sixpoint team I knew this was a great fit for me. From a creative, technical, and technological perspective it is a brewer’s dream job and working with Shane Welch, Horace Cunningham and the Sixpoint crew with all their collective passion, experience and knowledge is the icing on the cake.
I’m sure things are busy getting ready to be one of the first members of a new brewery division, but what’s the first thing you want to accomplish when you official join in November?
We have a lot of projects on the horizon that we’ll be announcing soon. On day one I’ll be jumping on recipe development for a new line of small batch innovations. We’re changing the frequency of the beers we’re going to release, changing the way we release them, and putting the incredible supply chain Sixpoint has developed over the past few years (especially some incredible hops) to its best possible use. The stuff coming out of here is going to blow some minds.
What are some of the things from your past experience you’re going to bring to Sixpoint?
I spent the better part of a decade as an exploratory scientist before becoming a brewer. I’ve always been a scientist at heart, and that experience has been complementary to my brewing career from a quality, process, and creativity perspective. I’ve also had the opportunity to work with some amazing brewers and breweries, learning different perspectives on processes and techniques over the years. At Trillium I was given the opportunity to build a production brewery including scale-up of recipes, development of brewing and cellar processes, implementing record keeping and standard operating procedures, and building and managing a production team. This is all experience and perspective I will definitely bring to the table at Sixpoint.
We all need quality control, but the whole “devise new limited beers” part of the Product Development team is the most eye catching. Any insight you can give on what beers will be coming out and when?
We’ve got a few announcements coming up, so keep your eyes peeled on the Instagram page. We’re going to start putting out lots of new formulas. I’ve already been talking to the team over there and the creativity is flowing.
More to the point: Is a New England IPA coming to Sixpoint?
They already have Puff, which is hazy, but not quite full-on New England style. We’re going to focus on new, innovative beers, but we’re absolutely going to explore the techniques both Sixpoint and I have been using to make the best possible beers. The sky’s the limit here! Stay tuned.
Will there be any changes to Sixpoint’s current lineup of beers?
There might be tweaks, but there always have been — that’s Shane’s team’s whole approach. I’m going to comb over every recipe with them and talk about how to improve them, just like those guys do every day. We just went through a reformulation of the year-round beers. You can check out the details here.
You oversaw a huge amount of growth at Trillium. Do you see that type of expansion happening in the future for Sixpoint?
We’re focused on making awesome beers and creating an awesome customer experience, not any aggressive expansion plan. When we brew the beer we’re capable of, any growth will come naturally and organically as a result.
What are your top five favorite beers currently at Sixpoint, and do you think that’s going to change in the near future with a new release?
I am a big fan of sour beers and was blown away by Lil’ Raspy, Galacto, and Jammer. I’m also a big fan of Resin and Puff. We’ve got some exciting new additions that will be brewed on a limited and small batch release scale later this year and early into next year and I’m sure I’ll have more to add to my favorites list!
Sixpoint keeps hyping several announcements to come, your hire being an important piece of the puzzle. Any insight you can give on what’s next?
Give it two weeks. You’ll see. |
/** Condition that applies if the field is true */
public class FieldIsTrueCondition extends AbstractBooleanFieldCondition {
FieldIsTrueCondition(String field) {
super(field);
}
@Override
protected boolean applies(@Nullable Boolean bool) {
return Boolean.TRUE.equals(bool);
}
@Override
public String getConditionCode() {
return "constraint.condition.FieldIsTrue";
}
} |
import java.util.*;
public class temp {
public static void main(String sdf[]){
Scanner s = new Scanner(System.in);
int t;
t = s.nextInt();
int a[] = new int[t];
int arr[];
String ans[] = new String[t];
for(int i=0;i<t;i++){
a[i] = s.nextInt();
arr = new int[a[i]];
for(int k=0;k<a[i];k++)
arr[k] = s.nextInt();
int j;
for( j=0;j<a[i]-1;j++){
if(Math.abs(arr[j]-arr[j+1])==a[i]-1 || Math.abs(arr[j]-arr[j+1])==1 ){
continue;
}
else{
ans[i]="NO";
break;
}
}
if(j==a[i]-1)
ans[i]="YES";
}
for(int i=0;i<t;i++)
System.out.println(ans[i]);
}
} |
/* --------------------------------------------------------------------
dont_learn
Hack for learning. Allow user to denote states in which learning
shouldn't occur when "learning" is set to "except".
-------------------------------------------------------------------- */
Symbol *dont_learn_rhs_function_code (agent* thisAgent, list *args, void* ) {
Symbol *state;
if (!args) {
print (thisAgent, "Error: 'dont-learn' function called with no arg.\n");
return NIL;
}
state = static_cast<Symbol *>(args->first);
if (state->common.symbol_type != IDENTIFIER_SYMBOL_TYPE) {
print_with_symbols (thisAgent, "Error: non-identifier (%y) passed to dont-learn function.\n", state);
return NIL;
} else if (! state->id.isa_goal) {
print_with_symbols(thisAgent, "Error: identifier passed to dont-learn is not a state: %y.\n",state);
}
if (args->rest) {
print (thisAgent, "Error: 'dont-learn' takes exactly 1 argument.\n");
return NIL;
}
if (! member_of_list (state, thisAgent->chunk_free_problem_spaces)) {
push(thisAgent, state, thisAgent->chunk_free_problem_spaces);
}
return NIL;
} |
/*!
* brief Initializes the LCDIF v2.
*
* This function ungates the LCDIF v2 clock and release the peripheral reset.
*
* param base LCDIF v2 peripheral base address.
*/
void LCDIFV2_Init(LCDIFV2_Type *base)
{
#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
uint32_t instance = LCDIFV2_GetInstance(base);
CLOCK_EnableClock(s_lcdifv2Clocks[instance]);
#endif
LCDIFV2_ResetRegister(base);
base->CTRL = 0U;
} |
import React from 'react';
import { inject, observer } from 'mobx-react';
import { withStyles, CircularProgress } from '@material-ui/core';
import styles from 'assets/jss/Users';
import Users from 'components/Users';
import { User, UsersContainerProps, UsersContainerState } from 'models/Users';
@inject('usersStore')
@observer
class UsersContainer extends React.Component<
UsersContainerProps,
Readonly<UsersContainerState>
> {
constructor(props: UsersContainerProps) {
super(props);
this.handleSort = this.handleSort.bind(this);
this.state = { order: 'asc', orderBy: 'id' };
}
componentDidMount(): void {
const { usersStore } = this.props;
usersStore.getAll();
}
handleSort(property: keyof User): void {
const { orderBy, order } = this.state;
const isAsc = orderBy === property && order === 'asc';
this.setState({
order: isAsc ? 'desc' : 'asc',
orderBy: property,
});
}
render(): JSX.Element {
const { usersStore, classes } = this.props;
const { order, orderBy } = this.state;
const { handleSort } = this;
if (usersStore.inProgress) {
return (
<CircularProgress size={80} className={classes.circularProgress} />
);
}
return (
<Users
users={usersStore.allUsers}
classes={classes}
order={order}
orderBy={orderBy}
handleSort={handleSort}
/>
);
}
}
export default withStyles(styles)(UsersContainer);
|
/**
 * <code>checkBeforeSend</code> is a method that validates the adverse events entered by the user.
 * <br>It is declared <strong>private</strong> because it is only used inside this class.
*/
private void checkBeforeSend() {
clearLists();
for (EventoAvversoPerLista elem : listEvento) {
if (elem.getValoreSeverita() > 0) {
if (!elem.getNota().isEmpty()) {
boolean res = firstLetter(elem.getNota());
if (res)
listErrorEvento.add(elem);
else
listApproveEvento.add(elem);
} else
listApproveEvento.add(elem);
} else
listEventoNonToccato.add(elem);
}
} |
// This test exists because looping over pointers, as is done when cleaning up expired/deposed
// resources (and deposed secrets), can lead to surprising behaviors. The test below ensures
// that things are working as intended.
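//
// An illustrative sketch of the pitfall (not part of the test, and assuming the
// pre-Go-1.22 loop semantics): taking the address of the range variable makes
// every element alias a single variable,
//
//	for _, r := range resources {
//	    expired = append(expired, &r) // each entry points at the same loop variable
//	}
//
// so cleanup code must copy the value or index into the slice instead.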
func TestProcessCleanup(t *testing.T) {
tests := []struct {
description string
config string
resources []*sidecred.Resource
secrets []*sidecred.Secret
expectedDestroyCalls int
}{
{
description: "cleanup works",
config: strings.TrimSpace(`
---
version: 1
namespace: team-name
stores:
- type: inprocess
`),
resources: []*sidecred.Resource{
{
Type: sidecred.Randomized,
ID: "r1",
Expiration: time.Now(),
},
{
Type: sidecred.Randomized,
ID: "r2",
Expiration: time.Now(),
},
{
Type: sidecred.Randomized,
ID: "r3",
Expiration: time.Now(),
},
},
secrets: []*sidecred.Secret{
{
ResourceID: "r1",
Path: "path1",
Expiration: time.Now(),
},
{
ResourceID: "r1",
Path: "path2",
Expiration: time.Now(),
},
{
ResourceID: "r2",
Path: "path3",
Expiration: time.Now(),
},
},
expectedDestroyCalls: 3,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
var (
store = inprocess.New()
state = sidecred.NewState()
provider = &fakeProvider{}
logger = zaptest.NewLogger(t)
)
for _, r := range tc.resources {
state.AddResource(r)
}
for _, s := range tc.secrets {
state.AddSecret(&sidecred.StoreConfig{Type: store.Type()}, s)
}
s, err := sidecred.New([]sidecred.Provider{provider}, []sidecred.SecretStore{store}, 10*time.Minute, logger)
require.NoError(t, err)
cfg, err := config.Parse([]byte(tc.config))
require.NoError(t, err)
err = s.Process(cfg, state)
require.NoError(t, err)
assert.Equal(t, tc.expectedDestroyCalls, provider.DestroyCallCount(), "destroy calls")
for _, p := range state.Providers {
if !assert.Equal(t, 0, len(p.Resources)) {
for _, s := range p.Resources {
assert.Nil(t, s)
}
}
}
for _, p := range state.Stores {
if !assert.Equal(t, 0, len(p.Secrets)) {
for _, s := range p.Secrets {
assert.Nil(t, s)
}
}
}
})
}
} |
package de.adesso.objectfieldcoverage.api.filter;
import spoon.reflect.code.CtAssignment;
import spoon.reflect.code.CtExpression;
import spoon.reflect.visitor.filter.TypeFilter;
/**
 * A {@link TypeFilter} which filters for {@link spoon.reflect.code.CtAssignment}s that use a specific expression
 * on the right-hand side of the assignment.
*/
public class AssignmentsRightHandSideTypeFilter<T> extends TypeFilter<CtAssignment<T, ? extends T>> {
/**
* The {@link CtExpression} which must match the right-hand-side of an {@link CtAssignment}.
*/
private CtExpression<?> rightHandSideExpression;
/**
*
     * @param rightHandSideExpression
     *          The expression that must match the right-hand side of an assignment, not {@code null}
*/
public AssignmentsRightHandSideTypeFilter(CtExpression<T> rightHandSideExpression) {
super(CtAssignment.class);
this.rightHandSideExpression = rightHandSideExpression;
}
/**
*
* @param assignment
* The {@link CtAssignment} to check, not {@code null}.
*
* @return
* {@code true}, if the given {@code assignments} right-hand-side is equal to the configured
* expression. {@code false} is returned otherwise.
*/
@Override
public boolean matches(CtAssignment<T, ? extends T> assignment) {
if(!super.matches(assignment)) {
return false;
}
return rightHandSideExpression.equals(assignment.getAssignment());
}
}
|
import { assertExists } from '../../utils';
import { RunHandlersOnBlock } from '../../utils/run.handlers.on.block';
// runs agent handlers against specified block number/hash
export type RunBlock = (blockNumberOrHash: string) => Promise<void>
export function provideRunBlock(
runHandlersOnBlock: RunHandlersOnBlock,
): RunBlock {
assertExists(runHandlersOnBlock, 'runHandlersOnBlock')
return async function runBlock(blockNumberOrHash: string) {
await runHandlersOnBlock(blockNumberOrHash)
}
} |
import React from "react";
import { Theme } from "streamlit-component-lib";
import {
createTheme,
ThemeProvider as MuiThemeProvider,
} from "@material-ui/core/styles";
import chroma from "chroma-js";
interface StreamlitThemeProviderProps {
theme: Theme | undefined;
}
export const ThemeProvider: React.VFC<
React.PropsWithChildren<StreamlitThemeProviderProps>
> = (props) => {
const stTheme = props.theme;
const muiTheme = React.useMemo(() => {
if (stTheme == null) {
return undefined;
}
const textColorScale = chroma
.scale([stTheme.textColor, stTheme.backgroundColor])
.mode("lab");
return createTheme({
palette: {
primary: {
main: stTheme.primaryColor,
},
background: {
default: stTheme.backgroundColor,
paper: stTheme.secondaryBackgroundColor,
},
text: {
primary: stTheme.textColor,
secondary: textColorScale(0.1).hex(),
disabled: textColorScale(0.5).hex(),
},
},
typography: {
fontFamily: stTheme.font,
},
});
}, [stTheme]);
if (muiTheme == null) {
return <>{props.children}</>;
}
return <MuiThemeProvider theme={muiTheme}>{props.children}</MuiThemeProvider>;
};
export default ThemeProvider;
|
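# Usage sketch (illustrative; assumes an instance exposing this method together with
# the `loss`, `metrics`, `predict`, and private helpers referenced below):
#
#   results = model.evaluate(x_test, y_test, batch_size=32, prefix="val_")
#   print(results["val_loss"])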
def evaluate(self,
x: np.ndarray,
y: np.ndarray,
batch_size: int = None,
verbose: int = 1,
prefix: str = "") -> dict:
x, y = self.__check_data(x, y)
batch_size = batch_size or len(x)
self.__update_io_shapes(batch_size)
avg_loss = 0.0
avg_metrics = {prefix + metric: 0.0 for metric in self.metrics}
        n_batches = len(x) // batch_size
        # incremental running means over batches: mean += (value - mean) / n
        for batch_idx, i in enumerate(range(0, n_batches * batch_size, batch_size)):
            x_batch = x[i:i + batch_size]
            y_batch = y[i:i + batch_size]
            y_pred = self.predict(x_batch)
            loss = self.loss.compute_loss(y_batch, y_pred)
            metrics = {prefix + metric: self.metrics[metric](y_batch, y_pred) for metric in self.metrics}
            avg_loss += 1 / (batch_idx + 1) * (loss - avg_loss)
            for metric in self.metrics:
                avg_metrics[prefix + metric] += 1 / (batch_idx + 1) * (metrics[prefix + metric] - avg_metrics[prefix + metric])
            if verbose == 2 and batch_size != len(x):
                print(f"Batch ({batch_idx + 1}/{n_batches}) - loss: {loss}")
                print(f"\t{', '.join([prefix + f'{metric}: {metrics[prefix + metric]:.4f}' for metric in self.metrics])}")
if verbose > 0:
print(prefix + f"loss: {avg_loss}")
print(f"{', '.join([prefix + f'{metric}: {avg_metrics[prefix + metric]:.4f}' for metric in self.metrics])}")
return {prefix + 'loss': avg_loss, **avg_metrics} |
/**
* Factory to build an AndroidPlatformTarget that corresponds to a given Google API level.
*/
private static class AndroidWithGoogleApisFactory implements Factory {
@Override
public AndroidPlatformTarget newInstance(File androidSdkDir, int apiLevel) {
String addonPath = String.format("/add-ons/addon-google_apis-google-%d/libs/", apiLevel);
File addonDirectory = new File(androidSdkDir.getPath() + addonPath);
String[] addonFiles;
if (!addonDirectory.isDirectory() ||
(addonFiles = addonDirectory.list(new AddonFilter())) == null ||
addonFiles.length == 0) {
throw new HumanReadableException(
"Google APIs not found in %s.\n" +
"Please run '%s/tools/android sdk' and select both 'SDK Platform' and " +
"'Google APIs' under Android (API %d)",
addonDirectory.getAbsolutePath(),
androidSdkDir.getPath(),
apiLevel);
}
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
Arrays.sort(addonFiles);
for (String filename : addonFiles) {
builder.add(addonPath + filename);
}
Set<String> additionalJarPaths = builder.build();
return createFromDefaultDirectoryStructure(
String.format("Google Inc.:Google APIs:%d", apiLevel),
androidSdkDir,
String.format("platforms/android-%d", apiLevel),
additionalJarPaths);
}
} |
// "by cc" in order to support sort
bool LiveConfigsWnd::SelectByCCValue(int _configId, int _cc, bool _selectOnly)
{
if (_configId == g_configId)
{
SWS_ListView* lv = GetListView();
HWND hList = lv ? lv->GetHWND() : NULL;
if (lv && hList)
{
for (int i=0; i < lv->GetListItemCount(); i++)
{
LiveConfigItem* item = (LiveConfigItem*)lv->GetListItem(i);
if (item && item->m_cc == _cc)
{
if (_selectOnly)
ListView_SetItemState(hList, -1, 0, LVIS_SELECTED);
ListView_SetItemState(hList, i, LVIS_SELECTED, LVIS_SELECTED);
ListView_EnsureVisible(hList, i, true);
return true;
}
}
}
}
return false;
} |
def _sibling_path(self, label):
    return '//td[contains(., "%s:")]/following-sibling::td[1]' % (label,)
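For context, a usage sketch of such an XPath helper (the lxml calls are an assumption; only the helper itself appears in the source):

import lxml.html

# Hypothetical usage of the XPath built by _sibling_path("Status"):
# //td[contains(., "Status:")]/following-sibling::td[1]
page = lxml.html.fromstring('<table><tr><td>Status:</td><td>Active</td></tr></table>')
xpath = '//td[contains(., "%s:")]/following-sibling::td[1]' % ("Status",)
print(page.xpath(xpath)[0].text)  # -> Active
|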
#ifndef MKCREFLECT_TEST_H_
#define MKCREFLECT_TEST_H_
#include <string.h>
#include <stdio.h>
#ifdef __linux__
# define PRINT_COLOR
#endif
#ifdef PRINT_COLOR
# define COLOR_RED "\x1B[31m"
# define COLOR_GREEN "\x1B[32m"
# define COLOR_RESET "\033[0m"
#else
# define COLOR_RED
# define COLOR_GREEN
# define COLOR_RESET
#endif
#define PRINT_ERR(...) printf(COLOR_RED "Failure" COLOR_RESET __VA_ARGS__)
#define PRINT_OK(...) printf(COLOR_GREEN "Passed" COLOR_RESET __VA_ARGS__)
#define ASSERT_EQ_(expected, actual, cmp, print_op) do { \
if (!(cmp)) \
{ \
PRINT_ERR(" %s %d:\n * %s != %s\n * Expected: " print_op \
"\n * Actual: " print_op "\n", __FILE__, __LINE__, \
#expected, #actual, expected, actual); \
return 1; \
} \
PRINT_OK(" %s == %s\n", #expected, #actual); \
} while (0)
#define ASSERT_STREQ(expected, actual) ASSERT_EQ_(expected, actual, strcmp(expected, actual) == 0, "%s")
#define ASSERT_UEQ(expected, actual) ASSERT_EQ_(expected, actual, expected == actual, "%lu")
#define ASSERT_EQ(expected, actual) ASSERT_EQ_(expected, actual, expected == actual, "%d")
#define ASSERT_FUNC(FNC_CALL) do { \
if (FNC_CALL) { \
return 1; \
} \
} while (0)
#endif /* MKCREFLECT_TEST_H_ */
|
// Week 08/id_090/LeetCode_387_090.go
package main
import "fmt"
func main() {
s := "abcaaaa"
fmt.Println(firstUniqChar(s))
}
func firstUniqChar(s string) int {
lens := len(s)
if lens <= 0 {
return -1
}
m := make(map[uint8]int, lens)
for i := range s {
m[s[i]]++
}
for i := range s {
if m[s[i]] == 1 {
return i
}
}
return -1
}
|
/**
 * <p>Java class for SetAccountFeatureOptInResult complex type.
*
 * <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="SetAccountFeatureOptInResult">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="Status" type="{http://www.w3.org/2001/XMLSchema}int"/>
* <element name="ErrorMessage" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="Success" type="{http://www.w3.org/2001/XMLSchema}boolean"/>
* <element name="Message" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="TermsAndConditionsLink" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "SetAccountFeatureOptInResult", propOrder = {
"status",
"errorMessage",
"success",
"message",
"termsAndConditionsLink"
})
public class SetAccountFeatureOptInResult {
@XmlElement(name = "Status")
protected int status;
@XmlElement(name = "ErrorMessage")
protected String errorMessage;
@XmlElement(name = "Success")
protected boolean success;
@XmlElement(name = "Message")
protected String message;
@XmlElement(name = "TermsAndConditionsLink")
protected String termsAndConditionsLink;
/**
 * Gets the value of the status property.
*
*/
public int getStatus() {
return status;
}
/**
 * Sets the value of the status property.
*
*/
public void setStatus(int value) {
this.status = value;
}
/**
 * Gets the value of the errorMessage property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getErrorMessage() {
return errorMessage;
}
/**
 * Sets the value of the errorMessage property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setErrorMessage(String value) {
this.errorMessage = value;
}
/**
 * Gets the value of the success property.
*
*/
public boolean isSuccess() {
return success;
}
/**
 * Sets the value of the success property.
*
*/
public void setSuccess(boolean value) {
this.success = value;
}
/**
 * Gets the value of the message property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getMessage() {
return message;
}
/**
 * Sets the value of the message property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setMessage(String value) {
this.message = value;
}
/**
 * Gets the value of the termsAndConditionsLink property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getTermsAndConditionsLink() {
return termsAndConditionsLink;
}
/**
 * Sets the value of the termsAndConditionsLink property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setTermsAndConditionsLink(String value) {
this.termsAndConditionsLink = value;
}
} |
US seeks to reassure Turkey that Kurdish troops will not be in a position to take over Sunni Arab city in Syria
Arab forces will lead the fight to recapture Raqqa from Islamic State, the US has said, as it seeks to soothe Turkish concerns that Kurdish troops could take over the predominantly Sunni Arab city in Syria.
Ankara, which wants to prioritise the removal of the US-backed Kurdish YPG forces from northern Syria, has reiterated its hostility towards the involvement of Kurds in the Syrian Democratic Forces, the coalition of Kurdish and Arab fighters that has been on the offensive in the area.
Turkey views the YPG (Kurdish People’s Defence Units) as an arm of the PKK, which has been waging a decades-long insurgency in the south-east of Turkey. It wants to ensure that the YPG cannot knit Kurdish enclaves in northern Syria into a de facto state that could strengthen the PKK.
In an effort to manage the deepening conflict between its two warring allies, and reassure the Turks, the chairman of the US Joint Chiefs of Staff, Joseph Dunford, said he did not think the YPG would be involved in the direct capture of the town, but only in its isolation, a process that he said may take months.
He also suggested Turkish views would be taken into account before any final assault, which in any case is not likely to occur for many months.
“The coalition and Turkey will work together on the long-term plan for seizing, holding and governing Raqqa,” Dunford said.
The SDF has been on the offensive in the area since late Saturday, aiming to initially isolate and encircle Raqqa, which has been under Isis control since 2014. The US-led coalition has said it will provide air support, signalling at the minimum that the US wants to put simultaneous pressure on the extremists in Raqqa and in their other urban stronghold of Mosul, in Iraq.
Joseph Dunford, chairman of the US Joint Chiefs of Staff. Photograph: Cliff Owen/AP
“The SDF is making sure the IS forces that are in Mosul cannot reinforce the IS forces in Raqqa, and that the force in Raqqa cannot conduct external operations into Turkey, into Europe and into the United States,” Dunford said.
He added: “We always knew the SDF wasn’t the solution for holding and governing Raqqa. What we are working on right now is to find the right mix of forces for the operation.”
The right mix is for local tribes and other people from the vicinity of Raqqa to spearhead the operation and remain to hold and govern the city once it is taken from Isis, Dunford said.
“[The operation needs] a predominantly Arab and Sunni Arab force,” he said. “And there are forces like that. There is the moderate Syrian opposition, the vetted Syrian forces and the Free Syrian Army forces, and there is some initial outreach to forces in Raqqa proper.”
He was speaking after meeting his opposite numbers in the Turkish military in Ankara, at which he promised to be completely transparent with the Turks about the Raqqa operation by embedding Turks in the planning operation.
Turkey’s deputy prime minister, Numan Kurtulmuş, said on Monday the Turkish priority was to limit Kurdish advances in Syria.
Using non-Arab elements to drive Isis out of Raqqa would not contribute to peace in the region, he said. He argued that Sunnis in Raqqa, regardless of their dislike of Isis, would not welcome a Kurdish liberation force.
Kurtulmuş also added that “legitimacy cannot be obtained through a terror organisation brandishing arms” and that the “United States will sooner or later see this reality”.
The Raqqa operation is in part designed to reduce Isis options as Iraqi Kurdish peshmerga forces join others in trying to storm Isis-held towns north-east of Mosul.
The Iraqi operation, involving a 100,000-strong alliance of troops, security forces, peshmerga and Shia fighters backed by US-led airstrikes, has made inroads to the south of Mosul and entered the city from the east. Progress inside the city will be dependent on the quality of intelligence the Iraqi forces gather from the local population, so the army is moving slowly.
In Bashiqa, about 10 miles north of Mosul, the first waves of a 2,000-strong peshmerga force entered the town on foot and in armoured vehicles or Humvees.
Isis fighters have sought to slow the offensive on their Mosul stronghold with waves of suicide car bomb attacks, burning of oil heads and artillery shells containing mustard gas.
To the south of Mosul, security forces said they had recaptured and secured the town of Hammam al-Alil from Isis fighters, who they said had kept thousands of residents as human shields as well as marching many others alongside retreating militants towards Mosul as cover from airstrikes. |
/// Upload a file to Azure Blob Store using a fully qualified SAS token
pub fn upload_sas(filename: &str, sas: &str, block_size: usize) -> Result<(), Box<dyn Error>> {
let block_size = cmp::min(block_size, MAX_BLOCK_SIZE);
let (account, container, path) = parse(sas)?;
let client = Client::azure_sas(&account, sas)?;
let mut core = Core::new()?;
let mut file = File::open(filename)?;
let size = usize::try_from(file.metadata()?.len())?;
let mut sent = 0;
let mut blocks = BlockList { blocks: Vec::new() };
let mut data = vec![0; block_size];
while sent < size {
let send_size = cmp::min(block_size, size - sent);
let block_id = to_id(sent as u64)?;
data.resize(send_size, 0);
file.read_exact(&mut data)?;
retry(
Exponential::from_millis(BACKOFF)
.map(jitter)
.take(BACKOFF_COUNT),
|| {
let response = core.run(
client
.put_block()
.with_container_name(&container)
.with_blob_name(&path)
.with_body(&data)
.with_block_id(&block_id)
.finalize(),
);
match response {
Ok(x) => OperationResult::Ok(x),
Err(x) => match x {
AzureError::HyperError(_) => OperationResult::Retry(x),
_ => OperationResult::Err(x),
},
}
},
)?;
blocks.blocks.push(BlobBlockType::Uncommitted(block_id));
sent += send_size;
}
retry(
Exponential::from_millis(BACKOFF)
.map(jitter)
.take(BACKOFF_COUNT),
|| {
let response = core.run(
client
.put_block_list()
.with_container_name(&container)
.with_blob_name(&path)
.with_block_list(&blocks)
.finalize(),
);
match response {
Ok(x) => OperationResult::Ok(x),
Err(x) => match x {
AzureError::HyperError(_) => OperationResult::Retry(x),
_ => OperationResult::Err(x),
},
}
},
)?;
Ok(())
} |
#pragma once
#include "calotypes/DatasetFunctions.hpp"
#include <boost/foreach.hpp>
#include <memory>
namespace calotypes
{
// TODO Constify
/*! \brief Interface class for selecting a subset of data. */
template <class Data>
class DataSelector
{
public:
typedef std::shared_ptr<DataSelector> Ptr;
typedef std::vector<Data> Dataset;
DataSelector() {}
virtual void SelectData( const Dataset& data, unsigned int subsetSize, Dataset& subset) = 0;
};
/*! \brief Returns the full data. */
template <class Data>
class DefaultDataSelector
: public DataSelector<Data>
{
public:
typedef std::shared_ptr<DefaultDataSelector> Ptr;
typedef typename DataSelector<Data>::Dataset Dataset;
DefaultDataSelector() {}
virtual void SelectData( const Dataset& data, unsigned int subsetSize, Dataset& subset )
{
subset = data;
}
};
} // end namespace calotypes
|
def recurseMayaScriptPath(roots=[], verbose=False, excludeRegex=None, errors='warn', prepend=False):
    pass
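The stub above fixes only the signature. Here is a minimal sketch of one possible behaviour, assuming the intent is to walk each root, skip paths matching excludeRegex, and add directories containing MEL scripts to the MAYA_SCRIPT_PATH environment variable (the traversal and environment handling are assumptions, not the real implementation):

import os
import re

def recurse_maya_script_path_sketch(roots=(), verbose=False, excludeRegex=None,
                                    errors='warn', prepend=False):
    # Hypothetical re-implementation; the real helper differs in detail,
    # and the `errors` policy is not modelled here.
    pattern = re.compile(excludeRegex) if excludeRegex else None
    found = []
    for root in roots:
        for dirpath, dirnames, filenames in os.walk(root):
            if pattern and pattern.search(dirpath):
                dirnames[:] = []  # prune: do not descend into excluded paths
                continue
            if any(f.endswith(".mel") for f in filenames):
                found.append(dirpath)
                if verbose:
                    print("adding script dir:", dirpath)
    current = [p for p in os.environ.get("MAYA_SCRIPT_PATH", "").split(os.pathsep) if p]
    parts = found + current if prepend else current + found
    os.environ["MAYA_SCRIPT_PATH"] = os.pathsep.join(parts)
|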
Authorities in the eastern Chinese province of Zhejiang are pressing ahead with a demolition campaign targeting “illegal” Christian crosses amid growing resistance from local believers.
Government-backed demolition gangs have taken down crosses from the tops of churches in provincial capital Wenzhou, Taizhou, Huzhou and Jinhua cities in recent days, in a slew of demolitions billed in state media as a building safety campaign.
A total of 16 believers and pastors were taken away by police following scuffles with a cross demolition gang last week, and eight have yet to be released, a lawyer for the church members told RFA.
“They were detained because they refused to allow the cross to be taken down, and so they arrested them,” lawyer Pang Kun said, adding that only seven of the detainees were known to be under criminal detention, while the status of the eighth had yet to be confirmed.
“They are all being held on suspicion of ‘running an illegal business,’ ‘obstructing official duty,’ and ‘running secret accounting procedures,’ random stuff like that,” Pang said.
“This is about the fact that they were opposed to the removal of the cross from their church, and this is a form of revenge against them,” he said. “They want to send any church that doesn’t comply into disarray.”
“This is an unreasonable act of revenge, which seriously violates the rights of these believers to freedom of religious belief,” Pang added. “It is also against the law.”
Pastors hauled away
Meanwhile, Christians in Wenzhou said that the authorities had detained eight local Protestant pastors and preachers, none of whom had been released by evening local time.
“They were called in for a chat and I don’t think they’ve come out since, because I have asked about them,” a pastor who declined to be named said. “It is probably something to do with the crosses.”
According to a church member in Wenzhou’s Pingyang county, where a large cross was removed from a local church last week, pastor Zhang Chongyang was also taken away by police in the evening.
“He was called in for questioning by police, and he … is now under formal summons,” the church member said.
Church members in Zhejiang’s Cangnan county said their pastor Liang Pu was among those hauled in for questioning.
And a pastor in Yuyaocheng village said local Protestant church members have tried to adopt civil disobedience principles in the face of the cross demolition campaign.
“As a pastor, I believe that we should stand firm but ensure no blood is shed,” the pastor said. “We need more people power, to run a non-violent resistance campaign to the bitter end.”
In Pingyang county’s Qihu church, some congregants were sent to hospital after a government-backed gang smashed down the church doors, sending them flying to the floor, before dragging them out of the church and beating them unconscious.
“He is still undergoing medical tests,” the wife of injured Qihu church member Lan Tiansi told RFA from the hospital. “The doctor wants to talk to me; I’ll speak to you later.”
'Safety and beauty'
According to the Global Times newspaper, which has close ties to the ruling Chinese Communist Party, Zhejiang authorities said they are merely “relocating” the crosses from the roofs of churches to the interior, “for the sake of safety and beauty,” it quoted local religious affairs officials as saying.
The removal is part of a three-year urbanisation and beautification campaign, which orders local governments to "revise" old neighborhoods, old industrial sites and urban villages and demolish illegal structures by 2015, it said.
A Pingyang county resident who declined to be named said the government’s cross demolition program is seen as a direct attack on the region’s Christians, however.
“It’s not as simple as pulling down a cross; it’s an attack on our beliefs, and we must rise up to resist it,” the man said. “Our resistance will be non-violent, however, because we are opposed to any form of violence.”
“The government does use violence, frequently. We believers are very angry, but we have to forgive them, right?”
Meanwhile, photos of Christians in Wenzhou, known as “China’s Jerusalem” because of the high proportion of believers, were widely retweeted on social media sites this week as they got together to make wooden crosses as part of the civil disobedience campaign.
Hong Kong media reported that the Christians plan to display the roughly-made and red-painted crosses all across the province as a form of protest against the authorities’ demolition program.
China is home to an estimated 23 million members of the government-backed Three-Self Patriotic Movement, as well as an unknown number of worshipers in unofficial “house churches.”
Reported by Hai Nan for RFA’s Cantonese Service, and by Yang Fan for the Mandarin Service. Translated and written in English by Luisetta Mudie. |
Convert Microsoft Word to Plain Text
This is a repost of an entry from 2004. This Word-cleaning functionality is showing up in more and more web editors, but people might still find this useful.
Most of the time when I’m writing content for the web (for this blog, or a forum comment, or whatever), I’ll write in Microsoft Word for the spell check and other features that aren’t in a standard textarea widget, and then I’ll cut and paste into the form on the site.
The problem is that this carries all of the high characters (“smart-quotes” and the like) that MS Word makes straight through to the site — and most sites aren’t set up to handle them. They expect plain (“Latin”) text.
A solution: this script converts text copied from MS Word into plain text. Paste your input into the top box, press clean, and the input will be scrubbed and sent to the lower box.
(If you want to clean up Word HTML, rather than just create plain text, I suggest that you use HTML Tidy with the “clean” and “Word 2000” boxes checked.)
Sample input: “Double Quotes”, ‘Single quotes’, Ellipsis …, em-dash —
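The original page does this scrubbing in the browser; as a rough illustration of the idea only, here is a minimal Python sketch (the exact substitution table the script uses is an assumption):

# Minimal sketch: map common MS Word "smart" characters to plain ASCII.
# The substitution table is an assumption, not the original script's.
WORD_CHAR_MAP = {
    "\u201c": '"',    # left double quote
    "\u201d": '"',    # right double quote
    "\u2018": "'",    # left single quote
    "\u2019": "'",    # right single quote
    "\u2026": "...",  # ellipsis
    "\u2014": "--",   # em-dash
    "\u2013": "-",    # en-dash
}

def clean_word_text(text: str) -> str:
    """Replace Word's high characters with plain-text equivalents."""
    for smart, plain in WORD_CHAR_MAP.items():
        text = text.replace(smart, plain)
    return text

print(clean_word_text("\u201cDouble Quotes\u201d, \u2018Single quotes\u2019, Ellipsis\u2026"))
|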
/// Render help text for a window
fn render_help(&self, console: &mut dyn ConsoleOut, timeout: usize, update: bool) {
self.set_help_region(console);
// render the helper text for the selected component
match self.entries.get(self.selected) {
Some(entry) => entry.help(console),
_ => {}
};
    // TODO: render back action if the list is exitable
} |
/**
* A basic implementation of LogListener. This implementation saves each log
* entry over the debug threshold into a file. The file location is determined
* by config parameters passed to BasicLog.
*
* BasicLog starts with three LogListeners; FileLogListener (this one),
* GraphicLogListener, and ConsoleLogListener. There is no need to install
* it into BasicLog unless you want ANOTHER FileLogListener.
*/
public class FileLogListener
extends AbstractLogListener
implements LogWriter
{
public static final int ITEM_BUFFER_MARGIN = 40;
public static final String DEFAULT_LOGFILE_DIR = "logs";
public static final String DEFAULT_LOGFILE_EXT = ".log";
public static final String DEFAULT_LOGFILE = "log"
+ DEFAULT_LOGFILE_EXT;
public static final String DEFAULT_LOGFILE_PREFIX = "";
public static final String DEFAULT_LOGFILE_SUFFIX = "";
private String logFilenamePre = DEFAULT_LOGFILE_PREFIX;
private String logFilenameSuf = DEFAULT_LOGFILE_SUFFIX;
private String logFilename = DEFAULT_LOGFILE;
private String logDirectory = DEFAULT_LOGFILE_DIR
+ File.separator;
private FileWriter fileWriter = null;
private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
  /**
   * Constructor for FileLogListener.
   *
   * @param application the owning application
   * @param directory the directory for the log files
   * @param filenamePrefix the prefix prepended to generated log filenames
   * @param filenameSuffix the suffix appended to generated log filenames
   * @param threshold the debug threshold for this listener
   */
public FileLogListener(TechEmpowerApplication application,
String directory, String filenamePrefix, String filenameSuffix,
int threshold)
{
super(application);
this.logFilename = generateFilename();
setLogfileDirectory(directory);
setLogFilenamePrefix(filenamePrefix);
setLogFilenameSuffix(filenameSuffix);
setDebugThreshold(threshold);
// Get the LogWriterCloser to monitor this LogWriter. This allows
// us to not worry about closing the file.
application.getLogManager().addLogWriter(this, application.getVersion().getProductName() + " debug log");
}
/**
* Returns the name of this listener.
*/
@Override
public String getName()
{
return "File Log";
}
//
// log methods
//
/**
* Log a string to the log file. Also displays to console. Takes a priority
* level and <i>does not</i> log this item if the priority level is less
* than the debug level threshold. Do not call this method directly; it is
* for a Log class to call.
*
* @param componentCode a four-letter component code of the caller.
* @param logString the string to write into the file.
* @param debugLevel the priority level.
*/
@Override
public void log(String componentCode, String logString, int debugLevel)
{
// Is the priority of the item being logged higher or equal to the
// debugging threshold set for this Log? If not, ignore this item.
if (debugLevel >= getDebugThreshold())
{
// Strip out carriage returns and other control characters that may
// mess up our nicely formatted log.
String toLog = StringHelper.stripISOControlCharacters(logString, " ");
// Update the time in the calendar.
computeTimestamps();
// Build the line.
StringBuilder buffer = new StringBuilder(ITEM_BUFFER_MARGIN + toLog.length());
buffer.append(getApplication().getVersion().getProductCode());
buffer.append(' ');
buffer.append(getFullTimestamp());
buffer.append(' ');
buffer.append(componentCode);
buffer.append(": ");
buffer.append(toLog);
buffer.append("\r\n");
// Queue this item to be written.
this.queue.add(buffer.toString());
}
}
//
// end log methods
//
//
// file methods
//
/**
* Closes the log file. It will be reopened by writeLogEntry the next
* time a log entry needs to be written. This method will be called
* by the LogFileCloser.
*/
@Override
public void closeFile()
{
try
{
if (this.fileWriter != null)
{
this.fileWriter.close();
}
}
catch (IOException ioexc)
{
debug(Log.COMPONENT_CODE, "IOException while closing log file!");
}
this.fileWriter = null;
}
  /**
   * Closes the log file. The provided statement is currently ignored;
   * this overload simply delegates to closeFile().
   */
@Override
public void closeFile(String statement)
{
closeFile();
}
/**
* Is the log file open?
*/
@Override
public boolean isOpen()
{
return (this.fileWriter != null);
}
/**
* Sets the filename prefix for Logs created by this logging component.
* The prefix will be appended prior to the date portion of the filename.
*
* @param prefixString the prefix to append to filenames.
*/
private void setLogFilenamePrefix(String prefixString)
{
this.logFilenamePre = prefixString;
}
/**
* Sets the filename suffix for Logs created by this logging component.
* The suffix will be appended after the date portion of the filename
* but prior to the file's extension.
*
* @param suffixString the suffix to append to filenames.
*/
private void setLogFilenameSuffix(String suffixString)
{
this.logFilenameSuf = suffixString;
}
/**
* Sets the logfile directory.
*
* @param dir the directory to use for log files.
*/
private void setLogfileDirectory(String dir)
{
if (dir.endsWith(File.separator))
{
this.logDirectory = dir;
}
else
{
this.logDirectory = dir + File.separator;
}
}
/**
* Checks to see if a new file needs to be opened.
*/
private boolean needNewFile()
{
return super.pastEndOfDay();
}
/**
* Flush the contents of the log item queue.
*/
@Override
public boolean flushFile()
{
// Only proceed if we need to write something (we have something in
// the queue).
String logItem = this.queue.poll();
if (logItem != null)
{
// Create a new file if necessary.
if (needNewFile())
{
this.logFilename = generateFilename();
closeFile();
debug(Log.COMPONENT_CODE, "New log file: " + this.logFilename);
}
// Open a new file if necessary.
if (this.fileWriter == null)
{
openFile(this.logDirectory + this.logFilename);
}
// Just a sanity check that we have a valid file writer reference.
if (this.fileWriter != null)
{
try
{
// Write everything we have in the queue.
while (logItem != null)
{
this.fileWriter.write(logItem);
logItem = this.queue.poll();
}
// Flush.
this.fileWriter.flush();
return true;
}
catch (IOException ioexc)
{
System.out.println("Cannot write to log file. " + ioexc);
}
}
}
return false;
}
/**
* Opens the fileWriter and initializes the fileWriter object.
*/
private void openFile(String filename)
{
// Open for append.
try
{
this.fileWriter = new FileWriter(filename, true);
}
catch (IOException ioexc)
{
System.out.println(Log.COMPONENT_CODE + "Cannot open log: " + filename);
}
}
/**
* Generate a new filename.
*/
private String generateFilename()
{
// Use a local calendar instance here.
Calendar fnCal = DateHelper.getCalendarInstance();
StringBuilder buffer = new StringBuilder(30);
buffer.append(this.logFilenamePre);
buffer.append(StringHelper.padZero(fnCal.get(Calendar.YEAR), 4));
buffer.append('-');
buffer.append(StringHelper.padZero(fnCal.get(Calendar.MONTH) + 1, 2));
buffer.append('-');
buffer.append(StringHelper.padZero(fnCal.get(Calendar.DAY_OF_MONTH), 2));
buffer.append(this.logFilenameSuf);
buffer.append(DEFAULT_LOGFILE_EXT);
return buffer.toString();
}
//
// end file methods
//
} |
import type { NextApiRequest, NextApiResponse } from "next";
import fetchPR from "lib/fetchPR";
import { PRData } from "lib/types";
export default async function handler(
req: NextApiRequest,
res: NextApiResponse<PRData>
) {
const { prNumber, repoName, repoOwner } = req.query;
res
.status(200)
.json(
await fetchPR(repoOwner as string, repoName as string, prNumber as string)
);
}
|
// guapi233/Tifa-web
/**
 * Third-level routes nested under the Setting second-level route
*/
const Setting = () =>
import(/* webpackChunkName: "Setting" */ "@/views/Setting.vue");
const SettingBase = () =>
import(/* webpackChunkName: "SettingBase" */ "@/views/Setting/Base.vue");
const SettingAccount = () =>
import(
/* webpackChunkName: "SettingAccount" */ "@/views/Setting/Account.vue"
);
const SettingMail = () =>
import(/* webpackChunkName: "SettingMail" */ "@/views/Setting/Mail.vue");
const SettingPreference = () =>
import(
/* webpackChunkName: "SettingPreference" */ "@/views/Setting/Preference.vue"
);
const SettingFilter = () =>
import(/* webpackChunkName: "SettingFilter" */ "@/views/Setting/Filter.vue");
export default {
path: "/setting",
component: Setting,
meta: {
requireAuth: true,
},
children: [
{
path: "",
name: "SettingBase",
component: SettingBase,
},
{
path: "account",
name: "SettingAccount",
component: SettingAccount,
},
{
path: "mail",
name: "SettingMail",
component: SettingMail,
},
{
path: "preference",
name: "SettingPreference",
component: SettingPreference,
},
{
path: "filter",
name: "SettingFilter",
component: SettingFilter,
},
],
};
|
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]
/// Length of config word sector in words
pub const CONFIG_SECTOR_LENGTH: usize = 4;
type USERID = u16;
/// Alternate I/O Select for I2C1
pub enum AI2C1 {
/// I2C1 uses the ASDA1/ASCL1 pins
ON = 0x1,
/// I2C1 uses the SDA1/SCL1 pins
OFF = 0x0,
}
/// Alternate I/O Select for I2C2
pub enum AI2C2 {
/// I2C2 uses the ASDA2/ASCL2 pins
ON = 0x1,
/// I2C2 uses the SDA2/SCL2 pins
OFF = 0x0,
}
/// Peripheral Module Disable Configuration
pub enum PMDL1WAY {
/// Allow only one reconfiguration
ON = 0x1,
/// Allow multiple reconfigurations
OFF = 0x0,
}
/// Peripheral Pin Select Configuration
pub enum IOL1WAY {
/// Allow only one reconfiguration
ON = 0x1,
/// Allow multiple reconfigurations
OFF = 0x0,
}
/// PLL Input Divider
pub enum FPLLIDIV {
/// 1x Divider
DIV_1 = 0x0,
/// 2x Divider
DIV_2 = 0x1,
/// 3x Divider
DIV_3 = 0x2,
/// 4x Divider
DIV_4 = 0x3,
/// 5x Divider
DIV_5 = 0x4,
/// 6x Divider
DIV_6 = 0x5,
/// 10x Divider
DIV_10 = 0x6,
/// 12x Divider
DIV_12 = 0x7,
}
/// PLL Multiplier
pub enum FPLLMUL {
/// 15x Multiplier
MUL_15 = 0x0,
/// 16x Multiplier
MUL_16 = 0x1,
/// 17x Multiplier
MUL_17 = 0x2,
/// 18x Multiplier
MUL_18 = 0x3,
/// 19x Multiplier
MUL_19 = 0x4,
/// 20x Multiplier
MUL_20 = 0x5,
/// 21x Multiplier
MUL_21 = 0x6,
/// 24x Multiplier
MUL_24 = 0x7,
}
/// System PLL Input Clock Selection
pub enum FPLLICLK {
/// POSC is input to the System PLL
PLL_POSC = 0x0,
/// FRC is input to the System PLL
PLL_FRC = 0x1,
}
/// System PLL Output Clock Divider
pub enum FPLLODIV {
/// PLL Divide by 1
DIV_1 = 0x0,
/// PLL Divide by 2
DIV_2 = 0x1,
/// PLL Divide by 4
DIV_4 = 0x2,
/// PLL Divide by 8
DIV_8 = 0x3,
/// PLL Divide by 16
DIV_16 = 0x4,
/// PLL Divide by 32
DIV_32 = 0x5,
/// PLL Divide by 64
DIV_64 = 0x6,
/// PLL Divide by 256
DIV_256 = 0x7,
}
/// Brown-Out Reset (BOR) Enable
pub enum BOREN {
/// Disable BOR
OFF = 0x0,
/// Enable BOR
ON = 0x1,
}
/// Deep Sleep BOR Enable
pub enum DSBOREN {
/// Disable ZPBOR during Deep Sleep Mode
OFF = 0x0,
/// Enable ZPBOR during Deep Sleep Mode
ON = 0x1,
}
/// Deep Sleep Watchdog Timer Postscaler
pub enum DSWDTPS {
/// 1:2^5
DSPS1 = 0x0,
/// 1:2^6
DSPS2 = 0x1,
/// 1:2^7
DSPS3 = 0x2,
/// 1:2^8
DSPS4 = 0x3,
/// 1:2^9
DSPS5 = 0x4,
/// 1:2^10
DSPS6 = 0x5,
/// 1:2^11
DSPS7 = 0x6,
/// 1:2^12
DSPS8 = 0x7,
/// 1:2^13
DSPS9 = 0x8,
/// 1:2^14
DSPS10 = 0x9,
/// 1:2^15
DSPS11 = 0xa,
/// 1:2^16
DSPS12 = 0xb,
/// 1:2^17
DSPS13 = 0xc,
/// 1:2^18
DSPS14 = 0xd,
/// 1:2^19
DSPS15 = 0xe,
/// 1:2^20
DSPS16 = 0xf,
/// 1:2^21
DSPS17 = 0x10,
/// 1:2^22
DSPS18 = 0x11,
/// 1:2^23
DSPS19 = 0x12,
/// 1:2^24
DSPS20 = 0x13,
/// 1:2^25
DSPS21 = 0x14,
/// 1:2^26
DSPS22 = 0x15,
/// 1:2^27
DSPS23 = 0x16,
/// 1:2^28
DSPS24 = 0x17,
/// 1:2^29
DSPS25 = 0x18,
/// 1:2^30
DSPS26 = 0x19,
/// 1:2^31
DSPS27 = 0x1a,
/// 1:2^32
DSPS28 = 0x1b,
/// 1:2^33
DSPS29 = 0x1c,
/// 1:2^34
DSPS30 = 0x1d,
/// 1:2^35
DSPS31 = 0x1e,
/// 1:2^36
DSPS32 = 0x1f,
}
/// Deep Sleep WDT Reference Clock Selection
pub enum DSWDTOSC {
/// Select SOSC as DSWDT Reference Clock
SOSC = 0x0,
/// Select LPRC as DSWDT Reference clock
LPRC = 0x1,
}
/// Deep Sleep Watchdog Timer Enable
pub enum DSWDTEN {
/// Disable DSWDT during Deep Sleep Mode
OFF = 0x0,
/// Enable DSWDT during Deep Sleep Mode
ON = 0x1,
}
/// Deep Sleep Enable
pub enum FDSEN {
/// Disable DSEN bit in DSCON
OFF = 0x0,
/// Enable DSEN bit in DSCON
ON = 0x1,
}
/// Oscillator Selection Bits
pub enum FNOSC {
/// Fast RC Osc (FRC)
FRC = 0x0,
/// System PLL
SPLL = 0x1,
/// Primary Osc (HS,EC, XT)
POSC = 0x2,
/// Low Power Secondary Osc (SOSC)
SOSC = 0x4,
/// Low Power RC Osc (LPRC)
LPRC = 0x5,
}
/// Secondary Oscillator Enable
pub enum FSOSCEN {
/// Disabled
OFF = 0x0,
/// Enabled
ON = 0x1,
}
/// Internal/External Switch Over
pub enum IESO {
/// Disabled
OFF = 0x0,
/// Enabled
ON = 0x1,
}
/// Primary Oscillator Configuration
pub enum POSCMOD {
/// External clock mode
EC = 0x0,
/// XT osc mode
XT = 0x1,
/// HS osc mode
HS = 0x2,
/// Primary osc disabled
OFF = 0x3,
}
/// CLKO Output Signal Active on the OSCO Pin
pub enum OSCIOFNC {
/// Disabled
OFF = 0x1,
/// Enabled
ON = 0x0,
}
/// Peripheral Clock Divisor
pub enum FPBDIV {
/// Pb_Clk is Sys_Clk/1
DIV_1 = 0x0,
/// Pb_Clk is Sys_Clk/2
DIV_2 = 0x1,
/// Pb_Clk is Sys_Clk/4
DIV_4 = 0x2,
/// Pb_Clk is Sys_Clk/8
DIV_8 = 0x3,
}
/// Clock Switching and Monitor Selection
pub enum FCKSM {
/// Clock Switch Enable, FSCM Enabled
CSECME = 0x0,
/// Clock Switch Enable, FSCM Disabled
CSECMD = 0x1,
/// Clock Switch Disable, FSCM Disabled
CSDCMD = 0x3,
}
/// Watchdog Timer Postscaler
pub enum WDTPS {
/// 1:1
PS1 = 0x0,
/// 1:2
PS2 = 0x1,
/// 1:4
PS4 = 0x2,
/// 1:8
PS8 = 0x3,
/// 1:16
PS16 = 0x4,
/// 1:32
PS32 = 0x5,
/// 1:64
PS64 = 0x6,
/// 1:128
PS128 = 0x7,
/// 1:256
PS256 = 0x8,
/// 1:512
PS512 = 0x9,
/// 1:1024
PS1024 = 0xa,
/// 1:2048
PS2048 = 0xb,
/// 1:4096
PS4096 = 0xc,
/// 1:8192
PS8192 = 0xd,
/// 1:16384
PS16384 = 0xe,
/// 1:32768
PS32768 = 0xf,
/// 1:65536
PS65536 = 0x10,
/// 1:131072
PS131072 = 0x11,
/// 1:262144
PS262144 = 0x12,
/// 1:524288
PS524288 = 0x13,
/// 1:1048576
PS1048576 = 0x14,
}
/// Watchdog Timer Stop During Flash Programming
pub enum WDTSPGM {
/// Watchdog Timer stops during Flash programming
ON = 0x1,
/// Watchdog Timer runs during Flash programming
OFF = 0x0,
}
/// Watchdog Timer Window Enable
pub enum WINDIS {
/// Watchdog Timer is in Window Mode
ON = 0x0,
/// Watchdog Timer is in Non-Window Mode
OFF = 0x1,
}
/// Watchdog Timer Enable
pub enum FWDTEN {
/// WDT Disabled (SWDTEN Bit Controls)
OFF = 0x0,
/// WDT Enabled
ON = 0x1,
}
/// Watchdog Timer Window Size
pub enum FWDTWINSZ {
/// Window Size is 75%
WINSZ_75 = 0x0,
/// Window Size is 50%
WINSZ_50 = 0x1,
/// Window Size is 37.5%
WINSZ_37 = 0x2,
/// Window Size is 25%
WINSZ_25 = 0x3,
}
/// Background Debugger Enable
pub enum DEBUG {
/// Debugger is Enabled
ON = 0x0,
/// Debugger is Disabled
OFF = 0x3,
}
/// JTAG Enable
pub enum JTAGEN {
/// JTAG Port Enabled
ON = 0x1,
/// JTAG Disabled
OFF = 0x0,
}
/// ICE/ICD Comm Channel Select
pub enum ICESEL {
/// Communicate on PGEC1/PGED1
ICS_PGx1 = 0x3,
/// Communicate on PGEC2/PGED2
ICS_PGx2 = 0x2,
/// Communicate on PGEC3/PGED3
ICS_PGx3 = 0x1,
/// Communicate on PGEC4/PGED4
ICS_PGx4 = 0x0,
}
/// Program Flash Write Protect
pub enum PWP {
/// Disable
OFF = 0xff,
/// First 4K
PWP4K = 0xfe,
/// First 8K
PWP8K = 0xfd,
/// First 12K
PWP12K = 0xfc,
/// First 16K
PWP16K = 0xfb,
/// First 20K
PWP20K = 0xfa,
/// First 24K
PWP24K = 0xf9,
/// First 28K
PWP28K = 0xf8,
/// First 32K
PWP32K = 0xf7,
/// First 36K
PWP36K = 0xf6,
/// First 40K
PWP40K = 0xf5,
/// First 44K
PWP44K = 0xf4,
/// First 48K
PWP48K = 0xf3,
/// First 52K
PWP52K = 0xf2,
/// First 56K
PWP56K = 0xf1,
/// First 60K
PWP60K = 0xf0,
/// First 64K
PWP64K = 0xef,
/// First 68K
PWP68K = 0xee,
/// First 72K
PWP72K = 0xed,
/// First 76K
PWP76K = 0xec,
/// First 80K
PWP80K = 0xeb,
/// First 84K
PWP84K = 0xea,
/// First 88K
PWP88K = 0xe9,
/// First 92K
PWP92K = 0xe8,
/// First 96K
PWP96K = 0xe7,
/// First 100K
PWP100K = 0xe6,
/// First 104K
PWP104K = 0xe5,
/// First 108K
PWP108K = 0xe4,
/// First 112K
PWP112K = 0xe3,
/// First 116K
PWP116K = 0xe2,
/// First 120K
PWP120K = 0xe1,
/// First 124K
PWP124K = 0xe0,
/// First 128K
PWP128K = 0xdf,
/// First 132K
PWP132K = 0xde,
/// First 136K
PWP136K = 0xdd,
/// First 140K
PWP140K = 0xdc,
/// First 144K
PWP144K = 0xdb,
/// First 148K
PWP148K = 0xda,
/// First 152K
PWP152K = 0xd9,
/// First 156K
PWP156K = 0xd8,
/// First 160K
PWP160K = 0xd7,
/// First 164K
PWP164K = 0xd6,
/// First 168K
PWP168K = 0xd5,
/// First 172K
PWP172K = 0xd4,
/// First 176K
PWP176K = 0xd3,
/// First 180K
PWP180K = 0xd2,
/// First 184K
PWP184K = 0xd1,
/// First 188K
PWP188K = 0xd0,
/// First 192K
PWP192K = 0xcf,
/// First 196K
PWP196K = 0xce,
/// First 200K
PWP200K = 0xcd,
/// First 204K
PWP204K = 0xcc,
/// First 208K
PWP208K = 0xcb,
/// First 212K
PWP212K = 0xca,
/// First 216K
PWP216K = 0xc9,
/// First 220K
PWP220K = 0xc8,
/// First 224K
PWP224K = 0xc7,
/// First 228K
PWP228K = 0xc6,
/// First 232K
PWP232K = 0xc5,
/// First 236K
PWP236K = 0xc4,
/// First 240K
PWP240K = 0xc3,
/// First 244K
PWP244K = 0xc2,
/// First 248K
PWP248K = 0xc1,
/// First 252K
PWP252K = 0xc0,
/// First 256K
PWP256K = 0xbf,
}
/// Soft Master Clear Enable
pub enum SMCLR {
/// MCLR pin generates an emulated POR Reset
MCLR_POR = 0x0,
/// MCLR pin generates a normal system Reset
MCLR_NORM = 0x1,
}
/// Boot Flash Write Protect bit
pub enum BWP {
/// Protection Enabled
ON = 0x0,
/// Protection Disabled
OFF = 0x1,
}
/// Code Protect
pub enum CP {
/// Protection Enabled
ON = 0x0,
/// Protection Disabled
OFF = 0x1,
}
/// Configuration word sector
#[repr(C)]
pub struct ConfigSector {
DEVCFG3: u32,
DEVCFG2: u32,
DEVCFG1: u32,
DEVCFG0: u32,
}
impl ConfigSector {
/// Create a builder
pub const fn default() -> ConfigSectorBuilder {
ConfigSectorBuilder {
DEVCFG3: 0xffffffff,
DEVCFG2: 0xffffffff,
DEVCFG1: 0xffffffff,
DEVCFG0: 0xffffffff,
}
}
/// Convert into a array of 32 bit words consuming this ConfigSector
pub const fn into_array(self) -> [u32; CONFIG_SECTOR_LENGTH] {
[self.DEVCFG3, self.DEVCFG2, self.DEVCFG1, self.DEVCFG0]
}
}
/// Configuration word sector builder
pub struct ConfigSectorBuilder {
DEVCFG3: u32,
DEVCFG2: u32,
DEVCFG1: u32,
DEVCFG0: u32,
}
impl ConfigSectorBuilder {
pub const fn USERID(mut self, v: USERID) -> Self {
self.DEVCFG3 &= !0x0000ffff;
self.DEVCFG3 |= v as u32;
self
}
pub const fn AI2C1(mut self, v: AI2C1) -> Self {
self.DEVCFG3 &= !0x00400000;
self.DEVCFG3 |= (v as u32) << 22;
self
}
pub const fn AI2C2(mut self, v: AI2C2) -> Self {
self.DEVCFG3 &= !0x00800000;
self.DEVCFG3 |= (v as u32) << 23;
self
}
pub const fn PMDL1WAY(mut self, v: PMDL1WAY) -> Self {
self.DEVCFG3 &= !0x10000000;
self.DEVCFG3 |= (v as u32) << 28;
self
}
pub const fn IOL1WAY(mut self, v: IOL1WAY) -> Self {
self.DEVCFG3 &= !0x20000000;
self.DEVCFG3 |= (v as u32) << 29;
self
}
pub const fn FPLLIDIV(mut self, v: FPLLIDIV) -> Self {
self.DEVCFG2 &= !0x00000007;
self.DEVCFG2 |= v as u32;
self
}
pub const fn FPLLMUL(mut self, v: FPLLMUL) -> Self {
self.DEVCFG2 &= !0x00000070;
self.DEVCFG2 |= (v as u32) << 4;
self
}
pub const fn FPLLICLK(mut self, v: FPLLICLK) -> Self {
self.DEVCFG2 &= !0x00000080;
self.DEVCFG2 |= (v as u32) << 7;
self
}
pub const fn FPLLODIV(mut self, v: FPLLODIV) -> Self {
self.DEVCFG2 &= !0x00070000;
self.DEVCFG2 |= (v as u32) << 16;
self
}
pub const fn BOREN(mut self, v: BOREN) -> Self {
self.DEVCFG2 &= !0x00100000;
self.DEVCFG2 |= (v as u32) << 20;
self
}
pub const fn DSBOREN(mut self, v: DSBOREN) -> Self {
self.DEVCFG2 &= !0x00800000;
self.DEVCFG2 |= (v as u32) << 23;
self
}
pub const fn DSWDTPS(mut self, v: DSWDTPS) -> Self {
self.DEVCFG2 &= !0x1f000000;
self.DEVCFG2 |= (v as u32) << 24;
self
}
pub const fn DSWDTOSC(mut self, v: DSWDTOSC) -> Self {
self.DEVCFG2 &= !0x20000000;
self.DEVCFG2 |= (v as u32) << 29;
self
}
pub const fn DSWDTEN(mut self, v: DSWDTEN) -> Self {
self.DEVCFG2 &= !0x40000000;
self.DEVCFG2 |= (v as u32) << 30;
self
}
pub const fn FDSEN(mut self, v: FDSEN) -> Self {
self.DEVCFG2 &= !0x80000000;
self.DEVCFG2 |= (v as u32) << 31;
self
}
pub const fn FNOSC(mut self, v: FNOSC) -> Self {
self.DEVCFG1 &= !0x00000007;
self.DEVCFG1 |= v as u32;
self
}
pub const fn FSOSCEN(mut self, v: FSOSCEN) -> Self {
self.DEVCFG1 &= !0x00000020;
self.DEVCFG1 |= (v as u32) << 5;
self
}
pub const fn IESO(mut self, v: IESO) -> Self {
self.DEVCFG1 &= !0x00000080;
self.DEVCFG1 |= (v as u32) << 7;
self
}
pub const fn POSCMOD(mut self, v: POSCMOD) -> Self {
self.DEVCFG1 &= !0x00000300;
self.DEVCFG1 |= (v as u32) << 8;
self
}
pub const fn OSCIOFNC(mut self, v: OSCIOFNC) -> Self {
self.DEVCFG1 &= !0x00000400;
self.DEVCFG1 |= (v as u32) << 10;
self
}
pub const fn FPBDIV(mut self, v: FPBDIV) -> Self {
self.DEVCFG1 &= !0x00003000;
self.DEVCFG1 |= (v as u32) << 12;
self
}
pub const fn FCKSM(mut self, v: FCKSM) -> Self {
self.DEVCFG1 &= !0x0000c000;
self.DEVCFG1 |= (v as u32) << 14;
self
}
pub const fn WDTPS(mut self, v: WDTPS) -> Self {
self.DEVCFG1 &= !0x001f0000;
self.DEVCFG1 |= (v as u32) << 16;
self
}
pub const fn WDTSPGM(mut self, v: WDTSPGM) -> Self {
self.DEVCFG1 &= !0x00200000;
self.DEVCFG1 |= (v as u32) << 21;
self
}
pub const fn WINDIS(mut self, v: WINDIS) -> Self {
self.DEVCFG1 &= !0x00400000;
self.DEVCFG1 |= (v as u32) << 22;
self
}
pub const fn FWDTEN(mut self, v: FWDTEN) -> Self {
self.DEVCFG1 &= !0x00800000;
self.DEVCFG1 |= (v as u32) << 23;
self
}
pub const fn FWDTWINSZ(mut self, v: FWDTWINSZ) -> Self {
self.DEVCFG1 &= !0x03000000;
self.DEVCFG1 |= (v as u32) << 24;
self
}
pub const fn DEBUG(mut self, v: DEBUG) -> Self {
self.DEVCFG0 &= !0x00000003;
self.DEVCFG0 |= v as u32;
self
}
pub const fn JTAGEN(mut self, v: JTAGEN) -> Self {
self.DEVCFG0 &= !0x00000004;
self.DEVCFG0 |= (v as u32) << 2;
self
}
pub const fn ICESEL(mut self, v: ICESEL) -> Self {
self.DEVCFG0 &= !0x00000018;
self.DEVCFG0 |= (v as u32) << 3;
self
}
pub const fn PWP(mut self, v: PWP) -> Self {
self.DEVCFG0 &= !0x000ff000;
self.DEVCFG0 |= (v as u32) << 12;
self
}
pub const fn SMCLR(mut self, v: SMCLR) -> Self {
self.DEVCFG0 &= !0x00800000;
self.DEVCFG0 |= (v as u32) << 23;
self
}
pub const fn BWP(mut self, v: BWP) -> Self {
self.DEVCFG0 &= !0x01000000;
self.DEVCFG0 |= (v as u32) << 24;
self
}
pub const fn CP(mut self, v: CP) -> Self {
self.DEVCFG0 &= !0x10000000;
self.DEVCFG0 |= (v as u32) << 28;
self
}
pub const fn build(self) -> ConfigSector {
ConfigSector {
DEVCFG3: self.DEVCFG3,
DEVCFG2: self.DEVCFG2,
DEVCFG1: self.DEVCFG1,
DEVCFG0: self.DEVCFG0,
}
}
}
|
/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */
static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
uint32_t *ring_data,
uint32_t ring_mask,
const uint32_t data[],
uint32_t num)
{
uint32_t old_head, new_head, r_tail, num_free, i;
uint32_t size = ring_mask + 1;
do {
r_tail = odp_atomic_load_acq_u32(&ring->r_tail);
old_head = odp_atomic_load_acq_u32(&ring->w_head);
num_free = size - (old_head - r_tail);
if (num_free == 0)
return 0;
if (num > num_free)
num = num_free;
new_head = old_head + num;
} while (odp_unlikely(ring_mpmc_cas_u32(&ring->w_head, &old_head,
new_head) == 0));
for (i = 0; i < num; i++)
ring_data[(old_head + 1 + i) & ring_mask] = data[i];
while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
odp_cpu_pause();
odp_atomic_store_rel_u32(&ring->w_tail, new_head);
return num;
} |
Cointelegraph speaks to Devon Watson, Diebold’s VP of Global Software R&D, on a mobile-driven ATM, conditions for using Bitcoin in mobile banking and the implementation of Blockchain into the Diebold platform.
Diebold Inc. is a financial self-service, security and services corporation, incorporated in the United States in 1876. It is the world’s largest manufacturer of ATMs, with divisions in North America, Europe, Middle East and Africa, Asia Pacific and Latin America.
Devon Watson, Diebold’s VP of Global Software Research and Development, is an owner of bitcoin and a big fan of the Blockchain, with an insight into the whole ecosystem and protocol.
Bridging a mobile-based world
Cointelegraph: I see in Copenhagen at Money2020 Diebold showcased one of its newest mobile driven banking concepts, a mobile driven ATM. Can you explain your reasons for bringing such an advanced concept to market?
Devon Watson: The more we see digital payments take off, the more cash actually gets used in society which is sort of an inverse phenomenon to most people's line of thinking and the simple reason is when there is a new digital payments innovation for your average consumer, it's now more and more convenient to move their payment types, whether that is physical cash or some sort of a digital transaction, back and forth from fiat to digital currency.
These new terminal designs and the new software that we’ve been building helps to bridge a mobile based world. You know we will see this more and more over the next several years the cards in your wallet will be dematerialised into your phone and now that phone has to be the bridge for the consumer between their digital payment types, their banking experience and the need to deposit or withdraw their cash and other types of tenders.
Conditions for using Bitcoin
CT: It says on your website that “Diebold continues to shape the future of the mobile banking landscape by bridging the physical and digital worlds of cash in unprecedented ways.” Have you any future plans to incorporate digital currencies such as bitcoin into this landscape?
DW: That’s a great question. I think it will probably happen; the question is when. We are a provider to banks and retailers and others that want to deploy ATMs and software solutions to connect to their consumers, and I think it’s unclear to me, at least market by market, when bitcoin will join that ecosystem.
I think if you look around the world you’re starting to see the early signs of that being a payment option that providers want to adopt. When I look at certain countries in Latin America, the usage of bitcoin there seems to have skyrocketed, and that’s going to lead, in my opinion, to more adoption by the incumbents.
If governments support the usage of this payment type, that’s a good thing for its usage; if other providers within the ecosystem support it and make it easier, that’s a good thing for its usage; and then we, of course, are ready to immediately follow along and make sure consumers can transact between those two mediums.
Blockchain is no fit for a regular payment type
CT: From researching your company, it is obvious it has stood the test of time and is future-proof, being nearly 150 years old and still the biggest supplier of ATMs in the world. What are your views on Blockchain technology, and have you plans to implement it into the Diebold platform in the future?
DW: Some context on how I look into my crystal ball: I came to Diebold from software start-ups, and from a venture capital background before that, so I’m tasked with looking at how the world is changing over the next several years and with making sure that we are proactively bringing the right solutions to the marketplace to meet those opportunities and challenges.
For me, when I look at Blockchain I actually see more promise in the technology for streamlining and adding security to inefficient processes right now. Do I think old payment types are going to run on a Blockchain? Probably not in the near term.
Blockchain is a brilliant way to manage and orchestrate cryptocurrency. But when I try to shoehorn it into a regular existing payment type, probably not. You have trusted parties and scale issues and real-time problems etc.; it’s probably not the right fit.
But we are very bullish for medium- and long-term use in streamlining and securing document-intensive processes. Whether that is capturing and timestamping, securing the viewership of identity credentials, mortgage documentation or income proofs and things like that, or the automation of notary services: all of these heavy paper-based things that we still do for government reasons or for banking reasons. I think those are prime candidates to be moved to automation, which is great for me. At the end of the day an ATM is an automated outpost managing your financial life.
If I can make it easier and more secure for a consumer to walk up and scan proof of income for their mortgage documentation, making sure it’s cryptographically authenticated and allowing only one financial institution to view it, that’s a pretty interesting consumer touchpoint.
If you put something into the Blockchain, you are now in control of who can actually gain access to view it, rather than forking it over to them and letting them show it to whom they please. I think those sorts of use cases are very, very exciting.
Cash and cryptocurrencies have a lot in common
CT: With the arrival of digital currencies and ATMs specifically designed for them, do you see these as a threat? How do you see the future role of digital currencies and their associated ATMs playing out?
DW: So my point of view is that for digital currencies to reach, let’s call it, mass consumer adoption, you need entities like Circle basically putting a banking-service wrapper around a digital currency. And if that is the direction more and more banks and bank-type entities go, then obviously an ATM is a great way to extend that service capability to the consumer base. That takes time, because it has regulatory implications.
The most important thing we’ve learned from cryptocurrencies is that Blockchain opens up completely new consumer transactions, giving consumers control and anonymity for document-sensitive types of things.
Let’s take the documents and people out of application processes and notary processes; all that security and the control it gives a consumer is very powerful and really exciting, and actually beneficial both to the consumer base and to the banks that have to deal with all that, so it’s a win-win. I see this happening first.
Longer term, as cryptocurrencies become more and more mainstream through regulation and so on, cash and cryptocurrencies have a lot in common, and there is an obvious need for a bridge between the two as a consumer good. |
#include "helpers.hpp"
std::string HELPERS::resolvePath(std::string path) {
wxFileName relativeToExecutable(wxStandardPaths::Get().GetExecutablePath());
relativeToExecutable.RemoveDir(relativeToExecutable.GetDirCount() - 1);
#ifdef __WXMSW__
wxFileName fullPath(relativeToExecutable.GetPathWithSep() + path, wxPATH_NATIVE);
#endif
#ifdef DEBIAN_SYSTEM
wxFileName fullPath("/usr/share/switas/" + path, wxPATH_NATIVE);
#endif
#ifdef __APPLE__
wxFileName fullPath(relativeToExecutable.GetPathWithSep() + path, wxPATH_NATIVE);
#endif
fullPath.MakeAbsolute();
std::string res = fullPath.GetFullPath(wxPATH_NATIVE).ToStdString();
return res;
}
std::vector<std::string> HELPERS::splitString(const std::string s, char delim) {
std::vector<std::string> result;
std::stringstream ss(s);
std::string item;
while(std::getline(ss, item, delim)) {
result.push_back(item);
}
return result;
}
wxFileName HELPERS::getMainSettingsPath(std::string name) {
wxString nameString = wxString::FromUTF8(name);
wxFileName relativeToExecutable(wxStandardPaths::Get().GetExecutablePath());
// Go one folder back
// This option is for Mac too
relativeToExecutable.RemoveDir(relativeToExecutable.GetDirCount() - 1);
relativeToExecutable.SetName(nameString);
relativeToExecutable.SetExt("json");
if(relativeToExecutable.FileExists()) {
// Prefer relative to the executable
return relativeToExecutable;
} else {
// Use the home folder as a backup
wxFileName inHomeFolder(wxStandardPaths::Get().GetUserConfigDir());
inHomeFolder.SetName(nameString);
inHomeFolder.SetExt("json");
if(inHomeFolder.FileExists()) {
return inHomeFolder;
} else {
// Put in etc lol, needed for debian
wxFileName etcFolder("/etc/switas/");
etcFolder.SetName(nameString);
etcFolder.SetExt("json");
return etcFolder;
}
}
}
wxColor HELPERS::getDefaultWindowBackground() {
return wxSystemSettings::GetColour(wxSYS_COLOUR_APPWORKSPACE);
}
wxFileName HELPERS::popOffDirs(wxFileName path, uint8_t num) {
for(uint8_t i = 0; i < num; i++) {
path.RemoveDir(path.GetDirCount() - 1);
}
return path;
}
std::string HELPERS::joinString(std::vector<std::string> vec, std::string delimiter) {
if(vec.size() != 0) {
// https://stackoverflow.com/a/40052831
// clang-format off
return std::accumulate(std::next(vec.begin()), vec.end(),
vec[0],
[&delimiter](std::string& a, std::string& b) {
return a + delimiter + b;
});
// clang-format on
} else {
return "";
}
}
float HELPERS::normalizeRadian(float angle) {
float a = std::fmod(angle, 2 * M_PI);
return a >= 0 ? a : (a + 2 * M_PI);
}
rapidjson::Document HELPERS::getSettingsFile(std::string filename) {
rapidjson::Document json;
std::ifstream file(filename);
std::string content((std::istreambuf_iterator<char>(file)), (std::istreambuf_iterator<char>()));
// Allow comments in JSON
json.Parse<rapidjson::kParseCommentsFlag>(content.c_str());
return json;
}
rapidjson::Document HELPERS::getSettingsFromString(std::string jsonString) {
rapidjson::Document json;
// Allow comments in JSON
json.Parse<rapidjson::kParseCommentsFlag>(jsonString.c_str());
return json;
}
wxBitmapButton* HELPERS::getBitmapButton(wxWindow* parentFrame, rapidjson::Document* settings, const char* name) {
wxImage resizedImage(HELPERS::resolvePath((*settings)["ui"][name].GetString()));
resizedImage.Rescale((*settings)["ui"]["buttonWidth"].GetInt(), (*settings)["ui"]["buttonHeight"].GetInt());
return new wxBitmapButton(parentFrame, wxID_ANY, wxBitmap(resizedImage));
}
wxBitmapButton* HELPERS::getSystemBitmapButton(wxWindow* parentFrame, wxArtID id) {
return new wxBitmapButton(parentFrame, wxID_ANY, wxArtProvider::GetBitmap(id));
}
void HELPERS::addDarkmodeWindows(wxWindow* window) {
#ifdef __WXMSW__
// Enable dark mode, super experimential, apparently
// needs to be applied to every window, however
SetWindowTheme(window->GetHWND(), L"DarkMode_Explorer", NULL);
window->Refresh();
#endif
}
// https://stackoverflow.com/a/478960/9329945
// Executes command and gets output
std::string HELPERS::exec(const char* cmd) {
wxArrayString outputArray;
// Technically returns result code
wxExecute(cmd, outputArray, wxEXEC_HIDE_CONSOLE);
std::size_t numOfLines = outputArray.GetCount();
if(numOfLines != 0) {
std::string output = outputArray[0].ToStdString();
for(std::size_t i = 1; i < numOfLines; i++) {
output += ("\n" + outputArray[i].ToStdString());
}
return output;
} else {
return "";
}
}
wxImage HELPERS::getImageFromJPEGData(std::vector<uint8_t> jpegBuffer) {
wxMemoryInputStream jpegStream(jpegBuffer.data(), jpegBuffer.size());
wxImage jpegImage;
jpegImage.LoadFile(jpegStream, wxBITMAP_TYPE_JPEG);
return jpegImage;
}
wxString HELPERS::calculateDhash(wxImage image, int dhashWidth, int dhashHeight) {
// Returns dhash as binary, 010011010, in string
// https://github.com/telows/Image-Bath/blob/master/dhash.cpp
wxImage copy = image.ConvertToGreyscale();
copy.Rescale(dhashWidth, dhashHeight, wxIMAGE_QUALITY_NORMAL);
char dhash[(dhashWidth - 1) * dhashHeight];
unsigned char* imagePointer = copy.GetData();
int dhashIndex = 0;
for(int y = 0; y < dhashHeight; y++) {
for(int x = 1; x < dhashWidth; x++) {
int thisPixelPointer = ((y * dhashWidth) + x) * 3;
			unsigned char leftPixel = imagePointer[thisPixelPointer - 3]; // previous pixel starts 3 bytes (one RGB triple) back
unsigned char rightPixel = imagePointer[thisPixelPointer];
dhash[dhashIndex] = leftPixel > rightPixel ? '1' : '0';
dhashIndex++;
}
}
return wxString(dhash, sizeof(dhash));
}
const int HELPERS::getHammingDistance(wxString string1, wxString string2) {
int counter = 0;
for(int i = 0; i < string1.size(); i++) {
if(string1.GetChar(i) != string2.GetChar(i)) {
counter++;
}
}
return counter;
}
wxBitmap* HELPERS::getDefaultSavestateScreenshot() {
wxImage defaultImg(1280, 720);
// Set all of it to a pretty grey
defaultImg.SetRGB(wxRect(0, 0, 1280, 720), 76, 82, 92);
return new wxBitmap(defaultImg);
}
std::string HELPERS::makeRelative(std::string path, std::string rootDir) {
wxFileName newPath(wxString::FromUTF8(path));
newPath.MakeRelativeTo(wxString::FromUTF8(rootDir));
return newPath.GetFullPath().ToStdString();
}
std::string HELPERS::makeFromRelative(std::string path, std::string rootDir) {
return rootDir + "/" + path;
} |
/**
*
* @author Gaurav Gupta
*/
public class HierarchyWindowController extends WindowController {
public HierarchyWindowController(DocumentWindowController documentWindowController) {
super(HierarchyWindowController.class.getResource("resource/HierarchyWindow.fxml"), documentWindowController);
}
@Override
public void loadEditor(final EditorController editorController) {
hierarchyPanelHost.getChildren().add(this.getDocumentWindowController().getHierarchyPanelController().getPanelRoot());
this.getDocumentWindowController().getInfoPanelController().getPanelRoot();
// infoPanelHost.getChildren().add(this.getDocumentWindowController().getInfoPanelController().getPanelRoot());
// documentAccordion.setExpandedPane(documentAccordion.getPanes().get(0));
}
@FXML
public MenuButton menuButton;
// @FXML
// public TitledPane Hierarchy;
@FXML
public StackPane hierarchyPanelHost;
@FXML
public RadioMenuItem showNodeIdMenuItem;
@FXML
public RadioMenuItem showFxIdMenuItem;
// @FXML
// public Accordion documentAccordion;
// @FXML
// public StackPane infoPanelHost;
@FXML
public RadioMenuItem showInfoMenuItem;
// @FXML
// public TitledPane Info;
@FXML
public ToggleGroup hierarchyDisplayOptionTG;
//
// Hierarchy menu
//
@FXML
public void onHierarchyShowInfo(ActionEvent event) {
this.getDocumentWindowController().getHierarchyPanelController().setDisplayOption(AbstractHierarchyPanelController.DisplayOption.INFO);
// documentAccordion.setExpandedPane(documentAccordion.getPanes().get(0));
}
@FXML
public void onHierarchyShowFxId(ActionEvent event) {
this.getDocumentWindowController().getHierarchyPanelController().setDisplayOption(AbstractHierarchyPanelController.DisplayOption.FXID);
// documentAccordion.setExpandedPane(documentAccordion.getPanes().get(0));
}
@FXML
public void onHierarchyShowNodeId(ActionEvent event) {
this.getDocumentWindowController().getHierarchyPanelController().setDisplayOption(AbstractHierarchyPanelController.DisplayOption.NODEID);
// documentAccordion.setExpandedPane(documentAccordion.getPanes().get(0));
}
} |
// src/pages/home/home.page.ts
import { Component } from '@angular/core';
import { Nav } from 'ionic-angular';
import { AuthService } from '../../providers/auth-service';
import { CalendarPage } from '../calendar/calendar.page';
import { ThemeableBrowserPage } from '../themeable-browser/themeable-browser.page';
import { Tile } from './models/tile.model';
import { EmailService } from '../../services/email.service';
import { CallService } from '../../services/call.service';
import { MapsService } from '../../services/maps.service';
import { InAppBrowserService } from '../../services/in-app-browser.service';
import { data } from './home-data';
import { User } from "../../models/User";
@Component({
templateUrl: 'home.html'
})
export class HomePage {
public tiles: Tile[][];
private emailService: EmailService;
private callService: CallService;
private mapsService: MapsService;
private browserService: InAppBrowserService;
private nav: Nav;
  userInfo = new User();
constructor(
emailService: EmailService,
callService: CallService,
mapsService: MapsService,
browserService: InAppBrowserService,
nav: Nav,
private auth: AuthService
) {
this.emailService = emailService;
this.callService = callService;
this.mapsService = mapsService;
this.browserService = browserService;
this.nav = nav;
this.initTiles();
    this.userInfo = this.auth.getUserInfo();
    console.log(this.userInfo);
}
public navigateTo(tile) {
this.nav.setRoot(tile.component);
}
public getDirections() {
this.mapsService.openMapsApp(data.officeLocation);
}
public sendEmail() {
this.emailService.sendEmail(data.email);
}
public openFacebookPage() {
this.browserService.open(data.facebook);
}
public callUs() {
this.callService.call(data.phoneNumber);
}
private initTiles(): void {
this.tiles = [[{
title: 'Calendar',
path: 'calendar',
icon: 'calendar',
component: CalendarPage
}, {
title: 'Themeable browser',
path: 'themeable-browser',
icon: 'color-palette',
component: ThemeableBrowserPage
}]];
}
}
|
import { ReactComponent } from "./round-shuffle-24px.svg";
export const ShuffleIcon = ReactComponent;
|