content
stringlengths 10
4.9M
|
---|
import numpy as np
import cv2

# Load the image; OpenCV stores channels in BGR order.
image = cv2.imread("imori.jpg")

# Convert to grayscale with BT.709 luma weights; the weight vector is
# ordered (B, G, R) to match OpenCV's channel layout.
gray = (image * np.array([0.0722, 0.7152, 0.2126])).sum(axis=2)

# Binarize with a fixed threshold of 128.
binary = np.where(gray < 128, 0, 255)

cv2.imshow("imori", binary.astype(np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()
|
def modified_action(elem, sony, paperpile, modtime, *,
                    dry_run=True, verbose=True):
    """Synchronize a file between its Sony and Paperpile copies.

    Compares the on-disk modification times of ``sony`` and ``paperpile``
    against the last recorded sync time ``modtime``, with a 10-second slop
    to absorb filesystem timestamp granularity.  Whichever side changed is
    copied over the other; when both sides changed, the Sony copy wins.

    Args:
        elem: Mutable mapping whose ``'modtime'`` entry records the sync time.
        sony: Path to the Sony-side file.
        paperpile: Path to the Paperpile-side file.
        modtime: Timestamp (seconds since epoch) of the last sync.
        dry_run: If True (default), only report what would happen.
        verbose: If True, print the copy action even when actually copying.
    """
    smod = os.stat(sony).st_mtime
    pmod = os.stat(paperpile).st_mtime
    print('current: %i, sony: %i, paperpile: %i' % (modtime, smod, pmod))
    if smod > modtime + 10:
        print('sony newer')
        if pmod > modtime + 10:
            print('both %s and %s changed; favoring Sony.' %
                  (sony, paperpile))
        if dry_run or verbose:
            print('copying updated %s to %s' % (sony, paperpile))
        if not dry_run:
            # Copy first so a failed copy does not record a bogus sync time.
            shutil.copy2(sony, paperpile)
            elem['modtime'] = smod
    elif pmod > modtime + 10:
        print('paperpile newer')
        if dry_run or verbose:
            # Message made consistent with the sony branch ('updated').
            print('copying updated %s to %s' % (paperpile, sony))
        if not dry_run:
            shutil.copy2(paperpile, sony)
            elem['modtime'] = pmod
<gh_stars>100-1000
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import type { DockTabStoreDependencies } from "../dock-tab-store/dock-tab.store";
import { DockTabStore } from "../dock-tab-store/dock-tab.store";
/**
 * Dependencies needed to construct a {@link CreateResourceTabStore}.
 * Currently identical to the base {@link DockTabStoreDependencies}.
 */
export interface CreateResourceTabStoreDependencies extends DockTabStoreDependencies {
}

/**
 * Dock-tab store holding per-tab string data for "Create resource" dock
 * tabs, configured with the storage key "create_resource".
 */
export class CreateResourceTabStore extends DockTabStore<string> {
  constructor(protected readonly dependencies: CreateResourceTabStoreDependencies) {
    super(dependencies, {
      storageKey: "create_resource",
    });
  }
}
|
<reponame>DOREMUS-ANR/diabolo-converter<gh_stars>0
package org.doremus.diaboloConverter.files;
import org.doremus.diaboloConverter.Utils;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
@XmlAccessorType(XmlAccessType.FIELD)
public class Place extends DiaboloRecord {
  // Bracketed content that is actually a displaced article/prefix and should
  // be moved back in front of the label, e.g. "VALETTE (LA)" -> "LA VALETTE".
  private static final String PREFIX_REGEX = "(?i)(.+ d['ue]s?|chateau|la|les?)";
  // Bracketed content of the form "(VILLE <country>)".
  private static final String CITY_COUNTRY_REGEX = "(?i)ville (.+)";
  // Bracketed content of the form "(ILE D'<country>)" or "(ILES D'<country>)".
  private static final String ISLAND_COUNTRY_REGEX = "(?i)iles? d'(.+)";
  // Bracketed content of the form "(PROVINCE <country>)".
  private static final String PROVINCE_COUNTRY_REGEX = "(?i)province (.+)";

  @XmlElement(name = "DORIS_KEY")
  private String id;
  @XmlElement(name = "DE")
  private String originalLabel;
  @XmlElement(name = "NA")
  private String comment;
  @XmlElement(name = "DOMAINE")
  private String domain;

  // Computed lazily by getLabel() while parsing originalLabel.
  private String label = null;
  // GeoNames-like feature code (see TYPES) or a custom code set in getLabel().
  private String type = null;
  // Country *name* as found in the source (a key of COUNTRIES); translated
  // to an ISO code by getCountry().
  private String country = null;

  @Override
  public String getId() {
    return id;
  }

  /** Returns the free-text comment with whitespace runs collapsed, or null if empty. */
  public String getComment() {
    String c = Utils.notEmptyString(comment);
    if (c == null) return null;
    return c.replaceAll("\\s+", " ");
  }

  public String getOriginalLabel() {
    return originalLabel;
  }

  /**
   * Returns the cleaned-up label, computed (and cached) on first call.
   * Strips every bracketed group from the original label and interprets it:
   * displaced prefixes are moved back in front, "(VILLE X)"/"(PROVINCE X)"/
   * "(ILE D'X)" groups set the type and country, and remaining groups are
   * looked up in the TYPES and COUNTRIES tables.
   */
  public String getLabel() {
    if (this.label != null) return this.label;
    this.label = originalLabel;

    // e.g. "VALETTE (LA) (VILLE)"
    Matcher m = Utils.BRACKETS_PATTERN.matcher(label);
    while (m.find()) {
      String content = m.group(1).trim();
      label = label.replace(m.group(0), "").trim();

      if (content.matches(PREFIX_REGEX)) {
        label = content + " " + label; // VALETTE (LA) -> LA VALLETTE
        if (content.startsWith("REPUBLIQUE")) type = "PCLI";
        else if (content.startsWith("ILES")) type = "ISLS";
        else if (content.startsWith("ILE")) type = "ISL";
        else if (content.startsWith("MONTS")) type = "MTS";
        else if (content.startsWith("MONT")) type = "MT";
        else if (content.startsWith("CHATEAU")) type = "S";
        continue;
      }
      if (content.matches(CITY_COUNTRY_REGEX)) {
        type = "city";
        country = content.split(" ", 2)[1]; // (VILLE ANGLETERRE)
        continue;
      }
      if (content.matches(PROVINCE_COUNTRY_REGEX)) {
        type = "province";
        country = content.split(" ", 2)[1]; // (PROVINCE ESPAGNE)
        continue;
      }
      if (content.matches(ISLAND_COUNTRY_REGEX)) {
        type = "island";
        country = content.split("'", 2)[1]; // (ILE D'IRLANDE)
        continue;
      }
      if (TYPES.get(content) != null) type = TYPES.get(content);
        // FIX: the original stored COUNTRIES.get(content) (an ISO code) here,
        // which getCountry() then looked up in COUNTRIES *again* and always
        // missed. Keep the raw name so getCountry() can translate it.
      else if (COUNTRIES.containsKey(content)) country = content;
    }
    return label;
  }

  /** Returns the continent code(s) for this record's domain, if available. */
  public String getContinent() {
    if (domain == null || domain.isEmpty()) return null;
    return CONTINENTS.getOrDefault(this.domain, null);
  }

  /** Returns the ISO country code for this place, or null if unknown. */
  public String getCountry() {
    if (country != null)
      return COUNTRIES.getOrDefault(country, null);
    if ("FEDERATION DE RUSSIE".equals(domain)) return "RU";
    // French "departements" are by definition in France.
    if ("DPT".equals(type)) return "FR";
    return null;
  }

  public String getType() {
    return type;
  }

  /** True if this record describes a people/community rather than a place. */
  public boolean isPeople() {
    return "PEUPLES".equals(domain) || "PEOPLE".equals(type);
  }

  /** True if this record should not be resolved against GeoNames. */
  public boolean noGeoNames() {
    // FIX: the original used "TERRITOIRES ANCIENS".contains(domain), which
    // inverts the intended check (and NPEs when domain is null).
    return isPeople() || "TERRITOIRES ANCIENS".equals(domain)
      || "REGIONS POLAIRES".equals(domain);
  }

  // Continent codes by Diabolo domain (GeoNames continent codes).
  private static final Map<String, String> CONTINENTS =
    Collections.unmodifiableMap(new HashMap<String, String>() {{
      put("AMERIQUE", "NA,SA");
      put("EUROPE", "EU");
      put("OCEANIE", "OC");
      put("AFRIQUE", "AF");
      put("ASIE", "AS");
    }});

  // Feature codes by French keyword.
  // See also http://www.geonames.org/export/codes.html
  private static final Map<String, String> TYPES =
    Collections.unmodifiableMap(new HashMap<String, String>() {{
      put("VILLE", "P");
      put("REGION", "RGN,A");
      put("PROVINCE", "A");
      put("COMTE", "A");
      put("DUCHE", "PCLI");
      put("REP", "PCLI");
      put("ETAT", "PCLI,ADM1");
      put("CANTON", "ADM1");
      put("DPT", "ADM2");
      put("SULTANAT", "PCLI");
      put("AEROPORT", "AIRP");
      put("CHATEAU", "US");
      put("CENTRE SPATIAL", "CTRS");
      put("ILE", "ISL");
      put("ILES", "ISLS");
      put("MONTAGNES", "MTS");
      put("MONTAGNE", "MT");
      put("MONT", "MT");
      put("PLATEAU", "UPLD");
      put("PLAINE", "RGN");
      put("CAP", "CAPE");
      put("LAC", "LK");
      put("FLEUVE", "STM");
      put("RIVIERE", "STM");
      put("MER", "SEA");
      put("COMMUNAUTE", "PEOPLE"); // custom code
      put("PEUPLE", "PEOPLE"); // custom code
    }});

  // ISO 3166 country codes by French country/region name. Sub-national
  // names (e.g. "SICILE", "OHIO") map to the code of their country.
  private static final Map<String, String> COUNTRIES =
    Collections.unmodifiableMap(new HashMap<String, String>() {{
      put("ETATS-UNIS", "US");
      put("ETATS UNIS", "US");
      put("ITALIE", "IT");
      put("SICILE", "IT");
      put("GRECE", "GR");
      put("FRANCE", "FR");
      put("PAYS DE LA LOIRE", "FR");
      put("ESPAGNE", "ES");
      put("ANDALOUSIE", "ES");
      put("CASTILLE", "ES");
      put("ROUMANIE", "RO");
      put("CHINE", "CN");
      put("INDE", "IN");
      put("IRLANDE", "IE");
      put("ANGLETERRE", "GB");
      put("ECOSSE", "GB");
      put("BELGIQUE", "BE");
      put("BULGARIE", "BG");
      put("TURQUIE", "TR");
      put("PAKISTAN", "PK");
      put("VENEZUELA", "VE");
      put("JAMAIQUE", "JM");
      put("AUTRICHE", "AT");
      put("CARINTHIE", "AT");
      put("OHIO", "US");
      put("ALABAMA", "US");
      put("MASSACHUSETTS", "US");
      put("<NAME>", "US");
      put("OREGON", "US");
      put("VIRGINIE", "US");
      put("MAINE", "US");
      put("PENNSYLVANIE", "US");
      put("<NAME>", "US");
      put("NEW JERSEY", "US");
      put("ETAT DE NEW YORK", "US");
      put("CANADA", "CA");
      put("QUEBEC", "CA");
      put("COLOMBIE", "CO");
      put("MEXIQUE", "MX");
      put("SUISSE", "CH");
      put("<NAME>", "NL");
      put("NORVEGE", "NO");
      put("<NAME>", "PR");
      put("MARTINIQUE", "MQ");
    }});
}
|
/**
* This class implements the <code>ItemDefinition</code> interface.
* All method calls are delegated to the wrapped {@link org.apache.jackrabbit.spi.QItemDefinition},
* performing the translation from <code>Name</code>s to JCR names
* (and vice versa) where necessary.
*/
abstract class ItemDefinitionImpl implements ItemDefinition {
/**
* Logger instance for this class
*/
private static Logger log = LoggerFactory.getLogger(ItemDefinitionImpl.class);
/**
* Literal for 'any name'.
*/
protected static final String ANY_NAME = "*";
/**
* The namespace resolver used to translate <code>Name</code>s to JCR name strings.
*/
protected final NamePathResolver resolver;
/**
* The node type manager of this session.
*/
protected final AbstractNodeTypeManager ntMgr;
/**
* The wrapped item definition.
*/
protected final QItemDefinition itemDef;
/**
* Package private constructor to create a definition that is based on
* a template.
*
* @param itemDef item definition
* @param resolver
*/
ItemDefinitionImpl(QItemDefinition itemDef, NamePathResolver resolver) {
this(itemDef, null, resolver);
}
/**
* Package private constructor to create a definition that is based on
* an existing node type.
*
* @param itemDef
* @param ntMgr
* @param resolver
*/
ItemDefinitionImpl(QItemDefinition itemDef, AbstractNodeTypeManager ntMgr, NamePathResolver resolver) {
this.itemDef = itemDef;
this.resolver = resolver;
this.ntMgr = ntMgr;
}
//-----------------------------------------------------< ItemDefinition >---
/**
* {@inheritDoc}
*/
public NodeType getDeclaringNodeType() {
if (ntMgr == null) {
// only a template
return null;
} else {
try {
return ntMgr.getNodeType(itemDef.getDeclaringNodeType());
} catch (NoSuchNodeTypeException e) {
// should never get here
log.error("declaring node type does not exist", e);
return null;
}
}
}
/**
* {@inheritDoc}
*/
public String getName() {
if (itemDef.definesResidual()) {
return ANY_NAME;
} else {
try {
return resolver.getJCRName(itemDef.getName());
} catch (NamespaceException e) {
// should never get here
log.error("encountered unregistered namespace in property name", e);
// not correct, but an acceptable fallback
return itemDef.getName().toString();
}
}
}
/**
* {@inheritDoc}
*/
public int getOnParentVersion() {
return itemDef.getOnParentVersion();
}
/**
* {@inheritDoc}
*/
public boolean isAutoCreated() {
return itemDef.isAutoCreated();
}
/**
* {@inheritDoc}
*/
public boolean isMandatory() {
return itemDef.isMandatory();
}
/**
* {@inheritDoc}
*/
public boolean isProtected() {
return itemDef.isProtected();
}
//-------------------------------------------------------------< Object >---
/**
* {@inheritDoc}
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ItemDefinitionImpl)) {
return false;
}
return itemDef.equals(((ItemDefinitionImpl) o).itemDef);
}
/**
* {@inheritDoc}
*/
@Override
public int hashCode() {
return itemDef.hashCode();
}
} |
#include<bits/stdc++.h>
typedef long long ll;
using namespace std;
#define endl "\n"
ll GCD(ll x)
{
ll sum = 0,tmp=x;
while(tmp>0){
sum+=tmp%10;
tmp/=10;
}
ll gcd = __gcd(x,sum);
return gcd;
}
int main()
{
    long long testCases;
    cin >> testCases;
    while (testCases--) {
        long long n;
        cin >> n;
        // Among any three consecutive integers one is divisible by 3, and a
        // multiple of 3 has a digit sum divisible by 3, so one of the first
        // three candidates always has GCD(candidate) != 1.
        for (long long candidate = n; candidate <= n + 2; ++candidate) {
            if (GCD(candidate) != 1) {
                cout << candidate << endl;
                break;
            }
        }
    }
    return 0;
}
|
def makePoint(self, point):
    """Parse a "lat,lon" string into a JTS Coordinate.

    The input is split on the first comma; surrounding whitespace around
    either component is ignored.  JTS Coordinates are (x, y) == (lon, lat),
    hence the swapped argument order in the constructor call.

    Args:
        point: String of the form "<latitude>,<longitude>".

    Returns:
        A com.vividsolutions.jts.geom.Coordinate (Jython/JTS).

    Raises:
        ValueError: If the string has no comma or a part is not a number.
    """
    from com.vividsolutions.jts.geom import Coordinate
    # split() replaces the original manual index arithmetic, which sliced
    # point[0:ind-1] and silently dropped the final character of the
    # latitude unless it happened to be padded with a space.
    latStr, lonStr = point.split(",", 1)
    lat = float(latStr.strip())
    lon = float(lonStr.strip())
    return Coordinate(lon, lat)
/**
* Takes a copy of the cells, possibly filtering cells in hidden columns and rows, and cells if a window is present.
* Note filtering of {@link #labels} will happen later.
*/
private Set<SpreadsheetCell> filterCells(final Set<SpreadsheetCell> cells) {
return filterCells(
cells,
this.columns,
this.rows,
this.window()
);
} |
<gh_stars>1-10
use crate::{bitcoind::BitcoindError, config::BitcoindConfig};
use revault_tx::bitcoin::{
consensus::encode, Amount, BlockHash, OutPoint, Transaction as BitcoinTransaction,
};
use std::{
any::Any,
fs, process,
str::FromStr,
thread,
time::{Duration, Instant},
};
use jsonrpc::{
arg,
client::Client,
simple_http::{Error as HttpError, SimpleHttpTransport},
};
use serde_json::Value as Json;
// Hard upper bound on a single RPC call. Nothing we issue should be
// long-running (no rescans, and bitcoind's sync is checked at startup),
// but we stay over-precautionous: losing the response to a call usually
// means we have to crash and stop watching the funds.
const RPC_REQ_TIMEOUT_SEC: u64 = 600;

// For how long we keep retrying after a communication error with the
// bitcoind server before giving up.
const RPC_REQ_RETRY_TIMEOUT_SEC: u64 = 300;

/// Whether the retry window that opened at `start` has elapsed by `now`.
fn retry_timeout_exceeded(now: Instant, start: Instant) -> bool {
    let retry_window = Duration::from_secs(RPC_REQ_RETRY_TIMEOUT_SEC);
    now.duration_since(start) > retry_window
}
/// Handle on a running bitcoind, wrapping the JSONRPC clients used to
/// query it.
#[derive(Debug)]
pub struct BitcoinD {
    // For generalistic node RPC commands
    node_client: Client,
    // For watchonly RPC commands to the vaults descriptors
    vault_client: Client,
    // TODO: feebumping wallet
}
// Builds a fixed-size array of JSONRPC arguments by applying `arg()` to
// every parameter; a trailing comma is allowed.
macro_rules! params {
    ($($param:expr),* $(,)?) => {
        [
            $(
                arg($param),
            )*
        ]
    };
}
impl BitcoinD {
pub fn new(
config: &BitcoindConfig,
vault_wallet_path: String,
) -> Result<BitcoinD, BitcoindError> {
let cookie_string =
fs::read_to_string(&config.cookie_path).map_err(BitcoindError::CookieFile)?;
// Create a dummy client with a low timeout first to test the connection
let dummy_node_client = Client::with_transport(
SimpleHttpTransport::builder()
.url(&config.addr.to_string())
.map_err(BitcoindError::from)?
.timeout(Duration::from_secs(3))
.cookie_auth(cookie_string.clone())
.build(),
);
let req = dummy_node_client.build_request("echo", &[]);
dummy_node_client.send_request(req.clone())?;
let node_client = Client::with_transport(
SimpleHttpTransport::builder()
.url(&config.addr.to_string())
.map_err(BitcoindError::from)?
.timeout(Duration::from_secs(RPC_REQ_TIMEOUT_SEC))
.cookie_auth(cookie_string.clone())
.build(),
);
// Create a dummy client with a low timeout first to test the connection
let url = format!("http://{}/wallet/{}", config.addr, vault_wallet_path);
let dummy_vault_client = Client::with_transport(
SimpleHttpTransport::builder()
.url(&url)
.map_err(BitcoindError::from)?
.timeout(Duration::from_secs(3))
.cookie_auth(cookie_string.clone())
.build(),
);
let req = dummy_vault_client.build_request("echo", &[]);
dummy_vault_client.send_request(req.clone())?;
let vault_client = Client::with_transport(
SimpleHttpTransport::builder()
.url(&url)
.map_err(BitcoindError::from)?
.timeout(Duration::from_secs(RPC_REQ_TIMEOUT_SEC))
.cookie_auth(cookie_string)
.build(),
);
Ok(BitcoinD {
node_client,
vault_client,
})
}
// Try to be robust against spurious communication failures as much as possible.
fn handle_error(
&self,
e: jsonrpc::Error,
start: Instant,
is_startup: bool,
) -> Result<(), BitcoindError> {
let now = Instant::now();
match e {
jsonrpc::Error::Transport(ref err) => {
log::error!("Transport error when talking to bitcoind: '{}'", err);
// This is *always* a simple_http::Error. Rule out the error that can
// not occur after startup (ie if we encounter them it must be startup
// and we better be failing quickly).
let any_err = err as &dyn Any;
if let Some(http_err) = any_err.downcast_ref::<HttpError>() {
match http_err {
HttpError::InvalidUrl { .. } => return Err(BitcoindError::Server(e)),
HttpError::SocketError(_) => {
// On startup, we want to fail ASAP if there is an issue with the
// connection. On the other hand we certainly don't afterward if
// there is a spurious error!
if is_startup || retry_timeout_exceeded(now, start) {
return Err(BitcoindError::Server(e));
}
thread::sleep(Duration::from_secs(1));
}
HttpError::HttpParseError => {
// Weird. Try again once, just in case.
if now.duration_since(start) > Duration::from_secs(1) {
return Err(BitcoindError::Server(e));
}
thread::sleep(Duration::from_secs(1));
}
_ => {}
}
}
// This one *may* happen. For a number of reasons, the obvious one being
// that the RPC work queue is exceeded.
if retry_timeout_exceeded(now, start) {
return Err(BitcoindError::Server(e));
}
thread::sleep(Duration::from_secs(1));
log::debug!("Retrying RPC request to bitcoind.");
}
jsonrpc::Error::Rpc(ref err) => {
log::error!("JSONRPC error when talking to bitcoind: '{:?}'", err);
if retry_timeout_exceeded(now, start) {
return Err(BitcoindError::Server(e));
}
thread::sleep(Duration::from_secs(1));
}
jsonrpc::Error::Json(ref err) => {
// Weird. A JSON serialization error? Just try again but
// fail fast anyways as it should not happen.
log::error!(
"JSON serialization error when talking to bitcoind: '{}'",
err
);
if now.duration_since(start) > Duration::from_secs(1) {
return Err(BitcoindError::Server(e));
}
thread::sleep(Duration::from_millis(500));
log::debug!("Retrying RPC request to bitcoind.");
}
_ => return Err(BitcoindError::Server(e)),
};
Ok(())
}
fn make_request<'a, 'b>(
&self,
client: &Client,
method: &'a str,
params: &'b [Box<serde_json::value::RawValue>],
fail_fast: bool,
) -> Result<Json, BitcoindError> {
let req = client.build_request(method, ¶ms);
log::trace!("Sending to bitcoind: {:#?}", req);
// If we are explicitly told to not try again, don't.
if fail_fast {
return client
.send_request(req.clone())
.map_err(BitcoindError::Server)?
.result()
.map_err(BitcoindError::Server);
}
// Trying to be robust on bitcoind's spurious failures.
let start = Instant::now();
loop {
match client.send_request(req.clone()) {
Ok(resp) => {
log::trace!("Got from bitcoind: {:#?}", resp);
match resp.result() {
Ok(res) => return Ok(res),
Err(e) => {
self.handle_error(e, start, false)?;
}
};
}
Err(e) => {
// Decide wether we should error, or not yet
self.handle_error(e, start, false)?;
}
}
}
}
fn make_node_request<'a, 'b>(
&self,
method: &'a str,
params: &'b [Box<serde_json::value::RawValue>],
) -> Json {
self.make_request(&self.node_client, method, params, false)
.unwrap_or_else(|e| {
log::error!("Fatal bitcoind RPC error (node client): '{}'", e);
process::exit(1);
})
}
fn make_node_request_failible<'a, 'b>(
&self,
method: &'a str,
params: &'b [Box<serde_json::value::RawValue>],
) -> Result<Json, BitcoindError> {
self.make_request(&self.node_client, method, params, true)
}
/// Network name as returned by 'getblockchainfo'
pub fn bip70_net(&self) -> String {
self.make_node_request("getblockchaininfo", &[])
.get("chain")
.and_then(|c| c.as_str())
.expect("No 'chain' in 'getblockchaininfo' response?")
.to_string()
}
/// Fetch info about bitcoind's synchronization status
pub fn synchronization_info(&self) -> SyncInfo {
let chaininfo = self.make_node_request("getblockchaininfo", &[]);
SyncInfo {
headers: chaininfo
.get("headers")
.and_then(|h| h.as_u64())
.expect("No valid 'headers' in getblockchaininfo response?"),
blocks: chaininfo
.get("blocks")
.and_then(|b| b.as_u64())
.expect("No valid 'blocks' in getblockchaininfo response?"),
ibd: chaininfo
.get("initialblockdownload")
.and_then(|i| i.as_bool())
.expect("No valid 'initialblockdownload' in getblockchaininfo response?"),
progress: chaininfo
.get("verificationprogress")
.and_then(|i| i.as_f64())
.expect("No valid 'verificationprogress' in getblockchaininfo response?"),
}
}
/// Create a descriptor watchonly wallet
pub fn createwallet(&self, wallet_path: String) -> Result<(), BitcoindError> {
let res = self.make_node_request_failible(
"createwallet",
¶ms!(
Json::String(wallet_path),
Json::Bool(true), // watchonly
Json::Bool(false), // blank
Json::String("".to_string()), // passphrase,
Json::Bool(false), // avoid_reuse
Json::Bool(true), // descriptors
Json::Bool(true), // load_on_startup
),
)?;
if let Some(w) = res.get("warning") {
log::warn!("Warning creating wallet: '{}'", w);
}
Ok(())
}
/// Get a list of the name of loaded wallets on bitcoind
pub fn listwallets(&self) -> Vec<String> {
self.make_node_request("listwallets", &[])
.as_array()
.expect("API break, 'listwallets' didn't return an array.")
.into_iter()
.map(|json_str| {
json_str
.as_str()
.expect("API break: 'listwallets' contains a non-string value")
.to_string()
})
.collect()
}
/// Load a watchonly wallet. Failible since called at startup.
pub fn loadwallet(&self, wallet_path: String) -> Result<(), BitcoindError> {
let res = self.make_node_request_failible(
"loadwallet",
¶ms!(
Json::String(wallet_path),
Json::Bool(true), // load_on_startup
),
)?;
if let Some(w) = res.get("warning") {
log::warn!("Warning loading wallet: '{}'", w);
}
Ok(())
}
/// Unload a watchonly wallet.
pub fn unloadwallet(&self, wallet_path: String) -> Result<(), BitcoindError> {
let res =
self.make_node_request_failible("unloadwallet", ¶ms!(Json::String(wallet_path),))?;
if let Some(w) = res.get("warning") {
log::warn!("Warning unloading wallet: '{}'", w);
}
Ok(())
}
/// Get the (height, hash) pair of the current best block
pub fn chain_tip(&self) -> ChainTip {
let chaininfo = self.make_node_request("getblockchaininfo", &[]);
ChainTip {
height: chaininfo
.get("blocks")
.and_then(|b| b.as_i64())
.expect("No valid 'blocks' in getblockchaininfo response?")
as i32,
hash: BlockHash::from_str(
chaininfo
.get("bestblockhash")
.and_then(|i| i.as_str())
.expect("No valid 'bestblockhash' in getblockchaininfo response?"),
)
.expect("Not a valid block hash in 'bestblockhash' field?"),
}
}
/// Get the hash of the block at this height
pub fn block_hash(&self, height: i32) -> BlockHash {
BlockHash::from_str(
self.make_node_request("getblockhash", ¶ms!(height))
.as_str()
.expect("'getblockhash' didn't return a string."),
)
.expect("'getblockhash' returned an invalid block hash")
}
/// Get information about this tx output, if it is in the best block chain and unspent.
pub fn utxoinfo(&self, outpoint: &OutPoint) -> Option<UtxoInfo> {
let res = self.make_node_request(
"gettxout",
¶ms!(
outpoint.txid,
outpoint.vout,
false // include_mempool
),
);
// It returns null on "not found"
if res == Json::Null {
return None;
}
let confirmations = res
.get("confirmations")
.and_then(|c| c.as_i64())
.expect("'gettxout' didn't return a valid 'confirmations' value");
let bestblock = res
.get("bestblock")
.and_then(|bb| bb.as_str())
.and_then(|bb_str| BlockHash::from_str(bb_str).ok())
.expect("'gettxout' didn't return a valid 'bestblock' value");
let value = res
.get("value")
.and_then(|v| v.as_f64())
.and_then(|v| Amount::from_btc(v).ok())
.expect("'gettxout' didn't return a valid 'value' entry");
Some(UtxoInfo {
confirmations,
bestblock,
value,
})
}
/// Broadcast this transaction to the Bitcoin network
pub fn broadcast_tx(&self, tx: &BitcoinTransaction) -> Result<(), BitcoindError> {
let tx_hex = encode::serialize_hex(tx);
self.make_node_request_failible("sendrawtransaction", ¶ms!(tx_hex))
.map(|_| ())
}
}
/// Info about bitcoind's sync state
pub struct SyncInfo {
    /// "headers" field of getblockchaininfo.
    pub headers: u64,
    /// "blocks" field of getblockchaininfo.
    pub blocks: u64,
    /// Whether the node is in initial block download ("initialblockdownload").
    pub ibd: bool,
    /// "verificationprogress" field of getblockchaininfo.
    pub progress: f64,
}

/// Block height and block hash of what we consider to be the block chain tip
pub struct ChainTip {
    pub height: i32,
    pub hash: BlockHash,
}

/// Info about a block chain UTXO
#[derive(Debug)]
pub struct UtxoInfo {
    /// "confirmations" field of gettxout.
    pub confirmations: i64,
    /// Best block hash at the time of the gettxout call.
    pub bestblock: BlockHash,
    /// Value of the unspent output.
    pub value: Amount,
}
|
<filename>Example/Pods/CPAPIService/CPAPIService/Classes/CPAPIService.h
//
// CPAPIService.h
// CPAPIService
//
// Created by <NAME> on 2/24/20.
// Copyright © 2020 CP Tech. All rights reserved.
//
#import <Foundation/Foundation.h>
//! Project version number for CPAPIService.
FOUNDATION_EXPORT double CPAPIServiceVersionNumber;
//! Project version string for CPAPIService.
FOUNDATION_EXPORT const unsigned char CPAPIServiceVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <CPAPIService/PublicHeader.h>
|
/**
* Template method with default implementation (which may be overridden by a
* subclass), to load or obtain an ApplicationContext instance which will be
* used as the parent context of the root WebApplicationContext. If the
* return value from the method is null, no parent context is set.
* <p>The main reason to load a parent context here is to allow multiple root
* web application contexts to all be children of a shared EAR context, or
* alternately to also share the same parent context that is visible to
* EJBs. For pure web applications, there is usually no need to worry about
* having a parent context to the root web application context.
* <p>The default implementation uses ContextSingletonBeanFactoryLocator,
* configured via {@link #LOCATOR_FACTORY_SELECTOR_PARAM} and
* {@link #LOCATOR_FACTORY_KEY_PARAM}, to load a parent context
* which will be shared by all other users of ContextsingletonBeanFactoryLocator
* which also use the same configuration parameters.
* @param servletContext current servlet context
* @return the parent application context, or <code>null</code> if none
* @throws BeansException if the context couldn't be initialized
* @see org.springframework.beans.factory.access.BeanFactoryLocator
* @see org.springframework.context.access.ContextSingletonBeanFactoryLocator
*/
protected ApplicationContext loadParentContext(ServletContext servletContext)
throws BeansException {
ApplicationContext parentContext = null;
String locatorFactorySelector = servletContext.getInitParameter(LOCATOR_FACTORY_SELECTOR_PARAM);
String parentContextKey = servletContext.getInitParameter(LOCATOR_FACTORY_KEY_PARAM);
if (locatorFactorySelector != null) {
BeanFactoryLocator locator = ContextSingletonBeanFactoryLocator.getInstance(locatorFactorySelector);
if (logger.isInfoEnabled()) {
logger.info("Getting parent context definition: using parent context key of '" +
parentContextKey + "' with BeanFactoryLocator");
}
this.parentContextRef = locator.useBeanFactory(parentContextKey);
parentContext = (ApplicationContext) this.parentContextRef.getFactory();
}
return parentContext;
} |
/*
PostAccountHolderBalance Retrieve the balance(s) of an account holder.
This endpoint is used to retrieve the balance(s) of the accounts of an account holder. An account's balances are on a per-currency basis (i.e., an account may have multiple balances: one per currency).
* @param request AccountHolderBalanceRequest - reference of AccountHolderBalanceRequest).
* @param ctxs ..._context.Context - optional, for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@return AccountHolderBalanceResponse
*/
func (a PlatformsFund) AccountHolderBalance(req *AccountHolderBalanceRequest, ctxs ..._context.Context) (AccountHolderBalanceResponse, *_nethttp.Response, error) {
res := &AccountHolderBalanceResponse{}
httpRes, err := a.Client.MakeHTTPPostRequest(req, res, a.BasePath()+"/accountHolderBalance", ctxs...)
return *res, httpRes, err
} |
// Count how many calls to next_step() it takes for the position index to
// leave the maze, i.e. to reach or exceed the number of cells.
int num_jumps(vector<int>& input) {
    const int cellCount = input.size();
    int position = 0;
    int jumps = 0;
    for (; position < cellCount; ++jumps) {
        next_step(position, input);
    }
    return jumps;
}
#pragma once
#include "Config.h"
#include "Serialize.h"
#include "ModelUtils.h"
#include "Routing.h"
#include <array>
#include <cstdint>
class Project;
class PlayState {
public:
//----------------------------------------
// Types
//----------------------------------------
enum ExecuteType {
Immediate,
Synced,
Latched,
};
    // Per-track play state: mute/fill flags, the active pattern and pending
    // (requested) changes, packed into small integer fields.
    class TrackState {
    public:
        //----------------------------------------
        // Properties
        //----------------------------------------

        // fillAmount

        // Fill amount in percent (0..100).
        int fillAmount() const { return _fillAmount; }
        // Sets the fill amount, clamped to 0..100.
        void setFillAmount(int fillAmount) {
            _fillAmount = clamp(fillAmount, 0, 100);
        }
        // Adjusts the fill amount by an edit step (step of 10 when shift is held).
        void editFillAmount(int value, bool shift) {
            setFillAmount(ModelUtils::adjustedByStep(fillAmount(), value, 10, shift));
        }
        // Formats the fill amount as a percentage, e.g. "50%".
        void printFillAmount(StringBuilder &str) const {
            str("%d%%", fillAmount());
        }

        //----------------------------------------
        // State
        //----------------------------------------

        // Current mute flag.
        bool mute() const { return _state & Mute; }
        // Mute flag that a pending request will apply.
        bool requestedMute() const { return _state & RequestedMute; }
        // True if any mute request (immediate/synced/latched) is pending.
        bool hasMuteRequest() const { return hasRequests(State::MuteRequests); }
        // Current fill flag.
        bool fill() const { return _state & Fill; }

        //----------------------------------------
        // Pattern
        //----------------------------------------

        // Currently active pattern index.
        int pattern() const { return _pattern; }
        // Pattern index that a pending request will apply.
        int requestedPattern() const { return _requestedPattern; }
        // True if any pattern request (immediate/synced/latched) is pending.
        bool hasPatternRequest() const { return hasRequests(State::PatternRequests); }

        //----------------------------------------
        // Methods
        //----------------------------------------

        void clear();

        void write(VersionedSerializedWriter &writer) const;
        void read(VersionedSerializedReader &reader);

    private:
        // Bit flags packed into _state; one request bit per ExecuteType for
        // both mute and pattern requests (bits are consecutive so they can
        // be derived by shifting with the ExecuteType value).
        enum State {
            Mute = 1<<0,
            RequestedMute = 1<<1,
            Fill = 1<<2,
            FillHold = 1<<3,
            ImmediateMuteRequest = 1<<4,
            SyncedMuteRequest = 1<<5,
            LatchedMuteRequest = 1<<6,
            ImmediatePatternRequest = 1<<7,
            SyncedPatternRequest = 1<<8,
            LatchedPatternRequest = 1<<9,
            MuteRequests = ImmediateMuteRequest | SyncedMuteRequest | LatchedMuteRequest,
            PatternRequests = ImmediatePatternRequest | SyncedPatternRequest | LatchedPatternRequest,
            ImmediateRequests = ImmediateMuteRequest | ImmediatePatternRequest,
            SyncedRequests = SyncedMuteRequest | SyncedPatternRequest,
            LatchedRequests = LatchedMuteRequest | LatchedPatternRequest
        };

        // Maps an ExecuteType to the matching mute request bit (relies on
        // the three mute request bits being consecutive).
        static State muteRequestFromExecuteType(ExecuteType type) {
            return State(int(ImmediateMuteRequest) << int(type));
        }

        // Maps an ExecuteType to the matching pattern request bit.
        static State patternRequestFromExecuteType(ExecuteType type) {
            return State(int(ImmediatePatternRequest) << int(type));
        }

        void setRequests(int requests) {
            _state |= uint16_t(requests);
        }

        void clearRequests(int requests) {
            _state &= ~uint16_t(requests);
        }

        bool hasRequests(int requests) const {
            return _state & uint16_t(requests);
        }

        void setMute(bool mute) {
            if (mute) {
                _state |= Mute;
            } else {
                _state &= ~Mute;
            }
        }

        void setRequestedMute(bool mute) {
            if (mute) {
                _state |= RequestedMute;
            } else {
                _state &= ~RequestedMute;
            }
        }

        // Sets/clears the fill flag; when FillHold is set, clearing fill is
        // suppressed until the hold itself is released.
        void setFill(bool fill, bool hold) {
            if (fill) {
                _state |= Fill;
                if (hold) {
                    _state |= FillHold;
                } else {
                    _state &= ~FillHold;
                }
            } else {
                if (!(_state & FillHold)) {
                    _state &= ~Fill;
                }
            }
        }

        void setPattern(int pattern) {
            _pattern = pattern;
        }

        void setRequestedPattern(int pattern) {
            _requestedPattern = pattern;
        }

        uint16_t _state;           // State bit flags
        uint8_t _pattern;          // active pattern index
        uint8_t _requestedPattern; // pattern index of a pending request
        uint8_t _fillAmount;       // fill amount in percent (0..100)

        friend class PlayState;
        friend class Engine;
    };
    // Song playback state: whether the song is playing, the current slot and
    // repeat counters, plus pending play/stop requests.
    class SongState {
    public:
        // True while the song is playing.
        bool playing() const { return _state & Playing; }
        // True if any play request (immediate/synced/latched) is pending.
        bool hasPlayRequests() const { return hasRequests(PlayRequests); }
        // Currently playing song slot.
        int currentSlot() const { return _currentSlot; }
        // Repeat counter within the current slot.
        int currentRepeat() const { return _currentRepeat; }
        // Slot that a pending play request will start from.
        int requestedSlot() const { return _requestedSlot; }

        void clear();

    private:
        // Bit flags packed into _state; one request bit per ExecuteType for
        // both play and stop requests (bits are consecutive so they can be
        // derived by shifting with the ExecuteType value).
        enum State {
            Playing = 1<<0,
            ImmediatePlayRequest = 1<<1,
            SyncedPlayRequest = 1<<2,
            LatchedPlayRequest = 1<<3,
            ImmediateStopRequest = 1<<4,
            SyncedStopRequest = 1<<5,
            LatchedStopRequest = 1<<6,
            PlayRequests = ImmediatePlayRequest | SyncedPlayRequest | LatchedPlayRequest,
            StopRequests = ImmediateStopRequest | SyncedStopRequest | LatchedStopRequest,
            ImmediateRequests = ImmediatePlayRequest | ImmediateStopRequest,
            SyncedRequests = SyncedPlayRequest | SyncedStopRequest,
            LatchedRequests = LatchedPlayRequest | LatchedStopRequest
        };

        // Maps an ExecuteType to the matching play request bit (play bits
        // start at bit 1).
        static State playRequestFromExecuteType(ExecuteType type) {
            return State(1<<(1 + type));
        }

        // Maps an ExecuteType to the matching stop request bit (stop bits
        // start at bit 4).
        static State stopRequestFromExecuteType(ExecuteType type) {
            return State(1<<(4 + type));
        }

        void setRequests(int requests) {
            _state |= uint8_t(requests);
        }

        void clearRequests(int requests) {
            _state &= ~uint8_t(requests);
        }

        bool hasRequests(int requests) const {
            return _state & uint8_t(requests);
        }

        void setPlaying(bool playing) {
            if (playing) {
                _state |= Playing;
            } else {
                _state &= ~Playing;
            }
        }

        void setRequestedSlot(int slot) {
            _requestedSlot = slot;
        }

        void setCurrentSlot(int slot) {
            _currentSlot = slot;
        }

        void setCurrentRepeat(int slot) {
            _currentRepeat = slot;
        }

        uint8_t _state;         // State bit flags
        uint8_t _requestedSlot; // slot of a pending play request
        uint8_t _currentSlot;   // currently playing slot
        uint8_t _currentRepeat; // repeat counter within the current slot

        friend class PlayState;
        friend class Engine;
    };
//----------------------------------------
// Properties
//----------------------------------------
// track states
const TrackState &trackState(int track) const { return _trackStates[track]; }
TrackState &trackState(int track) { return _trackStates[track]; }
// song state
const SongState &songState() const { return _songState; }
SongState &songState() { return _songState; }
//----------------------------------------
// Methods
//----------------------------------------
PlayState(Project &project);
// mutes
void muteTrack(int track, ExecuteType executeType = Immediate);
void unmuteTrack(int track, ExecuteType executeType = Immediate);
void toggleMuteTrack(int track, ExecuteType executeType = Immediate);
void muteAll(ExecuteType executeType = Immediate);
void unmuteAll(ExecuteType executeType = Immediate);
// solos
void soloTrack(int track, ExecuteType executeType = Immediate);
// fills
void fillTrack(int track, bool fill, bool hold = false);
void fillAll(bool fill, bool hold = false);
// pattern change
void selectTrackPattern(int track, int pattern, ExecuteType executeType = Immediate);
void selectPattern(int pattern, ExecuteType executeType = Immediate);
// snapshots
void createSnapshot();
void revertSnapshot(int targetPattern = -1);
void commitSnapshot(int targetPattern = -1);
bool snapshotActive() const { return _snapshot.active; }
// requests
void cancelMuteRequests();
void cancelPatternRequests();
void commitLatchedRequests() { _executeLatchedRequests = true; }
bool hasImmediateRequests() const { return _hasImmediateRequests; }
bool hasSyncedRequests() const { return _hasSyncedRequests; }
bool hasLatchedRequests() const { return _hasLatchedRequests; }
// song
void playSong(int slot, ExecuteType executeType = Immediate);
void stopSong(ExecuteType executeType = Immediate);
void clear();
void write(VersionedSerializedWriter &writer) const;
void read(VersionedSerializedReader &reader);
//----------------------------------------
// Routing
//----------------------------------------
void writeRouted(Routing::Target target, uint8_t tracks, int intValue, float floatValue);
private:
void selectTrackPatternUnsafe(int track, int pattern, ExecuteType executeType = Immediate);
// Records that a request of the given execute type is pending so the engine
// picks it up on its next pass. Exactly one of the three flags is raised.
void notify(ExecuteType executeType) {
    _hasImmediateRequests |= (executeType == Immediate);
    _hasSyncedRequests |= (executeType == Synced);
    _hasLatchedRequests |= (executeType == Latched);
}
bool executeLatchedRequests() const { return _executeLatchedRequests; }
void clearImmediateRequests() { _hasImmediateRequests = false; }
void clearSyncedRequests() { _hasSyncedRequests = false; }
void clearLatchedRequests() { _hasLatchedRequests = false; _executeLatchedRequests = false; }
Project &_project;
std::array<TrackState, CONFIG_TRACK_COUNT> _trackStates;
SongState _songState;
bool _executeLatchedRequests;
bool _hasImmediateRequests;
bool _hasSyncedRequests;
bool _hasLatchedRequests;
static constexpr int SnapshotPatternIndex = CONFIG_PATTERN_COUNT;
struct {
bool active;
uint8_t lastSelectedPatternIndex;
uint8_t lastTrackPatternIndex[CONFIG_TRACK_COUNT];
} _snapshot;
friend class Project;
friend class Engine;
};
|
# Reads p intervals [a, b] and q intervals [c, d] from stdin, then counts how
# many offsets t in [l, r] make at least one pair of intervals overlap when
# every second-group interval is shifted by t.
p, q, l, r = map(int, input().split())

a, b = [], []
c, d = [], []
for _ in range(p):
    lo, hi = map(int, input().split())
    a.append(lo)
    b.append(hi)
for _ in range(q):
    lo, hi = map(int, input().split())
    c.append(lo)
    d.append(hi)

counter = 0
for offset in range(l, r + 1):
    # Two-pointer sweep: advance whichever interval ends first; stop at the
    # first overlapping pair for this offset.
    i = j = 0
    while i < p and j < q:
        if b[i] < c[j] + offset:
            i += 1
        elif d[j] + offset < a[i]:
            j += 1
        else:
            counter += 1
            break
print(counter)
M:I 6 – Mission Impossible director Christopher McQuarrie has shared a first look at Henry Cavill with Tom Cruise in the 6th film of the franchise.
McQuarrie posted the image on Instagram; it shows him with the acting duo on set in Paris. Check it out below:
… A post shared by Christopher McQuarrie (@christophermcquarrie) on Apr 19, 2017 at 1:09pm PDT
Paramount’s latest sequel sees the return of Tom Cruise as Ethan Hunt, and he’s joined by the likes of Rebecca Ferguson, Simon Pegg, Sean Harris and Alec Baldwin. While Vanessa Kirby and Henry Cavill were late additions to the cast.
McQuarrie recently declared that production had begun with another Instagram post:
Light the fuse… (photo @rdhardy) A post shared by Christopher McQuarrie (@christophermcquarrie) on Apr 8, 2017 at 12:22am PDT
Mission: Impossible 6 hits theaters on July 27, 2018. |
#ifndef PYTHONIC_OPERATOR_DELITEM__HPP
#define PYTHONIC_OPERATOR_DELITEM__HPP

// Binds the dunder name `operator_.__delitem__` to the plain `delitem`
// implementation via pythran's function-proxy macro.
// NOTE: removed a stray "<reponame>..." scraping artifact that preceded the
// include guard and made the header invalid C++.
#include "pythonic/include/operator_/__delitem__.hpp"

#include "pythonic/operator_/delitem.hpp"

namespace pythonic
{

    namespace operator_
    {
        FPROXY_IMPL(pythonic::operator_, __delitem__, delitem);
    }
}

#endif
def _initialize_logging_with_id(self, class_name):
    """Lazily attach a logger named "<class_name>-<id>" to this instance.

    Does nothing if ``self._log`` is already set. A ``NullHandler`` is added
    so log records are silently discarded unless the application configures
    its own handlers.
    """
    if getattr(self, '_log', None) is not None:
        return  # logger already initialized
    self._log = logging.getLogger(f"{class_name}-{self.id}")
    self.add_logging_handler(logging.NullHandler())
/**
 * Evaluates IndexBounds from the given IntervalEvaluationTrees for the given query.
 * 'indexBoundsInfo' contains the interval evaluation trees.
 *
 * Returns the built index bounds.
 */
std::unique_ptr<IndexBounds> makeIndexBounds(
    const stage_builder::IndexBoundsEvaluationInfo& indexBoundsInfo, const CanonicalQuery& cq) {
    auto bounds = std::make_unique<IndexBounds>();
    bounds->fields.reserve(indexBoundsInfo.iets.size());

    // There must be one IET per key-pattern field; each IET produces the
    // ordered interval list (OIL) for the corresponding field.
    tassert(6335200,
            "IET list size must be equal to the number of fields in the key pattern",
            static_cast<size_t>(indexBoundsInfo.index.keyPattern.nFields()) ==
                indexBoundsInfo.iets.size());

    // Walk the key pattern and the IET list in lock-step.
    BSONObjIterator it{indexBoundsInfo.index.keyPattern};
    BSONElement keyElt = it.next();
    for (auto&& iet : indexBoundsInfo.iets) {
        auto oil = interval_evaluation_tree::evaluateIntervals(
            iet, cq.getInputParamIdToMatchExpressionMap(), keyElt, indexBoundsInfo.index);
        bounds->fields.emplace_back(std::move(oil));
        keyElt = it.next();
    }

    // Normalize the bounds (e.g. pad trailing fields) to match the key
    // pattern, collation, and scan direction.
    IndexBoundsBuilder::alignBounds(bounds.get(),
                                    indexBoundsInfo.index.keyPattern,
                                    indexBoundsInfo.index.collator != nullptr,
                                    indexBoundsInfo.direction);
    return bounds;
}
/**
 * Base class for subdetectors composed of layers. Provides access to the
 * {@link Layering} description and to per-layer derived material quantities:
 * radiation lengths, nuclear interaction lengths, and energy loss (dE).
 *
 * @author Jeremy McCormick <[email protected]>
 * @version $Id: AbstractLayeredSubdetector.java,v 1.10 2011/03/11 19:22:20 jeremy Exp $
 */
abstract public class AbstractLayeredSubdetector extends AbstractSubdetector implements Layered
{
    /** Layering description parsed from the subdetector's XML node. */
    protected Layering layering;
    /** Per-layer number of radiation lengths. */
    private List<Double> nrad;
    /** Per-layer number of nuclear interaction lengths. */
    private List<Double> nlam;
    /** Per-layer energy loss. */
    private List<Double> de;
    /** Cache of computed dE/dx values keyed by material name. */
    private Map<String, Double> dedxmap = new HashMap<String, Double>();
    /** Total interaction lengths summed over all layers. */
    private double intLens;
    /** Total radiation lengths summed over all layers. */
    private double radLens;

    /**
     * Creates a new instance of a LayeredSubdetector
     */
    public AbstractLayeredSubdetector( Element node ) throws JDOMException
    {
        super( node );
        build( node );

        // Initialize parameter arrays using layer count.
        nrad = new ArrayList<Double>( this.getLayering().getNumberOfLayers() );
        de = new ArrayList<Double>( this.getLayering().getNumberOfLayers() );
        nlam = new ArrayList<Double>( this.getLayering().getNumberOfLayers() );

        // Compute layer derived quantities.
        computeLayerParameters();
    }

    /** Builds the {@link Layering} from the XML node. */
    private void build( Element node ) throws JDOMException
    {
        try
        {
            // Setup layering object.
            layering = org.lcsim.geometry.layer.Layering.makeLayering( node );
        }
        catch ( JDOMException x )
        {
            throw new RuntimeException( x );
        }
    }

    /** Always true for this class hierarchy. */
    public boolean isLayered()
    {
        return true;
    }

    /** @return the layering description of this subdetector */
    public Layering getLayering()
    {
        return layering;
    }

    /** @return the summed thickness of all layers (note: method name has a historical typo) */
    public double getTotalThickess()
    {
        return layering.getThickness();
    }

    /** Sets the layering; may only be called once at initialization time. */
    protected void setLayering( Layering layering )
    {
        // May only be called once at initialization time.
        if ( this.layering == null )
            this.layering = layering;
    }

    /** @return the layer with index {@code layern} */
    public Layer getLayer( int layern )
    {
        return this.layering.getLayer( layern );
    }

    /** @return the number of layers in this subdetector */
    public int getNumberOfLayers()
    {
        return this.layering.getLayerCount();
    }

    /** @return the distance to the front of layer {@code layern} */
    public double getDistanceToLayer( int layern )
    {
        return this.layering.getDistanceToLayer( layern );
    }

    /** @return the distance to the front of the sensor in layer {@code layern} */
    public double getDistanceToSensor( int layern )
    {
        return this.layering.getDistanceToLayerSensorFront( layern );
    }

    /** @return the thickness of layer {@code layern} */
    public double getLayerThickness( int layern )
    {
        return this.layering.getLayer( layern ).getThickness();
    }

    /** @return the sensor thickness of layer {@code layern} */
    public double getSensorThickness( int layern )
    {
        return this.layering.getLayer( layern ).getSensorThickness();
    }

    /**
     * Compute the radiation and interaction lengths for each layer of this subdetector.
     * FIXME Access to the dedx information by material name should be moved into
     * IMaterial interface because map is duplicated across subdetectors. The map could
     * also be made static.
     */
    private void computeLayerParameters()
    {
        // System.out.println("nlayers = " + this.getLayering().getNumberOfLayers());
        // IMaterialStore ms = MaterialStore.getInstance();
        int nlayers = this.getNumberOfLayers();
        // Reference momentum vector used by the Bethe-Bloch calculation.
        Hep3Vector p = new BasicHep3Vector( 0., 0., 100. );
        for ( int j = 0; j < nlayers; j++ )
        {
            // System.out.println("computing layer = " + j);
            Layer layer = getLayering().getLayer( j );
            double xrad = 0.;
            double xlam = 0.;
            double xde = 0.;
            // Accumulate contributions from each material slice of this layer.
            for ( LayerSlice slice : layer.getSlices() )
            {
                Material m = slice.getMaterial();
                String materialName = m.getName();
                double dedx;
                // dE/dx is expensive to compute, so cache per material name.
                if ( dedxmap.containsKey( materialName ) )
                    dedx = dedxmap.get( materialName ).doubleValue();
                else
                {
                    // Kludge to get material state to avoid using IMaterial objects that
                    // are not instantiated yet.
                    MaterialState state = m.getState();
                    IMaterial.State istate = null;
                    if ( state == MaterialState.GAS )
                    {
                        istate = IMaterial.Gas;
                    }
                    else if ( state == MaterialState.LIQUID )
                    {
                        istate = IMaterial.Liquid;
                    }
                    else if ( state == MaterialState.SOLID )
                    {
                        istate = IMaterial.Solid;
                    }
                    else if ( state == MaterialState.UNKNOWN )
                    {
                        istate = IMaterial.Unknown;
                    }
                    // NOTE(review): the trailing "/ 10000" appears to be a unit
                    // conversion — confirm against BetheBlochCalculator docs.
                    dedx = BetheBlochCalculator.computeBetheBloch(
                            m.getZeff(),
                            m.getAeff(),
                            m.getDensity(),
                            istate,
                            Material.DEFAULT_PRESSURE,
                            Material.DEFAULT_TEMPERATURE,
                            p,
                            105.,
                            1.,
                            .01 ) / 10000;
                    dedxmap.put( materialName, new Double( dedx ) );
                }
                double dx = slice.getThickness();
                xrad += dx / m.getRadiationLengthWithDensity();
                xlam += dx / m.getNuclearInteractionLengthWithDensity();
                xde += dx * dedx;
            }
            // Stored per layer; "/ 10." presumably converts mm to cm — TODO confirm.
            nrad.add( j, new Double( xrad / 10. ) );
            nlam.add( j, new Double( xlam / 10. ) );
            de.add( j, new Double( xde ) );
        }

        // Compute totals for all layers.
        for ( double lam : nlam )
        {
            intLens += lam;
        }
        for ( double rad : nrad )
        {
            radLens += rad;
        }
    }

    /** @return total number of nuclear interaction lengths over all layers */
    public double getInteractionLengths()
    {
        return intLens;
    }

    /** @return total number of radiation lengths over all layers */
    public double getRadiationLengths()
    {
        return radLens;
    }

    /** @return number of nuclear interaction lengths in layer {@code layern} */
    public double getInteractionLengths( int layern )
    {
        return nlam.get( layern );
    }

    /** @return number of radiation lengths in layer {@code layern} */
    public double getRadiationLengths( int layern )
    {
        return nrad.get( layern );
    }

    /** @return energy loss in layer {@code layern} */
    public double getDe( int layern )
    {
        return de.get( layern );
    }
}
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef long double ld;
typedef unsigned long long ull;
#define rep(i,l,r) for(int i=l;i<r;i++)
#define rep2(i,r,l) for(int i=r;i>l;i--)
#define sz(a) (ll)a.size()
#define mp(a,b) make_pair(a,b)
#define pb(a) push_back(a)
#define nl "\n"
#define pii pair<ll,ll>
#define F first
#define S second
#define debug1(x) cout<<x<<endl;
#define debug2(x,y) cout<<x<<" -> "<<y<<endl;
#define debug3(x,y,z) cout<<x<<" -> "<<y<<" -> "<<z<<endl;
#define trace(c,x) for(auto &x:c)
// Iterative factorial: returns n! (and 1 for n <= 1). No overflow guard;
// callers only pass small values (digit counts).
ll fact(ll n){
    ll result = 1;
    for (ll i = 2; i <= n; ++i)
        result *= i;
    return result;
}
// Counts the distinct positive integers (without leading zeros) that can be
// formed from any subset of n's digits, where the subset uses exactly the
// same set of distinct digit values as n itself.
int main(){
    ios_base::sync_with_stdio(false);
    cin.tie(NULL),cout.tie(NULL);
    ll n,ans=0;
    cin>>n;
    string num=to_string(n);
    ll size=sz(num);
    map<string,ll> m;        // deduplicates digit multisets (sorted subsets)
    map<char,ll> dig;        // digit -> multiplicity in n
    rep(i,0,size)
        dig[num[i]]++;
    // Enumerate every subset of digit positions via bitmask.
    rep(i,0,1<<size){
        map<char,ll> temp;   // digit -> multiplicity within this subset
        string subset="";
        rep(j,0,size){
            if( (i&(1<<j)) != 0)
                subset+=num[j];
        }
        // Sort so identical digit multisets map to the same key.
        sort(subset.begin(), subset.end());
        if(m[subset]==0){
            m[subset]++;
            rep(j,0,sz(subset))
                temp[subset[j]]++;
            // p = permutations of the multiset; q = permutations with a fixed
            // leading zero (used to subtract leading-zero arrangements).
            ll p=fact(sz(subset)),q=fact(sz(subset)-1);
            // Subset must contain every distinct digit of n.
            if(sz(temp)!=sz(dig))
                continue;
            ll flag=0;
            trace(temp,x){
                p/=fact(x.second);
                if(x.first=='0' && x.second>0){
                    flag=1;
                    q/=fact(x.second-1);
                }
                else
                    q/=fact(x.second);
            }
            // Remove arrangements that start with '0'.
            if(flag==1)
                p-=q;
            ans+=p;
            // ans+=fact(sz(temp))/p;
            // if(temp['0']>0)
            //     ans-=fact(sz(temp)-1)/( (p/temp['0'])*(temp['0']-1) );
        }
    }
    cout<<ans<<nl;
    return 0;
}
//*****************************************************************************
//
//! Draws a vertical line.
//!
//! \param pContext is a pointer to the drawing context to use.
//! \param lX is the X coordinate of the line.
//! \param lY1 is the Y coordinate of one end of the line.
//! \param lY2 is the Y coordinate of the other end of the line.
//!
//! This function draws a vertical line, taking advantage of the fact that the
//! line is vertical to draw it more efficiently. The clipping of the vertical
//! line to the clipping rectangle is performed within this routine; the
//! display driver's vertical line routine is used to perform the actual line
//! drawing.
//!
//! \return None.
//
//*****************************************************************************
void
GrLineDrawV(const tContext *pContext, int lX, int lY1, int lY2)
{
int lTemp;
Check the arguments.
ASSERT(pContext);
If the X coordinate of this line is not within the clipping region, then
there is nothing to be done.
if((lX < pContext->sClipRegion.sXMin) ||
(lX > pContext->sClipRegion.sXMax))
{
return;
}
Swap the Y coordinates if the first is larger than the second.
if(lY1 > lY2)
{
lTemp = lY1;
lY1 = lY2;
lY2 = lTemp;
}
If the entire line is out of the clipping region, then there is nothing
to be done.
if((lY1 > pContext->sClipRegion.sYMax) ||
(lY2 < pContext->sClipRegion.sYMin))
{
return;
}
Clip the starting coordinate to the top side of the clipping region if
required.
if(lY1 < pContext->sClipRegion.sYMin)
{
lY1 = pContext->sClipRegion.sYMin;
}
Clip the ending coordinate to the bottom side of the clipping region if
required.
if(lY2 > pContext->sClipRegion.sYMax)
{
lY2 = pContext->sClipRegion.sYMax;
}
Call the low level vertical line drawing routine.
DpyLineDrawV(pContext->pDisplay, lX, lY1, lY2, pContext->ulForeground);
} |
/**
* SISO program GAGAOnString.java
*
* This is an APPROXIMATE version of a program solving the computational problem
* GAGAOnString, which is itself uncomputable.
*
* progString: A Java program P
*
* inString: A string I, to be thought of as an input to P
*
* returns: the program attempts to return "yes" if P(I)=="GAGA" and "no"
* otherwise, but it will fail if its simulation of P enters an infinite loop.
*/
public class GAGAOnString implements Siso2 {
@Override
public String siso(String progString, String inString) throws WcbcException, IOException {
Universal universal = new Universal();
String val = universal.siso(progString, inString);
if (val.equals("GAGA")) {
return "yes";
} else {
return "no";
}
}
public static void main(String[] args) throws WcbcException, IOException {
utils.checkSiso2Args(args);
String progString = args[0];
String inString = args[1];
GAGAOnString gAGAOnString = new GAGAOnString();
String result = gAGAOnString.siso(progString, inString);
System.out.println(result);
}
} |
/**
 *  Disable receiving over this WeaveConnection. This method is used by the application
 *  to indicate that it is not ready to receive any arrived data over the TCP connection.
 *  In order to re-enable receiving, the application needs to call EnableReceive() to
 *  allow WeaveConnection to hand over any received data by invoking the appropriate
 *  callbacks.
 *
 *  @sa EnableReceive()
 *
 */
void WeaveConnection::DisableReceive()
{
    // Simply lower the flag; buffered data is held until receiving is re-enabled.
    ReceiveEnabled = false;
}
def batch_internal_diversity(smiles, set_smiles=None):
    """Score each SMILES string by its Tanimoto-distance diversity against
    the whole batch.

    Invalid sequences (per ``verify_sequence``) score 0.0.  ``set_smiles`` is
    unused but kept for API compatibility with callers.
    """
    molecules = [Chem.MolFromSmiles(smi) for smi in smiles]
    fingerprints = [
        AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048)
        for mol in molecules
    ]
    return [
        bulk_tanimoto_distance(smi, fingerprints) if verify_sequence(smi) else 0.0
        for smi in smiles
    ]
import path from "path";
import fs from "fs-extra";
import chokidar from "chokidar";
import type {
ChannelInfo
} from "../channel";
import {
_merge
} from "@pinglue/utils";
import {
_readYaml
} from "./utils/helpers.js";
import {
Loader,
LoaderSettings,
LoaderOutput
} from "./loader.js";
import type {
RegistrySettings
} from "./project-loader";
//======================================
/** Settings for ChannelsLoader: the package path plus the registry settings. */
interface Settings extends LoaderSettings {
    pkgPath: string;
    registrySettings: RegistrySettings;
}
/** Loader output: a map of channel name to channel info (null when the registers file is missing). */
interface Output extends LoaderOutput {
    data: Record<string, ChannelInfo>;
};
declare protected settings: Settings;
#filePath: string;
#fileWatcher: chokidar.FSWatcher;
constructor(settings: Settings) {
super(settings);
this.#filePath = path.join(
this.settings.pkgPath,
"info", "routes",
this.settings.registrySettings.route,
"registers.yaml"
);
if (this.settings
.registrySettings
.watchSettings
) {
this.#fileWatcher = chokidar.watch(this.#filePath)
.on("change", this.onFileChange(
"channels-settings",
"change-settings"
));
}
}
async load(): Promise<Output> {
// TODO(#14)
if (!await fs.pathExists(this.#filePath))
return {data:null};
return {
data: await _readYaml(this.#filePath)
};
}
async close() {
await this.#fileWatcher?.close();
}
} |
/*
 * vmcache_alloc -- allocate memory (take it from the queue)
 *
 * It returns the number of allocated bytes if successful, otherwise -1.
 * The last extent of doubly-linked list of allocated extents is returned
 * in 'first_extent'.
 * 'small_extent' has to be zeroed in the beginning of a new allocation
 * (e.g. when *first_extent == NULL).
 */
ssize_t
vmcache_alloc(struct heap *heap, size_t size, ptr_ext_t **first_extent,
                ptr_ext_t **small_extent)
{
    ASSERTne(first_extent, NULL);
    ASSERTne(small_extent, NULL);
    ASSERT((*first_extent == NULL) ? (*small_extent == NULL) : 1);

    LOG(3, "heap %p size %zu first_extent %p *small_extent %p",
        heap, size, *first_extent, *small_extent);

    struct heap_entry he, new;
    size_t extent_size = heap->extent_size;
    size_t to_allocate = size;
    size_t allocated = 0;

    util_mutex_lock(&heap->lock);

    /* Keep pulling free heap entries until the request is satisfied
     * or the heap runs dry. */
    do {
        if (vmcache_pop_heap_entry(heap, &he))
            break;

        /* Each allocated extent carries an HFER_SIZE header/footer,
         * rounded up to a whole number of extents. */
        size_t alloc_size = roundup(to_allocate + HFER_SIZE,
                        extent_size);

        /* If the popped entry is big enough to split, carve off the
         * tail and return it to the free list. */
        if (he.size >= alloc_size + extent_size) {
            new.ptr = vmcache_new_heap_entry(he.ptr, alloc_size);
            new.size = he.size - alloc_size;
            he.size = alloc_size;
            vmcache_insert_heap_entry(heap, &new,
                &heap->first_extent, IS_FREE);
        }

        if (vmcache_insert_heap_entry(heap, &he, first_extent,
                IS_ALLOCATED)) {
            util_mutex_unlock(&heap->lock);
            return -1;
        }

        /* Remember the first minimum-size extent of this allocation;
         * it may be released below if we over-allocate. */
        if (*small_extent == NULL && he.size == extent_size)
            *small_extent = *first_extent;

        size_t allocated_size = he.size - HFER_SIZE;
        allocated += allocated_size;

        /* If we over-shot by at least one extent's worth of payload,
         * give the remembered small extent back to the free list. */
        if (allocated_size > to_allocate &&
            allocated_size - to_allocate >= extent_size - HFER_SIZE &&
            *small_extent != NULL) {
            vmcache_free_extent(heap, *small_extent);
        }

        to_allocate -= MIN(allocated_size, to_allocate);
    } while (to_allocate > 0);

#ifdef STATS_ENABLED
    heap->size_used += allocated;
#endif

    util_mutex_unlock(&heap->lock);

    /* Number of bytes actually satisfied (may be < size on exhaustion). */
    return (ssize_t)(size - to_allocate);
}
import { Nomad, utils, Network, LocalNetwork, Key } from "../src";
import type { TokenIdentifier } from "@nomad-xyz/sdk/nomad/tokens";
import { ethers } from "ethers";
import { TransferMessage } from "@nomad-xyz/sdk/nomad";
import fs from "fs";
import { Waiter } from "../src/utils";
import { LocalAgent } from "../src/agent";
//
/**
* Sends several amounts of tokens from network "From" to "To"
* to particular reciver and then test that they are received
*
* @param n - Nomad instance which has both "from" and "to" networks
* @param from - instance of Network *from* which the tokens will be sent
* @param to - instance of Network *to* which the tokens will be sent
* @param token - token identifier according to Nomad
* @param receiver - receiver address as string at network *to*
* @param amounts - array of amounts to be sent in bulk
* @returns a promise of pair [`success`, `tokenContract` ERC20 if it was created]
*/
/**
 * Sends several amounts of tokens from network "from" to "to"
 * to a particular receiver and then verifies that they are received.
 *
 * @param n - Nomad instance which has both "from" and "to" networks
 * @param from - instance of Network *from* which the tokens will be sent
 * @param to - instance of Network *to* which the tokens will be sent
 * @param token - token identifier according to Nomad
 * @param receiver - receiver address as string at network *to*
 * @param amounts - array of amounts to be sent in bulk
 * @param fastLiquidity - whether to request fast liquidity for the transfers
 * @returns the ERC20 token contract created at the destination network
 */
export async function sendTokensAndConfirm(
  n: Nomad,
  from: Network,
  to: Network,
  token: TokenIdentifier,
  receiver: string,
  amounts: ethers.BigNumberish[],
  fastLiquidity = false
) {
  const ctx = n.getMultiprovider();

  // Dispatch one send per amount, accumulating the expected total.
  let amountTotal = ethers.BigNumber.from(0);
  let result: TransferMessage | undefined = undefined;
  for (const amountish of amounts) {
    const amount = ethers.BigNumber.from(amountish);

    result = await ctx.send(
      from.name,
      to.name,
      token,
      amount,
      receiver,
      fastLiquidity,
      {
        gasLimit: 10000000,
      }
    );
    amountTotal = amountTotal.add(amount);

    console.log(
      `Sent from ${from.name} to ${to.name} ${amount.toString()} tokens`
    );
  }

  if (!result) throw new Error(`Didn't get the result from transactions`);

  console.log(
    `Waiting for the last transactions of ${amounts.length} to be delivered:`
  );

  await result.wait();

  console.log(`Waiting for asset to be created at destination!`);

  // Wait until the token contract is created at the destination network.
  let waiter = new utils.Waiter(
    async () => {
      const tokenContract = await result!.assetAtDestination();

      if (
        tokenContract?.address !== "0x0000000000000000000000000000000000000000"
      ) {
        console.log(
          `Hurray! Asset was created at destination:`,
          tokenContract!.address
        );
        return tokenContract;
      }
    },
    3 * 60_000,
    2_000
  );

  const tokenContract = await waiter.wait();
  if (tokenContract === null) throw new Error(`Timedout token creation at destination`);
  if (!tokenContract) throw new Error(`no token contract`);

  let newBalance = await tokenContract!.balanceOf(receiver);

  // Wait until all transfers have landed at the destination.
  let waiter2 = new utils.Waiter(
    async () => {
      if (newBalance.eq(amountTotal)) {
        return true;
      } else {
        newBalance = await tokenContract!.balanceOf(receiver);
        console.log(
          `New balance:`,
          parseInt(newBalance.toString()),
          "must be:",
          // BUGFIX: previously logged parseInt(tokenContract.toString()),
          // which is NaN; the expected value is the total amount sent.
          parseInt(amountTotal.toString())
        );
      }
    },
    4 * 60_000,
    2_000
  );

  const success = await waiter2.wait();
  if (success === null)
    throw new Error(`Tokens transfer from ${from.name} to ${to.name} failed`);

  return tokenContract!;
}
/**
 * Spins up two local networks ("tom" and "jerry"), provisions each with a
 * full set of agent/signer keys, deploys Nomad across both with signers
 * injected, exports the deploy artifacts, and returns handles to everything.
 */
export async function setupTwo() {
  const tom = new LocalNetwork("tom", 1000, "http://localhost:9545");
  const jerry = new LocalNetwork("jerry", 2000, "http://localhost:9546");

  // One actor key per network plus the default agent key sets.
  const tomActor = new Key();
  const jerryActor = new Key();

  const t = utils.generateDefaultKeys();
  const j = utils.generateDefaultKeys();

  tom.addKeys(
    tomActor,
    t.updater,
    t.watcher,
    t.deployer,
    t.signer.base,
    t.signer.updater,
    t.signer.watcher,
    t.signer.relayer,
    t.signer.processor
  );
  jerry.addKeys(
    jerryActor,
    j.updater,
    j.watcher,
    j.deployer,
    j.signer.base,
    j.signer.updater,
    j.signer.watcher,
    j.signer.relayer,
    j.signer.processor
  );

  // Boot both chains in parallel before deploying.
  await Promise.all([tom.up(), jerry.up()]);

  const n = new Nomad(tom);
  n.addNetwork(jerry);

  // Wire up jerry's roles.
  n.setUpdater(jerry, j.updater); // Need for an update like updater
  n.setWatcher(jerry, j.watcher); // Need for the watcher
  n.setDeployer(jerry, j.deployer); // Need to deploy all
  n.setSigner(jerry, j.signer.base); // Need for home.dispatch
  n.setSigner(jerry, j.signer.updater, "updater"); // Need for home.dispatch
  n.setSigner(jerry, j.signer.relayer, "relayer"); // Need for home.dispatch
  n.setSigner(jerry, j.signer.watcher, "watcher"); // Need for home.dispatch
  n.setSigner(jerry, j.signer.processor, "processor"); // Need for home.dispatch

  // Wire up tom's roles.
  n.setUpdater(tom, t.updater); // Need for an update like updater
  n.setWatcher(tom, t.watcher); // Need for the watcher
  n.setDeployer(tom, t.deployer); // Need to deploy all
  n.setSigner(tom, t.signer.base); // Need for home.dispatch
  n.setSigner(tom, t.signer.updater, "updater"); // Need for home.dispatch
  n.setSigner(tom, t.signer.relayer, "relayer"); // Need for home.dispatch
  n.setSigner(tom, t.signer.watcher, "watcher"); // Need for home.dispatch
  n.setSigner(tom, t.signer.processor, "processor"); // Need for home.dispatch

  await n.deploy({ injectSigners: true });

  // Persist artifacts for the Rust agents and for later inspection.
  n.exportDeployArtifacts("../../rust/config");

  fs.writeFileSync("/tmp/nomad.json", JSON.stringify(n.toObject()));

  return {
    tom,
    jerry,
    tomActor,
    jerryActor,
    n,
  };
}
/**
 * Starts the given agent and returns a Waiter that resolves to true once the
 * agent has both observed a failed Home contract and restarted at least twice
 * after the failure (initial start + two failure-driven restarts).
 *
 * @param n - the Nomad instance to fetch the agent from
 * @param network - network the agent belongs to
 * @param agentType - agent kind, e.g. "updater" or "relayer"
 */
export async function waitAgentFailure(
  n: Nomad,
  network: Network,
  agentType: string
): Promise<Waiter<true>> {
  const agent = (await n.getAgent(agentType, network)) as LocalAgent;

  let startsCount = 0;
  let homeFailed = false;

  await agent.connect();

  const agentEvents = await agent.getEvents();

  // Count every (re)start of the agent process.
  agentEvents.on("start", () => {
    console.log(
      `   ========================= ${agentType} started  ========================= `
    );
    startsCount += 1;
  });

  // Watch the agent's log output for the failed-home message.
  agent.logMatcherRegisterEvent(
    "homeFailed",
    /Home contract is in failed state/
  );

  agentEvents.once("logs.homeFailed", () => {
    console.log(
      `   ========================= ${agentType} homeFailed  ========================= `
    );
    homeFailed = true;
  });

  await agent.start();

  return new Waiter(
    async (): Promise<true | undefined> => {
      if (
        homeFailed &&
        startsCount >= 3 // initial start + 1st failed start after the first failure + 2nd failed start
      ) {
        return true;
      }
    },
    10 * 60_000,
    2_000
  );
}
|
A new method to determine the causes of deviation in cylinder pressure curves of motored reciprocating piston engines
Simulation calibration of modern engines to test-bench measurements, mainly matching the indicated pressure curves in the combustion chamber, is a time-consuming task that requires extensive user experience to manage a multitude of adjustment parameters. In the scope of this work, an automated process for calibrating simulations at motored engine operating points is presented. This is achieved using characteristic pressure deviation curves, based on 1D simulations of a simplified base model. They incorporate the dimensionless deviation of the cylinder pressure originating from one parameter. A set of these curves is scaled and superposed to recreate the original pressure deviation between simulation and measurement. The scaling factor is used to quantify each parameter's suggested adjustment value. This work presents the workflow of creating the characteristic pressure deviation curves, matching the deviation between measurement and simulation, and determining the adjustment values. Further, the methodology is tested for interference of different parameters. A series of applications, ranging from 1D and 3D CFD simulation test cases to real-world applications in different engines, concludes this work.
#!/usr/bin/env python
"""
This script appends CRC-32s to the end of the files in a directory.
This is intended for anime fansubbing releases.
You can change what it checks by modifying the 'ext' (extension) on L47.
Can be run both from the command line, and imported.
"""
import argparse
import binascii
import glob
import mimetypes
import os
import re
__author__ = "LightArrowsEXE"
__license__ = 'MIT'
__version__ = '1.2'
def calculateCRC(f):
    """Return the CRC-32 of file *f* as an 8-digit uppercase hex string."""
    with open(f, 'rb') as handle:
        data = handle.read()
    # Mask to 32 bits for portability, then render as zero-padded hex.
    return format(binascii.crc32(data) & 0xFFFFFFFF, '08X')
def strip_crc(f):
    """Rename *f* to remove an 8-hex-digit "[XXXXXXXX]" CRC tag.

    One space immediately before the tag (the usual separator) is removed
    together with the tag.  Files without a tag are left untouched.
    """
    if re.search(r'\[[0-9a-fA-F]{8}\]', f):
        # BUGFIX: the old code stripped the tag and then blindly chopped the
        # last character of the stem to drop the separating space, corrupting
        # names where no space preceded the tag. Consume the optional space
        # in the regex instead.
        filename = re.sub(r' ?\[[0-9a-fA-F]{8}\]', '', f)
        os.rename(f, filename)
        print(f"[-] {f} stripped")
def main(recursive=None, strip=None):
    """Append (or strip) CRC-32 tags on video files in the working directory.

    recursive -- search subdirectories too; defaults to the CLI ``-R`` flag.
    strip     -- remove tags instead of adding them; defaults to ``-S``.

    BUGFIX: the old signature accepted ``recursive`` but ignored it and read
    the module-level ``args`` instead, so importing callers could not control
    behavior (and crashed without CLI parsing). Parameters now take
    precedence, falling back to ``args`` when omitted.
    """
    if recursive is None:
        recursive = args.recursive
    if strip is None:
        strip = args.strip

    if recursive:
        filelist = glob.glob('**/*', recursive=True)
    else:
        filelist = glob.glob('*')

    for f in filelist:
        mime = mimetypes.types_map.get(os.path.splitext(f)[-1], "")
        if mime.startswith("video/") or f.endswith('.mkv'):
            if strip:
                strip_crc(f)
            else:
                crc = calculateCRC(f)
                if re.search(crc, f):
                    print(f"[*] {f}, correct CRC already present in filename")
                else:
                    # Drop any stale tag, then re-tag with the fresh CRC.
                    strip_crc(f)
                    # BUGFIX: consume the optional space before the tag so the
                    # computed name matches what strip_crc() renamed the file
                    # to (the old mismatch raised FileNotFoundError), and build
                    # the new name from the stripped name rather than from the
                    # original (which still contained the stale tag).
                    str_filename = re.sub(r' ?\[[0-9a-fA-F]{8}\]', '', f)
                    stem, ext = os.path.splitext(str_filename)
                    filename = f'{stem} [{crc}]{ext}'
                    os.rename(str_filename, filename)
                    print(f"[+] {f}, CRC: [{crc}]")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-R", "--recursive",
                        action="store_true", default=False,
                        help="check recursively (default: %(default)s)")
    parser.add_argument("-S", "--strip",
                        action="store_true", default=False,
                        help="strip CRCs from filenames (default: %(default)s)")
    # BUGFIX: parse the arguments exactly once (a first parse_args() call used
    # to run and have its result discarded). The result stays in the
    # module-level ``args`` that main() reads.
    args = parser.parse_args()

    main()
|
A Coherent and Managed Runtime for ML on the SCC
Intel’s Single-Chip Cloud Computer (SCC) is a many-core architecture which stands out due to its complete lack of cache-coherence and the presence of fast, on-die interconnect for inter-core messaging. Cache-coherence, if required, must be implemented in software. Moreover, the amount of shared memory available on the SCC is very limited, requiring stringent management of resources even in the presence of software cache-coherence. In this paper, we present a series of techniques to provide the ML programmer a cache-coherent view of memory, while effectively utilizing both private and shared memory. To that end, we introduce a new, type-guided garbage collection scheme that effectively exploits SCC’s memory hierarchy, attempts to reduce the use of shared memory in favor of message passing buffers, and provides an efficient, coherent global address space. Experimental results over a variety of benchmarks show that more than 99% of the memory requests can be potentially cached. These techniques are realized in MultiMLton, a scalable extension of the MLton Standard ML compiler and runtime system on the SCC.
/* Parcelable boilerplate from here on out. */

/**
 * {@inheritDoc}
 *
 * @return 0 — no special content (such as file descriptors) is marshalled,
 *         so no flags need to be reported.
 */
@Override
public int describeContents() {
    /* We are saving no file descriptors. */
    return 0;
}
OFFICIALS WITH Washington’s football team have defended the team’s name by arguing that its fans and many Americans, including some who are Native American, see nothing objectionable in the moniker and don’t want it changed. The suggestion, of course, is that the push for a new name comes from outliers, a vocal but clueless minority. That makes even more significant the decision by a group with close ties to the National Football League to add its respected voice to the chorus for change.
“We cannot be silent on this issue,” leaders of the Fritz Pollard Alliance said in a statement announcing its opposition to the name, a statement that was not coincidentally released on Martin Luther King Jr. Day. The group, which is named after the first black NFL head coach and which promotes diversity and equality of opportunity in the league, said a new name for the Washington team is urgently needed. “As the NFL continues to move in the direction of respect and dignity, one of its teams carrying this name cuts glaringly against the grain,” read a letter sent to minority coaches and front-office staff members.
As we’ve said before, we understand that many fans oppose a name change out of feelings of tradition and loyalty. That doesn’t make them racist. But few of them would dream of flinging the disparaging name at a Native American. That suggests how insupportable the name has become.
The Fritz Pollard Alliance joins other civil rights groups that have come out against the team’s name, but the group’s close association with the NFL and its track record of success in bringing about positive change gives particular resonance to its message. It made clear this was a decision not lightly taken; it followed a lengthy process that included meetings with team and league officials, a review of historical and legal issues and talks with Native Americans. Indeed, it seems as if the group felt it had no other choice but to come out publicly since it was backed into a corner by the intransigence of Washington team officials.
Most troubling from the group’s chronology of its efforts was the refusal of team owner Daniel Snyder to meet with Native Americans opposed to the team’s name. At one point, The Post’s John Woodrow Cox and Mark Maske reported, Fritz Pollard officials were shouted down in a “hostile” and “jarring” manner by the executive director of the foundation created by Mr. Snyder to help Native Americans. That refusal to even hear a differing viewpoint shows a closed mind and should concern league officials.
NFL officials have maintained that any decision about the name lies solely with Mr. Snyder. So it’s interesting that when Fritz Pollard officials told NFL officials, including Commissioner Roger Goodell, that they would go public with their position, they were told to do what they had to do. We hope that means league officials are beginning to understand how detrimental Mr. Snyder’s obstinacy is to their entire organization. |
package pipeline
import (
"context"
"path/filepath"
"testing"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental"
"github.com/stretchr/testify/require"
)
// checkExactConversion runs the exact-JSON converter over the named test
// fixture with the given field config, collects all produced frames into a
// single DataResponse, and compares it against the corresponding golden file.
func checkExactConversion(tb testing.TB, file string, fields []Field) *backend.DataResponse {
	tb.Helper()
	content := loadTestJson(tb, file)
	converter := NewExactJsonConverter(ExactJsonConverterConfig{
		Fields: fields,
	})
	// Fix "now" so time-based fields are deterministic against golden data.
	converter.nowTimeFunc = func() time.Time {
		return time.Date(2021, 01, 01, 12, 12, 12, 0, time.UTC)
	}
	channelFrames, err := converter.Convert(context.Background(), Vars{}, content)
	require.NoError(tb, err)
	dr := &backend.DataResponse{}
	for _, cf := range channelFrames {
		// Converted frames are expected to carry no channel name here.
		require.Empty(tb, cf.Channel)
		dr.Frames = append(dr.Frames, cf.Frame)
	}
	err = experimental.CheckGoldenDataResponse(filepath.Join("testdata", file+".golden.txt"), dr, *update)
	require.NoError(tb, err)
	return dr
}
// BenchmarkExactJsonConverter_Convert measures conversion throughput for a
// config exercising a scalar path, an array index, and a map key lookup.
func BenchmarkExactJsonConverter_Convert(b *testing.B) {
	payload := loadTestJson(b, "json_exact")
	fields := []Field{
		{Name: "ax", Value: "$.ax", Type: data.FieldTypeNullableFloat64},
		{Name: "array_value", Value: "$.string_array[0]", Type: data.FieldTypeNullableString},
		{Name: "map_key", Value: "$.map_with_floats['key1']", Type: data.FieldTypeNullableFloat64},
	}
	converter := NewExactJsonConverter(ExactJsonConverterConfig{Fields: fields})
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := converter.Convert(context.Background(), Vars{}, payload)
		require.NoError(b, err)
	}
}
func TestExactJsonConverter_Convert(t *testing.T) {
checkExactConversion(t, "json_exact", []Field{
{
Name: "time",
Value: "#{now}",
Type: data.FieldTypeTime,
},
{
Name: "ax",
Value: "$.ax",
Type: data.FieldTypeNullableFloat64,
},
{
Name: "key1",
Value: "{x.map_with_floats.key1}",
Type: data.FieldTypeNullableFloat64,
Labels: []Label{
{
Name: "label1",
Value: "{x.map_with_floats.key2.toString()}",
},
{
Name: "label2",
Value: "$.map_with_floats.key2",
},
},
},
})
}
|
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of effects functions."""
from ddsp import core
from ddsp import processors
from ddsp import synths
import gin
import tensorflow as tf
# Module-level shorthand for core.tf_float32, used below to convert
# audio/IR inputs into float32 tensors before DSP ops.
tf_float32 = core.tf_float32
#------------------ Reverbs ----------------------------------------------------
@gin.register
class Reverb(processors.Processor):
  """Convolutional (FIR) reverb.

  Convolves the dry input audio with an impulse response (IR) that is either
  provided by the network per-example (trainable=False) or learned as a
  single weight shared across the whole dataset (trainable=True).
  """

  def __init__(self,
               trainable=False,
               reverb_length=48000,
               add_dry=True,
               name='reverb'):
    """Takes neural network outputs directly as the impulse response.

    Args:
      trainable: Learn the impulse_response as a single variable for the entire
        dataset.
      reverb_length: Length of the impulse response. Only used if
        trainable=True.
      add_dry: Add dry signal to reverberated signal on output.
      name: Name of processor module.
    """
    super().__init__(name=name, trainable=trainable)
    self._reverb_length = reverb_length
    self._add_dry = add_dry

  def _mask_dry_ir(self, ir):
    """Set first impulse response to zero to mask the dry signal."""
    # Make IR 2-D [batch, ir_size].
    if len(ir.shape) == 1:
      ir = ir[tf.newaxis, :]  # Add a batch dimension
    if len(ir.shape) == 3:
      ir = ir[:, :, 0]  # Remove unnecessary channel dimension.
    # Mask the dry signal: zeroing the first IR sample removes the direct
    # (identity) component so only the reverberated signal passes through.
    dry_mask = tf.zeros([int(ir.shape[0]), 1], tf.float32)
    return tf.concat([dry_mask, ir[:, 1:]], axis=1)

  def _match_dimensions(self, audio, ir):
    """Tile the impulse response variable to match the batch size."""
    # Add batch dimension.
    if len(ir.shape) == 1:
      ir = ir[tf.newaxis, :]
    # Match batch dimension.
    batch_size = int(audio.shape[0])
    return tf.tile(ir, [batch_size, 1])

  def build(self, unused_input_shape):
    """Initialize impulse response."""
    if self.trainable:
      # Start near silence so the learned reverb does not dominate early
      # in training.
      initializer = tf.random_normal_initializer(mean=0, stddev=1e-6)
      self._ir = self.add_weight(
          name='ir',
          shape=[self._reverb_length],
          dtype=tf.float32,
          initializer=initializer)
    self.built = True

  def get_controls(self, audio, ir=None):
    """Convert decoder outputs into ir response.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      ir: 3-D Tensor of shape [batch, ir_size, 1] or 2D Tensor of shape
        [batch, ir_size]. Ignored when trainable=True.

    Returns:
      controls: Dictionary of effect controls.

    Raises:
      ValueError: If trainable=False and ir is not provided.
    """
    if self.trainable:
      ir = self._match_dimensions(audio, self._ir)
    else:
      if ir is None:
        raise ValueError('Must provide "ir" tensor if Reverb trainable=False.')
    return {'audio': audio, 'ir': ir}

  def get_signal(self, audio, ir):
    """Apply impulse response.

    Args:
      audio: Dry audio, 2-D Tensor of shape [batch, n_samples].
      ir: 3-D Tensor of shape [batch, ir_size, 1] or 2D Tensor of shape
        [batch, ir_size].

    Returns:
      tensor of shape [batch, n_samples]
    """
    audio, ir = tf_float32(audio), tf_float32(ir)
    ir = self._mask_dry_ir(ir)
    wet = core.fft_convolve(audio, ir, padding='same', delay_compensation=0)
    # Optionally mix the original (dry) signal back in.
    return (wet + audio) if self._add_dry else wet
@gin.register
class ExpDecayReverb(Reverb):
  """Parameterize impulse response as a simple exponential decay.

  The IR is white noise under an exponentially decaying envelope, controlled
  by two scalars (gain, decay) that are either network outputs or learned
  weights (trainable=True).
  """

  def __init__(self,
               trainable=False,
               reverb_length=48000,
               scale_fn=core.exp_sigmoid,
               add_dry=True,
               name='exp_decay_reverb'):
    """Constructor.

    Args:
      trainable: Learn the impulse_response as a single variable for the entire
        dataset.
      reverb_length: Length of the impulse response.
      scale_fn: Function by which to scale the network outputs.
      add_dry: Add dry signal to reverberated signal on output.
      name: Name of processor module.
    """
    super().__init__(name=name, add_dry=add_dry, trainable=trainable)
    # Re-assign here because reverb_length is not forwarded to
    # super().__init__() above (the parent would use its own default).
    self._reverb_length = reverb_length
    self._scale_fn = scale_fn

  def _get_ir(self, gain, decay):
    """Simple exponential decay of white noise."""
    gain = self._scale_fn(gain)
    # Keep the exponent >= 2 (2 + e^decay) so the envelope always decays.
    decay_exponent = 2.0 + tf.exp(decay)
    time = tf.linspace(0.0, 1.0, self._reverb_length)[tf.newaxis, :]
    noise = tf.random.uniform([1, self._reverb_length], minval=-1.0, maxval=1.0)
    ir = gain * tf.exp(-decay_exponent * time) * noise
    return ir

  def build(self, unused_input_shape):
    """Initialize impulse response."""
    if self.trainable:
      self._gain = self.add_weight(
          name='gain',
          shape=[1],
          dtype=tf.float32,
          initializer=tf.constant_initializer(2.0))
      self._decay = self.add_weight(
          name='decay',
          shape=[1],
          dtype=tf.float32,
          initializer=tf.constant_initializer(4.0))
    self.built = True

  def get_controls(self, audio, gain=None, decay=None):
    """Convert network outputs into ir response.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      gain: Linear gain of impulse response. Scaled by self._scale_fn.
        2D Tensor of shape [batch, 1]. Not used if trainable=True.
      decay: Exponential decay coefficient. The final impulse response is
        exp(-(2 + exp(decay)) * time) where time goes from 0 to 1.0 over the
        reverb_length samples. 2D Tensor of shape [batch, 1]. Not used if
        trainable=True.

    Returns:
      controls: Dictionary of effect controls.

    Raises:
      ValueError: If trainable=False and gain and decay are not provided.
    """
    if self.trainable:
      gain, decay = self._gain[tf.newaxis, :], self._decay[tf.newaxis, :]
    else:
      if gain is None or decay is None:
        raise ValueError('Must provide "gain" and "decay" tensors if '
                         'ExpDecayReverb trainable=False.')
    ir = self._get_ir(gain, decay)
    if self.trainable:
      # Single learned IR must be tiled out to the input batch size.
      ir = self._match_dimensions(audio, ir)
    return {'audio': audio, 'ir': ir}
@gin.register
class FilteredNoiseReverb(Reverb):
  """Parameterize impulse response with outputs of a filtered noise synth."""

  def __init__(self,
               trainable=False,
               reverb_length=48000,
               window_size=257,
               n_frames=1000,
               n_filter_banks=16,
               scale_fn=core.exp_sigmoid,
               initial_bias=-3.0,
               add_dry=True,
               name='filtered_noise_reverb'):
    """Constructor.

    Args:
      trainable: Learn the impulse_response as a single variable for the entire
        dataset.
      reverb_length: Length of the impulse response.
      window_size: Window size for filtered noise synthesizer.
      n_frames: Time resolution of magnitudes coefficients. Only used if
        trainable=True.
      n_filter_banks: Frequency resolution of magnitudes coefficients. Only used
        if trainable=True.
      scale_fn: Function by which to scale the magnitudes.
      initial_bias: Shift the filtered noise synth inputs by this amount
        (before scale_fn) to start generating noise in a reasonable range when
        given magnitudes centered around 0.
      add_dry: Add dry signal to reverberated signal on output.
      name: Name of processor module.
    """
    super().__init__(name=name, add_dry=add_dry, trainable=trainable)
    self._n_frames = n_frames
    self._n_filter_banks = n_filter_banks
    # The IR itself is synthesized: reverb_length samples of filtered noise
    # shaped by time-varying filter magnitudes.
    self._synth = synths.FilteredNoise(n_samples=reverb_length,
                                       window_size=window_size,
                                       scale_fn=scale_fn,
                                       initial_bias=initial_bias)

  def build(self, unused_input_shape):
    """Initialize impulse response."""
    if self.trainable:
      initializer = tf.random_normal_initializer(mean=0, stddev=1e-2)
      self._magnitudes = self.add_weight(
          name='magnitudes',
          shape=[self._n_frames, self._n_filter_banks],
          dtype=tf.float32,
          initializer=initializer)
    self.built = True

  def get_controls(self, audio, magnitudes=None):
    """Convert network outputs into ir response.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      magnitudes: Magnitudes tensor of shape [batch, n_frames, n_filter_banks].
        Expects float32 that is strictly positive. Not used if trainable=True.

    Returns:
      controls: Dictionary of effect controls.

    Raises:
      ValueError: If trainable=False and magnitudes are not provided.
    """
    if self.trainable:
      magnitudes = self._magnitudes[tf.newaxis, :]
    else:
      if magnitudes is None:
        raise ValueError('Must provide "magnitudes" tensor if '
                         'FilteredNoiseReverb trainable=False.')
    ir = self._synth(magnitudes)
    if self.trainable:
      # Single learned IR must be tiled out to the input batch size.
      ir = self._match_dimensions(audio, ir)
    return {'audio': audio, 'ir': ir}
#------------------ Filters ----------------------------------------------------
@gin.register
class FIRFilter(processors.Processor):
  """Linear time-varying finite impulse response (LTV-FIR) filter."""

  def __init__(self,
               window_size=257,
               scale_fn=core.exp_sigmoid,
               name='fir_filter'):
    """Constructor.

    Args:
      window_size: Window size used by the underlying frequency filter.
      scale_fn: Optional function used to scale raw network outputs into
        filter magnitudes; pass None to use the magnitudes unchanged.
      name: Name of processor module.
    """
    super().__init__(name=name)
    self.window_size = window_size
    self.scale_fn = scale_fn

  def get_controls(self, audio, magnitudes):
    """Scale raw network outputs into filter magnitudes.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      magnitudes: 3-D Tensor of synthesizer parameters, of shape
        [batch, time, n_filter_banks].

    Returns:
      controls: Dictionary of tensors of synthesizer controls.
    """
    scaled = magnitudes if self.scale_fn is None else self.scale_fn(magnitudes)
    return {'audio': audio, 'magnitudes': scaled}

  def get_signal(self, audio, magnitudes):
    """Filter audio with the LTV-FIR filter.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      magnitudes: Magnitudes tensor of shape [batch, n_frames, n_filter_banks].
        Expects float32 that is strictly positive.

    Returns:
      signal: Filtered audio of shape [batch, n_samples, 1].
    """
    return core.frequency_filter(
        audio, magnitudes, window_size=self.window_size)
#------------------ Modulation -------------------------------------------------
class ModDelay(processors.Processor):
  """Modulated delay times used in chorus, flanger, and vibrato effects."""

  def __init__(self,
               center_ms=15.0,
               depth_ms=10.0,
               sample_rate=16000,
               gain_scale_fn=core.exp_sigmoid,
               phase_scale_fn=tf.nn.sigmoid,
               add_dry=True,
               name='mod_delay'):
    """Constructor.

    Args:
      center_ms: Base delay time in milliseconds.
      depth_ms: Additional modulated delay in milliseconds on top of center_ms.
      sample_rate: Audio sample rate, used to convert milliseconds to samples.
      gain_scale_fn: Function by which to scale the wet-signal gain.
      phase_scale_fn: Function by which to squash the raw phase into [0, 1].
      add_dry: Add dry signal to the modulated signal on output.
      name: Name of processor module.
    """
    super().__init__(name=name)
    self.center_ms = center_ms
    self.depth_ms = depth_ms
    self.sample_rate = sample_rate
    self.gain_scale_fn = gain_scale_fn
    self.phase_scale_fn = phase_scale_fn
    self.add_dry = add_dry

  def get_controls(self, audio, gain, phase):
    """Scale raw network outputs into delay-line controls.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      gain: Amplitude of modulated signal. Shape [batch_size, n_samples, 1].
      phase: Relative delay time. Shape [batch_size, n_samples, 1].

    Returns:
      controls: Dictionary of tensors of synthesizer controls.
    """
    if self.gain_scale_fn is not None:
      gain = self.gain_scale_fn(gain)
    if self.phase_scale_fn is not None:
      phase = self.phase_scale_fn(phase)
    return {'audio': audio, 'gain': gain, 'phase': phase}

  def get_signal(self, audio, gain, phase):
    """Apply a time-varying (modulated) delay to the audio.

    Args:
      audio: Dry audio. 2-D Tensor of shape [batch, n_samples].
      gain: Amplitude of modulated signal. Shape [batch_size, n_samples, 1].
      phase: Normalized instantaneous delay in [0, 1].
        NOTE(review): the mapping below sends phase in [0, 1] to a delay of
        [center_ms, center_ms + depth_ms]; an earlier docstring claimed the
        symmetric range [center_ms - depth_ms, center_ms + depth_ms] —
        confirm which is intended. Shape [batch_size, n_samples, 1].

    Returns:
      signal: Modulated audio of shape [batch, n_samples].
    """
    max_delay_ms = self.center_ms + self.depth_ms
    max_length_samples = int(self.sample_rate / 1000.0 * max_delay_ms)
    # Express center/depth as fractions of the maximum delay, then map the
    # normalized phase onto that range.
    depth_phase = self.depth_ms / max_delay_ms
    center_phase = self.center_ms / max_delay_ms
    phase = phase * depth_phase + center_phase
    wet_audio = core.variable_length_delay(audio=audio,
                                           phase=phase,
                                           max_length=max_length_samples)
    # Remove channel dimension.
    if len(gain.shape) == 3:
      gain = gain[..., 0]
    wet_audio *= gain
    return (wet_audio + audio) if self.add_dry else wet_audio
|
def pixel(ndf, input_nc=3, norm='batch', init_type='normal', init_gain=0.02):
    """Build and initialize a ``PixelDiscriminator`` network.

    Args:
        ndf (int): number of discriminator filters.
        input_nc (int): number of input image channels (default 3, e.g. RGB).
        norm (str): normalization layer name resolved via ``get_norm_layer``
            (e.g. ``'batch'``).
        init_type (str): weight-initialization scheme passed to
            ``init_weights``.
        init_gain (float): scaling factor for the chosen init scheme.

    Returns:
        The initialized ``PixelDiscriminator`` network.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    init_weights(net, init_type, init_gain=init_gain)
    return net
// SetPriority accepts only PriorityLow, PriorityMedium or PriorityHigh
// constants. Unknown values are silently ignored and the config is returned
// unchanged, so calls can still be chained safely.
func (c MangaConfig) SetPriority(priority int) MangaConfig {
	acceptable := makeListInt(priorities)
	if _, ok := acceptable[priority]; !ok {
		return c
	}
	// Stored as a decimal string, matching the string-valued config map.
	c["priority"] = strconv.Itoa(priority)
	return c
}
import { Component, OnInit } from '@angular/core';
import {FirebaseService} from '../../services/firebase.service';
import {DataService} from '../../services/data.service';
import {Router, ActivatedRoute, Params} from '@angular/router';
@Component({
  selector: 'app-applicant-list',
  templateUrl: './applicant-list.component.html',
  styleUrls: ['./applicant-list.component.css']
})
export class ApplicantListComponent implements OnInit {
  /** Job-listing id read from the route parameters. */
  id: any;
  name_list: any;
  i: number;
  next: any;
  /** Applicant name records rendered by the template. */
  listings: any;

  constructor(
    private firebaseService: FirebaseService,
    private router: Router,
    private route: ActivatedRoute,
    private dataService: DataService
  ) { }

  /**
   * Navigate to an applicant's profile page, stashing the current listing id
   * in the shared DataService so the profile page can read it back.
   */
  routing(a) {
    this.dataService.data = this.id;
    this.router.navigate(['/user/applicant-profile/' + a]);
  }

  ngOnInit() {
    this.next = [];
    this.listings = [];
    this.i = 0;
    this.id = this.route.snapshot.params['id'];
    // Resolve each applicant key for this listing to its name record and
    // collect them for display.
    // NOTE(review): these nested subscriptions are never unsubscribed;
    // consider switchMap/takeUntil if this component is re-created often.
    this.firebaseService.showApplicants(this.id).subscribe(keys => {
      keys.forEach(key => {
        this.firebaseService.showApplicantName(key.$key).subscribe(response => {
          this.listings.push(response);
        });
      });
    });
  }
}
|
/**
* Copy from HAN
*
* @author Emmanuel Bernard
*/
public abstract class HANTestCase extends junit.framework.TestCase {
private static SessionFactory sessions;
private static AnnotationConfiguration cfg;
private static Dialect dialect;
private static Class lastTestClass;
private Session session;
public HANTestCase() {
super();
}
public HANTestCase(String x) {
super( x );
}
protected void buildSessionFactory(Class[] classes, String[] packages, String[] xmlFiles) throws Exception {
if ( getSessions() != null ) getSessions().close();
try {
setCfg( new AnnotationConfiguration() );
configure( cfg );
if ( recreateSchema() ) {
cfg.setProperty( Environment.HBM2DDL_AUTO, "create-drop" );
}
for ( int i = 0; i < packages.length; i++ ) {
getCfg().addPackage( packages[i] );
}
for ( int i = 0; i < classes.length; i++ ) {
getCfg().addAnnotatedClass( classes[i] );
}
for ( int i = 0; i < xmlFiles.length; i++ ) {
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream( xmlFiles[i] );
getCfg().addInputStream( is );
}
setDialect( Dialect.getDialect() );
setSessions( getCfg().buildSessionFactory( /*new TestInterceptor()*/ ) );
}
catch (Exception e) {
e.printStackTrace();
throw e;
}
}
protected void setUp() throws Exception {
if ( getSessions() == null || lastTestClass != getClass() ) {
buildSessionFactory( getMappings(), getAnnotatedPackages(), getXmlFiles() );
lastTestClass = getClass();
}
}
@SuppressWarnings({ "deprecation" })
protected void runTest() throws Throwable {
try {
super.runTest();
if ( session != null && session.isOpen() ) {
if ( session.isConnected() ) session.connection().rollback();
session.close();
session = null;
fail( "unclosed session" );
}
else {
session = null;
}
}
catch (Throwable e) {
try {
if ( session != null && session.isOpen() ) {
if ( session.isConnected() ) session.connection().rollback();
session.close();
}
}
catch (Exception ignore) {
}
try {
if ( sessions != null ) {
sessions.close();
sessions = null;
}
}
catch (Exception ignore) {
}
throw e;
}
}
public Session openSession() throws HibernateException {
session = getSessions().openSession();
return session;
}
public Session openSession(Interceptor interceptor) throws HibernateException {
session = getSessions().openSession( interceptor );
return session;
}
protected abstract Class[] getMappings();
protected String[] getAnnotatedPackages() {
return new String[]{};
}
protected String[] getXmlFiles() {
return new String[]{};
}
private void setSessions(SessionFactory sessions) {
HANTestCase.sessions = sessions;
}
protected SessionFactory getSessions() {
return sessions;
}
private void setDialect(Dialect dialect) {
HANTestCase.dialect = dialect;
}
protected Dialect getDialect() {
return dialect;
}
protected static void setCfg(AnnotationConfiguration cfg) {
HANTestCase.cfg = cfg;
}
protected static AnnotationConfiguration getCfg() {
return cfg;
}
protected void configure(Configuration cfg) {
//cfg.setNamingStrategy( AlternativeNamingStrategy.INSTANCE );
//cfg.getSessionEventListenerConfig().setFlushEventListener( new EJB3FlushEventListener() );
//cfg.getSessionEventListenerConfig().setAutoFlushEventListener( new EJB3AutoFlushEventListener() );
}
protected boolean recreateSchema() {
return true;
}
} |
On the Minimum Number of Monochromatic Generalized Schur Triples
The solution to the problem of finding the minimum number of monochromatic triples $(x,y,x+ay)$ with $a\geq 2$ being a fixed positive integer over any 2-coloring of $[n]$ was conjectured by Butler, Costello, and Graham (2010) and Thanathipanonda (2009). We solve this problem using a method based on Datskovsky's proof (2003) on the minimum number of monochromatic Schur triples $(x,y,x+y)$. We do this by exploiting the combinatorial nature of the original proof and adapting it to the general problem.
Introduction
Ramsey theory has a rich history first popularized in 1935 by Erdös and Szekeres in their seminal paper . We investigate a part of the theory that was orginally developed by Issai Schur. The formulation of Schur's theorem was first derived from Van der Waerden's theorem in 1927 . Van der Waerden proved that any r-coloring of Z + must admit a monochromatic 3-term arithmetic progression {a, a + d, a + 2d} for some a, d > 1. A particular choice of x, y and z in terms of a and d admits a monochromatic solution to x + y = 2z, on a plane whose coordinates are the positive integers. Hence, a similar question regarding the coloring of monochromatic solutions to a simpler equation can be posed; namely, does there exist a least positive integer s = s(r) such that for any r-coloring of there is a monochromatic solution to x + y = z? Schur determined that the answer is yes, and we call the solution (x, y, z) to such an equation a Schur triple.
In 1959, Goodman was able to determine the minimum number of monochromatic triangles under a 2-edge coloring of a complete graph on n vertices, which turned out to be the same order as the average, n 3 24 +O(n 2 ). This motivated Graham to pose the problem of finding the minimum number of monochromatic Schur triples over any 2-coloring of at a conference and to offer 100 USD for the result. Graham initially conjectured that the average value should be the minimum at n 2 16 + O(n). However, Zeilberger and his student Robertson used discrete calculus to show that the minimum number must be n 2 22 + O(n) and won the cash prize. Around the same time, Schoen , followed four years later by Datskovsky , furnished different proofs using Fourier analysis to show that indeed, n 2 22 + O(n) is the correct minimum. Ultimately, their idea had reduced to one in combinatorics. More recently in 2009, Thanatiponanda confirmed the result using a new technique with computer algebra and a greedy algorithm. He conjectured a minimum number of monochromatic Schur triples for all r-colorings and a minimum number of monochromatic triples satisfying x + ay = z for a fixed integer a 2 over any 2-coloring of . We solve the latter part of the conjecture in this paper using a purely combinatorial approach.
The Minimum Number of Monchromatic Schur Triples
We first show how to find the minimum coloring of x + y = z in an elementary way using the method by Datkovsky . Then, we explore the more general case in the next two sections. We start by employing a 2-coloring on all integers in for n < ∞ and count the number of monochromatic Schur triples (x, y, z) where z = x + y. Denote the colors to be red (R) and blue (B).
The number of Schur triples includes the number of monochromatic Schur triples |M(n)| and non-monochromatic Schur triples |N (n)|.
Lemma 2. The number of Schur triples in is 1 2 Figure 1: The sets N − and N + Proof.
Each non-monochromatic triple gives two non-monochromatic pairs which gives the first equality. To get the second equality, we observe that the pairs in N − will contribute to two triples but the pairs in N + will only contribute to one. For example, in , (5,3) gives the triples (3, 2, 5) and (5,3,8). But, in , (8,9) only gives (1,8,9). Finally, the last equality comes from the fact that By putting together Lemmas 1 and 2 and Proposition 5, we obtain the next lemma.
Lemma 6. The number of monochromatic Schur triples under a 2-coloring on is In order to find the minimum value of |M(n)|, we must obtain the lower bound of |N + | in terms of µ B and µ R . To do this more efficiently, we denote D := |N − | − |N + | and find an upper bound on D instead. The proof requires the following notation: the electronic journal of combinatorics 22 (2015), #P00 Definition 7. Let S be the set of pairs of the form {s, n + 1 − s} where 1 s n/2. Denote by µ CC ′ the number of sets S with colorings C and C ′ in this order. γn is the number of non-monochromatic pairs in S.
Lemma 8. Assume, without loss of generality, that µ B µ R . Then Proof. Assuming 1 y < x n 2 , denote the sets X and Y as Ordered pairs in X × Y when colored differently are contained in N + ∪ N − , that is: We outline all possible colorings of the X and Y sets that contribute to the value of D in the table in Figure 2. We see that with the exception of the first four cases, the contribution to D is 0.
We now obtain an upper bound of D: The inequality (2) comes from our assumption that µ BB µ RR . The last equality comes Calculus shows that the maximum of (3) occurs when γ = µ B 2n which simplifies our inequality to Non-monochromatic X set and Monochromatic Y set 9 red blue red red (n + 1 − x, y) 0 (n + 1 − x, n + 1 − y) 10 blue red blue blue (n + 1 − x, y) 0 (n + 1 − x, n + 1 − y) 11 red blue blue blue (x, y) 0 (x, n + 1 − y) 12 blue red red red (x, y) 0 (x, n + 1 − y) Non-monochromatic X and Y sets 13 red blue blue red (x, y) Theorem 9. Over all 2-colorings of , the minimum number of monochromatic Schur triples is n 2 22 + O(n). Proof. An upper bound of the minimum can be obtained from a coloring on . We color R 4n/11 , B 6n/11 , R n/11 as illustrated in Figure 3. This proportion comes from a brute force computer search first proposed by Zeilberger . This coloring will give us n 2 22 + O(n) monochromatic triples. Next, we look for the lower bound of the minimum. By using Lemma 8 and the fact that which, together with Lemma 6, gives The right hand side of (4) achieves a maximum when µ R = 5n 11 and µ B = 6n 11 . As a result, we get the lower bound for the minimum to be: Because the lower and upper bounds match, we have therefore shown the desired result.
Remark. We can be confident that the bounds for equations (1) and (2) is sharp relative to the optimal coloring because we know that cases 3 and 4 from Figure 2 will not occur and that µ BR = 0.
This method of Datkovsky's also gives the optimal coloring for fixed ratios of µ B and µ R .
Corollary 10. For any fixed µ B µ R , the coloring on that gives the minimum number of monochromatic Schur triples is R For the case µ B 2n 3 , the maximum of D occurs when γ = µ B 2n . So, For the case µ B > 2n 3 , the maximum of D occurs when γ = µ R n . With similar calculations, The colorings mentioned in the statement of the corollary give us upper bounds for the minimum, which happens to match the lower bounds.
Corollary 11. For any fixed µ B µ R , the coloring on that gives the maximum number of monochromatic Schur triples is R 3 . Proof. Using a similar calculation to Lemma 8, we have that For the case µ B > 2n 3 , the minimum of D occurs when γ = µ R n . Therefore, The colorings mentioned in the statement of the corollary give us lower bounds of the maximum, which happens to match the upper bounds.
3 The Minimum Number of Monochromatic Triples (x, y, x + 2y) The technique illustrated in the previous section can be extended to x + ay = z for any fixed integer a 2. However, the nice symmetry we had previously with the equation x + y = z is no longer there. In this section we deal with the specific case a = 2. The general case will be outlined in Section 4.
We parallel the same argument as in Section 2. First, we write the number of nonmonochromatic triples |N (n)| in terms of variables we can optimize.
Definition 12. Denote by µ B 1 and µ R 1 the number of blue and red colorings respectively on 1, n 2 . Furthermore, denote by µ B 2 and µ R 2 the number of blue and red colorings respectively on n 2 , n .
the electronic journal of combinatorics 22 (2015), #P00 Note that µ B 1 + µ R 1 = n 2 and µ R 1 + µ R 2 = µ R . Definition 13. The sets of non-monochromatic pairs in × are defined as follows: Proof. We count sets of ordered pairs that come from non-monochromatic triples of type (x, y, z) where z = x + 2y as follows: We then have that: To obtain the second inequality, observe that (refer to Lemma 3 of Thanatipanonda ). The last equality comes The next lemma follows immediately from Proposition 14.
Lemma 15. The number of monochromatic triples of type (x, y, x+2y) under a 2-coloring on is Minimizing |M(n)| can be reduced to finding the upper bound of CC ′ the number of sets S with colorings C and C ′ in this order. The superscript (1) refers to the coloring of pairs on 1, n 2 . Also, denote by γ 1 n the number of nonmonochromatic pairs in S.
With this notation, γ 1 n = µ (1) Definition 17. Define sets X and Y 1 as follows, EE ′ is defined by the number of pairs (X, Y 1 ) where X has the coloring {C, C ′ } and Y 1 has the coloring {E, E ′ } under the condition 2y < x.
Proof. Assuming 1 2y < x n 2 , we observe that the ordered pairs in X × Y 1 when colored differently are contained in N − x ∪ N + y , that is: The table in Figure 5 shows that there are only four cases that contribute any value to D 2 , while the other cases contribute 0, similar to the table in Figure 2.
The result follows immediately.
The next proposition gives the upper bound for D 2 .
the electronic journal of combinatorics 22 (2015), #P00 Monochromatic X set and Non-monochromatic Y 1 set Case Proposition 19. Assume, without loss of generality, that µ B µ R and suppose the number of non-monochromatic pairs in S, γ 1 n, is fixed. Then where A 1 is the largest possible area under the curve in Figure 6, with a base of length γ 1 n for γ 1 Proof. From Lemma 18, We configure X to gain the maximum of µ BB ⊗ µ condition 2y < x . With this set up for X, we can count the number of pairs gained for every y in Y 1 as shown in Figure 7a.
Similarly, the configuration of X to gain the maximum of µ RR ⊗ µ (1) BR is when we color the far left of the interval 1, n 2 blue and the remainder of the interval red. With this set up for X, we can count the number of pairs gained for every y in Y 1 as shown in Figure 7b. Proof. Assume, without loss of generality, that µ B µ R . The proof ultimately depends on determining the area under the curve of Figure 6, which we break down into three cases according to the value of γ 1 . The three cases are illustrated in Figure 8. We complete the proof of this proposition as follows. For each case, we write A 1 in terms of the variables µ R , µ B , γ and γ 1 . Then, we optimize γ, γ 1 with respect to µ B 1 , µ R 1 and µ R 2 . Finally, we use the optimal γ and γ 1 values to maximize Denote this maximum to be ∆ max . Propositions 14 and 19 show that ∆ max will be the upper bound for |N (n)|. The optimization of ∆ has been done using Maple and for curious readers, the code can be found at Thanatipanonda's website. We note that ∆ can the electronic journal of combinatorics 22 (2015), #P00 ultimately be written as a function of only two variables µ R and µ R 1 . In our calculations, we use the following lower bound of γn and upper bound of γ 1 n: Maple's current technology does not allow us to optimize with absolute value and minimum functions. Thus, we separate each case into the following pieces. The subcases are summarized in the table in Figure 9. It is important to note that subcase D can be ignored in our calculation because it only produces one pair, namely µ R 1 = n 4 and µ R = n 2 (recall that µ R n 2 ). . This case is illustrated in Figure 8a.
In order to maximize A 1 , we maximize γ 1 n and minimize γn. To be able to determine the values of γ 1 and γ, we consider two further subcases as follows: the electronic journal of combinatorics 22 (2015), #P00 The optimal values of (γ 1 n, γn) is (min (µ R 1 , µ B 1 ) , |µ R 1 − µ R 2 |) as shown in Figure 10a.
The optimal values of (γ 1 n, γn) lies on the line γn = 2γ 1 n as shown in Figure 10b. This gives Thus, A 1 attains its maximum value at γn = µ B 2 .
The calculations for ∆ max for all subcases are summarized in the table in Figure 11 and the admissible regions for subcases A, B, and C are shown in Figure 14a. In this case, ∆ max occurs under subcase A. Figure 10: Finding the optimal values of γ 1 n and γn in Case 1 Case 2: γn 2 γ 1 n < µ R 2 . This case is illustrated in Figure 8b. In this case, we take the area of the rectangle but subtract those pairs that are outside of the region. Thus, we have that: In order to maximize A 1 , we want to make γ 1 n as close to µ B 2 as possible and minimize γn. We break this case down into subcases depending on whether or not the upper bound of γ 1 n is less than µ R 2 .
The optimal value of (γ 1 n, γn) is (min The optimal value of (γ 1 n, γn) is The calculations for ∆ max for all subcases are summarized in the table in Figure 12 and the admissible regions for subcases A, B, and C are shown in Figure 14b. In this case, ∆ max occurs under subcase A. Case 3: µ R 2 < γ 1 n. This case is illustrated in Figure 8c. In this final case, A 1 indicated by nearly the entire region under the graph. We take the area of the rectangle but subtract those pairs that are outside of the region. Thus, we the electronic journal of combinatorics 22 (2015), #P00 have that In order to maximize A 1 , we want to make γ 1 n as to close to n 4 as possible and minimize γn. The calculations for ∆ max for all subcases are summarized in the table in Figure 13 and the admissible regions for subcases A, B, and C are shown in Figure 14c. Once again, ∆ max occurs under subcase A. In all three cases, we can see that ∆ max occurs in Case 3 when µ R 1 = 3n 11 and µ R = 4n 11 with ∆ max = 5n 2 22 + O(n). Theorem 21. Over all 2-colorings of , the minimum number of monochromatic triples satisfying x + 2y = z is n 2 44 + O(n). Proof. An upper bound of the minimum can be obtained from a coloring on . We color R 3n/11 , B 7n/11 , R n/11 as illustrated in Figure 15. This solution was discovered in Butler, Costello, and Graham and in Thanathipanonda .
Because the lower and upper bounds match, we have therefore shown the desired result.
4 The General Case x + ay = z, a 2 We now generalize our result.
Theorem 22. Over all 2-colorings of , the minimum number of monochromatic triples satisfying x + ay = z, a 2 is n 2 2a(a 2 +2a+3) + O(n). The set up of this proof is similar to the set up in Section 3. We will outline it here. Definition 23. Denote by µ B 1 and µ R 1 the number of blue and red colorings respectively on 1, n a . Furthermore, denote by µ B 2 and µ R 2 the number of blue and red colorings respectively on n a , n . It is now easy to adapt this notation to prove the following analog to Proposition 14.
Proposition 25. |N (n)| 1 2 Proposition 27. Assume, without loss of generality, that µ B µ R and suppose the number of non-monochromatic pairs in S, γ 1 n, is fixed. Then where A 1 is the largest possible area that can be placed under the curve in Figure 17 Proposition 28. Over all 2-colorings of , the maximum number of non-monochromatic triples satisfying x + ay = z, a 2 is Proof. Without loss of generality, we assume µ B µ R . Suppose Propositions 25 and 27 show that optimizing ∆ will give us the upper bound for |N (n)|. We will call this optimum ∆ max . In order to find ∆ max , we consider three different cases to compute A 1 . Like before, each case will be subjected to the conditions listed in Figure 9.
Here, we show only the details for Case 3A which will give us the best upper bound like in the previous section.
Similar to before, we want γ 1 n to be as close to n 2a as possible and γn should be as small as possible. This is achieved by setting γ 1 n = µ B 1 and γn = µ R 1 − µ R 2 . Then which is attained when µ R 1 = a+1 a 2 +2a+3 and µ R = a+2 a 2 +2a+3 .
Proof of Theorem 22. An upper bound of the minimum can be obtained from a coloring on . We color with the ratio 1, a + 1 a+1 , 1 a+1 as illustrated in Figure 18, which was discovered in and .
This coloring gives us Because the lower and upper bounds match, we have therefore shown the desired result.
Conjectures
In this section, we present conjectures on variations of Graham's original problem. Denote by R, B, G the colors red, blue, and green respectively.
Equation:
ax + by = az where a, b are integers. This has also been conjectured in .
Equation:
x + y + w = z The coloring that gives the minimum number of monochromatic solutions over any 2-coloring of is , with the number of rainbow solutions to be n(n + 1) 10 . |
/**
 * When a variable is overridden by another, e.g. via xsl:import,
 * its references need to be copied or otherwise it may be
 * compiled away as dead code. This method can be used for that
 * purpose.
 *
 * @param var the variable that receives a copy of every reference
 *            currently registered with this one
 */
public void copyReferences(VariableBase var) {
    final int size = _refs.size();
    for (int i = 0; i < size; i++) {
        var.addReference(_refs.get(i));
    }
}
/// For a given conditional copy, predicate the definition of the source of
/// the copy under the given condition (using the same predicate register as
/// the copy).
///
/// Returns true iff the transformation was performed; on failure nothing
/// is modified.
bool HexagonExpandCondsets::predicate(MachineInstr *TfrI, bool Cond) {
// Only conditional transfers are handled: A2_tfrt (if-true) and
// A2_tfrf (if-false).
unsigned Opc = TfrI->getOpcode();
(void)Opc;
assert(Opc == Hexagon::A2_tfrt || Opc == Hexagon::A2_tfrf);
DEBUG(dbgs() << "\nattempt to predicate if-" << (Cond ? "true" : "false")
<< ": " << *TfrI);
MachineOperand &MD = TfrI->getOperand(0);  // destination of the copy
MachineOperand &MP = TfrI->getOperand(1);  // predicate register
MachineOperand &MS = TfrI->getOperand(2);  // source of the copy
// The source must die at the transfer; otherwise predicating its def
// would change the value observed by other users.
if (!MS.isKill())
return false;
RegisterRef RT(MS);
unsigned PredR = MP.getReg();
// Locate the def of the source that reaches TfrI under this predicate.
MachineInstr *DefI = getReachingDefForPred(RT, TfrI, PredR, Cond);
if (!DefI || !isPredicable(DefI))
return false;
DEBUG(dbgs() << "Source def: " << *DefI);
// Collect the defs and uses occurring strictly between DefI and TfrI,
// each tagged with the execution slice (then/else) it belongs to.
ReferenceMap Uses, Defs;
MachineBasicBlock::iterator DefIt = DefI, TfrIt = TfrI;
// If the predicate register is redefined in between, instructions in the
// range can no longer be attributed to a particular then/else slice.
bool PredValid = true;
for (MachineBasicBlock::iterator I = std::next(DefIt); I != TfrIt; ++I) {
if (!I->modifiesRegister(PredR, 0))
continue;
PredValid = false;
break;
}
for (MachineBasicBlock::iterator I = std::next(DefIt); I != TfrIt; ++I) {
MachineInstr *MI = &*I;
// Conservatively assume an instruction executes in both slices unless
// it is itself predicated on PredR.
unsigned Exec = Exec_Then | Exec_Else;
if (PredValid && HII->isPredicated(*MI) && MI->readsRegister(PredR))
Exec = (Cond == HII->isPredicatedTrue(*MI)) ? Exec_Then : Exec_Else;
for (auto &Op : MI->operands()) {
if (!Op.isReg())
continue;
RegisterRef RR = Op;
// Give up on physical registers: the liveness bookkeeping below only
// handles virtual registers.
if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
return false;
ReferenceMap &Map = Op.isDef() ? Defs : Uses;
addRefToMap(RR, Map, Exec);
}
}
// Bail out if the source is redefined in the "then" slice, or used in the
// "else" slice, anywhere between DefI and TfrI.
if (isRefInMap(RT, Defs, Exec_Then) || isRefInMap(RT, Uses, Exec_Else))
return false;
RegisterRef RD = MD;
// Decide where the (now predicated) definition may be placed: up at
// DefI's position, or down at TfrI's position.
bool CanUp = canMoveOver(TfrI, Defs, Uses);
bool CanDown = canMoveOver(DefI, Defs, Uses);
// Memory operations additionally need a legality check for moving past
// the intervening instructions.
if (DefI->mayLoad() || DefI->mayStore())
if (!canMoveMemTo(DefI, TfrI, true))
CanDown = false;
DEBUG(dbgs() << "Can move up: " << (CanUp ? "yes" : "no")
<< ", can move down: " << (CanDown ? "yes\n" : "no\n"));
MachineBasicBlock::iterator PastDefIt = std::next(DefIt);
if (CanUp)
predicateAt(RD, DefI, PastDefIt, PredR, Cond);
else if (CanDown)
predicateAt(RD, DefI, TfrIt, PredR, Cond);
else
return false;
// If the copy renamed the value, rewrite uses of the old register within
// the affected range.
if (RT != RD)
renameInRange(RT, RD, PredR, Cond, PastDefIt, TfrIt);
// Both the original def and the transfer are now dead.
removeInstrFromLiveness(TfrI);
removeInstrFromLiveness(DefI);
return true;
}
Alumina scale growth and degradation modes of a TBC system
Abstract The evolution of a thermal barrier coating system was followed during exposures at high temperature (1100°C) and under various thermal and mechanical loading conditions. The TBC system is composed of an EB-PVD yttria partially stabilised zirconia topcoat (TC) and a platinum nickel aluminide bondcoat (BC) deposited on a single-crystal nickel-based superalloy. Depending on the kind of heat treatment (isothermal or thermo-mechanically cycled), different types of defects (pores, cracks, re-oxidation) were observed at the BC/TGO interface, the TGO/TC interface or in the alumina scale. Damage processes were identified and discussed according to the type of imposed heat treatment.
import { nil } from '../..';
// Tests for the `nil` assertion helper: the required form accepts only
// `null`; the optional form additionally passes `undefined` through.
// (Fixes typos in the test descriptions only; assertions are unchanged.)
describe('nil', () => {
  describe('required', () => {
    it('returns null if null passed', () => {
      expect(nil(null)).toBe(null);
    });
    it('throws a typeError if other value is specified (no context specified)', () => {
      expect(
        () => nil(1),
      ).toThrow(new TypeError('null is expected but "1" received.'));
    });
    it('throws a typeError if other value is specified (with context description)', () => {
      expect(
        () => nil(1, 'foo.bar'),
      ).toThrow(new TypeError('null is expected in foo.bar but "1" received.'));
    });
    it('throws a typeError if undefined is specified (no context specified)', () => {
      expect(
        () => nil(undefined),
      ).toThrow(new TypeError('null is expected but "undefined" received.'));
    });
    it('throws a typeError if undefined is specified (with context description)', () => {
      expect(
        () => nil(undefined, 'foo.bar'),
      ).toThrow(new TypeError('null is expected in foo.bar but "undefined" received.'));
    });
  });
  describe('optional', () => {
    it('returns null if null passed', () => {
      expect(nil.optional(null)).toBe(null);
    });
    it('throws a typeError if other value is specified (no context specified)', () => {
      expect(
        () => nil.optional(1),
      ).toThrow(new TypeError('null is expected but "1" received.'));
    });
    it('throws a typeError if other value is specified (with context description)', () => {
      expect(
        () => nil.optional(1, 'foo.bar'),
      ).toThrow(new TypeError('null is expected in foo.bar but "1" received.'));
    });
    it('returns undefined if undefined specified', () => {
      expect(nil.optional(undefined)).toBe(undefined);
    });
  });
});
|
Franchise player Carmelo Anthony offered a ringing endorsement of another player on the New York Knicks: point guard and roster hopeful Chasson Randle.
Within an NBA organization, few individuals are more influential than the franchise player. What they suggest isn’t guaranteed to materialize, but it’s only rational to value the opinion of the player whom the team is being built around.
If Phil Jackson and the New York Knicks’ front office truly covets Carmelo Anthony’s opinion, then Chasson Randle could be in store for some wonderful news.
Randle is currently on a partially guaranteed one-year deal worth $543,471. $100,000 is guaranteed to come his way, but the same can’t be said for the main roster spot that Randle is pursuing.
According to Al Iannazzone of Newsday, Anthony held nothing back when discussing Randle.
That’s a powerful endorsement considering the circumstances.
Derrick Rose is the starting point guard, but he has injury and legal concerns that could limit his availability. Brandon Jennings is the backup and sixth man, but he ruptured his Achilles tendon in 2015 and only has 48 games of NBA regular season experience since then.
Having established that troubling reality, it’d only be rational for the Knicks to target a third point guard in order to create depth.
Randle certainly looked the part during the Knicks’ second preseason game. Matched up with the Brooklyn Nets, Randle lit the crosstown rivals up to the tune of 14 points, two rebounds, two assists, and three steals in 21 minutes.
It was a tremendous performance that clearly captured the attention of his teammates.
Anthony has essentially entered a public plea for Randle to be signed to the main roster. Randle could play for the Westchester Knicks in the NBA D-League, but New York may end up needing him on the main roster by season’s end.
If nothing else, Randle has the support of the franchise player.
That alone guarantees him a legitimate opportunity to make the main roster. |
from math import ceil
from collections import defaultdict as dd
import sys
input = sys.stdin.readline

# A number is divisible by 9 exactly when its digit sum is divisible by 9,
# so sum the digits of the (possibly huge) number given as text.
n = input().split()[0]
digit_sum = sum(int(digit) for digit in n) % 9
print("Yes" if digit_sum == 0 else "No")
<reponame>gannochenko/generators
import { FunctionComponent } from 'react';
import { FooterRoot, Copyright, Links } from './style';
import { FooterPropsType } from './type';
import { Container } from '../Container';
import { Link } from '../Link';
import { meta } from '../../meta';
/**
 * Page footer: copyright line plus secondary links, centered in the
 * standard page container. All received props are forwarded to the root.
 */
export const Footer: FunctionComponent<FooterPropsType> = (props) => (
    <FooterRoot {...props}>
        <Container contentAlign="center">
            <Copyright>
                © then — now «{meta.application.name}»
            </Copyright>
            <Links variant="body2">
                <Link href="/cookie-policy">Cookie policy</Link>
            </Links>
        </Container>
    </FooterRoot>
);
|
package jp.sf.amateras.csseditor.editors;
import jp.sf.amateras.htmleditor.ColorProvider;
import jp.sf.amateras.htmleditor.HTMLPlugin;
import org.eclipse.jface.text.IDocument;
import org.eclipse.jface.text.contentassist.ContentAssistant;
import org.eclipse.jface.text.contentassist.IContentAssistant;
import org.eclipse.jface.text.presentation.IPresentationReconciler;
import org.eclipse.jface.text.presentation.PresentationReconciler;
import org.eclipse.jface.text.rules.DefaultDamagerRepairer;
import org.eclipse.jface.text.rules.RuleBasedScanner;
import org.eclipse.jface.text.source.ISourceViewer;
import org.eclipse.ui.editors.text.TextSourceViewerConfiguration;
/**
*
* @author <NAME>
*/
public class CSSConfiguration extends TextSourceViewerConfiguration {
// Provider of shared colour tokens, injected via the constructor.
private ColorProvider colorProvider;
// Lazily created scanners, one per partition type (see getters below).
private RuleBasedScanner commentScanner;
private CSSBlockScanner defaultScanner;
/**
 * @param colorProvider supplies the colour tokens used by the scanners
 */
public CSSConfiguration(ColorProvider colorProvider){
this.colorProvider = colorProvider;
}
/**
 * Returns the (lazily created) scanner for CSS comment partitions;
 * it paints the whole partition in the configured comment colour.
 */
private RuleBasedScanner getCommentScanner(){
if (commentScanner == null) {
commentScanner = new RuleBasedScanner();
commentScanner.setDefaultReturnToken(
colorProvider.getToken(HTMLPlugin.PREF_COLOR_CSSCOMMENT));
}
return commentScanner;
}
/**
 * Returns the (lazily created) scanner for the default partition,
 * falling back to the plain foreground colour.
 */
private RuleBasedScanner getDefaultScanner(){
if (defaultScanner == null) {
defaultScanner = new CSSBlockScanner(colorProvider);
defaultScanner.setDefaultReturnToken(
colorProvider.getToken(HTMLPlugin.PREF_COLOR_FG));
}
return defaultScanner;
}
// This editor only distinguishes the default partition and CSS comments.
@Override public String[] getConfiguredContentTypes(ISourceViewer sourceViewer) {
return new String[] {
IDocument.DEFAULT_CONTENT_TYPE,
CSSPartitionScanner.CSS_COMMENT};
}
// Content assist is installed for the default partition only.
@Override public IContentAssistant getContentAssistant(ISourceViewer sourceViewer) {
ContentAssistant assistant = new ContentAssistant();
assistant.enableAutoInsert(true);
CSSAssistProcessor processor = new CSSAssistProcessor();
assistant.setContentAssistProcessor(processor,IDocument.DEFAULT_CONTENT_TYPE);
assistant.install(sourceViewer);
// IPreferenceStore store = HTMLPlugin.getDefault().getPreferenceStore();
// assistant.enableAutoActivation(store.getBoolean(HTMLPlugin.PREF_ASSIST_AUTO));
// assistant.setAutoActivationDelay(store.getInt(HTMLPlugin.PREF_ASSIST_TIMES));
return assistant;
}
// Wires each partition type to its scanner for damage/repair colouring.
@Override public IPresentationReconciler getPresentationReconciler(ISourceViewer sourceViewer) {
PresentationReconciler reconciler = new PresentationReconciler();
DefaultDamagerRepairer dr = null;
dr = new DefaultDamagerRepairer(getDefaultScanner());
reconciler.setDamager(dr, IDocument.DEFAULT_CONTENT_TYPE);
reconciler.setRepairer(dr, IDocument.DEFAULT_CONTENT_TYPE);
dr = new DefaultDamagerRepairer(getCommentScanner());
reconciler.setDamager(dr, CSSPartitionScanner.CSS_COMMENT);
reconciler.setRepairer(dr, CSSPartitionScanner.CSS_COMMENT);
return reconciler;
}
}
|
#include <iostream>
#include <vector>
#include <stack>
#include <queue>
using namespace std;
/**
* 2020-04-20
* Veronica
*/
// Binary-tree node holding an int value; both children start out null.
struct TreeNode {
int val;
TreeNode *left;
TreeNode *right;
TreeNode(int x) : val(x), left(NULL), right(NULL) {}
};
class Solution {
public:
// Rebuilds a BST from its preorder traversal in O(n), keeping an explicit
// stack of ancestors whose right subtree has not been started yet.
TreeNode * bstFromPreorder(vector<int>& preorder) {
if (preorder.empty())
return NULL;
int n = preorder.size();
TreeNode* root = new TreeNode(preorder[0]);
stack<TreeNode*> stk;
stk.push(root);
for (int i = 1; i < n; ++i) {
// Defensive guard; with a valid BST preorder a node is pushed on
// every iteration, so the stack should never be empty here.
if (stk.empty())
break;
TreeNode* curr = stk.top();
if (preorder[i] < curr->val) { // left children
curr->left = new TreeNode(preorder[i]);
stk.push(curr->left);
}
else {
// Pop every ancestor smaller than the new key; the new node hangs
// off the right of the last node popped.
while (!stk.empty() && preorder[i] > stk.top()->val) {
curr = stk.top();
stk.pop();
}
if (stk.empty()) { // root
// All ancestors were popped: re-walk from the root; the walk
// below ends at the rightmost node (the last one popped).
curr = root;
}
// Descend to the rightmost descendant (a no-op when curr was just
// popped, since its right child is not created yet).
while (curr->right)
curr = curr->right;
curr->right = new TreeNode(preorder[i]);
stk.push(curr->right);
}
}
return root;
}
};
// Prints the tree in breadth-first (level) order; absent children of
// visited nodes are printed as "null" and not expanded further.
void printTree(TreeNode* root) {
    if (!root) {
        cout << "null" << " ";
        return;
    }
    queue<TreeNode*> pending;
    pending.push(root);
    while (!pending.empty()) {
        TreeNode* node = pending.front();
        pending.pop();
        if (!node) {
            cout << "null ";
        } else {
            cout << node->val << " ";
            pending.push(node->left);
            pending.push(node->right);
        }
    }
}
// Releases every node of the tree with an iterative level-order walk.
// Note: the pointer is passed by value, so the caller's variable is NOT
// reset (the original's trailing `root = NULL;` was dead code); callers
// must not dereference the pointer after this call.
void deleteTree(TreeNode* root) {
    if (!root)
        return;
    queue<TreeNode*> que;
    que.push(root);
    while (!que.empty()) {
        TreeNode* curr = que.front();
        que.pop();
        // Enqueue the children before freeing the parent.
        if (curr->left)
            que.push(curr->left);
        if (curr->right)
            que.push(curr->right);
        delete curr;
    }
}
// Demo driver: build a BST from a sample preorder sequence, print it in
// level order, then free it.
int main() {
    Solution solution;
    vector<int> preorder{ 8, 5, 1, 7, 10, 12 };
    TreeNode* root = solution.bstFromPreorder(preorder);
    printTree(root);
    deleteTree(root);
    return 0;
}
/**
*
* jerry - Common Java Functionality
* Copyright (c) 2012, <NAME>
*
* http://www.sangupta/projects/jerry
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.sangupta.jerry.batch;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* @author sangupta
*
*/
@SuppressWarnings("rawtypes")
public class MultiThreadedTaskExecutor {

    private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedTaskExecutor.class);

    /** Default number of worker threads used for fixed-size pools. */
    private static final int DEFAULT_BATCH_SIZE = 10;

    /** The operation executed once per submitted argument. */
    private final MultiThreadableOperation multiThreadableOperation;

    /** Backing thread pool; created in the constructor. */
    private ExecutorService pool = null;

    /**
     * Provides a user defined-name to this object so that errors can be traced easily.
     */
    private String name;

    /**
     * Create an executor depending on developer's preference.
     *
     * @param name user-defined name used in log messages
     * @param operation the operation to run for each submitted argument
     * @param fixedSizePool {@code true} for a fixed pool of
     *        {@link #DEFAULT_BATCH_SIZE} threads, {@code false} for a
     *        cached (growing) pool
     */
    public MultiThreadedTaskExecutor(String name, MultiThreadableOperation operation, boolean fixedSizePool) {
        this(name, operation, DEFAULT_BATCH_SIZE, fixedSizePool);
    }

    /**
     * Create an Executor that uses a fixed size pool.
     *
     * @param name user-defined name used in log messages
     * @param operation the operation to run for each submitted argument
     * @param batchSize number of threads in the fixed pool
     */
    public MultiThreadedTaskExecutor(String name, MultiThreadableOperation operation, int batchSize) {
        this(name, operation, batchSize, true);
    }

    /**
     * Create an Executor depending on given fixed size or cached thread pool.
     *
     * @param name user-defined name used in log messages
     * @param operation the operation to run for each submitted argument
     * @param batchSize number of threads (ignored for cached pools)
     * @param fixedSizePool whether to use a fixed-size pool
     * @throws IllegalArgumentException if {@code operation} is {@code null}
     */
    private MultiThreadedTaskExecutor(String name, MultiThreadableOperation operation, int batchSize, boolean fixedSizePool) {
        if(operation == null) {
            throw new IllegalArgumentException("Operation cannot be null.");
        }

        this.name = name;
        this.multiThreadableOperation = operation;
        if(fixedSizePool) {
            pool = Executors.newFixedThreadPool(batchSize);
        } else {
            pool = Executors.newCachedThreadPool();
        }
        LOGGER.debug("Starting executor pool: {}", this.name);
    }

    /**
     * Submits one invocation of the operation with the given argument.
     * Any throwable raised by the operation is logged and swallowed so a
     * failing task never kills a worker thread.
     *
     * @param argument the argument passed to the operation
     * @return a future that completes when the invocation has finished
     */
    public Future<Void> addInvocation(final Object argument) {
        return pool.submit(new Callable<Void>() {

            @SuppressWarnings("unchecked")
            @Override
            public Void call() throws Exception {
                try {
                    multiThreadableOperation.runWithArguments(argument);
                } catch(Throwable t) {
                    LOGGER.error("[{}] Error thrown running thread for argument {}", name, argument.toString());
                    LOGGER.error("Stack trace:", t);
                }
                return null;
            }

        });
    }

    /**
     * Initiates an orderly shutdown and waits (up to one day) for all
     * submitted tasks to finish.
     *
     * @throws InterruptedException declared for API compatibility; an
     *         interrupt while waiting is handled internally by restoring
     *         the calling thread's interrupt status
     */
    public void waitForCompletion() throws InterruptedException {
        if(pool != null) {
            try {
                pool.shutdown();
                pool.awaitTermination(1, TimeUnit.DAYS);
            } catch(InterruptedException ie) {
                // Fix: the original caught Throwable and silently dropped the
                // interrupt; restore the flag so callers can observe it.
                Thread.currentThread().interrupt();
            } catch(Throwable t) {
                LOGGER.error("Unable to complete jobs and shutdown pool", t);
            }
        }
    }

    /**
     * Forcibly shuts the pool down (best effort); safe to call multiple
     * times and after {@link #waitForCompletion()}.
     */
    public void cleanUp() {
        if(pool != null && !pool.isShutdown()) {
            try {
                pool.shutdownNow();
            } catch(Throwable t) {
                LOGGER.error("Unable to shutdown pool", t);
            }
        }
    }
}
|
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package fake
import (
"context"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ClusterReader is a configurable fake: Get, ListNamespaceScoped and Sync
// return the canned values and errors stored in the exported fields.
type ClusterReader struct {
NoopClusterReader
// Object copied into the caller's argument by Get, and Get's error.
GetResource *unstructured.Unstructured
GetErr error
// Items copied into the caller's list by ListNamespaceScoped, and its error.
ListResources *unstructured.UnstructuredList
ListErr error
// Error returned by Sync.
SyncErr error
}
// Get copies GetResource (when set) into u and returns GetErr.
func (f *ClusterReader) Get(_ context.Context, _ client.ObjectKey, u *unstructured.Unstructured) error {
if f.GetResource != nil {
u.Object = f.GetResource.Object
}
return f.GetErr
}
// ListNamespaceScoped copies ListResources (when set) into list and
// returns ListErr.
func (f *ClusterReader) ListNamespaceScoped(_ context.Context, list *unstructured.UnstructuredList, _ string, _ labels.Selector) error {
if f.ListResources != nil {
list.Items = f.ListResources.Items
}
return f.ListErr
}
// Sync returns SyncErr.
func (f *ClusterReader) Sync(_ context.Context) error {
return f.SyncErr
}
// NewNoopClusterReader returns a reader whose every method is a no-op.
func NewNoopClusterReader() *NoopClusterReader {
return &NoopClusterReader{}
}
// NoopClusterReader implements every method as a successful no-op; it is
// embedded by ClusterReader so only the interesting methods need faking.
type NoopClusterReader struct{}
// Get does nothing and reports success.
func (n *NoopClusterReader) Get(_ context.Context, _ client.ObjectKey, _ *unstructured.Unstructured) error {
return nil
}
// ListNamespaceScoped does nothing and reports success.
func (n *NoopClusterReader) ListNamespaceScoped(_ context.Context, _ *unstructured.UnstructuredList,
_ string, _ labels.Selector) error {
return nil
}
// ListClusterScoped does nothing and reports success.
func (n *NoopClusterReader) ListClusterScoped(_ context.Context, _ *unstructured.UnstructuredList,
_ labels.Selector) error {
return nil
}
// Sync does nothing and reports success.
func (n *NoopClusterReader) Sync(_ context.Context) error {
return nil
}
|
Erythrocyte sodium-lithium countertransport: another link between essential hypertension and diabetes.
Erythrocyte Na+/Li+ countertransport has been extensively investigated in human essential hypertension in numerous clinical, epidemiologic, and genetic studies and through these studies has emerged as the best-characterized intermediate phenotype. Patients with elevated antiporter activity manifest high total body exchangeable Na+ levels, renal and cardiac hypertrophy, and metabolic abnormalities which are part of the syndrome characterized by resistance to insulin-stimulated body glucose disposal. The coexistence of hypertension with insulin resistance and elevated Na+/Li+ countertransport has suggested that a link between the metabolic and ion transport abnormalities may be mediated through elevated insulin levels. In vitro studies in erythrocytes of fasted individuals have demonstrated that physiologic doses of insulin increased the maximal transport rate and the concentration for half-maximal transport for Na+ of both the Na+/Li+ and Na+/H+ exchanges. In vivo, Na+/Li+ exchange also exhibits a low affinity for Na+ in insulin-resistant hypertensive patients and in patients with insulin-dependent diabetes mellitus complicated by nephropathy. Insulin modulation of this antiporter may play a role in the chronic alterations in Na+ homeostasis observed in hypertension and diabetes.
<reponame>xuezier/mvc-example
export * from './StoreService';
export * from './StoreAddressService'; |
// Libraries
import axios from 'axios'
// Errors
import { RequestError } from '../middleware/errorMiddleware'
// 404 request error raised when the npm downloads API reports an unknown
// package name (see getCountResult below).
class PackageNotFound extends RequestError {
constructor() {
super(404, 'Package does not exist')
}
}
/**
 * API Documentation
 * https://github.com/npm/registry/blob/master/docs/download-counts.md
 */
// Pre-configured axios client for the npm download-counts service.
const api = axios.create({
baseURL: 'https://api.npmjs.org/downloads/',
})
// Download count for an inclusive [start, end] date range (ISO dates).
interface CountResult {
downloads: number
start: string
end: string
}
/**
 * Fetches a point download count from the npm API and maps it onto a
 * CountResult. A 404 response is translated into PackageNotFound; all
 * other failures propagate unchanged.
 */
async function getCountResult(url: string): Promise<CountResult> {
  try {
    const response = await api.get(url)
    const { downloads, start, end } = response.data
    return { downloads, start, end }
  } catch (error) {
    if (error.response?.status === 404) {
      throw new PackageNotFound()
    }
    throw error
  }
}
/** Thin wrapper exposing download statistics for a single npm package. */
export default class NpmPackage {
  constructor(readonly name: string) {}

  /** Download count for the most recent day. */
  async getLastDayDownloads(): Promise<CountResult> {
    const url = `point/last-day/${this.name}`
    return getCountResult(url)
  }

  /** Download count for the most recent week. */
  async getLastWeekDownloads(): Promise<CountResult> {
    const url = `point/last-week/${this.name}`
    return getCountResult(url)
  }
}
|
#include <iostream>
using namespace std;
#include <string>
#include <vector>
#include <algorithm>
#include <map>
typedef long long ll;
#include <cmath>
#include <iomanip>

// Modulus used throughout the solution (1e9 + 7, a prime).
const int MOD = 1000000007;

// Computes (x ^ y) % MOD by iterative binary exponentiation in O(log y).
// For y <= 0 the loop never runs and the result is 1.
ll POWER(ll x, ll y)
{
    ll result = 1;
    ll base = x;
    while (y > 0)
    {
        if (y & 1) result = (result * base) % MOD;
        base = (base * base) % MOD;
        y >>= 1;
    }
    return result;
}
// Greatest common divisor of a and b (iterative Euclidean algorithm);
// the larger value is moved into `a` first.
ll gcm(ll a, ll b)
{
    if (a < b) { ll tmp = a; a = b; b = tmp; }
    while (b > 0)
    {
        ll rem = a % b;
        a = b;
        b = rem;
    }
    return a;
}
// Decides whether the inclusive range [a, b] contains a multiple of k,
// printing "OK" if so and "NG" otherwise.
int main(int argc, char *argv[])
{
int k,a,b;
cin>>k>>a>>b;
int x,y;
// x: smallest multiplier with x*k >= a (ceiling division);
// y: largest multiplier with y*k <= b (floor division).
x=(a+k-1)/k;
y=b/k;
// A multiple of k lies in [a, b] exactly when x <= y.
cout<<((x<=y) ? "OK":"NG")<<endl;
return 0;
}
|
// Testing Rest Client access against ORCID REST API
//
// NOTE(review): this is an integration test — it performs live network
// calls to the ORCID *sandbox* and uses sandbox-only credentials that are
// hard-coded below. Do not copy this pattern with production secrets.
func TestRestAPI(t *testing.T) {
apiURL := "https://pub.sandbox.orcid.org"
clientID := "APP-01XX65MXBF79VJGF"
clientSecret := "3a87028d-c84c-4d5f-8ad5-38a93181c9e1"
testORCID := "0000-0003-0900-6903"
api, err := New(apiURL, OAuth, clientID, clientSecret)
if err != nil {
t.Errorf("Can't create API, %s", err)
t.FailNow()
}
if api == nil {
t.Errorf("API shouldn't be nil")
t.FailNow()
}
err = api.oAuthLogin()
if err != nil {
t.Errorf("Can't authenticate, %s", err)
t.FailNow()
}
// Clear the cached token; the following Request should then obtain a
// fresh one — presumably exercising automatic re-login (TODO confirm).
api.token = ""
src, err := api.Request("get", "/v2.0/"+testORCID+"/record", map[string]string{})
if err != nil {
t.Errorf("request profile failed, %s", err)
t.FailNow()
}
// The record response must be valid JSON containing an orcid-identifier.
data := map[string]interface{}{}
if err := json.Unmarshal(src, &data); err != nil {
t.Errorf("Can't unmashall JSON response, %s", err)
t.FailNow()
}
if _, ok := data["orcid-identifier"]; ok != true {
t.Errorf("missing orcid-identifier")
t.FailNow()
}
}
<reponame>nirtamir2/vue-use-web
import { ref, onMounted, onUnmounted } from '@vue/composition-api';
/**
 * Composition hook that tracks the current mouse position in two reactive
 * refs (`x`, `y`). The mousemove listener is attached on mount and removed
 * on unmount.
 *
 * Bug fix: the original handler assigned `e.clientY` to `x.value`, so `y`
 * was never updated and `x` was immediately overwritten with the Y
 * coordinate.
 */
export function useMousePosition() {
  const x = ref(0);
  const y = ref(0);

  function handler(e: MouseEvent) {
    x.value = e.clientX;
    y.value = e.clientY;
  }

  onMounted(() => {
    window.addEventListener('mousemove', handler, false);
  });

  onUnmounted(() => {
    window.removeEventListener('mousemove', handler, false);
  });

  return {
    x,
    y
  };
}
|
#----------------------------------------------------#
#--Author: <NAME> (<NAME>)-----#
#--W: https://harrys.fyi/----------------------------#
#--E: <EMAIL>----------------------------#
#----------------------------------------------------#
#!/usr/bin/env python
# NOTE(review): fileinput and shutil are imported but never used in this
# script — presumably leftovers from earlier tooling; confirm before removing.
import fileinput
import shutil
from tkinter import *
import time
# Root window of the live state-estimation display.
root=Tk()
# One StringVar per monitored bus (14 in total); each backs a Label created
# below and is refreshed frame-by-frame in update_label().
variable1=StringVar()
variable2=StringVar()
variable3=StringVar()
variable4=StringVar()
variable5=StringVar()
variable6=StringVar()
variable7=StringVar()
variable8=StringVar()
variable9=StringVar()
variable10=StringVar()
variable11=StringVar()
variable12=StringVar()
variable13=StringVar()
variable14=StringVar()
def update_label(filename):
    """Replay state-estimation frames from *filename* into the GUI.

    The file consists of consecutive frames of N = 14 lines, one line per
    bus in the form ``<bus> <voltage> <angle>``.  Each frame updates the
    14 StringVars (and therefore the labels), refreshes the window, and
    pauses half a second before the next frame.
    """
    # Close the file deterministically instead of relying on GC.
    with open(filename) as f:
        lines = f.readlines()

    # One StringVar per bus, in display order; replaces the 14 copy-pasted
    # variableX.set(...) statements of the original implementation.
    state_vars = [variable1, variable2, variable3, variable4, variable5,
                  variable6, variable7, variable8, variable9, variable10,
                  variable11, variable12, variable13, variable14]

    N = 14
    k = 0
    # Integer division ignores a trailing partial frame; the original
    # float-valued loop bound would raise IndexError on such input.
    for _ in range(len(lines) // N):
        state = [lines[k + j].strip().split() for j in range(N)]
        k += N
        for var, record in zip(state_vars, state):
            bus, volt, angle = record[0], record[1], record[2]
            var.set("V" + str(bus) + " = " + str(volt) + "\t"
                    + "\u03B8" + str(bus) + " = " + str(angle))
        root.update()
        time.sleep(0.5)
root.title("State Estimation Results")
# One label per bus; each re-renders automatically whenever its StringVar
# is updated by update_label().
label1=Label(root,fg="dark green",textvariable=variable1)
label1.pack()
label2=Label(root,fg="dark green",textvariable=variable2)
label2.pack()
label3=Label(root,fg="dark green",textvariable=variable3)
label3.pack()
label4=Label(root,fg="dark green",textvariable=variable4)
label4.pack()
label5=Label(root,fg="dark green",textvariable=variable5)
label5.pack()
label6=Label(root,fg="dark green",textvariable=variable6)
label6.pack()
label7=Label(root,fg="dark green",textvariable=variable7)
label7.pack()
label8=Label(root,fg="dark green",textvariable=variable8)
label8.pack()
label9=Label(root,fg="dark green",textvariable=variable9)
label9.pack()
label10=Label(root,fg="dark green",textvariable=variable10)
label10.pack()
label11=Label(root,fg="dark green",textvariable=variable11)
label11.pack()
label12=Label(root,fg="dark green",textvariable=variable12)
label12.pack()
label13=Label(root,fg="dark green",textvariable=variable13)
label13.pack()
label14=Label(root,fg="dark green",textvariable=variable14)
label14.pack()
# Button that closes the window (and thereby ends the program).
stop_button=Button(root,text="stop",width=25,command=root.destroy)
stop_button.pack()
# Replay the results file, then hand control to Tk's event loop.
update_label("../files/outputSE.txt")
root.mainloop()
|
# Reads x, y, k and decides whether a walk of exactly k unit steps can end
# at (x, y): k must be at least the Manhattan distance |x| + |y| and have
# the same parity (extra steps come in back-and-forth pairs).
*a, = map(int, input().split())
x, y, k = abs(a[0]), abs(a[1]), a[2]
dist = x + y
print('Yes' if k >= dist and (k - dist) % 2 == 0 else 'No')
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import { input as inputs, output as outputs } from "../types";
import * as utilities from "../utilities";
/**
* Retrieve information about a set of folders based on a parent ID. See the
* [REST API](https://cloud.google.com/resource-manager/reference/rest/v3/folders/list)
* for more details.
*
* ## Example Usage
* ### Searching For Folders At The Root Of An Org
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const my-org-folders = gcp.organizations.getFolders({
* parentId: `organizations/${_var.organization_id}`,
* });
* const first-folder = my_org_folders.then(my_org_folders => gcp.organizations.getFolder({
* folder: my_org_folders.folders?[0]?.name,
* }));
* ```
*/
export function getFolders(args: GetFoldersArgs, opts?: pulumi.InvokeOptions): Promise<GetFoldersResult> {
if (!opts) {
opts = {}
}
// Pin the invoke to this SDK's version unless the caller set one.
if (!opts.version) {
opts.version = utilities.getVersion();
}
// Delegates to the provider's folders/list data source.
return pulumi.runtime.invoke("gcp:organizations/getFolders:getFolders", {
"parentId": args.parentId,
}, opts);
}
/**
* A collection of arguments for invoking getFolders.
*/
export interface GetFoldersArgs {
/**
* A string parent as defined in the [REST API](https://cloud.google.com/resource-manager/reference/rest/v3/folders/list#query-parameters).
*/
parentId: string;
}
/**
* A collection of values returned by getFolders.
*/
export interface GetFoldersResult {
/**
* A list of projects matching the provided filter. Structure is defined below.
*/
readonly folders: outputs.organizations.GetFoldersFolder[];
/**
* The provider-assigned unique ID for this managed resource.
*/
readonly id: string;
readonly parentId: string;
}
// Output-space variant of getFolders: accepts pulumi Inputs and resolves
// once the wrapped argument values are known.
export function getFoldersOutput(args: GetFoldersOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output<GetFoldersResult> {
return pulumi.output(args).apply(a => getFolders(a, opts))
}
/**
* A collection of arguments for invoking getFolders.
*/
export interface GetFoldersOutputArgs {
/**
* A string parent as defined in the [REST API](https://cloud.google.com/resource-manager/reference/rest/v3/folders/list#query-parameters).
*/
parentId: pulumi.Input<string>;
}
|
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.StringTokenizer;
public class C {
// N: length of the input string; A: the string; arr[c]: maximum allowed
// length of a piece whose most restrictive letter is c.
static int N;
static char[] A;
static int[] arr;
// Shared memo table, re-filled with -1 before each of the three passes:
// dp[position][start index of the current piece][letter (0-25) with the
// smallest allowed piece length seen in the current piece].
static long[][][] dp=new long[1001][1001][27];
// Reads the string and 26 per-letter limits, then prints: the number of
// valid splits mod 1e9+7, the maximum piece length over valid splits,
// and the minimum number of pieces (as the three recursions appear to
// compute — competitive-programming style, unverified here).
public static void main(String[] args) throws Throwable {
BufferedReader in=new BufferedReader(new InputStreamReader(System.in));
N=Integer.parseInt(in.readLine());
A=in.readLine().toCharArray();
arr=new int[26];
StringTokenizer st=new StringTokenizer(in.readLine());
for(int i=0;i<26;i++)arr[i]=Integer.parseInt(st.nextToken());
for(long[][] a:dp)
for(long[] b:a)
Arrays.fill(b, -1);
System.out.println(f(1,0,A[0]-'a'));
for(long[][] a:dp)
for(long[] b:a)
Arrays.fill(b, -1);
System.out.println(max(0,0,A[0]-'a'));
for(long[][] a:dp)
for(long[] b:a)
Arrays.fill(b, -1);
System.out.println(min(0,0,A[0]-'a'));
}
// Number of ways (mod 1e9+7) to finish splitting from index a, where the
// current piece started at ant and max is its tightest-limit letter;
// returns 0 once the piece exceeds arr[max].
static long f(int a, int ant, int max) {
if(a-ant>arr[max])return 0;
if(a==N)return 1;
if(dp[a][ant][max]>=0)
return dp[a][ant][max];
return dp[a][ant][max]=(f(a+1,ant,arr[A[a]-'a']<arr[max]?A[a]-'a':max)+f(a+1,a,A[a]-'a'))%1000000007;
}
// Longest piece length achievable in any valid split (same state space).
static long max(int a, int ant, int max) {
if(a-ant>arr[max])return 0;
if(a==N)return a-ant;
if(dp[a][ant][max]>=0)
return dp[a][ant][max];
return dp[a][ant][max]=Math.max(max(a+1,ant,arr[A[a]-'a']<arr[max]?A[a]-'a':max),Math.max(max(a+1,a,A[a]-'a'),a-ant));
}
// Minimum number of pieces in a valid split; 10000 acts as "infeasible".
static long min(int a, int ant, int max) {
if(a-ant>arr[max])return 10000;
if(a==N)return 1;
if(dp[a][ant][max]>=0)
return dp[a][ant][max];
return dp[a][ant][max]=Math.min(min(a+1,ant,arr[A[a]-'a']<arr[max]?A[a]-'a':max),min(a+1,a,A[a]-'a')+1);
}
/*static int max(int a, int cant, int letter) {
System.out.println(a+" "+cant+" "+(char)(letter+'a'));
if(cant>arr[letter])return Integer.MIN_VALUE;
if(a==N)return 0;
return Math.max(f(a+1,1,A[a]-'a')+1, f(a+1,letter==A[a]-'a'?cant+1:1,A[a]-'a')+1);
}*/
}
|
// NewTransport returns an http.RoundTripper that modifies requests according to
// the RequestModifiers passed in the arguments, before sending the requests to
// the base http.RoundTripper (which, if nil, defaults to http.DefaultTransport).
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
return &transport{
Modifiers: modifiers,
Base: base,
}
} |
<gh_stars>1-10
package main
import (
"bytes"
"flag"
"fmt"
"log"
"net/rpc"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/cloudinterfaces/lrpc/client"
"github.com/cloudinterfaces/lrpc/demo"
)
func main() {
flag.Parse()
funcName := flag.Arg(0)
if len(funcName) == 0 {
log.Println("Function name required as first argument")
log.Fatal("demo funcName")
}
codec, err := client.DefaultCodec(funcName)
if err != nil {
log.Printf("AWS_REGION may need to be set to use function name: %s", funcName)
log.Fatal(err)
}
buf := new(bytes.Buffer)
logger := log.New(buf, "", 0)
client.SetLogger(codec, logger.Printf)
c := rpc.NewClientWithCodec(codec)
var out string
quot := demo.Quotient{}
args := demo.Args{A: 5, B: 2}
log.Printf("Calling Arith.Divide with: %#v", args)
if err = c.Call("Arith.Divide", &args, "); err != nil {
log.Fatal("Unexpected error:", err)
}
out = fmt.Sprintf("%v / %v = %v", args.A, args.B, quot.Quo)
if quot.Rem > 0 {
out += fmt.Sprintf(" with remainder %v/%v", quot.Rem, args.B)
}
log.Println(out)
log.Println("Calling Arith.Panic")
if err = c.Call("Arith.Panic", &out, &out); err != nil {
log.Println("Expected panic: ", err)
}
log.Println("Calling Arith.Error")
if err = c.Call("Arith.Error", &out, &out); err != nil {
log.Println("Expected err: ", err)
}
log.Println("Calling Arith.BadIdea")
if err = c.Call("Arith.BadIdea", new(struct{}), &out); err != nil {
log.Fatal(err)
}
log.Printf("Request ID was: %s", out)
log.Println("Calling Arith.Divide via JSON-RPC", args)
if lam := codec.(interface {
Lambda() *lambda.Lambda
}); lam != nil {
l := lam.Lambda()
req := lambda.InvokeInput{
FunctionName: &funcName,
Payload: []byte(`{"method":"Arith.Divide","params":{"A":5,"B":2},"id":"one"}`),
}
res, err := l.Invoke(&req)
if err != nil {
log.Fatal(err)
}
if res.FunctionError != nil {
log.Fatalf("FunctionError: %s", *res.FunctionError)
}
log.Printf("Output: %s", string(res.Payload))
}
log.Println("Cloudwatch logs: ")
fmt.Println(buf.String())
}
|
async def _poll_dead_pid(pid, callback, args):
    # Poll a child process with increasing back-off delays until it is
    # reaped, then fire the caller's callback with its exit status.
    # If the process still has not exited after the final delay, log a
    # critical message and give up.
    # NOTE(review): assumes wait_pid() returns None while the process is
    # still running and an exit status once reaped — confirm against its
    # definition elsewhere in this file.
    backoffs = (0.001, 0.01, 0.1, 1.0, 2.0)
    for delay in backoffs:
        await asyncio.sleep(delay)
        exit_status = wait_pid(pid)
        if exit_status is None:
            continue  # not reaped yet; try again after a longer wait
        _invoke_callback(callback, pid, exit_status, args)
        return
    LOGGER.critical("Pid %r is not exiting after several seconds.", pid)
Patrick Kaleta of the Buffalo Sabres is serving a 10-game suspension for his hit to the head of Jack Johnson of the Columbus Blue Jackets, which he’s currently appealing. On Friday, he addressed the suspension and his over-the-edge style of play … which he says he’s trying to change.
From the Sabres:
Scroll to continue with content Ad
Kaleta says he’s already been working on adjusting his style and continues to do so. “I’ve been changing my game over the past little while. Everyone who’s watched the Sabres and has paid close attention has seen it,” he said. “Like I said, I just need to keep continuing to get better and do what’s best for the team and try to be a positive influence on what’s going on here.”
If that sounds a little bit like what we heard from a guy like Matt Cooke after he was hit with a big suspension from the NHL, well, Cooke reached out to Kaleta and Kaleta respects his input:
From the Sabres:
At first, Kaleta was surprised to hear from Cooke. “It was kind of cool because as you guys know, I don’t really expect to have a lot of friends on other teams and stuff like that,” Kaleta said. “But it’s interesting because I don’t know his background or anything, but I’m assuming he’s worked pretty hard to get to where he is. “We’re sort of in the same boat there so he kind of understands where I’m coming from and what I have to do to stay in the League and what I have to do to help out the team. It was cool in that aspect and I respect everything he said and hopefully I can use what he said and help myself and my game out.”
Kaleta is eligible to return for the Buffalo Sabres’ home game against the Anaheim Ducks on Nov. 2, pending the appeal. |
Unsupervised Learning of Visual Features through Spike Timing Dependent Plasticity
Spike timing dependent plasticity (STDP) is a learning rule that modifies synaptic strength as a function of the relative timing of pre- and postsynaptic spikes. When a neuron is repeatedly presented with similar inputs, STDP is known to have the effect of concentrating high synaptic weights on afferents that systematically fire early, while postsynaptic spike latencies decrease. Here we use this learning rule in an asynchronous feedforward spiking neural network that mimics the ventral visual pathway and shows that when the network is presented with natural images, selectivity to intermediate-complexity visual features emerges. Those features, which correspond to prototypical patterns that are both salient and consistently present in the images, are highly informative and enable robust object recognition, as demonstrated on various classification tasks. Taken together, these results show that temporal codes may be a key to understanding the phenomenal processing speed achieved by the visual system and that STDP can lead to fast and selective responses.
Introduction
Temporal constraints pose a major challenge to models of object recognition in cortex. When two images are simultaneously flashed to the left and right of fixation, human subjects can make reliable saccades to the side where there is a target animal in as little as 120-130 ms . If we allow 20-30 ms for motor delays in the oculomotor system, this implies that the underlying visual processing can be done in 100 ms or less. In monkeys, recent recordings from inferotemporal cortex (IT) showed that spike counts over time bins as small as 12.5 ms (which produce essentially a binary vector with either ones or zeros) and only about 100 ms after stimulus onset contain remarkably accurate information about the nature of a visual stimulus . This sort of rapid processing presumably depends on the ability of the visual system to learn to recognize familiar visual forms in an unsupervised manner. Exactly how this learning occurs constitutes a major challenge for theoretical neuroscience. Here we explored the capacity of simple feedforward network architectures that have two key features. First, when stimulated with a flashed visual stimulus, the neurons in the various layers of the system fire asynchronously, with the most strongly activated neurons firing first-a mechanism that has been shown to efficiently encode image information . Second, neurons at later stages of the system implement spike timing dependent plasticity (STDP), which is known to have the effect of concentrating high synaptic weights on afferents that systematically fire early . We demonstrate that when such a hierarchical system is repeatedly presented with natural images, these intermediate-level neurons will naturally become selective to patterns that are reliably present in the input, while their latencies decrease, leading to both fast and informative responses. This process occurs in an entirely unsupervised way, but we then show that these intermediate features are able to support categorization.
Our network belongs to the family of feedforward hierarchical convolutional networks, as in . To be precise, its architecture is inspired from Serre, Wolf, and Poggio's model of object recognition , a model that itself extends HMAX and performs remarkably well with natural images. Like them, in an attempt to model the increasing complexity and invariance observed along the ventral pathway , we use a four-layer hierarchy (S1-C1-S2-C2) in which simple cells (S) gain their selectivity from a linear sum operation, while complex cells (C) gain invariance from a nonlinear max pooling operation (see Figure 1 and Methods for a complete description of our model). Nevertheless, our network does not only rely on static nonlinearities: it uses spiking neurons and operates in the temporal domain. At each stage, the time to first spike with respect to stimulus onset (or, to be precise, the rank of the first spike in the spike train, as we will see later) is supposed to be the ''key variable,'' that is, the variable that contains information and that is indeed read out and processed by downstream neurons. When presented with an image, the first layer's S1 cells, emulating V1 simple cells, detect edges with four preferred orientations, and the more strongly a cell is activated, the earlier it fires. This intensity-latency conversion is in accordance with recordings in V1 showing that response latency decreases with the stimulus contrast and with the proximity between the stimulus orientation and the cell's preferred orientation . It has already been shown how such orientation selectivity can emerge in V1 by applying STDP on spike trains coming from retinal ON-and OFF-center cells , so we started our model from V1 orientation-selective cells. 
We also limit the number of spikes at this stage by introducing competition between S1 cells through a one-winner-take-all mechanism: at a given location-corresponding to one cortical columnonly the spike corresponding to the best matching orientation is propagated (sparsity is thus 25% at this stage). Note that k-winner-take-all mechanisms are easy to implement in the temporal domain using inhibitory GABA interneurons .
These S1 spikes are then propagated asynchronously through the feedforward network of integrate-and-fire neurons. Note that within this time-to-first-spike framework, the maximum operation of complex cells simply consists of propagating the first spike emitted by a given group of afferents . This can be done efficiently with an integrateand-fire neuron with low threshold that has synaptic connections from all neurons in the group.
Images are processed one by one, and we limit activity to at most one spike per neuron, that is, only the initial spike wave is propagated. Before presenting a new image, every neuron's potential is reset to zero. We process various scaled versions of the input image (with the same filter size). There is one S1-C1-S2 pathway for each processing scale (not represented on Figure 1). This results in S2 cells with various receptive field sizes (see Methods). Then C2 cells take the maximum response (i.e., first spike) of S2 cells over all positions and scales, leading to position and scale invariant responses.
This paper explains how STDP can set the C1-S2 synaptic connections, leading to intermediate-complexity visual features, whose equivalent in the brain may be in V4 or IT. STDP is a learning rule that modifies the strength of a neuron's synapses as a function of the precise temporal relations between pre-and postsynaptic spikes: an excitatory synapse receiving a spike before a postsynaptic one is emitted is potentiated (long-term potentiation) whereas its strength is weakened the other way around (long-term depression) . The amount of modification depends on the delay between these two events: maximal when pre-and postsynaptic spikes are close together, and the effects gradually decrease and disappear with intervals in excess of a few tens of milliseconds . Note that STDP is in agreement with Hebb's postulate because presynaptic neurons that fired slightly before the postsynaptic neuron are those that ''took part in firing it.'' Here we used a simplified STDP rule where the weight modification does not depend on the delay between pre-and postsynaptic spikes, and the time window is supposed to cover the whole spike wave (see Methods). We also use 0 and 1 as ''soft bounds'' (see Methods), ensuring the synapses remain excitatory. Several authors have studied the effect of STDP with Poisson spike trains . Here, we demonstrate STDP's remarkable ability to detect statistical regularities in terms of earliest firing afferent patterns within visual spike trains, despite their very high dimensionality inherent to natural images.
Visual stimuli are presented sequentially, and the resulting spike waves are propagated through to the S2 layer, where STDP is used. We use restricted receptive fields (i.e., S2 cells only integrate spikes from an s 3 s square neighborhood in the C1 maps corresponding to one given processing scale) and weight-sharing (i.e., each prototype S2 cell is duplicated in retinotopic maps and at all scales). Starting with a random weight matrix (size ¼ 4 3 s 3 s), we present the first visual stimuli. Duplicated cells are all integrating the spike train and compete with each other. If no cell reaches its threshold, nothing happens and we process the next image. Otherwise for each prototype the first duplicate to reach its threshold is the winner. A one-winner-take-all mechanism prevents the other duplicated cells from firing. The winner thus fires and the STDP rule is triggered. Its weight matrix is updated, and the change in weights is duplicated at all positions and scales. This allows the system to learn patterns despite changes in position and size in the training examples. We also use local inhibition between different prototype cells: when a cell fires at a given position and scale, it prevents all other cells from firing later at the same scale and within an s/2 3 s/2 square neighborhood of the firing position. This competition, only used in the learning phase, prevents all the cells from learning the same pattern. Instead, the cell population self-organizes, each cell trying to learn a distinct pattern so as to cover the whole variability of the inputs.
If the stimuli have visual features in common (which should be the case if, for example, they contain similar objects), the STDP process will extract them. That is, for some cells we will observe convergence of the synaptic weights (by saturation), which end up being either close to 0 or to 1. During the convergence process, synapses compete for control of the timing of postsynaptic spikes . The winning synapses are those through which the earliest spikes arrive (on average) , and this is true even in the presence of jitter and spontaneous activity (although the model presented in this paper is fully deterministic). This ''preference'' for the earliest spikes is a key point since the earliest spikes, which correspond in our framework to the most salient regions of an image, have been shown to be the most informative . During the learning, the postsynaptic spike latency decreases . After convergence, the responses become selective (in terms of latency)
Author Summary
The paper describes a new biologically plausible mechanism for generating intermediate-level visual representations using an unsupervised learning scheme. These representations can then be used very effectively to perform categorization tasks using natural images. While the basic hierarchical architecture of the system is fairly similar to a number of other recent proposals, the key differences lie in the level of description that is used-individual neurons and spikes-and in the sort of coding scheme involved. Essentially, we have found that a combination of a temporal coding scheme where the most strongly activated neurons fire first with spike timing dependent plasticity leads to a situation where neurons in higher order visual areas will gradually become selective to frequently occurring feature combinations. At the same time, their responses become more and more rapid. We firmly believe that such mechanisms are a key to understanding the remarkable efficiency of the primate visual system.
consistently among the earliest to fire. STDP detects these kinds of statistical regularities among the spike trains and creates one unit for each distinct pattern.
Results
We evaluated our STDP-based learning algorithm on two California Institute of Technology datasets, one containing faces and the other motorbikes, and a distractor set containing backgrounds, all available at http://www.vision. caltech.edu (see Figure 2 for sample pictures). Note that most of the images are not segmented. Each dataset was split into a training set, used in the learning phase, and a testing set, not seen during the learning phase but used afterward to evaluate the performance on novel images. This standard crossvalidation procedure allows the measurement of the system's ability to generalize, as opposed to learning the specific training examples. The splits used were the same as Fergus, Perona, and Zisserman . All images were rescaled to be 300 pixels in height (preserving the aspect ratio) and converted to grayscale values.
We first applied our unsupervised STDP-based algorithm on the face and motorbike training examples (separately), presented in random order, to build two sets of ten classspecific C2 features. Each C2 cell has one preferred input, defined as a combination of edges (represented by C1 cells). Note that many gray-level images may lead to this combination of edges because of the local max operation of C1 cells and because we lose the ''polarity'' information (i.e., which side of the edge is darker). However, we can reconstruct a representation of the set of preferred images by convolving the weight matrix with a set of kernels representing oriented bars. Since we start with random weight matrices, at the beginning of the learning process the reconstructed preferred stimuli do not make much sense. But as the cells learn, structured representations emerge, and we are usually able to identify the nature of the cells' preferred stimuli. Figures 3 and 4 show the reconstructions at various stages of learning for the face and motorbike datasets, respectively. We stopped the learning after 10,000 presentations.
Then we turned off the STDP rule and tested these STDPobtained features' ability to support face/nonface and motorbike/nonmotorbike classification. This paper focuses more on feature extraction than on sophisticated classification methods, so we first used a very simple decision rule based on the number of C2 cells that fired with each test image, on which a threshold is applied. Such a mechanism could be easily implemented in the brain. The threshold was set at the equilibrium point (i.e., when the false positive rate equals the missed rate). In Table 1 we report good classification results with this ''simple-count'' scheme in terms of area under the receiver operator characteristic (ROC) and the performance rate at equilibrium point.
We also evaluated a more complicated classification scheme. C2 cells' thresholds were supposed to be infinite, and we measured the final potentials they reached after having integrated the whole spike train generated by the image. This final potential can be seen as the number of early spikes in common between a current input and a stored prototype (this contrasts with HMAX and extensions , where a Euclidian distance or a normalized dot product is used to measure the difference between a stored prototype Figure 1. Overview of the Five-Layer Feedforward Spiking Neural Network As in HMAX , we alternate simple cells that gain selectivity through a sum operation, and complex cells that gain shift and scale invariance through a max operation (which simply consists of propagating the first received spike). Cells are organized in retinotopic maps until the S2 layer (inclusive). S1 cells detect edges. C1 maps subsample S1 maps by taking the maximum response over a square neighborhood. S2 cells are selective to intermediatecomplexity visual features, defined as a combination of oriented edges (here we symbolically represented an eye detector and a mouth detector). There is one S1-C1-S2 pathway for each processing scale (not represented). Then C2 cells take the maximum response of S2 cells over all positions and scales and are thus shift-and scale-invariant. Finally, a classification is done based on the C2 cells' responses (here we symbolically represented a face/nonface classifier). In the brain, equivalents of S1 cells may be in V1, S2 cells in V1-V2, S2 cells in V4-PIT, C2 cells in AIT, and the final classifier in PFC. This paper focuses on the learning of C1 to S2 synaptic connections through STDP. doi:10.1371/journal.pcbi.0030031.g001 and a current input). Note that this potential is contrast invariant: a change in contrast will shift all the latencies but will preserve the spike order. 
The final potentials reached with the training examples were used to train a radial basis function (RBF) classifier (see Methods). We chose this classifier because linear combination of Gaussian-tuned units is hypothesized to be a key mechanism for generalization in the visual system . We then evaluated the RBF on the testing sets. As can be seen in Table 1, performance with this ''potential þ RBF'' scheme was better.
Using only ten STDP-learnt features, we reached on those two classes a performance that is comparable to that of Serre, Wolf, and Poggio's model, which itself is close to the best state-of-the-art computer vision systems . However, their system is more generic. Classes with more intraclass variability (for example, animals) appear to pose a problem with our approach because a lot of training examples (say a few tens) of a given feature type are needed for the STDP process to learn it properly.
Our approach leads to the extraction of a small set (here ten) of highly informative class-specific features. This is in contrast with Serre et al.'s approach where many more (usually about a thousand) features are used. Their sets are more generic and are suitable for many different classes . They rely on the final classifier to ''select'' diagnostic features and appropriately weight them for a given classification task.
Here, STDP will naturally focus on what is common to the positive training set, that is, target object features. The background is generally not learned (at least not in priority), since backgrounds are almost always too different from one image to another for the STDP process to converge. Thus, we directly extract diagnostic features, and we can obtain reasonably good classification results using only a threshold on the number of detected features. Furthermore, as STDP performs vector quantization from multiple examples as opposed to ''one-shot learning,'' it will not learn the noise, nor anything too specific to a given example, with the result that it will tend to learn archetypical features.
Another key point is the natural trend of the algorithm to learn salient regions, simply because they correspond to the earliest spikes, with the result that neurons whose receptive fields cover salient regions are likely to reach their threshold (and trigger the STDP rule) before neurons ''looking'' at other regions. This contrasts with more classical competitive learning approaches, where input normalization helps different input patterns to be equally effective in the learning process . Note that ''salient'' means within our network ''with well-defined contrasted edges,'' but saliency is a more generic concept of local differences, for example, in intensity, color, or orientations as in the model of Itti, Koch, and Niebur . We could use other types of S1 cells to detect other types of saliency, and, provided we apply the same intensity-latency conversion, STDP would still focus on the most salient regions. Saliency is known to drive attention (see for a review). Our model predicts that it also drives the learning. Future experimental work will test this prediction.
Of course, in real life we are unlikely to see many examples of a given category in a row. That is why we performed a second simulation, where 20 C2 cells were presented with the face, motorbike, and background training pictures in random order, and the STDP rule was applied. Figure 5 shows all the reconstructions for this mixed simulation after 20,000 presentations. We see that the 20 cells self-organized, some of them having developed selectivity to face features, and others to motorbike features. Interestingly, during the learning process the cells rapidly showed a preference for one category. After a certain degree of selectivity had been reached, the face-feature learning was not influenced by the presentation of motorbikes (and vice versa), simply because face cells will not fire (and trigger the STDP rule) on motorbikes. Again we tested the quality of these features with a (multiclass) classification task, using an RBF network and a ''one-versus-all'' approach (see Methods). As before, we tested two implementations: one based on ''binary detections þ RBF'' and one based on ''potential þ RBF''. Note that a simple detection count cannot work here, as we need at least some supervised learning to know which feature (or feature combination) is diagnostic (or antidiagnostic) of which class. Table 2 shows the confusion matrices obtained on the testing sets for both implementations, leading, respectively, to 95.0% and 97.7% of correct classifications on average. It is worth mentioning that the ''potential þ RBF'' system perfectly discriminated between faces and motorbikes-although both were presented in the unsupervised STDP-based learning phase.
A third type of simulation was run to illustrate the STDP learning process. For these simulations, only three C2 cells and four processing scales (71%, 50%, 35%, and 25%) were used. We let at most one cell fire at each processing scale. The rest of the parameters were strictly identical to the other simulations (see Methods). Videos S1-S3 illustrate the STDP learning process with, respectively, faces, motorbikes, and a mix of faces, motorbikes, and background pictures. It can be seen that after convergence the STDP feature showed a good tradeoff between selectivity (very few false alarms) and invariance (most of the targets were recognized).
An interesting control is to compare the STDP learning rule with a more standard hebbian rule in this precise framework. For this purpose, we converted the spike trains coming from C1 cells into a vector of (real-valued) C1 activities $X_{C1}$, supposed to correspond to firing rates (see Methods). Each S2 cell was no longer modeled at the integrate-and-fire level but was supposed to respond with a (static) firing rate $Y_{S2}$ given by the normalized dot product: $Y_{S2} = \frac{W_{S2} \cdot X_{C1}}{\|W_{S2}\|\,\|X_{C1}\|}$, where $W_{S2}$ is the synaptic weight vector of the S2 cell (see Methods). The S2 cells still competed with each other, but the k-winner-take-all mechanisms now selected the cells with the highest firing rates (instead of the first one to fire). Only the cells whose firing rates reached a certain threshold were considered in the competition (see Methods). The winners now triggered the following modified hebbian rule (instead of STDP): $\Delta W_{S2} = \alpha\, Y_{S2} \left( X_{C1} - Y_{S2} W_{S2} \right)$, where a decay term has been added to keep the weight vector bounded (however, the rule is still local, unlike an explicit weight normalization). Note that this precaution was not needed in the STDP case because competition between synapses naturally bounds the weight vector. The rest of the network is strictly identical to the STDP case. Figure 6 shows the reconstruction of the preferred stimuli for the ten C2 cells after 10,000 presentations for the face stimuli (Figure 6, top) and the motorbike stimuli (Figure 6, bottom). Again we can usually recognize the face and motorbike parts to which the cells became selective (even though the reconstructions look fuzzier than in the STDP case because the final weights are more graded). We also tested the ability of these hebbian-obtained features to support face/nonface and motorbike/nonmotorbike classification once fed into an RBF, and the results are shown in Table 1 (last column). We also evaluated the hebbian features with the multiclass setup.
Twenty cells were presented with the same mix of face, motorbike, and background pictures as before. Figure 7 shows the final reconstructions after 20,000 presentations, and Table 2 shows the confusion matrix (last columns).
The main conclusion is that the modified hebbian rule is also able to extract pertinent features for classification (although performance on these tests appears to be slightly worse). This is not very surprising as STDP can be seen as a hebbian rule transposed in the temporal domain, but it was worth checking. Where STDP would detect (and create selectivity to) sets of units that are consistently among the first ones to fire, the hebbian rule detects (and creates selectivity to) sets of units that consistently have the highest firing rates. However, we believe the temporal framework is a better description of what really happens at the neuronal level, at least in ultrarapid categorization tasks. Furthermore, STDP also explains how the system becomes faster and faster with training, since the neurons learn to decode the first information available at their afferents' level (see also Discussion).
Discussion
While the ability of hierarchical feedforward networks to support classification is now reasonably well established (e.g., ), how intermediate-complexity features can be learned remains an open problem, especially with cluttered images. In the original HMAX model, S2 features were not learned but were manually hardwired . Later versions used huge sets of random crops (say 1,000) taken from natural images and used these crops to ''imprint'' S2 cells . This approach works well but is costly since redundancy is very high between features, and many features are irrelevant for most (if not all) of the tasks. To select only pertinent features for a given task, Ullman proposed an interesting criterion based on mutual information , leaving the question of possible neural implementation open. LeCun showed how visual features in a convolutional network could be learned in a supervised manner using back-propagation , without claiming this algorithm was biologically plausible. Although we may occasionally use supervised learning to create a set of features suitable for a particular recognition task, it seems unrealistic that we need to do that each time we learn a new class. Here we took another approach: one layer with unsupervised competitive learning is used as input for a second layer with supervised learning. Note that this kind of hybrid scheme has been found to learn much faster than a two-layer backpropagation network .
Our approach is a bottom-up one: instead of intuiting good image-processing schemes and discussing their eventual neural correlates, we took known biological phenomena that occur at the neuronal level, namely integrate-and-fire and STDP, and observed where they could lead at a more integrated level. The role of the simulations with natural images is thus to provide a ''plausibility proof'' that such mechanisms could be implemented in the brain.
However, we have made four main simplifications. The first one was to propagate input stimuli one by one. This may correspond to what happens when an image is flashed in an ultrarapid categorization paradigm , but normal visual perception is an ongoing process. However, every 200 ms or 300 ms we typically perform a saccade. The processing of each of these discrete ''chunks'' seems to be optimized for rapid execution , and we suggest that much can be done with the feedforward propagation of a single spike wave. Furthermore, even when fixating, our eyes are continuously making microsaccades that could again result in repetitive waves of activation. This idea is in accordance with electrophysiological recordings showing that V1 neuron activity is correlated with microsaccades . Here we assumed the successive waves did not interfere, which does not seem too unreasonable given that the neuronal time constants (integration, leak, STDP window) are in the range of a few tens of milliseconds whereas the interval between saccades and microsaccades is substantially longer. It is also possible that extraretinal signals suppress interference by shutting down any remaining activity before propagating the next wave. Note that this simplification allows us to use nonleaky integrate-and-fire neurons and an infinite STDP time window. More generally, as proposed by Hopfield , waves could be generated by population oscillations that would fire one cell at a time in advance of the maximum of the oscillation, which increases with the inputs the cell received. This idea is in accordance with recordings in area 17 of cat visual cortex showing that suboptimal cells reveal a systematic phase lag relative to optimally stimulated cells .
The second simplification we have made is to use only five layers (including the classification layer), whereas processing in the ventral stream involves many more layers (probably about ten), and complexity increases more slowly than suggested here. However, STDP as a way to combine simple features into more complex representations, based on statistical regularities among earliest spike patterns, seems to be a very efficient learning rule and could be involved at all stages.
The third main simplification we have made consists of using restricted receptive fields and weight sharing, as do most of the bio-inspired hierarchical networks (networks using these techniques are called convolutional networks). We built shift and scale invariance by structure (and not by training) by duplicating S1, C1, and S2 cells at all positions and scales. This is a way to reduce the number of free parameters (and therefore the VC dimension ) of the network by incorporating prior information into the network design: responses should be scale-and shiftinvariant. This greatly reduces the number of training examples needed. Note that this technique of weight sharing could be applied to other transformations than shifting and scaling, for instance, rotation and symmetry. However, it is difficult to believe that the brain could really use weight sharing since, as noted by Fö ldiá k , updating the weights of all the simple units connected to the same complex unit is a nonlocal operation. Instead, he suggested that at least the low-level features could be learned locally and independently. Subsequently, cells with similar preferred stimulus may connect adaptively to the same complex cell, possibly by detecting correlation across time thanks to a trace rule . Wallis, Rolls, and Milward successfully implemented this sort of mechanism in a multilayered hierarchical network called Vis-Net ; however, performance after learning objects from unsegmented natural images was poor . Future work will evaluate the use of local learning and adaptative complex pooling in our network, instead of exact weight sharing. Learning will be much slower but should lead to similar STDP features. Note that it seems that monkeys can recognize high-level objects at scales and positions that have not been experienced previously . It could be that in the brain local learning and adaptative complex pooling are used up to a certain level of complexity, but not for high-level objects. 
These high-level objects could be represented with a combination of simpler features that would already be shift-and scaleinvariant. As a result, there would be less need for spatially specific representations for high-level objects.
The last main simplification we have made is to ignore both feedback loops and top-down influences. While normal, everyday vision extensively uses feedback loops, the temporal constraints almost certainly rule them out in an ultrarapid categorization task . The same cannot be said about the top-down signals, which do not depend directly on inputs. For example, there is experimental evidence that the selectivity to the ''relevant'' features for a given recognition task can be enhanced in IT and in V4 , possibly thanks to a top-down signal coming from the prefrontal cortex, thought to be involved in the categorization process. These effects, for example, modeled by Szabo et al. , are not taken into account here.
Despite these four simplifications, we think our model captures two key mechanisms used by the visual system for rapid object recognition. The first one is the importance of the first spikes for rapidly encoding the most important information about a visual stimulus. Given the number of stages involved in high-level recognition and the short latencies of selective responses recorded in monkeys' IT , the time window available for each neuron to perform its computation is probably about 10-20 ms and will rarely contain more than one or two spikes. The only thing that matters for a neuron is whether an afferent fires early enough so that the presynaptic spike falls in the critical time window, while later spikes cannot be used for ultrarapid categorization. At this point (but only at this point), we have to consider two hypotheses: either presynaptic spike times are completely stochastic (for example, drawn from a Poisson distribution), or they are somewhat reliable. The first hypothesis causes problems since the first presynaptic spikes (again the only ones taken into account) will correspond to a subset of the afferents that is essentially random, and will not contain much information about their real activities . A solution to this problem is to use populations of redundant neurons (with similar selectivity) to ensure the first presynaptic spikes do correspond on average to the most active populations of afferents. In this work we took the second hypothesis, assuming the time to first spike of the afferents (or, to be precise, their firing order) was reliable and did reflect a level of activity. This second hypothesis receives experimental support. For example, recent recordings in monkeys show that IT neurons' responses in terms of spike count close to stimulus onset (100-150 ms time bin) seem to be too reliable to be fit by a typical Poisson firing rate model . 
Another recent electrophysiological study in monkeys showed that IT cell's latencies do contain information about the nature of a visual stimulus . There is also experimental evidence for precise spike time responses in V1 and in many other neuronal systems (see for a review).
Very interestingly, STDP provides an efficient way to develop selectivity to first spike patterns, as shown in this work. After convergence, the potential reached by an STDP neuron is linked to the number of early spikes in common between the current input and a stored prototype. This ''early spike'' versus ''later spike'' neural code (while the spike order within each bin does not matter) has not only been proven robust enough to perform object recognition in natural images but is fast to read out: an accurate response can be produced when only the earliest afferents have fired. The use of such a mechanism at each stage of the ventral stream could account for the phenomenal processing speed achieved by the visual system.
Materials and Methods
Here is a detailed description of the network, the STDP model, and the classification methods. S1 cells. S1 cells detect edges by performing a convolution on the input images. We are using 5 × 5 convolution kernels, which roughly correspond to Gabor filters with wavelength of 5 (i.e., the kernel contains one period), effective width 2, and four preferred orientations: π/8, π/4 + π/8, π/2 + π/8, and 3π/4 + π/8 (π/8 is there to avoid focusing on horizontal and vertical edges, which are seldom diagnostic). We apply those filters to five scaled versions of the original image: 100%, 71%, 50%, 35%, and 25%. There are thus 4 × 5 = 20 S1 maps. S1 cells emit spikes with a latency that is inversely proportional to the absolute value of the convolution (the response is thus invariant to an image negative operation). We also limit activity at this stage: at a given processing scale and location, only the spike corresponding to the best matching orientation is propagated.
C1 cells. C1 cells propagate the first spike emitted by S1 cells in a 7 × 7 square of a given S1 map (which corresponds to one preferred orientation and one processing scale). Two adjacent C1 cells in a C1 map correspond to two 7 × 7 squares of S1 cells shifted by six S1 cells (and thus an overlap of one S1 row). C1 maps thus subsample S1 maps. To be precise, neglecting the side effects, there are 6 × 6 = 36 times fewer C1 cells than S1 cells. As proposed by Riesenhuber and Poggio, this maximum operation is a biologically plausible way to gain local shift invariance. From an image processing point of view, it is a way to perform subsampling within retinotopic maps without flattening high spatial frequency peaks (as would be the case with local averaging).
We also use a local lateral inhibition mechanism at this stage: when a C1 cell emits a spike, it increases the latency of its neighbors within an 11 × 11 square in the map with the same preferred orientation and the same scale. The percentage of latency increase decreases linearly with the distance from the spike, from 15% to 5%. As a result, if a region is clearly dominated by one orientation, cells will inhibit each other and the spike train will be globally late and thus unlikely to be ''selected'' by STDP.
S2 cells. S2 cells correspond to intermediate-complexity visual features. Here we used ten prototype S2 cell types, and 20 in the mixed simulation. Each prototype cell is duplicated in five maps (weight sharing), each map corresponding to one processing scale. Within those maps, the S2 cells can integrate spikes only from the four C1 maps of the corresponding processing scale. The receptive field size is 16 × 16 C1 cells (neglecting the side effects; this leads to 96 × 96 S1 cells, and the corresponding receptive field size in the original image is 2). C1-S2 synaptic connections are set by STDP.
Note that we did not use a leakage term. In the brain, by progressively resetting membrane potentials toward their resting levels, leakiness will decrease the interference between two successive spike waves. In our model we process spike waves one by one and reset all the potentials before each propagation, and so leaks are not needed.
Finally, activity is limited at this stage: a k-winner-take-all strategy ensures at most two cells that can fire for each processing scale. This mechanism, only used in the learning phase, helps the cells to learn patterns with different real sizes. Without it, there is a natural bias toward ''small'' patterns (i.e., large scales), simply because corresponding maps are larger, and so likeliness of firing with random weights at the beginning of the STDP process is higher.
C2 cells. Those cells take for each prototype the maximum response (i.e., first spike) of corresponding S2 cells over all positions and processing scales, leading to ten shift-and scale-invariant cells (20 in the mixed case).
STDP model. We used a simplified STDP rule: $\Delta w_{ij} = a^{+}\, w_{ij}\,(1 - w_{ij})$ if $t_i - t_j \geq 0$, and $\Delta w_{ij} = a^{-}\, w_{ij}\,(1 - w_{ij})$ otherwise, where $i$ and $j$ refer, respectively, to the post- and presynaptic neurons, $t_i$ and $t_j$ are the corresponding spike times, $\Delta w_{ij}$ is the synaptic weight modification, and $a^{+}$ and $a^{-}$ are two parameters specifying the amount of change. Note that the weight change does not depend on the exact $t_i - t_j$ value, but only on its sign. We also used an infinite time window. These simplifications are equivalent to assuming that the intensity-latency conversion of S1 cells compresses the whole spike wave in a relatively short time interval (say, 20-30 ms), so that all presynaptic spikes necessarily fall close to the postsynaptic spike time, and the change decrease becomes negligible. In the brain, this change decrease and the limited time window are crucial: they prevent different spike waves coming from different stimuli from interfering in the learning process. In our model, we propagate stimuli one by one, so these mechanisms are not needed. Note that with this simplified STDP rule only the order of the spikes matters, not their precise timings. As a result, the intensity-latency conversion function of S1 cells has no impact, and any monotonously decreasing function gives the same results. The multiplicative term $w_{ij} \cdot (1 - w_{ij})$ ensures the weight remains in the range $[0, 1]$ (excitatory synapses) and implements a soft bound effect: when the weight approaches a bound, weight changes tend toward zero.
We also applied long-term depression to synapses through which no presynaptic spike arrived, exactly as if a presynaptic spike had arrived after the postsynaptic one. This is useful to eliminate the noise due to original random weights on synapses through which presynaptic spikes never arrive.
As the STDP learning progresses, we increase $a^{+}$ and $|a^{-}|$. To be precise, we start with $a^{+} = 2^{-6}$ and multiply the value by 2 every 400 postsynaptic spikes, until a maximum value of $2^{-2}$. $a^{-}$ is adjusted so as to keep a fixed $a^{+}/a^{-}$ ratio ($-4/3$). This allows us to accelerate convergence when the preferred stimulus is somewhat ''locked,'' whereas directly using high learning rates with the random initial weights leads to erratic results.
Classification setup. We used an RBF network. In the brain, this classification step may be done in the PFC using the outputs of IT. Let $X$ be the vector of C2 responses (containing either binary detections with the first implementation or final potentials with the second one). This kind of classifier computes an expression of the form $f(X) = \sum_i c_i \exp\left(-\|X - X_i\|^2 / 2\sigma^2\right)$ and then classifies based on whether or not $f(X)$ reaches a threshold. Supervised learning at this stage involves adjusting the synaptic weights $c_i$ so as to minimize a (regularized) error on the training set. The $X_i$ correspond to C2 responses for some training examples (1/4 of the training set randomly selected). The full training set was used to learn the $c_i$. We used $\sigma = 2$ and $\lambda = 10^{-12}$ (regularization parameter). The multiclass case was handled with a ''one-versus-all'' approach. If $n$ is the number of classes (here, three), $n$ RBF classifiers of the kind ''class $i$'' versus ''all other classes'' are trained. At the time of testing, each one of the $n$ classifiers emits a (real-valued) prediction that is linked to the probability of the image belonging to its category. The assigned category is the one that corresponds to the highest prediction value.
Hebbian learning. The spike trains coming from C1 cells were converted into real-valued activities (supposed to correspond to firing rates) by taking the inverse of the first spikes' latencies (note that these activities do not correspond exactly to the convolution values because of the local lateral inhibition mechanism of layer C1). The activities (or firing rates) of S2 units were computed as: where W S2 is the synaptic weight vector of the S2 cell. Note that the normalization causes an S2 cell to respond maximally when the input vector X C1 is collinear to its weight vector W S2 (neural circuits for such normalization have been proposed in ). Hence W S2 (or any vector collinear to it) is the preferred stimulus of the S2 cell. With another stimulus X C1 the response is proportional to the cosine between W S2 and X C1 . This kind of tuning has been used in extensions of HMAX . It is similar to the Gaussian tuning of the original HMAX , but it is invariant to the norm of the input (i.e., multiplying the input activities by 2 has no effect on the response), which allows us to remain contrast-invariant (see also for a comparison between the two kinds of tuning).
Only the cells whose activities were above a threshold were considered in the competition process. It was found useful to use individual adaptative thresholds: each time a cell was among the winners, its threshold was set to 0.91 times its activity (this value was tuned to get approximately the same number of weight updates as with STDP). The competition mechanism was exactly the same as before, except that it selected the most active units and not the first one to fire. The winners' weight vectors were updated with the following modified hebbian rule: a is the learning rate. It was found useful to start with a small learning rate (0.002) and to geometrically increase it every ten iterations. The geometric ratio was set to reach a learning rate of 0.02 after 2,000 iterations, after which the learning rate stayed constant.
Differences from the model of Serre, Wolf, and Poggio. Here we summarize the differences between our model and their model in terms of architecture (leaving the questions of learning and temporal code aside).
We process various scaled versions of the input image (with the same filter size), instead of using various filter sizes on the original image: S1 level, only the best matching orientation is propagated; C1 level, we use lateral inhibition (see above); S2 level, the similarity between a current input and the stored prototype is linked to the number of early spikes in common between the corresponding spike trains, while Serre et al. use the Euclidian distance between the corresponding patches of C1 activities.
We used an RBF network and not a Support Vector Machine.
Supporting Information
Video S1. Face-Feature Learning Here we presented the face-training examples in random order, propagated the corresponding spike waves, and applied the STDP rule. At the top of the screen, the input image is shown, with red, green, or blue squares indicating the receptive fields of the cells that fired (if any). At the bottom of the screen, we reconstructed the preferred stimuli of the three C2 cells. Above each reconstruction, the number of postsynaptic spikes emitted is shown with the corresponding color. The red, green, and blue cells develop selectivity to a view of, respectively, the bust, the head, and the face. |
/**
* Updates the chart data from the chart template
*
* @param original the original chart to update
* @param chart the template chart to update the original from
* @return the updated chart
*/
public Chart updateChart(Chart original, Chart chart) {
original.setInternationalNumber(chart.getInternationalNumber());
original.setActive(chart.isActive());
original.setHorizontalDatum(chart.getHorizontalDatum());
original.setName(chart.getName());
original.setScale(chart.getScale());
original.setGeometry(chart.getGeometry());
return saveEntity(original);
} |
def run(self, cur_time, points):
    """
    Run the economizer diagnostics for one scrape of device data.

    Groups the incoming point values by device, validates that every
    required input is present, runs the pre-condition checks (fan status,
    OAT/RAT proximity, sensor limits), and then dispatches to the five
    economizer diagnostic algorithms.

    Fix: ``self.econ2.clear_data()`` was previously called twice in the
    temperature-sensor-problem branch; each diagnostic is now cleared
    exactly once.

    :param cur_time: timestamp of the current data message
    :param points: mapping of "<device>&<point>" names to sampled values
    :return: Results object accumulating log messages and diagnostic output
    """
    device_dict = {}
    dx_result = Results()

    # Group values by device; keys look like "<device>&<point>".
    for point, value in list(points.items()):
        point_device = [name.lower() for name in point.split("&")]
        if point_device[0] not in device_dict:
            device_dict[point_device[0]] = [(point_device[1], value)]
        else:
            device_dict[point_device[0]].append((point_device[1], value))

    damper_data = []
    oat_data = []
    mat_data = []
    rat_data = []
    cooling_data = []
    fan_sp_data = []
    fan_status_data = []
    missing_data = []

    # Route each device's samples to the matching diagnostic input list.
    for key, value in device_dict.items():
        data_name = key
        if value is None:
            continue
        if data_name == self.fan_status_name:
            fan_status_data = data_builder(value, data_name)
        elif data_name == self.oad_sig_name:
            damper_data = data_builder(value, data_name)
        elif data_name == self.oat_name:
            oat_data = data_builder(value, data_name)
        elif data_name == self.mat_name:
            mat_data = data_builder(value, data_name)
        elif data_name == self.rat_name:
            rat_data = data_builder(value, data_name)
        elif data_name == self.cool_call_name:
            cooling_data = data_builder(value, data_name)
        elif data_name == self.fan_sp_name:
            fan_sp_data = data_builder(value, data_name)

    # Abort early if any required input is missing from this publish.
    if not oat_data:
        missing_data.append(self.oat_name)
    if not rat_data:
        missing_data.append(self.rat_name)
    if not mat_data:
        missing_data.append(self.mat_name)
    if not damper_data:
        missing_data.append(self.oad_sig_name)
    if not cooling_data:
        missing_data.append(self.cool_call_name)
    if not fan_status_data and not fan_sp_data:
        missing_data.append(self.fan_status_name)
    if missing_data:
        dx_result.log("Missing data from publish: {}".format(missing_data))
        return dx_result

    current_fan_status, fan_sp = self.check_fan_status(fan_status_data, fan_sp_data, cur_time)
    dx_result = self.check_elapsed_time(dx_result, cur_time, self.unit_status, FAN_OFF)
    if not current_fan_status:
        dx_result.log("Supply fan is off: {}".format(cur_time))
        return dx_result
    dx_result.log("Supply fan is on: {}".format(cur_time))

    # Constant-volume units without a speed point run at full speed.
    if fan_sp is None and self.constant_volume:
        fan_sp = 100.0

    oat = mean(oat_data)
    rat = mean(rat_data)
    mat = mean(mat_data)
    oad = mean(damper_data)

    # OAT and RAT that are too close make the OA-fraction unreliable.
    self.check_temperature_condition(oat, rat, cur_time)
    dx_result = self.check_elapsed_time(dx_result, cur_time, self.oaf_condition, OAF)
    if self.oaf_condition:
        dx_result.log("OAT and RAT readings are too close.")
        return dx_result

    limit_condition = self.sensor_limit_check(oat, rat, mat, cur_time)
    dx_result = self.check_elapsed_time(dx_result, cur_time, self.sensor_limit, limit_condition[1])
    if limit_condition[0]:
        dx_result.log("Temperature sensor is outside of bounds: {} -- {}".format(limit_condition, self.sensor_limit))
        return dx_result

    dx_result, self.temp_sensor_problem = self.econ1.econ_alg1(dx_result, oat, rat, mat, oad, cur_time)
    econ_condition, cool_call = self.determine_cooling_condition(cooling_data, oat, rat)
    _log.debug("Cool call: {} - Economizer status: {}".format(cool_call, econ_condition))

    if self.temp_sensor_problem is not None and not self.temp_sensor_problem:
        # Temperature sensors are healthy: run the remaining diagnostics.
        dx_result = self.econ2.econ_alg2(dx_result, cool_call, oat, rat, mat,
                                         oad, econ_condition, cur_time, fan_sp)
        dx_result = self.econ3.econ_alg3(dx_result, oat, rat, mat, oad,
                                         econ_condition, cur_time, fan_sp)
        dx_result = self.econ4.econ_alg4(dx_result, oat, rat, mat, oad,
                                         econ_condition, cur_time, fan_sp)
        dx_result = self.econ5.econ_alg5(dx_result, oat, rat, mat, cur_time)
    elif self.temp_sensor_problem:
        # A sensor fault invalidates the downstream diagnostics: report
        # their precondition status and reset each one exactly once.
        self.pre_conditions(dx_list[1:], TEMP_SENSOR, cur_time, dx_result)
        self.econ2.clear_data()
        self.econ3.clear_data()
        self.econ4.clear_data()
        self.econ5.clear_data()
    return dx_result
def check_pdf_file_exists(dir_out, sample, file_name):
    """Assert that ``file_name`` exists under ``dir_out/<sample>``."""
    expected_path = os.path.join(dir_out, sample, file_name)
    assert os.path.exists(expected_path), "Non-existent file: %s" % expected_path
1574. Multivariate Regression Analysis to Determine Independent Predictors of Treatment Outcomes in the RESTORE-IMI 2 Trial
In the RESTORE-IMI 2 trial, imipenem/cilastatin/relebactam (IMI/REL) was non-inferior to PIP/TAZ for treating hospital-acquired/ventilator-associated bacterial pneumonia (HABP/VABP) in the primary endpoint of Day 28 all-cause mortality (D28 ACM) and the key secondary endpoint of clinical response (CR) at early follow-up (EFU; 7-14 d after end of therapy). We performed a multivariate regression analysis to determine independent predictors of treatment outcomes in this trial.
Randomized, controlled, double-blind, phase 3, non-inferiority trial comparing IMI/REL 500 mg/250 mg vs PIP/TAZ 4 g/500 mg, every 6 h for 7-14 d, in adult patients (pts) with HABP/VABP. Stepwise-selection logistic regression modeling was used to determine independent predictors of D28 ACM and favorable CR at EFU, in the MITT population (randomized pts with ≥1 dose of study drug, except pts with only gram-positive cocci at baseline). Baseline variables (n=19) were pre-selected as candidates for inclusion (Table 1), based on clinical relevance. Variables were added to the model if significant (p < 0.05) and removed if their significance was reduced (p > 0.1) by addition of other variables.
Baseline variables that met criteria for significant independent predictors of D28 ACM and CR at EFU in the final selected regression model are in Fig 1 and Fig 2, respectively. As expected, APACHE II score, renal impairment, elderly age, and mechanical ventilation were significant predictors for both outcomes. Bacteremia and P. aeruginosa as a causative pathogen were predictors of unfavorable CR, but not of D28 ACM. Geographic region and the hospital service unit a patient was admitted to were found to be significant predictors, likely explained by their collinearity with other variables. Treatment allocation (IMI/REL vs PIP/TAZ) was not a significant predictor for ACM or CR; this was not unexpected, since the trial showed non-inferiority of the two HABP/VABP therapies. No interactions between the significant predictors and treatment arm were observed.
This analysis validated known predictors for mortality and clinical outcomes in pts with HABP/VABP and supports the main study results by showing no interactions between predictors and treatment arm.
Table 1. Candidate baseline variables pre-selected for inclusion
Figure 1. Independent predictors of greater Day 28 all-cause mortality (MITT population; N=531)
Figure 2. Independent predictors of favorable clinical response at EFU (MITT population; N=531)
Robert Tipping, MS, Merck & Co., Inc. (Employee, Shareholder) Jiejun Du, PhD, Merck & Co., Inc. (Employee, Shareholder) Maria C. Losada, BA, Merck & Co., Inc. (Employee, Shareholder) Michelle L. Brown, BS, Merck & Co., Inc. (Employee, Shareholder) Katherine Young, MS, Merck & Co., Inc. (Employee, Shareholder)Merck & Co., Inc. (Employee, Shareholder) Joan R. Butterton, MD, Merck & Co., Inc. (Employee, Shareholder) Amanda Paschke, MD MSCE, Merck & Co., Inc. (Employee, Shareholder) Luke F. Chen, MBBS MPH MBA FRACP FSHEA FIDSA, Merck & Co., Inc. (Employee, Shareholder)Merck & Co., Inc. (Employee, Shareholder)
|
#!/usr/bin/env python3
import argparse
from codeforces.parser import parse
from utils.generators import (
generate_folder_structure,
generate_test_files,
copy_templates,
)
class Platform:
    """Supported contest platforms; values are used as argparse choices."""
    CODEFORCES = 'codeforces'
def main():
    """Parse CLI arguments and scaffold the workspace for one contest."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--platform',
        help='contest platform(only codeforces supported for now)',
        default=Platform.CODEFORCES,
        choices=(Platform.CODEFORCES,),
    )
    arg_parser.add_argument(
        '-c',
        '--contest',
        help='contest number',
        type=int,
    )
    args = arg_parser.parse_args()

    # Guard clause: argparse already restricts choices, so anything else
    # never reaches this point; kept for symmetry with future platforms.
    if args.platform != Platform.CODEFORCES:
        return

    problems = parse(args.contest)
    generate_folder_structure(args.contest, problems)
    generate_test_files(args.contest, problems)
    copy_templates(args.platform, args.contest, problems)


if __name__ == '__main__':
    main()
|
/**
* @author Mark Fisher
*/
public abstract class ReceptorEvent<D> {
private static final ObjectMapper objectMapper = new ObjectMapper();
private final int id;
private final String type;
private Map<String, D> data;
public ReceptorEvent(int id, String type) {
this.id = id;
this.type = type;
}
public int getId() {
return id;
}
public String getType() {
return type;
}
public Map<String, D> getData() {
return data;
}
public void setData(Map<String, D> data) {
this.data = data;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(String.format("id: %s%n", id));
builder.append(String.format("event: %s%n", type));
String dataString;
try {
dataString = objectMapper.writeValueAsString(data);
}
catch (JsonProcessingException e) {
dataString = (data != null ? data.toString() : null);
}
builder.append(String.format("data: %s%n", dataString));
return builder.toString();
}
} |
#include "ch.h"

#include "app_cfg.h"
#include "message.h"
#include "sxfs.h"
#include "common.h"
#include "crc/crc32.h"
#include "touch.h"
#include "types.h"

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
/* Persisted application configuration payload. Stored to flash as part of
 * app_cfg_rec_t below. */
typedef struct {
  uint32_t reset_count;  /* incremented on every successful boot-time load */
  unit_t temp_unit;      /* temperature display unit (deg C / deg F) */
  output_ctrl_t control_mode;  /* ON_OFF or PID */
  quantity_t hysteresis;       /* stored in deg F (converted on set) */
  quantity_t screen_saver;     /* screen saver timeout setting */
  sensor_config_t sensor_configs[MAX_NUM_SENSOR_CONFIGS];  /* per-probe offsets */
  matrix_t touch_calib;        /* touchscreen calibration matrix */
  controller_settings_t controller_settings[NUM_CONTROLLERS];
  temp_profile_checkpoint_t temp_profile_checkpoints[NUM_CONTROLLERS];
  ota_update_checkpoint_t ota_update_checkpoint;  /* resumable OTA download state */
  char auth_token[64];         /* server authentication token */
  net_settings_t net_settings;
  fault_data_t fault;
} app_cfg_data_t;

/* On-flash record layout: payload plus a CRC32 used to validate it on load. */
typedef struct {
  app_cfg_data_t data;
  uint32_t crc;  /* crc32_block() over data */
} app_cfg_rec_t;
/* Background thread that periodically flushes the RAM copy to flash. */
static msg_t app_cfg_thread(void* arg);
/* Returns the first valid config record (partition 1, then 2), or NULL. */
static app_cfg_rec_t* app_cfg_load(sxfs_part_id_t* loaded_from);
/* Reads and CRC-validates the config record from a single partition. */
static app_cfg_rec_t* app_cfg_load_from(sxfs_part_id_t part);

/* Local RAM copy of app_cfg */
static app_cfg_rec_t app_cfg_local;
/* Guards mutations of app_cfg_local. */
static Mutex app_cfg_mtx;
/* Initializes the configuration subsystem: loads the stored config (or
 * resets to factory defaults if no valid record exists) and starts the
 * background flush thread. Call once at startup before other app_cfg_* APIs. */
void
app_cfg_init()
{
  chMtxInit(&app_cfg_mtx);

  app_cfg_rec_t* app_cfg = app_cfg_load(NULL);
  if (app_cfg != NULL) {
    /* Valid stored config found: adopt it and count this boot. */
    app_cfg_local = *app_cfg;
    app_cfg_local.data.reset_count++;
    free(app_cfg);
  }
  else {
    /* No valid record in either partition: fall back to factory defaults. */
    app_cfg_reset();
  }

  chThdCreateFromHeap(NULL, 1024, LOWPRIO, app_cfg_thread, NULL);
}
/* Flush worker: persists the RAM config copy every couple of seconds
 * until asked to terminate. */
static msg_t
app_cfg_thread(void* arg)
{
  (void)arg;
  chRegSetThreadName("app_cfg");

  for (;;) {
    if (chThdShouldTerminate())
      break;
    app_cfg_flush();
    chThdSleepSeconds(2);
  }

  return 0;
}
void
app_cfg_reset()
{
memset(&app_cfg_local.data, 0, sizeof(app_cfg_local.data));
app_cfg_local.data.reset_count = 0;
app_cfg_local.data.ota_update_checkpoint.download_in_progress = false;
app_cfg_local.data.ota_update_checkpoint.update_size = 0;
app_cfg_local.data.ota_update_checkpoint.last_block_offset = 0;
memset(app_cfg_local.data.ota_update_checkpoint.update_ver, 0, sizeof(app_cfg_local.data.ota_update_checkpoint.update_ver));
app_cfg_local.data.temp_unit = UNIT_TEMP_DEG_F;
app_cfg_local.data.control_mode = ON_OFF;
app_cfg_local.data.hysteresis.value = 1;
app_cfg_local.data.hysteresis.unit = UNIT_TEMP_DEG_F;
app_cfg_local.data.net_settings.security_mode = 0;
app_cfg_local.data.net_settings.ip_config = IP_CFG_DHCP;
app_cfg_local.data.net_settings.ip = 0;
app_cfg_local.data.net_settings.subnet_mask = 0;
app_cfg_local.data.net_settings.gateway = 0;
app_cfg_local.data.net_settings.dns_server = 0;
touch_calib_reset();
app_cfg_local.data.controller_settings[CONTROLLER_1].controller = CONTROLLER_1;
app_cfg_local.data.controller_settings[CONTROLLER_1].setpoint_type = SP_STATIC;
app_cfg_local.data.controller_settings[CONTROLLER_1].static_setpoint.value = 68;
app_cfg_local.data.controller_settings[CONTROLLER_1].static_setpoint.unit = UNIT_TEMP_DEG_F;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_1].enabled = false;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_1].function = OUTPUT_FUNC_COOLING;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_1].cycle_delay.unit = UNIT_TIME_MIN;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_1].cycle_delay.value = 3;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_2].enabled = false;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_2].function = OUTPUT_FUNC_HEATING;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_2].cycle_delay.unit = UNIT_TIME_MIN;
app_cfg_local.data.controller_settings[CONTROLLER_1].output_settings[OUTPUT_2].cycle_delay.value = 3;
app_cfg_local.data.controller_settings[CONTROLLER_2].controller = CONTROLLER_2;
app_cfg_local.data.controller_settings[CONTROLLER_2].setpoint_type = SP_STATIC;
app_cfg_local.data.controller_settings[CONTROLLER_2].static_setpoint.value = 68;
app_cfg_local.data.controller_settings[CONTROLLER_2].static_setpoint.unit = UNIT_TEMP_DEG_F;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_1].enabled = false;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_1].function = OUTPUT_FUNC_COOLING;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_1].cycle_delay.unit = UNIT_TIME_MIN;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_1].cycle_delay.value = 3;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_2].enabled = false;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_2].function = OUTPUT_FUNC_HEATING;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_2].cycle_delay.unit = UNIT_TIME_MIN;
app_cfg_local.data.controller_settings[CONTROLLER_2].output_settings[OUTPUT_2].cycle_delay.value = 3;
app_cfg_local.crc = crc32_block(0, &app_cfg_local.data, sizeof(app_cfg_data_t));
app_cfg_flush();
}
/* Tries each config partition in order and returns the first record that
 * passes CRC validation (caller owns the returned buffer). When
 * loaded_from is non-NULL it receives the partition the record came from.
 * Returns NULL when neither partition holds a valid record. */
static app_cfg_rec_t*
app_cfg_load(sxfs_part_id_t* loaded_from)
{
  static const sxfs_part_id_t parts[] = { SP_APP_CFG_1, SP_APP_CFG_2 };
  unsigned i;

  for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
    app_cfg_rec_t* rec = app_cfg_load_from(parts[i]);
    if (rec == NULL)
      continue;

    if (loaded_from != NULL)
      *loaded_from = parts[i];
    return rec;
  }

  return NULL;
}
/* Reads the config record from the given partition and validates its CRC.
 * Returns a heap-allocated record the caller must free(), or NULL on
 * allocation failure, read failure, or CRC mismatch. */
static app_cfg_rec_t*
app_cfg_load_from(sxfs_part_id_t part)
{
  app_cfg_rec_t* app_cfg = malloc(sizeof(app_cfg_rec_t));
  /* Fix: the malloc result was previously passed to sxfs_read unchecked. */
  if (app_cfg == NULL)
    return NULL;

  if (!sxfs_read(part, 0, (uint8_t*)app_cfg, sizeof(app_cfg_rec_t))) {
    free(app_cfg);
    return NULL;
  }

  uint32_t calc_crc = crc32_block(0, &app_cfg->data, sizeof(app_cfg_data_t));
  if (calc_crc != app_cfg->crc) {
    /* Stored record is corrupt or was never written. */
    free(app_cfg);
    return NULL;
  }

  return app_cfg;
}
/* Returns the configured temperature display unit. */
unit_t
app_cfg_get_temp_unit(void)
{
  return app_cfg_local.data.temp_unit;
}

/* Sets the temperature display unit. Invalid or unchanged values are
 * ignored; on change, MSG_TEMP_UNIT is broadcast. */
void
app_cfg_set_temp_unit(unit_t temp_unit)
{
  if (temp_unit != UNIT_TEMP_DEG_C &&
      temp_unit != UNIT_TEMP_DEG_F)
    return;

  if (temp_unit == app_cfg_local.data.temp_unit)
    return;

  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.temp_unit = temp_unit;
  chMtxUnlock();

  /* NOTE(review): the payload points into app_cfg_local and is read
     outside the mutex -- assumes a single writer; confirm. */
  msg_send(MSG_TEMP_UNIT, &app_cfg_local.data.temp_unit);
}

/* Returns the configured output control mode. */
output_ctrl_t
app_cfg_get_control_mode(void)
{
  return app_cfg_local.data.control_mode;
}

/* Sets the output control mode (ON_OFF or PID). Invalid or unchanged
 * values are ignored; on change, MSG_CONTROL_MODE is broadcast. */
void
app_cfg_set_control_mode(output_ctrl_t control_mode)
{
  if (control_mode != ON_OFF &&
      control_mode != PID)
    return;

  if (control_mode == app_cfg_local.data.control_mode)
    return;

  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.control_mode = control_mode;
  chMtxUnlock();

  msg_send(MSG_CONTROL_MODE, &app_cfg_local.data.control_mode);
}

/* Returns the control hysteresis (stored in deg F). */
quantity_t
app_cfg_get_hysteresis(void)
{
  return app_cfg_local.data.hysteresis;
}

/* Sets the control hysteresis. Values given in deg C are scaled to a
 * deg F *delta* (scale only, no +32 offset, since this is a temperature
 * difference, not an absolute temperature). No-op if unchanged. */
void
app_cfg_set_hysteresis(quantity_t hysteresis)
{
  if (memcmp(&hysteresis, &app_cfg_local.data.hysteresis, sizeof(quantity_t)) == 0)
    return;

  if (hysteresis.unit == UNIT_TEMP_DEG_C) {
    hysteresis.value *= (9.0f / 5.0f);
    hysteresis.unit = UNIT_TEMP_DEG_F;
  }

  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.hysteresis = hysteresis;
  chMtxUnlock();
}

/* Returns the screen saver timeout setting. */
quantity_t
app_cfg_get_screen_saver(void)
{
  return app_cfg_local.data.screen_saver;
}

/* Sets the screen saver timeout. No-op if the value is unchanged. */
void
app_cfg_set_screen_saver(quantity_t screen_saver)
{
  if (memcmp(&screen_saver, &app_cfg_local.data.screen_saver, sizeof(quantity_t)) == 0)
    return;

  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.screen_saver = screen_saver;
  chMtxUnlock();
}
/* Looks up the calibration offset stored for the given sensor serial.
 * Returns a zero deg F offset when the serial has no stored entry. */
quantity_t
app_cfg_get_probe_offset(sensor_serial_t sensor_serial)
{
  uint8_t i;

  for (i = 0; i < MAX_NUM_SENSOR_CONFIGS; i++) {
    sensor_config_t* cfg = &app_cfg_local.data.sensor_configs[i];
    if (memcmp(sensor_serial, cfg->sensor_serial, sizeof(sensor_serial_t)) == 0)
      return cfg->offset;
  }

  quantity_t no_offset;
  no_offset.unit = UNIT_TEMP_DEG_F;
  no_offset.value = 0;
  return no_offset;
}
void
app_cfg_set_probe_offset(quantity_t probe_offset, sensor_serial_t sensor_serial)
{
uint8_t i;
int idx, next_idx;
idx = next_idx = -1;
for(i = 0; i < MAX_NUM_SENSOR_CONFIGS; i++) {
sensor_serial_t* sensor_sn = &app_cfg_local.data.sensor_configs[i].sensor_serial;
if(memcmp(sensor_serial, sensor_sn, sizeof(sensor_serial_t)) == 0) {
idx = i;
break;
}
/* super cryptic non-zero check */
else if (((*sensor_sn)[0] == 0) &&
(memcmp(*sensor_sn, *(sensor_sn + 1), 5) == 0)) {
next_idx = i;
break;
}
}
if (idx < 0)
idx = next_idx;
if (idx < 0)
return;
if (probe_offset.unit == UNIT_TEMP_DEG_C) {
probe_offset.value *= (9.0f / 5.0f);
probe_offset.unit = UNIT_TEMP_DEG_F;
}
chMtxLock(&app_cfg_mtx);
memcpy(app_cfg_local.data.sensor_configs[idx].sensor_serial, sensor_serial, sizeof(sensor_serial_t));
app_cfg_local.data.sensor_configs[idx].offset = probe_offset;
chMtxUnlock();
}
/* Return the stored touchscreen calibration matrix. */
const matrix_t*
app_cfg_get_touch_calib(void)
{
  return &app_cfg_local.data.touch_calib;
}

/* Store a new touchscreen calibration matrix (unconditional write). */
void
app_cfg_set_touch_calib(matrix_t* touch_calib)
{
  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.touch_calib = *touch_calib;
  chMtxUnlock();
}
/* Return the settings for the given controller, or NULL if the id is
 * out of range. */
const controller_settings_t*
app_cfg_get_controller_settings(temp_controller_id_t controller)
{
  if (controller >= NUM_CONTROLLERS)
    return NULL;

  return &app_cfg_local.data.controller_settings[controller];
}

/* Store new settings for a controller and broadcast the change.
 *
 * Server-originated updates (SS_SERVER) are always applied and
 * re-broadcast even when identical; device-originated updates are
 * dropped when nothing changed.  NOTE(review): msg_send() receives the
 * caller's pointer, not a copy - confirm the messaging layer copies or
 * consumes it synchronously.
 */
void
app_cfg_set_controller_settings(
    temp_controller_id_t controller,
    settings_source_t source,
    controller_settings_t* settings)
{
  if (controller >= NUM_CONTROLLERS)
    return;

  if ((source == SS_SERVER) ||
      memcmp(settings, &app_cfg_local.data.controller_settings[controller], sizeof(controller_settings_t)) != 0) {
    chMtxLock(&app_cfg_mtx);
    app_cfg_local.data.controller_settings[controller] = *settings;
    chMtxUnlock();

    /* Route the notification based on where the change came from. */
    msg_id_t msg_id;
    if (source == SS_DEVICE)
      msg_id = MSG_CONTROLLER_SETTINGS;
    else
      msg_id = MSG_API_CONTROLLER_SETTINGS;
    msg_send(msg_id, settings);
  }
}
/* Return the temperature profile checkpoint for a controller, or NULL
 * if the id is out of range. */
const temp_profile_checkpoint_t*
app_cfg_get_temp_profile_checkpoint(temp_controller_id_t controller)
{
  if (controller >= NUM_CONTROLLERS)
    return NULL;

  return &app_cfg_local.data.temp_profile_checkpoints[controller];
}

/* Store a temperature profile checkpoint; writes only when it changed. */
void
app_cfg_set_temp_profile_checkpoint(temp_controller_id_t controller, temp_profile_checkpoint_t* checkpoint)
{
  if (controller >= NUM_CONTROLLERS)
    return;

  if (memcmp(checkpoint, &app_cfg_local.data.temp_profile_checkpoints[controller], sizeof(temp_profile_checkpoint_t)) != 0) {
    chMtxLock(&app_cfg_mtx);
    app_cfg_local.data.temp_profile_checkpoints[controller] = *checkpoint;
    chMtxUnlock();
  }
}
/* Return the stored API auth token as a NUL terminated C string. */
const char*
app_cfg_get_auth_token()
{
  return app_cfg_local.data.auth_token;
}

/*
 * Store a new API auth token, truncating it to the destination buffer.
 *
 * strncpy() does not write a terminator when the source is at least as
 * long as the buffer, so the last byte is forced to NUL explicitly to
 * keep app_cfg_get_auth_token() returning a valid C string.
 */
void
app_cfg_set_auth_token(const char* auth_token)
{
  chMtxLock(&app_cfg_mtx);
  strncpy(app_cfg_local.data.auth_token,
      auth_token,
      sizeof(app_cfg_local.data.auth_token) - 1);
  app_cfg_local.data.auth_token[sizeof(app_cfg_local.data.auth_token) - 1] = '\0';
  chMtxUnlock();
}
/* Return the stored network settings. */
const net_settings_t*
app_cfg_get_net_settings()
{
  return &app_cfg_local.data.net_settings;
}

/* Store new network settings and notify the network stack; the write
 * and the notification are skipped when nothing changed. */
void
app_cfg_set_net_settings(const net_settings_t* settings)
{
  if (memcmp(settings, &app_cfg_local.data.net_settings, sizeof(net_settings_t)) != 0) {
    chMtxLock(&app_cfg_mtx);
    app_cfg_local.data.net_settings = *settings;
    chMtxUnlock();

    msg_send(MSG_NET_NETWORK_SETTINGS, NULL);
  }
}
/* Return the over-the-air update checkpoint. */
const ota_update_checkpoint_t*
app_cfg_get_ota_update_checkpoint(void)
{
  return &app_cfg_local.data.ota_update_checkpoint;
}

/* Store the OTA update checkpoint (unconditional write, no change check
 * unlike the other setters). */
void
app_cfg_set_ota_update_checkpoint(const ota_update_checkpoint_t* checkpoint)
{
  chMtxLock(&app_cfg_mtx);
  app_cfg_local.data.ota_update_checkpoint = *checkpoint;
  chMtxUnlock();
}
/* Return the persisted device reset counter. */
uint32_t
app_cfg_get_reset_count(void)
{
  return app_cfg_local.data.reset_count;
}

/* Zero the stored fault record.  NOTE(review): performed without taking
 * app_cfg_mtx, unlike the other mutators - confirm this is intentional. */
void
app_cfg_clear_fault_data()
{
  memset(&app_cfg_local.data.fault, 0, sizeof(fault_data_t));
}

/* Return the stored fault record (type + raw context bytes). */
const fault_data_t*
app_cfg_get_fault_data()
{
  return &app_cfg_local.data.fault;
}
/*
 * Record fault information (type + raw context bytes), truncating the
 * payload to MAX_FAULT_DATA.
 *
 * NOTE(review): deliberately lock-free and allocation-free - this looks
 * intended to be callable from a fault/trap context where taking
 * app_cfg_mtx would not be safe.  Confirm against the callers.
 */
void
app_cfg_set_fault_data(fault_type_t fault_type, void* data, uint32_t data_size)
{
  if (data_size > MAX_FAULT_DATA)
    data_size = MAX_FAULT_DATA;

  app_cfg_local.data.fault.type = fault_type;
  memcpy(app_cfg_local.data.fault.data, data, data_size);
}
/*
 * Persist the in-RAM config if it differs from the stored copy.
 *
 * Double-buffered commit across two sxfs partitions (SP_APP_CFG_1/2):
 * the new image is written to the currently *unused* partition first,
 * and the active partition is erased only after that write succeeds, so
 * an interruption always leaves one intact copy on storage.
 */
void
app_cfg_flush()
{
  chMtxLock(&app_cfg_mtx);

  /* app_cfg_load() reports which partition holds the current valid copy. */
  sxfs_part_id_t used_app_cfg_part = SP_APP_CFG_1;
  app_cfg_rec_t* app_cfg = app_cfg_load(&used_app_cfg_part);

  /* Refresh the CRC over the data area before comparing and writing. */
  app_cfg_local.crc = crc32_block(0, &app_cfg_local.data, sizeof(app_cfg_data_t));

  /* Skip the erase/write cycle entirely when nothing changed. */
  if (app_cfg == NULL || memcmp(&app_cfg_local, app_cfg, sizeof(app_cfg_rec_t)) != 0) {
    bool ret;
    sxfs_part_id_t unused_app_cfg_part =
        (used_app_cfg_part == SP_APP_CFG_1) ? SP_APP_CFG_2 : SP_APP_CFG_1;

    ret = sxfs_erase_all(unused_app_cfg_part);
    if (ret) {
      ret = sxfs_write(unused_app_cfg_part, 0, (uint8_t*)&app_cfg_local, sizeof(app_cfg_local));
      if (ret) {
        /* New copy committed; retire the old partition. */
        ret = sxfs_erase_all(used_app_cfg_part);
        if (!ret)
          printf("used app cfg erase failed! %d\r\n", used_app_cfg_part);
      }
      else {
        printf("unused app cfg write failed! %d\r\n", unused_app_cfg_part);
      }
    }
    else {
      printf("unused app cfg erase failed! %d\r\n", unused_app_cfg_part);
    }
  }

  chMtxUnlock();

  if (app_cfg != NULL)
    free(app_cfg);
}
|
/**
* Created by Joel on 19-Sep-17.
*/
public class DiscoverGamesContract {
interface View extends BaseView<Presenter> {
void setLoadingIndicator(boolean active);
void showPopularGames(List<Game> games);
void showMostAnticipatedGames(List<Game> games);
void showUpcomingGames(List<Game> games);
void showLoadingPopularGamesError();
void showLoadingMostlyAnticipatedGamesError();
void showLoadingUpcomingGamesError();
void showGameDetailUi(String newsArticleId);
boolean isActive();
}
interface Presenter extends BasePresenter {
void loadPopularGames(boolean forceUpdate);
void loadMostAnticipatedGames(boolean forceUpdate);
void loadUpcomingGames(boolean forceUpdate);
void openGameDetail(@NonNull Game game);
}
} |
# Counts inversions in the array formed by concatenating A with itself K
# times, modulo 1e9+7.  Total = K * (inversions within one copy)
#                             + C(K, 2) * (ordered pairs with A[x] > A[y]),
# since every unordered pair of copies contributes one cross-copy term.
N, K = map(int, input().split())
A = list(map(int, input().split()))
MOD = 10 ** 9 + 7

# Single O(N^2) pass computes both counts (the original code ran two
# separate double loops over the same pairs).
inv_within = 0   # pairs i < j with A[i] > A[j]
inv_cross = 0    # all ordered pairs (i, j) with A[i] > A[j]
for i, a in enumerate(A):
    for j, b in enumerate(A):
        if a > b:
            inv_cross += 1
            if i < j:
                inv_within += 1

copy_pairs = K * (K - 1) // 2
print((K * inv_within + copy_pairs * inv_cross) % MOD)
|
def monkey_patch_py2neo():
    """Inject missing helper symbols into py2neo, then apply the
    version-specific patches for the installed py2neo major version."""
    # Copy over any names from this module that py2neo does not provide.
    for item in IMPORT_TABLE:
        if not hasattr(py2neo, item.name):
            setattr(py2neo, item.name, getattr(this_module, item.name))

    # Dispatch on the detected major version; unknown versions are a no-op,
    # matching the original if/elif chain.
    version_patchers = {
        1: monkey_patch_py2neo_v1,
        2: monkey_patch_py2neo_v2,
        3: monkey_patch_py2neo_v3,
    }
    patcher = version_patchers.get(py2neo_ver)
    if patcher is not None:
        patcher()
// VolumeGet calls engine binary
// TODO: Deprecated, replaced by gRPC proxy
func (e *EngineBinary) VolumeGet(*longhorn.Engine) (*Volume, error) {
output, err := e.ExecuteEngineBinary("info")
if err != nil {
return nil, errors.Wrapf(err, "cannot get volume info")
}
info := &Volume{}
if err := json.Unmarshal([]byte(output), info); err != nil {
return nil, errors.Wrapf(err, "cannot decode volume info: %v", output)
}
return info, nil
} |
# -*- coding: utf-8 -*-
from .trainer import Trainer
from .plm_trainer import MaskedLMTrainer
from .evaluation import (evaluate_text_classification,
evaluate_entity_recognition,
evaluate_attribute_extraction,
evaluate_relation_extraction,
evaluate_joint_extraction,
evaluate_generation)
from .options import OptionSampler
from .utils import auto_device, LRLambda, count_params, collect_params, check_param_groups
|
Chemometric discrimination of different tomato cultivars based on their volatile fingerprint in relation to lycopene and total phenolics content.
INTRODUCTION
The characteristic flavour of tomato is given by a complex mixture of sugars, acids, amino acids, minerals and volatile metabolites. Of these, volatile compounds are considered to greatly influence the flavour of tomato fruits. The volatile aroma compounds and phytochemical content of tomatoes are dependent on genotype, environmental conditions and cultural practices, and can thus be used for cultivar discrimination.
OBJECTIVE
To assess the possibility of using the volatile profile of tomato to fingerprint and discriminate different tomato cultivars, based on an 'in-tube extraction' technique coupled to gas chromatography-mass spectrometry (ITEX/GC-MS) combined with a chemometric approach.
RESULTS
Using the ITEX/GC-MS technique, 61 volatiles were analysed and separated from tomato cultivars, with 58 being identified. The main volatiles identified in all tomato cultivars were: hexanal, trans-2-hexenal, 1-hexanol, 3-pentanone, 3-methylbutanol, 2-methylbutanol, 3-methylbutanal and 6-methyl-5-hepten-2-one. The lycopene content and total phenolic compound content of the tomato cultivars varied between 36.78 and 73.18 mg/kg fresh weight (fw) and from 119.4 to 253.7 mg of gallic acid equivalents (GAE) per kilogram fresh weight, respectively. Volatile fingerprint and phytochemical composition led to a good differentiation between tomato cultivars, with the first two principal components explaining 89% of the variance in the data.
CONCLUSION
The tomato cultivars studied were easily discriminated based on their characteristic volatile profile that was obtained using the reliable ITEX/GC-MS technique. Principal component analysis revealed, in addition to volatile compounds, the important role played by the total phenolic content in tomato cultivar discrimination, which is highly correlated with phenotypic and biochemical differences between tomato cultivars. |
<gh_stars>0
package domain
import (
"encoding/json"
"errors"
)
// ReadTasks parses the instances JSON snapshot and returns the loggable
// tasks (running/starting/stopping app instances plus staging tasks),
// keyed by Task.Identifier().
func ReadTasks(data []byte) (map[string]Task, error) {
	type appInstance struct {
		Application_id        string
		Warden_job_id         uint64
		Warden_container_path string
		Instance_index        uint64
		State                 string
	}

	type stagingMessage struct {
		App_id string
	}

	type stagingEntry struct {
		Staging_message       stagingMessage
		Warden_job_id         uint64
		Warden_container_path string
	}

	type snapshot struct {
		Instances     []appInstance
		Staging_tasks []stagingEntry
	}

	if len(data) == 0 {
		return nil, errors.New("Empty data, can't parse json")
	}

	var parsed snapshot
	if err := json.Unmarshal(data, &parsed); err != nil {
		return nil, err
	}

	tasks := make(map[string]Task, len(parsed.Instances))

	for _, instance := range parsed.Instances {
		// Skip instances not yet wired up to a warden container.
		if instance.Warden_container_path == "" || instance.Warden_job_id == 0 {
			continue
		}
		if !isStateTracked(instance.State) {
			continue
		}
		task := Task{
			ApplicationId:       instance.Application_id,
			SourceName:          "App",
			WardenContainerPath: instance.Warden_container_path,
			WardenJobId:         instance.Warden_job_id,
			Index:               instance.Instance_index,
		}
		tasks[task.Identifier()] = task
	}

	for _, staging := range parsed.Staging_tasks {
		if staging.Warden_job_id == 0 {
			continue
		}
		task := Task{
			ApplicationId:       staging.Staging_message.App_id,
			SourceName:          "STG",
			WardenContainerPath: staging.Warden_container_path,
			WardenJobId:         staging.Warden_job_id,
		}
		tasks[task.Identifier()] = task
	}

	return tasks, nil
}
// isStateTracked reports whether an instance in the given lifecycle state
// should have its logs collected.
func isStateTracked(state string) bool {
	switch state {
	case "RUNNING", "STARTING", "STOPPING":
		return true
	default:
		return false
	}
}
|
/* eslint-env jest */
import { Parser } from "../gcode-parser";
// Round-trip tests: parsing G-code must keep the raw input intact so the
// original text can be reconstructed by joining parser.lines.

test('all input should be preserved', () => {
  const parser = new Parser();
  const gcode =`G1 X0 Y0 Z1 E1`;
  const parsed = parser.parseGCode(gcode);
  expect(parsed).not.toBeNull();
  // Joining the stored lines must reproduce the exact input.
  const unparsed = parser.lines.join('\n');
  expect(unparsed).toEqual(gcode);
});

// Same round-trip guarantee across multiple commands/newlines.
test('multiple lines should be preserved', () => {
  const parser = new Parser();
  const gcode =`G1 X0 Y0 Z1 E1\nG1 X10 Y10 E10`;
  const parsed = parser.parseGCode(gcode);
  expect(parsed).not.toBeNull();
  const unparsed = parser.lines.join('\n');
  expect(unparsed).toEqual(gcode);
});

// Trailing comments must survive parsing, not be stripped.
test('comments should be preserved', () => {
  const parser = new Parser();
  const gcode =`G1 X0 Y0 Z1 E1; this is a comment`;
  const parsed = parser.parseGCode(gcode);
  expect(parsed).not.toBeNull();
  const unparsed = parser.lines.join('\n');
  expect(unparsed).toEqual(gcode);
});
<filename>impl/src/main/java/com/github/chenjianjx/srb4jfullsample/impl/biz/auth/AccessTokenRepo.java<gh_stars>1-10
package com.github.chenjianjx.srb4jfullsample.impl.biz.auth;
import java.sql.Timestamp;
import org.apache.ibatis.annotations.Delete;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Select;
import org.apache.ibatis.annotations.SelectKey;
import org.apache.ibatis.annotations.Update;
import org.springframework.stereotype.Repository;
/**
 * MyBatis mapper for persisting OAuth-style access tokens and their paired
 * refresh tokens.
 *
 * @author <EMAIL>
 */
@Repository
public interface AccessTokenRepo {

	/**
	 * Inserts a new token row; the generated primary key is written back
	 * into {@code accessToken.id} via the {@code @SelectKey} hook.
	 *
	 * @return the auto-generated id of the new row
	 */
	@Insert("insert into AccessToken(tokenStr, lifespan, userId, expiresAt,refreshTokenStr, createdBy) "
			+ "values (#{tokenStr}, #{lifespan}, #{userId}, #{expiresAt}, #{refreshTokenStr}, #{createdBy})")
	@SelectKey(statement = "select last_insert_id() as id", keyProperty = "id", keyColumn = "id", before = false, resultType = long.class)
	public long saveNewToken(AccessToken accessToken);

	/** Looks up a token by its access-token string; {@code null} when absent. */
	@Select("select * from AccessToken where tokenStr = #{tokenStr}")
	public AccessToken getByTokenStr(String tokenStr);

	/** Looks up a token by its refresh-token string; {@code null} when absent. */
	@Select("select * from AccessToken where refreshTokenStr = #{refreshTokenStr}")
	public AccessToken getByRefreshTokenStr(String refreshTokenStr);

	/** Deletes the row holding the given access-token string, if any. */
	@Delete("delete from AccessToken where tokenStr = #{tokenStr}")
	public void deleteByTokenStr(String tokenStr);

	/**
	 * Purges every token that expired before the given instant.
	 *
	 * @return the number of rows deleted
	 */
	@Delete("delete from AccessToken where expiresAt < #{timestamp}")
	public int deleteTokensExpiresBefore(Timestamp timestamp);

	/** Rewrites the token/refresh-token pair and expiry of an existing row. */
	@Update("update AccessToken set tokenStr = #{tokenStr}, lifespan=#{lifespan}, expiresAt = #{expiresAt}, refreshTokenStr = #{refreshTokenStr}, updatedBy = #{updatedBy} where id = #{id}")
	public void updateAccessToken(AccessToken newToken);
}
|
Premedication With Oral Pregabalin for the Prevention of Acute Postsurgical Pain in Coronary Artery Bypass Surgery
Background: For coronary artery bypass grafting (CABG) sternotomy should be performed. The pain after surgery is severe and requires medical intervention. Use of the analgesics is limited by their side effects and studies suggest that prevention with some medications before surgery is effective in controlling the postoperative pain. Objectives: We investigated the efficacy of pregabalin administration before surgery in the treatment of acute postoperative pain after CABG surgery. Patients and Methods: Sixty patients indicated for elective CABG surgery were randomly allocated to two groups. One group received placebo and the other received 150 mg of oral pregabalin before surgery. Heart rates, blood pressure, respiratory rate, intensive care unit (ICU) stay duration, morphine consumption, and pain score according to the visual analog scale (VAS) were measured and recorded at 4, 12, and 24 hours of surgery. Results: Pregabalin consumption did not alter hemodynamic parameters and was safe in patients after CABG. Its consumption was associated with significant reduction in the pain score (P values were 0.035, 0.026, and 0.047 respectively at 4, 12, and 24 hours of surgery). Its use was not associated with changes in the morphine consumption at 4, 12, and 24 hours of surgery (P > 0.05). Conclusions: Premedication with studied dose of pregabalin is effective for the prevention of postoperative pain in patients after CABG and has no adverse effects. Trials with other treating schedule and doses of the drug should be performed to determine the best treatment plan.
Background
Atherosclerotic narrowing of coronary arteries is a common adverse condition that requires medical attention and in many cases, coronary artery bypass grafting (CABG) is indicated for high-risk patients (1)(2)(3)(4)(5)(6)(7). In CABGs, patients have severe pain during and after surgery due to sternotomy. Acute pain after cardiac surgery might be visceral, musculoskeletal, or neurogenic in origins requiring medical attention (8,9). Acute pain after surgery is an undesirable outcome of CABG that might turn into persistent and debilitating postoperative chronic pain (10). It directly correlates with prolonged surgery. In a study by Lahtinen et al., 49%, 78%, and 62% of patients experienced severe pain at rest, during coughing, and on movement, respectively (11). According to the same study, 31% of patients were having pain upon movement even one year after surgery. Management of the condition is difficult and most of the patients receive opioids for pain relief (9). Due to several known adverse effects of opioids, efforts are being made to replace existing drugs with newer ones with fewer adverse effects and to develop novel approaches for reducing postsurgery pain (12).
The anticonvulsant pregabalin is indicated for the treatment of peripheral neuropathic pain. Similar to gabapentin, pregabalin is an analog of neurotransmitter gamma-aminobutyric acid (GABA) (13). It mainly acts through binding on alpha-2 and delta receptors and acting as antihyperalgesic agent (14). Pregabalin delays or offsets the sensitization of dorsal horn neurons, possibly leading to augmentation of surgical stimulation that affects changes in the central and peripheral nervous system. Recent studies on pregabalin use for reducing postoperative pain have revealed its beneficial effects on the prevention of pain as well as altering the neuropathic pain incidence. A systematic review by Clarke et al. on pregabalin use in postoperative pain has confirmed its safety and effectiveness (15). However, another systematic review by Chaparro et al. concluded that enough evidences are not available to make conclusion whether gabapentin is suit-able for prevention of postsurgical pain (10). In addition, optimal dose and duration of the treatment cannot be recommended because of the heterogeneity of the trials.
Objectives
The aim of the current study was to investigate whether premedication with pregabalin was effective in the treatment of acute postoperative pain after CABG.
Study Design
A total of 60 patients referred to the Rajaie Cardiovascular, Medical and Research Center, Tehran, Iran, for elective CABG were recruited in this randomized, controlled, double-blinded trial. Patients and examining physicians were unaware of the intervention used for each patient during the study. Patients were randomly allocated to two groups: one group was set as control and the second received pregabalin. For randomization, we used the online software at http://www.randomizer.org. According to the study by Sunder et al. in 2012, which assessed the effect of pregabalin on postoperative pain, VAS scores in the pregabalin and control groups were 2.02 ± 0.57 and 2.39 ± 0.61, respectively. Using the online sample size calculator at http://www.stat.ubc.ca/~rollin/stats/ssize/n2.html, we calculated 29 cases in each group. Finally, we chose a sample size of 30 in each group, for a total of 60 patients.
Patients were assigned numbers from one to 60 and each was randomly assigned either into control or pregabalin groups as defined randomly by the software. Patients older than 20 years old who were planned for elective CABG with Laryngeal view grade from 1 to 3, according to the American Society of Anesthesiologists (ASA) guideline on management of the difficult airway (16), were included in this study. The anesthesia and surgical techniques were the same for all the patients.
Patients with liver or renal dysfunction, metabolic disorders, and left bundle branch block (LBBB) were excluded from study. In addition, patients with indications of emer-gency surgical operation, those using opioids, patients with a history of drug sensitivity or seizures, smokers, and those with ejection fraction (EF) < 35% were excluded. Patients signed informed consent form. Study protocol was approved by Institutional Ethics Review Board of the Rajaei Heart Center. Protocol of the study was in compliance with Helsinki declaration on ethical principles for medical research involving human subjects (17).
Treatment Plan and Outcome Measures
Patients in pregabalin and control groups respectively received single dose of 150-mg pregabalin capsules (LYR-ICA, Pfizer Inc, Germany) and placebo two hours before surgery. Severity of pain after 4, 12 and 24 hours were measured in patients by a visual analog scale (VAS) and recorded (18). Patients having VAS scores > 3 received 0.1 mg/kg of intravenous morphine up to 8 mg. Background data including arterial blood pressure, heart rate, respiratory rate, and time of extubation were recorded.
Statistical Analysis
Numerical data were expressed as mean ± standard deviation (SD). As the data showed normal distribution pattern by Kolmogorov-Smirnov test as well as homogeneity of variance, group comparisons were made by Student's t test. Chi square test was used to examine differences between qualitative data. In all comparisons, statistical significance levels were considered at P < 0.05.
Results
Overall, 60 patients were recruited for this study among which 10 (16.6%) were female. Rate of females in pregabalin and control groups were similar and identical to its frequency in total patients. Mean age of control and pregabalin groups were 57.9 ± 8.6 and 54.7 ± 8.3 years, respectively (P > 0.05). Although our exclusion criteria was the patients age > 20 years, the age of patients ranged from 35 to 70 years due to occurrence of the coronary artery events at older ages. According to our results, no hemodynamic changes were observed between two groups (P > 0.05) ( Table 1). Comparison of severity of pain showed significant differences between two groups at 4, 12, and 24 hours of surgery ( Figure 1) (P < 0.05). Morphine consumption did not change significantly by using pregabalin. Consumption of morphine was similar between patients receiving pregabalin before treatment and those receiving placebo ( Table 2). ICU stay duration was also similar between two groups (P > 0.05) ( Table 3).
Discussion
In the current study, we investigated the effectiveness and safety profile of pregabalin vs control group for the management of acute pain after CABG. Findings of the study showed no significant differences in pain score according to the VAS scale between two groups at 4, 12, and 24 hours of surgery. Moreover, using pregabalin did not significantly change the opioid consumption doses.
Although some studies are advocating the use of pregabalin before surgery to reduce postsurgical pain, some others deny its effectiveness (19,20). Studies by Joshi et al. demonstrated that acute and chronic postoperative pain after off-pump CABG in patients receiving pregabalin, 150 mg two hours prior to induction of anesthesia and 75 mg twice daily for two consecutive postoperative days, had significantly lowered pain severity at rest and upon movement in comparison to control group. In addition, pregabalin consumption was associated with 60% lower rates of tramadol consumption without increasing the extubation time (21). In the current study, we used a single dose of pregabalin (150 mg) before surgery. As our study demonstrated no significant differences between two groups, it appears that premedication by itself is not sufficient to reduce postsurgical pain.
A meta-analysis study by Chaparro et al. for evaluation of the efficacy of systemic drugs for the prevention of chronic pain after surgery by examining the proportion of patients reporting pain three months or more after surgery, could not approve pregabalin preventive effect on postsurgical pain (10). Nonetheless, a similar study by Clarke et al. shows that pregabalin was useful in this case (15). Our study should be added to the evidence that pregabalin might be useful in the management of postoperative pain when administered at single dose of 150 mg before surgery. According to the meta-analysis by Chaparro et al. (10), ketamine might be more effective in management of acute postsurgical pain. Our study is the first of its kind examining the possibility of using pregabalin to prevent postsurgery pain of CABG. Our trial demonstrated significant differences in pain scores between placebo and pregabalin when administered at single dose of 150 mg before surgery. Considering our findings, findings of other studies and safety profile of the drug, we recommend examining pregabalin effectiveness with different doses and treatment schedules in larger trials to define whether it can be used in management of postoperative pain and to determine a universal dosing and administration schedule.
conducting this research. We express our special thanks to the study consultant biostatistician, Dr Hooman Bakhshandeh. |
<commit_msg>Use the correct main for cancel job
<commit_before>package com.ibm.streamsx.rest;
import java.io.IOException;
import java.math.BigInteger;
import com.ibm.streamsx.topology.internal.streams.InvokeCancel;
class StreamsConnectionImpl extends AbstractStreamsConnection {
private final String userName;
StreamsConnectionImpl(String userName, String authorization,
String resourcesUrl, boolean allowInsecure) throws IOException {
super(authorization, resourcesUrl, allowInsecure);
this.userName = userName;
}
@Override
String getAuthorization() {
return authorization;
}
@Override
boolean cancelJob(Instance instance, String jobId) throws IOException {
// TODO - correct domain id
InvokeCancel cancelJob = new InvokeCancel(null, instance.getId(), new BigInteger(jobId), userName);
try {
return cancelJob.invoke(false) == 0;
} catch (Exception e) {
throw new RESTException("Unable to cancel job " + jobId
+ " in instance " + instance.getId(), e);
}
}
}
<commit_after>package com.ibm.streamsx.rest;
import java.io.IOException;
import java.math.BigInteger;
import com.ibm.streamsx.topology.internal.streams.InvokeCancel;
/**
 * REST connection to an IBM Streams install authenticated with explicit
 * user credentials; the user name is reused for job-cancel invocations.
 */
class StreamsConnectionImpl extends AbstractStreamsConnection {

    /** User name passed to InvokeCancel when cancelling jobs. */
    private final String userName;

    StreamsConnectionImpl(String userName, String authorization,
            String resourcesUrl, boolean allowInsecure) throws IOException {
        super(authorization, resourcesUrl, allowInsecure);
        this.userName = userName;
    }

    @Override
    String getAuthorization() {
        return authorization;
    }

    /**
     * Cancels a job via the InvokeCancel helper, addressing it by the
     * instance's domain id, instance id and numeric job id.
     *
     * @return true when the cancel invocation exits with status 0
     * @throws RESTException wrapping any failure raised by the invocation
     */
    @Override
    boolean cancelJob(Instance instance, String jobId) throws IOException {
        InvokeCancel cancelJob = new InvokeCancel(
                instance.getDomain().getId(), instance.getId(),
                new BigInteger(jobId), userName);
        try {
            return cancelJob.invoke(false) == 0;
        } catch (Exception e) {
            throw new RESTException("Unable to cancel job " + jobId
                    + " in instance " + instance.getId(), e);
        }
    }
}
|
package net.onrc.onos.core.intent;
import static org.junit.Assert.assertEquals;
import net.onrc.onos.core.topology.LinkData;
import net.onrc.onos.core.util.Dpid;
import net.onrc.onos.core.util.PortNumber;
import net.onrc.onos.core.util.SwitchPort;
import net.onrc.onos.core.util.serializers.KryoFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Unit tests for PathIntent: id helpers plus a Kryo serialization
 * round-trip that checks every field (path links, bandwidth, parent
 * intent) survives encode/decode.
 */
public class PathIntentTest {
    private static final Dpid DPID_1 = new Dpid(1L);
    private static final Dpid DPID_2 = new Dpid(2L);
    private static final Dpid DPID_3 = new Dpid(3L);
    private static final Dpid DPID_4 = new Dpid(4L);
    private static final PortNumber PORT_NUMBER_1 = PortNumber.uint16((short) 1);
    private static final PortNumber PORT_NUMBER_2 = PortNumber.uint16((short) 2);

    @Before
    public void setUp() throws Exception {
    }

    @After
    public void tearDown() throws Exception {
    }

    /** First id in a chain is the parent id with suffix "___0". */
    @Test
    public void testCreateFirstId() {
        String id = PathIntent.createFirstId("100");
        assertEquals("100___0", id);
    }

    /** Next id increments only the numeric suffix after "___". */
    @Test
    public void testCreateNextId() {
        String id = PathIntent.createNextId("100___999");
        assertEquals("100___1000", id);
    }

    /** Kryo round-trip: serialize a 3-link PathIntent and verify fields. */
    @Test
    public void test() {
        KryoFactory factory = new KryoFactory();
        Kryo kryo = factory.newKryo();
        Output output = new Output(1024);

        ConstrainedShortestPathIntent cspIntent1 =
                new ConstrainedShortestPathIntent("1", 2L, 3L, 4L, 5L, 6L, 7L, 1000.0);

        // Build a 3-hop path: sw1:1 -> sw2:2, sw2:1 -> sw3:2, sw3:1 -> sw4:2.
        Path path = new Path();
        path.add(new LinkData(new SwitchPort(1L, 1L), new SwitchPort(2L, 2L)));
        path.add(new LinkData(new SwitchPort(2L, 1L), new SwitchPort(3L, 2L)));
        path.add(new LinkData(new SwitchPort(3L, 1L), new SwitchPort(4L, 2L)));

        PathIntent pathIntent1 = new PathIntent("11", path, 123.45, cspIntent1);

        kryo.writeObject(output, pathIntent1);
        output.close();

        Input input = new Input(output.toBytes());

        // create pathIntent from bytes
        PathIntent pathIntent2 =
                kryo.readObject(input, PathIntent.class);
        input.close();

        // check every link endpoint survived the round-trip
        assertEquals("11", pathIntent2.getId());
        Path path2 = pathIntent2.getPath();

        assertEquals(DPID_1, path2.get(0).getSrc().getDpid());
        assertEquals(PORT_NUMBER_1, path2.get(0).getSrc().getPortNumber());
        assertEquals(DPID_2, path2.get(0).getDst().getDpid());
        assertEquals(PORT_NUMBER_2, path2.get(0).getDst().getPortNumber());

        assertEquals(DPID_2, path2.get(1).getSrc().getDpid());
        assertEquals(PORT_NUMBER_1, path2.get(1).getSrc().getPortNumber());
        assertEquals(DPID_3, path2.get(1).getDst().getDpid());
        assertEquals(PORT_NUMBER_2, path2.get(1).getDst().getPortNumber());

        assertEquals(DPID_3, path2.get(2).getSrc().getDpid());
        assertEquals(PORT_NUMBER_1, path2.get(2).getSrc().getPortNumber());
        assertEquals(DPID_4, path2.get(2).getDst().getDpid());
        assertEquals(PORT_NUMBER_2, path2.get(2).getDst().getPortNumber());

        assertEquals(123.45, pathIntent2.getBandwidth(), 0.0);

        // the parent intent and all its endpoints must also round-trip
        ConstrainedShortestPathIntent cspIntent2 =
                (ConstrainedShortestPathIntent) pathIntent2.getParentIntent();

        assertEquals("1", cspIntent2.getId());
        assertEquals(2L, cspIntent2.getSrcSwitchDpid());
        assertEquals(3L, cspIntent2.getSrcPortNumber());
        assertEquals(4L, cspIntent2.getSrcMac());
        assertEquals(5L, cspIntent2.getDstSwitchDpid());
        assertEquals(6L, cspIntent2.getDstPortNumber());
        assertEquals(7L, cspIntent2.getDstMac());
        assertEquals(1000.0, cspIntent2.getBandwidth(), 0.0);
    }
}
|
//package codeforce;
import java.io.*;
import java.util.*;
/**
 * Memoized DP ("Dima and Hares" style): n animals in a row are fed in
 * some order; the joy from animal i is a[i], b[i] or c[i] depending on
 * the feeding order relative to its neighbours.  Prints the maximum
 * total joy.
 */
public class dimaandhares {
    // Per-animal joy tables read from input (three lines of n ints).
    static int[] a, b, c;
    // dp[i][p]: best joy for animals i..n-1; p == 1 appears to mean
    // "animal i-1 is fed before animal i" (see transitions below).
    // -1 marks "not yet computed".
    static int[][] dp;
    static int n;

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        PrintWriter pw = new PrintWriter(System.out);
        n = Integer.parseInt(br.readLine());
        a = new int[n];
        b = new int[n];
        c = new int[n];
        dp = new int[n][2];
        StringTokenizer st1 = new StringTokenizer(br.readLine());
        for (int i = 0; i < n; i++) {
            a[i] = Integer.parseInt(st1.nextToken());
        }
        st1 = new StringTokenizer(br.readLine());
        for (int i = 0; i < n; i++) {
            b[i] = Integer.parseInt(st1.nextToken());
        }
        st1 = new StringTokenizer(br.readLine());
        for (int i = 0; i < n; i++) {
            c[i] = Integer.parseInt(st1.nextToken());
        }
        // Initialize the memo table to "unset".
        for (int k = 0; k < n; k++) {
            dp[k][0] = -1;
            dp[k][1] = -1;
        }
        pw.println(function(0, 0));
        pw.close();
    }

    // 0 previous not taken, 1 previous taken
    // Recursive DP over positions, left to right, with memoization.
    public static int function(int current, int prevs) {
        if (current >= n) {
            return 0;
        }
        if (dp[current][prevs] != -1) {
            return dp[current][prevs];
        }
        int ans = 0;
        if (current == 0) {
            // First animal: choose its ordering relative to animal 1.
            // NOTE(review): for n > 1 the else-branch below recomputes
            // exactly this maximum (prevs is always 0 at the root), so
            // this block is redundant but harmless; for n == 1 its result
            // is intentionally overwritten by the current==(n-1) case.
            ans = Math.max(function(current + 1, 0) + b[current], ans);
            ans = Math.max(function(current + 1, 1) + a[current], ans);
        }
        if (current == (n - 1)) {
            // Last animal: joy is fixed by the ordering relative to its
            // single neighbour.  NOTE(review): when n == 1 this yields
            // a[0] - confirm that matches the problem statement for a
            // single animal with no neighbours.
            if (prevs == 0) {
                ans = a[current];
            } else {
                ans = b[current];
            }
        } else {
            // Interior animal: branch on whether the previous animal was
            // fed before it, then choose the next animal's ordering.
            if (prevs == 0) {
                ans = Math.max(function(current + 1, 0) + b[current], ans);
                ans = Math.max(function(current + 1, 1) + a[current], ans);
            } else {
                ans = Math.max(function(current + 1, 0) + c[current], ans);
                ans = Math.max(function(current + 1, 1) + b[current], ans);
            }
        }
        return dp[current][prevs] = ans;
    }
}
|
Is Unhealthy Fat Mass Disguised By A Healthy BMI In Females With Eating Disorders?: 1723 Board #4 June 1 1
CONCLUSION: Large differences in means were expected because the MRI protocol images a larger anatomical region (L5 to T9) than DXA (a region extending 20% of the distance from the iliac crest to the base of the skull). Our results indicate that quantification of VAT determined from DXA and a novel multi-slice MRI protocol are highly correlated, suggesting that either method could be used to provide important insight into disease risk status and the effectiveness of therapeutic interventions.
/**
 * Implementation of the algorithm General.
 * Algorithm minimizes the MADFA per height level.
 * @author Tobias
 */
public class AlgorithmSPL_N extends AlgorithmSPL_T {

    /**
     * Adds a word to the automaton in three phases:
     * 1. Follow existing transitions while the target state is not a
     *    confluence state (the shared prefix that is safe to reuse).
     * 2. Clone each confluence state on the remaining matched prefix and
     *    redirect this word's transition to the clone, so the path is no
     *    longer shared with other words.
     * 3. Create fresh states and transitions for the unmatched suffix.
     * The state reached after the last character is marked final.
     *
     * NOTE(review): {@code left} accumulates the consumed prefix but is
     * never read afterwards - it appears to be leftover bookkeeping.
     */
    @Override
    protected void addWord(String word) {
        State currentState = this.automaton.getStartState();
        String left = "";
        String right = word;

        // Phase 1: consume the longest prefix through non-confluence states.
        while (!right.isEmpty() && Util.stateExist(currentState, Util.head(right)) && !Util.getNextState(currentState, Util.head(right)).isConfluence()) {
            String nextChar = Util.head(right);
            currentState = Util.getNextState(currentState, nextChar);
            left += nextChar;
            right = Util.tail(right);
        }

        // Phase 2: clone confluence states so this word's path is private.
        while (!right.isEmpty() && Util.stateExist(currentState, Util.head(right))) {
            String nextChar = Util.head(right);
            State nextState = Util.getNextState(currentState, nextChar);
            State cloneState = Util.cloneState(automaton, nextState);
            // Re-point the incoming transition from the shared state to the clone.
            Transition transition = currentState.getTransition(nextState, nextChar);
            nextState.getIncomingTransitions().remove(transition);
            transition.setEndState(cloneState);
            cloneState.addIncomingTransition(transition);
            automaton.addState(cloneState);
            if (cloneState.isFinalState()) {
                automaton.addFinalState(cloneState);
            }
            currentState = cloneState;
            left += nextChar;
            right = Util.tail(right);
        }

        // Phase 3: append brand-new states for the remaining suffix.
        while (!right.isEmpty()) {
            String nextChar = Util.head(right);
            State newState = new State();
            Transition transition = new Transition(currentState, newState, nextChar);
            currentState.addOutgoingTransition(transition);
            newState.addIncomingTransition(transition);
            automaton.addState(newState);
            automaton.addTransition(transition);
            currentState = newState;
            left += nextChar;
            right = Util.tail(right);
        }

        // The state reached after the whole word accepts it.
        currentState.setFinalState(true);
        automaton.addFinalState(currentState);
    }

    /** @return a fresh instance of this algorithm. */
    @Override
    public AlgorithmSkeleton copy() {
        return new AlgorithmSPL_N();
    }
}
#pragma once
#include <cstdint>
#include <cassert>
#include <iostream>
#include <memory>
#include "symbol.hpp"
namespace ast {
// Shared-ownership pointer alias used for all AST nodes.
template <typename T>
using Ptr = std::shared_ptr<T>;//T*;
// Non-owning companion to Ptr; breaks ownership cycles.
template <typename T>
using WeakPtr = std::weak_ptr<T>;//T*;
#define newPtr std::make_shared
#define castPtr std::dynamic_pointer_cast
//Ptr<T> newPtr(T *t) {return std::make_shared<T>(t);}
using std::string;
typedef int64_t i64;
// NOTE(review): 'uchar' is an int, presumably to hold code points wider than
// one byte — confirm intended character width.
typedef int uchar;
// Variant type using polymorphism
// Consider implementing this solution with templates,
// visitor pattern perhaps, boost::variant
// Runtime type tag for each concrete Atom subclass.
enum ASTType {
  ListType = 0,
  StringType,
  I64Type,
  CharType,
  DoubleType,
  BoolType,
  SymType,
  VoidType
};
struct List;
struct AVoid;
// Writes 'level' indentation units to stdout (used by the dump() methods).
inline void print_indent(int level) {
  //assert(level < 4);
  while(level--) std::cout << "  ";
}
// Abstract base for every AST node. Carries the source position (line/column,
// -1 when unknown). The get_* accessors assert-fail by default; each concrete
// subclass overrides exactly the one matching its payload type.
struct Atom : std::enable_shared_from_this<Atom> {
  int line;
  int column;
  Atom() : line(-1), column(-1) {}
  Atom(int l, int c) : line(l), column(c) {}
  // Runtime type tag of the concrete subclass.
  virtual ASTType type() = 0;
  virtual ~Atom() {}
  // Pretty-prints the node to stdout at the given indentation level.
  virtual void dump(int indent) = 0;
  // literals
  virtual i64 get_i64() {assert(!"Not an int64"); return 0;}
  virtual int get_char() {assert(!"Not a char"); return 0;}
  virtual double get_double() {assert(!"Not a double"); return 0;}
  virtual bool get_bool() {assert(!"Not a bool"); return 0;}
  virtual std::string get_string() {assert(!"Not a string"); return std::string();}
  //virtual void get_void() {assert(!"Not a void"); return;}
  // syms lists
  virtual Sym get_Sym() {assert(!"Not a symbol"); return Sym();}
  //virtual Ptr<List> get_List() {assert(!"Not a list"); return nullptr;}
};
// Singly-linked cons cell: 'head' is this element, 'tail' is the rest of the
// list (nullptr terminates). By construction a List always has at least one
// element — the empty list is represented by a null Ptr<List>, never by a
// List object.
struct List : Atom { // never-empty list
  Ptr<Atom> head;
  Ptr<List> tail;
  virtual ASTType type() {return ListType;}
  /*virtual Ptr<List> get_List() {
    return shared_from_this();
  }*/
  virtual ~List() {}
  // Prints the list as an indented, parenthesized s-expression.
  virtual void dump(int level) {
    print_indent(level);
    std::cout << "(" << std::endl;
    for(Ptr<Atom> a : *this) {
      a->dump(level + 1);
      std::cout << std::endl;
    }
    print_indent(level);
    std::cout << ")";
  }
  // Shallow copy of the spine: cells are duplicated recursively, but the
  // head Atoms are shared, not cloned.
  Ptr<List> copy() {
    return newPtr<List>(head, tail ? tail->copy() : nullptr);
  }
  // Zero-based element access by recursion; no bounds check — walking past
  // the end dereferences a null tail.
  Ptr<Atom> at(size_t n) {
    if(n) return tail->at(n - 1);
    else return head;
  }
  //List() : head(Atom::VoidValue), tail(nullptr) {}
  List(Ptr<Atom> head, Ptr<List> list) : head(head), tail(list) {}
  List(int l, int c, Ptr<Atom> head, Ptr<List> list) : Atom(l, c), head(head), tail(list) {}
  // Convenience constructors that wrap an arbitrary literal into an Atom via
  // atom(); defined after the atom() overloads, at the end of this header.
  template<typename T>
  List(T head, Ptr<List> list); //: head(head), tail(list) {}
  template<typename T>
  List(int l, int c, T head, Ptr<List> list);
  // Minimal forward iterator over the cells; a null cell pointer is the
  // end() sentinel. Dereferencing yields the cell's head Atom.
  struct List_iterator {
    List_iterator operator++() {el = el->tail.get(); return *this;} //pre
    List_iterator operator++(int x) { // post increment
      List_iterator it(*this);
      el = el->tail.get();
      return it;
    } //post
    Ptr<Atom> operator*() {return el->head;}
    Ptr<Atom> operator->() {return el->head;}
    bool operator==(List_iterator rhs) {return el == rhs.el;}
    bool operator!=(List_iterator rhs) {return el != rhs.el;}
    List *el;
    List_iterator(List *rhs) {el = rhs;}
    List_iterator(const List_iterator &rhs) {el = rhs.el;}
  };
  typedef List_iterator iterator;
  iterator begin() {return iterator(this);}
  iterator end() {return iterator(nullptr);}
};
// Builds a List from an initializer list of atoms (defined out-of-line).
Ptr<List> list(std::initializer_list<Ptr<Atom> > l);
//Ptr<List> list(std::initializer_list<Ptr<Atom> > l);
// Upcasts a List pointer to its Atom base.
inline Ptr<Atom> atom(Ptr<List> &l) {return std::dynamic_pointer_cast<Atom, List>(l);}
// Downcasts an Atom known to be a List; asserts on any other node type.
inline Ptr<List> asList(const Ptr<Atom> &r) {
  assert(r->type() == ListType);
  return std::dynamic_pointer_cast<List, Atom>(r);
}
// In-place append helpers: take a pointer to the tail slot and return the new
// tail slot, allowing O(1) repeated appends (defined out-of-line).
Ptr<List> *append(Ptr<List> *l, Ptr<Atom> a);
Ptr<List> *appendList(Ptr<List> *l, Ptr<List> a);
// Generates a concrete Atom subclass A<name> wrapping a value of type 'ty',
// overriding type(), get_<ty>() and dump(), plus two atom(...) factory
// overloads (with and without source position).
// Note: the positional constructor lists the Atom base before 'value' to
// match the actual initialization order (bases are always initialized before
// members), avoiding -Wreorder warnings.
#define ATOM_SPECIALIZE(name, ty) \
struct A##name : Atom { \
  ty value; \
  A##name(const ty &t) : value(t) {} \
  A##name(int l, int c, const ty &t) : Atom(l, c), value(t) {} \
  virtual ASTType type() {return name##Type;} \
  virtual ty get_##ty() { \
    return value; \
  } \
  virtual ~A##name() {} \
  virtual void dump(int level) { print_indent(level); std::cout << std::boolalpha << get_##ty();} \
}; \
inline Ptr<Atom> atom(int l, int c, ty t) {return newPtr<A##name>(l, c, t);}\
inline Ptr<Atom> atom(ty t) {return newPtr<A##name>(t);}
// Streams a symbol's textual name.
// Fix: write to the stream that was passed in — the previous implementation
// wrote to std::cout regardless of 'out', so streaming a Sym into a file or
// string stream silently printed to stdout instead.
inline std::ostream &operator<<(std::ostream &out, Sym r) {
  return out << r.str();
}
ATOM_SPECIALIZE(I64, i64);
ATOM_SPECIALIZE(Bool, bool);
ATOM_SPECIALIZE(Double, double);
ATOM_SPECIALIZE(Char, uchar);
ATOM_SPECIALIZE(Sym, Sym);
ATOM_SPECIALIZE(String, string);
// 'char' literals are widened to 'uchar' (int) and stored as AChar nodes.
inline Ptr<Atom> atom(char c) {return atom((uchar)c);}
// The unit/"no value" node. value() returns a shared singleton instance
// (defined out-of-line); dump prints it as "v()".
struct AVoid : Atom {
  virtual ASTType type() {return VoidType;}
  virtual ~AVoid() {}
  virtual void dump(int level) {print_indent(level); std::cout << "v()" << std::endl;}
  static Ptr<AVoid> value();
  AVoid() {}
  AVoid(const AVoid &r) {}
};
// Out-of-line definitions of List's templated convenience constructors:
// any literal accepted by an atom() overload can be used directly as a head.
template<typename T>
List::List(T head, Ptr<List> list) : head(atom(head)), tail(list) {}
template<typename T>
List::List(int l, int c, T head, Ptr<List> list) : Atom(l, c), head(atom(head)), tail(list) {}
} // namespace ast
// Legacy C-style AST API, intentionally compiled out. Kept for reference only;
// superseded by the C++ classes above.
#if 0
ASTAtom Alc(ASTAtom, int, int);
ASTAtom Along(long x);
ASTAtom Acstring(cstring x);
ASTAtom Adouble(double x);
ASTAtom Abool(bool x);
ASTAtom Achar(long x);
ASTAtom ASymbol(Symbol x);
ASTAtom Asymbol(cstring x);
bool ASTAtom_isNil(ASTAtom x);
ASTAtom AList(ASTList * c);
ASTAtom AASTList(ASTList * c);
ASTList * ASTList_new(ASTAtom car, ASTList * next);
ASTList * ASTList_add(ASTList ** l, ASTAtom n);
ASTList * ASTList_append(ASTList ** l, ASTList * l2);
ASTList * ASTList_list(size_t count, ...);
ASTList * ASTList_last(ASTList * l);
ASTList * ASTList_reversed(ASTList * l);
ASTAtom * ASTList_at(ASTList * this, size_t index);
size_t ASTList_length(ASTList * this);
ASTList * ASTList_next(ASTList * c);
ASTList ** ASTList_nextr(ASTList * c);
#define foreach(el, li) for(ASTList * el = li; el; el = el->next)
extern ASTAtom nil;
void to_str(char * buf, ASTAtom a);
void dump(ASTAtom what, int indent);
#endif
|
/**
 * User: blangel
 * Date: 12/30/11
 * Time: 2:51 PM
 *
 * A {@link Command} to initialize a directory as a {@literal ply} project.
 * Creates the {@literal .ply/config} directory, seeds the project property
 * files (optionally importing configuration from an existing maven pom) and
 * creates the default source/resource directory layout.
 */
public final class Init extends Command {

    // Files created during initialization; removed again if init fails part-way
    // so a failed run leaves the directory untouched.
    private static final Set<File> CLEANUP_FILES = new HashSet<File>();

    public Init(Args args) {
        super(args);
    }

    /**
     * Runs initialization in the current working directory, translating each
     * failure mode into a user-facing message and a non-zero {@link SystemExit};
     * partially created files are cleaned up on failure.
     */
    public void run() {
        try {
            OutputExt.init("true", "true", "warn,info"); // dis-regard ad-hoc props and defined properties, simply init
            init(new File("."), args);
        } catch (PomNotFound pnf) {
            Output.print("^ply^ ^error^ Specified maven pom file [ ^b^%s^r^ ] does not exist.", pnf.pom);
            cleanupAfterFailure();
            throw new SystemExit(1);
        } catch (NoRepositories nr) {
            Output.print("^ply^ ^error^ No global repositories. Reinstall ply or add a repository to the ^b^$PLY_HOME/config/repositories.properties^r^ file.");
            cleanupAfterFailure();
            throw new SystemExit(1);
        } catch (PomParseException ppe) {
            Output.print("^ply^ ^error^ Could not parse pom [ %s ].", ppe.getMessage());
            Output.print("^ply^ ^error^ As this is a maven project be sure to have run ^b^mvn clean install^r^ before running ^b^ply init^r^.");
            Output.print("^ply^ ^error^ Also ensure your local maven repository is accessible to ply: add line ^b^~/.m2/repository=maven^r^ to file ^b^$PLY_HOME/config/repositories.properties^r^");
            cleanupAfterFailure();
            throw new SystemExit(1);
        } catch (InitException ie) {
            Output.print("^ply^ ^error^ Could not initialize project [ %s ].", (ie.getCause() != null ? ie.getCause() : ""));
            cleanupAfterFailure();
            throw new SystemExit(1);
        } catch (AlreadyInitialized ai) {
            // not a failure — nothing to clean up, just inform the user
            Output.print("^ply^ Current directory is already initialized.");
        } catch (Throwable t) {
            t.printStackTrace(); // exceptional case - print to std-err
            cleanupAfterFailure();
            throw new SystemExit(1);
        }
    }

    /**
     * Initializes {@code from} as a ply project: creates {@literal .ply/config},
     * seeds property files (from a maven pom when one is present/selected,
     * recursing into pom modules; otherwise with defaults), creates the default
     * directory layout and prints the resulting properties.
     * @param from directory to initialize
     * @param args invocation arguments (may contain {@literal --from-pom=...})
     * @throws AlreadyInitialized if {@code from} already contains a {@literal .ply} directory
     * @throws PomNotFound if an explicitly specified pom file does not exist
     */
    private static void init(File from, Args args) throws AlreadyInitialized, PomNotFound {
        // check for existing init.
        File ply = FileUtil.fromParts(from.getPath(), ".ply");
        if (ply.exists()) {
            throw new AlreadyInitialized();
        }
        CLEANUP_FILES.add(ply);
        // now create the .ply/config directories
        File configDir = FileUtil.fromParts(from.getPath(), ".ply", "config");
        configDir.mkdirs();
        // check for an existing maven project
        File mavenPom;
        if ((mavenPom = getMavenPom(from, args)) != null) {
            if (!mavenPom.exists()) {
                throw new PomNotFound(mavenPom.getPath());
            }
            List<RepositoryAtom> repositoryAtoms = getRepositories();
            MavenPomParser parser = new MavenPomParser();
            PrintStream old = setupTabOutput();
            MavenPom pom = null;
            // attempt each repository in turn until one can resolve the pom's parents/dependencies
            for (RepositoryAtom repositoryAtom : repositoryAtoms) {
                try {
                    pom = parser.parsePom(mavenPom.getPath(), repositoryAtom);
                    if (pom != null) {
                        break;
                    }
                } catch (Exception e) {
                    // try next...
                }
            }
            revertTabOutput(old);
            if ((pom == null) || !createProperties(from, pom)) {
                throw new PomParseException(mavenPom.getPath());
            }
            // recurse into maven modules, initializing each sub-directory from its own pom.xml
            if ((pom.modules != null) && !pom.modules.isEmpty()) {
                for (PropFile.Prop submodule : pom.modules.props()) {
                    File pomFile = FileUtil.fromParts(from.getPath(), submodule.name);
                    try {
                        if (pomFile.exists()) {
                            List<String> rawArgs = new ArrayList<String>(2);
                            rawArgs.add("init");
                            rawArgs.add("--from-pom=pom.xml");
                            Args pomArgs = new Args(rawArgs, args.adHocProps);
                            init(pomFile, pomArgs);
                        } else {
                            Output.print("^warn^ Module [ ^b^%s^r^ ] specified in %s but directory not found.", submodule, mavenPom.getPath());
                        }
                    } catch (AlreadyInitialized ai) {
                        // ignore, this is fine
                    } catch (PomNotFound pnf) {
                        Output.print("^warn^ Could not find ^b^%s^r^'s pom file. For init of sub-modules the pom must be named ^b^pom.xml^r^", pomFile.getPath());
                    }
                }
            }
        } else {
            if (!createDefaultProperties(from)) {
                throw new InitException(null);
            }
        }
        // create default directory structure; if not exists
        createDefaultDirectories(from);
        // flush props cache to pick up the newly created properties
        PropsExt.invalidateCaches(configDir);
        // print out the local properties
        Output.print("^ply^ Created the following project properties:");
        Output.print("^ply^");
        PrintStream old = setupTabOutput();
        Get get = new Get(null);
        get.print(configDir, null, Scope.Default, null, false);
        revertTabOutput(old);
        String projectName = Props.get("name", Context.named("project"), Scope.Default, configDir).value();
        Output.print("^ply^");
        Output.print("^ply^ Project ^b^%s^r^ initialized successfully.", projectName);
    }

    /**
     * Resolves which maven pom (if any) to import configuration from: an explicit
     * {@literal --from-pom=...} argument wins; otherwise, when running interactively,
     * the user is prompted to choose among pom files found in {@code from}.
     * @param from directory in which to look for pom files
     * @param args invocation arguments
     * @return the pom file to parse or {@code null} to initialize with defaults
     */
    private static File getMavenPom(File from, Args args) {
        if ((args.args.size() > 1) && args.args.get(1).startsWith("--from-pom=")) {
            return FileUtil.fromParts(from.getPath(), args.args.get(1).substring("--from-pom=".length()));
        } else if (isHeadless()) {
            // no interactive prompt possible; fall back to default initialization
            return null;
        }
        File[] poms = findPomFiles(from);
        if ((poms != null) && poms.length > 0) {
            String options;
            if (poms.length == 1) {
                options = "[Y/n]";
                Output.printNoLine("^ply^ Found a pom file [ ^b^%s^r^ ], parse configuration from it %s ", poms[0].getPath(), options);
            } else {
                options = "[num/n]";
                Output.print("^ply^ Found pom files:");
                int choice = 1;
                for (File pom : poms) {
                    Output.print("^ply^ [^b^%d^r^] %s", choice++, pom.getPath());
                }
                Output.printNoLine("^ply^ parse configuration from %s? ", options);
            }
            // loop until the user gives a valid answer ('y'/'n' or a valid pom number)
            while (true) {
                try {
                    // buffer sized to hold the largest valid number plus a terminator
                    CharBuffer buffer = CharBuffer.allocate(Integer.valueOf(poms.length).toString().length() + 1);
                    new InputStreamReader(System.in).read(buffer);
                    buffer.rewind();
                    String answer = buffer.toString().trim();
                    Integer answerAsNumber = null;
                    try {
                        answerAsNumber = Integer.parseInt(answer);
                    } catch (NumberFormatException nfe) {
                        answerAsNumber = null;
                    }
                    if ((poms.length == 1) && answer.equalsIgnoreCase("y")) {
                        return poms[0];
                    } else if ((poms.length > 1) && (answerAsNumber != null)) {
                        int index = answerAsNumber - 1;
                        if ((index >= 0) && (index < poms.length)) {
                            return poms[index];
                        }
                        Output.printNoLine("^ply^ ^red^invalid number^r^; must be between %d and %d, parse configuration %s ", 1, poms.length, options);
                    } else if (!answer.equalsIgnoreCase("n")) {
                        Output.printNoLine("^ply^ ^red^invalid option^r^, parse configuration %s ", options);
                    } else {
                        break;
                    }
                } catch (IOException ioe) {
                    throw new InitException(ioe);
                }
            }
        }
        return null;
    }

    /**
     * Deletes everything created so far so a failed init leaves no residue.
     */
    private static void cleanupAfterFailure() {
        for (File file : CLEANUP_FILES) {
            FileUtil.delete(file);
        }
    }

    /**
     * Exists as an alternative to {@link net.ocheyedan.ply.PlyUtil#isHeadless()} as during init the process
     * cannot load properties until the local project properties have been initialized; otherwise, the resolved
     * properties (which will not yet contain the, just created, local properties) will not be within the cache.
     * @return true if {@literal ply} is running as headless
     * @see net.ocheyedan.ply.PlyUtil#isHeadless()
     */
    private static boolean isHeadless() {
        String plyPropertiesPath = FileUtil.pathFromParts(PlyUtil.SYSTEM_CONFIG_DIR.getPath(), "ply.properties");
        PropFile plySystemProps = PropFiles.load(plyPropertiesPath, false, false);
        return "true".equalsIgnoreCase(plySystemProps.get("headless").value());
    }

    /**
     * Initializes the {@literal project.properties} file with the following values:
     * namespace = {@link MavenPom#groupId}
     * name = {@link MavenPom#artifactId}
     * version = {@link MavenPom#version}
     * packaging = {@link MavenPom#packaging}
     * Initializes the {@literal dependencies.properties} file with {@link MavenPom#dependencies}
     * and {@literal repositories.properties} file with {@link MavenPom#repositories}.
     * @param from directory from which to save property files
     * @param pom which to extract configuration values.
     * @return true is success; false, otherwise
     */
    private static boolean createProperties(File from, MavenPom pom) {
        Map<String, PropFile> fileToProps = new HashMap<String, PropFile>(3, 1.0f);

        PropFile projectProps = new PropFile(Context.named("project"), PropFile.Loc.Local);
        projectProps.add("namespace", pom.groupId);
        projectProps.add("name", pom.artifactId);
        projectProps.add("version", pom.version);
        if ((pom.packaging != null) && !DependencyAtom.DEFAULT_PACKAGING.equals(pom.packaging)
                && !"pom".equals(pom.packaging)) { // maven's pom packaging will be considered default packaging in ply
            projectProps.add("packaging", pom.packaging);
        }
        if (pom.buildDirectory != null) {
            projectProps.add("build.dir", pom.buildDirectory);
        }
        if (pom.buildSourceDirectory != null) {
            projectProps.add("src.dir", pom.buildSourceDirectory);
        }
        if (pom.buildFinalName != null) {
            projectProps.add("artifact.name", pom.buildFinalName);
        }
        fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "project.properties"), projectProps);

        if ((pom.dependencies != null) && !pom.dependencies.isEmpty()) {
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "dependencies.properties"), pom.dependencies);
        }
        if ((pom.testDependencies != null) && !pom.testDependencies.isEmpty()) {
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "dependencies.test.properties"), pom.testDependencies);
        }
        if ((pom.repositories != null) && !pom.repositories.isEmpty()) {
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "repositories.properties"), pom.repositories);
        }
        if (pom.buildOutputDirectory != null) {
            PropFile compilerProps = new PropFile(Context.named("compiler"), PropFile.Loc.Local);
            compilerProps.add("build.path", pom.buildOutputDirectory);
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "compiler.properties"), compilerProps);
        }
        if (pom.buildTestOutputDirectory != null) {
            PropFile compilerTestProps = new PropFile(Context.named("compiler"), Scope.named("test"), PropFile.Loc.Local);
            compilerTestProps.add("build.path", pom.buildTestOutputDirectory);
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "compiler.test.properties"), compilerTestProps);
        }
        if (pom.buildTestSourceDirectory != null) {
            PropFile projectTestProps = new PropFile(Context.named("project"), Scope.named("test"), PropFile.Loc.Local);
            projectTestProps.add("src.dir", pom.buildTestSourceDirectory);
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "project.test.properties"), projectTestProps);
        }
        if ((pom.modules != null) && !pom.modules.isEmpty()) {
            fileToProps.put(FileUtil.pathFromParts(from.getPath(), ".ply", "config", "submodules.properties"), pom.modules);
        }

        return createProperties(fileToProps);
    }

    /**
     * Creates the {@literal project.res.dir}, {@literal project.src.dir} within the default and test scope
     * for the project based at {@code baseDir}, if the directories don't already exist.
     * @param baseDir from which to create the directory structure
     */
    private static void createDefaultDirectories(File baseDir) {
        File configDir = FileUtil.fromParts(baseDir.getPath(), ".ply", "config");
        File projectPropsFile = FileUtil.fromParts(configDir.getPath(), "project.properties");
        File projectTestPropsFile = FileUtil.fromParts(configDir.getPath(), "project.test.properties");
        PropFileChain projectProps = Props.get(Context.named("project"), Scope.Default, projectPropsFile, true);
        PropFileChain projectTestProps = Props.get(Context.named("project"), Scope.named("test"), projectTestPropsFile, true);
        String srcDirPath = projectProps.get("src.dir").value();
        String resDirPath = projectProps.get("res.dir").value();
        String srcTestDirPath = projectTestProps.get("src.dir").value();
        String resTestDirPath = projectTestProps.get("res.dir").value();
        File srcDir = FileUtil.fromParts(baseDir.getPath(), srcDirPath);
        File resDir = FileUtil.fromParts(baseDir.getPath(), resDirPath);
        File srcTestDir = FileUtil.fromParts(baseDir.getPath(), srcTestDirPath);
        File resTestDir = FileUtil.fromParts(baseDir.getPath(), resTestDirPath);
        // an empty configured path or a failed mkdirs both count as "not created"
        boolean createdSrc = !srcDirPath.isEmpty() && (srcDir.exists() || srcDir.mkdirs()),
                createdRes = !resDirPath.isEmpty() && (resDir.exists() || resDir.mkdirs()),
                createdSrcTest = !srcTestDirPath.isEmpty() && (srcTestDir.exists() || srcTestDir.mkdirs()),
                createdResTest = !resTestDirPath.isEmpty() && (resTestDir.exists() || resTestDir.mkdirs());
        if (!createdSrc || !createdRes || !createdSrcTest || !createdResTest) {
            Output.print("^ply^^warn^ Could not create project directories.");
        }
    }

    /**
     * Initializing the {@literal project.properties} file with the following values:
     * namespace = current working directory
     * name = current working directory
     * version = 1.0
     * and any ad-hoc properties specified on the command line, which take precedence over those specified above.
     * @param from directory from which to save property files
     * @return true on success
     */
    private static boolean createDefaultProperties(File from) {
        Map<Scope, Map<Context, PropFile>> adHocProps = AdHoc.get();
        Map<String, PropFile> projectMap = new HashMap<String, PropFile>(3, 1.0f);
        // first stage all ad-hoc properties, keyed by their target file path
        for (Scope scope : adHocProps.keySet()) {
            Map<Context, PropFile> contexts = adHocProps.get(scope);
            for (Context context : contexts.keySet()) {
                PropFile adHocPropFile = contexts.get(context); // even though Loc == AdHoc; doesn't matter in how we're using it
                if (adHocPropFile.isEmpty()) {
                    continue;
                }
                String path = FileUtil.pathFromParts(from.getPath(), ".ply", "config", PropFiles.getFileName(adHocPropFile));
                projectMap.put(path, adHocPropFile);
            }
        }
        String projectKey = FileUtil.pathFromParts(from.getPath(), ".ply", "config", "project.properties");
        try {
            // derive the default namespace/name from the current directory's simple name
            File projectDirectory = new File(".");
            String path = projectDirectory.getCanonicalPath();
            if (path.endsWith(File.separator)) {
                path = path.substring(0, path.length() - 1);
            }
            int lastPathIndex = path.lastIndexOf(File.separator);
            if (lastPathIndex != -1) {
                path = path.substring(lastPathIndex + 1);
            }
            // ensure at least 'namespace'/'name'/'version' exist in the 'project.properties' file
            PropFile projectProps = projectMap.get(projectKey);
            if (projectProps == null) {
                projectProps = new PropFile(Context.named("project"), Scope.Default, PropFile.Loc.Local);
                projectMap.put(projectKey, projectProps);
            }
            if (!projectProps.contains("namespace")) {
                projectProps.add("namespace", path);
            }
            if (!projectProps.contains("name")) {
                projectProps.add("name", path);
            }
            if (!projectProps.contains("version")) {
                projectProps.add("version", "1.0");
            }
            return createProperties(projectMap);
        } catch (IOException ioe) {
            Output.print("^error^ could not create the local project's properties files.");
            Output.print(ioe);
            return false;
        }
    }

    /**
     * Saves each {@code fileToProps}
     * @param fileToProps mapping from file name to {@link PropFile}
     * @return true if all saves succeeded; false otherwise
     */
    private static boolean createProperties(Map<String, PropFile> fileToProps) {
        for (String filePath : fileToProps.keySet()) {
            PropFile localProperties = fileToProps.get(filePath);
            if (!PropFiles.store(localProperties, filePath, true)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Causes all output to be indented four spaces.
     * @return the current {@link System#out} at time of this call.
     */
    private static PrintStream setupTabOutput() {
        final PrintStream old = System.out;
        PrintStream tabbed = new PrintStream(new ByteArrayOutputStream() /* spurious as calls are delegated to 'old' */) {
            final Object[] nil = new Object[0];
            @Override public void print(String out) {
                old.print(String.format("%s  %s", OutputExt.resolve("^ply^", nil), out));
            }
            @Override public void println(String out) {
                old.println(String.format("%s  %s", OutputExt.resolve("^ply^", nil), out));
            }
        };
        System.setOut(tabbed);
        return old;
    }

    /**
     * Sets the {@link System#out} to {@code old}.
     * @param old the existing {@link PrintStream} before any call to {@link #setupTabOutput()}
     */
    private static void revertTabOutput(PrintStream old) {
        System.setOut(old);
    }

    /**
     * @param from the base directory in which to look for pom files.
     * @return all files within the {@code from} directory ending with "pom.xml"
     */
    private static File[] findPomFiles(File from) {
        return from.listFiles(new FilenameFilter() {
            @Override public boolean accept(File dir, String name) {
                return name.endsWith("pom.xml");
            }
        });
    }

    /**
     * @return all repositories available
     * @throws NoRepositories if there are no repositories
     */
    private static List<RepositoryAtom> getRepositories() throws NoRepositories {
        List<RepositoryAtom> repositories = new ArrayList<RepositoryAtom>();
        String localRepoPath = getSystemLocalRepo();
        RepositoryAtom local = RepositoryAtom.parse(localRepoPath);
        if (local != null) {
            repositories.add(local);
        }
        Collection<Prop> repositoryProperties = getSystemRepositories();
        RepositoryAtom repo;
        for (Prop prop : repositoryProperties) {
            repo = RepositoryAtom.parse(RepositoryAtom.atomFromProp(prop));
            if (repo != null) {
                repositories.add(repo);
            }
        }
        if (repositories.isEmpty()) {
            throw new NoRepositories();
        }
        // local repositories are preferred when resolving pom dependencies
        Collections.sort(repositories, RepositoryAtom.LOCAL_COMPARATOR);
        return repositories;
    }

    /**
     * @return the system value for the {@literal depmngr.localRepo} property for the default scope
     */
    private static String getSystemLocalRepo() {
        PropFile depmngr = PropFiles.load(FileUtil.pathFromParts(PlyUtil.SYSTEM_CONFIG_DIR.getPath(), "depmngr.properties"), false, true);
        if (depmngr == null) {
            return null;
        }
        return depmngr.get("localRepo").value();
    }

    /**
     * @return the system defined repositories for the default scope
     */
    private static Collection<Prop> getSystemRepositories() {
        String systemRepositoriesPath = FileUtil.pathFromParts(PlyUtil.SYSTEM_CONFIG_DIR.getPath(), "repositories.properties");
        PropFile systemRepositoriesProps = PropFiles.load(systemRepositoriesPath, false, false);
        List<Prop> props = new ArrayList<Prop>();
        for (Prop systemRepoProp : systemRepositoriesProps.props()) {
            props.add(systemRepoProp);
        }
        return props;
    }

    /**
     * Thrown to indicate the directory has already been initialized.
     */
    @SuppressWarnings("serial")
    private static class AlreadyInitialized extends RuntimeException { }

    /**
     * Thrown to indicate that a specified pom file could not be found.
     */
    @SuppressWarnings("serial")
    private static class PomNotFound extends RuntimeException {
        final String pom;
        private PomNotFound(String pom) {
            super();
            this.pom = pom;
        }
    }

    /**
     * Thrown to indicate that a pom could not be parsed
     */
    @SuppressWarnings("serial")
    private static class PomParseException extends RuntimeException {
        private PomParseException(String message) {
            super(message);
        }
    }

    /**
     * Thrown to wrap an unexpected failure during initialization.
     */
    @SuppressWarnings("serial")
    private static class InitException extends RuntimeException {
        private InitException(Throwable cause) {
            super(cause);
        }
    }

    /**
     * Thrown to indicate that no repositories could be found while looking up the pom.
     */
    @SuppressWarnings("serial")
    private static class NoRepositories extends RuntimeException { }
}
# Copyright 2017 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from unittest.mock import patch
from fossor.checks.BasicEnvirCheck.diskusage import DiskUsage
@patch('fossor.plugin.Plugin.shell_call')
def test_disk_usage(sc_mock):
    """The check should flag a partition whose utilization exceeds the critical threshold."""
    df_output = (
        'Filesystem 1K-blocks Used Available Use% Mounted on\n'
        '/dev/sda2 407G 318G 69G 99% /\n'
        'tmpfs 32G 161M 32G 1% /dev/shm'
    )
    # shell_call returns (stdout, stderr, return_code)
    sc_mock.return_value = df_output, '', 0
    check = DiskUsage()
    expected = 'Disk utilization is at critical state (> 98). partition=/dev/sda2 at utilization=99%'
    assert check.run({}) == expected
|
/**
 * Functional tests to validate that {@code PrefsKeyAmplitudeSynced} properties get synced
 * correctly with amplitude user properties.
 *
 * The {@link AmplitudeClient} is mocked so that the JSON payload passed to
 * {@code setUserProperties} is captured into {@link #userProperties}, which
 * each test then inspects.
 *
 * @author Inderjeet Singh
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({Context.class, SharedPreferences.class, PreferenceManager.class, Amplitude.class})
public class PrefsAmplitudeSyncFunctionalTest {

    // Marker attached to a TypedKey to opt the key into amplitude syncing.
    private static final String AMPLITUDE_SYNCED = "amplitudeSynced";

    private Prefs prefs;
    private AmplitudeClient amplitudeClient;
    // Last JSON payload the listener forwarded to the (mocked) amplitude client.
    private JSONObject userProperties;

    @Before
    public void setUp() throws Exception {
        Context context = AndroidFixtures.createMockContext();
        Gson gson = new Gson();
        prefs = new Prefs(context, gson);
        amplitudeClient = Mockito.mock(AmplitudeClient.class);
        // Capture whatever is pushed to amplitude instead of sending it anywhere.
        Mockito.doAnswer(new Answer<Void>() {
            public Void answer(InvocationOnMock invocation) {
                Object[] args = invocation.getArguments();
                userProperties = (JSONObject) args[0]; // Just set userProperties
                return null;
            }
        }).when(amplitudeClient).setUserProperties(Mockito.any(JSONObject.class));
        prefs.addListener(new PrefsAmplitudeSyncListener(amplitudeClient, gson, AMPLITUDE_SYNCED));
    }

    /** Only keys tagged with the sync marker should reach amplitude. */
    @Test
    public void testAmplitudeSync() throws Exception {
        TypedKey<String> sync = new TypedKey<>("sync", String.class, AMPLITUDE_SYNCED);
        prefs.put(sync, "test");
        assertEquals("test", userProperties.get("sync"));

        // assert that amplitude sync doesn't happen for non-synced properties
        TypedKey<String> nosync = new TypedKey<>("nosync", String.class);
        prefs.put(nosync, "test");
        assertFalse(userProperties.has("nosync"));
    }

    /** Removing a synced boolean should flip the amplitude property to false. */
    @Test
    public void testAmplitudeUnsetOnBooleanPropertyRemoval() throws Exception {
        TypedKey<Boolean> bool = new TypedKey<>("bool", Boolean.class, AMPLITUDE_SYNCED);
        prefs.put(bool, true);
        assertTrue(userProperties.getBoolean("bool"));
        prefs.remove(bool);
        assertFalse(userProperties.getBoolean("bool"));
    }

    // Simple POJO used to verify that object values are serialized to JSON text.
    @SuppressWarnings("unused")
    private static class Bag {
        final int apples;
        final int oranges;
        Bag(int apples, int oranges) {
            this.apples = apples;
            this.oranges = oranges;
        }
    }

    /** Objects are synced as their Gson-serialized JSON string. */
    @Test
    public void testAmplitudeSetObjectProperty() throws Exception {
        TypedKey<Bag> bag = new TypedKey<>("bag", Bag.class, AMPLITUDE_SYNCED);
        prefs.put(bag, new Bag(2, 3));
        assertEquals("{\"apples\":2,\"oranges\":3}", userProperties.get("bag"));
    }

    /** Both the boxed and primitive int key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetIntProperty() throws Exception {
        TypedKey<Integer> key = new TypedKey<>("key", Integer.class, AMPLITUDE_SYNCED);
        prefs.put(key, 3);
        assertEquals(3, userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", int.class, AMPLITUDE_SYNCED);
        prefs.put(key2, 4);
        assertEquals(4, userProperties.get("key2"));
    }

    /** Both the boxed and primitive short key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetShortProperty() throws Exception {
        TypedKey<Short> key = new TypedKey<>("key", Short.class, AMPLITUDE_SYNCED);
        prefs.put(key, (short)3);
        assertEquals((short)3, userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", short.class, AMPLITUDE_SYNCED);
        prefs.put(key2, (short)4);
        assertEquals((short)4, userProperties.get("key2"));
    }

    /** Both the boxed and primitive long key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetLongProperty() throws Exception {
        TypedKey<Long> key = new TypedKey<>("key", Long.class, AMPLITUDE_SYNCED);
        prefs.put(key, (long)3);
        assertEquals((long)3, userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", long.class, AMPLITUDE_SYNCED);
        prefs.put(key2, (long)4);
        assertEquals((long)4, userProperties.get("key2"));
    }

    /** Both the boxed and primitive byte key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetByteProperty() throws Exception {
        TypedKey<Byte> key = new TypedKey<>("key", Byte.class, AMPLITUDE_SYNCED);
        prefs.put(key, (byte)3);
        assertEquals((byte)3, userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", byte.class, AMPLITUDE_SYNCED);
        prefs.put(key2, (byte)4);
        assertEquals((byte)4, userProperties.get("key2"));
    }

    /** Both the boxed and primitive char key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetCharacterProperty() throws Exception {
        TypedKey<Character> key = new TypedKey<>("key", Character.class, AMPLITUDE_SYNCED);
        prefs.put(key, 'a');
        assertEquals('a', userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", char.class, AMPLITUDE_SYNCED);
        prefs.put(key2, 'b');
        assertEquals('b', userProperties.get("key2"));
    }

    /** String values should sync verbatim. */
    @Test
    public void testAmplitudeSetStringProperty() throws Exception {
        TypedKey<String> key = new TypedKey<>("key", String.class, AMPLITUDE_SYNCED);
        prefs.put(key, "abracadabra");
        assertEquals("abracadabra", userProperties.get("key"));
    }

    /** Both the boxed and primitive boolean key types should sync. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testAmplitudeSetBooleanProperty() throws Exception {
        TypedKey<Boolean> key = new TypedKey<>("key", Boolean.class, AMPLITUDE_SYNCED);
        prefs.put(key, true);
        assertEquals(true, userProperties.get("key"));

        TypedKey key2 = new TypedKey<>("key2", boolean.class, AMPLITUDE_SYNCED);
        prefs.put(key2, true);
        assertEquals(true, userProperties.get("key2"));
    }

    /** Float values should sync without loss. */
    @Test
    public void testAmplitudeSetFloatProperty() throws Exception {
        TypedKey<Float> key = new TypedKey<>("key", Float.class, AMPLITUDE_SYNCED);
        prefs.put(key, 1.23F);
        assertEquals(1.23F, userProperties.get("key"));
    }

    /** Double values should sync without loss. */
    @Test
    public void testAmplitudeSetDoubleProperty() throws Exception {
        TypedKey<Double> key = new TypedKey<>("key", Double.class, AMPLITUDE_SYNCED);
        prefs.put(key, 1.233D);
        assertEquals(1.233D, userProperties.get("key"));
    }
}
def testSinglePageWithZeroPages( self ):
project = TestUtils.createProject()
self.assertIs( project.single_page, True ) |
Abstract 080: Dietary Fructose Enhances Protein Kinase C Activation by Angiotensin II in Proximal Tubules via Changes in Intracellular Calcium
Dietary fructose causes salt-sensitive hypertension. This is in part due to increasing the sensitivity of proximal nephron Na reabsorption to angiotensin II (Ang II) such that lower concentrations stimulate transport to a greater extent. Ang II stimulates Na transport in this segment by increasing protein kinase C (PKC) α, a calcium- and lipid-dependent kinase. We hypothesized that dietary fructose increases the ability of Ang II to elevate intracellular calcium (Cai) and, thereby, activate PKC α in proximal tubules. This, in turn, stimulates Na/H exchange activity, the primary transporter involved in Na reabsorption. To test our hypothesis, rats were maintained on a diet of normal chow plus tap water or normal chow plus 20% fructose. The effect of Ang II on Cai was measured using Fura2 in perfused S2 segments. Na/H exchange (NHE) was measured in perfused tubules using the pH-sensitive dye BCECF. PKC α activity was measured by separating particulate and soluble fractions, performing Western blots and recording the particulate to soluble ratio. Basal Cai was 143±29 nM in proximal tubules from control rats while it was 160±30 nM in those given fructose, not significantly different. Ang II (1 nM) increased Cai by 43±10 nM in control tubules and by 148±53 nM in tubules from rats fed fructose (p < 0.03). A higher concentration of Ang II (100 nM) had a similar effect in tubules from both groups (Fructose: 237±100 nM vs. Control: 190±34 nM). Ang II increased the particulate to soluble ratio of PKC α, a measure of activation, by 0.134±0.026 in tubules from rats fed fructose (p < 0.001) but not significantly in control tubules (0.060±0.061). Finally, we measured NHE activity. Ang II (1 pM) increased NHE activity by 0.7±0.1 fluorescent units/s in tubules from rats given fructose but had no effect on NHE activity in control tubules (p < 0.01). With Go6976, a PKC α/β1 inhibitor, Ang II was unable to stimulate NHE activity in tubules from rats fed fructose.
We conclude that dietary fructose increases the ability of Ang II to elevate Cai, and consequently PKCα. This, in turn, stimulates NHE activity which likely contributes to fructose-induced salt-sensitivity of blood pressure. |
/**
* Task synchronize list.
*
* @author Dmitriy Bobrov ([email protected])
*/
public class SyncListLinkTest {
/**
* Test add.
*/
@Test
public void whenAddValueToLinkedConatainer() {
SyncListLink<Integer> linkedList = new SyncListLink<>();
for (int i = 0; i < 6; i++) {
linkedList.add(i);
}
Integer result = linkedList.get(4);
Integer expected = 4;
assertThat(result, is(expected));
}
/**
* Test Iterator.
*/
@Test
public void testIteratorInRealCondition() {
Integer result = null;
SyncListLink<Integer> linkedList = new SyncListLink<>();
for (int i = 0; i < 6; i++) {
linkedList.add(i);
}
while (linkedList.iterator().hasNext()) {
result = linkedList.iterator().next();
}
Integer expected = 5;
assertThat(result, is(expected));
}
} |
def generator2toolset(cls, generator):
if not platform._is_win:
raise NotImplementedError("generator2toolset only available on Windows")
if not generator.startswith("Visual Studio"):
raise ValueError("Toolsets only available for Visual Studio generators.")
if generator.endswith(('Win64', 'ARM', 'IA64')):
generator = (generator.rsplit(' ', 1)[0]).strip()
vs_generator_map = {'Visual Studio 16 2019': 'v142',
'Visual Studio 15 2017': 'v141',
'Visual Studio 14 2015': 'v140',
'Visual Studio 12 2013': 'v120',
'Visual Studio 11 2012': 'v110',
'Visual Studio 10 2010': 'v100',
'Visual Studio 9 2008': 'v90'}
out = vs_generator_map.get(generator, None)
if out is None:
raise ValueError("Failed to locate toolset for generator: %s" % generator)
return out |
import { IVersionMetaData } from './IVersionMetaData';
import { ITagInfo } from './ITagInfo';
/**
 * Metadata describing a single program entry, including its tag and
 * version information.
 */
export interface IProgram {
    /** Numeric identifier of the program — presumably unique; confirm against the data source. */
    id: number;
    /** Display name of the program. */
    name: string;
    /** Path associated with the program — likely a filesystem or repository path; verify against callers. */
    path: string;
    /** Languages attached to the program (string codes/names). */
    langs: string[];
    /** Tag metadata entries associated with the program. */
    tagInfos: ITagInfo[];
    /** Metadata for each known version of the program. */
    versions: IVersionMetaData[];
}
|
Analysis of the phosphatidylinositol 3'-kinase signaling pathway in glioblastoma patients in vivo.
Deregulated signaling through the phosphatidylinositol 3'-kinase (PI3K) pathway is common in many types of cancer, including glioblastoma. Dissecting the molecular events associated with activation of this pathway in glioblastoma patients in vivo presents an important challenge that has implications for the development and clinical testing of PI3K pathway inhibitors. Using an immunohistochemical analysis applied to a tissue microarray, we performed hierarchical clustering and multidimensional scaling, as well as univariate and multivariate analyses, to dissect the PI3K pathway in vivo. We demonstrate that loss of the tumor suppressor protein PTEN, which antagonizes PI3K pathway activation, is highly correlated with activation of the main PI3K effector Akt in vivo. We also show that Akt activation is significantly correlated with phosphorylation of mammalian target of rapamycin (mTOR), the family of forkhead transcription factors (FOXO1, FOXO3a, and FOXO4), and S6, which are thought to promote its effects. Expression of the mutant epidermal growth factor receptor vIII is also tightly correlated with phosphorylation of these effectors, demonstrating an additional route to PI3K pathway activation in glioblastomas in vivo. These results provide the first dissection of the PI3K pathway in glioblastoma in vivo and suggest an approach to stratifying patients for targeted kinase inhibitor therapy. |
/**
 * Minimal fixture class whose single method unconditionally reports {@code true}.
 */
public class Program {

    /** The constant result every invocation yields. */
    private static final boolean RESULT = true;

    /**
     * Reports an unconditionally true result.
     *
     * @return {@code true}, always
     */
    public static boolean returnTrue() {
        return RESULT;
    }
}
/**
* Class for server analysis failed exception.
*
* @author Murat Artim
* @date 7 Apr 2017
* @time 16:04:44
*
*/
public class ServerAnalysisFailedException extends Exception {
/** Serial ID. */
private static final long serialVersionUID = 1L;
/** Server message. */
private final AnalysisFailed serverMessage_;
/**
* Creates server analysis failed exception.
*
* @param serverMessage
* Server message.
*/
public ServerAnalysisFailedException(AnalysisFailed serverMessage) {
serverMessage_ = serverMessage;
}
/**
* Returns the thrown exception message of the analysis from the server.
*
* @return The thrown exception message of the analysis from the server.
*/
public String getServerExceptionMessage() {
return serverMessage_.getExceptionMessage();
}
/**
* Returns analysis output file download URL or <code>null</code> if no output file was uploaded or produced.
*
* @return Analysis output file download URL or <code>null</code> if no output file was uploaded or produced.
*/
public String getDownloadUrl() {
return serverMessage_.getDownloadUrl();
}
/**
* Returns the listener hash code.
*
* @return Listener hash code.
*/
public int getListenerHashCode() {
return serverMessage_.getListenerHashCode();
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.