/**
* @file
* Bind exo links.
*/
(function ($, Drupal) {
/**
* Attaches the Ajax behavior to each Ajax form element.
*
* @type {Drupal~behavior}
*
* @prop {Drupal~behaviorAttach} attach
* Initialize all {@link Drupal.Ajax} objects declared in
* `drupalSettings.ajax` or initialize {@link Drupal.Ajax} objects from
* DOM elements having the `use-ajax-submit` or `use-ajax` css class.
* @prop {Drupal~behaviorDetach} detach
* During `unload` remove all {@link Drupal.Ajax} objects related to
* the removed content.
*/
Drupal.behaviors.exoAjax = {
attach: function (context, settings) {
// Bind Ajax behaviors to all items showing the class.
$('.exo-ajax').once('ajax').each(function () {
const element_settings = {} as any;
// Clicked links look better with the throbber than the progress bar.
element_settings.progress = {type: 'fullscreen'};
// For anchor tags, these will go to the target of the anchor rather
// than the usual location.
        const href = $(this).attr('href');
if (href) {
element_settings.url = href;
element_settings.event = 'click';
}
element_settings.dialogType = $(this).data('dialog-type');
element_settings.dialog = $(this).data('dialog-options');
element_settings.base = $(this).attr('id');
element_settings.element = this;
Drupal.ajax(element_settings);
});
}
};
}(jQuery, Drupal));
|
n = int(input())
d = []
for i in range(n):
    t = input().split(" ")
    t = [float(j) for j in t]
    d.append(t)
# Sum the Euclidean distance over every unordered pair of points.
dsum = 0
for i in range(n):
    for j in range(i + 1, n):
        dtmp = ((d[i][0] - d[j][0]) ** 2) + ((d[i][1] - d[j][1]) ** 2)
        dsum = dsum + dtmp ** 0.5
# Every unordered pair is adjacent in a uniformly random visiting order
# with probability 2 / n, so the expected path length is dsum * 2 / n.
res = dsum / (n / 2)
print(res)
|
Design and Test of a Portable Potentiostat for Detecting Pesticide Residue in the Greenhouse Atmosphere
In order to rapidly measure pesticide residue in the greenhouse atmosphere, this paper proposes a portable potentiostat based on the C8051F020 chip. It amplifies the weak current signal into a voltage signal that can be identified. To test the performance of the instrument, a test was carried out using a circuit model equivalent to a three-electrode electrochemical sensor. The experimental results show that the instrument has good accuracy.
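For context, a hedged note on the conversion described above (the abstract does not name the amplifier topology; potentiostats typically use a transimpedance stage): with a feedback resistance $R_f$, the output voltage is $V_{out} = -I_{in} R_f$, so an $R_f$ in the megaohm range maps nanoampere sensor currents to millivolt-scale voltages that the instrument can identify.
|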
class VcfVariant:
"""A variant in a VCF file (not to be confused with core.Variant)"""
__slots__ = ('position', 'reference_allele', 'alternative_allele')
def __init__(self, position, reference_allele, alternative_allele):
"""
position -- 0-based start coordinate
reference_allele -- string
alternative_allele -- string
Multi-ALT sites are not modelled.
"""
self.position = position
self.reference_allele = reference_allele
self.alternative_allele = alternative_allele
def __repr__(self):
return "VcfVariant({}, {!r}, {!r})".format(self.position,
self.reference_allele, self.alternative_allele)
def __hash__(self):
return hash((self.position, self.reference_allele, self.alternative_allele))
def __eq__(self, other):
return (self.position == other.position) and \
(self.reference_allele == other.reference_allele) and \
(self.alternative_allele == other.alternative_allele)
def __lt__(self, other):
return (self.position, self.reference_allele, self.alternative_allele) < (other.position, other.reference_allele, other.alternative_allele)
def is_snv(self):
return (self.reference_allele != self.alternative_allele) and (
len(self.reference_allele) == len(self.alternative_allele) == 1)
def normalized(self):
"""
Return a normalized version of this variant.
Common prefixes and/or suffixes between the reference and alternative allele are removed,
and the position is adjusted as necessary.
>>> VcfVariant(100, 'GCTGTT', 'GCTAAATT').normalized()
VcfVariant(103, 'G', 'AAA')
"""
pos, ref, alt = self.position, self.reference_allele, self.alternative_allele
while len(ref) >= 1 and len(alt) >= 1 and ref[-1] == alt[-1]:
ref, alt = ref[:-1], alt[:-1]
while len(ref) >= 1 and len(alt) >= 1 and ref[0] == alt[0]:
ref, alt = ref[1:], alt[1:]
pos += 1
        return VcfVariant(pos, ref, alt)
|
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Backend.Kernels import Pool
from PuzzleLib.Modules.Module import ModuleError, Module
from PuzzleLib.Modules.MaxPool2D import MaxPool2D
class MaxUnpool2D(Module):
def __init__(self, maxpool2d, name=None):
super().__init__(name)
self.registerBlueprint(locals(), exclude=["maxpool2d"])
self.maxpool2d = maxpool2d
self.maxpool2d.withMask = True
def updateData(self, data):
self.data = Pool.maxunpool2d(data, self.maxpool2d.inData.shape, self.maxpool2d.mask)
def updateGrad(self, grad):
self.grad = Pool.maxunpool2dBackward(grad, self.maxpool2d.data.shape, self.maxpool2d.mask)
def dataShapeFrom(self, shape):
batchsize, maps, inh, inw = shape
hsize, wsize = self.maxpool2d.size
padh, padw = self.maxpool2d.pad
hstride, wstride = self.maxpool2d.stride
outh = (inh - 1) * hstride - 2 * padh + hsize
outw = (inw - 1) * wstride - 2 * padw + wsize
return batchsize, maps, outh, outw
def checkDataShape(self, shape):
if shape != self.maxpool2d.mask.shape:
raise ModuleError("Data shape (current %s) must be equal to connected MaxPool2D mask shape (%s)" %
(shape, self.maxpool2d.mask.shape))
def gradShapeFrom(self, shape):
batchsize, maps, outh, outw = shape
hsize, wsize = self.maxpool2d.size
padh, padw = self.maxpool2d.pad
hstride, wstride = self.maxpool2d.stride
inh = (outh + 2 * padh - hsize) // hstride + 1
inw = (outw + 2 * padw - wsize) // wstride + 1
return batchsize, maps, inh, inw
def checkGradShape(self, shape):
if shape != self.maxpool2d.inData.shape:
raise ModuleError("Grad shape (current %s) must be equal to connected MaxPool2D data shape (%s)" %
(shape, self.maxpool2d.inData.shape))
def unittest():
batchsize, maps, h, w = 15, 3, 4, 5
indata = gpuarray.to_gpu(np.random.randn(batchsize, maps, h, w).astype(np.float32))
maxpool2d = MaxPool2D()
maxunpool2d = MaxUnpool2D(maxpool2d)
maxpool2d(indata)
data = gpuarray.to_gpu(np.random.randn(*maxpool2d.data.shape).astype(np.float32))
maxunpool2d(data)
hostPoolData = data.get()
hostMask = maxpool2d.mask.get()
hostOutData = np.zeros(maxpool2d.inData.shape, dtype=np.float32)
for b in range(batchsize):
for c in range(maps):
for y in range(maxpool2d.data.shape[2]):
for x in range(maxpool2d.data.shape[3]):
maxidx = hostMask[b, c, y, x]
hostOutData[b, c].ravel()[maxidx] = hostPoolData[b, c, y, x]
assert np.allclose(hostOutData, maxunpool2d.data.get())
grad = gpuarray.to_gpu(np.random.randn(*maxunpool2d.data.shape).astype(np.float32))
maxunpool2d.backward(grad)
hostGrad = grad.get()
hostInGrad = np.empty(maxunpool2d.grad.shape, dtype=np.float32)
for b in range(batchsize):
for c in range(maps):
for y in range(maxpool2d.data.shape[2]):
for x in range(maxpool2d.data.shape[3]):
maxidx = hostMask[b, c, y, x]
hostInGrad[b, c, y, x] = hostGrad[b, c].ravel()[maxidx]
assert np.allclose(hostInGrad, maxunpool2d.grad.get())
if __name__ == "__main__":
unittest()
|
/// Run the build-spec command
pub fn run<G, E>(self, config: Configuration<G, E>) -> error::Result<()>
where
G: RuntimeGenesis,
E: ChainSpecExtension,
{
info!("Building chain spec");
let mut spec = config.expect_chain_spec().clone();
let raw_output = self.raw;
if spec.boot_nodes().is_empty() && !self.disable_default_bootnode {
let keys = config.network.node_key.into_keypair()?;
let peer_id = keys.public().into_peer_id();
let addr = build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16), P2p(peer_id)];
spec.add_boot_node(addr)
}
let json = sc_service::chain_ops::build_spec(spec, raw_output)?;
print!("{}", json);
Ok(())
}
|
import numpy

def update_state(selected_event, lattice_state):
    if selected_event == "attach":
        # Attach a particle at a randomly chosen empty site.
        pos = numpy.nonzero(lattice_state == 0)[0]
        index = numpy.random.choice(pos)
        lattice_state[index] += 1
    elif selected_event == "detach":
        # Detach a particle from a randomly chosen occupied site (excluding the last).
        pos = numpy.nonzero(lattice_state[:-1] == 1)[0]
        index = numpy.random.choice(pos)
        lattice_state[index] -= 1
    elif selected_event == "detach_end":
        # Detach the particle at the last site.
        lattice_state[-1] -= 1
    elif selected_event == "forward_hop":
        # Hop a particle forward from an occupied site into an empty right neighbour.
        pos = numpy.nonzero((lattice_state[1:] - lattice_state[:-1]) == -1)[0]
        index = numpy.random.choice(pos)
        lattice_state[index] -= 1
        lattice_state[index + 1] += 1
    return lattice_state
|
/**
* The Shiro framework's default concrete implementation of the {@link SecurityManager} interface,
* based around a collection of {@link org.apache.shiro.realm.Realm}s. This implementation delegates its
* authentication, authorization, and session operations to wrapped {@link Authenticator}, {@link Authorizer}, and
* {@link org.apache.shiro.session.mgt.SessionManager SessionManager} instances respectively via superclass
* implementation.
* <p/>
* To greatly reduce and simplify configuration, this implementation (and its superclasses) will
* create suitable defaults for all of its required dependencies, <em>except</em> the required one or more
* {@link Realm Realm}s. Because {@code Realm} implementations usually interact with an application's data model,
* they are almost always application specific; you will want to specify at least one custom
* {@code Realm} implementation that 'knows' about your application's data/security model
* (via {@link #setRealm} or one of the overloaded constructors). All other attributes in this class hierarchy
* will have suitable defaults for most enterprise applications.
* <p/>
* <b>RememberMe notice</b>: This class supports the ability to configure a
* {@link #setRememberMeManager RememberMeManager}
* for {@code RememberMe} identity services for login/logout, BUT, a default instance <em>will not</em> be created
* for this attribute at startup.
* <p/>
* Because RememberMe services are inherently client tier-specific and
 * therefore application-dependent, if you want {@code RememberMe} services enabled, you will have to specify an
 * instance yourself via the {@link #setRememberMeManager(RememberMeManager) setRememberMeManager}
 * mutator. However, if you're reading this JavaDoc with the
 * expectation of operating in a Web environment, take a look at the
 * {@code org.apache.shiro.web.DefaultWebSecurityManager} implementation, which
 * <em>does</em> support {@code RememberMe} services by default at startup.
*
* @since 0.2
*/
public class DefaultSecurityManager extends SessionsSecurityManager {
private static final Logger log = LoggerFactory.getLogger(DefaultSecurityManager.class);
protected RememberMeManager rememberMeManager;
protected SubjectDAO subjectDAO;
protected SubjectFactory subjectFactory;
/**
* Default no-arg constructor.
*/
public DefaultSecurityManager() {
super();
this.subjectFactory = new DefaultSubjectFactory();
this.subjectDAO = new DefaultSubjectDAO();
}
/**
* Supporting constructor for a single-realm application.
*
* @param singleRealm the single realm used by this SecurityManager.
*/
public DefaultSecurityManager(Realm singleRealm) {
this();
setRealm(singleRealm);
}
/**
* Supporting constructor for multiple {@link #setRealms realms}.
*
* @param realms the realm instances backing this SecurityManager.
*/
public DefaultSecurityManager(Collection<Realm> realms) {
this();
setRealms(realms);
}
/**
* Returns the {@code SubjectFactory} responsible for creating {@link Subject} instances exposed to the application.
*
* @return the {@code SubjectFactory} responsible for creating {@link Subject} instances exposed to the application.
*/
public SubjectFactory getSubjectFactory() {
return subjectFactory;
}
/**
* Sets the {@code SubjectFactory} responsible for creating {@link Subject} instances exposed to the application.
*
* @param subjectFactory the {@code SubjectFactory} responsible for creating {@link Subject} instances exposed to the application.
*/
public void setSubjectFactory(SubjectFactory subjectFactory) {
this.subjectFactory = subjectFactory;
}
/**
     * Returns the {@code SubjectDAO} responsible for persisting Subject state, typically used after login or when a
     * Subject identity is discovered (e.g. after RememberMe services). Unless configured otherwise, the default
* implementation is a {@link DefaultSubjectDAO}.
*
     * @return the {@code SubjectDAO} responsible for persisting Subject state, typically used after login or when a
     * Subject identity is discovered (e.g. after RememberMe services).
* @see DefaultSubjectDAO
* @since 1.2
*/
public SubjectDAO getSubjectDAO() {
return subjectDAO;
}
/**
     * Sets the {@code SubjectDAO} responsible for persisting Subject state, typically used after login or when a
     * Subject identity is discovered (e.g. after RememberMe services). Unless configured otherwise, the default
* implementation is a {@link DefaultSubjectDAO}.
*
     * @param subjectDAO the {@code SubjectDAO} responsible for persisting Subject state, typically used after login or when a
     * Subject identity is discovered (e.g. after RememberMe services).
* @see DefaultSubjectDAO
* @since 1.2
*/
public void setSubjectDAO(SubjectDAO subjectDAO) {
this.subjectDAO = subjectDAO;
}
public RememberMeManager getRememberMeManager() {
return rememberMeManager;
}
public void setRememberMeManager(RememberMeManager rememberMeManager) {
this.rememberMeManager = rememberMeManager;
}
protected SubjectContext createSubjectContext() {
return new DefaultSubjectContext();
}
/**
* Creates a {@code Subject} instance for the user represented by the given method arguments.
*
* @param token the {@code AuthenticationToken} submitted for the successful authentication.
* @param info the {@code AuthenticationInfo} of a newly authenticated user.
* @param existing the existing {@code Subject} instance that initiated the authentication attempt
* @return the {@code Subject} instance that represents the context and session data for the newly
* authenticated subject.
*/
protected Subject createSubject(AuthenticationToken token, AuthenticationInfo info, Subject existing) {
SubjectContext context = createSubjectContext();
context.setAuthenticated(true);
context.setAuthenticationToken(token);
context.setAuthenticationInfo(info);
context.setSecurityManager(this);
if (existing != null) {
context.setSubject(existing);
}
        // This is where the Subject is created.
return createSubject(context);
}
/**
* Binds a {@code Subject} instance created after authentication to the application for later use.
* <p/>
* As of Shiro 1.2, this method has been deprecated in favor of {@link #save(org.apache.shiro.subject.Subject)},
* which this implementation now calls.
*
* @param subject the {@code Subject} instance created after authentication to be bound to the application
* for later use.
* @see #save(org.apache.shiro.subject.Subject)
* @deprecated in favor of {@link #save(org.apache.shiro.subject.Subject) save(subject)}.
*/
@Deprecated
protected void bind(Subject subject) {
save(subject);
}
protected void rememberMeSuccessfulLogin(AuthenticationToken token, AuthenticationInfo info, Subject subject) {
RememberMeManager rmm = getRememberMeManager();
if (rmm != null) {
try {
rmm.onSuccessfulLogin(subject, token, info);
} catch (Exception e) {
if (log.isWarnEnabled()) {
String msg = "Delegate RememberMeManager instance of type [" + rmm.getClass().getName() +
"] threw an exception during onSuccessfulLogin. RememberMe services will not be " +
"performed for account [" + info + "].";
log.warn(msg, e);
}
}
} else {
if (log.isTraceEnabled()) {
log.trace("This " + getClass().getName() + " instance does not have a " +
"[" + RememberMeManager.class.getName() + "] instance configured. RememberMe services " +
"will not be performed for account [" + info + "].");
}
}
}
protected void rememberMeFailedLogin(AuthenticationToken token, AuthenticationException ex, Subject subject) {
RememberMeManager rmm = getRememberMeManager();
if (rmm != null) {
try {
rmm.onFailedLogin(subject, token, ex);
} catch (Exception e) {
if (log.isWarnEnabled()) {
String msg = "Delegate RememberMeManager instance of type [" + rmm.getClass().getName() +
"] threw an exception during onFailedLogin for AuthenticationToken [" +
token + "].";
log.warn(msg, e);
}
}
}
}
protected void rememberMeLogout(Subject subject) {
RememberMeManager rmm = getRememberMeManager();
if (rmm != null) {
try {
rmm.onLogout(subject);
} catch (Exception e) {
if (log.isWarnEnabled()) {
String msg = "Delegate RememberMeManager instance of type [" + rmm.getClass().getName() +
"] threw an exception during onLogout for subject with principals [" +
(subject != null ? subject.getPrincipals() : null) + "]";
log.warn(msg, e);
}
}
}
}
/**
* First authenticates the {@code AuthenticationToken} argument, and if successful, constructs a
* {@code Subject} instance representing the authenticated account's identity.
* <p/>
* Once constructed, the {@code Subject} instance is then {@link #bind bound} to the application for
* subsequent access before being returned to the caller.
*
* @param token the authenticationToken to process for the login attempt.
* @return a Subject representing the authenticated user.
* @throws AuthenticationException if there is a problem authenticating the specified {@code token}.
*/
public Subject login(Subject subject, AuthenticationToken token) throws AuthenticationException {
AuthenticationInfo info;
try {
            // The real authentication starts here: an AuthenticationInfo object receives the authentication data returned by the Realm.
info = authenticate(token);
} catch (AuthenticationException ae) {
try {
onFailedLogin(token, ae, subject);
} catch (Exception e) {
if (log.isInfoEnabled()) {
log.info("onFailedLogin method threw an " +
"exception. Logging and propagating original AuthenticationException.", e);
}
}
throw ae; //propagate
}
        // After successful authentication, create the Subject and handle its session.
Subject loggedIn = createSubject(token, info, subject);
onSuccessfulLogin(token, info, loggedIn);
return loggedIn;
}
protected void onSuccessfulLogin(AuthenticationToken token, AuthenticationInfo info, Subject subject) {
rememberMeSuccessfulLogin(token, info, subject);
}
protected void onFailedLogin(AuthenticationToken token, AuthenticationException ae, Subject subject) {
rememberMeFailedLogin(token, ae, subject);
}
protected void beforeLogout(Subject subject) {
rememberMeLogout(subject);
}
protected SubjectContext copy(SubjectContext subjectContext) {
return new DefaultSubjectContext(subjectContext);
}
/**
* This implementation functions as follows:
* <p/>
* <ol>
* <li>Ensures the {@code SubjectContext} is as populated as it can be, using heuristics to acquire
* data that may not have already been available to it (such as a referenced session or remembered principals).</li>
* <li>Calls {@link #doCreateSubject(org.apache.shiro.subject.SubjectContext)} to actually perform the
* {@code Subject} instance creation.</li>
* <li>calls {@link #save(org.apache.shiro.subject.Subject) save(subject)} to ensure the constructed
* {@code Subject}'s state is accessible for future requests/invocations if necessary.</li>
* <li>returns the constructed {@code Subject} instance.</li>
* </ol>
*
* @param subjectContext any data needed to direct how the Subject should be constructed.
* @return the {@code Subject} instance reflecting the specified contextual data.
* @see #ensureSecurityManager(org.apache.shiro.subject.SubjectContext)
* @see #resolveSession(org.apache.shiro.subject.SubjectContext)
* @see #resolvePrincipals(org.apache.shiro.subject.SubjectContext)
* @see #doCreateSubject(org.apache.shiro.subject.SubjectContext)
* @see #save(org.apache.shiro.subject.Subject)
* @since 1.0
*/
public Subject createSubject(SubjectContext subjectContext) {
//create a copy so we don't modify the argument's backing map:
SubjectContext context = copy(subjectContext);
//ensure that the context has a SecurityManager instance, and if not, add one:
        context = ensureSecurityManager(context);
//Resolve an associated Session (usually based on a referenced session ID), and place it in the context before
//sending to the SubjectFactory. The SubjectFactory should not need to know how to acquire sessions as the
//process is often environment specific - better to shield the SF from these details:
context = resolveSession(context);
//Similarly, the SubjectFactory should not require any concept of RememberMe - translate that here first
//if possible before handing off to the SubjectFactory:
context = resolvePrincipals(context);
        // The call that actually creates the Subject.
Subject subject = doCreateSubject(context);
//save this subject for future reference if necessary:
//(this is needed here in case rememberMe principals were resolved and they need to be stored in the
//session, so we don't constantly rehydrate the rememberMe PrincipalCollection on every operation).
//Added in 1.2:
        // Store the subject state (session).
save(subject);
return subject;
}
/**
* Actually creates a {@code Subject} instance by delegating to the internal
* {@link #getSubjectFactory() subjectFactory}. By the time this method is invoked, all possible
     * {@code SubjectContext} data (session, principals, et al.) has been made accessible using all known heuristics
* and will be accessible to the {@code subjectFactory} via the {@code subjectContext.resolve*} methods.
*
* @param context the populated context (data map) to be used by the {@code SubjectFactory} when creating a
* {@code Subject} instance.
* @return a {@code Subject} instance reflecting the data in the specified {@code SubjectContext} data map.
* @see #getSubjectFactory()
* @see SubjectFactory#createSubject(org.apache.shiro.subject.SubjectContext)
* @since 1.2
*/
protected Subject doCreateSubject(SubjectContext context) {
return getSubjectFactory().createSubject(context);
}
/**
* Saves the subject's state to a persistent location for future reference if necessary.
* <p/>
* This implementation merely delegates to the internal {@link #setSubjectDAO(SubjectDAO) subjectDAO} and calls
* {@link SubjectDAO#save(org.apache.shiro.subject.Subject) subjectDAO.save(subject)}.
*
* @param subject the subject for which state will potentially be persisted
* @see SubjectDAO#save(org.apache.shiro.subject.Subject)
* @since 1.2
*/
protected void save(Subject subject) {
this.subjectDAO.save(subject);
}
/**
     * Removes (or 'unbinds') the Subject's state from the application, typically called during {@link #logout}.
* <p/>
* This implementation merely delegates to the internal {@link #setSubjectDAO(SubjectDAO) subjectDAO} and calls
* {@link SubjectDAO#delete(org.apache.shiro.subject.Subject) delete(subject)}.
*
* @param subject the subject for which state will be removed
* @see SubjectDAO#delete(org.apache.shiro.subject.Subject)
* @since 1.2
*/
protected void delete(Subject subject) {
this.subjectDAO.delete(subject);
}
/**
* Determines if there is a {@code SecurityManager} instance in the context, and if not, adds 'this' to the
* context. This ensures the SubjectFactory instance will have access to a SecurityManager during Subject
* construction if necessary.
*
* @param context the subject context data that may contain a SecurityManager instance.
* @return The SubjectContext to use to pass to a {@link SubjectFactory} for subject creation.
* @since 1.0
*/
@SuppressWarnings({"unchecked"})
protected SubjectContext ensureSecurityManager(SubjectContext context) {
if (context.resolveSecurityManager() != null) {
log.trace("Context already contains a SecurityManager instance. Returning.");
return context;
}
log.trace("No SecurityManager found in context. Adding self reference.");
context.setSecurityManager(this);
return context;
}
/**
* Attempts to resolve any associated session based on the context and returns a
* context that represents this resolved {@code Session} to ensure it may be referenced if necessary by the
* invoked {@link SubjectFactory} that performs actual {@link Subject} construction.
* <p/>
* If there is a {@code Session} already in the context because that is what the caller wants to be used for
     * {@code Subject} construction, or if no session is resolved, this method effectively does nothing and
     * returns the context method argument unaltered.
*
* @param context the subject context data that may resolve a Session instance.
* @return The context to use to pass to a {@link SubjectFactory} for subject creation.
* @since 1.0
*/
@SuppressWarnings({"unchecked"})
protected SubjectContext resolveSession(SubjectContext context) {
if (context.resolveSession() != null) {
log.debug("Context already contains a session. Returning.");
return context;
}
try {
//Context couldn't resolve it directly, let's see if we can since we have direct access to
//the session manager:
Session session = resolveContextSession(context);
if (session != null) {
context.setSession(session);
}
} catch (InvalidSessionException e) {
log.debug("Resolved SubjectContext context session is invalid. Ignoring and creating an anonymous " +
"(session-less) Subject instance.", e);
}
return context;
}
protected Session resolveContextSession(SubjectContext context) throws InvalidSessionException {
SessionKey key = getSessionKey(context);
if (key != null) {
return getSession(key);
}
return null;
}
protected SessionKey getSessionKey(SubjectContext context) {
Serializable sessionId = context.getSessionId();
if (sessionId != null) {
return new DefaultSessionKey(sessionId);
}
return null;
}
private static boolean isEmpty(PrincipalCollection pc) {
return pc == null || pc.isEmpty();
}
/**
* Attempts to resolve an identity (a {@link PrincipalCollection}) for the context using heuristics. This
* implementation functions as follows:
* <ol>
* <li>Check the context to see if it can already {@link SubjectContext#resolvePrincipals resolve an identity}. If
* so, this method does nothing and returns the method argument unaltered.</li>
* <li>Check for a RememberMe identity by calling {@link #getRememberedIdentity}. If that method returns a
* non-null value, place the remembered {@link PrincipalCollection} in the context.</li>
* </ol>
*
* @param context the subject context data that may provide (directly or indirectly through one of its values) a
* {@link PrincipalCollection} identity.
* @return The Subject context to use to pass to a {@link SubjectFactory} for subject creation.
* @since 1.0
*/
@SuppressWarnings({"unchecked"})
protected SubjectContext resolvePrincipals(SubjectContext context) {
PrincipalCollection principals = context.resolvePrincipals();
if (isEmpty(principals)) {
log.trace("No identity (PrincipalCollection) found in the context. Looking for a remembered identity.");
principals = getRememberedIdentity(context);
if (!isEmpty(principals)) {
log.debug("Found remembered PrincipalCollection. Adding to the context to be used " +
"for subject construction by the SubjectFactory.");
context.setPrincipals(principals);
// The following call was removed (commented out) in Shiro 1.2 because it uses the session as an
// implementation strategy. Session use for Shiro's own needs should be controlled in a single place
// to be more manageable for end-users: there are a number of stateless (e.g. REST) applications that
// use Shiro that need to ensure that sessions are only used when desirable. If Shiro's internal
// implementations used Subject sessions (setting attributes) whenever we wanted, it would be much
// harder for end-users to control when/where that occurs.
//
// Because of this, the SubjectDAO was created as the single point of control, and session state logic
// has been moved to the DefaultSubjectDAO implementation.
// Removed in Shiro 1.2. SHIRO-157 is still satisfied by the new DefaultSubjectDAO implementation
// introduced in 1.2
// Satisfies SHIRO-157:
// bindPrincipalsToSession(principals, context);
} else {
log.trace("No remembered identity found. Returning original context.");
}
}
return context;
}
protected SessionContext createSessionContext(SubjectContext subjectContext) {
DefaultSessionContext sessionContext = new DefaultSessionContext();
if (!CollectionUtils.isEmpty(subjectContext)) {
sessionContext.putAll(subjectContext);
}
Serializable sessionId = subjectContext.getSessionId();
if (sessionId != null) {
sessionContext.setSessionId(sessionId);
}
String host = subjectContext.resolveHost();
if (host != null) {
sessionContext.setHost(host);
}
return sessionContext;
}
public void logout(Subject subject) {
if (subject == null) {
throw new IllegalArgumentException("Subject method argument cannot be null.");
}
beforeLogout(subject);
PrincipalCollection principals = subject.getPrincipals();
if (principals != null && !principals.isEmpty()) {
if (log.isDebugEnabled()) {
log.debug("Logging out subject with primary principal {}", principals.getPrimaryPrincipal());
}
Authenticator authc = getAuthenticator();
if (authc instanceof LogoutAware) {
((LogoutAware) authc).onLogout(principals);
}
}
try {
delete(subject);
} catch (Exception e) {
if (log.isDebugEnabled()) {
String msg = "Unable to cleanly unbind Subject. Ignoring (logging out).";
log.debug(msg, e);
}
} finally {
try {
stopSession(subject);
} catch (Exception e) {
if (log.isDebugEnabled()) {
String msg = "Unable to cleanly stop Session for Subject [" + subject.getPrincipal() + "] " +
"Ignoring (logging out).";
log.debug(msg, e);
}
}
}
}
protected void stopSession(Subject subject) {
Session s = subject.getSession(false);
if (s != null) {
s.stop();
}
}
/**
* Unbinds or removes the Subject's state from the application, typically called during {@link #logout}.
* <p/>
* This has been deprecated in Shiro 1.2 in favor of the {@link #delete(org.apache.shiro.subject.Subject) delete}
* method. The implementation has been updated to invoke that method.
*
* @param subject the subject to unbind from the application as it will no longer be used.
* @deprecated in Shiro 1.2 in favor of {@link #delete(org.apache.shiro.subject.Subject)}
*/
@Deprecated
@SuppressWarnings({"UnusedDeclaration"})
protected void unbind(Subject subject) {
delete(subject);
}
protected PrincipalCollection getRememberedIdentity(SubjectContext subjectContext) {
RememberMeManager rmm = getRememberMeManager();
if (rmm != null) {
try {
return rmm.getRememberedPrincipals(subjectContext);
} catch (Exception e) {
if (log.isWarnEnabled()) {
String msg = "Delegate RememberMeManager instance of type [" + rmm.getClass().getName() +
"] threw an exception during getRememberedPrincipals().";
log.warn(msg, e);
}
}
}
return null;
}
}
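To make the flow above concrete, here is a minimal usage sketch. It is a hypothetical example, not part of this source file: it assumes shiro-core on the classpath and uses an in-memory SimpleAccountRealm with made-up credentials. The usual entry point is Subject.login(token), which delegates to the login/createSubject/save sequence implemented in this class.
import org.apache.shiro.SecurityUtils;
import org.apache.shiro.authc.UsernamePasswordToken;
import org.apache.shiro.mgt.DefaultSecurityManager;
import org.apache.shiro.realm.SimpleAccountRealm;
import org.apache.shiro.subject.Subject;

public class LoginFlowSketch {
    public static void main(String[] args) {
        // One in-memory test account; real applications register their own Realm(s).
        SimpleAccountRealm realm = new SimpleAccountRealm();
        realm.addAccount("alice", "secret");

        DefaultSecurityManager securityManager = new DefaultSecurityManager(realm);
        SecurityUtils.setSecurityManager(securityManager);

        // Subject.login(...) ends up in DefaultSecurityManager.login(subject, token):
        // authenticate -> createSubject -> save, then onSuccessfulLogin.
        Subject subject = SecurityUtils.getSubject();
        subject.login(new UsernamePasswordToken("alice", "secret"));
        System.out.println("authenticated: " + subject.isAuthenticated());

        // logout() runs beforeLogout -> delete -> stopSession, as shown above.
        subject.logout();
    }
}
|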
Marilyn Mosby, the Maryland state attorney for Baltimore who on Friday announced charges against six city police officers for the death of Freddie Gray, said last year that a St. Louis County grand jury’s decision not to indict Darren Wilson in the shooting death of Michael Brown “breaks [her] heart” and that a special prosecutor should have been appointed to handle the case.
The revelation comes as Mosby is being asked to recuse herself and appoint a special prosecutor to handle the case against six Baltimore police officers now charged with a range of crimes in the death of 25-year-old Freddie Gray.
Baltimore’s Fraternal Order of Police slammed Mosby’s charging announcement, calling it a “rush to judgement.” The group said that Mosby should recuse herself from the case because her husband, Nick Mosby, is a city councilman whose district encompasses the area where Gray was arrested earlier this month.
The organization fears that Nick Mosby’s reelection chances will be hurt if his wife does not throw the book at the officers.
“It is clear that your husband’s political future will be directly impacted, for better or worse, by the outcome of your investigation,” Baltimore Fraternal Order of Police president Gene Ryan said after Mosby’s charging announcement.
Protests held in Baltimore on behalf of justice for Gray gave way to riots and looting earlier this week.
Mosby is also close to Billy Murphy Jr., the attorney representing the Gray family. Murphy donated $5,000 to Mosby’s political campaign and was a member of her transition committee following her November election, the Baltimore Sun reported. The Sun did note that the Fraternal Order of Police also donated to Mosby’s campaign.
During a surprise press conference on Friday, Mosby announced charges against the officers involved in arresting and transporting Gray ranging from misconduct to involuntary manslaughter to second-degree depraved-heart murder.
Gray was arrested on April 12, an arrest which Mosby said on Friday was illegal. Mosby also said that officers failed to properly restrain Gray after placing him in a transport van. She said they also failed to provide medical care even after he asked for it. Gray suffered a broken neck and died on April 19.
Besides potential political and personal conflicts of interest, Marilyn Mosby and her husband have also spoken out publicly in other high-profile cases, including the cases of Michael Brown and Trayvon Martin, the Florida 17-year-old who was fatally shot by George Zimmerman in Feb. 2012.
After Zimmerman’s acquittal of second-degree murder in July 2013, Marilyn Mosby attended a protest rally held at the federal courthouse in Baltimore. She spoke along with pastor Jamal Bryant, who has been a prominent figure in the media during the Gray case. They were joined by activist Faraji Muhammad and spoke about young black men being targeted for violence.
Though Mosby seemingly felt that the justice system failed by not returning a guilty verdict for Zimmerman, evidence strongly suggested that he shot Martin in self-defense as the teenager was straddling him, punching him in the face and slamming his head.
Nick Mosby went further than a mere rally in response to what he saw as an unjust verdict. The city councilman called on city government officials to boycott Florida businesses.
“The marches and rallies are great, but when it relates to economics, it can drive change,” Mosby said in Aug. 2013.
Marilyn Mosby also publicly criticized Bob McCulloch, the St. Louis County prosecutor who opened a grand jury investigation on Wilson.
Wilson, who is white, shot the 18-year-old Brown numerous times following an altercation on Aug. 9. Brown had just stolen cigars from a nearby convenience store. Witnesses saw Brown punch Wilson inside his police car. After a chase, Wilson said he shot Brown after the man turned and charged at him.
In a panel discussion in December on News One Now, Mosby called McCulloch’s handling of that case “problematic” and “questionable” and strongly implied that racial dynamics were at play.
“We have to question the motives,” she said.
And germane to the Gray case, Mosby said that McCulloch’s handling of the Wilson grand jury was the reason that officials should “bring in special prosecutors.”
Despite her apparent support for the idea of special prosecutors, Mosby has given no indication that she believes that the Gray case warrants similar oversight, despite the numerous conflicts of interest at play.
Mosby also spoke of the racial dynamics at play during the Ferguson fallout.
Of McCulloch, she said, “so you have an individual that’s been in office, and does not share your interests and your values and is making decisions about your daily life.”
She noted that Ferguson is 68 percent black, and that only six percent of that population votes.
She said that the grand jury’s decision to not indict Wilson “tears my heart apart as a mother.”
“But I’ve got to tell you that I’m glad that we’re finally having the conversation and realizing how important that role of a prosecutor is and how awesome that discretion is,” Mosby said, adding that officials like McCulloch must be held accountable and that change occurs at the voting booth.
|
/// Check if the given text would be a valid referent name.
pub fn validate(name: impl AsRef<str>) -> Result<(), NotReferentName> {
let name = name.as_ref();
let first_char = name.chars().next();
match first_char {
Some(c) if c.is_uppercase() => Ok(()),
_ => Err(NotReferentName(name.into())),
}
} |
# clients/benchmarks/perf_script/plotPerformance.py
# ########################################################################
# Copyright 2016-2020 Advanced Micro Devices, Inc.
#
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib.pyplot as plt
import pylab
from matplotlib.backends.backend_pdf import PdfPages
os.system( "grep NT sgemm.txt > sgemm_NT.csv" )
# Use a name other than 'input' so the builtin is not shadowed.
infile = open('sgemm_NT.csv', 'r')
x = []
y = []
shape = ''
for line in infile:
    line = line.replace("(", ",")
    line = line.replace(")", ",")
    value = line.split(',')
    # Convert to numbers so matplotlib scales the axes correctly
    # (the column layout here follows the grep'd rocBLAS output).
    x.append(float(value[1]))
    y.append(float(value[7]))
    shape = value[0]
    #print value
f = plt.figure()
plt.rcParams.update({'font.size':20})
plt.xlabel('M=N=K')
plt.ylabel("Gflop/s")
plt.title('rocBLAS SGEMM ' + shape)
plt.yticks()
plt.grid(True)
plot1 = plt.plot(x, y, label='SGEMM ' + shape)  # label so the legend has an entry
plt.legend( loc = 2)
f.savefig("sgemm.pdf", bbox_inches='tight')
infile.close()
|
def remote_backends(compact=True):
    warnings.warn(
        "remote_backends() will be deprecated in upcoming versions (>0.5). "
        "using filters instead is recommended (i.e. available_backends({'local': False})).",
DeprecationWarning)
    return available_backends({'local': False}, compact=compact)
|
// Decompiled by Jad v1.5.8e. Copyright 2001 <NAME>.
// Jad home page: http://www.geocities.com/kpdus/jad.html
// Decompiler options: packimports(3)
// Source File Name: FldSKDQueryWhereClause.java
package com.ibm.tivoli.maximo.skd.app;
import java.rmi.RemoteException;
import psdi.mbo.*;
import psdi.security.UserInfo;
import psdi.server.MXServer;
import psdi.util.*;
public class FldSKDQueryWhereClause extends MboValueAdapter
{
public FldSKDQueryWhereClause(MboValue mbv)
{
super(mbv);
}
public void validate()
throws MXException, RemoteException
{
if(getMboValue().isNull())
return;
MboValue thisValue = getMboValue();
MboRemote mbo = thisValue.getMbo();
String objectname = "";
objectname = mbo.getString("OBJECTNAME");
String where = "";
where = mbo.getString("WHERECLAUSE");
try
{
MboSetRemote msr = MXServer.getMXServer().getMboSet(objectname, mbo.getUserInfo());
msr.setWhere(where);
msr.getMbo(0);
}
catch(Exception ex)
{
Object params[] = {
where
};
if(BidiUtils.isBidiEnabled())
params[0] = BidiUtils.buildAndPush(mbo.getName(), "WHERECLAUSE", (String)params[0], mbo.getUserInfo().getLangCode());
throw new MXApplicationException("scheduler", "NotValidExpression", params);
}
}
}
|
# contrib/vtksnl/__init__.py (from the celiafish/VisTrails repository)
###########################################################################
##
## Copyright (C) 2006-2010 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
################################################################################
# VTK-SNL Package for VisTrails (Sandia National Laboratories)
################################################################################
"""The Visualization ToolKit (VTK) is an open source, freely available
software system for 3D computer graphics, image processing, and
visualization used by thousands of researchers and developers around
the world. This version of the VTK package requires the Sandia
National Laboratories version of the VTK libraries.
http://www.vtk.org"""
identifier = 'edu.utah.sci.vistrails.vtksnl'
name = 'VTKSNL'
version = '0.9.1'
def package_dependencies():
import core.packagemanager
manager = core.packagemanager.get_package_manager()
if manager.has_package('edu.utah.sci.vistrails.spreadsheet'):
return ['edu.utah.sci.vistrails.spreadsheet']
else:
return []
def package_requirements():
import core.requirements
if not core.requirements.python_module_exists('vtksnl'):
raise core.requirements.MissingRequirement('vtksnl')
if not core.requirements.python_module_exists('PyQt4'):
print 'PyQt4 is not available. There will be no interaction',
print 'between VTK and the spreadsheet.'
import vtksnl
|
def _connectionFrameCb_connected(self,connector):
self.connector = connector
self.tableFrame.after(GUIUPDATEPERIOD,self._updateTable)
self.notifThread = NotifThread(
self.connector,
self._connectionFrameCb_disconnected
)
self.snapshotThread = SnapshotThread(
self.connector,
        )
|
/**
* Stub object for {@code ActivatedJob}. It contains setters for all getters defined in the
* interface.
*
* <p>Plus it contains additional getters that are only available once a job has been worked on.
*
* <p>{@code getStatus()}
*
* <ul>
* <li>{@code ACTIVATED} - initial state. Either the job has not been worked on yet, or it has
* been worked on, but no result was submitted via {@code JobClient}
* <li>{@code COMPLETED} - job was completed. This unlocks:
* <ul>
* <li>{@code getOutputVariables()} - the variables set on completion of the job
* </ul>
* <li>{@code FAILED} - job failed. This unlocks:
* <ul>
* <li>{@code getErrorMessage()} - error message of the job
* <li>{@code getRemainingRetries()} - remaining retries for the job
* </ul>
* <li>{@code ERROR_THROWN} - an error was thrown. This unlocks
* <ul>
* <li>{@code getErrorMessage()}
* <li>{@code getErrorCode()}
* </ul>
* </ul>
*/
public class ActivatedJobStub implements ActivatedJob {
private static final ZeebeObjectMapper JSON_MAPPER = new ZeebeObjectMapper();
// variables available at the outset
private Status status = Status.ACTIVATED;
private Map<String, Object> inputVariables = new HashMap<>();
private final long key;
private String type = "jobType";
private long processInstanceKey = 0;
private String bpmnProcessId = "bpmnProcessId";
private int processDefinitionVersion = 0;
private int processDefinitionKey = 0;
private String elementId = "serviceTask1";
private long elementInstanceKey = 0;
private Map<String, String> customHeaders = new HashMap<>();
private String worker = "worker";
private int retries = 1;
private long deadline = 10_000L;
// variables available after working on a job
private Map<String, Object> outputVariables = new HashMap<>();
private String errorMessage;
private int remainingRetries;
private String errorCode;
public ActivatedJobStub(final long key) {
this.key = key;
}
@Override
public long getKey() {
return key;
}
@Override
public String getType() {
return type;
}
public void setType(final String type) {
this.type = type;
}
@Override
public long getProcessInstanceKey() {
return processInstanceKey;
}
public void setProcessInstanceKey(final long processInstanceKey) {
this.processInstanceKey = processInstanceKey;
}
@Override
public String getBpmnProcessId() {
return bpmnProcessId;
}
public void setBpmnProcessId(final String bpmnProcessId) {
this.bpmnProcessId = bpmnProcessId;
}
@Override
public int getProcessDefinitionVersion() {
return processDefinitionVersion;
}
public void setProcessDefinitionVersion(final int processDefinitionVersion) {
this.processDefinitionVersion = processDefinitionVersion;
}
@Override
public long getProcessDefinitionKey() {
return processDefinitionKey;
}
public void setProcessDefinitionKey(final int processDefinitionKey) {
this.processDefinitionKey = processDefinitionKey;
}
@Override
public String getElementId() {
return elementId;
}
public void setElementId(final String elementId) {
this.elementId = elementId;
}
@Override
public long getElementInstanceKey() {
return elementInstanceKey;
}
public void setElementInstanceKey(final long elementInstanceKey) {
this.elementInstanceKey = elementInstanceKey;
}
@Override
public Map<String, String> getCustomHeaders() {
return customHeaders;
}
public void setCustomHeaders(final Map<String, String> customHeaders) {
this.customHeaders = customHeaders;
}
@Override
public String getWorker() {
return worker;
}
public void setWorker(final String worker) {
this.worker = worker;
}
@Override
public int getRetries() {
return retries;
}
public void setRetries(final int retries) {
this.retries = retries;
}
@Override
public long getDeadline() {
return deadline;
}
public void setDeadline(final long deadline) {
this.deadline = deadline;
}
@Override
public String getVariables() {
return JSON_MAPPER.toJson(inputVariables);
}
@Override
public Map<String, Object> getVariablesAsMap() {
return inputVariables;
}
@Override
public <T> T getVariablesAsType(final Class<T> variableType) {
return JSON_MAPPER.fromJson(getVariables(), variableType);
}
@Override
public String toJson() {
return JSON_MAPPER.toJson(this);
}
@Override
public String toString() {
return toJson();
}
public Status getStatus() {
return status;
}
public boolean isCompleted() {
return status == Status.COMPLETED;
}
protected void setCompleted() {
status = Status.COMPLETED;
}
public boolean isFailed() {
return status == Status.FAILED;
}
protected void setFailed() {
status = Status.FAILED;
}
public boolean hasThrownError() {
return status == Status.ERROR_THROWN;
}
protected void setErrorThrown() {
status = Status.ERROR_THROWN;
}
public Map<String, Object> getInputVariables() {
return getVariablesAsMap();
}
public void setInputVariables(final Map<String, Object> variables) {
inputVariables = variables;
}
public Map<String, Object> getOutputVariables() {
return outputVariables;
}
protected void setOutputVariables(final Map<String, Object> variables) {
outputVariables = variables;
}
public String getErrorMessage() {
return errorMessage;
}
public void setErrorMessage(final String errorMessage) {
this.errorMessage = errorMessage;
}
public int getRemainingRetries() {
return remainingRetries;
}
public void setRemainingRetries(final int remainingRetries) {
this.remainingRetries = remainingRetries;
}
public String getErrorCode() {
return errorCode;
}
protected void setErrorCode(final String errorCode) {
this.errorCode = errorCode;
}
public enum Status {
ACTIVATED,
COMPLETED,
FAILED,
ERROR_THROWN
}
}
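A short usage sketch (the key, type, and variables below are hypothetical; the protected setters such as setCompleted() are normally driven by the accompanying test job client once a worker submits a result):
import java.util.HashMap;
import java.util.Map;

public class ActivatedJobStubExample {
    public static void main(String[] args) {
        ActivatedJobStub job = new ActivatedJobStub(123L); // 123L is a made-up job key

        job.setType("payment");
        Map<String, Object> vars = new HashMap<>();
        vars.put("amount", 42);
        job.setInputVariables(vars);

        // A fresh stub starts in ACTIVATED; COMPLETED, FAILED and ERROR_THROWN
        // are only reached after a result has been submitted.
        System.out.println(job.getStatus());      // ACTIVATED
        System.out.println(job.isCompleted());    // false
        System.out.println(job.getVariables());   // input variables serialized as JSON
    }
}
|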
/**
* Utilities for storing more complex collection types in
* {@link org.apache.hadoop.conf.Configuration} instances.
*/
@InterfaceAudience.Public
public final class ConfigurationUtil {
// TODO: hopefully this is a good delimiter; it's not in the base64 alphabet,
// nor is it valid for paths
public static final char KVP_DELIMITER = '^';
// Disallow instantiation
private ConfigurationUtil() {
}
/**
* Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values
* delimited by {@link #KVP_DELIMITER}
* @param conf configuration to store the collection in
* @param key overall key to store keyValues under
* @param keyValues kvps to be stored under key in conf
*/
public static void setKeyValues(Configuration conf, String key,
Collection<Map.Entry<String, String>> keyValues) {
setKeyValues(conf, key, keyValues, KVP_DELIMITER);
}
/**
* Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values
* delimited by delimiter.
* @param conf configuration to store the collection in
* @param key overall key to store keyValues under
* @param keyValues kvps to be stored under key in conf
* @param delimiter character used to separate each kvp
*/
public static void setKeyValues(Configuration conf, String key,
Collection<Map.Entry<String, String>> keyValues, char delimiter) {
List<String> serializedKvps = Lists.newArrayList();
for (Map.Entry<String, String> kvp : keyValues) {
serializedKvps.add(kvp.getKey() + delimiter + kvp.getValue());
}
conf.setStrings(key, serializedKvps.toArray(new String[serializedKvps.size()]));
}
/**
* Retrieve a list of key value pairs from configuration, stored under the provided key
* @param conf configuration to retrieve kvps from
* @param key key under which the key values are stored
* @return the list of kvps stored under key in conf, or null if the key isn't present.
* @see #setKeyValues(Configuration, String, Collection, char)
*/
public static List<Map.Entry<String, String>> getKeyValues(Configuration conf, String key) {
return getKeyValues(conf, key, KVP_DELIMITER);
}
/**
* Retrieve a list of key value pairs from configuration, stored under the provided key
* @param conf configuration to retrieve kvps from
* @param key key under which the key values are stored
* @param delimiter character used to separate each kvp
* @return the list of kvps stored under key in conf, or null if the key isn't present.
* @see #setKeyValues(Configuration, String, Collection, char)
*/
public static List<Map.Entry<String, String>> getKeyValues(Configuration conf, String key,
char delimiter) {
String[] kvps = conf.getStrings(key);
if (kvps == null) {
return null;
}
List<Map.Entry<String, String>> rtn = Lists.newArrayList();
for (String kvp : kvps) {
String[] splitKvp = StringUtils.split(kvp, delimiter);
if (splitKvp.length != 2) {
throw new IllegalArgumentException("Expected key value pair for configuration key '" + key
+ "'" + " to be of form '<key>" + delimiter + "<value>; was " + kvp + " instead");
}
rtn.add(new AbstractMap.SimpleImmutableEntry<>(splitKvp[0], splitKvp[1]));
}
return rtn;
}
}
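A quick round-trip sketch of the API above (the key name and entries are hypothetical; assumes Hadoop's Configuration and this class on the classpath):
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class ConfigurationUtilExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        List<Map.Entry<String, String>> kvps = new ArrayList<>();
        kvps.add(new AbstractMap.SimpleImmutableEntry<>("host", "example.org"));
        kvps.add(new AbstractMap.SimpleImmutableEntry<>("port", "8080"));

        // Stored under one key as the strings "host^example.org" and "port^8080".
        ConfigurationUtil.setKeyValues(conf, "demo.endpoints", kvps);

        // Splits each stored string on '^' and rebuilds the entries.
        List<Map.Entry<String, String>> restored =
                ConfigurationUtil.getKeyValues(conf, "demo.endpoints");
        System.out.println(restored); // [host=example.org, port=8080]
    }
}
|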
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_TO_IR_BINDINGS_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_TO_IR_BINDINGS_H_
#include <unordered_map>
#include "external/llvm/include/llvm/IR/IRBuilder.h"
#include "external/llvm/include/llvm/IR/Value.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/llvm_ir/alias_analysis.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace xla {
namespace gpu {
// This class encapsulates the bindings between HloInstructions and LLVM IR
// values that represent their addresses.
class HloToIrBindings {
public:
HloToIrBindings(const HloModule& module,
const BufferAssignment* buffer_assignment,
llvm::IRBuilder<>* ir_builder, bool is_nested)
: buffer_assignment_(buffer_assignment),
is_nested_(is_nested),
ir_builder_(ir_builder),
alias_analysis_(module, *buffer_assignment_,
&ir_builder_->getContext()) {}
void EmitBasePointersForHlos(
tensorflow::gtl::ArraySlice<const HloInstruction*> io_hlos,
tensorflow::gtl::ArraySlice<const HloInstruction*> non_io_hlos);
// Rebinds the given HLO to the LLVM IR value that represent its address.
void BindHloToIrValue(const HloInstruction& hlo, llvm::Value* ir_value,
const ShapeIndex& shape_index = {});
// Unbinds all IR values that are defined in an LLVM function, e.g., function
// arguments and stack variables. Global variables will be kept in bindings_.
//
// This method is called after emitting code for each top-level HLO. The local
// IR values are out of scope at that point and should not be used.
void UnbindAllLocalIrValues();
// Returns whether `hlo` is bound to an LLVM IR value.
bool BoundToIrValue(const HloInstruction& hlo) const {
return base_ptrs_.count(&hlo);
}
llvm::Value* GetTempBufferBase() const { return temp_buffer_base_; }
// A helper method that returns the base pointer of the IrArray containing the
// output of "inst" at the given ShapeIndex.
llvm::Value* GetBasePointer(const HloInstruction& hlo,
const ShapeIndex& shape_index = {}) const {
auto it = base_ptrs_.find(&hlo);
CHECK(it != base_ptrs_.end());
return it->second.element(shape_index);
}
// Return the underlying IrArray of the output of the given instruction.
llvm_ir::IrArray GetIrArray(const HloInstruction& hlo,
const ShapeIndex& shape_index = {});
private:
// Emits IR to resolve (possibly) recursive GetTupleElement instructions.
llvm::Value* EmitGetTupleElement(const HloInstruction* gte,
llvm::Value* base_ptr);
// Returns an llvm typed ir representation of 'ir_value' based on 'hlo' shape.
llvm::Value* GetTypedIrValue(const HloInstruction& hlo,
const ShapeIndex& shape_index,
llvm::Value* ir_value);
const BufferAssignment* buffer_assignment_;
const bool is_nested_;
llvm::IRBuilder<>* ir_builder_;
// Stores the underlying llvm::IrArray for each HloInstruction.
// For an instruction that generates multiple outputs, the root will be a
// tuple shape. The IrArray for each element output is stored in the subnode
// in the ShapeTree.
std::unordered_map<const HloInstruction*, ShapeTree<llvm::Value*>> base_ptrs_;
// The address of the memory block that contains all temporary buffers.
llvm::Value* temp_buffer_base_;
llvm_ir::AliasAnalysis alias_analysis_;
};
} // namespace gpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_TO_IR_BINDINGS_H_
|
// Copyright (c) 2016 The btcsuite developers
// Copyright (c) 2016 The commanderu developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package chaincfg
import "testing"
// TestMustRegisterPanic ensures the mustRegister function panics when used to
// register an invalid network.
func TestMustRegisterPanic(t *testing.T) {
t.Parallel()
// Setup a defer to catch the expected panic to ensure it actually
// panicked.
defer func() {
if err := recover(); err == nil {
t.Error("mustRegister did not panic as expected")
}
}()
// Intentionally try to register duplicate params to force a panic.
mustRegister(&MainNetParams)
}
|
PROVISION OF ENGLISH-MEDIUM INSTRUCTION: TRENDS AND ISSUES
The article addresses current trends in teaching subjects through the medium of English, a practice that has been expanding rapidly both worldwide and in Ukraine. Introduced in response to globalization, English as a Medium of Instruction (EMI) has become an essential part of universities' internationalization policies. Growth
in the number of international students is viewed as an indication of the quality of education a university provides; it contributes to higher ratings and competitiveness. The introduction of EMI has been consistently promoted by the British Council. Nonetheless, EMI providers across the world keep encountering similar issues
and challenges. Among these, the most significant is the low English language proficiency of non-native English speakers, both teachers and students. The article aims to examine the training provided to Ukrainian teachers who deliver EMI courses. The authors surveyed 28 EMI teachers at two universities in the country. The results
imply the need to redesign linguistic and pedagogical courses for EMI teachers, including training in innovative and interactive teaching techniques. Prospects for further research arise from the need to develop a quality system for assessing students' learning outcomes.
|
from collections import defaultdict
from itertools import product
n, m = map(int, input().split())
light = defaultdict(list)
for i in range(m):
A = list(map(int, input().split()))
for s in range(1, A[0]+1):
light[i].append(A[s]-1)
p = list(map(int, input().split()))
res = 0
for state in product([0,1], repeat=n):
for i in range(m):
if len(list(filter(lambda s: state[s] == 1, light[i])))%2 != p[i]:
break
else:
res += 1
print(res)
|
THE mother of a tragic teenager who streamed her own suicide on Facebook Live has been accused of mocking her daughter online as she took her own life.
Gina Caze is said to have watched and written messages on her estranged 14-year-old daughter's social media post, accusing her of seeking attention and crying wolf.
Nakia Venant streamed her own suicide on Facebook Live (Picture: Facebook)
Gina Caze talks to the press after the tragedy (Picture: AP)
Caze did nothing to stop desperate Nakia Venant from committing suicide, a damning report from a child services department says.
The Florida Department of Children and Families said instead Caze criticised her daughter as she watched the two-hour-long Facebook Live video.
The official report says she called Nakia a 'custody jit' and that she was 'crying wolf...seeking attention'.
In the report, investigators allege that Nakia’s mother, who identified herself to the press as Gina Alexis, watched the broadcast for nearly two hours.
Nakia was passed around 14 different foster homes in 16 months (Picture: Facebook)
But her attorney, Howard Talenfeld, indicated his client did not see her daughter’s suicide.
“Mom did not know that Naika had committed suicide. She did not witness this online,” he said.
And Caze has strenuously denied she made the comments as her daughter died.
Caze had a long history of conflicts with her daughter and Nakia was eventually taken away from her due to physical abuse concerns.
Vulnerable Nakia was living with a foster family at the time of her death and was found hanged by Miami-Dade police.
Caze had a long history of conflicts with her daughter (Picture: Facebook)
Online tributes were paid to Nakia after the tragedy (Picture: Facebook)
The sickening comments on her Facebook page were made from Caze's Facebook account with the user name Gina Alexis, reported Tampa Bay Times.
One read: '#ADHD games played u sad little DCF custody jit that's why u where u at for this dumb s--t n more u keep crying wolf u dead u will get buried life goes on after a jit that doesn't listen to there parents trying to be grown seeking boys and girls attention instead of her books.'
Her broadcast was watched by hundreds of people, some of whom pleaded with the girl to reconsider her decision.
But others urged her to take her life, calling her names and saying that the broadcast was fake.
The youngster was taken away from Caze in 2009 because there were concerns that Venant was being physically abused by her mother.
Since then she had stints in several foster homes, and at one point she was passed around to 14 different homes in the span of 16 months.
The DCF claim that she displayed inappropriate sexual behaviour and she admitted she had seen pornographic videos.
In 2010 she was placed back in the care of Caze, only to be removed in 2014, but was then later returned after social workers insisted it was for the best.
A year later, her mother relinquished custody, saying that she no longer wanted the teenager.
The two remained in contact through social media.
On January 22, Nakia had started a livestream on Facebook when she killed herself at her Miami home just after 3am.
Efforts to save the girl were thwarted by a series of accidents after a friend reported the two-hour livestream to police and provided them with the wrong Miami address.
Caze is said to have watched the two-hour-long Facebook Live video (Picture: AP)
Cops were allegedly given the wrong address when Nakia's live stream was reported to police (Picture: Facebook)
Emergency workers rushed to the wrong house and learned of Nakia’s address — where they later found her hanging in the bathroom as her foster parents slept.
The girl was rushed to Jackson North Hospital, where she was pronounced dead.
Her close friend Gerta Telfort described Nakia as a good student with lots of charisma.
She said Nakia, a seventh-grader at Young Women’s Preparatory Academy, hoped to one day write a book and had started a daily journal.
If you are affected by any of the issues raised in this article, please call the Samaritans on (free) 116123 or 020 7734 2800 or visit www.samaritans.org |
Molecular Cytogenetics in Childhood Acute Lymphoblastic Leukemia: A Hospital-Based Observational Study
OBJECTIVE This study was conducted to determine the frequency of chromosomal aberrations in children aged <19 years with newly diagnosed acute lymphoblastic leukemia (ALL), attending/admitted in the Department of Pediatrics and Radiotherapy, Government Medical College, Jammu. Furthermore, we aimed to study the correlation between the cytogenetic molecular abnormalities and the immediate clinical outcome (induction of remission). MATERIALS AND METHODS This was a prospective study conducted over a period of 2 years (May 2011 to May 2013) in a tertiary care hospital in India. Forty pediatric (1–19 years) patients (18 males, 22 females; M: F = 0.8 : 1) with newly diagnosed ALL were studied for molecular cytogenetic analysis. Written consent was obtained from the parents of the patients. Bone marrow aspiration was done for making the diagnosis of ALL. Children lost to follow-up and who failed to give consent were excluded from the survey. Host factors and clinical parameters were obtained from patients. RESULTS Bone marrow aspirate samples of 40 diagnosed cases of ALL were subjected to routine cytogenetic analysis, and reverse transcription-polymerase chain reaction (RT-PCR) technique was used for molecular analysis. Well-spread metaphase plates were obtained in 18/40 (45%) cases for analysis. RT-PCR revealed abnormal genes in 20/40 (50%) patients. The results of molecular cytogenetic analysis were correlated with patients’ clinical and hematological parameters for risk stratification and immediate outcome (induction of remission). Eighteen out of 40 (45%) cases revealed no abnormality. Among the remaining 22 cases, 8 had TEL–AML1 (20%), 6 had BCR–ABL (15%), 4 had MLL–AF4 (10%), 2 had E2A–PBX1 (5%) fusion genes, and 2 had hyperdiploidy. To conclude, a higher proportion of cases in this study showed adverse translocations such as t (9;22), t (4;11), and t (1;19) compared to those reported in the literature. CONCLUSION RT-PCR assay was useful in detecting the prognostically significant oncogene fusion transcripts. In our study of 40 patients, we found that the pattern and frequency differ from those reported in Western literature. Our study reveals a lower frequency of hyperdiploidy (5%) and a higher frequency of BCR–ABL gene fusion (15%) in childhood ALL. Above all, in contrast to previous studies on childhood ALL, our study showed female predominance, with the male-to-female ratio being 0.8 : 1. Apart from the BCR–ABL fusion gene, none other was associated with poor prognosis. It is already well established that the characterization of the genetic entities at diagnosis is crucial for the understanding and the optimal treatment of ALL. Because the aberrations in our population differ significantly from those reported in Western populations, we may be required to tailor our protocols.
Introduction
A great number of cytogenetic aberrations, mainly translocations, are specific to a certain type, or even a subtype, of leukemia; some of these have been proven to be prognostically significant and are employed to guide treatment. 1 Acquired chromosomal abnormalities, structural or numerical, are observed in malignant bone marrow cells in >75% of patients with hematologic malignancies, with an increasing incidence reported due to the application of complementary detection methods provided by molecular cytogenetics. 2 Appropriate risk assessment is the most important requirement in the choice of therapy, ensuring that patients are neither over- nor undertreated. There is universal understanding that certain clinical features, genetic abnormalities of leukemic cells, pharmacodynamic, pharmacogenetic, and pharmacogenomic variables of the host, and early treatment response have important prognostic and therapeutic implications. Further refinements in therapy, including risk-adapted treatment protocols that attempt to improve cure rates for high-risk patients while reducing the toxicity of therapy for low-risk patients, are required.
Materials and Methods
The study was approved by the ethics committee of the University of Jammu, and conducted in accordance with the principles of the Declaration of Helsinki. Forty patients (18 males, 22 females; M: F = 0.8 : 1) with newly diagnosed pediatric (1-19 years) acute lymphoblastic leukemia (ALL) were studied for molecular cytogenetic analysis. Written consent was obtained from the parents of the patients. Bone marrow aspiration was done for making the diagnosis of ALL. Children lost to follow-up and who failed to give consent were excluded from the survey. Host factors and clinical parameters were obtained from patients. Detailed clinical examination, investigations, aspiration and examination of bone marrow, chemotherapy (according to risk stratification), and monitoring were conducted as per standard protocol and were not altered. Apart from routine histopathological examination for type and classification, 4 mL of bone marrow aspirate was collected for this study, 2 mL each for cytogenetic and molecular studies. The bone marrow aspirate (2 mL) was collected for cytogenetic study in a 10-mL sterile tube containing RPMI 1640 medium, heparin, and fetal calf serum. Furthermore, 2 mL of sample was collected in a 2.5-mL ethylenediamine tetraacetic acid-containing vial for molecular analysis. The above-collected samples were then subjected to karyotyping and reverse transcription-polymerase chain reaction (RT-PCR).
Results
The following host factors and clinical parameters were obtained from patients.
Age and sex. The age of patients at the time of diagnosis ranged from 6 months to 16.5 years. The majority of patients were below 10 years of age, mean age of the patients being 7.4 years. The majority of patients were females (22/40) and the male-to-female ratio was 0.8 : 1.
Nutritional status. Protein-energy malnutrition was seen in 8/40 patients (20%), with two having chronic grade 3 protein-energy malnutrition, two having acute grade 2 protein-energy malnutrition, and four having acute grade 1 protein-energy malnutrition.
Duration of symptoms. The duration of symptoms at presentation ranged from 1 week to 6 months. The majority of patients (24/40, 60%) presented with history of symptoms in the 4-12 weeks preceding the time of presentation. The prolonged duration of symptoms can be explained by the lack of proper health care facilities in the peripheral regions. Most of the patients were from far-flung areas.
Presenting symptoms. The common clinical manifestations at admission were fever, pallor, bleeding manifestations, hepatosplenomegaly, lymphadenopathy, and musculoskeletal pain. On examination, hepatosplenomegaly was observed in 34 patients, pallor in 22 patients, fever in 20 patients, generalized lymphadenopathy in 18 patients, and petechiae or bleeding manifestation in 8 patients. Joint symptoms were seen in six patients, whereas CNS disease at the time of presentation was observed in four patients. High incidence of CNS and joint disease can be explained by the delay in diagnosis.
Molecular cytogenetics.
Using the morphological classification, 24 patients had L1 type ALL and the remaining 16 had L2 phenotype. Cytogenetic analysis using karyotyping revealed a normal karyotype in 12 patients (30%). Among the remaining patients, six (15%) had an abnormal karyotype (hyperdiploidy in two patients and Philadelphia chromosome in four patients). Absence of mitotic division was determined in 22 (55%) patients. Molecular analysis using RT-PCR revealed TEL-AML1 as the most common fusion gene abnormality in the study cohort (8/40), followed by BCR-ABL fusion gene (6/40), MLL-AF4 (4/40), and E2A-PBX1 (2/40). No abnormal fusion gene was seen in the remaining 20 patients. BCR-ABL-positive patients tested positive for the p190 protein. This test was conducted to differentiate ALL from CML.
Correlation between molecular cytogenetics and clinical features. The lowest hemoglobin level was noted in ALL with BCR-ABL fusion gene, with a mean value of 3.5 g/dL, while the highest level was seen in ALL with TEL-AML1 fusion gene, with a mean value of 5.75 g/dL (Table 1). The lowest white blood cell count was seen in ALL with TEL-AML1 fusion gene, with a mean value of 7,825/mm³, and the highest count was found in ALL with BCR-ABL fusion gene, with a mean value of 109,333/mm³. The lowest platelet count was found in ALL with MLL-AF4 fusion gene, with a mean value of 25,000/mm³, while the highest level was seen in ALL with TEL-AML1 fusion gene, with a mean value of 165,000/mm³.
Treatment. Treatment could be started in only 30 (75%) patients as per UK-ALL 2003 version 6 protocol because the remaining 10 (25%) patients did not opt for treatment (Table 2).
Bone marrow status at day 28/outcome. Of the 30 patients who received therapy, 20 (66.6%) were in complete remission (M1 status of the bone marrow on day 28).
Failure of induction of remission was seen in six (15%) patients, with day 28 bone marrow showing M2 status. Among those who did not go into remission after day 28 of therapy, four were already on regimen C because they had BCR-ABL and MLL-AF4 fusion gene abnormalities. They were shifted to a higher center for further treatment, and imatinib was considered for their treatment. Two of the patients with M2 status of bone marrow on day 28 were shifted to regimen C from B, but their parents refused further treatment thereafter. Two patients each, among the six patients with failure of induction, had the abnormal fusion genes BCR-ABL, MLL-AF4, and E2A-PBX1. In the present study, there were four (13.3%) induction deaths. These patients were Ph+ve, with high initial total leukocyte count (TLC) between 90,000/mm³ and 140,000/mm³. Morphology revealed L1 type of blasts, as well as aggressive bulky disease at the onset in the form of massive hepatosplenomegaly and generalized lymphadenopathy. All the deaths occurred in the male group, aged >7 years. The cause of death was determined as sepsis (Pseudomonas, in two cases), massive intracranial bleeding (one case), and massive upper gastrointestinal bleeding (one case).
Discussion
Acute lymphoblastic leukemia (ALL) is the most common neoplastic disease in children, which results from somatic mutation in a single lymphoid progenitor cell at one of several discrete stages of development. It accounts for 25% of all childhood cancers and approximately 75% of all cases of childhood leukemia. 9 To improve the survival of patients with ALL in developing countries, it is important to conduct research into the biology, treatment response, and prognostic factors. Effective protocols from the Western world may not be optimal in developing countries because factors considered for assigning risk groups may differ in developing countries. There is a need to assess survival data and identify risk factors for relapse in our set of patients. Although multiple studies have been performed across the world, there is a paucity of literature available on the risk factors, pattern of relapsed disease, and outcome of children with ALL in India. The present study was an attempt to study the clinical features, laboratory parameters, and prognostic factors for immediate outcome (induction of remission) in 40 newly diagnosed ALL children hospitalized during a period of 2 years in the Jammu region. They were analyzed to enable identification of problems, risk factors, and prognostic factors that might be specific to this patient population. These patients were treated as per the UK-ALL 2003 version 6 protocol after clinical stratification and were given chemotherapy accordingly. Supportive therapy in the form of blood products, intravenous fluids, antibiotics, nursing care, and other necessary medication was given, and patients were monitored subsequently for induction of remission or any adverse event and the final outcome. Hyperleukocytosis (defined as TLC >100 × 10⁹/L) was observed in six (15%) patients; this result is similar to studies by researchers from other Indian centers, who have reported hyperleukocytosis in 15.3%-23.2% of cases. Normal cytogenetics was reported in 12 (30%) patients, consistent with results of other studies. 13,14 The present study revealed a lower frequency of hyperdiploidy (5%), contrary to the frequency reported in the literature. In the present study, the most common chromosomal aberration detected was TEL-AML1 (8/40), followed by BCR-ABL (6/40), MLL-AF4 (4/40), and E2A-PBX1 (2/40).
Complete remission was thus attained in 66.6% of patients. The low remission rate in the present study may be attributed to the higher frequency of unfavorable chromosomal aberrations, especially BCR-ABL and MLL-AF4, and the relatively lower frequency of good prognostic chromosomal aberrations, such as hyperdiploidy. Mortality rates (10%) were similar to those reported in several Indian studies but significantly higher than those in Western studies. This may be explained on the basis of the higher percentage of unfavorable chromosomal aberrations and T-cell ALL in the Indian population. 15
What this study adds. The following results have been obtained in this study:
1. Higher percentage (15%) of BCR-ABL fusion gene in childhood ALL
2. Lower percentage (5%) of hyperdiploidy in childhood ALL
3. Lower age group for BCR-ABL type
4. Increased duration between onset of symptoms and diagnosis
5. Hepatosplenomegaly was the most common finding at presentation, seen in 85% of patients.
6. Patients carrying fusion genes with poor prognosis, namely BCR-ABL/E2A-PBX1, were from the border areas of Sunderbhani and Samba, where shelling, blasts, and firing are common from across the border. This indicates a possible common environmental factor that may be prevalent in these areas.
Author Contributions
AP wrote the first draft of the manuscript. AR, AK, and AAS helped in writing the manuscript and did primary corrections in the manuscript. RH and SKD made final corrections to the manuscript before submission. All authors reviewed and approved of the final manuscript. |
/**
* A TimeMark has a name and a time value.
* It can be compared to other TimeMarks.
*/
public class TimeMark implements Comparable<TimeMark> {
private String name;
private Float time;
private TimeMark () {
this.setName(null);
this.setTime((Float) null);
}
public TimeMark (String time) {
this();
this.setTime(time);
}
TimeMark (Float time) {
this();
this.setTime(time);
}
public TimeMark (String name, String time) {
this(time);
this.setName(name);
}
TimeMark (String name, Float time) {
this(time);
this.setName(name);
}
public String getName () {
return name;
}
public void setName (String name) {
String n = name;
if (n != null) {
n = n.replaceAll("\\s", "");
this.name = n;
}
}
public Float getTime () {
return time;
}
// keep time setters private, so that they cannot be modified later
private void setTime (String time) {
String t = time;
if (time != null) {
t = t.replaceAll("\\s", "");
this.time = Float.parseFloat(t);
}
}
private void setTime (Float time) {
this.time = time;
}
/**
* Return this TimeMark if it equals specified TimeMark.
* Return the specified TimeMark only if it equals this TimeMark and has a name while this TimeMark does not have a name.
* <p>
* This method can be handy if you want to avoid "duplicate" TimeMark objects in a list.
*
* @param t the TimeMark to be compared
* @return TimeMark which is considered more complete
*/
public TimeMark returnMoreComplete (TimeMark t) {
TimeMark rval = this;
if (t != null && this.isEqual(t) && this.getName() == null && t.getName() != null) {
rval = t;
}
return rval;
}
/**
* A specified TimeMark is equal to this TimeMark if it has the same time
* and either the same name or one of both names is null.
*
* @param t the TimeMark to be compared
* @return true if TimeMarks are considered equal
* false otherwise
*/
public boolean isEqual (TimeMark t) {
return t != null && this.getTime().equals(t.getTime()) && (
(this.getName() == null || t.getName() == null) ||
this.getName().equals(t.getName()));
}
public boolean isGreater (TimeMark t) {
return t != null && this.getTime().compareTo(t.getTime()) > 0;
}
	public boolean isSmaller (TimeMark t) {
		return t != null && this.getTime().compareTo(t.getTime()) < 0;
	}
public boolean isGreaterOrEqual (TimeMark t) {
return t != null && (isEqual(t) || isGreater(t));
}
public boolean isSmallerOrEqual (TimeMark t) {
return t != null && (isEqual(t) || isSmaller(t));
}
@Override
public int compareTo (TimeMark o) throws ClassCastException, NullPointerException {
if (o == null) {
throw(new NullPointerException());
}
if (this.isEqual(o)) {
return 0;
}
if (isSmaller(o)) {
return -1;
}
if (isGreater(o)) {
return 1;
}
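		// Times are equal but the names differ; treat the marks as equal for ordering.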
return 0;
}
	@Override
	public String toString () {
String rval = "";
if (name != null) {
rval = rval + name;
}
if (time != null) {
rval = rval + " " + time;
}
return rval;
}
} |
package net.aegistudio.brdfviewer;
import java.awt.Dimension;
import java.awt.GridLayout;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSpinner;
import javax.swing.SpinnerNumberModel;
import javax.swing.event.ChangeListener;
public class JDegreeField extends JPanel {
private static final long serialVersionUID = 1L;
protected final double max;
protected final JSpinner spinner;
protected final JLabel output;
public JDegreeField(double max, Dimension inputSize) {
this.max = max;
setBorder(null);
setLayout(new GridLayout(0, 1));
// The spinner input field.
spinner = new JSpinner(new SpinnerNumberModel(
0.0, 0.0, 0.999, 0.001));
spinner.setPreferredSize(inputSize);
add(spinner);
// The value transforming field.
output = new JLabel();
output.setHorizontalAlignment(JLabel.RIGHT);
add(output);
updateOutput();
}
public void setValue(double value) {
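		// The spinner model stores the fraction value / max, truncated to the
		// model's 0.001 step.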
		spinner.setValue(((int) (1000.0 * value / max)) / 1000.0);
updateOutput();
repaint();
}
public double getValue() {
return ((Double)spinner.getValue()) * max;
}
protected void updateOutput() {
output.setText(String.format(
"<html>Degree: %.3f<br>Radian: %.3f</html>",
getValue() * 180.0 / Math.PI, getValue()));
}
	@Override
	public void setEnabled(boolean enabled) {
spinner.setEnabled(enabled);
}
public void addChangeListener(ChangeListener listener) {
spinner.addChangeListener(listener);
}
}
|
package cz.tacr.elza.service.importnodes.vo;
import java.util.List;
import cz.tacr.elza.domain.ApScope;
import cz.tacr.elza.domain.ArrFile;
import cz.tacr.elza.domain.ArrStructuredObject;
/**
 * Interface of a source for import.
 *
 * The tree traversal must be DFS (depth-first search).
 *
 * @since 19.07.2017
 */
public interface ImportSource {
    /**
     * @return list of scopes used
     */
    List<ApScope> getScopes();
    /**
     * @return list of files used
     */
    List<ArrFile> getFiles();
    /**
     * @return list of structured objects (containers) used
     */
    List<ArrStructuredObject> getStructuredList();
    /**
     * @return is there another node?
     */
    boolean hasNext();
    /**
     * @param changeDeep callback for keeping track of the position in the tree (level changes)
     * @return the next node from the tree
     */
    Node getNext(DeepCallback changeDeep);
}
|
// PFAdd adds the specified elements to the specified HyperLogLog
func (p *Pipeline) PFAdd(key string, elements ...string) {
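	// The command carries 1 key argument plus one argument per element.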
p.Command("PFADD", 1+len(elements))
p.Arg(resp.Key(key))
for _, el := range elements {
p.Arg(resp.String(el))
}
} |
Strategy of achieving high beam quality on spatial intensity and wavefront simultaneously in high power lasers
High-power laser plays an important role in many fields, such as directed energy weapon, optoelectronic contermeasures, inertial confinement fusion, industrial processing and scientific research. The uniform nearfield and wavefront are the important part of the beam quality for high power lasers, which is conducive to maintaining the high spatial beam quality in propagation. We demonstrate experimentally that the spatial intensity and wavefront distribution at the output is well compensated simultaneously in the complex high-power solid-state laser system by using the small-aperture spatial light modulator (SLM) and deformable mirror (DM) in the front stage. The experimental setup is a hundred-Joule-level Nd:glass laser system operating at three wavelengths at 1053 nm (1ω), 527 nm (2ω) and 351 nm (3ω) with 3 ns pulse duration with the final output beam aperture of 60 mm. While the clear arperture of the electrically addressable SLM is less than 20 mm and the effective diameter of the 52-actuators DM is about 15 mm. In the beam shaping system, the key point is that the two front-stage beam shaping devices needs to precompensate the gain nonuniform and wavefront distortion of the laser system. The details of the iterative algorithm for improving the beam quality and the strategy of achiving high beam quality on spatial intensity and wavefront simultaneously are presented. Experimental results show that the output wavefront RMS value is 0.06, and simultaneously the output near-field modulation is 1.38:1 and the fluence contrast is 10.5% at 3 ns at 1053nm with 40-Joule-level energy. |
// Fast removes packet loss and delays.
func (IPTables) Fast(ctx context.Context, node string) error {
output, err := ssh.CombinedOutput(ctx, node, "/sbin/tc", "qdisc", "del", "dev", "eth0", "root")
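	// Deleting the root qdisc fails with "No such file or directory" when no
	// qdisc is installed on eth0; that simply means there is nothing to remove.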
if err != nil && strings.Contains(string(output), "RTNETLINK answers: No such file or directory") {
err = nil
}
return err
} |
package com.epam.healenium.service.impl;
import com.epam.healenium.PageAwareBy;
import com.epam.healenium.SelfHealingEngine;
import com.epam.healenium.model.LastHealingDataDto;
import com.epam.healenium.service.HealingElementsService;
import com.epam.healenium.treecomparing.Node;
import lombok.extern.slf4j.Slf4j;
import org.openqa.selenium.WebElement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@Slf4j
public class HealingElementsServiceImpl extends AbstractHealingServiceImpl implements HealingElementsService {
public HealingElementsServiceImpl(SelfHealingEngine engine) {
super(engine);
}
@Override
public List<WebElement> heal(PageAwareBy pageBy, List<WebElement> pageElements) {
Optional<LastHealingDataDto> lastHealingDataDto = getLastHealingDataDto(pageBy);
if (!lastHealingDataDto.isPresent() || lastHealingDataDto.get().getPaths().isEmpty()) {
return pageElements;
}
List<List<Node>> lastValidPath = new ArrayList<>(lastHealingDataDto.map(LastHealingDataDto::getPaths).get());
if (lastValidPath.isEmpty() && !pageElements.isEmpty()) {
engine.saveElements(pageBy, pageElements);
return pageElements;
} else {
return healAndSave(pageBy, pageElements, lastValidPath, lastHealingDataDto);
}
}
private List<WebElement> healAndSave(PageAwareBy pageBy,
List<WebElement> elementsFromPage,
List<List<Node>> nodesFromDb,
Optional<LastHealingDataDto> lastHealingDataDto) {
Map<WebElement, List<Node>> elementToNodeFromPage = new HashMap<>();
List<List<Node>> nodesFromDbToDelete = new ArrayList<>();
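        // Index the live page elements by their DOM paths. Any stored path that
        // matches a live element needs no healing, so remove it from both sides;
        // the paths left in nodesFromDb belong to elements that disappeared and
        // must be healed.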
elementsFromPage.forEach(e -> elementToNodeFromPage.put(e, new ArrayList<>(engine.getNodePath(e))));
nodesFromDb.forEach(node -> {
elementToNodeFromPage.entrySet().removeIf(entry -> {
if (node.equals(entry.getValue())) {
nodesFromDbToDelete.add(node);
return true;
}
return false;
});
});
nodesFromDb.removeAll(nodesFromDbToDelete);
        List<WebElement> healedElements = nodesFromDb.stream()
                .map(nodes -> {
                    log.warn("Failed to find an element using locator {}\nTrying to heal...", pageBy.getBy());
                    return healLocator(pageBy, nodes, lastHealingDataDto).orElse(null);
                })
                .filter(Objects::nonNull)
                .map(driver::findElement)
                .collect(Collectors.toList());
addHealedElements(elementsFromPage, elementToNodeFromPage, healedElements);
if (!elementToNodeFromPage.isEmpty()) {
List<List<Node>> nodesToSave = lastHealingDataDto.get().getPaths();
nodesToSave.addAll(elementToNodeFromPage.values());
engine.saveNodes(pageBy, nodesToSave);
}
return elementsFromPage;
}
private void addHealedElements(List<WebElement> elementsFromPage, Map<WebElement, List<Node>> elementToNodeFromPage, List<WebElement> healedElements) {
if (!healedElements.isEmpty()) {
elementsFromPage.addAll(healedElements);
List<List<Node>> healedNodes = healedElements.stream()
.map(engine::getNodePath)
.collect(Collectors.toList());
if (!elementToNodeFromPage.isEmpty()) {
healedNodes.forEach(healedNode -> elementToNodeFromPage.entrySet()
.removeIf(entry -> healedNode.equals(entry.getValue())));
}
}
}
}
|
// Generates the SELECT sql for this dataset and uses Exec#ScanValsContext to scan the results into a slice of primitive
// values
//
// i: A pointer to a slice of primitive values
func (sd *SelectDataset) ScanValsContext(ctx context.Context, i interface{}) error {
if sd.queryFactory == nil {
return errQueryFactoryNotFoundError
}
return sd.Executor().ScanValsContext(ctx, i)
} |
/*
* Copyright (c) 2020-2021 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "components/ui_box_progress.h"
#include "draw/draw_utils.h"
#include "engines/gfx/gfx_engine_manager.h"
#include "gfx_utils/graphic_log.h"
namespace OHOS {
UIBoxProgress::UIBoxProgress()
: progressWidth_(0), progressHeight_(0), isValidWidthSet_(false), isValidHeightSet_(false)
{
SetDirection(Direction::DIR_LEFT_TO_RIGHT);
}
void UIBoxProgress::DrawValidRect(BufferInfo& gfxDstBuffer,
const Image* image,
const Rect& rect,
const Rect& invalidatedArea,
const Style& style,
uint16_t radius)
{
Rect cordsTmp;
if ((image != nullptr) && (image->GetSrcType() != IMG_SRC_UNKNOWN)) {
ImageHeader header = {0};
image->GetHeader(header);
Rect area(rect);
switch (direction_) {
case Direction::DIR_LEFT_TO_RIGHT:
cordsTmp.SetPosition(area.GetLeft() - radius, area.GetTop());
break;
case Direction::DIR_TOP_TO_BOTTOM:
cordsTmp.SetPosition(area.GetLeft(), area.GetTop() - radius);
break;
case Direction::DIR_RIGHT_TO_LEFT:
cordsTmp.SetPosition(area.GetRight() + radius - header.width, area.GetTop());
break;
case Direction::DIR_BOTTOM_TO_TOP:
cordsTmp.SetPosition(area.GetLeft(), area.GetBottom() + radius - header.height);
break;
default:
GRAPHIC_LOGE("UIBoxProgress: DrawValidRect direction Err!\n");
break;
}
cordsTmp.SetHeight(header.height);
cordsTmp.SetWidth(header.width);
if (area.Intersect(area, invalidatedArea)) {
image->DrawImage(gfxDstBuffer, cordsTmp, area, style, opaScale_);
}
} else {
BaseGfxEngine::GetInstance()->DrawRect(gfxDstBuffer, rect, invalidatedArea, style, opaScale_);
}
if (style.lineCap_ == CapType::CAP_ROUND) {
DrawRoundCap(gfxDstBuffer, image, {cordsTmp.GetX(), cordsTmp.GetY()}, rect, invalidatedArea, radius, style);
}
}
void UIBoxProgress::DrawRoundCap(BufferInfo& gfxDstBuffer,
const Image* image,
const Point& imgPos,
const Rect& rect,
const Rect& invalidatedArea,
uint16_t radius,
const Style& style)
{
Point leftTop;
Point leftBottom;
Point rightTop;
Point rightBottom;
switch (direction_) {
case Direction::DIR_LEFT_TO_RIGHT:
case Direction::DIR_RIGHT_TO_LEFT: {
leftTop.x = rect.GetLeft() - 1;
leftTop.y = rect.GetTop() + radius - 1;
leftBottom.x = leftTop.x;
leftBottom.y = rect.GetBottom() - radius + 1;
rightTop.x = rect.GetRight() + 1;
rightTop.y = leftTop.y;
rightBottom.x = rightTop.x;
rightBottom.y = leftBottom.y;
break;
}
case Direction::DIR_TOP_TO_BOTTOM:
case Direction::DIR_BOTTOM_TO_TOP: {
leftTop.x = rect.GetLeft() + radius - 1;
leftTop.y = rect.GetTop() - 1;
rightTop.x = rect.GetRight() - radius + 1;
rightTop.y = leftTop.y;
leftBottom.x = leftTop.x;
leftBottom.y = rect.GetBottom() + 1;
rightBottom.x = rightTop.x;
rightBottom.y = leftBottom.y;
break;
}
default:
GRAPHIC_LOGE("UIBoxProgress: DrawRoundCap direction Err!\n");
break;
}
Style capStyle = style;
capStyle.lineWidth_ = radius;
capStyle.lineColor_ = style.bgColor_;
if ((image != nullptr) && (image->GetSrcType() != IMG_SRC_UNKNOWN)) {
capStyle.lineOpa_ = style.imageOpa_;
} else {
capStyle.lineOpa_ = style.bgOpa_;
}
ArcInfo arcInfo = {{0}};
arcInfo.radius = radius;
arcInfo.imgPos = imgPos;
arcInfo.imgSrc = image;
bool isEvenLen = false;
if (direction_ == Direction::DIR_LEFT_TO_RIGHT || direction_ == Direction::DIR_RIGHT_TO_LEFT) {
if (rect.GetHeight() % 2 == 0) { // 2: determine the odd or even number of the height
isEvenLen = true;
}
} else if (rect.GetWidth() % 2 == 0) { // 2: determine the odd or even number of the width
isEvenLen = true;
}
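    // An even-length cross section has no single center pixel, so the round cap
    // is stitched from four quarter arcs around two adjacent centers; an odd
    // length uses a single half arc at each end.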
if (isEvenLen) {
arcInfo.center = leftTop;
arcInfo.startAngle = THREE_QUARTER_IN_DEGREE;
arcInfo.endAngle = 0;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
arcInfo.center = leftBottom;
arcInfo.startAngle = SEMICIRCLE_IN_DEGREE;
arcInfo.endAngle = THREE_QUARTER_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
arcInfo.center = rightTop;
arcInfo.startAngle = 0;
arcInfo.endAngle = QUARTER_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
arcInfo.center = rightBottom;
arcInfo.startAngle = QUARTER_IN_DEGREE;
arcInfo.endAngle = SEMICIRCLE_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
} else {
switch (direction_) {
case Direction::DIR_LEFT_TO_RIGHT:
case Direction::DIR_RIGHT_TO_LEFT: {
arcInfo.center = leftTop;
arcInfo.startAngle = SEMICIRCLE_IN_DEGREE;
arcInfo.endAngle = 0;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
arcInfo.center = rightTop;
arcInfo.startAngle = 0;
arcInfo.endAngle = SEMICIRCLE_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
break;
}
case Direction::DIR_TOP_TO_BOTTOM:
case Direction::DIR_BOTTOM_TO_TOP: {
arcInfo.center = leftTop;
arcInfo.startAngle = THREE_QUARTER_IN_DEGREE;
arcInfo.endAngle = QUARTER_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
arcInfo.center = leftBottom;
arcInfo.startAngle = QUARTER_IN_DEGREE;
arcInfo.endAngle = THREE_QUARTER_IN_DEGREE;
BaseGfxEngine::GetInstance()->DrawArc(gfxDstBuffer, arcInfo, invalidatedArea, capStyle, opaScale_,
CapType::CAP_NONE);
break;
}
default:
GRAPHIC_LOGE("UIBoxProgress: DrawRoundCap direction Err!\n");
break;
}
}
}
void UIBoxProgress::GetBackgroundParam(Point& startPoint,
int16_t& width,
int16_t& height,
uint16_t& radius,
const Style& style)
{
Rect rect = GetOrigRect();
// 2: Half of the gap
startPoint.x = rect.GetLeft() + style_->borderWidth_ + style_->paddingLeft_ + (GetWidth() - progressWidth_) / 2;
// 2: Half of the gap
startPoint.y = rect.GetTop() + style_->borderWidth_ + style_->paddingTop_ + (GetHeight() - progressHeight_) / 2;
radius = 0;
width = progressWidth_;
height = progressHeight_;
if (style.lineCap_ == CapType::CAP_ROUND) {
switch (direction_) {
case Direction::DIR_LEFT_TO_RIGHT:
case Direction::DIR_RIGHT_TO_LEFT:
radius = (progressHeight_ + 1) >> 1;
width -= radius << 1;
startPoint.x += radius;
break;
case Direction::DIR_TOP_TO_BOTTOM:
case Direction::DIR_BOTTOM_TO_TOP:
radius = (progressWidth_ + 1) >> 1;
height -= radius << 1;
startPoint.y += radius;
break;
default:
GRAPHIC_LOGE("UIBoxProgress: GetBackgroundParam direction Err!\n");
return;
}
}
}
void UIBoxProgress::DrawBackground(BufferInfo& gfxDstBuffer, const Rect& invalidatedArea)
{
Point startPoint;
int16_t progressWidth;
int16_t progressHeight;
uint16_t radius;
GetBackgroundParam(startPoint, progressWidth, progressHeight, radius, *backgroundStyle_);
Rect coords(startPoint.x, startPoint.y, startPoint.x + progressWidth - 1, startPoint.y + progressHeight - 1);
DrawValidRect(gfxDstBuffer, backgroundImage_, coords, invalidatedArea, *backgroundStyle_, radius);
}
void UIBoxProgress::DrawForeground(BufferInfo& gfxDstBuffer, const Rect& invalidatedArea, Rect& coords)
{
Point startPoint;
int16_t progressWidth;
int16_t progressHeight;
uint16_t radius;
GetBackgroundParam(startPoint, progressWidth, progressHeight, radius, *foregroundStyle_);
int16_t length;
switch (direction_) {
case Direction::DIR_LEFT_TO_RIGHT: {
length = GetCurrentPos(progressWidth - 1);
coords.SetRect(startPoint.x, startPoint.y, startPoint.x + length, startPoint.y + progressHeight - 1);
break;
}
case Direction::DIR_RIGHT_TO_LEFT: {
length = GetCurrentPos(progressWidth - 1);
coords.SetRect(startPoint.x + progressWidth - 1 - length,
startPoint.y, startPoint.x + progressWidth - 1, startPoint.y + progressHeight - 1);
break;
}
case Direction::DIR_TOP_TO_BOTTOM: {
length = GetCurrentPos(progressHeight - 1);
coords.SetRect(startPoint.x, startPoint.y, startPoint.x + progressWidth - 1, startPoint.y + length);
break;
}
case Direction::DIR_BOTTOM_TO_TOP: {
length = GetCurrentPos(progressHeight - 1);
coords.SetRect(startPoint.x, startPoint.y + progressHeight - 1 - length,
startPoint.x + progressWidth - 1, startPoint.y + progressHeight - 1);
break;
}
default: {
GRAPHIC_LOGE("UIBoxProgress: DrawForeground direction Err!\n");
return;
}
}
DrawValidRect(gfxDstBuffer, foregroundImage_, coords, invalidatedArea, *foregroundStyle_, radius);
}
void UIBoxProgress::OnDraw(BufferInfo& gfxDstBuffer, const Rect& invalidatedArea)
{
UIView::OnDraw(gfxDstBuffer, invalidatedArea);
if (enableBackground_) {
DrawBackground(gfxDstBuffer, invalidatedArea);
}
if ((lastValue_ - rangeMin_ != 0) || (foregroundStyle_->lineCap_ == CapType::CAP_ROUND)) {
Rect coords;
DrawForeground(gfxDstBuffer, invalidatedArea, coords);
}
}
} // namespace OHOS
|
    /// Creates an owned surface from a vector and its dimensions.
pub fn from_vec(height: usize, width: usize, data: Vec<T>) -> Self {
assert_eq!(height * width, data.len());
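        // Row-major layout: rows are `width` elements apart and adjacent columns are contiguous.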
let shape = Shape {
row_stride: width,
col_stride: 1,
height,
width,
start: 0,
end: data.len(),
};
Self { shape, data }
} |
import numpy as np
import pytest
from pandas import (
NA,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("dtype", ["int64", "float64"])
def test_to_numpy_na_value(dtype):
# GH#48951
ser = Series([1, 2, NA, 4])
result = ser.to_numpy(dtype=dtype, na_value=0)
expected = np.array([1, 2, 0, 4], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_cast_before_setting_na():
# GH#50600
ser = Series([1])
result = ser.to_numpy(dtype=np.float64, na_value=np.nan)
expected = np.array([1.0])
tm.assert_numpy_array_equal(result, expected)
|
/**
* Evaluates CSS properties of DOM tree
*
* @param doc Document tree
* @param media Media
* @param inherit Use inheritance
* @return Map where each element contains its CSS properties
*/
public StyleMap evaluateDOM(XmlDocument doc, MediaSpec media, final boolean inherit) {
DeclarationMap declarations = assingDeclarationsToDOM(doc, media, inherit);
StyleMap nodes = new StyleMap(declarations.size());
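        // Depth-first traversal: for each element, push its matched declarations
        // into a fresh NodeData and, if enabled, inherit computed values from the
        // parent element.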
Traversal<StyleMap> traversal = new Traversal<StyleMap>(doc, (Object) declarations) {
@Override
protected void processElement(StyleMap result, PsiElement current, Object source) {
NodeData main = CSSFactory.createNodeData();
List<Declaration> declarations = ((DeclarationMap) source).get(current, null);
if (declarations != null) {
for (Declaration d : declarations) {
main.push(d);
}
if (inherit)
main.inheritFrom(result.get(walker.parentElement(), null));
}
result.put(current, null, main.concretize());
for (PseudoDeclaration pseudo : ((DeclarationMap) source).pseudoSet(current)) {
NodeData pdata = CSSFactory.createNodeData();
declarations = ((DeclarationMap) source).get(current, pseudo);
if (declarations != null) {
for (Declaration d : declarations) {
pdata.push(d);
}
pdata.inheritFrom(main);
}
result.put(current, pseudo, pdata.concretize());
}
}
};
traversal.levelTraversal(nodes);
return nodes;
} |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-11 05:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('securedpi_locks', '0004_auto_20161010_2142'),
]
operations = [
migrations.RenameField(
model_name='lock',
old_name='is_locked',
new_name='facial_recognition',
),
migrations.AddField(
model_name='lock',
name='is_active',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='lock',
name='status',
field=models.CharField(choices=[('locked', 'locked'), ('unlocked', 'unlocked'), ('pending', 'pending')], default='unlocked', max_length=8),
),
migrations.AlterField(
model_name='lock',
name='description',
field=models.CharField(blank=True, max_length=25),
),
migrations.AlterField(
model_name='lock',
name='location',
field=models.CharField(max_length=25),
),
migrations.AlterField(
model_name='lock',
name='raspberry_pi_id',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='lock',
name='title',
field=models.CharField(max_length=15),
),
migrations.AlterField(
model_name='lock',
name='web_cam_id',
field=models.CharField(blank=True, max_length=20),
),
]
|
n = int(input())
# Capacities of the five legs: train, bus, taxi, plane, boat
a, b, c, d, e = (int(input()) for _ in range(5))

# Each leg needs ceil(n / capacity) departures; departures leave every unit of
# time, so the leg with the most departures is the bottleneck. The remaining
# four legs add one unit of travel time each.
trips = max((n + cap - 1) // cap for cap in (a, b, c, d, e))
print(trips + 4) |
package com.revengemission.commons.ratelimiter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import java.io.Serializable;
/**
* @author wzhang
*/
@Configuration
@EnableConfigurationProperties(RateLimiterProperties.class)
@AutoConfigureAfter(RedisAutoConfiguration.class)
public class RateLimiterAutoConfiguration {
@Autowired
private RateLimiterProperties properties;
@Bean
@ConditionalOnMissingBean
public LettuceConnectionFactory redisConnectionFactory() {
RedisStandaloneConfiguration redisStandaloneConfiguration = new RedisStandaloneConfiguration(properties.getHost(), properties.getPort());
redisStandaloneConfiguration.setPassword(properties.getPassword());
return new LettuceConnectionFactory(redisStandaloneConfiguration);
}
@Bean
@ConditionalOnMissingBean(name = "intRedisTemplate")
public RedisTemplate<String, Serializable> intRedisTemplate(RedisConnectionFactory redisConnectionFactory) {
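        // String keys with JSON-serialized values keep limiter entries readable in Redis.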
RedisTemplate<String, Serializable> template = new RedisTemplate<>();
template.setKeySerializer(new StringRedisSerializer());
template.setValueSerializer(new GenericJackson2JsonRedisSerializer());
template.setConnectionFactory(redisConnectionFactory);
return template;
}
@Bean
RateLimiterAop rateLimiterAop(RedisTemplate<String, Serializable> intRedisTemplate) {
return new RateLimiterAop(intRedisTemplate);
}
}
|
It was a tense, emotional afternoon at the Michigan Board of Education.
In the two months since the board put out a draft proposal for how schools could choose to support LGBT kids, it’s become a major controversy.
That’s mostly because the suggestions include letting transgender students use the bathrooms that fit their gender identity – not just the single-stall staff bathrooms.
And some parents feel like the board is trying to cut them out of the conversation, because of wording that’s intended to protect the privacy and safety of students whose parents might not support their gender identity:
Transgender and GNC students have the right to decide when, with whom, and to what extent to share private information. When contacting the parent/guardian of a transgender or GNC student, school staff should use the student’s legal name and the pronoun corresponding to the student’s assigned sex at birth, unless the student or parent/guardian has specified otherwise.
Students share personal experiences with depression, suicidal thoughts
More than a dozen LGBT students turned out to Tuesday’s meeting, where they spoke about the need for policies like the ones the board is suggesting.
“My name is Aiden Ramirez-Tatum. I’m an 18-year-old high school senior at Greenville High School. I’m a trumpet player, a club leader, and an AP student. Four years ago, I wanted to die,” he said, adding that he was bullied relentlessly in middle school at a different school district.
“I was 14 and had just finished a year of school online, an option I chose after facing three years of consistent bullying at my middle school, because I was openly bisexual and gender non-conforming. At the time, I believed what many people still do: that this kind of anguish was simply a symptom of my identity. I was transgender, and I would always suffer.”
Ramirez-Tatum says he transferred to a charter school sophomore year, where an assistant principal told him it was “interesting and fascinating that I was transgender, because she was a scientist.”
After transferring yet again, Ramirez-Tatum says he found a school that accepted him.
“They enacted many of the practices outlined in the guidance the school board has presented, and I had a future again. I realized my pain was not me, it was a side effect of hatred, not who I was.”
His father, James Tatum, also spoke to the board during the public comment section of the meeting.
“I have seen what ignorance and discrimination did to my son. When Aiden first came out to my wife and I, and then the community, the school district we lived in had no guidelines for transgender youth. And bullying by students, and the administration, caused Aiden to live in fear. He withdrew from this abusive situation,” he said.
Tatum says after they transferred Aiden to Greenville, “I’ve seen my son go from a withdrawn, bullied teen, to the top of his class,” his voice catching and tears welling up in his eyes.
“When a child feels safe, they remove mental blocks they put up to protect themselves, and learning becomes easier. I don’t have an education degree, but I stand up here as a father, a father of a very proud son, asking that you not bow to the backlash, and do what is right for these children.”
Republican lawmakers, some parents say the proposals are dangerous
State Senator Patrick Colbeck told the board he believes their intentions are good, but the proposals they’ve suggested are misguided.
“Unfortunately, the policies that are being promoted actually…promote behaviors that are not supportive of safety, not supportive of good health; not only for the students that choose that, but also for all the other students that are in that environment," Colbeck said.
“You know, it’s not bullying or peer pressure that leads to a higher incidence of AIDS in the LGBT community than the normal population,” Colbeck said.
“During elementary and middle school, I was bullied incessantly. I haven’t seen the State Board of Ed propose a policy for kids that get good grades and carry a trombone, right?”
But Jane Lach of Howell told the board that growing up as a transgender student in the 60s, the bullying she experienced was likely on a different level than Colbeck’s.
“I can assure you, that when he carries his trombone home from school, nobody lied in wait for him, to see if they could catch him and beat him senseless for playing the trombone,” she said.
Several commenters spoke to the board about their concerns as parents that the privacy section of the proposals would shut them out of the conversation.
And one mom said she worries about her daughters’ safety if the proposals are enacted.
“Creating an open door policy to restrooms at elementary, middle school and high school age is going to create problems that I know we all have in our heads,” Jennifer Schlosser of Mason told the board.
“I don’t want to discourage anyone from creating good policy, and I think that this policy is on the right track. My question is, can we revise the bathroom policy, and create something that everyone can handle?”
The public comment period will run through May 11th. Already, more than 8,000 comments have been submitted to the board online. Still, the board says they currently have no plans to vote on the proposals anytime soon. |
Changes in cyclic AMP level of rat thyroid by acute and chronic stimulation of thyrotropin in vivo.
To assess a possible postmortem change in the level of cyclic AMP, the thyroids were snap-frozen at various time intervals after removal. Rats were fed a low-iodine diet (LID) with PTU for 2 weeks and a week after PTU discontinuation (PTU withdrawal). In all cases, the cyclic AMP level tended to increase as time elapsed from removing till fixing the thyroids, but in the PTU withdrawal group, the level was rapidly increased 2-fold after 5 min. In an acute experiment, the thyroids were removed under anesthesia and frozen rapidly. Intravenous administration of ovine thyrotropin (250 mU) and TRH (500 ng) brought about a rapid increase in the thyroidal level of cyclic AMP of 40% and 20% over the control level, respectively. Two weeks after PTU treatment, circulating thyrotropin was increased to a maximum of 19-fold, and a further enhancement ("rebound") was observed after PTU withdrawal. PTU treatment led to an increase in the thyroidal level of cyclic AMP per μg DNA to 60% over the control value. Following PTU withdrawal, the rise in the level of cyclic AMP returned to the normal level. However, there was no change in the concentration when it was expressed per mg wet tissue weight. Therefore, the increase in the thyroidal concentration of cyclic AMP per μg of DNA may be due to an increase in the volume of the follicular cells. |
def isoverlap(self, point):
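        """Return True if the point lies on any ship or within its buffer zone."""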
for ship in self.content:
if (point in ship.ext) or (point in ship.buffer):
return True
return False |
import { Injectable } from '@angular/core';
import { Cliente } from './cliente.model';
import { Subject } from 'rxjs';
import { HttpClient } from '@angular/common/http';
import { map } from 'rxjs/operators';
import { Router } from '@angular/router';
@Injectable({ providedIn: 'root' })
export class ClienteService {
private clientes: Cliente[] = [];
private listaClientesAtualizada = new Subject<{
clientes: Cliente[];
maxClientes: number;
}>();
constructor(private httpClient: HttpClient, private router: Router) {}
getCliente(idCliente: string) {
//return {...this.clientes.find((cli) => cli.id === idCliente)};
return this.httpClient.get<{
_id: string;
nome: string;
fone: string;
email: string;
imagemURL: string;
criador: string;
}>(`http://localhost:3000/api/clientes/${idCliente}`);
}
getClientes(pagesize: number, page: number): void {
const parametros = `?pagesize=${pagesize}&page=${page}`;
this.httpClient
.get<{ mensagem: string; clientes: any; maxClientes: number }>(
'http://localhost:3000/api/clientes' + parametros
)
.pipe(
map((dados) => {
return {
clientes: dados.clientes.map((cliente) => {
return {
id: cliente._id,
nome: cliente.nome,
fone: cliente.fone,
email: cliente.email,
imagemURL: cliente.imagemURL,
criador: cliente.criador,
};
}),
maxClientes: dados.maxClientes,
};
})
)
.subscribe((dados) => {
console.log(dados.clientes);
this.clientes = dados.clientes;
this.listaClientesAtualizada.next({
clientes: [...this.clientes],
maxClientes: dados.maxClientes,
});
});
}
adicionarCliente(nome: string, fone: string, email: string, imagem: File) {
const dadosCliente = new FormData();
dadosCliente.append('nome', nome);
dadosCliente.append('fone', fone);
dadosCliente.append('email', email);
dadosCliente.append('imagem', imagem);
this.httpClient
.post<{ mensagem: string; cliente: Cliente }>(
'http://localhost:3000/api/clientes',
dadosCliente
)
.subscribe((dados) => {
this.router.navigate(['/']);
});
}
removerCliente(id: string) {
return this.httpClient.delete(`http://localhost:3000/api/clientes/${id}`);
}
getListaDeClientesAtualizadaObservable() {
return this.listaClientesAtualizada.asObservable();
}
atualizarCliente(
id: string,
nome: string,
fone: string,
email: string,
imagem: File | string
) {
//const cliente: Cliente = { id, nome, fone, email, imagemURL: null};
let clienteData: Cliente | FormData;
if (typeof imagem === 'object') {
      // it's a file, so build a FormData
clienteData = new FormData();
clienteData.append('id', id);
clienteData.append('nome', nome);
clienteData.append('fone', fone);
clienteData.append('email', email);
      clienteData.append('imagem', imagem, nome); // key, photo, and a name for the file
} else {
      // send plain JSON
clienteData = {
id: id,
nome: nome,
fone: fone,
email: email,
imagemURL: imagem,
criador: null,
};
}
console.log(typeof clienteData);
this.httpClient
.put(`http://localhost:3000/api/clientes/${id}`, clienteData)
.subscribe((res) => {
this.router.navigate(['/']);
});
}
}
|
# coding=utf-8
from hashlib import sha1
import os

from moar import Storage

from .utils import RES_PATH, get_impath, get_raw_data

BASE_URL = 'http://example.com'


def get_random_key():
    return sha1(os.urandom(12)).hexdigest()


def test_get_thumbsdir():
    name = 'qwertyuiop'

    s = Storage(RES_PATH, BASE_URL)
    assert s.get_thumbsdir(name) == 't'

    s = Storage(RES_PATH, BASE_URL, thumbsdir='thumbs')
    assert s.get_thumbsdir(name) == 'thumbs'

    s = Storage(RES_PATH, BASE_URL, thumbsdir=lambda n: n[:3])
    assert s.get_thumbsdir(name) == name[:3]


def test_save():
    s = Storage(RES_PATH, BASE_URL)
    path = 'a200x140.png'
    name, _ = os.path.splitext(os.path.basename(path))
    key = get_random_key()
    data = get_raw_data(get_impath(path))

    thumb = s.save(path, key, 'png', data)
    assert thumb.url == '/'.join([BASE_URL, 't', name + '.' + key + '.png'])
    assert thumb.key == key


def test_get_nn_thumb():
    s = Storage(RES_PATH, BASE_URL)
    path = 'a200x140.png'
    key = get_random_key()

    thumb = s.get_thumb(path, key, 'jpeg')
    assert not thumb


def test_get_saved_thumb():
    s = Storage(RES_PATH, BASE_URL)
    path = 'a200x140.png'
    key = get_random_key()
    data = get_raw_data(get_impath(path))

    thumb = s.save(path, key, 'jpeg', data)
    thumb2 = s.get_thumb(path, key, 'jpeg')
    assert thumb.url == thumb2.url
|
Classification of Bicycle Traffic Patterns in Five North American Cities
This study used a unique database of long-term bicycle counts from 38 locations in five North American cities and along the Route Verte in Quebec, Canada, to analyze bicycle ridership patterns. The cities in the study were Montreal, Quebec; Ottawa, Ontario; and Vancouver, British Columbia, in Canada and Portland, Oregon, and San Francisco, California, in the United States. Count data showed that the bicycle volume patterns at each location could be classified as utilitarian, mixed utilitarian, mixed recreational, and recreational. Study locations classified by these categories were found to have consistent hourly and weekly traffic patterns across cities, despite considerable differences between the cities in their weather, size, and urban form. Seasonal patterns across the four categories and in the cities also were identified. Expansion factors for each classification are presented by hour and day of the week. Monthly expansion factors are presented for each city. Finally, traffic volume characteristics are presented for comparison purposes. |
package com.suntiago.dblibDemo;
import android.content.Context;
import com.suntiago.lockpattern.PatternManager;
import com.suntiago.sloth.SlothApplication;
import com.suntiago.sloth.account.AccountManager;
import com.suntiago.sloth.utils.FileUtils;
import com.suntiago.sloth.utils.file.StorageManagerHelper;
import com.suntiago.sloth.utils.log.CrashHandler;
import com.suntiago.sloth.utils.log.Slog;
/**
* Created by Jeremy on 2018/11/20.
*/
public class DemoApp extends SlothApplication {
static final String COM = "suntiago";
  static final String APP_NAME = "demo";
@Override
public void onCreate() {
super.onCreate();
Context ct = this;
    StorageManagerHelper.getStorageHelper().initPath(COM, APP_NAME);
    FileUtils.initPath(COM, APP_NAME);
AccountManager.init(ct);
PatternManager.init(ct);
Slog.init(ct);
Slog.setDebug(true, true);
Slog.enableSaveLog(false);
CrashHandler crashHandler = CrashHandler.getInstance();
crashHandler.init(getApplicationContext());
/*com.suntiago.network.network.utils.Slog.setDebug(logEnable, logEnable);
com.suntiago.network.network.utils.Slog.setLogCallback(
new com.suntiago.network.network.utils.Slog.ILog() {
@Override
public void i(String tag, String msg) {
Slog.i(tag, msg);
}
@Override
public void v(String tag, String msg) {
Slog.v(tag, msg);
}
@Override
public void d(String tag, String msg) {
Slog.d(tag, msg);
}
@Override
public void e(String tag, String msg) {
Slog.e(tag, msg);
}
@Override
public void state(String packName, String state) {
Slog.state(packName, state);
}
});*/
}
}
|
/**
* Parses an address from JSON format into a Location object
* @param json JSON representation of the address
* @return A Location object representing the given address
*/
public static Location parseLocationFromJson(JSONObject json) {
Location address = new Builder()
.inCountry(json.getAsString("country"))
.inCity(json.getAsString("city"))
.inRegion(json.getAsString("region"))
.onStreet(json.getAsString("streetName"))
.atStreetNumber(json.getAsString("streetNumber"))
.withPostCode(json.getAsString("postcode"))
.atDistrict(json.getAsString("district"))
                .inSuburb(json.getAsString("suburb"))
.build();
return address;
} |
A computer chip designed to mimic the performance of the human brain has hit a major new milestone.
The chip, developed by IBM, Cornell Tech and iniLabs, is a significant step up from its performance just over two years ago, when the project was first announced.
The SyNAPSE chip, which stands for Systems of Neuromorphic Adaptive Plastic Scalable Electronics, is now capable of 1 million programmable neurons, 256 million programmable synapses and 46 billion synaptic operations per second, per watt. This advancement points the way toward a future of faster, cooler and more compact cognitive computing in a wide range of scenarios.
See also: 8 Ways Tech Has Completely Rewired Our Brains
However, the development, which is funded by the Defense Advanced Research Projects Agency (DARPA), is still currently limited to the lab.
Referred to as "cognitive computing" because it attempts to mimic the interactions of neurons and synapses in biological brains, the approach uses what IBM terms advanced algorithms and silicon circuitry to allow for more organic problem solving based on hypotheses, past experiences and trial and error, just like a human brain.
"These brain-inspired chips could transform mobility, via sensory and intelligent applications that can fit in the palm of your hand but without the need for Wi-Fi," said Dr. Dharmendra S. Modha, chief scientist at IBM Research's Brain-Inspired Computing unit.
Some of the future applications the researchers envision the SyNAPSE chip facilitating include solar-powered, leaf-shaped sensor modules that could send out environmental and forest fire alerts as well as assistive glasses that would be able to guide the visually impaired wearer without the need of a Wi-Fi connection.
Concept drawings of solar-powered sensors designed to detect changes in the environment. Image: IBM Research
An earlier prototype of the neurosynaptic core project was revealed in 2011. At the time, the chip boasted only 256 neurons. According to the research team, the current chip, built on Samsung’s 28nm process technology, is roughly the size of a postage stamp and can run on a small amount of energy, about equivalent to what it takes to power a hearing aid.
"It is an astonishing achievement to leverage a process traditionally used for commercially available, low-power mobile devices to deliver a chip that emulates the human brain by processing extreme amounts of sensory information with very little power," said Shawn Han, vice president of Samsung's foundry marketing. "This is a huge architectural breakthrough that is essential as the industry moves toward the next-generation cloud and big-data processing." |
Addressing the looming identity crisis in single cell RNA-seq
Single cell RNA-sequencing technology (scRNA-seq) provides a new avenue to discover and characterize cell types, but the experiment-specific technical biases and analytic variability inherent to current pipelines may undermine the replicability of these studies. Meta-analysis of rapidly accumulating data is further hampered by the use of ad hoc naming conventions. Here we demonstrate our replication framework, MetaNeighbor, that allows researchers to quantify the degree to which cell types replicate across datasets, and to rapidly identify clusters with high similarity for further testing. We first measure the replicability of neuronal identity by comparing more than 13 thousand individual scRNA-seq transcriptomes, then assess cross-dataset evidence for novel pyramidal neuron and cortical interneuron subtypes identified by scRNA-seq. We find that 24/45 cortical interneuron subtypes and 10/48 pyramidal neuron subtypes have evidence of replication in at least one other study. Identifying these putative replicates allows us to re-analyze the data for differential expression and provide lists of robust candidate marker genes. Across tasks we find that large sets of variably expressed genes can identify replicable cell types and subtypes with high accuracy, indicating many of the transcriptional changes characterizing cell identity are pervasive and easily detected.
Introduction

Single cell RNA-sequencing (scRNA-seq) has emerged as an important new technology enabling the dissection of heterogeneous biological systems into ever more refined cellular components. One popular application of the technology has been to try to define novel cell subtypes within a given tissue or within an already refined cell class, as in the lung (Treutlein et al.). A central open question is whether the cell types defined in these studies replicate across datasets.

In order to answer this, we turned to the issue of cell diversity in the brain, a prime target of scRNA-seq as neuron diversity is critical for construction of the intricate, exquisite circuits underlying brain function. The heterogeneity of brain tissue makes it particularly important that results be assessed for replicability, while its popularity as a target of study makes this goal particularly feasible. Because a primary aim of neuroscience has been to derive a taxonomy of cell types (Ascoli et al., 2008), already more than twenty single cell RNA-seq experiments have been performed using mouse nervous tissue (Poulin et al., 2016). Remarkable strides have been made to address fundamental questions about the diversity of cells in the nervous system, including efforts to describe the cellular composition of the cortex and hippocampus (Tasic et al., 2016; Zeisel et al., 2015), to exhaustively discover the subtypes of bipolar neurons in the retina (Shekhar et al., 2016), and to characterize similarities between human and mouse midbrain development (La Manno et al., 2016). In spite of this wealth of data, there have been few attempts to compare, validate and substantiate cell type transcriptional profiles across scRNA-seq datasets, and no systematic or formal method has been developed for accomplishing this task.

To address this gap in the field, we propose a simple, supervised framework, MetaNeighbor (meta-analysis via neighbor voting), to assess how well cell type-specific transcriptional profiles replicate across datasets. Our basic rationale is that if a cell type has a biological identity rooted in the transcriptome then knowing its expression features in one dataset will allow us to find cells of the same type in another dataset. We make use of the cell type labels supplied by data providers, and assess the correspondence of cell types across datasets by taking the following approach (see schematic, Figure 1):

1) First, we build a multi-dataset network: for each gene set of interest, we measure the correlations between all pairs of cells, yielding a cell-cell similarity network that spans all datasets.

2) Next, we do cross-dataset validation: we hide all cell type labels ('identity') for one dataset at a time. This dataset will be used as our test set. Cells from all other datasets remain labeled, and are used as the training set.

3) Finally, we predict the cell type labels of the test set: we use a neighbor voting algorithm to predict the identity of the held-out cells based on their similarity to the training data.

Conceptually, this resembles approaches for the validation of sample clustering (Dudoit et al., 2002; Kapp and Tibshirani, 2007) but it has been adapted to operate from within a supervised learning framework. This permits both systematic scoring and carefully defined control experiments to investigate the data features that drive high performance. Our implementation is extremely fast and robust to technical differences between experiments; because prediction is performed only within an individual dataset at a time, we are able to keep many aspects of technical variation constant. This essentially controls for any dataset-specific effects that would otherwise swamp the subtler cell identity signal. The method provides a score that indicates the degree to which a cell type replicates for each gene set that is tested. This means that MetaNeighbor doubles as a low-tech 'feature selection tool' that we can use to identify the transcriptional features that are most discriminative between cell types. By comparing the scores returned from using Gene Ontology (GO) functions ("functional gene sets") or sets of randomly chosen genes ("random gene sets"), we can determine whether co-expression of specific gene sets is characteristic of particular cell types, and thus important for cell function or identity.

We evaluate cell identity by taking sequential steps according to the basic taxonomy of brain cells: first classifying neurons vs. non-neuronal cells across eight single cell RNA-seq studies, then classifying cortical inhibitory neurons vs. excitatory neurons, and for our final step, aligning interneuron and pyramidal cell subtypes across three studies. Critically, we discover that almost any sufficiently large and highly variable set of genes can be used to distinguish between cell types, suggesting that cell identity is widely represented within the transcriptome. Furthermore, we find that cross-dataset analysis of pyramidal neurons results in a broad definition of cortical vs. hippocampal types, and find evidence for the replication of five layer-restricted subtypes. In contrast, we find that cortical interneuron subtypes show clear lineage-specific structure, and we readily identify 11 subtypes that replicate across datasets, including Chandelier cells and five novel subtypes defined by transcriptional clustering in previous work. Meta-analysis of differential expression across these highly replicable cortical interneuron subtypes revealed evidence for canonical marker genes such as parvalbumin and somatostatin, as well as new candidates which may be used for improved molecular genetic targeting, and to understand the diverse phenotypes and functions of these cells.
Assessing neuronal identity with MetaNeighbor
We aimed to measure the replicability of cell identity across tasks of varying specificity. Broadly, these are divided into tasks where we are recapitulating known cell identities, and ones where we are measuring the replicability of novel cell identities discovered in recent research. The former class of task is the focus of this subsection: first, assessing how well we could distinguish neurons from non-neuronal cells ("task one"), and next assessing the discriminability of excitatory and inhibitory neurons ("task two"). As detailed in the methods, MetaNeighbor outputs a performance score for each gene set and task. This score is the mean area under the receiver operator characteristic curve (AUROC) across all folds of cross-dataset validation, and it can be interpreted as the probability that we will rank a positive higher than a negative (e.g. neuron vs. non-neuronal cell, when using neurons as the positive label set) based on the expression of a set of genes. This varies between 0 and 1, with 1 being perfect classification, 0.5 meaning that we have performed as well as if we had randomly guessed the cell's identity, and 0.9 or above being extremely high. Comparison of scores across gene sets allows us to discover their relative importance for defining cell identity.

As described above, in task one we assessed how well we could identify neurons and non-neuronal cells across eight datasets with a total of 13928 cells (Table S1). Although this was designed to be fairly simple, we were surprised to find that AUROC scores were significantly higher than chance for all gene sets tested, including all randomly chosen sets (AUROC all sets = 0.78 ± 0.1, Figure 2A). Reassuringly, a bootstrapped sampling of the datasets showed a trend toward increased performance with the inclusion of additional training data, indicating that we are recognizing an aggregate signal across datasets (Figure S1). However, the significant improvement of random sets over the null means that prior knowledge about gene function is not required to differentiate between these cell classes. Randomly chosen sets of genes have decidedly non-random expression patterns that enable discrimination between cell types.

Task two aimed to assess how well we could discriminate between cortical excitatory and inhibitory neurons. Consistent with previous results, we saw that AUROC scores were significantly higher than chance (AUROC = 0.69 ± 0.1, Figure 2B), suggesting that transcriptional differences are likely to be encoded in a large number of genes.

Consistent with the view that a large fraction of transcripts are useful for determining cell identity, we found a positive dependency of AUROC scores on gene set size, regardless of whether genes within the sets were randomly selected or shared some biological function (Figure 2C). This was further supported by a comparison of scores for task one using 100 sets of either 100 or 800 randomly chosen genes. AUROC score distributions and means were significantly different, with sets of 100 genes having lower scores but higher variability in performance, whereas sets of 800 genes were more restricted in variance and gave higher performance on average (Figure 2D, AUROC_100 = 0.80 ± 0.05, AUROC_800 = 0.90 ± 0.03, p < 2.2E-16, Wilcoxon rank sum test).
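To make this scoring concrete, the following minimal Python sketch computes an AUROC from a vector of scores and binary labels via the Mann-Whitney U statistic. This is an illustrative re-implementation added for this edit, not the authors' code (which is in R), and the example values are hypothetical.

    import numpy as np

    def auroc(scores, labels):
        # Probability that a randomly chosen positive outranks a randomly
        # chosen negative (ties are ignored here for brevity).
        scores = np.asarray(scores, dtype=float)
        labels = np.asarray(labels, dtype=int)
        ranks = scores.argsort().argsort() + 1  # 1-based ascending ranks
        n_pos = labels.sum()
        n_neg = labels.size - n_pos
        u = ranks[labels == 1].sum() - n_pos * (n_pos + 1) / 2
        return u / (n_pos * n_neg)

    print(auroc([0.9, 0.8, 0.3, 0.1], [1, 1, 0, 0]))  # 1.0, perfect ranking
    print(auroc([0.9, 0.1, 0.8, 0.3], [1, 1, 0, 0]))  # 0.5, chance level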
The variability in performance observed while keeping set size constant suggests that even in random sets, there are transcriptional features that contribute to cell identity. We delved into this further by comparing AUROC scores across gene sets chosen based on their mean expression, as we have previously shown that this is a critical factor to control for in evaluating single cell gene co-expression (Crow et al., 2016). We performed task one again using expression-level based gene sets and found a strong positive relationship between expression level and our ability to classify cells (Figure 2E, r_s = 0.9).

These results provide evidence that MetaNeighbor can readily identify cells of the same type across datasets, without relying on specific knowledge of marker genes. In these two examples, all cells could be classified as one of two types, making this a binary classification task. We find that a gene set's size and mean expression level are the key features that allow for cell type discrimination in this setting.

We next turned to cortical interneuron subtypes. Two recent studies defined interneuron subtypes through unsupervised clustering of scRNA-seq data (16 subtypes in one and 23 in the other), and the authors of the later paper compared their outcomes by looking at the expression of a handful of marker genes, which yielded mixed results: a small number of cell types seemed to have a direct match but for others the results were more conflicting, with multiple types matching to one another, and others having no match at all. Here we aimed to more quantitatively assess the similarity of their results, and compare them with our own data, which derives from phenotypically characterized sub-populations; i.e., not from unsupervised expression clustering (see Table S2 for sample information).

MetaNeighbor relies on coordinated variation in expression level to detect cell identity, which means that genes with high variability are particularly useful. Our preceding binary classifications showed that genes with high mean expression were more likely to have variation that allowed MetaNeighbor to learn cell identities. In the following analyses, we are examining both rare and common cell types across datasets. In this case, the mean expression level of marker genes should be a proxy for cell incidence: we can expect that the marker expression for a more abundant type would have a higher mean expression. Since variance scales with expression, the most highly variable genes in the dataset would likely only be discriminative for the abundant type. Because we would like to be able to identify both abundant and rare cell types, we select the genes with the highest variance at each mean expression level.

We identified 638 genes with high variability given their expression levels (detailed in Methods) and these were used as a 'high variability gene set' to measure AUROC scores between each pair of cells across datasets. When AUROCs were measured using all genes, we saw that clustering was subject to strong lab-specific effects (Figure S2). In contrast, the use of variable genes reproduced the known subtype structure, with major branches for the three main subtypes, Pv, Sst and Htr3a.

To examine how the previously identified interneuron subtypes are represented across the three studies, we tested the similarity of each pair of subtypes both within and across datasets using the high variability gene set.
For each genetically-targeted interneuron type profiled by Paul et al., we found at least one corresponding subtype from the other two studies, which were defined by having a mean AUROC score across training/testing folds >0.95 (Figure 3). This includes Chandelier cells, a subtype that could not be definitively identified by either Tasic or Zeisel. Using our reciprocal testing and training protocol we find that the Tasic_Pvalb Cpne5 subtype is likely to correspond to Chandelier cells (AUROC = 0.99). In addition, expanding our criteria to include all reciprocal best matches in addition to those with ID scores >0.95, we found correspondence among five subtypes that were assessed only in the Tasic and Zeisel data (Table S3), without requiring manual gene curation. Because we quantify the similarity among types we can prioritize matches, and use these as input to MetaNeighbor for further evaluation.

In the above, we identified overlaps using a single gene set. To assess cell identification more broadly, we ran MetaNeighbor with these new across-dataset subtype labels, measuring predictive validity across all gene sets in GO (Figure 3A, far right). The distribution of AUROC scores varied across subtypes but we found that the score from the high variability gene set was representative of overall trends, with high performing groups showing higher mean AUROC scores over many gene sets. As detailed in the previous section, we note that AUROC scores are sensitive both to the number of training samples (n) and to underlying data features (e.g., transcriptome complexity), which complicates direct comparison of ID score distributions. Both the high mean AUROCs across all putative replicate subtypes (>0.6), and the similarity of maximum performance suggest that distinctive gene co-expression can be observed in each subtype (max AUROC = 0.92 ± 0.04). As with previous tasks, we found little difference in average AUROCs using functional gene sets compared to random sets (mean AUROC_Random = 0.67 ± 0.06, mean AUROC_GO = 0.68 ± 0.1).

These results indicate that highly variable gene sets can be used alongside pairwise testing and training as a heuristic to identify replicable subtypes.
Investigating pyramidal neuron subtypes using MetaNeighbor
The heterogeneity of pyramidal neurons is undisputed, but the organizing principles are still debated, with some suggesting that identity is discrete and modular (Habib et al., 2016; Zeisel et al., 2015) and others purporting that identities are more likely to be described by expression gradients or spectra (Cembrowski et al., 2016). With MetaNeighbor we are able to quantitatively assess the degree to which pyramidal subtypes defined by scRNA-seq replicate across diverse datasets. If cell types are discrete and modular, we would expect to see sharp differences, with some types showing very strong similarity to one another, and strong dissimilarities to other types.
To compare pyramidal neuron scRNA-seq datasets we permuted through all combinations of subtypes as testing and training data based on a set of 743 genes with high variability given their expression level (subtypes listed in Table S2). This was the same procedure that was used for cortical interneurons and while there were similar numbers of subtypes in total, a smaller fraction corresponded across datasets (10/48, ~21%), yielding five putative subtypes (Figure 3B). The AUROC score heatmap was generally less modular than the heatmap of interneuron scores. The most prominent feature was that types from the hippocampus and cortex tended to cluster separately from one another. Within each region-specific cluster some layer- or area-specific clustering was observed but it was not completely consistent. Particular discrepancy was observed between the cortical layer 5 subtypes, which showed more similar AUROC score profiles to the hippocampal subtypes than to other deep layer types (Tasic L5b_Cdh13, L5_Chrna6, L5b_Tph). Note that these were also the same subtypes that Tasic et al. found no match for in their marker gene analysis. We suggest that the inclusion of additional datasets may help to resolve this inconsistency.

We assessed the five putative subtypes using MetaNeighbor. All subtypes were significantly discernible compared to the null (Figure 3B) and, as with the interneuron subtypes, AUROC scores from the high variability gene set were well correlated with mean performance across all of GO (3888 gene sets). In line with previous tasks, we found that functional gene sets performed equally to random gene sets (mean AUROC_Random = 0.71 ± 0.08, AUROC_GO = 0.70 ± 0.09).
Comparing gene set performance across tasks
Finally, we compared gene set results from the 11 replicate interneuron subtypes and the 5 pyramidal neuron subtypes. In agreement with our previous results, we found that the top groups were all related to neuronal function, which is unsurprising given the large size of these gene sets and their likelihood of expression and variation in these cells (Figure 3C). AUROCs were highly correlated across tasks (r ~ 0.76), with slightly higher performance for identifying interneuron types compared to pyramidal types (Figure 3D). The linearity of the trend across all scores suggests that fundamental data features, like mean expression level and set size, underlie the differential discriminative value of gene sets. The high performance across many sets (mean AUROC ~ 0.7) also supports the notion that cell identity is encoded promiscuously across the transcriptome, and is not restricted to a small set of functionally important genes.
Identifying subtype-specific genes
ScRNA-seq experiments often seek to define marker genes for novel subtypes. Though ideally marker genes are perfectly discriminative with respect to all cells, in practice marker genes are often contextual and defined relative to a particular out-group. Here we aimed to identify possible marker genes that would allow discrimination among interneuron subtypes or pyramidal neuron subtypes. For each of our identified replicate subtypes we generated a ranked list of possible marker genes by performing one-tailed, non-parametric differential expression analysis within each study for all subtypes (e.g., Int1 vs. all other interneurons in the Zeisel study, Int2 vs. all interneurons, etc.) and combining p-values for replicated types using Fisher's method (Table S4). Figure 4A shows the FDR adjusted p-values for the top candidates based on fold change for the ten replicated interneuron subtypes with overlapping differential expression patterns. Figure 4B shows the same for the two pyramidal neuron subtypes with overlapping differential expression patterns. The majority of these genes have previously been characterized as having some degree of subtype- or layer-specific expression; for example, we readily identify genes that were used for the Cre-driver lines in the Tasic and Paul studies (Sst, Pvalb, Vip, Cck, Htr3a, Ctgf). Even though we filtered for genes with high fold changes, we see that many genes are differentially expressed in more than one subtype. Notably, considerable [...]

We also identify some novel candidates, including Ptn, or pleiotrophin, which is significantly more expressed in the three Nos1-expressing subtypes than in the others (Figure 4B). It is thus expected to be discriminative of Nos1-positive neurons compared to other interneuron types. We validated Ptn expression with in situ hybridization and we show clear expression in neurons that are positive for both Sst and Nos1 (Figure 4C). Ptn is a growth factor, and we suggest that its expression may be required for maintaining the long-range axonal connections that characterize these cells. These cells are well described by current markers; however, this approach is likely to be of particular value for novel subtypes that lack markers, allowing researchers to prioritize genes for follow-up by assessing robustness across multiple data sources.
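As a sketch of this meta-analytic procedure (an assumed Python translation for illustration; the study's own analysis was done in R, and the dataset names below are placeholders), each gene is tested within each study with a one-sided Wilcoxon rank-sum test and the per-study p-values are combined with Fisher's method:

    import numpy as np
    from scipy.stats import ranksums, combine_pvalues

    def meta_marker_pvalue(expr_by_study, subtype_masks, gene_idx):
        # expr_by_study: study name -> (genes x cells) expression array
        # subtype_masks: study name -> boolean mask of cells in the subtype
        pvals = []
        for study, expr in expr_by_study.items():
            in_type = subtype_masks[study]
            # one-sided: is the gene expressed higher within the subtype?
            _, p = ranksums(expr[gene_idx, in_type],
                            expr[gene_idx, ~in_type],
                            alternative='greater')
            pvals.append(p)
        _, meta_p = combine_pvalues(pvals, method='fisher')  # Fisher (1925)
        return meta_p

Benjamini-Hochberg adjustment of the combined p-values (e.g., with statsmodels' multipletests) and a log2 fold change filter would then be applied across genes, per the thresholds described in the Methods.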
Discussion

Single-cell transcriptomics promises to have a revolutionary impact by enabling comprehensive sampling of cellular heterogeneity; nowhere is this variability more profound than within the brain, making it a particular focus of both single-cell transcriptomics and our own analysis into its replicability. The substantial history of transcriptomic analysis and meta-analysis gives us guidance about bottlenecks that will be critical to consider in order to characterize cellular heterogeneity. The most prominent of these is laboratory-specific bias, likely deriving from the adherence to a strict set of internal standards, which may filter for some classes of biological signal (e.g., poly-A selection) or induce purely technical grouping (e.g., by sequencing depth). Because of this, it is imperative to be able to align data across studies and determine what is replicable. In this work, we have provided a formal means of determining replicable cell identity by treating it as a quantitative prediction task. The essential premise of our method is that if a cell type has a distinct transcriptional profile within a dataset, then an algorithm trained from that dataset will correctly identify the same type within an independent dataset.

The currently available data allowed us to draw a number of conclusions. We validated the discrete identity of eleven interneuron subtypes, and described replicate transcriptional profiles to prioritize possible marker genes, including Ptn, a growth factor that is preferentially expressed in Sst Chodl cells. We performed a similar assessment for pyramidal neurons but found less correspondence among datasets, suggesting that additional data will be required to determine whether there is evidence for discrete pyramidal neuron types. One major surprise of our analysis is the degree of replicability in the current data. Our AUROC scores are exceptionally high, particularly when considered in the context of the well-described technical confounds of single-cell data. We suspect this reflects the fundamental nature of the biological problem we are facing: discrete cell types can be identified by their transcriptional profiles, and the biological clarity of the problem overcomes technical variation.

This is further suggested by our result that cell identity has promiscuous effects within transcriptional data. While in-depth investigation of the most salient gene functions is required to characterize cell types, to simply identify cell types is relatively straightforward. This is necessarily a major factor in the apparent successes of unsupervised methods in determining novel cell types and suggests that cell type identity is clearly defined by transcriptional profiles. [...] Gene sets show more correlated expression within than across types, and variation across types is likely to be accounted for by simple, important factors, like cell size. This is not to say that more detailed characterization of cell types is not necessary: understanding the differences between cells and how they work will require focused investigation into the precise molecular players that are differentially utilized. However, we hope that this helps to demonstrate that the variations on dimension reduction and clustering methods in single cell RNA-seq are 'working', inevitably by taking advantage of this very clear signal.

In this work we opted to use the subtype or cluster labels provided by the original authors, in essence to characterize both the underlying data as well as current analytic practices. However, this has limitations where studies cluster to different levels of specificity. For example, the Tasic paper defines multiple Parvalbumin subtypes but the Zeisel and Paul work do not. Our method makes it extremely easy to identify highly overlapping types at the levels defined by each author, facilitating downstream work to validate the sub-clusters through meta-analysis and at the bench. Given the known noisiness of single-cell expression and the complex and idiosyncratic character of approaches taken to assessing it, the degree of replicability that we see is much higher than could have been expected were there not simple explanations for the derived clusters from individual laboratories. Our work shows that with additional data, comprehensive evaluation and replication is likely to be quantitatively straightforward, making it possible to have high confidence in derived cell sub-types quite rapidly. As this additional data is generated, our approach can provide consistent updates of the field-wide consensus.

The simplicity of our method makes it unlikely to be biased toward the exact cell identity tasks assessed here. For example, because of the method's reliance on relative ranks, it is almost entirely immune to normalization as a potential confound. On the one hand, this limits our sensitivity to detect real signals of some type, but this cost is more than offset by the robustness of the signals identified. Its simplicity also means that it is scalable, and readily admits to the incorporation of data from individual labs in their ongoing work. Ultimately we hope that by defining what is replicable clearly, MetaNeighbor will allow future studies involving cell-cell comparisons to build on a strong foundation toward a comprehensive delineation of cell types.
Animals, manual cell sorting and scRNA-seq
Mice were bred and cared for in accordance with animal husbandry protocols at Cold Spring Harbor Laboratory, with access to food and water ad libitum and a 12 hour light-dark cycle. [...] preparation kit (7-11 cycles of PCR). Libraries were size-selected with SPRISelect magnetic beads (Agencourt) and sequenced with paired-end 101bp reads using an Illumina HiSeq. PolyA-primed reads were mapped to the mouse reference genome (mm9) with Bowtie (v0.12.7), while paired sequences were used for varietal tag counting. A custom python script was used to map and tally sequences with unique tags for each mRNA in each cell (Crow et al., 2016). All data is available to download from GEO (accession GSE92522).
Public expression data
Data analysis was performed in R using custom scripts (github.com/maggiecrow/MetaNeighbor, 2016). Processed expression data tables were downloaded from GEO directly, then subset to genes appearing on both Affymetrix GeneChip Mouse Gene 2.0 ST array (902119) and the UCSC known gene list to generate a merged matrix containing all samples from each experiment. The mean value was taken for all genes with more than one expression value assigned. Where no gene name match could be found, a value of 0 was input. We considered only samples that were explicitly labeled as single cells, and removed cells that expressed fewer than 1000 genes with expression >0. Cell type labels were manually curated using sample labels and metadata from GEO (see Tables S1 and S2). Merged data and metadata are linked through our Github page.
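A rough pandas sketch of these merging rules (assumed for illustration; the published scripts are in R) might look like:

    import pandas as pd

    def prepare_dataset(expr, shared_genes):
        # expr: genes x cells DataFrame for one experiment, indexed by gene symbol
        expr = expr.groupby(level=0).mean()              # mean of duplicate gene rows
        expr = expr.reindex(shared_genes, fill_value=0)  # 0 where no gene name match
        genes_detected = (expr > 0).sum(axis=0)
        return expr.loc[:, genes_detected >= 1000]       # drop low-coverage cells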
Gene sets
Gene annotations were obtained from the GO Consortium 'goslim_generic' (August 2015). These were filtered for terms appearing in the GO Consortium mouse annotations 'gene_association.mgi.gz' (December 2014) and for gene sets with between 20-1000 genes, leaving 106 GO groups with 9221 associated genes. Random gene sets were generated by randomly choosing genes with the same set size distribution as GO slim. Sets of high variance genes were generated by binning data from each dataset into deciles based on expression level, then making lists of the top 25% of the most variable genes for each decile, excluding the most highly expressed bin. The high variance set was then defined as the intersect of the high variance gene lists across the relevant datasets.
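The high-variance selection could be sketched as follows (an interpretation of the description above, in Python rather than the original R, with hypothetical dataset names):

    import pandas as pd

    def high_variance_genes(expr):
        # expr: genes x cells DataFrame; bin genes into expression deciles and
        # keep the top 25% most variable genes per bin, skipping the top decile.
        mean, var = expr.mean(axis=1), expr.var(axis=1)
        decile = pd.qcut(mean, 10, labels=False, duplicates='drop')
        keep = set()
        for d in range(9):  # deciles 0-8; the most highly expressed bin is excluded
            v = var[decile == d]
            keep |= set(v[v >= v.quantile(0.75)].index)
        return keep

    # shared set: intersection across the datasets under comparison, e.g.
    # hv_genes = high_variance_genes(ds1) & high_variance_genes(ds2) & high_variance_genes(ds3)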
MetaNeighbor
All scripts, sample data and detailed directions to run MetaNeighbor in R can be found on our Github page (github.com/maggiecrow/MetaNeighbor, 2016).

The input to MetaNeighbor is a set of genes, a data matrix and two sets of labels: one set for labeling each experiment, and one set for labeling the cell types of interest. For each gene set, the method generates a cell-cell similarity network by measuring the Spearman correlation between all cells across the genes within the set, then ranking and standardizing the network so that all values lie between 0 and 1. The use of rank correlations means that the method is robust to any rank-preserving normalization (i.e., log2, TPM, RPKM). Ranking and standardizing the networks ensures that distributions remain uniform across gene sets, and diminishes the role outlier similarities can play since values are constrained.

The node degree of each cell is defined as the sum of the weights of all edges connected to it (i.e., the sum of the standardized correlation coefficients between each cell and all others), and this is used as the null predictor in the neighbor voting algorithm to standardize for a cell's 'hub-ness': cells that are generically linked to many cells are preferentially down-weighted, whereas those with fewer connections are less penalized. For each cell type assessment, the neighbor voting predictor produces a weighted matrix of predicted labels by performing matrix multiplication between the network and the binary vector (0,1) indicating cell type membership, then dividing each element by the null predictor (i.e., node degree). In other words, each cell is given a score equal to the fraction of its neighbors, including itself, which are part of a given cell type.

To test the dependency of results on the amount of training and testing data we repeated the neuron vs. non-neuronal cell discrimination task after randomly selecting between two and seven datasets ten times each. This was done for 21 representative gene sets. Means for each gene set and each number of included datasets were plotted.
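A compact numpy/scipy sketch of this neighbor-voting predictor (an assumed Python translation of the description above; the published implementation is in R) is:

    import numpy as np
    from scipy.stats import rankdata, spearmanr

    def neighbor_voting_scores(expr, is_type):
        # expr: genes x cells matrix restricted to one gene set;
        # is_type: 0/1 vector of cell type membership, with test cells set to 0.
        net = spearmanr(expr)[0]                # cell-cell Spearman correlations
        net = rankdata(net).reshape(net.shape)  # rank-standardize the network
        net = (net - net.min()) / (net.max() - net.min())  # constrain to [0, 1]
        node_degree = net.sum(axis=0)           # null predictor ('hub-ness')
        votes = net @ is_type                   # weighted votes from labeled neighbors
        return votes / node_degree              # fraction of neighbors in the type

AUROCs are then computed within the held-out dataset by asking whether these scores rank its hidden positives above its negatives.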
Identifying putative replicates
In cases where cell identity was undefined across datasets (i.e., cortical interneuron and pyramidal subtypes) we treated each subtype label as a positive for each other subtype, and assessed similarity over the high variance gene set described above. For example, Int1 from the Zeisel dataset was used as the positive (training) set, and all other subtypes were considered the test set in turn. Mean AUROCs from both testing and training folds are plotted in the heatmap in Figure 3. A stringent cut-off of mean AUROC >0.95 and/or mutual best matches across datasets identified putative replicated types for further assessment with our supervised framework (detailed above). While lowering this threshold could increase the number of subtypes with some match, we found that reciprocal top hits alone provided an upper bound on the number of replicated types (i.e., lowering the thresholds did not allow for a higher number of subtypes). New cell type labels encompassing these replicate types (e.g. a combined Sst-Chodl label containing Int1 (Zeisel), Sst Chodl (Tasic) and Sst Nos1 (Paul)) were generated for MetaNeighbor across random and GO sets, and for meta-analysis of differential expression. While only reciprocal top-hits across laboratories were used to define novel cell-types, conventional cross-validation within laboratories was performed to fill in AUROC scores across labels contained within each lab.
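In code, the matching rule reduces to scanning a subtype-by-subtype AUROC matrix for reciprocal top hits or scores above the cut-off; the sketch below is illustrative only, with hypothetical inputs:

    import numpy as np

    def putative_replicates(auroc, row_types, col_types, cutoff=0.95):
        # auroc[i, j]: mean AUROC when training on row subtype i and testing on
        # column subtype j (rows and columns come from different datasets).
        pairs = []
        for i, row_name in enumerate(row_types):
            j = int(np.argmax(auroc[i]))                   # row i's best match
            reciprocal = int(np.argmax(auroc[:, j])) == i  # also column j's best?
            if reciprocal or auroc[i, j] > cutoff:
                pairs.append((row_name, col_types[j], float(auroc[i, j])))
        return pairs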
Differential expression
For each cell type within a dataset (defined by the authors' original labeling), differential gene expression was calculated using a one-sided Wilcoxon rank-sum test, comparing gene expression within a given cell type to all other cells within the dataset (e.g., Zeisel_Int1 vs all other Zeisel interneurons). Meta-analytic p-values were calculated for each putative replicated type using Fisher's method (Fisher, 1925), then a multiple hypothesis test correction was performed with the Benjamini-Hochberg method (Benjamini and Hochberg, 1995). Top differentially expressed genes were those with an adjusted meta-analytic p-value <0.001 and with log2 fold change >2 in each dataset. All differential expression data for putative replicated subtypes can be found in Table S4.

Figure 1 - [...] the correlation between cells A and B is plotted. There is a weak correlation between these cells. On the bottom left of the panel we see the correlation between cells A and C, which are strongly correlated. By taking the correlations between all pairs of cells we can build a cell network (right), where every node is a cell and the edges represent how similar each cell is to each other cell. C - The cell network that was generated in B can be extended to include data from multiple experiments (multiple datasets). The generation of this multi-dataset network is the first step of MetaNeighbor. D - The cross-validation and scoring scheme of MetaNeighbor is demonstrated in this panel. To assess cell type identity across experiments we use neighbor voting in cross-validation, systematically hiding the labels from one dataset at a time. Cells within the hidden dataset are predicted as similar to the cell types from other datasets, using a neighbor voting formalism. Whether these scores prioritize cells as the correct type within the dataset determines the performance, expressed as the AUROC. In other words, comparative assessment of cells occurs only within a dataset, but this is based only on training information from outside that dataset. This is then repeated for all gene sets of interest.
Figure 2 - Cell type identity is widely represented in the transcriptome
A & B - Distribution of AUROC scores from MetaNeighbor for discriminating neurons from non-neuronal cells ("task one", A) and for distinguishing excitatory vs. inhibitory neurons ("task two", B). GO scores are in black and random gene set scores are plotted in gray. Dashed grey lines indicate the null expectation for correctly guessing cell identity (AUROC = 0.5). For both tasks, almost any gene set can be used to improve performance above the null, suggesting widespread encoding of cell identity across the transcriptome. C - Task one AUROC scores for each gene set are plotted with respect to the number of genes. A strong, positive relationship is observed between gene set size and AUROC score, regardless of whether genes were chosen randomly or based on shared functions. D - Distribution of AUROC scores for task one using 100 sets of either 100 or 800 randomly chosen genes. The mean AUROC score is significantly improved with the use of larger gene sets (mean_100 = 0.80 ± 0.05, mean_800 = 0.90 ± 0.03). E - Relationship between AUROC score and expression level. Task one was re-run using sets of genes chosen based on mean expression. A strong positive relationship was observed between expression level and performance (r_s ~ 0.9).
Figure 3 - Cross-dataset analysis of interneuron and pyramidal neuron diversity
A - (Left) Heatmap of AUROC scores between interneuron subtypes based on the highly variable gene kernel. Dendrograms were generated by hierarchical clustering of Euclidean distances using average linkage. Row colors indicate data origin and column colors show marker expression. Clustering of AUROC score profiles recapitulates known cell type structure, with major branches representing the Pv, Sst and Htr3a lineages. (Middle) Table of reciprocal best matches and subtype pairs with scores >0.95. (Right) Boxplots of GO performance (3888 sets) for each replicated subtype, ordered by their AUROC score from the highly variable gene set. Subtypes are labeled with the names from Tasic et al. A positive relationship is observed between AUROC scores from the highly variable set and the average AUROC score for each subtype. Mean AUROCs are all greater than chance (0.5), suggesting robust cross-dataset replication across gene sets. B - (Left) Heatmap of AUROC scores between pyramidal subtypes based on the highly variable gene kernel, clustered as in A. Row colors indicate datasets and column colors show brain region, cortical layer or hippocampal area. Clustering of AUROC score profiles shows a separation of cortical and hippocampal subtypes. (Middle) Table of reciprocal best matches. (Right) Boxplots of GO performance (3888 sets) for each replicated subtype, ordered by their AUROC score from the highly variable gene set. Subtypes are labeled by layer. A positive relationship is observed between ID scores from the highly variable set and the average AUROC for each subtype. C - The table shows the top GO terms that allow for cross-dataset subtype discrimination, listed by their mean AUROC across tasks. For both tasks, high scores are obtained for terms related to neuronal function. D - AUROC scores for each GO function are plotted, with pyramidal scores on the y-axis and interneuron scores on the x-axis. AUROCs are highly correlated across tasks (r_s ~ 0.76), suggesting limited functional specificity.

Figure 4 - [...] Subtypes are labeled by layer. B - Standardized Ptn expression is plotted across the three experiments, where each box represents an interneuron subtype. High, but variable expression is observed across the three Sst Chodl types. C - Fluorescent double in-situ of Ai14/tdTomato driven by Sst-Flp and Nos-Cre expression (green) and Ptn (red). Dotted box indicates the area shown in higher magnification on the right; arrowheads point to cells that express both transcripts.
/* Calculates the distance between the center points to determine overlap */
bool Arena::IsColliding(
ArenaMobileEntity * const mobile_e,
ArenaEntity * const other_e) {
double delta_x = other_e->get_pose().x - mobile_e->get_pose().x;
double delta_y = other_e->get_pose().y - mobile_e->get_pose().y;
double distance_between = sqrt(delta_x*delta_x + delta_y*delta_y);
return
(distance_between <= (mobile_e->get_radius() + other_e->get_radius()));
} |
from sys import stdin, stdout


def main():
    n = int(stdin.readline())  # element count per the input format; not otherwise needed
    a = sorted(map(int, stdin.readline().split()))  # player A's numbers
    b = sorted(map(int, stdin.readline().split()))  # player B's numbers
    x, y = 0, 0  # running scores for A and B
    turn = 1     # 1 -> A to move, 0 -> B to move
    while a or b:
        if turn:
            # A greedily takes its own largest number, or discards B's
            # largest if that is bigger.
            if a and (not b or a[-1] >= b[-1]):
                x += a.pop()
            elif b:
                b.pop()
        else:
            # B plays the symmetric strategy.
            if b and (not a or b[-1] >= a[-1]):
                y += b.pop()
            elif a:
                a.pop()
        turn ^= 1
    stdout.write("%d" % (x - y))
main()
|
Have you ever thought it would be fun to be a bull in a china shop?
If so, you might be interested in a new business that is scheduled to open this spring in St. Paul’s Hamline-Midway neighborhood.
Customers at the Break Room will be handed a baseball bat or a sledge hammer or a frying pan and then shown into a windowless room with some fragile objects: vases, lamps, dishes, electronic equipment.
Then you just start whaling away.
The “recreational destruction” venue is the brainchild of Minneapolis resident Theresa Purcell, who also created the Twin Cities’ long-running Trash Film Debauchery movie series and does special effects for the Soap Factory’s Haunted Basement.
“I’ve kind of always liked smashing things, even when I was a kid,” said Purcell, 32.
She said she has used a sledge hammer to deconstruct a television set.
“It was fun. It was a good release,” she said. “I figured other people would like to do the same thing.”
Purcell said she plans to market her business as a stress reliever. She said she might offer discounts to people who just stopped smoking. Or to new parents.
Want to break something after a break-up? This might be the place for you.
Purcell said even people who aren’t angry seem attracted to the idea. She said she’s got some inquiries about bachelor party events.
“It feels good and fun to smash things,” she said. “My grandma’s into this.”
Breaking stuff has a similar effect as exercising, Purcell said.
“It makes me feel better,” she said.
Purcell said she plans to open her “you buy it, you break it” business in late May or early June at a space in Can Can Wonderland, an artist-designed indoor mini-golf course being planned at a former can factory building at 755 N. Prior Ave.
She said she’s also planning a fund-raising event at the Soap Factory, the Minneapolis art exhibition space, on April 30.
Purcell said customers will have to be 18 or older and will be outfitted with face masks, overalls, gloves and shoe covers. There will be a selection of objects you can purchase to smash to smithereens.
A plate might cost $1 or an old chair $3. But if you want to re-create the laser printer beat down from the movie “Office Space,” that might cost $15 to $20.
Purcell said she expects customers would buy about five objects for a 20-minute whack session.
She said she’ll be sourcing unwanted glass, pottery and ceramic pieces from thrift stores. Obsolete electronics gear will come from Tech Dump, a recycling service also located in the Midway area.
Cleaning up and recycling of the broken fragments will be part of the service.
“Customers just get to go hog wild and smash things – we take care of the rest,” according to Purcell.
The destruction room will be equipped with high-definition cameras so you can buy pictures or videos of yourself bashing a keyboard to pieces, she said.
“We want people to have a nice keepsake if they want,” she said.
There will also be an audio system so you can program a soundtrack for breaking stuff. Want to smash plates to Smashing Pumpkins? You can do it.
Purcell said she’d eventually like to expand the business to include outdoor events to allow people to demolish bigger items like old cars. She’d also like to create a mobile break room inside a truck. It would park on a downtown street like a food truck to allow office workers to blow off some steam during lunch hour.
Purcell isn’t the first to come up with business based on breaking stuff.
In recent years, similar venues have started up in Dallas, Toronto and Novi Sad, Serbia, with names like the Rage Room or the Anger Room.
Russell Chastain opened the Smash Shack with his wife about two years ago in his garage in Jacksonville, N.C.
“We offer a guilt-free smashing environment for parties, date nights, anger therapy, missed anniversaries, or just because. We would much rather people take out their anger here in a safe manner rather than in unhealthy ways and unsafe places,” according to the Smash Shack website. “Great therapy is writing and drawing on the plates and glasses before smashing!!!!!!!”
Chastain said a typical date night outing would involve smashing 30 items for $20.
Chastain said customers include people on team-building exercises, people who are undergoing marriage counseling, and people who have been sent by their therapists. He said about 80 percent of his customers are women. Chastain said he gets a call or an email nearly every day from someone else who is interested in opening up a similar business.
Shawn Baker, who in December started a smashing room company in Houston called Tantrums, said her customers have ranged from a chiropractor to bikers to stay-at-home moms. They pay $25 for five minutes, $35 for 10 minutes or $50 for 15 minutes to use lead pipes or golf clubs to tee off on dishes, mirrors, crock pots and toilets while listening to everything from heavy metal to classical music.
The idea that expressing hostility can have a cathartic or beneficial effect reaches back to thinkers ranging from Aristotle to Freud.
But according to a 2007 study, more recent research has shown that venting anger is at best ineffective and in some cases harmful.
“Expressing your anger doesn’t relieve any aggressive tendencies; if anything, venting makes it worse,” according to a study by researchers at the University of Arkansas, University of Michigan and Florida State University.
“The good feeling that follows venting anger is likely to reinforce venting and violence,” the study found.
One of the study’s authors, Brad Bushman, a psychology professor now at Ohio State University, admits that breaking stuff is fun.
“It’s an adrenaline rush. People enjoy doing it, especially if it’s relatively safe,” he said. But “just because something feels good, that doesn’t mean it’s effective or healthy or it works.”
Bushman’s study found that test subjects who were insulted in experiments and then allowed to vent their frustration by pounding nails or hitting a punching bag ended up more hostile and aggressive after the experience.
Bushman’s research suggests that expressing anger and aggression teaches people how to behave more aggressively.
“Aggression begets aggression. That’s the take home message the research shows,” said Jeffrey Lohr, a University of Arkansas psychologist.
Bushman and Lohr say that anger can be dissipated faster by breathing or relaxation exercises.
But Baker, the owner of Tantrums, said, “You don’t have to be frustrated or mad to come here. You come because it’s fun.”
“I’m not doing this as anger management,” Purcell said. “This is recreation. This is fun. There certainly is a release in smashing things.” |
import minus80 as m80  # assumed import: the original snippet references an m80 namespace without one


def remove(dtype, name, raw):
    # Remove the named dataset from cloud storage; 'raw' is forwarded to the API.
    cloud = m80.CloudData()
    cloud.remove(dtype, name, raw)
import { Injectable } from "@nestjs/common";
import { ConfigService } from "@nestjs/config";
@Injectable()
export class MailService {
constructor(
private configService: ConfigService,
) { }
async sendMail(email_address: string, body: string, subject: string) {
        const AWS = require('aws-sdk');
        AWS.config.update({ region: this.configService.get("AMAZON_REGION") });
        const params = {
Destination: {
ToAddresses: [
email_address
]
},
Message: {
Body: {
Html: {
Charset: "UTF-8",
Data: `<u>${body}</u>`
}
},
Subject: {
Charset: 'UTF-8',
Data: subject
}
},
Source: "<EMAIL>",
};
        const sendPromise = new AWS.SES({ apiVersion: '2010-12-01' }).sendEmail(params).promise();
        sendPromise
            .then((data) => {
                console.log(data.MessageId);
            })
            .catch((err) => {
                console.error(err, err.stack);
            });
}
} |
////////////////////////////////////////////////////////////////////////////////
//
// File: ProcessInterpPtsToPts.cpp
//
// For more information, please see: http://www.nektar.info/
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Interpolate field to a series of specified points.
//
////////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <string>
using namespace std;
#include <boost/core/ignore_unused.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
#include <FieldUtils/Interpolator.h>
#include <LibUtilities/BasicUtils/ParseUtils.h>
#include <LibUtilities/BasicUtils/Progressbar.hpp>
#include <LibUtilities/BasicUtils/SharedArray.hpp>
#include <LibUtilities/BasicUtils/PtsIO.h>
#include <LibUtilities/BasicUtils/CsvIO.h>
#include "ProcessInterpPtsToPts.h"
namespace Nektar
{
namespace FieldUtils
{
ModuleKey ProcessInterpPtsToPts::className =
GetModuleFactory().RegisterCreatorFunction(
ModuleKey(eProcessModule, "interpptstopts"),
ProcessInterpPtsToPts::create,
"Interpolates a set of points to another, requires fromfld and "
"fromxml to be defined, a line, plane or block of points can be "
"defined");
ProcessInterpPtsToPts::ProcessInterpPtsToPts(FieldSharedPtr f) : ProcessModule(f)
{
m_config["topts"] = ConfigOption(
false, "NotSet", "Pts file to which interpolate field");
m_config["line"] = ConfigOption(
false, "NotSet", "Specify a line of N points using "
"line=N,x0,y0,z0,z1,y1,z1");
m_config["plane"] = ConfigOption(
false, "NotSet", "Specify a plane of N1 x N2 points using "
"plane=N1,N2,x0,y0,z0,z1,y1,z1,x2,y2,z2,x3,y3,z3");
m_config["box"] = ConfigOption(
false, "NotSet", "Specify a rectangular box of N1 x N2 x N3 points "
"using a box of points limited by box="
"N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax");
m_config["clamptolowervalue"] =
ConfigOption(false, "-10000000", "Lower bound for interpolation value");
m_config["clamptouppervalue"] =
ConfigOption(false, "10000000", "Upper bound for interpolation value");
m_config["defaultvalue"] =
ConfigOption(false, "0", "Default value if point is outside domain");
m_config["cp"] =
ConfigOption(false, "NotSet",
"Parameters p0 and q to determine pressure coefficients");
}
ProcessInterpPtsToPts::~ProcessInterpPtsToPts()
{
}
void ProcessInterpPtsToPts::Process(po::variables_map &vm)
{
ASSERTL0(m_f->m_fieldPts != LibUtilities::NullPtsField,
"Should have a PtsField for ProcessInterpPtsToPts.");
ASSERTL0(m_f->m_comm->GetSize() == 1,
"ProcessInterpPtsToPts not implemented in parallel.");
// Move m_f->m_fieldPts
LibUtilities::PtsFieldSharedPtr oldPts = m_f->m_fieldPts;
m_f->m_fieldPts = LibUtilities::NullPtsField;
// Create new fieldPts
CreateFieldPts(vm);
int nfields = m_f->m_variables.size();
for (int j = 0; j < nfields; ++j)
{
Array<OneD, NekDouble> newPts(m_f->m_fieldPts->GetNpoints());
m_f->m_fieldPts->AddField(newPts, m_f->m_variables[j]);
}
NekDouble clamp_low = m_config["clamptolowervalue"].as<NekDouble>();
NekDouble clamp_up = m_config["clamptouppervalue"].as<NekDouble>();
NekDouble def_value = m_config["defaultvalue"].as<NekDouble>();
InterpolatePtsToPts(oldPts, m_f->m_fieldPts, clamp_low,
clamp_up, def_value);
if (!boost::iequals(m_config["cp"].as<string>(), "NotSet"))
{
calcCp0();
}
}
void ProcessInterpPtsToPts::CreateFieldPts(po::variables_map &vm)
{
boost::ignore_unused(vm);
int rank = m_f->m_comm->GetRank();
int nprocs = m_f->m_comm->GetSize();
// Check for command line point specification
if (m_config["topts"].as<string>().compare("NotSet") != 0)
{
string inFile = m_config["topts"].as<string>();
if (boost::filesystem::path(inFile).extension() == ".pts")
{
LibUtilities::PtsIOSharedPtr ptsIO =
MemoryManager<LibUtilities::PtsIO>::AllocateSharedPtr(m_f->m_comm);
ptsIO->Import(inFile, m_f->m_fieldPts);
}
else if (boost::filesystem::path(inFile).extension() == ".csv")
{
LibUtilities::CsvIOSharedPtr csvIO =
MemoryManager<LibUtilities::CsvIO>::AllocateSharedPtr(m_f->m_comm);
csvIO->Import(inFile, m_f->m_fieldPts);
}
else
{
ASSERTL0(false, "unknown topts file type");
}
}
else if (m_config["line"].as<string>().compare("NotSet") != 0)
{
vector<NekDouble> values;
ASSERTL0(ParseUtils::GenerateVector(
m_config["line"].as<string>(), values),
"Failed to interpret line string");
ASSERTL0(values.size() > 2,
"line string should contain 2*Dim+1 values "
"N,x0,y0,z0,x1,y1,z1");
double tmp;
ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N is not an integer");
ASSERTL0(values[0] > 1, "N is not a valid number");
int dim = (values.size() - 1) / 2;
int npts = values[0];
// Information for partitioning
int ptsPerProc = npts / nprocs;
int extraPts = (rank < nprocs - 1) ? 0: npts % nprocs;
int locPts = ptsPerProc + extraPts;
int start = rank * ptsPerProc;
int end = start + locPts;
Array<OneD, Array<OneD, NekDouble> > pts(dim);
Array<OneD, NekDouble> delta(dim);
for (int i = 0; i < dim; ++i)
{
pts[i] = Array<OneD, NekDouble>(locPts);
delta[i] = (values[dim + i + 1] - values[ i + 1]) / (npts - 1);
}
for (int i = 0, cntLoc = 0; i < npts; ++i)
{
if (i >= start && i < end)
{
for (int n = 0; n < dim; ++n)
{
pts[n][cntLoc] = values[n+1] + i * delta[n];
}
++cntLoc;
}
}
vector<size_t> ppe;
ppe.push_back(npts);
m_f->m_fieldPts =
MemoryManager<LibUtilities::PtsField>::AllocateSharedPtr(dim,
pts);
m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsLine);
m_f->m_fieldPts->SetPointsPerEdge(ppe);
}
else if (m_config["plane"].as<string>().compare("NotSet") != 0)
{
vector<NekDouble> values;
ASSERTL0(ParseUtils::GenerateVector(
m_config["plane"].as<string>(), values),
"Failed to interpret plane string");
ASSERTL0(values.size() > 9,
"plane string should contain 4 Dim+2 values "
"N1,N2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3");
double tmp;
ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N1 is not an integer");
ASSERTL0(std::modf(values[1], &tmp) == 0.0, "N2 is not an integer");
ASSERTL0(values[0] > 1, "N1 is not a valid number");
ASSERTL0(values[1] > 1, "N2 is not a valid number");
int dim = (values.size() - 2) / 4;
Array<OneD, int> npts(2);
npts[0] = values[0];
npts[1] = values[1];
int totpts = npts[0] * npts[1];
// Information for partitioning
int ptsPerProc = totpts / nprocs;
int extraPts = (rank < nprocs - 1) ? 0: totpts % nprocs;
int locPts = ptsPerProc + extraPts;
int start = rank * ptsPerProc;
int end = start + locPts;
Array<OneD, Array<OneD, NekDouble> > pts(dim);
Array<OneD, NekDouble> delta1(dim);
Array<OneD, NekDouble> delta2(dim);
for (int i = 0; i < dim; ++i)
{
pts[i] = Array<OneD, NekDouble>(locPts);
delta1[i] = (values[2+1*dim + i] - values[2+0*dim + i])/(npts[0]-1);
delta2[i] = (values[2+2*dim + i] - values[2+3*dim + i])/(npts[0]-1);
}
for (int j = 0, cnt = 0, cntLoc = 0; j < npts[1]; ++j)
{
for (int i = 0; i < npts[0]; ++i, ++cnt)
{
if (cnt >= start && cnt < end)
{
for (int n = 0; n < dim; ++n)
{
pts[n][cntLoc] =
(values[2+n] + i * delta1[n]) *
(1.0 - j / ((NekDouble)(npts[1]-1))) +
(values[2 + 3*dim + n] + i * delta2[n]) *
( j / ((NekDouble)(npts[1]-1)));
}
++cntLoc;
}
}
}
vector<size_t> ppe;
ppe.push_back(npts[0]);
ppe.push_back(npts[1]);
m_f->m_fieldPts =
MemoryManager<LibUtilities::PtsField>::AllocateSharedPtr(dim,
pts);
m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsPlane);
m_f->m_fieldPts->SetPointsPerEdge(ppe);
}
else if (m_config["box"].as<string>().compare("NotSet") != 0)
{
vector<NekDouble> values;
ASSERTL0(ParseUtils::GenerateVector(
m_config["box"].as<string>(), values),
"Failed to interpret box string");
ASSERTL0(values.size() == 9,
"box string should contain 9 values "
"N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax");
int dim = 3;
Array<OneD, int> npts(3);
npts[0] = values[0];
npts[1] = values[1];
npts[2] = values[2];
int totpts = npts[0]*npts[1]*npts[2];
Array<OneD, Array<OneD, NekDouble> > pts(dim);
Array<OneD, NekDouble> delta(dim);
// Information for partitioning
int ptsPerProc = totpts / nprocs;
int extraPts = (rank < nprocs - 1) ? 0: totpts % nprocs;
int locPts = ptsPerProc + extraPts;
int start = rank * ptsPerProc;
int end = start + locPts;
for (int i = 0; i < dim; ++i)
{
pts[i] = Array<OneD, NekDouble>(locPts);
delta[i] = (values[4 + 2*i] - values[3 + 2*i]) / (npts[i] - 1);
}
for (int k = 0, cnt = 0, cntLoc = 0; k < npts[2]; ++k)
{
for (int j = 0; j < npts[1]; ++j)
{
for (int i = 0; i < npts[0]; ++i, ++cnt)
{
if (cnt >= start && cnt < end)
{
pts[0][cntLoc] = values[3] + i * delta[0];
pts[1][cntLoc] = values[5] + j * delta[1];
pts[2][cntLoc] = values[7] + k * delta[2];
++cntLoc;
}
}
}
}
vector<size_t> ppe;
ppe.push_back(npts[0]);
ppe.push_back(npts[1]);
ppe.push_back(npts[2]);
m_f->m_fieldPts =
MemoryManager<LibUtilities::PtsField>::AllocateSharedPtr(dim,
pts);
m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsBox);
m_f->m_fieldPts->SetPointsPerEdge(ppe);
vector<NekDouble> boxdim;
boxdim.assign(&values[3], &values[3] + 6);
m_f->m_fieldPts->SetBoxSize(boxdim);
}
else
{
ASSERTL0(false,
"ProcessInterpPtsToPts requires line, plane or box option.");
}
}
void ProcessInterpPtsToPts::InterpolatePtsToPts(
LibUtilities::PtsFieldSharedPtr &fromPts,
LibUtilities::PtsFieldSharedPtr &toPts,
NekDouble clamp_low,
NekDouble clamp_up,
NekDouble def_value)
{
boost::ignore_unused(def_value);
ASSERTL0(toPts->GetNFields() >= fromPts->GetNFields(),
"ptField has too few fields");
int nfields = fromPts->GetNFields();
Interpolator interp;
if (m_f->m_comm->GetRank() == 0)
{
interp.SetProgressCallback(&ProcessInterpPtsToPts::PrintProgressbar,
this);
}
interp.Interpolate(fromPts, toPts);
if (m_f->m_comm->GetRank() == 0)
{
cout << endl;
}
for (int f = 0; f < nfields; ++f)
{
for (int i = 0; i < toPts->GetNpoints(); ++i)
{
if (toPts->GetPointVal(f, i) > clamp_up)
{
toPts->SetPointVal(f, i, clamp_up);
}
else if (toPts->GetPointVal(f, i) < clamp_low)
{
toPts->SetPointVal(f, i, clamp_low);
}
}
}
}
void ProcessInterpPtsToPts::calcCp0()
{
LibUtilities::PtsFieldSharedPtr pts = m_f->m_fieldPts;
int dim = pts->GetDim();
int nq1 = pts->GetNpoints();
int r, f;
int pfield = -1;
NekDouble p0,qinv;
vector<int> velid;
vector<NekDouble> values;
ASSERTL0(ParseUtils::GenerateVector(
m_config["cp"].as<string>(),values),
"Failed to interpret cp string");
ASSERTL0(values.size() == 2,
"cp string should contain 2 values "
"p0 and q (=1/2 rho u^2)");
p0 = values[0];
qinv = 1.0/values[1];
for(int i = 0; i < pts->GetNFields(); ++i)
{
if(boost::iequals(pts->GetFieldName(i),"p"))
{
pfield = i;
}
if(boost::iequals(pts->GetFieldName(i),"u")||
boost::iequals(pts->GetFieldName(i),"v")||
boost::iequals(pts->GetFieldName(i),"w"))
{
velid.push_back(i);
}
}
if(pfield != -1)
{
if(!velid.size())
{
WARNINGL0(false,"Did not find velocity components for Cp0");
}
}
else
{
WARNINGL0(false,"Failed to find 'p' field to determine cp0");
}
// Allocate data storage
Array<OneD, Array< OneD, NekDouble> > data(2);
for (f = 0; f < 2; ++f)
{
data[f] = Array< OneD, NekDouble>(nq1, 0.0);
}
for (r = 0; r < nq1; r++)
{
if(pfield != -1) // calculate cp
{
data[0][r] = qinv*(pts->GetPointVal(dim + pfield, r) - p0);
if(velid.size()) // calculate cp0
{
NekDouble q = 0;
for(int i = 0; i < velid.size(); ++i)
{
q += 0.5*pts->GetPointVal(dim + velid[i], r)*
pts->GetPointVal(dim + velid[i], r);
}
data[1][r] = qinv*(pts->GetPointVal(dim + pfield, r)+q - p0);
}
}
}
if(pfield != -1)
{
pts->AddField(data[0], "Cp");
m_f->m_variables.push_back("Cp");
if(velid.size())
{
pts->AddField(data[1], "Cp0");
m_f->m_variables.push_back("Cp0");
}
}
}
void ProcessInterpPtsToPts::PrintProgressbar(const int position,
const int goal) const
{
LibUtilities::PrintProgressbar(position, goal, "Interpolating");
}
}
}
|
#pragma once
#include "D3D12Core.h"
namespace Azura {
namespace D3D12 {
class D3D12ScopedSampler
{
public:
void Create(const SamplerDesc& desc, const Log& log_D3D12RenderSystem);
const D3D12_SAMPLER_DESC& GetDesc() const;
private:
D3D12_SAMPLER_DESC m_desc{};
};
} // namespace D3D12
} // namespace Azura
package knowledge;
import java.util.ArrayList;
import java.util.Iterator;
import nlp.Vocabulary;
import nlp.WordSet;
/**
* This class implements the must-set used in GK-LDA.
*/
public class MustSet implements Iterable<String> {
public WordSet wordset = null;
// public double weight = 1.0;
public MustSet() {
wordset = new WordSet();
}
public MustSet(ArrayList<String> wordstrList) {
wordset = new WordSet(wordstrList);
}
/**
* Construct a singleton must-set.
*/
public MustSet(String wordstr) {
ArrayList<String> wordstrList = new ArrayList<String>();
wordstrList.add(wordstr);
wordset = new WordSet(wordstrList);
}
/**
* Get a must-set from a line, e.g., {price, cheap, expensive}.
*/
public static MustSet getMustSetFromALine(String line, Vocabulary vocab) {
WordSet wordset = new WordSet();
line = line.replace("{", "");
line = line.replace("}", "");
String[] strSplits = line.split("[\\s,]");
for (String split : strSplits) {
String wordstr = split.trim();
if (vocab.containsWordstr(wordstr)) {
wordset.addWord(wordstr);
}
}
MustSet mustset = new MustSet();
mustset.wordset = wordset;
return mustset;
}
public String getWordstr(int index) {
return wordset.wordstrsList.get(index);
}
public int getWordIndex(String wordstr) {
return wordset.getWordIndex(wordstr);
}
public int size() {
return wordset.size();
}
@Override
public String toString() {
return "{" + wordset.toString() + "}";
}
@Override
public Iterator<String> iterator() {
return wordset.iterator();
}
}
|
/**
* crypto_akcipher_sign() - Invoke public key sign operation
*
* Function invokes the specific public key sign operation for a given
* public key algorithm
*
* @req: asymmetric key request
*
* Return: zero on success; error code in case of error
*/
static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
crypto_stats_get(calg);
ret = alg->sign(req);
crypto_stats_akcipher_sign(ret, calg);
return ret;
}
// package: services
// file: file/fileinfo.proto
import * as jspb from "google-protobuf";
import * as common_common_entity_pb from "../common/common_entity_pb";
export class FileInfo extends jspb.Message {
getIdentity(): string;
setIdentity(value: string): void;
getHash(): string;
setHash(value: string): void;
getUserIdentity(): number;
setUserIdentity(value: number): void;
getPath(): string;
setPath(value: string): void;
getName(): string;
setName(value: string): void;
getExt(): string;
setExt(value: string): void;
getSize(): number;
setSize(value: number): void;
getMime(): string;
setMime(value: string): void;
getDeleted(): boolean;
setDeleted(value: boolean): void;
getHidden(): boolean;
setHidden(value: boolean): void;
getLabel(): number;
setLabel(value: number): void;
getParent(): string;
setParent(value: string): void;
getType(): number;
setType(value: number): void;
getDirectory(): boolean;
setDirectory(value: boolean): void;
getAtime(): number;
setAtime(value: number): void;
getCtime(): number;
setCtime(value: number): void;
getMtime(): number;
setMtime(value: number): void;
getVersion(): number;
setVersion(value: number): void;
getLocking(): boolean;
setLocking(value: boolean): void;
getOp(): number;
setOp(value: number): void;
getPreview(): boolean;
setPreview(value: boolean): void;
getPreviewType(): number;
setPreviewType(value: number): void;
getFlag(): number;
setFlag(value: number): void;
getUniqueIdentity(): string;
setUniqueIdentity(value: string): void;
getShare(): boolean;
setShare(value: boolean): void;
getDownloadAddress(): string;
setDownloadAddress(value: string): void;
getUnlockTime(): number;
setUnlockTime(value: number): void;
getChildren(): number;
setChildren(value: number): void;
getChildrenTotal(): number;
setChildrenTotal(value: number): void;
serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): FileInfo.AsObject;
static toObject(includeInstance: boolean, msg: FileInfo): FileInfo.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: FileInfo, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): FileInfo;
static deserializeBinaryFromReader(message: FileInfo, reader: jspb.BinaryReader): FileInfo;
}
export namespace FileInfo {
export type AsObject = {
identity: string,
hash: string,
userIdentity: number,
path: string,
name: string,
ext: string,
size: number,
mime: string,
deleted: boolean,
hidden: boolean,
label: number,
parent: string,
type: number,
directory: boolean,
atime: number,
ctime: number,
mtime: number,
version: number,
locking: boolean,
op: number,
preview: boolean,
previewType: number,
flag: number,
uniqueIdentity: string,
share: boolean,
downloadAddress: string,
unlockTime: number,
children: number,
childrenTotal: number,
}
}
export class FileInfoListResponse extends jspb.Message {
hasParent(): boolean;
clearParent(): void;
getParent(): FileInfo | undefined;
setParent(value?: FileInfo): void;
clearDataList(): void;
getDataList(): Array<FileInfo>;
setDataList(value: Array<FileInfo>): void;
addData(value?: FileInfo, index?: number): FileInfo;
serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): FileInfoListResponse.AsObject;
static toObject(includeInstance: boolean, msg: FileInfoListResponse): FileInfoListResponse.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: FileInfoListResponse, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): FileInfoListResponse;
static deserializeBinaryFromReader(message: FileInfoListResponse, reader: jspb.BinaryReader): FileInfoListResponse;
}
export namespace FileInfoListResponse {
export type AsObject = {
parent?: FileInfo.AsObject,
dataList: Array<FileInfo.AsObject>,
}
}
export class FileInfoFilterRequest extends jspb.Message {
clearIdentityList(): void;
getIdentityList(): Array<string>;
setIdentityList(value: Array<string>): void;
addIdentity(value: string, index?: number): string;
clearTypeList(): void;
getTypeList(): Array<number>;
setTypeList(value: Array<number>): void;
addType(value: number, index?: number): number;
getDirectory(): number;
setDirectory(value: number): void;
getName(): string;
setName(value: string): void;
serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): FileInfoFilterRequest.AsObject;
static toObject(includeInstance: boolean, msg: FileInfoFilterRequest): FileInfoFilterRequest.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: FileInfoFilterRequest, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): FileInfoFilterRequest;
static deserializeBinaryFromReader(message: FileInfoFilterRequest, reader: jspb.BinaryReader): FileInfoFilterRequest;
}
export namespace FileInfoFilterRequest {
export type AsObject = {
identityList: Array<string>,
typeList: Array<number>,
directory: number,
name: string,
}
}
export class FileInfoListRequest extends jspb.Message {
getIdentity(): string;
setIdentity(value: string): void;
getUserIdentity(): number;
setUserIdentity(value: number): void;
getPath(): string;
setPath(value: string): void;
hasListInfo(): boolean;
clearListInfo(): void;
getListInfo(): common_common_entity_pb.ListInfo | undefined;
setListInfo(value?: common_common_entity_pb.ListInfo): void;
clearOrderByList(): void;
getOrderByList(): Array<common_common_entity_pb.OrderByRequest>;
setOrderByList(value: Array<common_common_entity_pb.OrderByRequest>): void;
addOrderBy(value?: common_common_entity_pb.OrderByRequest, index?: number): common_common_entity_pb.OrderByRequest;
hasFilter(): boolean;
clearFilter(): void;
getFilter(): FileInfoFilterRequest | undefined;
setFilter(value?: FileInfoFilterRequest): void;
getOp(): number;
setOp(value: number): void;
serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): FileInfoListRequest.AsObject;
static toObject(includeInstance: boolean, msg: FileInfoListRequest): FileInfoListRequest.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: FileInfoListRequest, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): FileInfoListRequest;
static deserializeBinaryFromReader(message: FileInfoListRequest, reader: jspb.BinaryReader): FileInfoListRequest;
}
export namespace FileInfoListRequest {
export type AsObject = {
identity: string,
userIdentity: number,
path: string,
listInfo?: common_common_entity_pb.ListInfo.AsObject,
orderByList: Array<common_common_entity_pb.OrderByRequest.AsObject>,
filter?: FileInfoFilterRequest.AsObject,
op: number,
}
}
|
Precoder Design for MIMO Broadcast Channels
This paper considers precoder designs for downlink MIMO broadcast channels. We present an MMSE feedback precoding scheme, analogous to the MMSE generalized decision feedback equalizer (GDFE) in the uplink MIMO multiple access channel. The proposed scheme attains the same total mean-square error as the MMSE-GDFE in the dual uplink with perfect decisions, and it achieves a sum rate very close to the sum capacity of the MIMO BC. It can also offer a significant gain over linear precoding techniques in terms of average error probability.
{-# LANGUAGE ForeignFunctionInterface, CPP #-}
module System.Win32.DDE (
initializeDde,
destroyDde,
DdeState,
DdeCallback,
ddeResultAck,
ddeResultTrue,
ddeResultFalse,
ddeXtypConnect,
ddeXtypPoke,
ddeCpWinAnsi,
queryString,
accessData,
unaccessData,
withDdeData
) where
#if defined(i386_HOST_ARCH)
# define WINDOWS_CCONV stdcall
#elif defined(x86_64_HOST_ARCH)
# define WINDOWS_CCONV ccall
#else
# error Unknown mingw32 arch
#endif
import Control.Applicative
import Control.Exception
import Control.Monad
import Data.Bits
import Data.Binary.Get
import Data.Typeable
import Data.ByteString hiding (map, putStrLn)
import Data.IORef
import System.Win32.XlParser
import System.Win32.DLL
import System.Win32.Types
import Foreign
import Foreign.C.Types
import Foreign.C.String
import Foreign.Marshal.Array
import qualified Data.ByteString.Lazy as BL
data DdeException = ApiError String
deriving (Show, Typeable)
ddeResultAck :: HANDLE
ddeResultAck = wordPtrToPtr $ bit 15
ddeResultTrue :: HANDLE
ddeResultTrue = wordPtrToPtr $ bit 1
ddeResultFalse :: HANDLE
ddeResultFalse = wordPtrToPtr $ bit 0
ddeXtypConnect :: CUInt
ddeXtypConnect = 0x1062
ddeXtypPoke :: CUInt
ddeXtypPoke = 0x4090
ddeCpWinAnsi = 1004
instance Exception DdeException
foreign import WINDOWS_CCONV "windows.h DdeInitializeW"
ddeInitialize :: LPDWORD -> FunPtr DdeCallback -> DWORD -> DWORD -> IO CUInt
foreign import WINDOWS_CCONV "windows.h DdeUninitialize"
ddeUninitialize :: DWORD -> IO BOOL
foreign import WINDOWS_CCONV "windows.h DdeCreateStringHandleW"
ddeCreateStringHandle :: DWORD -> LPSTR -> CInt -> IO HANDLE
foreign import WINDOWS_CCONV "windows.h DdeFreeStringHandleW"
ddeFreeStringHandle :: DWORD -> LPSTR -> IO HANDLE
foreign import WINDOWS_CCONV "windows.h DdeNameService"
ddeNameService :: DWORD -> HANDLE -> HANDLE -> CInt -> IO HANDLE
foreign import WINDOWS_CCONV "windows.h DdeCmpStringHandles"
ddeCmpStringHandles :: HANDLE -> HANDLE -> IO CInt
foreign import WINDOWS_CCONV "windows.h DdeQueryStringW"
ddeQueryString :: DWORD -> HANDLE -> CString -> DWORD -> CInt -> IO DWORD
foreign import WINDOWS_CCONV "windows.h DdeAccessData"
ddeAccessData :: HANDLE -> LPDWORD -> IO (Ptr CUChar)
foreign import WINDOWS_CCONV "windows.h DdeUnaccessData"
ddeUnaccessData :: HANDLE -> IO ()
foreign import WINDOWS_CCONV "wrapper"
mkCallbackPtr :: DdeCallback -> IO (FunPtr DdeCallback)
data DdeState = DdeState {
ddeInstance :: DWORD,
appName :: String,
topic :: String,
appNameHandle :: HANDLE,
topicHandle :: HANDLE,
callback :: FunPtr DdeCallback,
dataCallback :: DdeDataCallback
}
type DdeDataCallback = String -> (Int, Int, [XlData]) -> IO Bool
type DdeCallback = CUInt -> CUInt -> HANDLE -> HANDLE -> HANDLE -> HANDLE -> LPDWORD -> LPDWORD -> IO HANDLE
{-|
- Callback for DDE messages
- DdeState is wrapped in IORef, because this callback should be passed to ddeInitialize, which in turn returns DDE handle
-}
ddeCallback :: IORef DdeState -> CUInt -> CUInt -> HANDLE -> HANDLE -> HANDLE -> HANDLE -> LPDWORD -> LPDWORD -> IO HANDLE
ddeCallback state msgType format hConv hsz1 hsz2 hData dwData1 dwData2
| msgType == ddeXtypConnect = handleConnect state hsz1 hsz2
| msgType == ddeXtypPoke = handlePoke state hsz1 hData
| otherwise = return nullHANDLE -- Do not handle other messages, they are boring
where
handleConnect state hsz1 hsz2 = do
myDdeState <- readIORef state
maybeAppName <- queryString myDdeState 256 hsz2
case maybeAppName of
Just incomingAppName -> do
if incomingAppName == appName myDdeState
then do
return ddeResultTrue
else do
return ddeResultFalse
Nothing -> return ddeResultFalse
handlePoke state hsz1 hData = do
myDdeState <- readIORef state
maybeTopic <- queryString myDdeState 256 hsz1
case maybeTopic of
Nothing -> return ddeResultFalse
Just topic -> withDdeData hData (\xlData -> do
case runGetOrFail xlParser $ BL.fromStrict xlData of
Left (_, _, errmsg) -> do
return ddeResultFalse
Right (_, _, table) -> do
rc <- (dataCallback myDdeState) topic table
return $ if rc
then ddeResultAck
else ddeResultFalse )
initializeDde :: String -> String -> DdeDataCallback -> IO (IORef DdeState)
initializeDde appName topic callback = alloca (\instancePtr -> do
ddeState <- newIORef $ DdeState {
ddeInstance = 0,
appName = appName,
appNameHandle = nullHANDLE,
topic = topic,
topicHandle = nullHANDLE,
callback = nullFunPtr,
dataCallback = callback }
cb <- mkCallbackPtr (ddeCallback ddeState)
rc <- ddeInitialize instancePtr cb 0 0
instanceRaw <- peek instancePtr
atomicModifyIORef' ddeState (\state -> (state { ddeInstance = instanceRaw, callback = cb }, ()))
when (rc /= CUInt 0) $ throw $ ApiError "Unable to initialize DDE"
withCString appName (\appNameRaw -> withCString topic (\topicRaw -> do
myAppNameHandle <- ddeCreateStringHandle instanceRaw appNameRaw ddeCpWinAnsi
myTopicHandle <- ddeCreateStringHandle instanceRaw topicRaw ddeCpWinAnsi
when (myAppNameHandle == nullHANDLE || myTopicHandle == nullHANDLE) $ throw $ ApiError "Unable to create strings handles"
atomicModifyIORef' ddeState (\state -> (state { appNameHandle = myAppNameHandle, topicHandle = myTopicHandle }, ()))
rc2 <- ddeNameService instanceRaw myAppNameHandle nullPtr 1
when (rc2 == nullHANDLE) $ throw $ ApiError $ "Unable to register application name: " ++ appName
return ddeState)))
destroyDde :: DdeState -> IO ()
destroyDde state = do
freeHaskellFunPtr $ callback state
ddeUninitialize $ ddeInstance state
return ()
queryString :: DdeState -> Int -> HANDLE -> IO (Maybe String)
queryString state maxSize handle = allocaBytes maxSize (\x -> do
rc <- ddeQueryString (ddeInstance state) handle x (toEnum maxSize) ddeCpWinAnsi
if rc == 0
then return Nothing
else Just <$> peekCAString x)
accessData :: HANDLE -> IO ByteString
accessData handle = alloca (\dataSizePtr -> do
dataPtr <- ddeAccessData handle dataSizePtr
dataSize <- peek dataSizePtr
pack . map (toEnum . fromEnum) <$> peekArray (fromEnum dataSize) dataPtr)
unaccessData :: HANDLE -> IO ()
unaccessData = ddeUnaccessData
withDdeData :: HANDLE -> (ByteString -> IO a) -> IO a
withDdeData handle = bracket (accessData handle) (\_ -> unaccessData handle)
|
import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux';
import { withRouter } from 'react-router';
import { bindActionCreators, Dispatch } from 'redux';
import { OverallState } from '../../../commons/application/ApplicationTypes';
import { Chapter } from '../../../commons/application/types/ChapterTypes';
import { changeChapter, fetchChapter } from '../../../commons/workspace/WorkspaceActions';
import { DefaultChapter, DispatchProps, StateProps } from './AcademyDefaultChapter';
const mapStateToProps: MapStateToProps<StateProps, {}, OverallState> = state => ({
sourceChapter: state.workspaces.playground.context.chapter,
sourceVariant: state.workspaces.playground.context.variant
});
const mapDispatchToProps: MapDispatchToProps<DispatchProps, {}> = (dispatch: Dispatch) =>
bindActionCreators(
{
handleFetchChapter: () => fetchChapter(),
handleUpdateChapter: (chapter: Chapter) => changeChapter(chapter.chapter, chapter.variant)
},
dispatch
);
const AcademyDefaultChapterContainer = withRouter(
connect(mapStateToProps, mapDispatchToProps)(DefaultChapter)
);
export default AcademyDefaultChapterContainer;
|
import { Producto, calculateIsv } from './06-desec-Argumentos';
const carritoCompras: Producto[]= [
{
descripción: 'prod1',
pvp:100
},
{
descripción: 'prod2',
pvp:200
}
];
const [total, isv] = calculateIsv( carritoCompras );
console.log( 'Total', total );
console.log( 'ISV', isv )
|
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.setupwizardlib.annotations;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* Denotes that the class, method or field has its visibility relaxed so
* that unit tests can access it.
* <p/>
* The <code>visibility</code> argument can be used to specific what the original
* visibility should have been if it had not been made public or package-private for testing.
* The default is to consider the element private.
*/
@Retention(RetentionPolicy.SOURCE)
public @interface VisibleForTesting {
/**
* Intended visibility if the element had not been made public or package-private for
* testing.
*/
enum Visibility {
/** The element should be considered protected. */
PROTECTED,
/** The element should be considered package-private. */
PACKAGE,
/** The element should be considered private. */
PRIVATE
}
/**
* Intended visibility if the element had not been made public or package-private for testing.
* If not specified, one should assume the element originally intended to be private.
*/
Visibility visibility() default Visibility.PRIVATE;
}
package app
// Version denotes the version of the program
const Version = "0.1"
|
import Data.Char
import Data.List
(...) = (.).(.)
count = length ... filter
solve x
| count isUpper x > count isLower x = map toUpper x
| otherwise = map toLower x
main = interact solve
|
import { Component } from "@angular/core";
import { FormControl } from "@angular/forms";
interface IExampleItem {
id: string;
name: string;
icon: string;
}
@Component({
selector: "nui-combobox-v2-customize-options-example",
templateUrl: "combobox-v2-customize-options.example.component.html",
host: { class: "combobox-container" },
})
export class ComboboxV2CustomizeOptionsExampleComponent {
public icons: any[] = ["check", "email", "execute"];
public items: IExampleItem[] = Array.from({ length: 100 }).map((_, i) =>
({
id: `value-${i}`,
name: $localize `Item ${i}`,
icon: this.getRandomIcon(),
}));
public comboboxControl = new FormControl();
public displayFn(item: IExampleItem): string {
return item?.name || "";
}
private getRandomIcon() {
return this.icons[Math.round(Math.random() * 2)];
}
}
|
def resume(self, **kwargs):
if 'upload_id' in kwargs:
self.manifest["uploadId"] = kwargs['upload_id']
kwargs.pop('upload_id')
if self.manifest["uploadId"] is None:
raise ValueError("Cannot resume without an upload id.")
upload_kwargs = {}
if 'progress_callback' in kwargs:
upload_kwargs['progress_callback'] = kwargs['progress_callback']
kwargs.pop('progress_callback')
has_next_page = True
while has_next_page:
response = self.object_storage_client.list_multipart_upload_parts(self.manifest["namespace"],
self.manifest["bucketName"],
self.manifest["objectName"],
self.manifest["uploadId"],
**kwargs)
parts = self.manifest["parts"]
for part in response.data:
part_index = part.part_number - 1
if -1 < part_index < len(parts):
manifest_part = parts[part_index]
if manifest_part["size"] != part.size:
raise ValueError('Cannot resume upload with different part size. Parts were uploaded with a part size of {} MiB'.format(part.size / MEBIBYTE))
manifest_part["etag"] = part.etag
manifest_part["opc_md5"] = part.md5
elif part_index >= len(parts):
raise ValueError('There are more parts on the server than parts to resume, please check the upload ID.')
has_next_page = response.has_next_page
kwargs['page'] = response.next_page
self.upload(**upload_kwargs)
/**
* Preferences property listener bound to this table with a weak reference to avoid
* strong link between user preferences and this table.
*/
private static class UserPreferencesChangeListener implements PropertyChangeListener {
private WeakReference<FurnitureTable> furnitureTable;
public UserPreferencesChangeListener(FurnitureTable furnitureTable) {
this.furnitureTable = new WeakReference<FurnitureTable>(furnitureTable);
}
public void propertyChange(PropertyChangeEvent ev) {
// If furniture table was garbage collected, remove this listener from preferences
FurnitureTable furnitureTable = this.furnitureTable.get();
if (furnitureTable == null) {
((UserPreferences)ev.getSource()).removePropertyChangeListener(
UserPreferences.Property.valueOf(ev.getPropertyName()), this);
} else {
furnitureTable.repaint();
furnitureTable.getTableHeader().repaint();
}
}
}
use osm_is_area;
#[test]
fn way_circular_refs_no_way_tag() {
let end = 1252234;
let refs = vec![end, 23452234, 28373423, end];
let tags = vec![(r"waterway", r"custom")];
assert_eq!(false, osm_is_area::way(&tags, &refs));
}
#[test]
fn way_area_no_tag() {
let end = 1252234;
let refs = vec![end, 23452234, 28373423, end];
let tags = vec![(r"waterway", r"riverbank"), (r"area", r"no")];
assert_eq!(false, osm_is_area::way(&tags, &refs));
}
#[test]
fn way_no_circular_refs() {
let end = 1252234;
let refs = vec![end, 23452234, end, 28373423];
let tags = vec![(r"waterway", r"riverbank"), (r"area", r"yes")];
assert_eq!(false, osm_is_area::way(&tags, &refs));
}
#[test]
fn way_polygon_whitelist() {
let end = 1252234;
let refs = vec![end, 23452234, 28373423, end];
let tags = vec![(r"waterway", r"riverbank")];
assert_eq!(true, osm_is_area::way(&tags, &refs));
}
#[test]
fn way_polygon_blacklist() {
let end = 1252234;
let refs = vec![end, 23452234, 28373423, end];
let tags = vec![(r"natural", r"cliff")];
assert_eq!(false, osm_is_area::way(&tags, &refs));
}
#[test]
fn way_coastline_blacklist() {
let end = 1252234;
let refs = vec![end, 23452234, 28373423, end];
let tags = vec![(r"natural", r"coastline")];
assert_eq!(true, osm_is_area::way(&tags, &refs));
}
|
/* 0x6C-0x6E SHRX Shift the 16-bit Accumulator right from Imm/Abs/Ind */
void cosproc::SHRX(uint16_t src){
uint16_t shift = (Read(src) << 8) | Read(src+1);
uint16_t temp = ((r[0] << 8) | r[1]) >> shift;
r[0] = (temp & 0xFF00) >> 8;
r[1] = temp & 0x00FF;
st[0] = temp == 0;
}
Man riding a motorcycle (Shutterstock)
In a parking lot in Salem, Oregon, a 59-year-old man pulled a pistol on two 17-year-old boys on Sunday and told them to “get ready to die.”
According to the Oregonian, however, a motorcyclist whizzed by at just the right time and knocked the weapon from the gunman’s hands without ever stopping or identifying himself.
On Sunday, the teenagers were crossing Center Street in Salem when Edward Francis West shouted at them from his truck to get out of the road.
The boys shouted back and West pulled into a parking lot and confronted them, slapping one of the teenagers in the face.
West then went to his truck and retrieved a pistol, which he aimed at the boys and said, “Get ready to die!”
Then, according to Marion County Sheriff’s Department records, a man on a green motorcycle rode up and used his helmet to knock the gun out of West’s hands, giving the boys time to run away.
Neither West nor the boys got a good look at the motorcyclist, who has not come forward.
The sheriff’s office did not release the boys’ names, but said that West was taken into custody and charged with harassment, menacing, assault and unlawful use of a weapon.
[hat-tip to the NYDN]
/*
Fibonacci search is an efficient search algorithm based on:
--> divide and conquer principle that can find an element in the given sorted array.
Algorithm --->
Let the length of given array be n [0...n-1] and the element to be searched be x.
Then we use the following steps to find the element with minimum steps:
1) Find the smallest Fibonacci number greater than or equal to n.
Let this number be c [ Fibonacci number].
Let the two Fibonacci numbers preceding it be a and b respectively.
While the array has elements to be checked:
-> Compare x with the last element of the range covered by a.
-> If x matches, return index value
-> Else if x is less than the element, move the third Fibonacci variable two Fibonacci down,
indicating removal of approximately two-third of the unsearched array.
-> Else x is greater than the element, move the third Fibonacci variable one Fibonacci down.
Reset offset to index. Together this results into removal of approximately front one-third of the unsearched array.
Since there might be a single element remaining for comparison, check if b is '1'.
If Yes, compare x with that remaining element. If match, return index value.
From the above algorithm it is clear that if we have to search the larger section of the array,
the time taken will be greater, giving the worst case, whose complexity is O(log n).
If we find the element on the very first comparison, that is the best case, with complexity O(1).
The average case lies between the best and the worst, i.e.,
when we search the element in the smaller section of the array, and its complexity is also O(log n).
According to the algorithm we will first sort the array.
Output is based on Sorted array.
QUESTION-->
1) You are given 't' test cases to check.
2) You are given a memory space of array size, 10.
3) You are asked to enter elements in the array.
4) You can input as much elements in the array less than or equal to 10.
5) The array should be sorted if Unsorted.
6) You have to chose an element and find its position in the sorted array.
7) if element not found, print -1.
*/
#include <bits/stdc++.h>
using namespace std;
// function created to find the min value between x and y
int min(int x, int y) { return (x <= y) ? x : y; }
//function created returns index of x if present, else returns -1.
int FibonacciSearch(int arr[], int x, int n)
{
// a,b,c are variables that stores the fibonacci numbers sequentially.
int a = 0;
int b = 1;
int c = a + b;
// until c does not become equal to or greater than n , loop executes.
while (c < n)
{
a = b;
b = c;
c = a + b;
}
// Marks the eliminated range from front.
int offset = -1;
// checking if c is at valid location.
while (c > 1)
{
// i is the next probe index, clamped to the last valid array index.
int i = min(offset + a, n - 1);
//If x is greater than the value at index i, eliminate the subarray from offset to i (move one Fibonacci down).
if (arr[i] < x)
{
c = b;
b = a;
a = c - b;
offset = i;
}
//If x is less than the value at index i, eliminate the subarray after i+1 (move two Fibonacci down).
else if (arr[i] > x)
{
c = a;
b = b - a;
a = c - b;
}
else
//if element found, return index.
return i;
}
//comparing the last element with x
if (b && arr[offset + 1] == x)
return offset + 1;
// if not then return -1.
return -1;
}
int main()
{
int l;
cout << "\nEnter the number of elements in array which should be less than 10";
cin >> l;
int arr[10];
cout << "Enter elements in array";
for (int i = 0; i < l; i++)
{
cin >> arr[i];
}
//sorting the array
sort(arr, arr + l);
int n = l; // only the first l entered elements are valid, not the full array of 10
int x;
cout << "\nEnter element to be searched :";
cin >> x;
cout << "Found at index:" << FibonacciSearch(arr, x, n);
return 0;
}
/*
Test Cases:
Input 1:
7
100 90 30 15 60 120 10
30
Output 1:
2
Input 2:
5
40 60 22 10 45
22
Output 2:
1
Input 3:
2
40 60
45
Output 3:
-1
Space complexity: O(1)
Time complexity: O(log n) for the search (the initial sort takes O(n log n))
*/
|
n = int(input())
i1 = 1
i2 = 1
i3 = n - 2
if i3 % 3 == 0:
i2 = 2
i3 = i3 - 1
print("{} {} {}".format(i1, i2, i3))
|
/// Fetch children for volume and group entities.
void pqCMBContextMenuHelper::accumulateChildGeometricEntities(
QSet<vtkIdType>& blockIds, const smtk::model::EntityRef& toplevel)
{
vtkIdType bidx = -1;
if (toplevel.isVolume() && !toplevel.hasIntegerProperty("block_index"))
{
smtk::model::Faces faces = toplevel.as<smtk::model::Volume>().faces();
smtk::model::Faces::iterator fcit;
for (fcit = faces.begin(); fcit != faces.end(); ++fcit)
{
if (fcit->hasIntegerProperty("block_index") && getBlockIndex(*fcit, bidx))
blockIds.insert(bidx);
smtk::model::EntityRefs bdys = fcit->lowerDimensionalBoundaries(-1);
smtk::model::EntityRefs::const_iterator evit;
for (evit = bdys.begin(); evit != bdys.end(); ++evit)
if (evit->hasIntegerProperty("block_index") && getBlockIndex(*evit, bidx))
blockIds.insert(bidx);
}
}
else if (toplevel.isGroup())
{
smtk::model::EntityRefs members =
toplevel.as<smtk::model::Group>().members<smtk::model::EntityRefs>();
for (smtk::model::EntityRefs::const_iterator it = members.begin(); it != members.end(); ++it)
accumulateChildGeometricEntities(blockIds, *it);
}
if (toplevel.hasIntegerProperty("block_index") && getBlockIndex(toplevel, bidx))
blockIds.insert(bidx);
}
The effect of casual teaching on student satisfaction: evidence from the UK
ABSTRACT A large and increasing proportion of teaching in UK universities is being fulfilled by staff on casual, rather than permanent, contracts. This paper examines how the proportion of teaching by casual staff affected student satisfaction in 2014–15. We find that an increased proportion of casual teaching leads to lower student satisfaction, even when controlling for respondent's subject, university and faculty. This suggests a trade-off between increasing casualisation and student satisfaction, which could have implications for future student demand. These results can be generalised to the rest of the economy and highlight potential perverse effects arising from casual contracts.
Optimal measurement area determination algorithm of articulated arm measuring machine based on improved FOA
The determination of the optimal measurement area of the articulated arm measuring machine is a multi-dimensional function optimization problem under complex constraints. To realize high-precision measurement with a low-precision articulated arm measuring machine, we analyze the working principle and error sources of the measuring machine and establish the optimization target model of the optimal measurement area in this paper. We propose a method for determining the optimal measurement area of an articulated arm measuring machine based on an improved FOA. The basic FOA is improved in two main ways: the historical optimal individual and the population centroid are added to the population update, and the fruit fly individuals of each iteration are used directly as the taste concentration judgment values. This increases cooperation and information sharing among fruit fly individuals and improves the global optimization ability and stability of the algorithm. In a designated area of the measuring machine, we carry out comparative experiments on the optimization results of the improved FOA against the basic FOA, ACO, PSO, AL-SC-FOA, LGMS-FOA, and IPGS-FFO. Experimental results show that, like ACO, PSO, and IPGS-FFO, the improved FOA does not fall into a local optimum; the optimal measurement area it determines is consistent with the optimization results of the other algorithms, and it is superior to them in convergence speed and stability, so it is more suitable for determining the optimal measurement area of the articulated arm measuring machine.
Introduction
The error sources of the articulated arm coordinate measuring machine (AACMM) mainly include circular encoder measurement errors with a sinusoidal variation law, structural parameter errors, thermal deformation errors, force deformation errors, motion errors, and data acquisition system errors; they also include probe errors and measurement errors caused by improper measurement methods and measurement environment, 1 all with complex variation laws. Given the serial mechanical structure of the AACMM, each joint error is amplified along the chain, so the measurement accuracy of the measuring machine is low. 2 Placing the measured object in the optimal measuring zone of the machine, where the maximum measurement error is the minimum, allows high-precision measurement with a low-precision AACMM. 3 To determine the precise location of the optimal measuring zone, the variation law of each error component, the error transfer relations, and the comprehensive error distribution law of the measuring machine must be analyzed, and an appropriate optimization algorithm used to locate the zone precisely.
Domestic and foreign scholars have studied this issue. Qin 4 used an optical autocollimator combined with a metal polyhedral prism to measure the circular indexing errors of the six grating circular encoders of a measuring machine, matched the six encoders to determine the deviation ranges of different areas of the machine, and used the ant colony algorithm to solve for the optimal measurement area; however, ACO converges slowly and a single optimization run takes long. Jiang 5 simulated the distribution law of the maximum measurement error of the articulated arm measuring machine in the measurement space using a point-distribution and extension algorithm, and used particle swarm optimization (PSO) to optimize the error within a given area; however, the algorithm is inefficient at solving the optimal measurement area and the program runs long. Zheng 6 selected v-SVM with an RBF kernel to construct the spatial error distribution model of a flexible coordinate measuring machine, and obtained optimal measurement area models for single-point and spatial-distance measurement using support vector machine theory; however, the method is not practical, the model requires a large amount of measured data, and its measurement error does not include the angle measurement error of the circular encoders. Hu et al. 7 determined the measurement space from the structural parameters of the AACMM, divided it into several small cubic areas at equal intervals, and used an improved ant colony algorithm to find the maximum measurement error of each small area; however, the algorithm converges slowly and easily falls into a local optimum. Given the complex error sources, transfer relations, and error distribution law of the AACMM, solving for its optimal measuring zone is a high-dimensional optimization problem. Moreover, the optimal measurement zone differs across measurement conditions and must be determined in real time under the current condition, which imposes stability and real-time requirements on the determination algorithm; the algorithms above are poor in search speed and real-time performance. Many popular intelligent algorithms achieve good optimization results, such as differential evolution (DE), 8 which starts from a random initial population and therefore has wide applicability and handles nonlinearity; however, when the population is small, the fitness of newly generated populations is poor and convergence to the extreme point is difficult, so the accuracy of determining the optimal measurement area needs further improvement.
The fruit fly optimization algorithm (FOA), proposed by the Taiwanese scholar Pan 9 in 2011, is a global optimization algorithm based on simulating the foraging behavior of fruit flies. Compared with other algorithms, it has fewer control parameters, a simple principle, fast search speed, and good real-time performance, 10,11 and it has been successfully applied to many practical problems. For example, Wang et al. 12 applied an improved FOA that considers the time-delay effect of input variables, which helps select input variables, to Chinese industrial data. Wang et al. 13 predicted the remaining service life of lithium-ion batteries based on fractional Brownian motion and the FOA, using the FOA to optimize the Hurst index H of the FBM model. Peng et al. 14 applied the FOA to the determination of LSTM hyperparameters for handwriting recognition and time series prediction, and verified the effectiveness of the FOA-LSTM model experimentally. However, the FOA also has shortcomings: random blindness in search, slow convergence in later iterations, low convergence accuracy, a tendency to fall into local optima, and unsuitability for optimization problems with negative values in the domain of the independent variables. 15,16 The expected effect is difficult to achieve especially for complex problems such as multi-peak, high-dimensional, and large-scale ones, and solving the optimal measurement zone of the measuring machine is such a complex high-dimensional optimization problem. To apply the FOA, with its fast search speed and good real-time performance, to the real-time determination of the optimal measurement area of the measuring machine, an improved FOA is proposed to enhance its global optimization ability and stability, and the optimization results of different algorithms are compared and verified.
The remainder of this paper is organized as follows. Section ''Related works'' describes the related work, including the optimization model of the optimal measuring area of the measuring machine and the improved FOA optimization algorithm. Section ''Experiments and analysis'' reports the implementation details and the experimental results. The paper ends with conclusions about the proposed algorithm.
Model building
Working principle of the AACMM. The structure of the AACMM is illustrated in Figure 1. It consists of three flexible measuring arms, six movable joints, and one measuring head. Each joint of the mechanical arm is equipped with a circular encoder angle sensor. In accordance with the angle value of each joint and the lengths of the measuring arms, a computer calculates the 3D coordinates of the measured point using the measurement model of equation (1), established through the D-H method 17 (the equation is reconstructed here in the standard D-H form, which is consistent with the matrix column that survives in equation (3) below):

$$\begin{bmatrix} x \\ y \\ z \\ 1 \end{bmatrix} = \prod_{i=1}^{6} A_i \begin{bmatrix} 0 \\ 0 \\ l \\ 1 \end{bmatrix}, \quad A_i = \begin{bmatrix} \cos u_i & -\sin u_i \cos \alpha_i & \sin u_i \sin \alpha_i & l_i \cos u_i \\ \sin u_i & \cos u_i \cos \alpha_i & -\cos u_i \sin \alpha_i & l_i \sin u_i \\ 0 & \sin \alpha_i & \cos \alpha_i & d_i \\ 0 & 0 & 0 & 1 \end{bmatrix} \tag{1}$$

where $l_i$ is the shortest distance between the axes of two adjacent joint shafts, $u_i$ is the output of each joint circular encoder, $d_i$ is the joint offset (the distance intercepted on the joint axis by the adjacent links), $\alpha_i$ is the relative torsion angle between adjacent joint axes, and $l$ is the length of the measuring head of the measuring machine.
The structural parameters $l_i$, $d_i$, $\alpha_i$, $l$ of a certain type of the AACMM are calibrated through parameter identification before delivery. These parameters are constant values (the parameters of the measuring machine studied in this work are listed in Table 1).
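As a concrete illustration of equation (1), the following minimal Python sketch chains the six D-H transforms to compute the probed point. The function names and the use of NumPy are our own choices, and the parameter values would come from Table 1.

```python
import numpy as np

def dh_matrix(theta, alpha, l, d):
    # One D-H link transform A_i from equation (1).
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([
        [ct, -st * ca,  st * sa, l * ct],
        [st,  ct * ca, -ct * sa, l * st],
        [0.0,      sa,       ca,      d],
        [0.0,     0.0,      0.0,    1.0],
    ])

def measure_point(thetas, alphas, ls, ds, probe_len):
    # 3D coordinates of the probed point for one set of joint angles.
    T = np.eye(4)
    for th, al, l, d in zip(thetas, alphas, ls, ds):
        T = T @ dh_matrix(th, al, l, d)
    # The probe tip lies at distance probe_len along the final z-axis.
    return (T @ np.array([0.0, 0.0, probe_len, 1.0]))[:3]
```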
Error analysis of the AACMM. In this work, to simplify the algorithm model, only the influence of the main error source (the circular encoder angular measurement error, which is progressively amplified along the serial arm) on the measuring accuracy of the AACMM is considered. The error fitting models of the six circular encoders of a certain type of AACMM are expressed in equation (2). We measure the error of each circular encoder by combining an autocollimator with a metal polyhedral prism, establish the eccentric error model of the circular encoder, fit the error curve with a third-order Fourier series, and calculate the undetermined coefficients by the least squares method, finally obtaining the error characteristic curves of the six circular encoders shown in Figure 2; all are sinusoidal-like curves with a period of 2π and different error amplitudes. The model parameters are shown in Table 2, and the relevant model parameter data refer to Jiang. 5

$$e_i(u_i) = a_i + b_i\cos u_i + c_i\sin u_i + d_i\cos 2u_i + e_i\sin 2u_i + f_i\cos 3u_i, \quad i = 1, 2, \dots, 6 \tag{2}$$
For measuring the same point, measuing errors will be different with various measuring attitudes. Given that the AACMM can measure the point at any point in the space with numerous measuring attitudes (corresponding to different circular encoder angles and errors), the 3D space coordinate error of the measurement point exhibits a ''fruit-like'' distribution law and has a certain continuity. Thus, an optimal measurement zone with the maximum error being the minimum exists.
Algorithm model of the optimum measuring area for the AACMM. In accordance with the measurement model equation (1) and equation (4) of the AACMM, the optimal target algorithm model of the maximum measurement error of the points in each partition can be determined as: where x k min , x k max correspond to the upp-er and lower bounds of the x-axis in the opt-imization region; y k min , y k max are the upper a-nd lower bounds of the yaxis in the optimi-zation region, respectively; and z k min , z k max a-re the upper and lower bounds of the zaxis in the optimization region, correspondingly. A minus sign is added before d(u 1 , u 2 , u 3 , u 4 , u 5 , u 6 ), which are then converted to calculate the minimum value. The converted form is: Clearly, equation (6) is a constrained optimization problem. The most common method for handling constraints is the penalty function. 18 In this work, the constraints are treated using an external penalty function, which is transformed into the external penalty function and then added to the objective function. The feasible field is x À x kmin 50, x kmax À x50, y À y kmin 50, y kmax À y50, z À z kmin 50, z kmax À z50 The penalty function is: The target augmented function is P(u 1 , u 2 , u 3 , u 4 , u 5 , u 6 , s) where s is the penalty factor and takes a large positive number, andP(u 1 , u 2 , u 3 , u 4 , u 5 , u 6 ) is a penalty function. When the combination of six encoder angles of the measuring machine is substituted into the measuring model, the calculated coordinate is located in the area to be optimized, andP(u 1 , u 2 , u 3 , u 4 , u 5 , u 6 ) = 0; when the calculated coordinate is not inthe area to be optimized,P(u 1 , u 2 , u 3 , u 4 , u 5 , u 6 ) . 0, and the objective function is punished extra. A large s indicates a heavy punishment. Then, the optimization model for solving the maximum error of measuring points in each partition of the measuring machine can be rewritten as follows: Apparently, equation (10) expresses the optimization problem of complex high-dimensional functions for solving the minimum value without specific requirements for solving this problem. In this function, six independent variables (six circular encoders) are presented in equation (10). Thus, the optimization algorithm adopted must be suitable for multi-dimensional function optimization and must have an improved multidimensional variable coding scheme. Given that the algorithm for determining the optimal measurement zone of the AACMM must be embedded in the DSP subsequently, this algorithm is used to determine the optimal measurement zone of the AACMM for different measurement objects, strategies, and environments in real-time. This real-time requirement of the optimization algorithm is proposed. The six variables in equation (10) have a large range of values, and the whole range of values has positive and negative values. The candidate solution of the optimization algorithm must cover the entire domain as much as possible and coordinate the global and local optimization.
The basic FOA has the advantages of minimal control parameters, simple principle, fast search speed, and favorable real-time performance. Thus, it can be used to solve the real-time problem of the optimal measurement zone of the AACMM. However, the basic FOA is unsuitable for solving the minimum value of equation (10). Therefore, the basic FOA must be improved to make this algorithm suitable for solving this question.
Optimization algorithm Basic FOA. Fruit fly optimization algorithm (FOA) is a swarm intelligence optimization algorithm based on drosophila foraging behavior evolution. Drosophila has a keen olfactory system, which can search for food sources far away, and then find the location of food and other flies through the visual system, and fly to them. 19 The drosophila foraging behavior is applied to the field of intelligent optimization algorithm, and the fruit fly optimization algorithm is formed. The optimal solution of the specific problem is finally found through the communication between individual foraging information and group foraging information. In accordance with this characteristic of the fruit fly population, the basic steps of the FOA are presented as follows: (1) Initialize the population location, and set the population size (Sizepop) and the maximum number of iterations (maxgen).
(2) Fruit fly individuals search for food through random directions and distances.
(3) Estimate the distance (Dist) between the fruit fly and the origin, and then calculate the taste concentration judgment value S, which is the reciprocal of Dist.
(4) Substitute the taste concentration determination value (S) into the taste concentration determination function to determine the taste concentration (Smell(i)) of the current position of the fruit fly.
(5) Find the individual with the minimum taste concentration (bestSmell) and its location in the population (bestindex) (in the case of solving a minimum value).
(6) Preserve the optimal taste concentration value and the x, y coordinates, at which point the fruit flies use their visual organs to fly to that position.
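A minimal sketch of steps (1)-(6), assuming a one-dimensional taste concentration (fitness) function; the step size and the test function are illustrative choices, not values from the paper:

```python
import numpy as np

def basic_foa(fitness, sizepop=30, maxgen=100, step=1.0):
    """Minimal basic FOA for minimizing a scalar fitness function."""
    # (1) Initialize the swarm location.
    x_axis, y_axis = np.random.rand(2)
    best_smell, best_pos = np.inf, (x_axis, y_axis)
    for _ in range(maxgen):
        # (2) Random directions/distances around the swarm location.
        xs = x_axis + step * (2 * np.random.rand(sizepop) - 1)
        ys = y_axis + step * (2 * np.random.rand(sizepop) - 1)
        # (3) Distance to the origin; judgment value S = 1 / Dist.
        dist = np.sqrt(xs**2 + ys**2) + 1e-12
        s = 1.0 / dist
        # (4) Taste concentration at each candidate.
        smell = np.array([fitness(si) for si in s])
        # (5) Minimum concentration and its index.
        idx = int(np.argmin(smell))
        # (6) Keep the optimum; the swarm flies to that position.
        if smell[idx] < best_smell:
            best_smell = smell[idx]
            x_axis, y_axis = xs[idx], ys[idx]
            best_pos = (x_axis, y_axis)
    return best_smell, best_pos

# Example: minimize f(S) = (S - 0.05)**2, an arbitrary test function.
print(basic_foa(lambda s: (s - 0.05) ** 2))
```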
Improved FOA. To make the FOA suitable for solving the optimal measurement zone of the AACMM, the present work combines ideas from ACFOA (which uses the variance of the population fitness to judge whether the FOA is in a locally convergent state and, if so, applies a chaotic global search to jump out of the local extremum, improving the convergence accuracy and speed of the FOA) and LGMS-FOA20 (which introduces an inertia-weight parameter to balance global and local search, so that the algorithm emphasizes global search early in a run and local search late in a run), making the following improvements to the basic FOA21,22: (1) A reasonable multi-dimensional variable coding scheme is designed. (2) The basic FOA uses only the optimal individual information of the current population during iterative optimization, so it easily falls into a local optimal solution. To solve this problem, the information of historical optimal individuals and the population centroid is added to increase cooperation and information sharing among fruit fly individuals, thus improving the accuracy and robustness of the algorithm.
(3) The taste concentration determination value S(i) is the reciprocal of Dist(i). Generally, the value of Dist is large, so the range of S(i) is very small. When S(i) is substituted into the taste concentration determination function (here, the objective function of the optimal measuring area of the AACMM), this narrow range is likely to cause premature convergence to a local optimal solution. Furthermore, because S(i) ≥ 0 in the basic FOA, the algorithm cannot solve function optimization problems whose domains contain negative numbers, whereas the angle ranges of the six circular encoders in the target model of the optimal measurement area of the joint arm measuring machine include negative values. To solve this problem, the individual of each iteration is directly taken as the taste concentration judgment value S(i). The candidate solutions can then cover the whole function domain as far as possible, improving the global optimization ability and stability of the algorithm.
The improved FOA is used to solve the optimal measurement zone of the AACMM. The specific steps of the algorithm are as follows: (1) Initialize the location of the fruit fly population (X_axis), and set the population size (Sizepop), the maximum number of iterations (maxgen), the learning coefficients C1 and C2, and popmin1, popmax1, popmin2, and popmax2 (the angular ranges of motion of the circular encoders).
X_axis = u_imin + (u_imax − u_imin) × rand(), (i = 1, 2, 3, 4, 5, 6)   (22)
(2) Provide the fruit fly individuals with random directions and distances to search for food using the sense of smell.
X(i) = X_axis + constant × (2 × rand(1, 6) − 1)   (23)
(3) Substitute X(i) for the taste concentration determination value S(i), and execute Steps (4)-(5) in section ''Error analysis of the AACMM.'' (4) Record the optimal taste concentration value of the fruit fly individuals (bestSmell) in the current population, and retain the historical optimal fitness value and position information of every individual in P and PX, respectively. (5) Calculate the population center of mass using equations (24)-(27), and assign the value to C_gX.
where P_gX is the optimal individual position in the population history. Figure 4 shows the flow chart of the improved FOA optimization algorithm; a hedged code sketch of the update step follows.
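Because equations (24)-(27) are not reproduced in this excerpt, the following sketch of the improved update step is an assumed illustration: the way the historical best position P_gX, the population centroid C_gX, and the learning coefficients C1 and C2 are combined is plausible but hypothetical, while the direct use of candidates as S(i), allowing negative values, follows improvement (3) above.

```python
import numpy as np

def improved_update(X, P, PX, C1=1.5, C2=1.5, constant=1.0):
    """One assumed update step of the improved FOA.

    X:  (sizepop, 6) encoder-angle candidates, used directly as S(i);
    P:  historical best fitness per individual;
    PX: (sizepop, 6) historical best positions.
    """
    sizepop, dim = X.shape
    PgX = PX[int(np.argmin(P))]          # best position in the history
    CgX = PX.mean(axis=0)                # population centroid (assumed form)
    r1 = np.random.rand(sizepop, 1)
    r2 = np.random.rand(sizepop, 1)
    steps = constant * (2 * np.random.rand(sizepop, dim) - 1)
    # Candidates are drawn toward both the historical best and the centroid;
    # unlike the basic FOA, they may take negative values.
    return X + steps + C1 * r1 * (PgX - X) + C2 * r2 * (CgX - X)
```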
Experiments and analysis
Algorithm test for solving the optimal measurement zone
The space −100 mm ≤ X ≤ 100 mm, −100 mm ≤ Y ≤ 100 mm, and −100 mm ≤ Z ≤ 100 mm of the AACMM is divided into 1000 small cubic spaces with a 20 mm interval along each 3D coordinate, and the minimum of equation (10) in each small cubic space (i.e. the maximum measurement error of each small 3D space) is solved. Comparing the maximum measurement errors of the small 3D spaces, the 3D space with the smallest maximum measurement error is the optimal measurement area of the AACMM in the specified measurement space.
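The partition enumeration itself is mechanical; the following sketch generates the 1000 sub-cube bounds that would be passed, one at a time, to the penalized objective of equation (10):

```python
import numpy as np

# [-100, 100] mm on each axis with a 20 mm grid gives 10 x 10 x 10 = 1000
# sub-cubes, each defining the bounds of one optimization region.
edges = np.arange(-100, 101, 20)
partitions = [(x0, x0 + 20, y0, y0 + 20, z0, z0 + 20)
              for x0 in edges[:-1] for y0 in edges[:-1] for z0 in edges[:-1]]
assert len(partitions) == 1000
```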
To test the performance of the algorithm, the PSO algorithm, LGMS-FOA, ACO algorithm, and improved FOA are used to solve the maximum measurement error determined by the objective function of equation (10) in the small 3D space 80 mm ≤ X ≤ 100 mm, −100 mm ≤ Y ≤ −80 mm, and −40 mm ≤ Z ≤ −20 mm in the specified measurement space of the measuring machine. These algorithms are implemented in MATLAB R2016b and run on a PC with an Intel(R) Xeon(R) E31270 CPU and 16 GB RAM.
To compare the influence of the maximum and minimum particle velocity parameters on PSO performance, V_max = 0.5, 1.5, 2.5, 3.5 and V_min = −0.5, −1.5, −2.5, −3.5 are used to solve the maximum measurement error determined by the objective function of equation (10), with the other parameters set as follows: particle size Sizepop = 100, maximum iteration number Maxgen = 1000, and penalty factor s = 1000. The optimization results are shown in Figure 5. The analysis shows that the convergence speed of the algorithm is fastest when V_max = 3.5 and V_min = −3.5. Therefore, the parameters of the PSO algorithm are set as follows: particle size Sizepop = 100, maximum iteration number Maxgen = 1000, maximum and minimum velocities V_max = 3.5 and V_min = −3.5, penalty factor s = 1000, inertia weight selected by equation (31), acceleration factors c1 = c2 = 1.49445 (chosen according to Liu et al.23 and a comparison of the optimization performance of several candidate acceleration factor values), and ω_s = 0.9, ω_e = 0.4.24
ω(k) = ω_s − (ω_s − ω_e)(k/T_max)²   (31)
Figure 5. PSO algorithm optimization results at different particle velocities.
Similarly, after several rounds of parameter adjustment and analysis, the optimal parameter settings of each algorithm are obtained. The LGMS-FOA parameters are: population size Sizepop = 100, maximum iteration number Maxgen = 1000, search coefficient n = 0.005, penalty factor s = 1000, initial weight ω_0 = 1, and weight adjustment coefficient α = 0.95.
In accordance with the algorithm parameters set above, the PSO algorithm, LGMS-FOA, ACO algorithm, and improved FOA are each run 100 times. The single-run optimization results of the four algorithms are depicted in Figure 6, and the test results are compared in Table 4.
To test the performance of the improved FOA further, the basic FOA, LGMS-FOA, IPGS-FFO algorithm, AL-SC-FOA, and improved FOA are used to solve the maximum measurement error determined by the objective function of equation (10) in the small 3D space 80 mm ≤ X ≤ 100 mm, −100 mm ≤ Y ≤ −80 mm, and −40 mm ≤ Z ≤ −20 mm in the specified measurement space.
Table 3. Parameter setting of four algorithms.
The parameters of the IPGS-FFO algorithm are set as follows: population size Sizepop = 20, 50, 80, 100; maximum iteration number Maxgen = 1000; penalty factor s = 1000; and other algorithm parameters as presented in the literature.25 The parameters of the AL-SC-FOA are set as follows: population size Sizepop = 20, 50, 80, 100; maximum iteration number Maxgen = 1000; penalty factor s = 1000; and other algorithm parameters as mentioned in the literature.26 The test and optimization results of the five algorithms are displayed in Table 6 and Figure 7, respectively. The specific parameter settings are shown in Table 5.
To verify the feasibility of the improved FOA further, the improved FOA and the PSO algorithm, which the project team successfully used to solve the optimal measurement area of the AACMM in an earlier stage of the project, are adopted. These algorithms are applied to solve the minimum value of equation (10) (the maximum measurement error of each of the 1000 small 3D spaces divided in section ''Algorithm test for solving the optimal measurement zone''). The optimization results of the two algorithms are then compared. The parameters of the PSO algorithm are set as follows: particle size Sizepop = 100, maximum iteration number Maxgen = 1000, maximum and minimum velocities V_max = 3.5 and V_min = −3.5, penalty factor s = 1000, inertia weight selected by equation (31), acceleration factors c1 = c2 = 1.49445, and ω_s = 0.9, ω_e = 0.4. To compare the maximum measurement errors of the small 3D spaces, some areas with large and small measurement errors are selected, and the obtained fitness values are retained to two significant figures (Table 7).
Analysis of experimental results
After testing and analyzing the various algorithms for solving the optimal measurement area of the AACMM in section ''Algorithm test for solving the optimal measurement zone,'' the following results are obtained.
Taking the small measuring space 80 mm ≤ X ≤ 100 mm, −100 mm ≤ Y ≤ −80 mm, −40 mm ≤ Z ≤ −20 mm of the measuring machine as an example, as shown in Figure 6 and Table 4, when the PSO algorithm, LGMS-FOA, ACO algorithm, and improved FOA are used to determine the optimal measuring area: (1) the LGMS-FOA falls into a local optimal solution, with an optimal value of −0.063 mm. The PSO algorithm, ACO algorithm, and improved FOA do not fall into the local optimal solution, and their optimal value is −0.11 mm (i.e. the maximum measurement error in the small 3D space is 0.11 mm). The results of the three algorithms that did not fall into the local optimal solution, each run 100 times independently, indicate that the accuracy and stability of the improved FOA are superior to those of the ACO and PSO algorithms.
As can be seen from Figure 7 and Table 6, the spread of the results of the improved FOA over 100 independent runs is smaller than that of the IPGS-FFO, which shows that the stability of the improved FOA is better than that of the IPGS-FFO. (5) The comparison of the basic FOA and the improved FOA implies that the performance of the basic FOA can be effectively enhanced through the method proposed in this work.
In the measuring space −100 mm ≤ X ≤ 100 mm, −100 mm ≤ Y ≤ 100 mm, −100 mm ≤ Z ≤ 100 mm of the measuring machine, the areas with larger and smaller measurement errors and the corresponding measurement errors obtained by the different algorithms are shown in Table 7. The results show that the improved FOA and the PSO algorithm can both solve the error distribution of the articulated arm coordinate measuring machine, and the optimal measurement area in the measurement space is 0 mm ≤ X ≤ 20 mm, −20 mm ≤ Y ≤ 0 mm, 0 mm ≤ Z ≤ 20 mm, where the maximum measurement error is 0.056 mm. Compared with the overall maximum measurement error of 0.11 mm, the accuracy is improved by 50.9%. Therefore, the feasibility and effectiveness of the improved FOA in determining the optimal measurement area of the articulated arm measuring machine are demonstrated again.
def testDomainFromDictWrongSubset(self):
domainDict = self.domain.to_dict()
someslice = self.domain.datamap.subset(0, 0, 20, 20)
with self.assertRaises(Exception) as e:
DomainMap.from_dict(domainDict, someslice)
self.assertEqual(str(e.exception),
"Datamap file does not match Datamap file used to create DomainMap.") |
Evaluating in-home water purification methods for communities in Texas on the border with Mexico.
This study evaluated user preferences among three alternative in-home water treatment technologies suitable for households relying on trucked water in El Paso County, Texas, which is on the border with Mexico. The three technologies were: chlorination of household storage tanks, small-scale batch chlorination, and point-of-use ultraviolet disinfection. Fifteen households used each of the three technologies in succession for roughly four weeks each during April through June of 2004. Data were collected on treated water quality, and a face-valid survey was administered orally to assess user satisfaction with the technologies on a variety of attributes. Treatment with a counter-top ultraviolet disinfection system received statistically significantly higher ratings for taste and odor and likelihood of future use than the other two approaches. Ultraviolet disinfection and small-scale batch chlorination both received significantly higher ratings for ease of use than did storage tank chlorination. Over-chlorination was a common problem with both batch chlorination and storage tank chlorination. Water quality in the households using trucked water is now higher than was reported by a previous study, suggesting that water quality has improved over time. |
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include "gamma.h"
#include <stdbool.h>
#include <string.h>
int main() {
/*
scenario: test_random_actions
uuid: 560382901
*/
/*
random actions, total chaos
*/
gamma_t* board = gamma_new(10, 11, 7, 10);
assert( board != NULL );
assert( gamma_move(board, 1, 5, 9) == 1 );
assert( gamma_move(board, 1, 9, 6) == 1 );
assert( gamma_free_fields(board, 1) == 108 );
char* board507891017 = gamma_board(board);
assert( board507891017 != NULL );
assert( strcmp(board507891017,
"..........\n"
".....1....\n"
"..........\n"
"..........\n"
".........1\n"
"..........\n"
"..........\n"
"..........\n"
"..........\n"
"..........\n"
"..........\n") == 0);
free(board507891017);
board507891017 = NULL;
assert( gamma_move(board, 3, 9, 3) == 1 );
assert( gamma_move(board, 3, 8, 2) == 1 );
assert( gamma_move(board, 4, 1, 1) == 1 );
assert( gamma_move(board, 5, 4, 9) == 1 );
assert( gamma_golden_possible(board, 5) == 1 );
assert( gamma_move(board, 6, 0, 8) == 1 );
assert( gamma_busy_fields(board, 6) == 1 );
assert( gamma_move(board, 7, 7, 6) == 1 );
assert( gamma_move(board, 7, 8, 2) == 0 );
assert( gamma_move(board, 1, 2, 8) == 1 );
assert( gamma_move(board, 1, 4, 3) == 1 );
assert( gamma_move(board, 2, 3, 5) == 1 );
assert( gamma_move(board, 3, 9, 0) == 1 );
assert( gamma_move(board, 3, 5, 6) == 1 );
assert( gamma_move(board, 4, 4, 9) == 0 );
assert( gamma_move(board, 5, 0, 4) == 1 );
char* board913830858 = gamma_board(board);
assert( board913830858 != NULL );
assert( strcmp(board913830858,
"..........\n"
"....51....\n"
"6.1.......\n"
"..........\n"
".....3.7.1\n"
"...2......\n"
"5.........\n"
"....1....3\n"
"........3.\n"
".4........\n"
".........3\n") == 0);
free(board913830858);
board913830858 = NULL;
assert( gamma_move(board, 6, 10, 5) == 0 );
assert( gamma_move(board, 6, 4, 3) == 0 );
assert( gamma_golden_possible(board, 6) == 1 );
assert( gamma_move(board, 7, 0, 4) == 0 );
assert( gamma_move(board, 7, 1, 4) == 1 );
assert( gamma_move(board, 1, 5, 2) == 1 );
assert( gamma_move(board, 2, 2, 6) == 1 );
assert( gamma_move(board, 2, 6, 2) == 1 );
assert( gamma_move(board, 3, 10, 5) == 0 );
assert( gamma_move(board, 4, 7, 9) == 1 );
assert( gamma_move(board, 4, 7, 3) == 1 );
assert( gamma_move(board, 5, 3, 3) == 1 );
assert( gamma_move(board, 6, 7, 2) == 1 );
assert( gamma_move(board, 6, 5, 5) == 1 );
assert( gamma_move(board, 7, 7, 5) == 1 );
assert( gamma_move(board, 7, 4, 1) == 1 );
assert( gamma_move(board, 1, 8, 9) == 1 );
assert( gamma_move(board, 2, 7, 10) == 1 );
char* board833638102 = gamma_board(board);
assert( board833638102 != NULL );
assert( strcmp(board833638102,
".......2..\n"
"....51.41.\n"
"6.1.......\n"
"..........\n"
"..2..3.7.1\n"
"...2.6.7..\n"
"57........\n"
"...51..4.3\n"
".....1263.\n"
".4..7.....\n"
".........3\n") == 0);
free(board833638102);
board833638102 = NULL;
assert( gamma_move(board, 3, 8, 5) == 1 );
assert( gamma_move(board, 4, 7, 2) == 0 );
assert( gamma_move(board, 5, 5, 4) == 1 );
assert( gamma_move(board, 5, 9, 5) == 1 );
assert( gamma_move(board, 6, 5, 4) == 0 );
assert( gamma_move(board, 6, 2, 10) == 1 );
assert( gamma_move(board, 7, 10, 8) == 0 );
assert( gamma_move(board, 1, 0, 4) == 0 );
assert( gamma_golden_possible(board, 1) == 1 );
assert( gamma_move(board, 2, 6, 0) == 1 );
assert( gamma_move(board, 2, 4, 9) == 0 );
assert( gamma_move(board, 3, 9, 8) == 1 );
assert( gamma_move(board, 3, 0, 6) == 1 );
assert( gamma_golden_possible(board, 3) == 1 );
assert( gamma_move(board, 4, 6, 3) == 1 );
assert( gamma_move(board, 4, 3, 10) == 1 );
assert( gamma_move(board, 5, 5, 1) == 1 );
assert( gamma_move(board, 5, 8, 0) == 1 );
assert( gamma_free_fields(board, 5) == 72 );
assert( gamma_move(board, 6, 8, 6) == 1 );
assert( gamma_move(board, 6, 8, 10) == 1 );
assert( gamma_move(board, 7, 8, 7) == 1 );
char* board298575589 = gamma_board(board);
assert( board298575589 != NULL );
assert( strcmp(board298575589,
"..64...26.\n"
"....51.41.\n"
"6.1......3\n"
"........7.\n"
"3.2..3.761\n"
"...2.6.735\n"
"57...5....\n"
"...51.44.3\n"
".....1263.\n"
".4..75....\n"
"......2.53\n") == 0);
free(board298575589);
board298575589 = NULL;
assert( gamma_move(board, 1, 2, 8) == 0 );
assert( gamma_move(board, 2, 3, 5) == 0 );
assert( gamma_move(board, 2, 3, 0) == 1 );
char* board301112954 = gamma_board(board);
assert( board301112954 != NULL );
assert( strcmp(board301112954,
"..64...26.\n"
"....51.41.\n"
"6.1......3\n"
"........7.\n"
"3.2..3.761\n"
"...2.6.735\n"
"57...5....\n"
"...51.44.3\n"
".....1263.\n"
".4..75....\n"
"...2..2.53\n") == 0);
free(board301112954);
board301112954 = NULL;
assert( gamma_move(board, 3, 8, 3) == 1 );
assert( gamma_move(board, 4, 7, 5) == 0 );
assert( gamma_move(board, 4, 7, 5) == 0 );
assert( gamma_move(board, 5, 4, 6) == 1 );
assert( gamma_move(board, 5, 1, 4) == 0 );
assert( gamma_move(board, 6, 1, 6) == 1 );
assert( gamma_move(board, 7, 9, 2) == 1 );
assert( gamma_move(board, 7, 5, 1) == 0 );
assert( gamma_move(board, 1, 6, 3) == 0 );
assert( gamma_move(board, 2, 9, 6) == 0 );
assert( gamma_move(board, 2, 8, 4) == 1 );
assert( gamma_move(board, 3, 3, 8) == 1 );
assert( gamma_move(board, 4, 3, 9) == 1 );
assert( gamma_busy_fields(board, 4) == 6 );
assert( gamma_move(board, 5, 5, 7) == 1 );
assert( gamma_move(board, 5, 8, 10) == 0 );
char* board979071177 = gamma_board(board);
assert( board979071177 != NULL );
assert( strcmp(board979071177,
"..64...26.\n"
"...451.41.\n"
"6.13.....3\n"
".....5..7.\n"
"362.53.761\n"
"...2.6.735\n"
"57...5..2.\n"
"...51.4433\n"
".....12637\n"
".4..75....\n"
"...2..2.53\n") == 0);
free(board979071177);
board979071177 = NULL;
assert( gamma_move(board, 6, 2, 4) == 1 );
assert( gamma_move(board, 6, 9, 10) == 1 );
assert( gamma_move(board, 7, 8, 6) == 0 );
assert( gamma_busy_fields(board, 7) == 6 );
assert( gamma_move(board, 1, 4, 6) == 0 );
assert( gamma_move(board, 2, 8, 7) == 0 );
assert( gamma_move(board, 3, 6, 6) == 1 );
assert( gamma_move(board, 3, 3, 9) == 0 );
assert( gamma_move(board, 4, 7, 5) == 0 );
assert( gamma_move(board, 5, 5, 0) == 1 );
assert( gamma_busy_fields(board, 5) == 10 );
assert( gamma_busy_fields(board, 6) == 9 );
assert( gamma_move(board, 7, 1, 2) == 1 );
assert( gamma_move(board, 1, 6, 8) == 1 );
assert( gamma_move(board, 3, 8, 2) == 0 );
assert( gamma_move(board, 3, 1, 1) == 0 );
assert( gamma_move(board, 4, 2, 0) == 1 );
assert( gamma_golden_possible(board, 4) == 1 );
assert( gamma_move(board, 5, 4, 8) == 1 );
assert( gamma_move(board, 5, 7, 8) == 1 );
assert( gamma_move(board, 6, 1, 3) == 1 );
assert( gamma_move(board, 6, 3, 1) == 1 );
assert( gamma_move(board, 7, 5, 0) == 0 );
assert( gamma_move(board, 7, 0, 4) == 0 );
assert( gamma_golden_possible(board, 7) == 1 );
assert( gamma_move(board, 1, 10, 1) == 0 );
assert( gamma_move(board, 1, 3, 5) == 0 );
assert( gamma_busy_fields(board, 1) == 7 );
assert( gamma_move(board, 2, 7, 7) == 1 );
assert( gamma_golden_possible(board, 2) == 1 );
assert( gamma_move(board, 3, 3, 0) == 0 );
assert( gamma_busy_fields(board, 3) == 10 );
assert( gamma_free_fields(board, 3) == 48 );
assert( gamma_move(board, 4, 4, 9) == 0 );
assert( gamma_move(board, 4, 5, 10) == 1 );
assert( gamma_move(board, 5, 6, 0) == 0 );
assert( gamma_move(board, 6, 9, 2) == 0 );
assert( gamma_move(board, 6, 8, 1) == 0 );
assert( gamma_move(board, 7, 9, 6) == 0 );
assert( gamma_move(board, 7, 8, 5) == 0 );
assert( gamma_busy_fields(board, 7) == 7 );
assert( gamma_move(board, 1, 3, 0) == 0 );
assert( gamma_move(board, 1, 1, 2) == 0 );
assert( gamma_move(board, 2, 8, 1) == 1 );
assert( gamma_move(board, 2, 4, 2) == 1 );
assert( gamma_move(board, 3, 0, 2) == 1 );
assert( gamma_move(board, 3, 3, 3) == 0 );
assert( gamma_move(board, 4, 8, 1) == 0 );
assert( gamma_move(board, 5, 10, 4) == 0 );
assert( gamma_move(board, 5, 8, 0) == 0 );
assert( gamma_move(board, 6, 4, 3) == 0 );
assert( gamma_busy_fields(board, 6) == 11 );
assert( gamma_move(board, 7, 9, 1) == 1 );
assert( gamma_busy_fields(board, 7) == 8 );
char* board926111293 = gamma_board(board);
assert( board926111293 != NULL );
assert( strcmp(board926111293,
"..64.4.266\n"
"...451.41.\n"
"6.135.15.3\n"
".....5.27.\n"
"362.533761\n"
"...2.6.735\n"
"576..5..2.\n"
".6.51.4433\n"
"37..212637\n"
".4.675..27\n"
"..42.52.53\n") == 0);
free(board926111293);
board926111293 = NULL;
assert( gamma_move(board, 1, 7, 3) == 0 );
assert( gamma_move(board, 2, 6, 3) == 0 );
assert( gamma_busy_fields(board, 2) == 10 );
assert( gamma_move(board, 3, 2, 5) == 1 );
assert( gamma_move(board, 4, 0, 4) == 0 );
assert( gamma_move(board, 4, 6, 6) == 0 );
assert( gamma_move(board, 5, 7, 3) == 0 );
assert( gamma_move(board, 6, 1, 0) == 0 );
assert( gamma_move(board, 6, 6, 7) == 0 );
assert( gamma_move(board, 7, 0, 0) == 1 );
assert( gamma_move(board, 7, 0, 1) == 1 );
assert( gamma_move(board, 1, 10, 4) == 0 );
assert( gamma_move(board, 1, 8, 5) == 0 );
assert( gamma_move(board, 2, 2, 3) == 0 );
assert( gamma_move(board, 3, 5, 6) == 0 );
assert( gamma_move(board, 3, 4, 0) == 1 );
assert( gamma_move(board, 4, 4, 7) == 1 );
assert( gamma_move(board, 5, 6, 4) == 1 );
assert( gamma_golden_possible(board, 5) == 1 );
assert( gamma_move(board, 6, 5, 6) == 0 );
assert( gamma_move(board, 6, 3, 1) == 0 );
assert( gamma_move(board, 7, 4, 4) == 1 );
assert( gamma_move(board, 7, 4, 3) == 0 );
assert( gamma_free_fields(board, 7) == 36 );
assert( gamma_golden_move(board, 7, 10, 8) == 0 );
assert( gamma_move(board, 1, 8, 8) == 1 );
assert( gamma_move(board, 1, 3, 2) == 1 );
assert( gamma_move(board, 2, 1, 6) == 0 );
assert( gamma_move(board, 3, 9, 2) == 0 );
assert( gamma_move(board, 3, 8, 0) == 0 );
assert( gamma_free_fields(board, 3) == 10 );
assert( gamma_move(board, 5, 4, 7) == 0 );
assert( gamma_move(board, 6, 7, 0) == 0 );
assert( gamma_move(board, 6, 9, 7) == 0 );
char* board255198177 = gamma_board(board);
assert( board255198177 != NULL );
assert( strcmp(board255198177,
"..64.4.266\n"
"...451.41.\n"
"6.135.1513\n"
"....45.27.\n"
"362.533761\n"
"..32.6.735\n"
"576.755.2.\n"
".6.51.4433\n"
"37.1212637\n"
"74.675..27\n"
"7.42352.53\n") == 0);
free(board255198177);
board255198177 = NULL;
assert( gamma_move(board, 7, 9, 6) == 0 );
assert( gamma_move(board, 7, 3, 4) == 1 );
assert( gamma_move(board, 1, 3, 5) == 0 );
assert( gamma_free_fields(board, 1) == 33 );
assert( gamma_move(board, 2, 9, 0) == 0 );
assert( gamma_busy_fields(board, 2) == 10 );
assert( gamma_move(board, 3, 2, 7) == 0 );
assert( gamma_golden_possible(board, 3) == 1 );
assert( gamma_move(board, 4, 2, 2) == 1 );
assert( gamma_move(board, 5, 9, 1) == 0 );
assert( gamma_move(board, 5, 7, 10) == 0 );
assert( gamma_move(board, 6, 5, 6) == 0 );
assert( gamma_free_fields(board, 6) == 14 );
assert( gamma_move(board, 7, 9, 9) == 1 );
assert( gamma_move(board, 7, 6, 7) == 1 );
assert( gamma_golden_move(board, 7, 10, 8) == 0 );
assert( gamma_move(board, 1, 9, 2) == 0 );
assert( gamma_move(board, 2, 5, 1) == 0 );
assert( gamma_golden_possible(board, 2) == 1 );
assert( gamma_move(board, 3, 0, 7) == 1 );
assert( gamma_move(board, 4, 8, 5) == 0 );
char* board951393166 = gamma_board(board);
assert( board951393166 != NULL );
assert( strcmp(board951393166,
"..64.4.266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.6.735\n"
"5767755.2.\n"
".6.51.4433\n"
"3741212637\n"
"74.675..27\n"
"7.42352.53\n") == 0);
free(board951393166);
board951393166 = NULL;
assert( gamma_move(board, 5, 0, 1) == 0 );
assert( gamma_free_fields(board, 5) == 13 );
assert( gamma_move(board, 6, 9, 10) == 0 );
assert( gamma_move(board, 7, 8, 5) == 0 );
assert( gamma_busy_fields(board, 7) == 14 );
assert( gamma_move(board, 1, 2, 5) == 0 );
assert( gamma_move(board, 1, 1, 1) == 0 );
assert( gamma_busy_fields(board, 1) == 9 );
assert( gamma_move(board, 2, 3, 5) == 0 );
assert( gamma_move(board, 3, 6, 6) == 0 );
assert( gamma_move(board, 3, 8, 9) == 0 );
assert( gamma_move(board, 4, 8, 5) == 0 );
assert( gamma_move(board, 5, 1, 8) == 0 );
assert( gamma_move(board, 1, 4, 10) == 1 );
assert( gamma_move(board, 2, 9, 2) == 0 );
assert( gamma_move(board, 2, 7, 9) == 0 );
assert( gamma_golden_possible(board, 2) == 1 );
char* board266694915 = gamma_board(board);
assert( board266694915 != NULL );
assert( strcmp(board266694915,
"..6414.266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.6.735\n"
"5767755.2.\n"
".6.51.4433\n"
"3741212637\n"
"74.675..27\n"
"7.42352.53\n") == 0);
free(board266694915);
board266694915 = NULL;
assert( gamma_move(board, 3, 1, 6) == 0 );
assert( gamma_move(board, 3, 6, 5) == 1 );
assert( gamma_move(board, 4, 2, 0) == 0 );
assert( gamma_move(board, 5, 0, 4) == 0 );
assert( gamma_move(board, 5, 6, 1) == 1 );
assert( gamma_move(board, 6, 0, 0) == 0 );
assert( gamma_move(board, 6, 9, 2) == 0 );
assert( gamma_move(board, 7, 7, 2) == 0 );
assert( gamma_move(board, 7, 3, 4) == 0 );
assert( gamma_move(board, 1, 2, 3) == 1 );
assert( gamma_move(board, 1, 7, 5) == 0 );
assert( gamma_move(board, 2, 7, 1) == 1 );
assert( gamma_move(board, 2, 5, 2) == 0 );
char* board549787414 = gamma_board(board);
assert( board549787414 != NULL );
assert( strcmp(board549787414,
"..6414.266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board549787414);
board549787414 = NULL;
assert( gamma_move(board, 4, 0, 0) == 0 );
assert( gamma_golden_possible(board, 4) == 1 );
assert( gamma_move(board, 5, 6, 3) == 0 );
assert( gamma_move(board, 5, 4, 7) == 0 );
assert( gamma_move(board, 6, 4, 9) == 0 );
assert( gamma_move(board, 6, 6, 8) == 0 );
assert( gamma_move(board, 7, 7, 1) == 0 );
assert( gamma_move(board, 7, 3, 2) == 0 );
assert( gamma_move(board, 1, 3, 8) == 0 );
assert( gamma_move(board, 1, 7, 3) == 0 );
assert( gamma_move(board, 2, 7, 1) == 0 );
assert( gamma_move(board, 2, 6, 2) == 0 );
assert( gamma_move(board, 3, 5, 1) == 0 );
assert( gamma_move(board, 4, 4, 9) == 0 );
assert( gamma_free_fields(board, 4) == 24 );
assert( gamma_move(board, 5, 4, 9) == 0 );
assert( gamma_move(board, 5, 6, 8) == 0 );
assert( gamma_move(board, 6, 8, 9) == 0 );
assert( gamma_busy_fields(board, 6) == 11 );
assert( gamma_golden_possible(board, 6) == 1 );
assert( gamma_move(board, 7, 5, 4) == 0 );
assert( gamma_move(board, 1, 10, 6) == 0 );
assert( gamma_move(board, 2, 3, 0) == 0 );
assert( gamma_move(board, 3, 7, 3) == 0 );
assert( gamma_move(board, 4, 0, 7) == 0 );
assert( gamma_move(board, 5, 8, 4) == 0 );
assert( gamma_golden_possible(board, 5) == 1 );
assert( gamma_move(board, 6, 6, 2) == 0 );
char* board210847502 = gamma_board(board);
assert( board210847502 != NULL );
assert( strcmp(board210847502,
"..6414.266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board210847502);
board210847502 = NULL;
assert( gamma_move(board, 7, 9, 6) == 0 );
assert( gamma_busy_fields(board, 7) == 14 );
assert( gamma_move(board, 1, 5, 1) == 0 );
assert( gamma_move(board, 1, 6, 0) == 0 );
assert( gamma_busy_fields(board, 1) == 11 );
char* board282613349 = gamma_board(board);
assert( board282613349 != NULL );
assert( strcmp(board282613349,
"..6414.266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board282613349);
board282613349 = NULL;
assert( gamma_move(board, 3, 1, 0) == 0 );
assert( gamma_move(board, 3, 8, 10) == 0 );
assert( gamma_busy_fields(board, 3) == 15 );
assert( gamma_move(board, 4, 6, 10) == 1 );
assert( gamma_busy_fields(board, 4) == 11 );
assert( gamma_golden_possible(board, 4) == 1 );
assert( gamma_move(board, 5, 8, 1) == 0 );
assert( gamma_move(board, 5, 9, 6) == 0 );
assert( gamma_move(board, 6, 5, 1) == 0 );
assert( gamma_move(board, 6, 9, 2) == 0 );
assert( gamma_move(board, 7, 8, 9) == 0 );
assert( gamma_busy_fields(board, 7) == 14 );
assert( gamma_move(board, 1, 3, 0) == 0 );
assert( gamma_move(board, 2, 3, 4) == 0 );
assert( gamma_move(board, 3, 8, 9) == 0 );
assert( gamma_move(board, 4, 7, 3) == 0 );
assert( gamma_move(board, 4, 0, 8) == 0 );
assert( gamma_move(board, 5, 5, 0) == 0 );
assert( gamma_free_fields(board, 5) == 9 );
assert( gamma_move(board, 6, 5, 1) == 0 );
assert( gamma_move(board, 7, 4, 7) == 0 );
assert( gamma_move(board, 1, 7, 9) == 0 );
assert( gamma_move(board, 1, 5, 2) == 0 );
assert( gamma_free_fields(board, 1) == 7 );
assert( gamma_move(board, 2, 5, 1) == 0 );
assert( gamma_busy_fields(board, 2) == 11 );
assert( gamma_free_fields(board, 2) == 6 );
assert( gamma_golden_possible(board, 2) == 1 );
assert( gamma_golden_move(board, 2, 6, 9) == 0 );
assert( gamma_move(board, 3, 5, 1) == 0 );
assert( gamma_move(board, 3, 5, 8) == 0 );
assert( gamma_move(board, 4, 1, 6) == 0 );
assert( gamma_move(board, 4, 3, 1) == 0 );
assert( gamma_move(board, 5, 8, 1) == 0 );
assert( gamma_move(board, 6, 0, 7) == 0 );
assert( gamma_move(board, 6, 9, 7) == 0 );
assert( gamma_move(board, 7, 2, 2) == 0 );
assert( gamma_move(board, 1, 3, 5) == 0 );
assert( gamma_move(board, 1, 6, 5) == 0 );
assert( gamma_move(board, 2, 2, 8) == 0 );
assert( gamma_move(board, 3, 7, 7) == 0 );
assert( gamma_busy_fields(board, 3) == 15 );
assert( gamma_move(board, 5, 4, 9) == 0 );
assert( gamma_move(board, 5, 4, 4) == 0 );
assert( gamma_free_fields(board, 5) == 9 );
assert( gamma_golden_move(board, 5, 3, 6) == 0 );
assert( gamma_move(board, 6, 7, 1) == 0 );
assert( gamma_move(board, 7, 5, 1) == 0 );
assert( gamma_move(board, 7, 4, 10) == 0 );
assert( gamma_move(board, 1, 3, 9) == 0 );
assert( gamma_move(board, 2, 1, 2) == 0 );
assert( gamma_move(board, 2, 2, 6) == 0 );
assert( gamma_busy_fields(board, 2) == 11 );
assert( gamma_move(board, 3, 9, 0) == 0 );
assert( gamma_move(board, 3, 1, 3) == 0 );
assert( gamma_move(board, 4, 3, 5) == 0 );
assert( gamma_move(board, 5, 7, 1) == 0 );
assert( gamma_move(board, 6, 8, 5) == 0 );
assert( gamma_free_fields(board, 6) == 9 );
assert( gamma_move(board, 7, 3, 0) == 0 );
assert( gamma_busy_fields(board, 7) == 14 );
assert( gamma_move(board, 1, 4, 9) == 0 );
assert( gamma_golden_possible(board, 1) == 1 );
assert( gamma_move(board, 2, 10, 1) == 0 );
assert( gamma_busy_fields(board, 2) == 11 );
assert( gamma_move(board, 3, 9, 0) == 0 );
assert( gamma_busy_fields(board, 3) == 15 );
assert( gamma_move(board, 4, 3, 0) == 0 );
assert( gamma_move(board, 4, 8, 5) == 0 );
assert( gamma_move(board, 5, 7, 1) == 0 );
assert( gamma_move(board, 6, 3, 5) == 0 );
assert( gamma_busy_fields(board, 6) == 11 );
char* board134271069 = gamma_board(board);
assert( board134271069 != NULL );
assert( strcmp(board134271069,
"..64144266\n"
"...451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board134271069);
board134271069 = NULL;
assert( gamma_move(board, 7, 7, 2) == 0 );
assert( gamma_move(board, 7, 0, 7) == 0 );
assert( gamma_move(board, 1, 5, 0) == 0 );
assert( gamma_free_fields(board, 1) == 7 );
assert( gamma_move(board, 2, 0, 7) == 0 );
assert( gamma_move(board, 2, 5, 7) == 0 );
assert( gamma_move(board, 3, 9, 6) == 0 );
assert( gamma_busy_fields(board, 3) == 15 );
assert( gamma_move(board, 4, 1, 9) == 1 );
assert( gamma_move(board, 5, 2, 9) == 0 );
assert( gamma_move(board, 6, 7, 4) == 0 );
assert( gamma_move(board, 6, 5, 3) == 0 );
assert( gamma_golden_possible(board, 6) == 1 );
assert( gamma_move(board, 7, 5, 0) == 0 );
char* board173115778 = gamma_board(board);
assert( board173115778 != NULL );
assert( strcmp(board173115778,
"..64144266\n"
".4.451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board173115778);
board173115778 = NULL;
assert( gamma_move(board, 1, 9, 0) == 0 );
assert( gamma_move(board, 2, 0, 9) == 0 );
assert( gamma_move(board, 2, 1, 7) == 0 );
assert( gamma_move(board, 3, 6, 5) == 0 );
assert( gamma_move(board, 3, 8, 10) == 0 );
assert( gamma_move(board, 4, 5, 7) == 0 );
assert( gamma_move(board, 5, 5, 5) == 0 );
assert( gamma_move(board, 6, 7, 3) == 0 );
assert( gamma_golden_move(board, 6, 8, 8) == 0 );
assert( gamma_move(board, 7, 4, 7) == 0 );
assert( gamma_move(board, 7, 0, 8) == 0 );
assert( gamma_move(board, 1, 6, 7) == 0 );
assert( gamma_move(board, 1, 7, 8) == 0 );
assert( gamma_move(board, 2, 3, 0) == 0 );
assert( gamma_move(board, 3, 7, 9) == 0 );
assert( gamma_golden_possible(board, 3) == 1 );
assert( gamma_golden_possible(board, 4) == 1 );
assert( gamma_move(board, 5, 9, 2) == 0 );
assert( gamma_move(board, 5, 2, 3) == 0 );
char* board779194118 = gamma_board(board);
assert( board779194118 != NULL );
assert( strcmp(board779194118,
"..64144266\n"
".4.451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board779194118);
board779194118 = NULL;
assert( gamma_move(board, 6, 5, 1) == 0 );
assert( gamma_move(board, 7, 6, 3) == 0 );
char* board422921739 = gamma_board(board);
assert( board422921739 != NULL );
assert( strcmp(board422921739,
"..64144266\n"
".4.451.417\n"
"6.135.1513\n"
"3...45727.\n"
"362.533761\n"
"..32.63735\n"
"5767755.2.\n"
".6151.4433\n"
"3741212637\n"
"74.6755227\n"
"7.42352.53\n") == 0);
free(board422921739);
board422921739 = NULL;
assert( gamma_move(board, 1, 6, 9) == 1 );
assert( gamma_move(board, 2, 6, 3) == 0 );
assert( gamma_move(board, 2, 8, 4) == 0 );
assert( gamma_move(board, 3, 4, 7) == 0 );
assert( gamma_move(board, 3, 2, 0) == 0 );
assert( gamma_move(board, 4, 6, 3) == 0 );
assert( gamma_move(board, 5, 0, 9) == 0 );
assert( gamma_move(board, 5, 0, 9) == 0 );
assert( gamma_golden_move(board, 5, 10, 8) == 0 );
assert( gamma_move(board, 6, 9, 2) == 0 );
assert( gamma_move(board, 1, 7, 9) == 0 );
assert( gamma_move(board, 1, 6, 2) == 0 );
assert( gamma_busy_fields(board, 1) == 12 );
assert( gamma_move(board, 2, 9, 0) == 0 );
assert( gamma_move(board, 2, 8, 2) == 0 );
assert( gamma_move(board, 3, 3, 6) == 0 );
assert( gamma_move(board, 3, 0, 10) == 0 );
assert( gamma_busy_fields(board, 3) == 15 );
gamma_delete(board);
return 0;
}
|
// Imports reconstructed for this excerpt. The AIDL-generated interfaces
// (IHttpCommandServerService, IRegisterCallbackService,
// IHttpCommandServerServiceCallback) and the R resource class are assumed
// to come from the enclosing cellbots project.
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Environment;
import android.os.IBinder;
import android.os.RemoteException;
import android.util.Log;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;

/**
* This class provides an easy-to-use wrapper for the HttpCommandServerService.
*
* @author [email protected] (Chaitanya Gharpure)
*
*/
public class HttpCommandServerServiceManager {
public static final String EXTERNAL_STORAGE = Environment.getExternalStorageDirectory() + "";
public static final String REL_ROOT_DIR = "cellbots/files";
public static final int PORT = 8080;
public static final String LOCAL_IP = "127.0.0.1";
private static final String TAG = "HttpCommandServerServiceManager";
private IHttpCommandServerService httpServerService = null;
private IRegisterCallbackService registerCallbackService = null;
private Context context;
private HttpRequestListener requestListener = null;
private ServiceConnection serviceConn = new ServiceConnection() {
@Override
public void onServiceDisconnected(ComponentName name) {
}
@Override
public void onServiceConnected(ComponentName name, IBinder service) {
httpServerService = IHttpCommandServerService.Stub.asInterface(service);
try {
httpServerService.setRoot(REL_ROOT_DIR);
} catch (RemoteException e) {
e.printStackTrace();
}
requestListener.onConnected();
}
};
private ServiceConnection registerCallbackConn = new ServiceConnection() {
@Override
public void onServiceDisconnected(ComponentName name) {
}
@Override
public void onServiceConnected(ComponentName name, IBinder service) {
registerCallbackService = IRegisterCallbackService.Stub.asInterface(service);
try {
registerCallbackService.registerCallback(mCallback);
} catch (RemoteException e) {
Log.e(TAG, "Error registering callback.");
}
}
};
public static void copyLocalServerFiles(Context ct) {
String[] fileNames = ct.getResources().getStringArray(R.array.copy_files);
File dir = new File(EXTERNAL_STORAGE + "/" + REL_ROOT_DIR);
if (!dir.exists()) dir.mkdirs();
copyFiles(ct, fileNames, EXTERNAL_STORAGE + "/" + REL_ROOT_DIR);
}
private static void copyFiles(Context ct, String[] fileNames, String path) {
for (String name : fileNames) {
try {
copyFile(ct.getAssets().open(name), path + "/" + name);
} catch (IOException e) {
Log.e(TAG, "Error copying file " + name);
}
}
}
private static void copyFile(InputStream stream, String path) {
try {
File file = new File(path);
if (!file.getParentFile().exists()) return;
FileOutputStream op = new FileOutputStream(file);
byte[] data = new byte[1024];
while (true) {
int bytesRead = stream.read(data);
if (bytesRead == -1) break;
op.write(data, 0, bytesRead);
}
            op.close();  // close the output stream to flush and release the file
            stream.close();
} catch (IOException e) {
Log.e(TAG, "Error copying Logbot resource file.");
}
}
/**
* Returns the IP address of this device.
* @return IP address as a String
*/
public static String getLocalIpAddress() {
try {
for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
en.hasMoreElements();) {
NetworkInterface intf = en.nextElement();
for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses();
enumIpAddr.hasMoreElements();) {
InetAddress inetAddress = enumIpAddr.nextElement();
if (!inetAddress.isLoopbackAddress()) {
return inetAddress.getHostAddress().toString();
}
}
}
} catch (SocketException ex) {
Log.e("", ex.toString());
}
return null;
}
public HttpCommandServerServiceManager(Context ct, HttpRequestListener listener) {
context = ct;
requestListener = listener;
HttpCommandServerServiceManager.copyLocalServerFiles(ct);
context.bindService(new Intent(IHttpCommandServerService.class.getName()),
serviceConn, Context.BIND_AUTO_CREATE);
context.bindService(new Intent(IRegisterCallbackService.class.getName()),
registerCallbackConn, Context.BIND_AUTO_CREATE);
}
public void disconnect() {
if (httpServerService != null) {
try {
httpServerService.stopServer();
} catch (RemoteException e) {
Log.e(TAG, "Error stopping HTTP server.");
}
}
if (registerCallbackService != null) {
try {
registerCallbackService.unregisterCallback(mCallback);
} catch (RemoteException e) {
Log.e(TAG, "Error unregistering callback.");
}
}
if (serviceConn != null)
context.unbindService(serviceConn);
if (registerCallbackConn != null)
context.unbindService(registerCallbackConn);
serviceConn = null;
registerCallbackConn = null;
}
public void setResponseByName(String name, byte[] data, String contentType) {
if (httpServerService != null) {
try {
httpServerService.setResponseDataByName(name, data, contentType);
} catch (RemoteException e) {
Log.e(TAG, "Error calling remote method to set response.");
}
}
}
public void setResponseByName(String name, String resource, String contentType) {
if (httpServerService != null) {
try {
httpServerService.setResponsePathByName(name, resource, contentType);
} catch (RemoteException e) {
Log.e(TAG, "Error calling remote method to set response.");
}
}
}
public byte[] getResponseByName(String name) {
if (httpServerService != null) {
try {
return httpServerService.getResponseByName(name);
} catch (RemoteException e) {
Log.e(TAG, "Error calling remote method to set response.");
}
}
return null;
}
public void setRootDir(String root) {
if (httpServerService != null) {
try {
httpServerService.setRoot(root);
} catch (RemoteException e) {
Log.e(TAG, "Error calling remote method to set root directory.");
}
}
}
private IHttpCommandServerServiceCallback mCallback =
new IHttpCommandServerServiceCallback.Stub() {
@Override
public void OnRequest(String request, String[] keys, String[] values, byte[] data) {
Log.d(TAG, "*** Param request received: " + request);
if (requestListener != null) {
requestListener.onRequest(request, keys, values, data);
}
}
};
/**
* Implement this interface to receive callbacks when the HTTP server is
* connected and when PUT/POST requests are received.
*/
public interface HttpRequestListener {
public void onRequest(String req, String[] keys, String[] values, byte[] data);
public void onConnected();
}
} |
Evaluating the safety effectiveness of downgrade warning signs on vehicle crashes on Wyoming mountain passes
Abstract
Highway safety on mountain passes is a major concern to most highway agencies in the Western United States. Large trucks are known to be disproportionately affected on downgrades, which characterize mountain passes, in comparison to other vehicle classes. However, downgrade crash risks also exist for several other classes of vehicles such as buses, single-unit trucks, recreational vehicles (RVs) and passenger vehicles. The Wyoming Department of Transportation (WYDOT), in an effort to reduce the crash risks existing on Wyoming mountain passes, has among other measures installed warning signs on steep grades in the state. However, concerns about out-of-control vehicles on downgrades persist. An empirical analysis of the safety performance of static warning signs installed on two-lane road downgrades was carried out using the negative binomial (NB) modeling approach. A review of the literature indicated that there are unique contributory factors to truck and non-truck crashes. Two crash prediction models were therefore developed, one each for trucks and other vehicles. In an attempt to approach the study holistically and to derive reliable results, traffic and geometric factors were included in the analysis. The results showed that truck escape ramp, directional and speed combination, and hill combination warning signs are effective in reducing truck crashes on downgrades. For non-truck crashes, the presence of downgrade and truck-specific warning signs within 0.5 miles of the downgrade, hill, chevron, and directional and speed combination warning signs reduce crashes to various degrees. It is hoped that this study will provide important information to traffic safety engineers and policymakers concerned with downgrade safety.
ABOUT THE AUTHORS
Milhan Moomen is currently completing a PhD in civil engineering at the University of Wyoming. His research focuses on truck traffic safety on downgrades.
Mahdi Rezapour (PhD) is a post-doctoral research associate at the Department of Civil Engineering, University of Wyoming. He attended the University of North Dakota and the University of Wyoming for his graduate education.
Khaled Ksaibati (PhD, P.E.) is a Professor of civil engineering at the University of Wyoming. He is also currently the director of the Wyoming Technology Transfer Center. He attended Wayne State University and Purdue University for his education. The research presented in this paper is part of a wider study of updating and implementing the Grade Severity Rating System (GSRS) for Wyoming mountain passes. The study aims at improving truck safety on Wyoming mountain passes by recommending safe descent speeds to drivers.
PUBLIC INTEREST STATEMENT
Crashes on steep downgrades cause extensive damage to lives and property. Highway agencies have continuously sought measures to reduce the occurrence of downgrade crashes. An important intervention has been the use of downgrade warning signs to present information to drivers about downgrades. The Wyoming Department of Transportation (WYDOT) has installed several warning signs on steep downgrades to counter the incidence of downgrade crashes. This study carried out an empirical analysis of Wyoming downgrade warning signs using the negative binomial modeling approach. Two crash prediction models were developed for truck-and non-truck crashes. It was found that truck escape ramp, directional and speed combination, and hill combination warning signs are effective in reducing truck crashes on downgrades. The general presence of downgrade and truck-specific warning signs within downgrades, hill, chevron, and directional and speed combination warning signs were effective in reducing non-truck crashes.
Introduction
Motor vehicle crashes result in the death of more than one million people each year around the world, and between 20 and 50 million more suffer non-fatal injuries (WHO, 2015). In 2016 alone, there were 34,439 fatal motor vehicle crashes in the United States, which resulted in 37,461 deaths (NHTSA, 2018a). Every year, motor vehicle crashes in the US result in an estimated $230 billion in costs, representing 2% of gross domestic product (NRC, 2004).
Wyoming has the highest fatality (24.7 deaths per 100,000 population) and truck-related crash rates in the US (Weber & Murray, 2014). These high rates result from the relatively high amount of through truck traffic on the interstates and mountainous highways which characterize Wyoming (Mashhadi, Wulff, & Ksaibati, 2018). The steep downgrades and curves which are features of mountainous highways place additional challenges on drivers. This increases the crash risks on mountain highways compared to level, straight highways. The causes of crashes on downgrades are varied but have been mainly attributed to inexperienced downgrade driving, defective brakes and inadequate signing (Myers, Irving, & Walter, 1981). Other factors related to environment and highway geometry have been cited as well (Ahmed, Huang, Abdel-Aty, & Guevara, 2011).
Trucks are especially vulnerable on downgrades because of their large sizes and the heavy loads they carry. Large trucks may weigh as much as 20-30 times passenger vehicles and are also taller, which invariably increases their odds of crashes on downgrades. Also, large trucks can run out of control on downgrades due to an inability to stop as a result of brake failure. Brake failure on downgrades is attributed to brake heating, failure to downshift, and mechanical failure (Myers et al., 1981). Driver factors and attitudes no doubt play a significant role in crashes on downgrades. Speeding has generally been identified as a factor in most crashes (Abdel-Aty et al., 2005; Elvik, Amundsen, & Hofset, 2001). An analysis of the 2015 safety statistics indicates that 48,613 drivers were involved in 32,166 fatal crashes in which 35,092 people lost their lives. Of this number, 18% of drivers were found to be speeding at the time of the crash, and 27% of those killed were in a crash involving at least one speeding driver (NHTSA, 2018b).
A study conducted by Lill to investigate unusually severe truck crashes on downgrades identified several primary factors which appear repeatedly in downgrade crashes. Key among the findings was that drivers who were inexperienced or unfamiliar with the specific area and its downgrades accounted for 43% of the crashes recorded (Lill, 1977). Failure to downshift on the grade and excessive speed were found in 82% of the crashes identified. Other factors identified were driver impairment, defective trucks or brakes, and inadequate signing.
Conclusions drawn from Lill's and other studies were that warning signs can potentially be effective in preventing downgrade crashes since they mitigate the significant factors (driver unfamiliarity and speeding) identified (Johnson, Myers, DiMarco, & Allen, 1982;Myers et al., 1981).
Warning signs are used as an important tool for educating road users. They also help in enforcement by assisting in the implementation of traffic laws. Most importantly, warning signs caution drivers about the locations and dangers associated with hazardous sections of highways.
There has been considerable research effort in evaluating the efficacy of geometric safety improvements such as lane widening, shoulder widening, and lane width improvement (Al-Masaeid & Sinha, 1995; Labi, 2011), two-way left turns, and vertical curve flattening (Hovey & Chowdhury, 2005), among many others. Other safety effectiveness studies have focused on intersections, bypasses, and cross-sectional elements (Bauer & Harwood, 2000; Elvik, 2008; Zeeger, Hummer, Herf, Reinfurt, & Hunger, 1987). Comparatively fewer research efforts have been undertaken with regard to the safety effectiveness of advance warning signs. Bowman (1993) found that the majority of advance warning devices used by highway agencies were not evaluated by formal effectiveness studies but were simply assumed to be effective. Veneziano and Knapp (2016) noted that the impact of only a few commonly used static warning signs has been studied and documented by any state-of-the-practice approach with robust results; the study rated most previous research on the safety effectiveness of warning signs as having low or medium reliability. Carson, Nee, and McCormack (2005) researched the safety effectiveness of ice warning signs and concluded that the signs appear to have a statistically significant effect on traffic speeds, lane changing, and braking activity even though drivers were oblivious to their existence. Hallmark, Hawkins, and Smadi (2015) evaluated the effect that dynamic speed signs on curves had on vehicle approach speeds in different states and concluded the warning signs were generally effective in decreasing speeds. Al-Kaisy (2006), in studying the efficacy of static warning signs for occasional hazards, conducted a survey by questionnaire sent to all 50 state Departments of Transportation (DOTs) and 2 Canadian provinces. Results from the 28 responding DOTs suggested that most states are not assured of the effectiveness of static warning signs for occasional hazards. The survey results, however, indicated that most responding DOTs found the use of unconventional warning devices to be effective. Another study of selected warning devices for reducing truck speeds suggests that signs targeted at trucks, such as "truck tipping" warning signs and flashing lights mounted above and below a "truck tipping" sign, reduce truck approach speeds (Middleton, 1994).
The above discussion reviews some studies conducted on the safety effectiveness of warning signs. The studies agree to varying degrees that warning signs are important in reducing crashes, though their effectiveness has not been quantitatively evaluated in most instances. In reviewing the literature for this study, the authors did not find any research work targeted specifically at downgrade warning signs, which suggests a gap in safety effectiveness research. This study was motivated by the need to assess the safety effectiveness of downgrade warning signs in preventing downgrade crashes. This was achieved by calibrating crash frequency models for trucks and other vehicles. Estimating the safety effectiveness of advance warning signs installed on downgrades will help identify signs which are effective in countering crashes on downgrades. The results of this study will also serve to enhance knowledge on the installation and use of downgrade warning signs as recommended by the MUTCD.
Methodology
With most crash data being in the form of non-negative integer counts, Poisson regression is usually the starting point for model development. Poisson regression overcomes the ordinary least squares (OLS) requirements of constant variance and normally distributed errors. However, Poisson regression restricts the mean to be equal to the variance, which may result in biased and inconsistent coefficient estimates in the presence of over-dispersed data (Lord & Mannering, 2010).
The negative binomial (NB) regression model, which is an extension of the Poisson regression model, is able to overcome the deficiency of possible over-dispersion in the data. An over-dispersion parameter is introduced in the NB model to account for dispersion in the data. The NB regression model is derived from the Poisson regression model, which is specified as follows:

P(y_i) = EXP(−λ_i) λ_i^{y_i} / y_i!

where P(y_i) is the probability of a roadway entity i having y_i crashes within a time period and λ_i is the Poisson parameter for roadway entity i. Rewriting the Poisson parameter for each observation gives the NB regression model:

λ_i = EXP(βX_i + ε_i)

where EXP(ε_i) is a gamma-distributed error term with a mean of 1 and a variance α. The addition of the gamma-distributed term allows the mean to differ from the variance in the NB regression model.
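As an illustration of this specification, the following sketch fits an NB model with statsmodels on synthetic stand-in data; the covariates (grade, a warning-sign indicator) and their effects are hypothetical, not the Wyoming downgrade data.

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 157                                   # one row per downgrade section
grade = rng.uniform(5, 9, n)              # percent downgrade
sign = rng.integers(0, 2, n)              # 1 if a warning sign is present
X = sm.add_constant(np.column_stack([grade, sign]))
mu = np.exp(-1.0 + 0.25 * grade - 0.3 * sign)
mu *= rng.gamma(2.0, 0.5, n)              # gamma heterogeneity -> over-dispersion
crashes = rng.poisson(mu)
nb = sm.NegativeBinomial(crashes, X).fit(disp=0)  # estimates beta and alpha
print(nb.summary())
```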
The safety effectiveness of warning signs can be calculated using the elasticity of the parameter estimates obtained from regression models (Donnell, Porter, & Shankar, 2010; Labi, 2011). Elasticity is a measure of the responsiveness of one variable to a change in another (Washington, Karlaftis, & Mannering, 2011). In the context of warning signs, elasticity is interpreted as the percent change in expected crash frequency resulting from a 1% change in a continuous explanatory variable. The elasticity of the dependent variable Y with respect to an independent variable X is given by:

E_X^Y = (∂Y/∂X)(X/Y)

Elasticity for indicator variables, also known as pseudo-elasticity, refers to the percent change in expected crash frequency given a change in the value of the indicator variable from zero to unity. For a log-link count model this is:

E_X^Y = [EXP(β) − 1] × 100%

Elasticity is a useful measure of effectiveness because it is dimensionless, unlike an estimated regression parameter, which is dependent on the units of measurement (Washington et al., 2011).
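Continuing the sketch above, both measures follow directly from the fitted coefficients; the index positions refer to the hypothetical design matrix built earlier.

```python
import numpy as np

# For a continuous covariate with a log link, e = beta * x_bar; for a 0/1
# indicator, the pseudo-elasticity is 100 * (exp(beta) - 1) percent.
beta = nb.params
elasticity_grade = beta[1] * grade.mean()        # continuous covariate
pseudo_sign = 100 * (np.exp(beta[2]) - 1)        # indicator covariate
print(f"grade elasticity: {elasticity_grade:.3f}; "
      f"sign effect: {pseudo_sign:.1f}% change in expected crashes")
```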
To select between the Poisson and negative binomial regression models, model fit statistics and the significance of the dispersion parameter were analyzed. The model fit statistics analyzed for this study were the log-likelihood statistic and the Akaike Information Criterion (AIC).
Model validation was undertaken using the leave-one-out cross-validation (LOOCV) technique, chosen due to the limited data size. This approach provides a way to estimate generalization error and allows for comparison between models. In this technique, one observation is selected as the test set, with all other observations used as the training set. A prediction is made for the held-out point, after which the prediction error is computed. This procedure is repeated for all observations, and an average error is estimated to evaluate the model.
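A LOOCV sketch for the hypothetical NB model above: each segment is held out once, the model is refit on the remainder, and the squared prediction error on the held-out segment is averaged over all segments.

```python
import numpy as np
import statsmodels.api as sm

errors = []
idx = np.arange(len(crashes))
for i in idx:
    train = idx != i                      # hold out observation i
    fit = sm.NegativeBinomial(crashes[train], X[train]).fit(disp=0)
    pred = fit.predict(X[i:i + 1])[0]     # predicted mean crash count
    errors.append((crashes[i] - pred) ** 2)
print("LOOCV mean squared error:", np.mean(errors))
```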
Data
The first step in the analysis involved the identification of hazardous downgrade sections. The Manual on Uniform Traffic Control Devices (MUTCD) specifies the combination of downgrades and lengths deemed hazardous to road users. These hazardous downgrades must meet any of the following criteria (FHWA, 2009): • a 5% grade that is more than 3,000 ft. (914.4 m) in length, • a 6% grade that is more than 2,000 ft. (609.6 m) in length, • a 7% grade that is more than 1,000 ft. (304.8 m) in length, • an 8% grade that is more than 750 ft. (228.6 m) in length, and • a 9% grade that is more than 500 ft. (152.4 m) in length.
The Wyoming Department of Transportation (WYDOT) maintains a database containing general roadway geometric characteristics, route numbers, mileposts (MPs), elevations, and vertical and horizontal alignment information. Grades for different sections were computed using information from the WYDOT database. The grade between two locations was computed from their elevations and MPs (Equation 1):

$$G\,(\%) = \frac{E_2 - E_1}{5280\,(MP_2 - MP_1)} \times 100$$

where $E$ denotes elevation in feet and $MP$ the milepost in miles. The calculations allowed a graphical plot of gradient against mileposts to be generated for each route to help identify grades that met the MUTCD criteria. A typical plot on US 16 is shown in Figure 1.
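In code form, a plausible reading of Equation 1 (assuming elevations in feet and mileposts in miles) is:

def grade_percent(elev1_ft, elev2_ft, mp1, mp2):
    """Grade between two points: rise over run, with the run converted
    from miles to feet (1 mile = 5,280 ft)."""
    run_ft = abs(mp2 - mp1) * 5280.0
    return (elev2_ft - elev1_ft) / run_ft * 100.0

print(grade_percent(5400.0, 5240.0, 12.0, 12.5))  # about -6.1% over half a mile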
The length and grade combination of each section was then examined to assess if it met the MUTCD criteria requiring the installation of a steep grade warning sign. Online video logs from Pathway Services Inc. were used to visually check the routes selected and information on grades as well as signage at the various locations. Eleven years of crash data for each section was obtained from the Critical Analysis Reporting Environment (CARE) package. The CARE package provides information on historical accident data from crash reports and is used by many DOTs across the nation to analyze and present crash information. Information such as crash severity, date, vehicle type, location, and crash circumstances was gathered for the selected downgrade sections. Geometric characteristics were also obtained from WYDOT and aggregated to the crash data.
Other information such as number of lanes, passing lanes, MP markers, downgrade direction, start, and end of each downgrade section, posted speed limits, brake check areas, and truck escape ramps were collected. The data set compiled had horizontal and vertical characteristics, cross-sectional, and warning sign information.
Homogenous segmentation
To achieve reliable and accurate results for safety analysis using any cross-sectional method, it is recommended that homogeneous segments be used (AASHTO, 2010; Gross, Persaud, & Lyon, 2010). Homogeneous segments refer to segments with similar characteristics, with each segment homogeneous in respect of features such as length, traffic volume, roadway design, and traffic control features (AASHTO, 2010). Based on the Highway Safety Manual (HSM), a new homogeneous segment begins at the center of each intersection or at any of the following:
• the beginning of a horizontal curve,
• the point of vertical intersection for a sag or crest vertical curve, or
• a change in at least one of the following: average annual daily traffic, lane width, or shoulder width.
To assess the impact of the warning signs and other road characteristics, the above criteria were applied subject to a minimum segment length of 0.10 mile; a sketch of this segmentation rule is given below.
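As an illustration only, the segmentation rule can be sketched in Python with pandas; the column names (aadt, lane_width, shoulder_width, length) and the fold-in rule for sub-minimum segments are assumptions, not taken from the study.

import pandas as pd

def homogeneous_segments(df, keys=('aadt', 'lane_width', 'shoulder_width'),
                         min_length=0.10):
    """Assign a segment ID that increments whenever any key attribute changes,
    then fold segments shorter than min_length (miles) into the previous one."""
    change = (df[list(keys)] != df[list(keys)].shift()).any(axis=1)
    df = df.assign(segment=change.cumsum())
    seg_len = df.groupby('segment')['length'].transform('sum')
    df.loc[seg_len < min_length, 'segment'] = pd.NA   # simplified merge rule
    df['segment'] = df['segment'].ffill()
    return df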
Types of advance warning signs
The types and number of installed warning signs were collected on 157 sections of the included routes in the study. All the downgrades considered fulfilled the criteria specified by the MUTCD for installing downgrade warning signs. The categories of advance warning signs collected are shown in Figure 2. These are:
Hill signs/hill signs with advisory grade or distance plaques
Hill signs (W7-1, W7-1a) are usually placed in advance of downgrades to warn drivers of a steep decline. These signs are frequently used in combination with supplemental signs (W7-2bP, W7-3P, W7-3aP, and W7-3bP) (FHWA, 2009). Supplemental signs emphasize the use of lower gears and speed at locations where conditions justify extra caution. Hill warning signs are installed on locations where crash experience, or engineering judgment indicate a need. These warning signs were divided into the hill signs alone or the combinations of the hill signs and supplement signs.
Truck escape ramp signs
As can be seen from Figure 2, this category includes W7-4, W7-4b, and W7-4c. These signs inform drivers, especially truck drivers, of the provision of truck escape ramp facilities for use by out-of-control vehicles. In this study, this sign was also used as an indication of the presence of an escape ramp.
Directional warning signs
Warning signs of this type are installed on mountain passes to inform drivers of changes in horizontal alignment and route direction.
Directional sign/directional sign and advisory speed plaque
Most often, directional warning signs were combined with advisory speed plaques or were installed close to speed signs for emphasis on reducing speed. These two groups were combined for the analysis.
Chevron warning signs
These are signs installed to show the edge of the road in dangerous curves and provide an emphasis for sudden changes in horizontal curves. Chevron signs are placed at the actual location of the curve change or bend to assist in safely negotiating such sections. They are usually black arrows placed on a yellow background.
Miscellaneous warning signs
Several other downgrade warning signs warning drivers of approaching downgrades were identified during data collection. Some of these signs did not have enough observations to be categorized into individual groups. These included lane merge, high wind, route layout, and rollover signs, among others. Due to their assorted nature, these signs were placed in a miscellaneous category.
Summary of warning signs and variables
The total length of downgrades considered for the study was 172 miles of mainly two-lane highways, segmented into 1,416 sections. However, due to unavailability of warning sign information on some segments, the number of observations used for the analysis was 1,232. Warning signs within three miles in advance of the start of each downgrade were collected, along with warning signs within the downgrades. This meant that the total length used for frequency analysis of the advance warning signs was 3,696 miles. Since the warning signs were mostly installed in advance of the downgrade, signs within the downgrade were found to have low frequencies and were therefore combined into a single category in the modeling process. A complete list of variables related to warning signs, along with their frequencies and normalized information (frequency/mile), is presented in Table 1.
Apart from the warning signs, variables related to the geometric and traffic characteristics of the segments were considered for the analysis. These variables included the number of crest curves, lane width, number of sag curves within segments, curve radius and length, and average daily traffic (ADT), among others. Tables 2 and 3 show the descriptive statistics of the significant variables.
Estimation results
In this section, the contributory factors to truck and other vehicular (non-truck) crashes are discussed. Poisson and NB regression models were estimated for the study. The statistical significance of the dispersion parameter indicates that the NB models were better fits to the data. AIC values were also lower for the NB models, suggesting a better fit in comparison to the Poisson models, and the log-likelihood estimates were comparatively higher for the NB models. The NB estimation results of the models predicting downgrade crash frequency for trucks and other vehicles are presented in Tables 4 and 5. The tables include the estimated coefficient, standard error, Wald chi-square, and p-value. Most of the presented results are reasonable and intuitive. The effect of an explanatory variable in an NB model is interpreted in terms of the exponent of the parameter estimate. For instance, a parameter estimate of 0.20 implies that for a unit increase in that independent variable, the expected number of crashes will increase by a factor of exp(0.20) = 1.22, i.e., by 22%, while holding all the other variables in the model constant. Conversely, a parameter estimate of −0.20 indicates that for a unit increase in the independent variable of interest, the expected crash frequency will decrease by 1 − exp(−0.20) = 0.18, i.e., by 18%, while holding all other variables in the model constant.
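A quick worked check of these two interpretations:

import math

print(math.exp(0.20))        # 1.2214 -> a factor of about 1.22, i.e. +22%
print(1 - math.exp(-0.20))   # 0.1813 -> a decrease of about 18%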
Negative binomial model for truck crash frequency
The first analysis was conducted by incorporating only truck crashes. It is hypothesized that because trucks are prone to crashes on downgrades due to their sizes and loads, truck drivers pay close attention to specific warning signs related to trucks and the downgrade geometry. Though other vehicle drivers consider all warning signs, they may not pay as close attention to the downgrade-specific warning signs as truck drivers do. Also, the literature recommended that the crashes be analyzed separately due to the uniqueness of the contributory factors of each crash type. The significant variables in this analysis were divided into roadway characteristics and warning signs. Table 4 shows the estimation results of the NB model for truck crashes.
Average grade
The parameter estimate for average grade was found to be significant with a positive coefficient, indicating that higher grades are associated with more truck-related crashes. The results suggest that an increase in the vertical grade by 1% will result in an increase in truck crashes by a factor of 1.32, given that the other variables are held constant. The result is expected because trucks are much more prone to crashes on downgrades due to brake failure. This result is consistent with findings in the existing literature (Bowman & Coleman, 1989; Johnson et al., 1982).

Superelevation

Table 4 suggests that superelevation is positively associated with truck crash frequency on downgrades. A unit change in superelevation will result in an increase in truck crashes by up to a factor of 1.05 while holding all other variables in the model constant. This result is intuitive because trucks have higher risks of overturning on curves due to their high center of gravity and are difficult to control on curves (Khattak, Schneider, & Targa, 2003; Mcknight, Bahouth, Mcknight, & Bahouth, 2009). The results partly confirm findings by Stimpson (1977), which showed that the interaction of superelevation with other factors such as improper speed selection leads to an increase in crashes. However, a study conducted by Milliken and de Pont (2005) concluded that a 1% increase in superelevation could reduce heavy vehicle loss-of-control crash risk while cornering. It should be noted that for two-lane highways, only one superelevation is constructed along a cross-section of the road, which results in more challenges while cornering. The adverse effect of downgrade-curve interaction combined with high superelevation at curves may be another major reason for this finding.
Deflection angle
The deflection angle of a horizontal curve is the angle between its two tangents. Higher deflection angles result in a longer and smoother curve, which can consequently increase crash risk by encouraging speeding. The positive parameter estimate for deflection angle is thus expected. The analysis suggests that a one-degree increase in deflection angle will result in about a 2.4% increase in truck crash frequency. This finding is consistent with existing literature and engineering intuition (Hauer, 1999).
ADTT
The positive sign of average annual daily truck traffic (ADTT) indicates that road segments with high truck traffic experience more truck-related crashes. This is an expected result, as a greater number of trucks on downgrades increases the risk of a crash occurring due to brake failure and increased vehicle-vehicle interactions. However, Milton and Mannering (1998) found that an increase in the percentage of truck traffic is associated with a decrease in crashes. They argued that as the percentage of trucks in the traffic stream increases relative to cars, the frequencies of overtaking and lane changes decrease, reducing the risk of crashes.
Warning signs within downgrade
As discussed previously, observations for warning signs within the downgrade segments were low, which necessitated combining all the warning signs within the downgrade segments into one category. Warning signs installed within downgrades were found to be statistically significant at the 5% significance level for truck-related crashes. The positive coefficient of these warning signs indicates an association with higher truck crash frequencies. This does not mean these signs increase crashes; the positive coefficient may only suggest that such warning signs are installed on blackspots, locations known to have high crash frequencies. The positive parameter estimate of this variable may also be the result of confounding brought about by the different groupings of warning signs in this category.
Miscellaneous warning signs
Miscellaneous downgrade signs were found to be significant when installed in advance of downgrades. The results suggest an increase in miscellaneous downgrade signs is associated with higher truck crashes. The result is unexpected but may be an indication that such downgrade signs are installed on locations with high frequencies of crashes.
Hill sign combination with downgrade and distance advisory plaque
The results indicated that the hill sign combination with downgrade and distance advisory plaque (W7-1 + W7-3aP) significantly reduces truck-related crashes. A unit increase in the number of these signs will lead to an estimated decrease in truck crashes of 36% while holding all variables in the model constant. This may be due to this sign being easily recognizable and the extra information provided on these sign types.
Truck escape ramp signs
Truck escape ramp signs were found to be associated with a decrease in truck crash frequency on downgrades. Truck escape ramps allow vehicles, especially trucks that have run out of control due to brake problems to come to a safe stop. These ramps are predominantly installed on downgrades where the incidence of truck runaways is high (Witherford, 1992). The warning sign may not by itself reduce truck crashes, but the presence of truck escape ramps may have engendered this effect. This is because segments with truck escape ramps observe fewer truck crashes in comparison to locations without the ramps. The parameter estimate suggests that a unit increase of a truck escape ramp sign will reduce truck crashes by about 44% while holding all other variables in the model constant.
Directional and speed combination advisory sign
The directional and speed plaque advisory sign was found to be related to a decrease in truck crashes on downgrades. A unit increase in the frequency of this sign is associated with a 32% decrease in the number of truck crashes while holding all variables in the model constant. This warning sign is important on downgrades due to the winding nature of such terrain which also requires low operating speeds.
Graphical representation of variable impacts on truck crashes
To visually represent the impact of a unit change in each independent variable on truck crashes, a radar chart is shown in Figure 3. The impact is expressed in terms of the exponent of the parameter estimate of the independent variable, that is, exp(β) for positive estimates or 1 − exp(β) for negative parameter estimates.
Negative binomial model for other vehicles (non-trucks) crash frequency
The second analysis was conducted to identify the effects of variables on other vehicular crash frequency. Several variables were found to impact the frequency of other vehicular crashes including curve length and type, superelevation, tangent length, presence of passing lane, ADTT and several warning signs.
Downgrade length
Downgrade length was shown to have a significant influence on non-truck crashes. As can be seen from Table 5, the positive sign of the parameter estimate for downgrade length indicates that increasing the downgrade length is associated with a higher occurrence of crashes. The result suggests that for every mile of downgrade length, the expected crash frequency will increase by a factor of 38.6 (using the unstandardized downgrade length) while holding all other variables in the model constant.
Curve type
The results from Table 5 indicate that curve type is statistically significant at the 0.95 confidence level. The results show that crest curves are associated with a decrease in the frequency of non-truck crashes compared to level sections: a crest curve is associated with about a 15% decrease in crashes if all other variables in the model are held constant. In the context of improving traffic safety, this does not offer much actionable information, since it cannot be recommended that more crest curves be constructed. The results may, however, buttress the point that downgrades dominated by sag curves carry higher crash risks.
Superelevation
Like the results obtained for truck crash frequencies, superelevation was found to be positively associated with an increase in other vehicle crashes. The results suggest a factor of 1.08 increase in the number of expected crashes for a 1% increase in superelevation on downgrades.
ADTT
As expected, ADTT is positively associated with an increase in the number of crashes on downgrades. The explanation of the effect of ADTT on truck crashes holds for these types of crashes. However, the effect of ADTT on non-truck crashes was found to be marginal. A unit increase of ADTT results in an expected increase of crashes by a factor of 1.002 given all other variables in the model are held constant. This factor increases with increasing truck traffic. Although ADT and ADTT were included in the preliminary analyses, only ADTT was found to be significant in the final model.
Passing lane
The effect of passing lane on crash frequency is similar to what was found for trucks. The results suggest that for a unit increase in the number of passing lanes, the expected crash frequency will decrease by 17% while holding all the other variables in the model constant.
Presence of downgrade warning signs
This category of warning signs refers to downgrade-specific or truck-specific warning signs (hill signs and combinations, truck escape ramp signs, truck speed signs, etc.) installed predominantly on hills. For this analysis, they were considered present if they were installed 0.5 miles or less in advance of downgrades. They exclude speed limit, directional, chevron, lane merge, and high wind warning signs. This variable was created to assess the impact of general downgrade signs on crashes. The presence of a downgrade warning sign was found to significantly reduce non-truck related crashes. This may be an indication that the specified downgrade warning signs are effective in preventing non-truck crashes even though they are targeted at larger vehicles.
Hill sign (W7-1)
The hill sign was found to have a negative influence on the frequency of other vehicle crashes on downgrades. The results show that by adding a hill sign, the expected frequency of non-truck crashes will decrease by about 43%. This finding indicates that hill signs do not only impact truck crashes but other crashes as well. A plausible explanation for this result is that drivers become aware of possible truck traffic in the presence of the hill sign. Also, the hill sign is well known to motorists who drive on mountain passes and is an indication of an upcoming difficult terrain. These reasons may lead to cautious driving, thus resulting in a decrease in crash frequency.
Chevron warning sign
The negative coefficient for Chevron warning signs installed before downgrades for the non-truck crash frequency model indicates that sections with higher numbers of chevron warning signs generally experience fewer crashes. The results suggest installing a Chevron sign in advance of a downgrade will lead to about an 11% reduction in non-truck crashes while holding all other variables constant. This is expected because Chevron warning signs which alert drivers to sudden changes in horizontal alignment are associated with a decrease in travel speeds in order to safely negotiate such geometric changes. Reducing speed while traveling over mountain passes is highly recommended to decrease the probability of run-off road crashes. Other studies have confirmed this finding where Chevron signs have been found to reduce crashes by up to 50% (Lalani, 1992).
Graphical representation of variable impacts on non-truck crashes
Figure 4 is a radar chart showing the impact of a unit change in the independent variables on non-truck crash frequency. Again, the impact is expressed in terms of the exponent of the parameter estimate of the independent variable.
Elasticities and safety effectiveness of identified factors
After the crash prediction models, crash reduction and safety improvements were estimated by elasticity estimated from the regression parameters. The result of the elasticity analysis is shown in Table 6.
The results show that passing lanes are the most effective measures in preventing truck crashes on downgrades. For warning signs, directional and speed advisory combinations, truck escape ramp and the hill combination signs were found to be effective. In terms of non-truck crashes, curve type, passing lanes, general downgrade warning signs, hill signs, directional and speed advisory combination signs, and chevron signs were all effective in reducing crash frequency. The highest effect was found from hill signs, followed by directional and speed advisory combination signs.
Bar charts are provided in Figures 5 and 6 to graphically show the elasticity of variables which were found to reduce crash frequency. The elasticities represent the percent reduction of crash frequency for a 1% change in the independent variable.
Conclusions and recommendations
The high cost to society of crashes, especially on downgrades, justifies evaluating the safety effectiveness of warning signs. This paper presented comprehensive crash prediction models incorporating warning sign, geometric, and traffic variables, from which safety effectiveness was analyzed. Though the study focused on warning signs, the other factors identified could be useful in understanding their impacts on downgrade crash frequencies. The study found that there is empirical evidence to justify the installation of advance warning signs for downgrades. Specifically, the study found that:
• Warning signs were more effective when they are installed in advance of downgrades in comparison to warning signs installed within the sections. Signs installed before downgrades were found to be effective not only in reducing truck crashes but other vehicular crashes as well.
• The directional and speed plaque combination sign was found to be the most effective sign in reducing truck crashes on downgrades. Truck escape ramp signs and hill combination signs were also shown to reduce truck crash frequencies. The results of the elasticity analysis indicate that these signs can reduce truck crashes from about 6% to 17%.
• For non-truck crashes, chevron signs, hill signs, the directional and speed plaque combination sign, and the presence of a downgrade warning sign were found to be effective in reducing crashes on downgrades. The elasticity analysis suggests that the presence of a downgrade sign (within 0.5 miles in advance of the downgrade) has the highest safety effect of about 28% on non-truck crashes. Overall, the warning signs identified were found to have a safety effectiveness of about 2% to 28% for non-truck involved crashes.
• The analysis showed that the presence of a passing lane has a significant effect on reducing both truck and non-truck crash frequencies on downgrades. The findings also indicate that crest curves reduce other crashes on downgrades.
• Despite the analysis showing that general installation of warning signs reduces crash frequencies, careful thought should go into the locations and numbers installed. Installing more warning signs than needed may result in drivers losing respect for them and disregarding pertinent information in the process (FHWA, 2009).
• Intelligent Transportation Systems (ITS) may be used along with the current warning sign regime on mountain passes in Wyoming. Such ITS measures include warning systems with the ability to communicate speeds and weights to large vehicles fitted with transponders, thermal imaging of heavy vehicle brakes on brake check areas, speed feedback systems to alert drivers of heavy vehicles when they exceed the safe speed at a location, and the potential use of invehicle telematics.
Having discussed which warning signs are effective in preventing crashes on downgrades, it is important to note that not all of the important warning signs were statistically significant in this study. This may have been due to a lack of variation in these warning signs across segments. Equally important, warning signs function as a system and not individually; this concept was not pursued in this study due to data limitations.
For future studies, the safety evaluation of warning signs should be linked to the importance that drivers place on them. This may need a comprehensive psychological evaluation to understand the preventative impact of the warning signs. Future studies should also focus on comprehensively assessing the safety impacts of warning signs based not only on crash frequency but also on crash severity.
Also, the impact of warning signs on crash severity may be analyzed in a future study. Though several studies have used count models such as the Poisson and NB models to predict crash frequencies, their use in crash severity analysis has been limited. The use of count models in crash severity analysis is complicated because the crash counts in each severity level are not independent. This implies that the correlation among severity-specific crash counts must be taken into account (Anastasopoulos & Mannering, 2011). Therefore, methodological approaches such as multinomial logit models, ordered logit models, mixed logit models, and ordered probit models are preferred for crash severity analysis.
import { DialogElement } from './vaadin-dialog.js';
export type DialogRenderer = (root: HTMLElement, dialog?: DialogElement) => void;
export type DialogResizableDirection = 'n' | 'e' | 's' | 'w' | 'nw' | 'ne' | 'se' | 'sw';
export type DialogResizeDimensions = {
width: string;
height: string;
contentWidth: string;
contentHeight: string;
};
export type DialogOverlayBounds = {
top: number;
left: number;
width: number;
height: number;
};
export type DialogOverlayBoundsParam =
| DialogOverlayBounds
| {
top?: string | number;
left?: string | number;
width?: string | number;
height?: string | number;
};
/**
* Fired when the `opened` property changes.
*/
export type DialogOpenedChangedEvent = CustomEvent<{ value: boolean }>;
/**
* Fired when the dialog resize is finished.
*/
export type DialogResizeEvent = CustomEvent<DialogResizeDimensions>;
export interface DialogElementEventMap {
'opened-changed': DialogOpenedChangedEvent;
resize: DialogResizeEvent;
}
export type DialogEventMap = HTMLElementEventMap & DialogElementEventMap;
|
/*
* Set the clipping rectangle in an output PostScript file.
* Note that an unpaired grestore
* is used to restore the initial clipping path before setting up the
* clip; this means that no other unpaired gsaves may be used.
*/
int
PSsetClip(Metafile *mf, int num, Glimit *rect)
{
int imf;
mf_cgmo **cgmo = &mf->cgmo;
for (imf = 0; imf < num; ++imf) {
FILE *fp = cgmo[imf]->fp;
(void) fprintf(fp, "gr gs %f %f %f %f b clip n\n",
rect->xmin, rect->ymin,
rect->xmax, rect->ymax);
}
return OK;
} |
def generate_light(self, parent_tag, light):
if not isinstance(light, Light):
raise TypeError("Expecting the given 'light' to be an instance of `Light`, but got instead: "
"{}".format(type(light)))
attrib = {}
for name in ['name', 'castshadow', 'active']:
self._update_attribute_dict(attrib, light, name)
for name, key in zip(['position', 'direction'], ['pos', 'dir']):
self._update_attribute_dict(attrib, light, name, key=key)
for name in ['ambient', 'diffuse', 'specular']:
self._update_attribute_dict(attrib, light, name, slice=slice(3))
return ET.SubElement(parent_tag, 'light', attrib=attrib) |
Letter to the Editor
Dear Editor, Based on a latent class analysis of 12 'non-core symptoms' in a sample of 1,210 patients with Myalgic Encephalomyelitis (ME) and/or chronic fatigue syndrome (CFS), Huber and colleagues found 6 subtypes (classes) of 'ME/CFS': one class likely to endorse all non-core symptoms, one class that endorsed none of the non-core symptoms, one class with primarily gastro-intestinal symptoms, one class with primarily circulatory symptoms, one class with gastro-intestinal and circulatory symptoms, and one class with circulatory symptoms and orthostatic intolerance. Not surprisingly, none of the subclasses relates to an ME patient subgroup. This is a direct consequence of the definition of 'ME/CFS'. ME is a neuromuscular disease, characterized by muscle fatigability ('general or local muscular fatigue following minimal exertion with prolonged recovery time') and 'neurological disturbance, especially of cognitive, autonomic and sensory functions'. 'ME/CFS' is defined by the authors by four 'core symptoms': fatigue, post-exertional malaise (PEM), neurocognitive problems, and unrefreshing sleep. Using this definition of 'ME/CFS', an ME patient subgroup is excluded, as a large patient study (n = 420) found that only 77% of ME patients experience 'cognitive disturbance'. Post-exertional 'malaise', unrefreshing sleep and fatigue, 'core symptoms' of 'ME/CFS', were never part of a formal definition of ME. In this context, it should be noted that post-exertional 'malaise', an ill-defined abstract notion, is not the same as muscle fatigability/post-exertional muscle weakness, a specific symptom of ME, which can be assessed objectively. The non-specificity of post-exertional 'malaise', 'fatigue' and 'unrefreshing sleep' is illustrated by the observation of the authors that many patients with two medical diseases, major depressive disorder, and (explained) chronic fatigue also experience these three 'core symptoms' of 'ME/CFS'. In conclusion, to find solutions for patients and to define 'ME/CFS' patient subgroups correctly, it is crucial to differentiate ME from 'ME/CFS' and CFS.
// GenerateMethodResponse returns the current state in the form of *svcapitypes.MethodResponse.
func GenerateMethodResponse(resp *svcsdk.MethodResponse) *svcapitypes.MethodResponse {
cr := &svcapitypes.MethodResponse{}
if resp.ResponseModels != nil {
f0 := map[string]*string{}
for f0key, f0valiter := range resp.ResponseModels {
var f0val string
f0val = *f0valiter
f0[f0key] = &f0val
}
cr.Spec.ForProvider.ResponseModels = f0
} else {
cr.Spec.ForProvider.ResponseModels = nil
}
if resp.ResponseParameters != nil {
f1 := map[string]*bool{}
for f1key, f1valiter := range resp.ResponseParameters {
var f1val bool
f1val = *f1valiter
f1[f1key] = &f1val
}
cr.Spec.ForProvider.ResponseParameters = f1
} else {
cr.Spec.ForProvider.ResponseParameters = nil
}
if resp.StatusCode != nil {
cr.Spec.ForProvider.StatusCode = resp.StatusCode
} else {
cr.Spec.ForProvider.StatusCode = nil
}
return cr
} |
Method Statement for Reconstruction / Strengthening of Existing Nullah Structures for Tsui Ping River
The Revitalization of Tsui Ping River project requires partial demolition of the existing nullah structures and their reconstruction. According to the particular specification and Drainage Services Department requirements, full closure of Tsui Ping River is not allowed, and any construction work on the river bed in the wet season needs approval. This article presents the method statement for reconstruction/strengthening of the existing nullah structures for Tsui Ping River.
Introduction
Revitalization of Tsui Ping River project will be one of the most significant projects under Energizing Kowloon East. The biggest challenge will be to maintain hydraulic flow of Tsui Ping River and complete the works within the tight Contract time frame.
Potential Risks / Constraints and Mitigation Measures
Having thoroughly reviewed the tender documents, we are fully aware of the potential risks and site constraints for the reconstruction/strengthening of existing nullah structures. As such, we have proposed the most effective and suitable construction methods and mitigation measures to overcome these risks / constraints. The details are described below: Hydraulic impact to the river - To maintain the function of the existing Tsui Ping River during construction, we will conduct a Drainage Impact Assessment and submit a Temporary Drainage and Sewerage Management Plan as per PS1.24A prior to construction. Temporary flow diversion measures will be implemented and the river bed reconstruction works will be scheduled in dry seasons only to avoid overflow of the river. We will also remove the existing flow obstruction, i.e. the existing ramp of Footbridge KF90, at an early stage to improve the drainage performance.
Tight construction programme -To overcome the tight programme due to restrictions for working in the nullah during wet seasons, the reconstruction or strengthening works of the river bed and nullah wall structures will be scheduled in dry seasons only. We will arrange the piling works to be carried out in wet seasons on temporary piling platform above the nullah to provide programme assurance. Multiple concurrent construction work fronts will be arranged at different areas within the existing nullah in both wet and dry seasons.
Tidal influence to construction activities within river bed -The construction activities within the nullah may be affected by the high tidal water. To eliminate the impact, we will implement temporary tidal management measures by installing sheet piles to form a cofferdam in the nullah in order to ensure work in dry condition. The top level of the cofferdam will be carefully designed at +2.8mPD which is higher than the tidal water at +2.6mPD by referring to the predicted tidal level by Hong Kong Observatory at different construction periods. A flap valve will be installed at drainage outlet to prevent back water flow effect.
Maintaining the stability of existing nullah walls -Various nullah walls will have to be trimmed prior to reconstruction or strengthening works of the nullah structures and the stability may be affected. In order to maintain its stability, we will adopt wire saw-cutting method for partial demolition to minimise vibration impact on the existing nullah wall. Robust temporary support measures will be in place to stabilise the existing nullah wall by using temporary waling and strutting system. Grouting will be carried out beneath the existing nullah wall prior to the excavation of the river bed. Instrumentation monitoring points such as settlement markers and tilt meters will be installed and regular monitoring will be conducted for the structure to ensure no exceedance of the allowable limits.
Measures to Maintain the Stability of the Existing Nullah Walls During Reconstruction / Strengthening of Nullah Structures
As the existing nullah wall is in the form of a gravity retaining structure, its stability needs to be maintained during partial demolition of the existing nullah wall and excavation of the existing river bed by proper temporary strengthening and supporting works, which consist of the following 3 major elements: 1) Lateral support system consisting of soldier pile / pipe pile wall, waling and strutting system; 2) Grout curtain underneath the existing nullah wall; 3) Steel lagging plates to be installed between soldier piles / pipe piles. The details of the proposed measures are described below: Prior to the start of the partial demolition of the existing nullah wall, the excavation & lateral support system (ELS) will be installed to stabilize the existing structure. The ELS consists of soldier piles or pipe piles to be installed to -3.0mPD, depending on the extent of partial demolition of the nullah wall, supported by 1 to 2 layers of waling and raking struts at 4m c/c spacing horizontally as shown in Figure 1. A grout curtain will be installed underneath the existing nullah wall to fill the pores and cavities of the soil after installation of the soldier pile or pipe pile wall and prior to excavation of the river bed. The grout curtain not only serves as an effective cut-off to groundwater flow, but also strengthens the foundation of the existing nullah wall, thereby enhancing the overall stability of the partially demolished nullah wall during deepening of the river. During the excavation below the existing river bed, steel lagging plates between the soldier piles or pipe piles will be installed by welding in stages as shown in Figure 2 while the excavation proceeds. Owing to the different natures of construction works and the extent of partial demolition of the existing nullah walls at various locations, four types of temporary strengthening and support system are proposed to support the existing nullah wall during partial demolition of the nullah wall and excavation of the river bed, which are elaborated as follows:

Type 1 - Soldier piles supported by raking struts in Zone A and B

At locations where nullah wall strengthening works are carried out, we propose to install soldier piles behind the portion of nullah wall to be demolished by cutting vertical slots of 400mm width into the nullah wall at 4m c/c horizontally. Soldier piles will be installed by coring method. 2 layers of raking struts will then be installed to laterally support the soldier pile wall as shown in Figure 1.
Type 2 -Pipe piles supported by raking struts in Zone A, Zone B
Pipe piles spaced at 700mm c/c are proposed to be installed behind the portion of nullah wall to be demolished at cross-river walkways and landscape decks where substantial demolition of existing nullah wall takes place. To minimize vibration and damage to the existing nullah wall, coring method is proposed for installation of pipe piles. 2 layers of raking struts will then be installed to laterally support the pipe pile wall as shown in Figure 1.
Type 3 -Soldier piles supported by horizontal struts and lagging plates in Zone C
At locations where U-shaped retaining structure is to be constructed in Zone C, we propose to install soldier piles by coring method at 4m c/c prior to excavation of river bed. Steel lagging plates between the soldier piles will then be installed by welding in stages while the excavation proceeds.
Type 4 -Lagging plates supported by permanent pipe pile wall in Zone C
In Zone C, where a permanent pipe pile wall will be constructed, we propose to make use of the permanent pipe pile wall as temporary support for installation of steel lagging plates by welding for excavation of the river bed. In addition to the ELS system mentioned above, we also propose other measures to maintain the stability of the existing nullah wall, which include partial demolition of nullah walls in alternate bays, evaluation of the existing nullah structure prior to partial demolition, and minimization of surcharge load behind partially demolished nullah walls.
Method for Partial Demolition of Nullah Walls
Partial demolition of nullah walls will take place in the dry season after implementation of the temporary flow diversion measures. Soldier piles, sheet piles at the middle of the river and the grout curtain underneath the existing nullah wall will be installed in the preceding wet season with an aim to expedite the construction programme in the dry season. As the partial demolition along Tsui Ping Road and King Yip Lane involves trimming of existing nullah walls, the conventional demolition method with the use of hydraulic breakers is not desirable for such works because of significant vibration and potential damage to the existing nullah wall. To this end, we propose to adopt the wire saw cutting method for partial demolition of the nullah wall, which is a quick, versatile, low-noise, dust- and vibration-free method for removing large sections of concrete. The details of partial demolition by wire saw cutting method are described below: 150mm dia. horizontal and vertical core holes are drilled into the nullah wall at pre-determined positions. A wire saw will then be fed through the core holes around the portion of the structure to be cut. Pulling force will then be applied to the spinning wire by a series of pulleys to cut concrete chunks in rectangular shape. The cut lines will be formed such that the size of each demolished concrete piece is limited to 2 to 3 tons by weight. The partial demolition will proceed from the top to the base of the wall. The demolished concrete pieces will then be removed by crane lorry.
Method for Strengthening of Nullah Walls with New Base Slab
After partial demolition of the nullah walls, deepening of the existing river will proceed and construction of the new base slab and nullah wall will be carried out in the sequence described below: Excavation to Final Excavation Level - Excavation will be carried out with the use of hydraulic excavators. Lagging plates will be installed between the soldier piles as excavation of the river bed proceeds. The excavation and installation of lagging plates will be repeated until the final excavation level is reached.
Construction of new base slab (first half) -Grade 200 rock fill will be laid and first half of the new base slab with kickers will then be constructed by cast in-situ method. After the newly constructed base slab attains adequate strength, the bottom layer of struts will be removed.
Construction of new nullah wall (first half) -The new nullah wall will be constructed up to soffit level of top strut. The top layer of strut will be removed after the newly constructed nullah wall attains sufficient strength. The upper portion of new nullah wall will then be constructed.
Construction of new base slab and nullah wall (second half) - Construction of the second half of the new base slab and nullah wall along the other side of the river will be carried out in the next dry season after the implementation of temporary flow diversion. The same procedures for partial demolition and construction of the new base slab and nullah wall as described above will be repeated to complete the strengthening of nullah walls with the new base slab for the whole section of the river.
Method Statement for the Construction of the New Nullah Walls Along King Yip Street
There are three types of new nullah walls along King Yip Street in Zone C, namely the U-shaped retaining structure, the pipe pile wall (north side) and inverted T-shaped wall (south side).
Construction of U-shaped Retaining Structure (both north and south sides)
To fast track the construction programme, we will erect a temporary working platform over the river for installation of soldier piles, install the soldier piles of the ELS system by coring method and carry out the grout curtain beneath the existing nullah wall at both sides of the river in the wet season. Sheet piles will also be installed in the middle of the river as a partition wall for temporary flow diversion. In the following dry season, we will divert the river flow to the north side of the river to provide a dry condition for excavation of the river bed at the south side. Excavation of the river bed will then be carried out to 500mm below the strut level for the installation of raking struts. Further excavation will proceed with steel lagging plates installed in stages between the soldier piles until the final excavation level is reached. Construction of the base slab and half of the tie beam of the U-shaped retaining structure at the south side will then be carried out. After removal of struts, the new nullah wall will be constructed to the soffit of the new cantilever slab. Concrete blocks will be installed as temporary flow diversion and we will divert the river flow to the south side of the river. In the next dry season, sheet piles will be extracted and the same construction method and sequence will be adopted for the construction of the U-shaped retaining structure at the north side. Upon completion of the U-shaped retaining structure, cantilever slabs on both sides of the river will be constructed in the following wet season. Temporary brackets will be erected on the new nullah wall as support for the soffit formwork of the cantilever slab. After that, the RC structure of the cantilever slab will be constructed.
Construction of Inverted T-shaped wall (south side)
Considering the tight construction programme, we will arrange to install socketed H-piles and pipe piles and carry out the grout curtain beneath the existing nullah wall in the wet season on a temporary working platform installed over the river. Meanwhile, sheet piles will also be installed in the middle of the river as a partition wall for temporary flow diversion. In the next dry season, the river flow will be diverted to the north side of the river so that the river bed works at the south side can be carried out in dry condition. The river bed will be excavated using hydraulic excavators down to the final excavation level and the pile heads of the socketed H-piles will then be trimmed to the cut-off level. Half of the tie beam and base slab of the inverted T-shaped wall will be constructed after laying of Grade 200 rockfill. The river wall will then be constructed, followed by mass concrete fill for the aeration cascade in front of the river wall. The gap between the new river wall and the existing nullah wall will be backfilled with compacted soil. In the next dry season, we will divert river flow to the south side of the river. In order to stabilize the existing nullah wall, we will install vertical posts through the rockfill foundation of the existing nullah wall by coring method prior to the river bed excavation, and horizontal struts between the permanent pipe piles and vertical posts as shown in Figure 2. Excavation will be carried out in stages, followed by the installation of the lagging wall between vertical posts, until the final excavation level is reached. The waler beam and half of the tie beam will be constructed after laying of Grade 200 rockfill. Subsequent to the removal of the horizontal struts, the lagging wall will be constructed up to the underside of the capping beam. In the following wet season, temporary brackets will be mounted to the previously constructed lagging wall as a support for construction of the capping beam and cantilever walkway.
Measures to Monitor and Minimise the Settlement to the Existing Nullah Walls, Adjacent Footpath and Roads, Existing Utilities and Buildings
Measures to Monitor the Settlement to the Sensitive Receivers
In addition to the instrumentation points indicated in the tender drawing, we will install additional ground settlement monitoring points and utility settlement monitoring points to the existing nullah walls, adjacent footpath and roads, existing utilities and buildings and carry out intensive and frequent monitoring to ensure that AAA levels are not exceeded during construction. We will also increase our frequency of visual inspections on adjacent structures and facilities which are critical and high-risk such as Kwun Tong Bypass.
Measures to Minimise the Settlement to the Sensitive Receivers
Installation of Temporary Waling and Strutting System to Stabilize the Existing Nullah Wall -Prior to partial demolition of the nullah wall and excavation of the river bed, we propose to install temporary waling and strutting system as stabilization measures of the existing nullah wall. The two layers of raking strut together with the soldier pile or pipe pile wall resist the nullah wall against sliding and overturning. The robustness of the lateral support system reduces the settlement behind the existing nullah wall to within 23mm.
Installation of lagging plates underneath the existing nullah wall -When the excavation reaches the bottom level of the existing nullah wall, the lagging plates will be installed at every 500mm excavation depth between the soldier piles or pipe piles in the existing nullah wall. The lagging plates protect the founding material of the nullah wall from being scoured. Ground treatment underneath the existing nullah wall -We will carry out ground treatment to the soil underneath the existing nullah wall before partial demolition of the nullah wall and excavation of river bed to minimize wall movement and ground settlement.
Construction of the river bed and nullah wall in alternate bays - We will carry out partial demolition and reconstruction of the existing nullah wall in an alternate bay manner. Such a construction method will minimize ground disturbance.
Potential Measures in Expediting the Construction Programme
In order to expedite the construction programme, we propose to adopt the following potential measures: Use of prefabricated modular temporary working platform -To shorten the construction period required for installation of the working platform, prefabricated modular platform will be used for piling works and temporary works such as installation of the soldier piles or pipe piles. Because of the modular type, it can be easily assembled, erected and dissembled.
Arrangement of piling works and temporary works in wet season -To fully utilize the wet season, we will arrange to carry out piling works and grout curtain, and installation of soldier piles or pipe piles with the use of prefabricated modular temporary working platform erected over the river in the wet season while maintaining the flow capacity of Tsui Ping River. Such construction arrangement allows the river bed and nullah wall construction to be carried out in the next dry season.
Adoption of precast shell for tie beam construction -We will adopt precast shell for construction of the tie beam which is part of the sub-structure in U-shaped retaining structures in Zone C. The precast shell serves as both permanent formwork and the outer component of the tie beam structure. With this method, no formwork erection is required so that the construction time can be reduced.
Concurrent construction in different areas -To speed up the construction programme, we will arrange multiple concurrent construction work fronts at different areas within the existing nullah in both wet and dry seasons. For example, we will deploy sufficient resources to carry out piling works and temporary works installation in the wet season while both river bed and nullah wall construction and piling works can be arranged concurrently in the dry season.
Extra merits: 1) The use of modular working platforms to allow the piling works to be carried out in wet seasons, together with precast elements for tie beams and the cantilever walkway, can expedite the construction programme. 2) Partial demolition and reconstruction of the existing nullah wall will be carried out in an alternate bay manner. Such a construction method will minimize ground disturbance and impact on the stability of sensitive receivers.
/**
* This function will be used to register authenticator plugin to SDK.
* @param authenticatorName
* @param className
* @return
* @throws NullPointerException
* @throws OMAuthenticationManagerException
*/
public boolean registerAuthenticator(String authenticatorName, String className)
throws NullPointerException, OMAuthenticationManagerException {
try {
Class c = Class.forName(className);
if (!OMAuthenticator.class.isAssignableFrom(c)) {
throw new OMAuthenticationManagerException(OMErrorCode.INVALID_INPUT,
"Class [" + className + "] must implement OMAuthenticator interface");
}
return registerAuthenticator(authenticatorName, c);
} catch (ClassNotFoundException e) {
throw new OMAuthenticationManagerException(OMErrorCode.INVALID_INPUT, e);
}
} |
//
// Handles DirectShow graph events.
//
// Returns true if the player should be stopped.
//
bool CMediaPlayer::HandleGraphEvent()
{
bool stopped = false;
    if (_MediaEvent == NULL)
{
return stopped;
}
long evCode;
LONG_PTR param1, param2;
    while (SUCCEEDED(_MediaEvent->GetEvent(&evCode, &param1, &param2, 0)))
{
_MediaEvent->FreeEventParams(evCode, param1, param2);
switch (evCode)
{
case EC_COMPLETE:
{
{
IMediaControl *mediaControl;
_GraphBuilder->QueryInterface(IID_PPV_ARGS(&mediaControl));
mediaControl->Stop();
mediaControl->Release();
}
REFERENCE_TIME timeBegin = 0;
_MediaSeeking->SetPositions(&timeBegin, AM_SEEKING_AbsolutePositioning, NULL, AM_SEEKING_NoPositioning);
stopped = true;
}
break;
case EC_USERABORT:
case EC_ERRORABORT:
stopped = false;
}
}
return stopped;
} |
/* SENTRY-1471 - fixing the validation logic.
* a) When the number of retries exceeds the limit, propagate the Assert exception to the caller.
* b) Throw an exception instead of returning false, to pass valuable debugging info up the stack
* - expected vs. found permissions.
*/
private void verifyOnAllSubDirsHelper(Path p, FsAction fsAction, String user, String group,
boolean shouldExist, boolean recurse, int retry) throws Throwable {
FileStatus fStatus = null;
try {
fStatus = miniDFS.getFileSystem().getFileStatus(p);
if (shouldExist) {
if(!Strings.isNullOrEmpty(group)) {
Assert.assertEquals("Error at verifying Path action : " + p + " ;", fsAction,
getAcls(AclEntryType.GROUP, p).get(group));
}
if(!Strings.isNullOrEmpty(user)) {
Assert.assertEquals("Error at verifying Path action : " + p + " ;", fsAction,
getAcls(AclEntryType.USER,p).get(user));
}
} else {
if(!Strings.isNullOrEmpty(group)) {
assertFalse("Error at verifying Path : " + p + " ," +
" group : " + group + " ;", getAcls(AclEntryType.GROUP, p).containsKey(group));
}
if(!Strings.isNullOrEmpty(user)) {
assertFalse("Error at verifying Path : " + p + " ," +
" user : " + user + " ;", getAcls(AclEntryType.USER, p).containsKey(user));
}
}
LOGGER.info("Successfully found acls for path = " + p.getName());
} catch (Throwable th) {
if (retry > 0) {
LOGGER.info("Retry: " + retry);
Thread.sleep(RETRY_WAIT);
        verifyOnAllSubDirsHelper(p, fsAction, user, group, shouldExist, recurse, retry - 1);
        // The retried call has completed verification (including recursion);
        // return here, since fStatus is still null and would NPE below.
        return;
} else {
throw th;
}
}
if (recurse && fStatus.isDirectory()) {
FileStatus[] children = miniDFS.getFileSystem().listStatus(p);
for (FileStatus fs : children) {
verifyOnAllSubDirsHelper(fs.getPath(), fsAction, user, group, shouldExist, recurse, NUM_RETRIES);
}
}
} |
After being knocked out of the US Open Cup on Tuesday, LA needed a strong showing against the San Jose Earthquakes to kick off their summer campaign. To the Galaxy’s credit, they delivered: aided by a clever combination that built to a great finish from Gyasi Zardes, LA came away with a well-earned road win.
“We talked over the last three days about concentrating for 90 minutes. We’ve played well through a lot of stretches in a lot of games, but we’ve let our concentration go.”
—Landon Donovan
Forget the last three days: the Galaxy have struggled with closing out games all season. By that measure – and it’s an important one – Saturday’s win was reassuring. LA looked sharp throughout the night, and was especially good about responding to San Jose’s counter attacks. That’s not to say there weren’t nervy moments (and yes, those moments increased toward the end of the game) but keeper Jaime Penedo was rarely tested.
A big part of that consistency rested on a few key players. Marcelo Sarvas, for starters, looked comfortable as a roving left winger. His accuracy and movement helped LA maintain calm possession in the middle third. And two other standouts provided a sneak peek at the direction the Galaxy's play style is evolving: the two outside backs, Robbie Rogers and Dan Gargan, who pressed their way up the sidelines and into the attack, providing plenty of service to LA's strikers. The quality of the crosses varied, for sure, but at least the Galaxy are finding a groove.
It takes more than rhythm to win a game, though, and LA’s lone goal was a spectacular one. Robbie Keane lofted the ball forward into the box, where Marcelo Sarvas received it on the end of a great run and laid it back to Gyasi Zardes. For all the flack he gets about his finishing, Zardes made the most of the moment — his rocket to the top right netting was unsaveable, I don’t care who you have in goal.
“If Barcelona scored a goal like that, people would be talking about it for weeks. It was a good sequence: it was a great ball by Robbie, great run by Marcelo and Gyasi peeled off with a great run and a finish.”
—Landon Donovan
The Galaxy had one more good chance when Stefan Ishizaki took a shot from near the top of the 18 that bounced off the far post. Had it gone in, he could’ve made up for a mistake in the first half, when he chose to pass instead of taking a close-range shot on goal. And if there’s any complaint to be had with LA’s offense, it’s exactly that: they didn’t look hungry. The Galaxy will take whatever wins they can get at this point, but there’s no reason that their narrow lead couldn’t have been bigger.
In the final minutes San Jose nearly made LA pay for all those missed chances. But the Galaxy’s back line held strong – Tommy Meyer in particular had a great showing – and maintained the shut out.
With any luck LA can hang on to their momentum this time around. They'll be tested again Friday, July 4th, when they square off against the struggling Portland Timbers at home.
"""
Requires:
llvmdev-7.0.1 with all targets enabled
"""
import re
import inspect
from llvmlite import ir
import llvmlite.binding as llvm
import numba.cuda
import numba as nb
from .utils import get_version
if get_version('numba') >= (0, 49):
from numba.core import sigutils, registry
else:
from numba import sigutils
from numba.targets import registry
def initialize_llvm():
llvm.initialize()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
initialize_llvm()
def annotation_to_numba_type(a):
"""Return numba type from a signature annotation.
"""
if isinstance(a, str):
if a.endswith('*'):
return nb.types.CPointer(annotation_to_numba_type(a[:-1].strip()))
t = nb.types.__dict__.get(a, None)
if isinstance(t, nb.types.Type):
return t
elif a is inspect.Signature.empty or a is None:
return nb.types.void
elif a is int:
return nb.types.int_ # int64
elif a is float:
return nb.types.double
elif a is complex:
return nb.types.complex128
elif a is bytes:
return nb.types.CPointer(nb.types.byte) # uint8*
elif a is str:
return nb.types.string # unicode_type
raise NotImplementedError(
f'converting {a} of type {type(a)} to numba types')
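# Illustrative mappings produced by annotation_to_numba_type (a sketch for
# readers; each line follows one of the branches above):
#   annotation_to_numba_type('float64')  -> nb.types.float64
#   annotation_to_numba_type('int32 *')  -> nb.types.CPointer(nb.types.int32)
#   annotation_to_numba_type(int)        -> nb.types.int_      (int64)
#   annotation_to_numba_type(None)       -> nb.types.void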
def get_numba_signature(func, return_type=None):
"""Return numba signature from a function.
"""
s = inspect.signature(func)
argtypes = []
for i, a in enumerate(s.parameters):
t = annotation_to_numba_type(s.parameters[a].annotation)
        if t == nb.types.void:
            print(f'Warning: {i+1}-th parameter `{a}` in `{func.__name__}` '
                  f'does not define a type. Assuming `int32`.')
            t = nb.types.int32
argtypes.append(t)
if return_type is None:
rtype = s.return_annotation
else:
rtype = return_type
rtype = annotation_to_numba_type(rtype)
# TODO: warn if return annotation is missing but func contains
# return statement
return rtype(*argtypes)
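# Sketch: for `def f(x: 'float64', y: int) -> 'float64'`, get_numba_signature
# returns the numba signature float64(float64, int_).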
def get_llvm_ir(func, sig=None,
                target='host',
                locals={}, options={}):
    """Return LLVM IR of a Python function.

    Parameters
    ----------
    func : callable
      Specify Python function.
    sig : {<numba signature>, dict}
      Specify the numba signature of the Python function, or a dict
      mapping wrapper function names to signatures.
    target : {'host', 'cuda', llvmlite.binding.Target}
      Specify IR target.
    locals : dict
      Specify numba types of local variables, passed through to the
      numba compiler.
    options : dict
      Reserved for compiler options; currently unused.
    """
if sig is None:
sig = get_numba_signature(func)
if target == 'host':
# triple = llvm.get_default_triple()
# there is also get_process_triple
target_desc = registry.cpu_target
typing_context = target_desc.typing_context
target_context = target_desc.target_context
elif target == 'cuda':
# triple = 'nvptx64-nvidia-cuda'
target_desc = nb.cuda.descriptor.CUDATargetDesc
typing_context = target_desc.typingctx
target_context = target_desc.targetctx
else:
raise NotImplementedError(repr(target))
flags = nb.compiler.Flags()
flags.set('no_compile')
flags.set('no_cpython_wrapper')
if not isinstance(sig, dict):
sig = {func.__name__: sig}
main_mod = llvm.parse_assembly('source_filename="{}"'
.format(inspect.getsourcefile(func)))
main_mod.name = func.__name__
for fname, _sig in sig.items():
args, return_type = sigutils.normalize_signature(_sig)
cres = nb.compiler.compile_extra(typingctx=typing_context,
targetctx=target_context,
func=func,
args=args,
return_type=return_type,
flags=flags,
locals=locals)
# C wrapper
fndesc = cres.fndesc
module = cres.library.create_ir_module(fndesc.unique_name)
context = cres.target_context
ll_argtypes = [context.get_value_type(ty) for ty in args]
ll_return_type = context.get_value_type(return_type)
wrapty = ir.FunctionType(ll_return_type, ll_argtypes)
wrapfn = module.add_function(wrapty, fname)
builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))
fnty = context.call_conv.get_function_type(return_type, args)
fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)
status, out = context.call_conv.call_function(
builder, fn, return_type, args, wrapfn.args)
builder.ret(out)
cres.library.add_ir_module(module)
cres.library._optimize_final_module()
cres.library._final_module.verify()
cres.library._finalized = True
llvmir = cres.library.get_llvm_str()
main_mod.link_in(llvm.parse_assembly(llvmir), preserve=True)
# todo: return AST as well
return str(main_mod)
def get_triple(llvm_ir):
return re.search(r'target\s+triple\s*=\s*"(?P<triple>[-\d\w\W_]+)"\s*$',
llvm_ir, re.M).group('triple')
def create_execution_engine(triple):
target = llvm.Target.from_triple(triple)
target_machine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
return engine
def compile_ir(engine, llvm_ir):
# Create a LLVM module object from the IR
mod = llvm.parse_assembly(llvm_ir)
mod.verify()
engine.add_module(mod)
engine.finalize_object()
engine.run_static_constructors()
return mod
def compile(llvm_ir):
engine = create_execution_engine(get_triple(llvm_ir))
compile_ir(engine, llvm_ir)
return engine
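# Hedged end-to-end usage sketch (example only; `_add` and its signature are
# assumptions, not part of this module's API): compile a small function to
# host IR with the helpers above, JIT it, and call it through ctypes.
if __name__ == '__main__':
    import ctypes

    def _add(a: 'int64', b: 'int64') -> 'int64':
        return a + b

    _engine = compile(get_llvm_ir(_add, target='host'))
    _fptr = _engine.get_function_address('_add')
    _cadd = ctypes.CFUNCTYPE(
        ctypes.c_int64, ctypes.c_int64, ctypes.c_int64)(_fptr)
    assert _cadd(2, 3) == 5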
|
import contextlib
import os
import tempfile


@contextlib.contextmanager
def temp_workspace():
    """Run the body of a ``with`` block inside a fresh temporary directory."""
    home = os.getcwd()
    with tempfile.TemporaryDirectory() as temp:
        try:
            os.chdir(temp)
            yield
        finally:
            os.chdir(home)
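# Minimal usage sketch: files created inside the block land in a throwaway
# directory that is removed on exit, and the caller's cwd is restored.
#
#     with temp_workspace():
#         with open('scratch.txt', 'w') as f:
#             f.write('temporary')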
/**
* Formats the given expected string to a format matching the current locale setting.
* The given string shall use tabulation before each line of the message.
*/
private static String localize(final Level level, final String expected) {
final String levelToReplace = level.getName();
final String levelLocalized = level.getLocalizedName();
assertTrue(expected, expected.startsWith(levelToReplace));
final int margin = MonolineFormatter.levelWidth(null);
final StringBuilder buffer = new StringBuilder(expected.length() + 40)
.append(levelLocalized)
.append(CharSequences.spaces(margin - levelLocalized.length()))
.append(expected, levelToReplace.length() + 1, expected.length());
final String spaces = Strings.CONTINUATION_MARK
+ CharSequences.spaces(margin - 1).toString();
int positionOfLast = -1;
for (int i=margin; (i=buffer.indexOf("\n\t", i)) >= 0; i += margin) {
buffer.replace(positionOfLast = ++i, i+1, spaces);
}
if (positionOfLast >= 0) {
buffer.setCharAt(positionOfLast, Strings.CONTINUATION_END);
}
return buffer.toString();
} |
def performance(self, data, labels):
    """Return the first metric reported by ``evaluate`` for the given data."""
    return self.evaluate(data, labels)[0]
const __safari = require('../safari');
require('./safari')(__safari); |
AUBURN, N.Y. -- Cassie Mattes was inside the Auburn Domino's Saturday night when she noticed a man "smash his body into the door."
The 26-year-old assistant manager watched as the man picked himself up and moved away from the restaurant.
"It caught my attention," she said.
Mattes said the man appeared to be highly intoxicated as he staggered into the street.
But what she saw next is something she says she will never forget:
At 11:24 p.m., that same man -- later identified by Auburn police as off-duty New York City Police Officer Michael Cerrato -- fired a gun twice at a Domino's deliveryman driving past the restaurant, witnesses said.
"I said, 'Oh my God' and ran to the back and called 911," Mattes said.
The Domino's deliveryman, Cory Parsons, said he saw the man in the road as he drove past Domino's on Dill Street after delivering pizza at the Hilton Garden Inn. But he had no clue the man had just shot in his direction as he drove to his next delivery on Elizabeth Street in Auburn, he said.
"I saw him waving something black, but I didn't realize what it was," said Parson, 21, of Syracuse. "I thought he had a glove on and was waving at me to slow down."
But Parsons said he was only driving about 30 mph to 35 mph.
"I just passed him and then I heard, 'pop,'" Parsons said. "I thought my tire popped."
Parsons didn't think much of it and continued to the traffic light at Dill and North streets. He stopped. As he turned right onto North Street, Parsons said he saw a flash of light in his rear view mirror.
"I thought it was a firecracker," he said.
As soon as he delivered the pizza and returned to his car, his phone rang. It was Mattes. She told Parsons someone had shot at him and that he needed to get back to Domino's. Auburn police were already there.
"That's when I instantly realized what the pops were and what the guy was doing waving at me," Parsons said.
The popping sound wasn't his tire. And the flash of light wasn't a firecracker.
"It's crazy," he said. "He stared right at me."
Both Parsons and Mattes said they got a good look at the shooter, who was still nearby when police arrived.
Auburn police say they arrested Cerrato, 29, of Pomona in Rockland County, and charged him with first-degree reckless endangerment, a felony, and second-degree menacing, a misdemeanor.
The Citizen reported that, according to court papers, witnesses said Cerrato appeared highly intoxicated.
Cerrato, who is currently suspended without pay from the NYPD, is accused of shooting a 9mm Glock handgun in the direction of an occupied car outside Domino's, at 19 Dill St. in Auburn, police said.
Parsons said his Honda Accord was not struck by the bullets and he was not hurt.
Police did not say what led up to the shots being fired. And Parsons said he has never seen or heard of Cerrato before the incident.
Cerrato was training with the National Guard in the area, the Auburn Police Department told the Citizen. (The New York Army National Guard's 102nd Military Police Battalion is based in Auburn.)
After he was arraigned on the charges, Cerrato was sent to the Cayuga County Jail. He was later released, records show.
Parsons, who has worked at Domino's for the past two years, said he doesn't feel as safe since his car was shot at. But it's also made him think about how precious life is.
"Don't take life for granted and cherish every day like it's your last," he said. "My family could have been going to a funeral right now. It makes you appreciate life when you go through something like that."
Mattes, who has worked at Domino's for the past six years, said the situation has made her emotional.
"I feel bad," she said. "I understand even the best people in life can make the worst decisions."
But the events that unfolded Saturday still run through her head.
"It's made me think of my life and what I'm going through, and his life and what he's about to go through," she said.
"Is he thinking about us like we're thinking about him?" she asked. "All I'm looking for is an apology for what we went through. Now we're so scared to be at our jobs, a place we felt so safe at prior to this incident." |
def writeContigSizes(genome, outfile):
    """Write a tab-separated table of contig names and sizes for `genome`.

    Assumes the CGAT ``IOTools`` and ``IndexedFasta`` modules and the
    pipeline's ``PARAMS`` dictionary are available in the enclosing module.
    """
    outf = IOTools.openFile(outfile, "w")
    fasta = IndexedFasta.IndexedFasta(
        os.path.join(PARAMS["genome_dir"], genome))
    for contig, size in fasta.getContigSizes(with_synonyms=False).items():
        outf.write("%s\t%i\n" % (contig, size))
    outf.close()
/**
 * Frees up memory for a building. Fields shared with the prototype are left
 * alone; only copies owned by this instance are freed.
 *
 * See also: olc_delete_building
 *
 * @param bld_data *bdg The building to free.
 */
void free_building(bld_data *bdg) {
bld_data *proto = building_proto(GET_BLD_VNUM(bdg));
struct interaction_item *interact;
if (GET_BLD_NAME(bdg) && (!proto || GET_BLD_NAME(bdg) != GET_BLD_NAME(proto))) {
free(GET_BLD_NAME(bdg));
}
if (GET_BLD_TITLE(bdg) && (!proto || GET_BLD_TITLE(bdg) != GET_BLD_TITLE(proto))) {
free(GET_BLD_TITLE(bdg));
}
if (GET_BLD_ICON(bdg) && (!proto || GET_BLD_ICON(bdg) != GET_BLD_ICON(proto))) {
free(GET_BLD_ICON(bdg));
}
if (GET_BLD_COMMANDS(bdg) && (!proto || GET_BLD_COMMANDS(bdg) != GET_BLD_COMMANDS(proto))) {
free(GET_BLD_COMMANDS(bdg));
}
if (GET_BLD_DESC(bdg) && (!proto || GET_BLD_DESC(bdg) != GET_BLD_DESC(proto))) {
free(GET_BLD_DESC(bdg));
}
if (GET_BLD_EX_DESCS(bdg) && (!proto || GET_BLD_EX_DESCS(bdg) != GET_BLD_EX_DESCS(proto))) {
free_extra_descs(&GET_BLD_EX_DESCS(bdg));
}
if (GET_BLD_INTERACTIONS(bdg) && (!proto || GET_BLD_INTERACTIONS(bdg) != GET_BLD_INTERACTIONS(proto))) {
while ((interact = GET_BLD_INTERACTIONS(bdg))) {
GET_BLD_INTERACTIONS(bdg) = interact->next;
free(interact);
}
}
if (GET_BLD_SCRIPTS(bdg) && (!proto || GET_BLD_SCRIPTS(bdg) != GET_BLD_SCRIPTS(proto))) {
free_proto_scripts(&GET_BLD_SCRIPTS(bdg));
}
if (GET_BLD_YEARLY_MAINTENANCE(bdg) && (!proto || GET_BLD_YEARLY_MAINTENANCE(bdg) != GET_BLD_YEARLY_MAINTENANCE(proto))) {
free_resource_list(GET_BLD_YEARLY_MAINTENANCE(bdg));
}
free(bdg);
} |
def adjust_table_speed(kwargs=None):
    # Callback (likely connected to a UI signal); closes over `self` from the
    # enclosing scope. Scales the configured default joystick speed by the
    # slider's percentage value and pushes the result to the table controller.
    speed = int(
        float(self.variables.devices_dict["Table_control"]["default_joy_speed"])
        / 100.0
        * float(self.table_move_ui.Table_speed.value())
    )
    self.variables.table.set_joystick_speed(float(speed))