package com.paypal.butterfly.extensions.api;
import java.util.List;
/**
* Marker interface for every type that can be assigned as
* the parent of a {@link com.paypal.butterfly.extensions.api.TransformationUtility}
*
* @author facarvalho
*/
public interface TransformationUtilityParent {
/**
* Return the name of this transformation utility parent
*
* @return the name of this transformation utility parent
*/
String getName();
/**
* Return an immutable list of all children
*
* @return an immutable list of all children
*/
List<TransformationUtility> getChildren();
}
def write_trial_result(self,
experiment: Experiment,
response_correct: bool,
response_time: int) -> None:
curr_trial = experiment.get_current_trial()
if curr_trial is None:
raise ValueError("Experiment is finished. No trial to apply result.")
curr_block = experiment.blocks[experiment.current_block_id]
data = [experiment.subject_name,
experiment.keyboard_key_for_presented,
experiment.keyboard_key_for_absent,
experiment.current_block_id + 1,
curr_block.condition_name,
experiment.current_trial_id + 1,
curr_trial.targets_number,
1 if curr_trial.target_is_presented else 0,
1 if curr_trial.target_orientation_is_vertical else 0,
1 if response_correct else 0,
response_time]
        self.__write_row__(data)
/**
 * App state that loads the six galaxy cube-map textures, builds a sky box
 * from them, and attaches it to the root node while enabled.
 *
 * @author Paul Speed
 */
public class SkyState extends BaseAppState
{
private Spatial sky;
public SkyState() {
}
@Override
protected void initialize( Application app ) {
Texture texture1 = app.getAssetManager().loadTexture("Textures/galaxy+Z.jpg");
Texture texture2 = app.getAssetManager().loadTexture("Textures/galaxy-Z.jpg");
Texture texture3 = app.getAssetManager().loadTexture("Textures/galaxy+X.jpg");
Texture texture4 = app.getAssetManager().loadTexture("Textures/galaxy-X.jpg");
Texture texture5 = app.getAssetManager().loadTexture("Textures/galaxy+Y.jpg");
Texture texture6 = app.getAssetManager().loadTexture("Textures/galaxy-Y.jpg");
sky = SkyFactory.createSky(app.getAssetManager(),
texture1, texture2,
texture3, texture4,
texture5, texture6);
}
@Override
protected void cleanup( Application app ) {
}
@Override
protected void onEnable() {
((SimpleApplication)getApplication()).getRootNode().attachChild(sky);
}
@Override
protected void onDisable() {
sky.removeFromParent();
}
}
def prefix(self, prefix):
if prefix is not None and len(prefix) < 1:
raise ValueError("Invalid value for `prefix`, length must be greater than or equal to `1`")
        self._prefix = prefix
package com.github.mvp4g.nalu.client.handler;
import com.github.mvp4g.nalu.client.application.IsContext;
import org.gwtproject.event.shared.SimpleEventBus;
public abstract class AbstractHandler<C extends IsContext>
implements IsHandler {
protected C context;
protected SimpleEventBus eventBus;
public AbstractHandler() {
super();
}
public void setContext(C context) {
this.context = context;
}
public void setEventBus(SimpleEventBus eventBus) {
this.eventBus = eventBus;
}
}
"A calorie is a calorie" is a tautology used to convey the speaker's conviction that the concept of the "calorie" is in fact a sufficient way to describe energy content of food.
The tautological phrase means that regardless of the form of food calorie a person consumes (whether a carbohydrate, protein or fat calorie) the energetic value of such a calorie, is identical to any other. One dietary calorie contains 4,184 joules of energy. With this knowledge, it is easy to assume that all calories have equal value.[1]
However, good human nutrition measures foods for other values than just energy in calories.[citation needed]
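For illustration, the unit conversion above is a single multiplication. Here is a minimal Python sketch (the constant and function name are ours, purely illustrative):

JOULES_PER_DIETARY_CALORIE = 4184  # 1 dietary Calorie (kcal) = 4,184 J, per the figure above

def dietary_calories_to_joules(kcal):
    """Convert dietary Calories (kcal) to joules."""
    return kcal * JOULES_PER_DIETARY_CALORIE

print(dietary_calories_to_joules(500))  # 2092000 J in a 500-Calorie meal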
History
In 1878, German nutritionist Max Rubner crafted what he called the "isodynamic law".[2] The law claims that the basis of nutrition is the exchange of energy,[3] and was applied to the study of obesity in the early 1900s by Carl von Noorden. Von Noorden had two theories about what caused people to develop obesity. The first simply affirmed Rubner's notion that "a calorie is a calorie". The second theorized that obesity development depends on how the body partitions calories for either use or storage.[2] Since 1925, the calorie has been defined in terms of the joule; the definition was revised in 1948, making one calorie equal to approximately 4.2 joules.[4]
The related concept of "calorie in, calorie out" is contested,[5] and despite having become a commonly held and frequently referenced belief in nutritionism, the implications associated with "a calorie is a calorie" are still being debated.[6][7][8] The wisdom and effects of skipping meals in an attempt to limit caloric intake are also still widely debated.[9][10][11]
Calorie counting
Calorie amounts found on food labels are based on the Atwater system.[12] The accuracy of the system is disputed, though no real alternative has been proposed. For example, a 2012 study by a USDA scientist concluded that the measured energy content of a sample of almonds was 32% lower than the estimated Atwater value.[13] Furthermore, it is known that some calories are lost in waste, without ever having been chemically converted or stored. The driving mechanism behind caloric intake is absorption, which occurs largely in the small intestine and distributes nutrients to the circulatory and lymphatic capillaries by means of osmosis, diffusion and active transport. Fat, in particular, is emulsified by bile produced by the liver and stored in the gallbladder, from which it is released to the small intestine via the bile duct. A relatively smaller amount of absorption, composed primarily of water, occurs in the large intestine.
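As a rough illustration of how a label value is derived, here is a minimal Python sketch of the Atwater general-factor calculation (4 kcal/g for protein and carbohydrate, 9 kcal/g for fat); the serving figures below are made up:

def atwater_calories(protein_g, fat_g, carb_g):
    """Estimate food energy (kcal) from the Atwater general factors."""
    return 4 * protein_g + 9 * fat_g + 4 * carb_g

# Hypothetical 28 g serving of almonds:
estimate = atwater_calories(protein_g=6.0, fat_g=14.0, carb_g=6.0)
print(estimate)  # 174.0 kcal; measured energy can be lower, as the almond study found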
Facts
One dietary Calorie contains 4,184 joules of energy. The human body is a highly complex biochemical system that undergoes processes which regulate energy balance. The metabolic pathways for protein are less efficient than the metabolic pathways for carbohydrates and fat.[citation needed] Protein contains four calories per gram, although a large part of those calories is lost as heat when metabolised by the body.[1]
It may be easy to consume 500 calories' worth of ice cream or chocolate in one sitting, although it may be difficult to eat 500 calories of eggs or carrots in one sitting.[1]
/* $Id: VBoxAutostart.h $ */
/** @file
* VBoxAutostart - VirtualBox Autostart service.
*/
/*
* Copyright (C) 2012-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef __VBoxAutostart_h__
#define __VBoxAutostart_h__
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include <iprt/getopt.h>
#include <iprt/types.h>
#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/com/com.h>
#include <VBox/com/VirtualBox.h>
/*******************************************************************************
* Constants And Macros, Structures and Typedefs *
*******************************************************************************/
/**
* Config AST node types.
*/
typedef enum CFGASTNODETYPE
{
/** Invalid. */
CFGASTNODETYPE_INVALID = 0,
/** Key/Value pair. */
CFGASTNODETYPE_KEYVALUE,
/** Compound type. */
CFGASTNODETYPE_COMPOUND,
/** List type. */
CFGASTNODETYPE_LIST,
/** 32bit hack. */
CFGASTNODETYPE_32BIT_HACK = 0x7fffffff
} CFGASTNODETYPE;
/** Pointer to a config AST node type. */
typedef CFGASTNODETYPE *PCFGASTNODETYPE;
/** Pointer to a const config AST node type. */
typedef const CFGASTNODETYPE *PCCFGASTNODETYPE;
/**
* Config AST.
*/
typedef struct CFGAST
{
/** AST node type. */
CFGASTNODETYPE enmType;
/** Key or scope id. */
char *pszKey;
/** Type dependent data. */
union
{
/** Key value pair. */
struct
{
/** Number of characters in the value - excluding terminator. */
size_t cchValue;
/** Value string - variable in size. */
char aszValue[1];
} KeyValue;
/** Compound type. */
struct
{
/** Number of AST node entries in the array. */
unsigned cAstNodes;
/** AST node array - variable in size. */
struct CFGAST *apAstNodes[1];
} Compound;
/** List type. */
struct
{
/** Number of entries in the list. */
unsigned cListEntries;
/** Array of list entries - variable in size. */
char *apszEntries[1];
} List;
} u;
} CFGAST, *PCFGAST;
/** Flag whether we are in verbose logging mode. */
extern bool g_fVerbose;
/** Handle to the VirtualBox interface. */
extern ComPtr<IVirtualBox> g_pVirtualBox;
/** Handle to the session interface. */
extern ComPtr<ISession> g_pSession;
/** Handle to the VirtualBox client interface. */
extern ComPtr<IVirtualBoxClient> g_pVirtualBoxClient;
/**
* System log type.
*/
typedef enum AUTOSTARTLOGTYPE
{
/** Invalid log type. */
AUTOSTARTLOGTYPE_INVALID = 0,
/** Log info message. */
AUTOSTARTLOGTYPE_INFO,
/** Log error message. */
AUTOSTARTLOGTYPE_ERROR,
/** Log warning message. */
AUTOSTARTLOGTYPE_WARNING,
/** Log verbose message, only if verbose mode is activated. */
AUTOSTARTLOGTYPE_VERBOSE,
/** Famous 32bit hack. */
AUTOSTARTLOGTYPE_32BIT_HACK = 0x7fffffff
} AUTOSTARTLOGTYPE;
/**
* Log messages to the system and release log.
*
* @returns nothing.
* @param pszMsg Message to log.
* @param enmLogType Log type to use.
*/
DECLHIDDEN(void) autostartSvcOsLogStr(const char *pszMsg, AUTOSTARTLOGTYPE enmLogType);
/**
* Print out progress on the console.
*
* This runs the main event queue every now and then to prevent piling up
* unhandled things (which doesn't cause real problems, just makes things
* react a little slower than in the ideal case).
*/
DECLHIDDEN(HRESULT) showProgress(ComPtr<IProgress> progress);
/**
* Converts the machine state to a human readable string.
*
* @returns Pointer to the human readable state.
* @param enmMachineState Machine state to convert.
* @param fShort Flag whether to return a short form.
*/
DECLHIDDEN(const char *) machineStateToName(MachineState_T enmMachineState, bool fShort);
/**
* Parse the given configuration file and return the interesting config parameters.
*
* @returns VBox status code.
* @param pszFilename The config file to parse.
* @param ppCfgAst Where to store the pointer to the root AST node on success.
*/
DECLHIDDEN(int) autostartParseConfig(const char *pszFilename, PCFGAST *ppCfgAst);
/**
* Destroys the config AST and frees all resources.
*
* @returns nothing.
* @param pCfgAst The config AST.
*/
DECLHIDDEN(void) autostartConfigAstDestroy(PCFGAST pCfgAst);
/**
* Return the config AST node with the given name or NULL if it doesn't exist.
*
* @returns Matching config AST node for the given name or NULL if not found.
* @param pCfgAst The config AST to search.
* @param pszName The name to search for.
*/
DECLHIDDEN(PCFGAST) autostartConfigAstGetByName(PCFGAST pCfgAst, const char *pszName);
/**
* Main routine for the autostart daemon.
*
* @returns exit status code.
* @param pCfgAst Config AST for the startup part of the autostart daemon.
*/
DECLHIDDEN(RTEXITCODE) autostartStartMain(PCFGAST pCfgAst);
/**
* Main routine for the autostart daemon when stopping virtual machines
* during system shutdown.
*
* @returns exit status code.
* @param pCfgAst Config AST for the shutdown part of the autostart daemon.
*/
DECLHIDDEN(RTEXITCODE) autostartStopMain(PCFGAST pCfgAst);
/**
* Logs a verbose message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param va Format argument list.
*/
DECLHIDDEN(void) autostartSvcLogVerboseV(const char *pszFormat, va_list va);
/**
* Logs a verbose message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param ... Format arguments.
*/
DECLHIDDEN(void) autostartSvcLogVerbose(const char *pszFormat, ...);
/**
* Logs a warning message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param va Format argument list.
*/
DECLHIDDEN(void) autostartSvcLogWarningV(const char *pszFormat, va_list va);
/**
* Logs a warning message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param ... Format arguments.
*/
DECLHIDDEN(void) autostartSvcLogWarning(const char *pszFormat, ...);
/**
* Logs an info message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param va Format argument list.
*/
DECLHIDDEN(void) autostartSvcLogInfoV(const char *pszFormat, va_list va);
/**
* Logs an info message to the appropriate system log.
*
* @param pszFormat The log string. No trailing newline.
* @param ... Format arguments.
*/
DECLHIDDEN(void) autostartSvcLogInfo(const char *pszFormat, ...);
/**
* Logs the error message to the appropriate system log.
*
* In debug builds this will also put it in the debug log.
*
* @param pszFormat The log string. No trailing newline.
* @param va Format argument list.
*
* @todo This should later be replaced by the release logger and callback destination(s).
*/
DECLHIDDEN(void) autostartSvcLogErrorV(const char *pszFormat, va_list va);
/**
* Logs the error message to the appropriate system log.
*
* In debug builds this will also put it in the debug log.
*
* @param pszFormat The log string. No trailing newline.
* @param ... Format arguments.
*
* @todo This should later be replaced by the release logger and callback destination(s).
*/
DECLHIDDEN(void) autostartSvcLogError(const char *pszFormat, ...);
/**
* Deals with RTGetOpt failure, bitching in the system log.
*
* @returns RTEXITCODE_FAILURE.
* @param pszAction The action name.
* @param rc The RTGetOpt return value.
* @param argc The argument count.
* @param argv The argument vector.
* @param iArg The argument index.
* @param pValue The value returned by RTGetOpt.
*/
DECLHIDDEN(RTEXITCODE) autostartSvcLogGetOptError(const char *pszAction, int rc, int argc, char **argv, int iArg, PCRTGETOPTUNION pValue);
/**
* Bitch about too many arguments (after RTGetOpt stops) in the system log.
*
* @returns RTEXITCODE_FAILURE.
* @param pszAction The action name.
* @param argc The argument count.
* @param argv The argument vector.
* @param iArg The argument index.
*/
DECLHIDDEN(RTEXITCODE) autostartSvcLogTooManyArgsError(const char *pszAction, int argc, char **argv, int iArg);
/**
* Prints an error message to the screen.
*
* @param pszFormat The message format string.
* @param va Format arguments.
*/
DECLHIDDEN(void) autostartSvcDisplayErrorV(const char *pszFormat, va_list va);
/**
* Prints an error message to the screen.
*
* @param pszFormat The message format string.
* @param ... Format arguments.
*/
DECLHIDDEN(void) autostartSvcDisplayError(const char *pszFormat, ...);
/**
* Deals with RTGetOpt failure.
*
* @returns RTEXITCODE_FAILURE.
* @param pszAction The action name.
* @param rc The RTGetOpt return value.
* @param argc The argument count.
* @param argv The argument vector.
* @param iArg The argument index.
* @param pValue The value returned by RTGetOpt.
*/
DECLHIDDEN(RTEXITCODE) autostartSvcDisplayGetOptError(const char *pszAction, int rc, int argc, char **argv, int iArg, PCRTGETOPTUNION pValue);
/**
* Bitch about too many arguments (after RTGetOpt stops).
*
* @returns RTEXITCODE_FAILURE
* @param pszAction The action name.
* @param argc The argument count.
* @param argv The argument vector.
* @param iArg The argument index.
*/
DECLHIDDEN(RTEXITCODE) autostartSvcDisplayTooManyArgsError(const char *pszAction, int argc, char **argv, int iArg);
DECLHIDDEN(int) autostartSetup();
DECLHIDDEN(void) autostartShutdown();
#endif /* __VBoxAutostart_h__ */
/** Removes an XPath Query item from the saved cache.
*
* @param searchItem String containing the user text string to remove.
*
*/
public void removeSavedXPathQuerySearch( String searchItem ) {
removeFindSearch( searchItem,
"XPathQueries",
getSavedXPathQueries() );
}
"""
Provide a comment after each right curly bracket.
This rule only recognizes single line type comments.
== Violation ==
} <== Violation. No comment
== Good ==
} // if <== OK. Comment of bracket
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, line, lineno) :
if Match("^ *};* *$", line) :
nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, line, lineno, 0), __name__, 'Right curly brackets must be commented.')
ruleManager.AddLineRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddLineRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
void function(int k, int j, int pp)
{
%s
}
""" % ("d"*121))
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
void function(int k, int j, int pp)
{
%s
%s
} // function
""" % ("d"*119, " "*130))
assert not CheckErrorContent(__name__)
Initiative sponsored by Carnegie Council and led by UN climate veteran aims to break silence around geo-technologies and explore their potential
By Ed King
Ban Ki-moon’s former climate advisor is to lead a project to develop rules for geoengineering, amid fears current efforts to slow global warming are insufficient.
Janos Pasztor, who served with Ban at the UN from 2008-2012 and 2015-2016, will launch the Carnegie Climate Geoengineering Governance initiative (C2G2) in New York on 16 February.
Speaking from Nairobi where he was discussing his plans with officials at the UN Environment Programme, Pasztor said all options to tackle climate change should be discussed.
“As much as the Paris [climate] Agreement was a major step forward we know that even if all countries meet their targets we’re still looking at warming of 3C,” he tells Climate Home.
“To assume it will be 100% [successful] is not realistic, and we are saying to reach these ambitious goals we need to think seriously on what else to add in to massive mitigation efforts.
“Some scientists are saying this is not enough, and we should consider solar radiation management to make sure we don’t go beyond 1.5C to 2C. These are plausible scenarios and we need to think seriously about all options.”
The term geoengineering covers a wide range of technologies and proposals. These include spraying fine particles into the atmosphere to filter rays from the sun and fertilising oceans with iron filings to promote growth of carbon-sucking organisms.
Less exciting but currently more realistic are vast tree planting schemes and capturing emissions from burning bioenergy crops (BECCS).
The problem, Pasztor explains, is that many of these technologies have potentially planet-altering consequences, and there are few rules in place to govern basic experiments.
“There is hardly anything,” he says, pointing to the London Protocol as an example. It regulates the dumping of “materials” into the ocean for geoengineering purposes but aside from that offers little guidance.
Late last year the UN's biodiversity body extended its warning against large-scale geoengineering, although it did urge countries to cooperate on future research projects.
“There is practically no real discussion in climate risk management. There is a debate in the scientific community but little in the policy community,” he says.
“That’s where we need the big change. We need to shift debate from academia to policy communications at international level in order to encourage government action.”
Using some geoengineering techniques could buy the planet time to fight climate change, says Pasztor, but don’t expect quick results.
He sees this as a 5-year project, one that will take him around the world as he builds a picture of how governments and policymakers see this vexatious issue and what they want to do about it.
This week he meets UNEP director Erik Solheim, next stop is India to work out how officials in Delhi could feed into rule-making.
And there’s a far bigger issue lurking.
Countries taking an active interest in radical climate technologies include the US, Germany, UK, China and Japan – but the impacts of firing mini-mirrors into the atmosphere may not be equal everywhere.
A 2013 study by a team at the UK Met Office revealed that while the release of fine particles in the Northern Hemisphere stratosphere could manage solar radiation, it would also cause a massive drought in the Sahel.
“Geo-technologies may produce a global good of somehow improving the climate… but there could be local impacts and these could be quite bad and negative. How do you deal with a global good but some end up suffering more than others?” asks Pasztor.
“I could envisage an agreement where we decide to do cloud seeding and recognise Sahel will be hit and triple development assistance to the region to make sure water wells are dug and whatever else is needed to counteract negative impacts.”
Still, he’s preparing for a rough ride. Opposition to the use of land in developing countries for energy crops is intense, suspicion over how and who would “seed the clouds” with sun-blocking particles rages across social media (conspiracies galore on twitter at #chemtrails).
Many argue the priority should be mitigation technologies; Pasztor calls this a "false argument," since he too agrees the priority must be cutting carbon.
This is not an either-or, he contends, but it must be an option.
“It could possibly give a breathing space for decarbonisation or make sure that if we overshoot [the 2C warming limit], we don’t overshoot for too long,” he says.
“Government officials, intergovernmental officials, some of my craziest conservationist friends… they agree unanimously we must deal with this.
“They’re not all pro – some are very much against it. But all agree it must be discussed, we need a dialogue and to bring it to the level where politicians can deal with it.” |
/**
* Creates the replacement instruction for the given index in the
* instruction sequence.
*/
public Instruction create(Clazz clazz, CodeAttribute codeAttribute, int index)
{
int matchedInstructionIndex =
index < instructionSequenceMatcher.instructionCount() ?
index :
instructionSequenceMatcher.instructionCount() - 1;
int matchedInstructionOffset =
instructionSequenceMatcher.matchedInstructionOffset(matchedInstructionIndex);
replacementInstructions[index].accept(clazz,
null,
codeAttribute,
matchedInstructionOffset,
this);
return replacementInstruction;
}
"""Denormalize Request.payout
Revision ID: 3e5e1d3a02c
Revises: 5<PASSWORD>
Create Date: 2014-07-26 21:25:29.870535
"""
# revision identifiers, used by Alembic.
revision = '3e5e1d3a02c'
down_revision = '5<PASSWORD>'
from decimal import Decimal
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import update, select, table, column, join, outerjoin, case
from sqlalchemy.sql.functions import func
request = table('request',
column('id', sa.Integer),
column('base_payout', sa.Numeric(precision=15, scale=2)),
column('payout', sa.Numeric(precision=15, scale=2)))
mod_table = table('modifier',
column('id', sa.Integer),
column('request_id', sa.Integer),
column('_type', sa.String(length=20)),
column('voided_user_id', sa.Integer))
abs_table = table('absolute_modifier',
column('id', sa.Integer),
column('value', sa.Numeric(precision=15, scale=2)))
rel_table = table('relative_modifier',
column('id', sa.Integer),
column('value', sa.Numeric(precision=8, scale=5)))
def upgrade():
op.add_column('request',
sa.Column('payout', sa.Numeric(precision=15, scale=2), index=True,
nullable=True))
bind = op.get_bind()
absolute = select([abs_table.c.value.label('value'),
mod_table.c.request_id.label('request_id')])\
.select_from(join(abs_table, mod_table,
mod_table.c.id == abs_table.c.id))\
.where(mod_table.c.voided_user_id == None)\
.alias()
relative = select([rel_table.c.value.label('value'),
mod_table.c.request_id.label('request_id')])\
.select_from(join(rel_table, mod_table,
mod_table.c.id == rel_table.c.id))\
.where(mod_table.c.voided_user_id == None)\
.alias()
abs_sum = select([request.c.id.label('request_id'),
request.c.base_payout.label('base_payout'),
func.sum(absolute.c.value).label('sum')])\
.select_from(outerjoin(request, absolute,
request.c.id == absolute.c.request_id))\
.group_by(request.c.id)\
.alias()
rel_sum = select([request.c.id.label('request_id'),
func.sum(relative.c.value).label('sum')])\
.select_from(outerjoin(request, relative,
request.c.id == relative.c.request_id))\
.group_by(request.c.id)\
.alias()
total_sum = select([abs_sum.c.request_id.label('request_id'),
((
abs_sum.c.base_payout +
case([(abs_sum.c.sum == None, Decimal(0))],
else_=abs_sum.c.sum)) *
(
1 +
case([(rel_sum.c.sum == None, Decimal(0))],
else_=rel_sum.c.sum))).label('payout')])\
.select_from(join(abs_sum, rel_sum,
abs_sum.c.request_id == rel_sum.c.request_id))
payouts = bind.execute(total_sum)
for request_id, payout in payouts:
up = update(request).where(request.c.id == request_id).values(
payout=payout)
bind.execute(up)
op.alter_column('request', 'payout', nullable=False,
existing_type=sa.Numeric(precision=15, scale=2))
def downgrade():
op.drop_column('request', 'payout')
1. When he basked in the glow of these lights and his hair and his face were all adorable and beautiful. Thomas Peter / Reuters
2. Whenever he smiles his quirky smile. Jason Merritt / Getty Images
3. When he asked if he could have sex with you like a proper gentleman. drake-ramoray.tumblr.com
5. When he grew this weird mustache and somehow still looked amazing. Twitter: @johnkrasinski
7. When he emerges from the shadows and all you see is his face and it's perfect. Vittorio Zunino Celotto / Getty Images
9. When he grew this beard and made this face and wore these sunglasses. Twitter: @johnkrasinski
10. When his facial hair game was pretty much always perfect. Jason Merritt / Getty Images
13. And when he sat like this while wearing a Spiderman costume. drake-ramoray.tumblr.com
18. When he stuck his butt out and walked like a dinosaur. fancynewbeesly.tumblr.com
19. When he wore this black jacket and looked beautiful. Jason Merritt / Getty Images
23. When he was a real asshole but still painfully cute. andrewblahfield.tumblr.com
24. Basically whenever he's in a suit. Jeffrey Ufberg / WireImage
32. When he looked sexy in a white T-shirt on the beach. daniel-nava.tumblr.com
33. Basically when he looked sexy doing anything. Matt Carr / Getty Images
34. Because he's John Krasinski and he's beautiful and amazing and the most perfect man alive. Kevork Djansezian / Getty Images
// SpawnClientInstances spawns instances of clients of the service to be tested.
func SpawnClientInstances(t *testing.T, clientCount int, listenerCreator ChannelListenerCreator, clientName, serviceName string) ([]*EndpointMockInstance, *sync.WaitGroup) {
var result []*EndpointMockInstance
wg := &sync.WaitGroup{}
for i := 0; i < clientCount; i++ {
wg.Add(2)
serverEndpoint := NewChannelMockEndpoint()
clientEndpoint := NewChannelMockEndpoint()
serverChannel := NewChannelMock(serverEndpoint, clientEndpoint)
serverListener, err := listenerCreator(t)
require.NoError(t, err, "unexpected failure when creating server listener")
server := NewFullDuplex(serverListener, serverChannel, serviceName+" -> "+clientName+" : "+strconv.Itoa(i))
go func() {
defer wg.Done()
defer server.Close()
server.Run()
}()
clientChannel := NewChannelMock(clientEndpoint, serverEndpoint)
clientListener := NewChannelListenerMock()
client := &EndpointMockInstance{
Channel: clientChannel,
Listener: clientListener,
Conn: NewFullDuplex(clientListener, clientChannel, clientName+" -> "+serviceName+" : "+strconv.Itoa(i)),
}
go func() {
defer wg.Done()
client.Conn.Run()
}()
result = append(result, client)
}
	time.Sleep(50 * time.Millisecond)
return result, wg
}
by Adam Armstrong
HPE Announces All-Flash For As Little As 3¢/GB/Month
Hewlett Packard Enterprise (HPE) today announced HPE 3PAR Flash Now, a new initiative around flexible storage consumption models and accelerating all-flash data center adoption. HPE is also announcing an update to its StoreFabric 32Gb (Gen 6) Fibre Channel portfolio for faster networking, and it has entered a partnership with Spectra Logic for enhanced data protection.
Though it has been around for roughly 35 years, flash has had a tremendous impact on the industry over the last few years, going from a niche application accelerator to the predominant storage choice for certain applications and workloads. Many companies have come to market with all-flash devices, including HPE with its 3PAR StoreServ Storage platform. The main barrier to broader flash adoption is price: when customers see that they can save costs and scale extremely large in the cloud, they tend to go that route. HPE is looking for a way to bridge flash adoption and the cloud.
HPE's new 3PAR Flash Now initiative is a way to provide customers with all-flash technology on-premises for as little as $0.03 per usable GB per month. Aside from a lower cost of flash making it easier to adopt, HPE is also bringing embedded data protection and application availability, as well as security and control for the data kept on-premises, through programs such as HPE Flexible Capacity and Pre-Provisioning.
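To put the headline rate in concrete terms, here is a quick back-of-the-envelope calculation as a minimal Python sketch (the capacity figure is a made-up example, not an HPE quote):

PRICE_PER_USABLE_GB_MONTH = 0.03  # headline rate from the announcement

usable_gb = 100 * 1024  # hypothetical 100 TiB of usable capacity
monthly_cost = usable_gb * PRICE_PER_USABLE_GB_MONTH
print("$%.2f/month" % monthly_cost)  # $3072.00/month at the quoted rate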
Customer benefits include:
Optimize cash flow by deferring payments until their new all-flash array is up and running or injecting cash via asset trade-in
Accelerate service delivery by taking advantage of automated data migration tools at no cost or opt for tailored migration services
Improve service levels through HPE’s 99.9999% availability guarantee, and by including flash-optimized networking and data protection
Minimize refresh risk with built-in non-disruptive upgrade to next-gen technologies such as storage class memory and NVMe
Maintain control over data locality, avoid long-term lock-in and eliminate the costs and complications of reclaiming data from the cloud
An all-flash data center is only as good as the connection into it. To address this, HPE has completely updated its StoreFabric 32Gb (Gen 6) Fibre Channel portfolio, making it a key part of the 3PAR Flash Now initiative. The portfolio features HPE Smart SAN technology, which enables fully automated orchestration directly from HPE 3PAR StoreServ flash storage arrays. Automated orchestration can save hours of manual work, reduce the risk of human error, and cut provisioning time by up to 90%.
HPE will be working with Spectra Logic through a new reseller relationship. Both companies believe that this relationship will help drive down costs of long-term storage to as little as $0.01/GB.
Availability
HPE 3PAR Flash Now, HPE StoreFabric 32Gb (Gen 6) Fibre Channel portfolio, and the new Spectra Enterprise Libraries are all available now.
/**
* CatHW_Async.java
*
*
* An "hardware" class that acts as the master in which all the other "hardware" classes run
* through.
*
* This is NOT an OpMode. This class is used to define all the other hardware classes.
* This hardware class assumes the following device names have been configured on the robot.
*
*
* @author FTC Team #10273, The Cat in the Hat Comes Back.
*/
public class CatHW_Async
{
/**
* Attribute that is used to tell the robot through all the other classes whether it is on the
* red or blue alliance.
*/
public static boolean isRedAlliance = true;
public static boolean isLeftAlliance = false;
/** Local OpMode members. */
HardwareMap hwMap = null;
LinearOpMode opMode = null;
private static CatHW_Async myInstance = null;
/** Other Hardware subSystems */
CatHW_Jaws jaws = null;
CatHW_DriveOdo drive = null;
CatHW_Vision eyes = null;
CatHW_Lights lights = null;
CatHW_Carousel carousel = null;
/* Constructor */
public CatHW_Async() {}
public static CatHW_Async getInstance(){
return myInstance;
}
/**
* Initialize all the standard Hardware interfaces as well as all the subsystem hardware
* classes.
*
* @param ahwMap is the robot's hardware map object.
     * @param theOpMode for Linear OpMode usage.
     * @param useVuforia whether Vuforia should be used when initializing the vision system.
* @throws InterruptedException in case of error.
*/
public void init(HardwareMap ahwMap, LinearOpMode theOpMode, boolean useVuforia)
throws InterruptedException {
// Save a reference to hardware map and opMode
hwMap = ahwMap;
opMode = theOpMode;
myInstance = this;
// Give Telemetry for each system we begin to init:
opMode.telemetry.addData("Initialize", "Jaws...");
opMode.telemetry.update();
jaws = new CatHW_Jaws(this);
jaws.init();
opMode.telemetry.addData("Initialize", "Tail...");
opMode.telemetry.update();
//tail = new CatHW_Tail(this);
//tail.init();
opMode.telemetry.addData("Initialize", "Claw...");
opMode.telemetry.update();
//claw = new CatHW_Claw(this);
//claw.init();
opMode.telemetry.addData("Initialize", "Launcher...");
opMode.telemetry.update();
opMode.telemetry.addData("Initialize", "Lights...");
opMode.telemetry.update();
lights = CatHW_Lights.getInstanceAndInit(this);
lights.init();
opMode.telemetry.addData("Initialize", "Drive...");
opMode.telemetry.update();
drive = new CatHW_DriveOdo(this);
drive.init();
opMode.telemetry.addData("Initialize", "Vision...");
opMode.telemetry.update();
eyes = new CatHW_Vision(this);
eyes.initVision(hwMap, useVuforia);
opMode.telemetry.addData("Initialize", "All Done... BOOM!");
opMode.telemetry.update();
opMode.telemetry.addData("Initialize", "Carousel...");
opMode.telemetry.update();
carousel = new CatHW_Carousel(this);
carousel.init();
}
//----------------------------------------------------------------------------------------------
// Common Miscellaneous Methods:
//----------------------------------------------------------------------------------------------
/**
* Method which will pause the robot's action for so many seconds. Used for actions that could
* either take time or when some part of the robot just needs to wait.
*
* @param seconds that the robot's systems will be delayed.
*/
public void robotWait(double seconds) {
ElapsedTime delayTimer = new ElapsedTime();
while (opMode.opModeIsActive() && (delayTimer.seconds() < seconds)) {
opMode.idle();
//eyes.updateVuforia();
}
}
}
t = int(input())
answer = []
ans = []
counter = 0
for a in range(t):
counter = 0
n = int(input())
ans = [0 for d in range(n)]
line = list(input())
for b in range(len(line)-n+1):
current = [line[c] for c in range(b, b+n)]
#print(current)
check = int(ans[counter])|int(current[counter])
# print(current[counter])
if(str(check) == current[counter]):
ans[counter] = current[counter]
counter += 1
else:
while(str(check) != current[counter]):
counter += 1
check = ans[counter]|int(current[counter])
answer.append(''.join(map(str, ans)) )
for i in answer:
print(i)
package defs
const (
FILE_DIR = "./file/"
MAX_UPLOAD_SIZE = 1024 * 1024 * 50
)
package com.ruoyi.activiti.listener;
import lombok.extern.slf4j.Slf4j;
import org.activiti.engine.delegate.DelegateTask;
import org.activiti.engine.delegate.TaskListener;
@Slf4j
public class SetAssignListener implements TaskListener {
@Override
public void notify(DelegateTask delegateTask) {
log.debug("上一个任务的执行人assigneeUser: {}", delegateTask.getVariable("assigneeUser"));
//
}
}
// Convert a HWCART communicator to a native MPI_Cart communicator.
// Dimension order will be set to MPI order (ZYX). All standard MPI_Cart functions
// (MPI_Cart_shift, MPI_Cart_sub, MPI_Cart_rank, etc.) can be used on mpicart_com.
int hwcart2mpicart(MPI_Comm hwcart_comm, int nlevels, int *topo, int *periodic, hwcart_order_t cart_order, MPI_Comm *mpicart_comm_out)
{
int ii, gdim[3] = {1,1,1};
for(ii=0; ii<nlevels; ii++){
gdim[0] *= topo[ii*3+0];
gdim[1] *= topo[ii*3+1];
gdim[2] *= topo[ii*3+2];
}
if(cart_order != HWCartOrderZYX) {
int coord[3], comm_rank, mpi_rank;
HWCART_MPI_CALL( MPI_Comm_rank(hwcart_comm, &comm_rank) );
hwcart_rank2coord(hwcart_comm, gdim, comm_rank, cart_order, coord);
hwcart_coord2rank(hwcart_comm, gdim, periodic, coord, HWCartOrderZYX, &mpi_rank);
MPI_Comm temp_comm;
HWCART_MPI_CALL( MPI_Comm_split(hwcart_comm, 0, mpi_rank, &temp_comm) );
HWCART_MPI_CALL( MPI_Cart_create(temp_comm, 3, gdim, periodic, 0, mpicart_comm_out) );
HWCART_MPI_CALL( MPI_Comm_free(&temp_comm) );
} else {
HWCART_MPI_CALL( MPI_Cart_create(hwcart_comm, 3, gdim, periodic, 0, mpicart_comm_out) );
}
return 0;
}
/**
 * Check whether an object is empty, i.e. whether every field of the object is null or blank.
 *
 * @param object containing various fields
 * @return true if the object is null or no field holds a non-blank value; false if at least one field does
 */
public static boolean isEmpty(Object object) {
if (object == null) {
return true;
}
Object fieldValue;
String stringValue;
for (Field field : object.getClass().getDeclaredFields()) {
field.setAccessible(true);
try {
fieldValue = field.get(object);
} catch (IllegalArgumentException | IllegalAccessException exception) {
throw new ApplicationServerException(exception);
}
field.setAccessible(false);
stringValue = fieldValue == null ? null : fieldValue.toString();
if (StringUtils.isNotBlank(stringValue)) {
return false;
}
}
return true;
}
use super::super::api;
use std::collections::HashSet;
use std::mem;
pub type Selector<T> = dyn Fn(&api::DataSet<T>, api::Feature, usize) -> Vec<api::Number>;
fn number_bound<T: api::RecordMeta>(record: &T, feature: api::Feature, bound: api::Number) -> bool {
match record.number_value(feature) {
Some(value) => value > bound,
None => false
}
}
pub fn bounds_criterions<T: api::RecordMeta>(data: &api::DataSet<T>, selector: &Selector<T>, n: usize) -> Vec<Box<api::Criterion<T>>> {
let mut result: Vec<Box<api::Criterion<T>>> = Vec::new();
for f in 0..data.records[0].0.feature_count() {
if data.records[0].0.feature_type(f) == api::FeatureType::Number {
let bounds = selector(data, f, n);
for bound in bounds {
result.push(Box::new(move |rec: &T| number_bound(rec, f, bound)));
result.push(Box::new(move |rec: &T| !number_bound(rec, f, bound)));
}
}
}
result
}
pub fn uniform_selector<T: api::RecordMeta>(data: &api::DataSet<T>, feature: api::Feature, n: usize) -> Vec<api::Number> {
let mut set: HashSet<u64> = HashSet::new();
let mut result = Vec::new();
let mut i = 0;
for (j, record) in data.records.iter().enumerate() {
if i != j % n {
match record.0.number_value(feature) {
Some(v) if !v.is_nan() => set.insert(unsafe{mem::transmute(v)}),
_ => false
};
i = j % n;
}
}
for packed in set {
result.push(unsafe{mem::transmute(packed)});
}
result
}
H, W, K = map(int, input().split())
S = [list(map(int, list(input()))) for _ in range(H)]
tab_h = [[0] * (W + 1) for _ in range(H)]
for i in range(H):
cnt = 0
for j in range(W):
cnt += S[i][j]
tab_h[i][j + 1] = cnt
# tab_v = [[0] * (H + 1) for _ in range(W)]
# for j in range(W):
# cnt = 0
# for i in range(H):
# cnt += S[i][j]
# tab_v[i + 1][j] = cnt
ans = 1000000007
for case in range(2 ** (H - 1)):
case_tmp = case
iscut_v = []
for i in range(H - 1):
iscut_v.append(case_tmp % 2)
case_tmp >>= 1
#
parts = []
vsrc = 0
for vdst in range(1, H + 1):
if vdst == H:
parts.append((vsrc, vdst))
break
elif iscut_v[vdst - 1] == 1:
parts.append((vsrc, vdst))
vsrc = vdst
else:
continue
#
possible = False
hsrc = 0
iscut_h = [0] * (W - 1)
for hdst in range(1, W + 1):
num_1 = 0
for vsrc, vdst in parts:
num_1_tmp = 0
for i in range(vsrc, vdst):
num_1_tmp += tab_h[i][hdst] - tab_h[i][hsrc]
num_1 = max(num_1, num_1_tmp)
# print("##", hdst, (vsrc, vdst), num_1_tmp)
#
if num_1 > K:
if hdst == 1 or hdst - 1 == hsrc:
break
else:
iscut_h[hdst - 2] = 1
hsrc = hdst - 1
else:
continue
else:
possible = True
#
if possible:
ans_tmp = sum(iscut_v) + sum(iscut_h)
# print("#", iscut_v, iscut_h, ans_tmp)
ans = min(ans, ans_tmp)
print(ans)
Sorry about the delay. It is a crazy time of the year, and the hardest part is finding enough time to actually get back to ponies and to this style. It's funny that the part that says "patience, my little ponies" is the one making people wait. It wasn't on purpose, I swear. So... yeah, as I said before, it's crazy how a simple wish to give my character a bit of a background has led me to actually struggling for quality, worrying about all the components of a comic, and learning about comics (perfect opportunity!)... I know it's not so good; "critics" can dance happily finding tons of flaws on each page. Go ahead, grab your rotten tomatoes. Anyway, it's nice to visit Pinkie for a bit of cheer after a long and stressful day, and hear once again "how Equestria was made". Will Twilight find out who's the culprit?! Has Ditsy already recovered to deliver her mail?! Will the Crusaders hear more boring stories from the adult ponies?! Find out in the next strip-set... or not, this artist is too lazy to draw fast.
import { Injectable } from '@angular/core';
import { AuthService } from '../shared/services';
import { OdbcService } from '../shared/services';
@Injectable()
export class User {
private username: string;
private password: string;
private name: string;
private family_name: string;
private groups: any;
private PGP_PAT_NOMBRE: any;
private skin: any;
private color: any;
private PGP_CLAFE_URL_LOGO_APLICACION: any;
private PER_NUMERO_DOCUMENTO: any;
private PGP_PROM_NOMBRE_CORTO: any;
public auth_token: string;
constructor(private authService:AuthService, private odbc:OdbcService){
this.set();
}
set(){
const currentUser = this.authService.userAttributes();
if ( currentUser ) {
this.username = currentUser["cognito:username"];
this.name = currentUser["name"];
this.family_name = currentUser["family_name"];
this.groups = currentUser["cognito:groups"];
this.odbc.getOdbcInfo(this.username).subscribe (
(data) => {
console.log( data );
if ( data ) {
this.PGP_PAT_NOMBRE = data.PGP_PAT_NOMBRE;
this.skin = data.skin;
this.color = data.color;
this.PGP_CLAFE_URL_LOGO_APLICACION = data.PGP_CLAFE_URL_LOGO_APLICACION;
this.PER_NUMERO_DOCUMENTO = data.PER_NUMERO_DOCUMENTO;
this.PGP_PROM_NOMBRE_CORTO = data.PGP_PROM_NOMBRE_CORTO
}
},
(error) => { console.log(error); },
);
}
}
get(){
return this;
}
hasRole( role: string ) {
return (this.Groups.indexOf(role) >= 0);
}
selectHome(){
return ( this.hasRole('colciencias') || this.hasRole('potencial') ) ;
}
set AuthToken( token: string ) {
localStorage.setItem('cognito_token', token);
}
get AuthToken(){
return localStorage.getItem('cognito_token');
}
get Username(){
return this.username;
}
get Password(){
return <PASSWORD>;
}
get Name(){
return this.name
}
get FamilyName(){
return this.family_name;
}
get Groups(){
return this.groups
}
}
use crate::env::Env;
use crate::ffi::*;
use deno_core::v8;
#[napi_sym]
fn napi_throw_type_error(
env: napi_env,
code: *const c_char,
msg: *const c_char,
) -> Result {
let mut env = &mut *(env as *mut Env);
// let code = CStr::from_ptr(code).to_str().unwrap();
let msg = CStr::from_ptr(msg).to_str().unwrap();
// let code = v8::String::new(env.scope, code).unwrap();
let msg = v8::String::new(env.scope, msg).unwrap();
let error = v8::Exception::type_error(env.scope, msg);
env.scope.throw_exception(error);
Ok(())
}
#ifndef SWEEP_TO_POINT_CLOUD_H
#define SWEEP_TO_POINT_CLOUD_H
#include <cmath>
#include <vector>
#include <string>
#include <boost/shared_ptr.hpp>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <auro_lidar_msgs/LidarSweep.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
typedef pcl::PointCloud<pcl::PointXYZI> PointCloudXYZI;
class SweepToPointCloud
{
public:
SweepToPointCloud();
void spin();
void callback(auro_lidar_msgs::LidarSweep::ConstPtr const & cloudPtr);
PointCloudXYZI::PointType polarToCart(auro_lidar_msgs::LidarSweepPoint const & ls_point,float v_angle);
private:
//ROS pub/sub/service
ros::NodeHandle m_nh;
ros::Subscriber m_lidar_sweep_sub;
ros::Publisher m_point_cloud_pub;
};
#endif
import test = require('tape');
import {
MuBoolean,
MuUTF8,
MuFloat32,
MuDate,
MuArray,
MuSortedArray,
MuVector,
MuDictionary,
MuStruct,
MuUnion,
} from '../index';
test('primitive.clone()', (t) => {
const bool = new MuBoolean();
t.equal(bool.clone(true), true);
t.equal(bool.clone(false), false);
const utf8 = new MuUTF8();
t.equal(utf8.clone(''), '');
t.equal(
utf8.clone('<a href="https://github.com/mikolalysenko/mudb/">mudb</a>'),
'<a href="https://github.com/mikolalysenko/mudb/">mudb</a>',
);
t.equal(utf8.clone('Iñtërnâtiônàlizætiøn☃💩'), 'Iñtërnâtiônàlizætiøn☃💩');
const float32 = new MuFloat32();
t.equal(float32.clone(0), 0);
t.equal(float32.clone(-0.5), -0.5);
t.equal(float32.clone(3.1415927410125732), 3.1415927410125732);
t.end();
});
test('data.clone()', (t) => {
const date = new MuDate();
const moment = date.alloc();
const instant = date.clone(moment);
t.deepEqual(moment, instant);
t.notEqual(moment, instant);
t.end();
});
test('array.clone()', (t) => {
const array = new MuArray(new MuFloat32(), Infinity);
let a = array.alloc();
t.notEqual(array.clone(a), a);
t.deepEqual(array.clone(a), a);
a = [0.5];
t.deepEqual(array.clone(a), a);
a = [0.5, 1.5];
t.deepEqual(array.clone(a), a);
const nestedArray = new MuArray(
new MuArray(new MuFloat32(), Infinity),
Infinity,
);
let na = nestedArray.alloc();
t.deepEqual(nestedArray.clone(na), na);
na = [[]];
t.deepEqual(nestedArray.clone(na), na);
t.notEqual(nestedArray.clone(na)[0], na[0]);
na = [[0.5]];
t.deepEqual(nestedArray.clone(na), na);
na = [[0.5, 1.5]];
t.deepEqual(nestedArray.clone(na), na);
na = [[0.5, 1.5], [0.5, 1.5]];
t.deepEqual(nestedArray.clone(na), na);
t.end();
});
test('sortedArray.clone()', (t) => {
const array = new MuSortedArray(new MuFloat32(), Infinity);
let a = array.alloc();
t.notEqual(array.clone(a), a);
t.deepEqual(array.clone(a), a);
a = [0.5];
t.deepEqual(array.clone(a), a);
a = [0.5, 1.5];
t.deepEqual(array.clone(a), a);
const nestedArray = new MuSortedArray(
new MuSortedArray(new MuFloat32(), Infinity),
Infinity,
);
let na = nestedArray.alloc();
t.deepEqual(nestedArray.clone(na), na);
na = [[]];
t.deepEqual(nestedArray.clone(na), na);
t.notEqual(nestedArray.clone(na)[0], na[0]);
na = [[0.5]];
t.deepEqual(nestedArray.clone(na), na);
na = [[0.5, 1.5]];
t.deepEqual(nestedArray.clone(na), na);
na = [[0.5, 1.5], [0.5, 1.5]];
t.deepEqual(nestedArray.clone(na), na);
t.end();
});
test('vector.clone()', (t) => {
const vector = new MuVector(new MuFloat32(), 2);
const v = vector.alloc();
t.notEqual(vector.clone(v), v);
t.deepEqual(vector.clone(v), v);
v[0] = 0.5;
v[1] = 1.5;
t.deepEqual(vector.clone(v), v);
t.end();
});
test('dictionary.clone()', (t) => {
const dictionary = new MuDictionary(new MuFloat32(), Infinity);
const d = dictionary.alloc();
t.notEqual(dictionary.clone(d), d);
t.deepEqual(dictionary.clone(d), d);
d['a'] = 0.5;
t.deepEqual(dictionary.clone(d), d);
d['b'] = 1.5;
t.deepEqual(dictionary.clone(d), d);
const nestedDictionary = new MuDictionary(
new MuDictionary(new MuFloat32(), Infinity),
Infinity,
);
const nd = nestedDictionary.alloc();
t.deepEqual(nestedDictionary.clone(nd), nd);
nd['a'] = {f: 0.5};
t.deepEqual(nestedDictionary.clone(nd), nd);
t.notEqual(nestedDictionary.clone(nd)['a'], nd['a']);
nd['b'] = {f: 0.5, g: 1.5};
t.deepEqual(nestedDictionary.clone(nd), nd);
t.end();
});
test('struct.clone()', (t) => {
const struct = new MuStruct({
b: new MuBoolean(),
u: new MuUTF8(),
f: new MuFloat32(),
});
const s = struct.alloc();
t.notEqual(struct.clone(s), s);
t.deepEqual(struct.clone(s), s);
s.b = true;
s.u = 'Iñtërnâtiônàlizætiøn☃💩';
s.f = 0.5;
t.deepEqual(struct.clone(s), s);
const nestedStruct = new MuStruct({
s1: new MuStruct({
b: new MuBoolean(),
u: new MuUTF8(),
f: new MuFloat32(),
}),
s2: new MuStruct({
b: new MuBoolean(),
u: new MuUTF8(),
f: new MuFloat32(),
}),
});
const ns = nestedStruct.alloc();
t.deepEqual(nestedStruct.clone(ns), ns);
t.notEqual(nestedStruct.clone(ns).s1, ns.s1);
ns.s1.b = true;
ns.s1.u = 'Iñtërnâtiônàlizætiøn☃💩';
ns.s1.f = 0.5;
t.deepEqual(nestedStruct.clone(ns), ns);
t.end();
});
test('union.clone()', (t) => {
const stringOrFloat = new MuUnion({
u: new MuUTF8(),
f: new MuFloat32(),
});
const sf = stringOrFloat.alloc();
t.notEqual(stringOrFloat.clone(sf), sf);
t.deepEqual(stringOrFloat.clone(sf), sf);
sf.type = 'u';
sf.data = 'Iñtërnâtiônàlizætiøn☃💩';
t.deepEqual(stringOrFloat.clone(sf), sf);
sf.type = 'f';
sf.data = 0.5;
t.deepEqual(stringOrFloat.clone(sf), sf);
const union = new MuUnion({
s: new MuStruct({
b: new MuBoolean(),
u: new MuUTF8(),
f: new MuFloat32(),
}),
}, 's');
const u = union.alloc();
t.notEqual(union.clone(u), u);
t.notEqual(union.clone(u).data, u.data);
t.deepEqual(union.clone(u), u);
u.data.b = true;
u.data.u = 'Iñtërnâtiônàlizætiøn☃💩';
u.data.f = 0.5;
t.deepEqual(union.clone(u), u);
t.end();
});
def normalize_element(element, element_signature=None):
  """Normalizes a nested structure of elements into tensor-like components."""
normalized_components = []
if element_signature is None:
components = nest.flatten(element)
flattened_signature = [None] * len(components)
pack_as = element
else:
flattened_signature = nest.flatten(element_signature)
components = nest.flatten_up_to(element_signature, element)
pack_as = element_signature
with ops.name_scope("normalize_element"):
from tensorflow.python.data.ops import dataset_ops
for i, (t, spec) in enumerate(zip(components, flattened_signature)):
try:
if spec is None:
spec = type_spec_from_value(t, use_fallback=False)
except TypeError:
normalized_components.append(
ops.convert_to_tensor(t, name="component_%d" % i))
else:
if isinstance(spec, sparse_tensor.SparseTensorSpec):
normalized_components.append(sparse_tensor.SparseTensor.from_value(t))
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
normalized_components.append(
ragged_tensor.convert_to_tensor_or_ragged_tensor(
t, name="component_%d" % i))
elif isinstance(
spec, (tensor_array_ops.TensorArraySpec, dataset_ops.DatasetSpec)):
normalized_components.append(t)
elif isinstance(spec, NoneTensorSpec):
normalized_components.append(NoneTensor())
elif isinstance(t, composite_tensor.CompositeTensor):
normalized_components.append(t)
else:
dtype = getattr(spec, "dtype", None)
normalized_components.append(
ops.convert_to_tensor(t, name="component_%d" % i, dtype=dtype))
  return nest.pack_sequence_as(pack_as, normalized_components)
package watcher
import (
"context"
"git.henghajiang.com/backend/api_gateway_v2/core/routing"
"git.henghajiang.com/backend/api_gateway_v2/core/utils"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/hhjpin/goutils/logger"
"time"
)
type Watcher interface {
Put(key, val string, isCreate bool) error
Delete(key string) error
BindTable(table *routing.Table)
GetTable() *routing.Table
GetWatchChan() clientv3.WatchChan
Ctx() context.Context
Refresh()
}
var Mapping map[Watcher]clientv3.WatchChan
func watch(w Watcher, c clientv3.WatchChan) {
defer func() {
if err := recover(); err != nil {
stack := utils.Stack(3)
logger.Errorf("[Recovery] %s panic recovered:\n%s\n%s", utils.TimeFormat(time.Now()), err, stack)
}
// restart watch func
go watch(w, c)
}()
for {
select {
case <-w.Ctx().Done():
logger.Error(w.Ctx().Err())
w.Refresh()
c = w.GetWatchChan()
Mapping[w] = c
goto Over
case resp := <-c:
if resp.Canceled {
logger.Warnf("watch canceled")
logger.Error(w.Ctx().Err())
w.Refresh()
c = w.GetWatchChan()
Mapping[w] = c
goto Over
}
table := w.GetTable()
if len(resp.Events) > 0 {
for _, evt := range resp.Events {
switch evt.Type {
case mvccpb.PUT:
table.PushWatchEvent(routing.WatchMsg{
Handle: func() routing.WatchMsgFunc {
key := string(evt.Kv.Key)
value := string(evt.Kv.Value)
return func() {
if err := w.Put(key, value, evt.IsCreate()); err != nil {
logger.Error(err)
}
}
}(),
})
case mvccpb.DELETE:
table.PushWatchEvent(routing.WatchMsg{
Handle: func() routing.WatchMsgFunc {
key := string(evt.Kv.Key)
return func() {
if err := w.Delete(key); err != nil {
logger.Error(err)
}
}
}(),
})
default:
logger.Warnf("unrecognized event type: %d", evt.Type)
continue
}
}
}
}
}
Over:
logger.Debugf("watch task finished")
}
func Watch(wch map[Watcher]clientv3.WatchChan) {
for k, v := range wch {
if k.GetTable() == nil {
panic("watcher does not bind to routing table")
}
go watch(k, v)
}
}
import { default as groupBy } from "lodash/groupBy"
import parseISO from "date-fns/parseISO"
import { Gender, getGender } from "./getGender"
import { Credit } from "./Credit"
import { Movie } from "./Movie"
import { Photo, Poster, Backdrop } from "./Images"
import { SocialMedia } from "./SocialMedia"
import { TV } from "./TV"
import {
createNullable,
limitResults,
mapToCredits,
mapToModel,
Resolver
} from "../resolvers/utils"
type Member = "cast" | "crew"
const getCredits = (
member: Member
// eslint-disable-next-line no-use-before-define
): Resolver<Person, { limit?: number }, Promise<(Movie | TV)[]>> => async (
{ _credits },
{ limit, ...rest },
{ dataSources, language },
info
) => {
const roles =
_credits[member].slice(0, limit ? limit : _credits[member].length) || []
const grouped = groupBy(roles, `media_type`)
const results = await Promise.all([
Promise.all(
      (grouped.movie || []).map(({ id }) =>
dataSources.TMDB.movie({ id, language, ...rest }, info)
)
),
Promise.all(
      (grouped.tv || []).map(({ id }) =>
dataSources.TMDB.tv({ id, language, ...rest }, info)
)
)
])
return results.flat()
}
export class Person {
// eslint-disable-next-line no-undef
[key: string]: any
id!: string
// General Details
name!: string
aliases: string[]
knownFor: string
biography!: string
gender: Gender
birthday: Date | null
birthplace: string
diedOn: Date | null
// Social
homepage: URL | null
socialMedia: SocialMedia | null
// Credits
static credits: Resolver<
Person,
{ limit: number; mediaType: ("movie" | "tv")[] },
Promise<Credit[]>
> = (parent, { limit, mediaType }) => {
const { cast, crew } = mapToCredits(parent._credits, parent)
return limitResults(
limit,
new Promise((resolve) =>
resolve(
[...cast, ...crew].filter(({ _mediaType }) =>
mediaType.includes(_mediaType)
)
)
)
)
}
static appearsIn = getCredits(`cast`)
static workedOn = getCredits(`crew`)
// Media
photo: Photo | null
images: Photo[]
taggedImages: (Poster | Backdrop)[]
constructor({
also_known_as: aliases,
known_for: knownFor,
gender,
birthday,
place_of_birth: birthplace,
deathday: diedOn,
homepage,
external_ids: socialMedia,
profile_path: photo,
combined_credits: credits,
images,
tagged_images: taggedImages,
...rest
}: Person) {
Object.assign(this, rest)
this.aliases = aliases
this.knownFor = knownFor
this.gender = getGender((gender as unknown) as number | undefined)
this.birthday = birthday ? parseISO((birthday as unknown) as string) : null
this.birthplace = birthplace
this.diedOn = diedOn ? parseISO(diedOn) : null
this.homepage = homepage ? new URL((homepage as unknown) as string) : null
this.socialMedia = createNullable(socialMedia, SocialMedia)
this.photo = createNullable(photo, Photo)
const { profiles } = (images as unknown) as Record<string, Poster[]>
this._credits = credits
this.images = mapToModel(profiles, Photo)
const {
backdrops = [],
posters = []
} = (taggedImages as unknown) as Record<string, (Backdrop | Poster)[]>
this.taggedImages = [
mapToModel(backdrops, Backdrop),
mapToModel(posters, Poster)
].flat() as (Poster | Backdrop)[]
}
}
class DefaultSegment:
"""DefaultSegment represents a particluar segment of a JPEG file.
This class is instantiated by JpegFile when parsing Jpeg files
and is not intended to be used directly by the programmer. This
base class is used as a default which doesn't know about the internal
structure of the segment. Other classes subclass this to provide
extra information about a particular segment.
"""
def __init__(self, marker, fd, data, mode):
"""The constructor for DefaultSegment takes the marker which
identifies the segments, a file object which is currently positioned
at the end of the segment. This allows any subclasses to potentially
extract extra data from the stream. Data contains the contents of the
segment."""
self.marker = marker
self.data = data
self.mode = mode
self.fd = fd
self.code = jpeg_markers.get(self.marker, ('Unknown-{}'.format(self.marker), None))[0]
assert mode in ["rw", "ro"]
if self.data is not None:
self.parse_data(data)
class InvalidSegment(Exception):
"""This exception may be raised by sub-classes in cases when they
can't correctly identify the segment."""
pass
def write(self, fd):
"""This method is called by JpegFile when writing out the file. It
must write out any data in the segment. This shouldn't in general be
overloaded by subclasses, they should instead override the get_data()
method."""
        fd.write(b'\xff')
fd.write(pack('B', self.marker))
data = self.get_data()
fd.write(pack('>H', len(data) + 2))
fd.write(data)
def get_data(self):
"""This method is called by write to generate the data for this segment.
It should be overloaded by subclasses."""
return self.data
def parse_data(self, data):
"""This method is called be init to parse any data for the segment. It
should be overloaded by subclasses rather than overloading __init__"""
pass
def dump(self, fd):
"""This is called by JpegFile.dump() to output a human readable
representation of the segment. Subclasses should overload this to provide
extra information."""
print >> fd, " Section: [%5s] Size: %6d" % \
(jpeg_markers[self.marker][0], len(self.data)) |
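# A minimal sketch (not part of the original module) of how a subclass is
# intended to hook into DefaultSegment: override parse_data() to decode the
# payload and get_data() to re-serialize it, leaving write() alone. Using the
# JPEG COM (comment, 0xFE) marker and latin-1 decoding here is an
# illustrative assumption only.
class CommentSegment(DefaultSegment):
    """Example handler for the JPEG COM (comment) segment."""

    def parse_data(self, data):
        # Called from DefaultSegment.__init__ with the raw payload bytes.
        self.comment = data.decode('latin-1', errors='replace')

    def get_data(self):
        # Called from DefaultSegment.write() when re-serializing the file.
        return self.comment.encode('latin-1', errors='replace')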
import numpy as np
from warnings import warn


def scale_pdata(dic, data, reverse=False):
    """Scale processed data by the NC_proc scaling exponent.

    By default the scaling is removed (data / scale); pass reverse=True to
    re-apply it (data * scale). If NC_proc is missing from the parameter
    dictionary, the data is returned unscaled with a warning.
    """
    try:
        scale = np.power(2., -float(dic['procs']['NC_proc']))
    except KeyError:
        warn('Unable to scale data, returning unscaled data')
        scale = 1
    if reverse:
        return data * scale
    else:
        return data / scale
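# Illustrative round trip (hypothetical values; the nested dict mirrors the
# shape the function expects):
#
#   dic = {'procs': {'NC_proc': -3}}
#   unscaled = scale_pdata(dic, data)                   # divide out the scaling
#   restored = scale_pdata(dic, unscaled, reverse=True) # multiply it back in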
def terminal_stop_logging(self):
if self.flag_graphics:
print(BRACKET_END)
        self.push()
/// <reference path="..\..\references\references.d.ts" />
module flexportal {
'use strict';
import Session = FlexSearch.DuplicateDetection.Session;
import TargetRecord = FlexSearch.DuplicateDetection.TargetRecord;
export class Duplicate extends FlexSearch.DuplicateDetection.SourceRecord {
    Targets: TargetRecord[]
SourceStatusName: any
FlexSearchId: string
}
export interface ISessionScope extends ng.IScope, IMainScope {
duplicates: Duplicate[]
session: Session
openMatches(dup: Duplicate) : void
duplicatesPromise: ng.IPromise<Duplicate[]>
sessionPromise: ng.IPromise<Session>
// Pagination specific
getPage(pageNumber: number): void
ActivePage: number
PageCount: number
PageSize: number
DupesCount: number
}
interface IRouteParamsService extends angular.ui.IStateParamsService {
id: string
}
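  // Maps a raw FlexSearch document onto the Duplicate view model; the matched
  // target records arrive JSON-encoded in the "targetrecords" field and are
  // deserialized here.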
export function fromDocumentToDuplicate(d: FlexSearch.Core.DocumentDto) {
var dup = new Duplicate();
dup.FlexSearchId = d.Id;
dup.SessionId = d.Fields["sessionid"];
dup.SourceId = d.Fields["sourceid"];
dup.SourceRecordId = d.Fields["sourcerecordid"];
dup.TotalDupes = d.Fields["totaldupesfound"];
dup.SourceStatus = d.Fields["sourcestatus"];
dup.SourceStatusName = toSourceStatusName(parseInt(dup.SourceStatus));
dup.SourceDisplayName = d.Fields["sourcedisplayname"];
dup.Targets = <TargetRecord[]>JSON.parse(d.Fields["targetrecords"]);
return dup;
}
export class SessionController {
/* @ngInject */
constructor($scope: ISessionScope, $stateParams: any, $http: ng.IHttpService, $state: any, datePrinter: any, flexClient: FlexClient) {
var sessionId = $stateParams.sessionId;
$scope.ActivePage = 1;
$scope.PageSize = 50;
// Configure what to do when a match card is clicked
$scope.openMatches = function(dup: Duplicate) {
// Set the subheader for the list to show the clicked item
(<any>$(".pagination .md-subheader-content")).html("<div class='activeDuplicate'>" + dup.SourceDisplayName + " (" + dup.SourceId + ")</div>");
(<any>$(".md-subheader.pagination").show());
// Display the comparison table
$state.go('comparison', {sessionId: dup.SessionId, sourceId: dup.SourceId});
};
// Get the Session Properties
(function(sId) {
// Store the promise on the $scope to be accessed by child controllers
$scope.sessionPromise = flexClient.getSessionBySessionId(sId)
.then(document => {
$scope.session = <Session>JSON.parse(document.Fields["sessionproperties"]);
// Display the session details on the top toolbar
var title =
"Session for " + $scope.session.IndexName
+ " using " + $scope.session.ProfileName + " profile"
+ " started at " + datePrinter.toDateStr($scope.session.JobStartTime)
$scope.setTitle(title);
return $scope.session;
});
})(sessionId);
$scope.getPage = function (pageNumber) {
// Show the progress bar
var progress = $(".duplicate-list md-progress-linear");
progress.show();
// Set the active page
if (pageNumber < 1 || pageNumber > $scope.PageCount) return;
$scope.ActivePage = pageNumber;
// Get the Duplicates
(function(sId) {
// Store the promise on the $scope to make it accessible by child controllers
$scope.duplicatesPromise = flexClient.getDuplicatesFromSession(
sId,
$scope.PageSize,
($scope.ActivePage - 1) * $scope.PageSize,
"sourceid"
)
.then(results => {
$scope.duplicates = results.Documents.map(fromDocumentToDuplicate);
// Set the total number of Duplicates
$scope.DupesCount = results.TotalAvailable;
// Set the number of pages
$scope.PageCount = Math.ceil(results.TotalAvailable / $scope.PageSize);
// Hide the progress bar
progress.hide();
return $scope.duplicates;
});
})(sessionId);
};
// Get the first page
$scope.getPage(1);
}
}
}
For $11 an hour I stocked nonfiction and worked the register at Black Oak Books in Berkeley, a used bookstore otherwise staffed by aging, garrulous intellectuals without institutional affiliation. For $12 an hour I assisted Sam Green, a filmmaker whose first documentary, The Weather Underground, chronicled the radical group from the 1960s responsible for bombing the US Capitol, the Pentagon, and the United States Department of State. The Weathermen always phoned their targets beforehand, after the bomb had been planted, to avoid hurting anybody. I worked these jobs in 2007, before the economic meltdown and the sudden growth of the second tech bubble. It was a pre-Airbnb, pre-Uber, pre-I-can’t-get-a-reservation-anywhere-in-the-Mission-District-on-a-Monday-night San Francisco.
My partner and I moved from a divorcee’s guest room in Berkeley to fulfill a low-income requirement in an unfinished luxury loft of smoothly poured concrete near downtown. We lived there for four months until the unit sold for $1.2 million. We ended up at Artists Television Access, in a room occupied by Divine before the media arts organization with anarchist leanings moved into the building. I socialized with Marxist organizers from the labor union UNITE HERE!, Marxist graduate students from UC Berkeley’s rhetoric program, and Marxist workers from different affinity groups. Walking home from a book-group meeting with members of the Workers International League, I felt a surge of affect, like I was starting to accomplish what I had moved out to San Francisco to do—which was to be political.
After four months my knowledge began to feel unmarketable. I found myself wondering if I would ever be able to afford the objects that adorned my middle-class childhood memories. A job posting on the Bay Area Video Coalition website for a video producer led to an interview with an anonymous company in Mountain View. I was picked up by a middle manager named Bert in a Prius at the Caltrain station. As we drove past the Computer History Museum and into a large corporate campus, it occurred to me that I was competing for a position at Google, and had been, technically, since I’d stepped into the vehicle. Upon exiting, I confronted a chaos of identical, sky-blue cruiser bikes just organized enough to seem suspicious, like a set-piece for first-time visitors.
We entered 1600 Amphitheater Drive through one of many sets of large glass doors, and I halted in front of a row of six digital prints of the Google logo, all on the same 3 × 5 foot canvas, each one done in the style of a different modern master—Monet, Van Gogh, Matisse, Picasso, Dali, and Pollock. The Pollock was a basic Photoshop splatter-brush defacement, while the Monet was an epic travesty: an impressionistic GOOGLE floating nowhere above three lily pads. The Dali was a shotgun marriage between the Persistence of Memory and the famous insignia. The collapsed sense of space and time resonated most with its surroundings. “Yeah, they like to do art projects here,” said Bert impatiently.
After four interviews and a Final Cut Pro test session in which I edited a reel of a company seminar with David "Avocado" Wolfe, the self-described rock star of the superfood and longevity universe, I was hired for a month-long trial period. If I survived I would remain a contracted employee, paid a salary of $34,000 by Mountain View–based Transvideo Studios to work full-time on the Google campus. I would enjoy perks like the endless swimming machine or a private Thai Massage in one of the only rooms in the Googleplex blessed with opaque walls. Too skeptical to make many friends and integrate, I frequently took my electric scooter around campus to systematically sample the offerings at each of the nineteen cafes, and to purify my anus with the arsenal of targeted functions on the Japanese toilets that graced each and every bathroom. I never missed an opportunity to reserve a conference bike for my team.
Shortly after I was hired, white and gray lounge chairs with spherical retractable hoods started to appear in open spaces without any corresponding memo or orientation. These were MetroNaps—sleep machines. On my third spotting I decided to get in, discovering remote controls on the arm that could adjust knee elevation, toggle between “sleep music” tracks, and set an alarm consisting of light and subtle chair vibrations. Unlike the Japanese toilets, MetroNaps weren’t branded with a national culture, authentic or otherwise; instead, they were always already international, produced for the jet-setting elite of the global information technology sector to “improve employee morale while boosting the bottom line.”
A former coworker rests in a MetroNap sleeping pod. Photo courtesy of the author.
The first time I saw Sergey Brin he was gripping a ball not made for sport, but more likely for a child, or a dolphin, or at the very least an office, while talking to two other men in a clearing of personalized work stations. He was too short to have been a quarterback, in high school or anywhere else, but his chest was puffed out from beneath a long-sleeved performance base layer. He sporadically shifted his weight back and forth in royal blue Crocs, moving the toy between his hands, gesturing as he explained something to his less poised colleagues.
The first time I saw Larry Page he was eating alone with his head down in one of the campus’ peripheral cafes. I remember a moist yellow pile on his plate, which could have emerged from any number of cuisines—pan-Asian, Caribbean, Magyar—depending on the mash-up offered at that particular cafe. His blazer suggested he had just given a presentation to outsiders and he looked sort of sick.
One day Barack Obama came to campus and I spoke to him for three minutes. He coincides with an archetype of cool in a political system starved for hipness. I decided that this was the secret to his success. He lets you participate in the cool while subtly convincing you of your own bright future. “I love free pancakes,” I said, too quickly. “Me too, man,” he replied, patting me on the shoulder, “me too.” I didn’t get to talk to Al Gore when he visited for Google’s annual “Zeitgeist” conference, but I felt the wind as he stormed past me down a long hallway and into a bathroom like an animal anxious to shed its skin. I stood there holding my two signed copies of Bill Clinton’s book Giving, one of which I sold to Black Oak Books for $150. I’m still sitting on the other.
One day, after scrubbing the audio on the video of Anthony Bourdain giving a talk as an Author at Google and then exporting a Google Dance event performance by the employee troupe Decadance, I heard a woman screaming from the lobby. It was the type of screaming you might hear at a crowded Verizon store when somebody has just learned the cost of cancelling their contract. This wasn’t a common sound in the corporate offices of a company started by two Montessori/Stanford graduates, where employees take mindfulness-based emotional intelligence courses. A few of us crept towards the lobby to see a woman in a San Jose Sharks jersey confronting our building’s receptionist while clutching a printout of what appeared when she entered her name into Google. She had marched down to the headquarters to demand that the first two search results be removed. She was savvy enough to know the internet was produced and organized somewhere, but like most of us she didn’t fully understand how it worked.
The novelty of the environment evaporated, like a new operating system that doesn't feel new for long. I would line up off Market Street in San Francisco at 7:15 each morning, in order of arrival, with a group of coworkers—mostly men—wearing T-shirts emblazoned with logos for companies like DoubleClick and SurveyMonkey. We tried to keep an open mind regarding the queries, come-ons, and antagonisms of the also-mostly-male homeless community our lines snaked around. The luxury limo shuttle would arrive and take me to the office. There I would sit in front of two Apple Cinema Displays—sometimes editing and making graphics, sometimes mining information to leak to organizer friends. I read Antonio Negri and the luminaries of Italian Autonomist Marxism and anthropological studies of finance like Benjamin Lee and Edward LiPuma's Financial Derivatives and the Globalization of Risk. Render time meant research time, and unlimited printing meant flyers for the events my friends and I would put on at Artists Television Access back in San Francisco. I began to suffer debilitating headaches around 3:00 p.m. and started doing stretches in my building's empty gym during my afternoon break. After 9–12 hours on campus I would fill my Google-issued bag with Naked juices and to-go containers of food for my roommates, before getting back on the shuttle to ride Route 101 back to San Francisco. The nausea would set in when the shuttle pulled onto the freeway.
Something happened every day at 2:15 p.m. outside of the building next to mine. At first it registered as an unusual shape with unusual colors and an unidentifiable cause passing me consistently at the same time everyday. I came to realize that it was the same group of workers, mostly black and Latino, on a campus of mostly white and Asian employees, walking out of the exit like a factory bell had just gone off. Sequestered at the outer limits of campus, they would all get into their own cars: not Google shuttles like the rest of us. Hanging from their belts were yellow badges, a color I had not noticed before amongst the white badges of full-timers, the red badges of contractors, and the green badges of the interns.
Patent 7508978: Google’s proprietary book scanning technology.
I started to obsess a little. I mined all the information about the yellow badges that I could from Google’s intranet, which led me to the internal name for the team—ScanOps. This class of workers, who left the building much like the industrial proletariat of a bygone era, actually performed the Fordist labor of digitization for Google Books—“scanning” printed matter from the area’s university libraries page by page on V-shaped tables with two DSLR cameras mounted overhead. I found some vague meeting notes, probably left visible by accident, about how they would be excluded from all standard privileges like cafes, bikes, shuttles, and even access to other buildings. This was a fairly commonplace result of hierarchical organization at a corporate multinational, but why was this class of workers denied the privileges that even the kitchen and custodial staff had access to, and why did it seem so secretive?
I researched the Lumière Brothers, who presented their workforce in motion as it left a single gate of their factory that produced photographic plates. It was one of the first films ever made. Almost immediately, I wanted to create a similar document, but updated for the intervening century in digital, high-definition color video with sound. And I wanted to contrast the movement of the Google book “factory” workers with other classes of employees to demonstrate how corporate hierarchy scripts different forms of movement. I also wanted to get to know the ScanOps workers, and see how they felt about all this.
One day during lunch I set up a camera and tripod in a few places around the center of campus and recorded white, red, and green badged employees coming and going. The next day I set up in front of the ScanOps building right before the workers’ shift ended, and recorded their exit. The day after, I sat near the Google sign outside the building and introduced myself to a few of them, offering my card and saying that I worked next door and would love to hear more about their work. The following day—almost a year into working at Google—I was fired. Management would say it was for using company video equipment on company time for a personal project. Google’s legal team would say it was for snooping around the legally contentious Google Books project. But I knew the truth. Because, for all the perks, for all the fountains gushing in the sunshine and the embroidered fleece jackets, the on-site medical staff, the flexibility and the ball pit; for all the “don’t be evil” and the free email and the building of accessible infrastructure for the international democracy to come; for all of this, Google remains committed, first last and always, to accumulation. And that means it wasn’t going to let a little thing like structural racism slow its roll. The yellow badge signified “not worth the price of integration,” considering the high turnover rate, the accounts of physical attacks between employees, the criminal records, the widespread lack of credentialed education. It meant getting paid $10 an hour, going to the bathroom only when a bell indicated it was permissible to do so, and being subject to a behavioral point system that could lead to immediate termination, for which the only fix was at special events like the Easter egg hunt, where a small number of eggs contained point removal tickets. Any attempt to draw attention to the fact that this supposedly revolutionary company contained a decidedly unrevolutionary caste system would be dealt with in the old-fashioned way.
Andrew Norman Wilson, Workers Leaving the Googleplex, 2011. 11min03.
The termination of my employment came at an opportune moment. San Francisco was the first truly cosmopolitan place I’d ever lived. After growing up in a cul-de-sac carved into a forest in Massachusetts, I was noticing that the freedoms afforded to artists like my roommates at Artists Television Access were more appealing than the logistic approaches of documentary, activism, and corporate branding and communications. I was picking up ways of framing my documentary and activist work as “social practice” and “relational aesthetics.” In 2009 I ended up at the School of the Art Institute of Chicago in the sculpture department. I read the postcolonial theory of Gayatri Chakravorty Spivak and the constructivist anthropology of Bruno Latour and wrote artist statements where I diagnosed my art like a fascinating new disease, complex and evolving:
My current practice investigates the inner workings of corporate globalization via a direct involvement with the actors, technologies, and organizations that constitute it. In creating this interdisciplinary work, I push the limits of business relationships to extremes that create ruptures and require them to be rethought.
I was quite affected by an idea from The Practice of Everyday Life by Michel de Certeau—“la perruque,” which translates to “wig.” It’s a tactic for enacting resistance in a way that looks like you are just working hard, and what he describes operates more like a computer virus infecting a vast computer program than a revolution.
Independent from coursework, I was reading the blog of San Francisco–based author, entrepreneur, angel investor, public speaker, Chinese kickboxer, tango champion, and lifestyle designer Timothy Ferriss—a shell-company in the flesh. His book The 4-Hour Workweek promoted the outsourcing of tedious tasks to remote personal assistant services in India. I wanted to develop a direct relationship with a member of a corporate middle class subjected to digital sweatshop labor, so I signed up with the service Get Friday in Bangalore. I was paired with a twenty-five-year-old assistant named Akhil. Instead of the Robinson Crusoe, master-slave narrative the company was arranged to reproduce, I paid Akhil to assign me tasks of his devising that, thankfully for the art’s sake, proved to be playful, biographically loaded, and unnecessary. We started by taking our pulse rates and simultaneously charting them in Microsoft Excel. Then Akhil asked me to make a video for him about the best fighter jet in the world. He was a frequent visitor to airshows with his father. I responded with a thirty-three-minute video essay. A promising engineer at an early age, he snail-mailed me pencil drawings of toy boats he had designed and asked me to construct one out of hobby parts and then mail it back to his office. I applied for a Fulbright scholarship to India that would allow me to meet Akhil and produce an exhibition in Get Friday’s offices. My application described the outsourcing relationship as a fluid material that I sought to change the flow of. The Fulbright committee in India rejected it on the grounds of it not being art. Eventually the CEO of Get Friday started to use my project as marketing material to illuminate the company’s progressive corporate values.
Near the end of my studies, I attended a Scholarship Intensive at the Banff Center, a Canadian institution dedicated to the arts, leadership development, and mountain culture. There, I was convinced by a Canadian lawyer to release my Googleplex video, which I had been sitting on for two years because of the nondisclosure and employment termination agreements I had signed. He claimed that because Google Books was already such a legally contentious project when it came to copyright law, and because he imagined many viewers would respond with commiseration, Google wouldn't pursue legal action against an individual with nothing to lose. I took his advice. My eleven-minute drab-core video essay was played over eighty thousand times on the day Gizmodo and Gawker picked it up. Google never responded. Its only public statement was a now-deleted tweet by Marissa Mayer, vice president of Product Search at the time: "Interesting perspective," she wrote, and linked to the video.
Suddenly I had entered the cottage industry of critical art. I had teaching jobs, invitations to speak publicly, and residencies lined up. I won a $20,000 Dedalus Foundation grant and lived off it for a year after school. I moved to New York and presented at conferences alongside artist-activists like Hito Steyerl and showed in exhibitions with Harun Farocki. I met the curator Aily Nash and our conversation about the Googleplex video turned into a curatorial project—Dream Factory/Image Employment—that showed at museums around the world. Aside from a few freelance gigs and some Airbnb hosting, I was able to spend most of my time with my work. People like Akhil in Bangalore and the workers at Google felt further away. I was surrounded by people who agreed with me, or veiled their minor disagreements behind polite professionalism.
Workers of remote personal assistant services screen the video Virtual Assistance—Video Task in the Get Friday office, Bangalore.
Hankering to make more videos, I had grandiose ideas that would require a lot of capital. If I could lure “big picture” Silicon Valley investors—the types that wanted to live forever, or abolish capitalism (or maybe just Google)—I could make that process of seduction part of the endeavor and really wow my audience with layer upon layer of conceptualism. So I started an art project disguised as an actual creative agency called SONE that formalized my economic activity as a contractual laborer, and this process became artistic content. With the help of a few advisors—entrepreneurs themselves—I formulated an executive summary where I described my startup like any other agency trying to distinguish itself:
Our core function is to serve global markets of communicators in advertising, business, art, and journalism with high quality, pre-trend stock photo and video clips that circulate both on the art market and the stock media market through sites like Getty Images. These clips are based on the idea that current offerings of stock imagery through those marketplaces typically present a limited scope of activity, situations, and identity stereotypes. SONE seeks to create alternative representations of finance and business.
Rather quickly the system I had devised became a trap. Not only did I have to make videos that represented economic discontent and uncertainty while fulfilling Getty Images’ guidelines, I had to develop and maintain an unincorporated and severely understaffed business while avoiding parody. The few Silicon Valley investors I spoke with never took me seriously.
After a year of developing the project I was offered a show by Stephan Tanbin Sastrawidjaja at his gallery, Project Native Informant, in London. Since then about half of his program has grown to consist of artist projects like Shanzai Biennial, GCC, DIS, and ÅYR that, similarly to SONE, blur the distinction between commercial and artistic production. The day before the opening I looked around at the videos in the show and at the “Risk Prevention Investment Objects” whose sales would be used to sustain SONE. I had designed a bunch of conceptual art objects into existence as stand-ins for a rhetorical argument. A gap existed between the works sitting in the gallery and what the work was “about,” which was all the invisible processes—the labor of my collaborators, Getty’s process of content approval—running through the work before, during, and after its presentation. In a way, I still felt like I was producing content for Google, but in an even more myopic hall of mirrors. For trained viewers, an engraved private jet windshield might cause a giggle and perhaps a delusional belief that something out there, beyond that gallery in London’s Mayfair district, beyond the art world, was changing. But I just saw a clever snack for the already converted. Meanwhile, the real action of production and consumption chugged along, as one billion obese humans were seduced into pouring flesh-and-bone-dissolving syrups into their bodies as they burned across vast deserts of asphalt. To actually compete with the thousands of other businesses creating stock imagery, it would mean that SONE wouldn’t be art at all anymore, but rather business as usual.
Andrew Norman Wilson, Chase ATM Emitting Blue Smoke, Bank of America ATM Emitting Red Smoke, TD Bank ATM Emitting Green Smoke, 2014. Courtesy the artist and Project Native Informant, London.
I flew to Switzerland a few weeks later to do an Art Basel Salon panel with the curator Melanie Bühler and artist Christopher Kulendran Thomas. I was paid 500 CHF for about fifteen minutes of talking, during which I delivered SONE’s investment proposal to an audience of curators and artists. Christopher talked about his new project Brace Brace (with Annika Kuhlmann), which uses the art market to sell unique luxury goods like life rings for yachts that are at once satirical, metaphorical, and functional. Afterward I used my VIP card to get a free ride in a new BMW 7 Series to the Schaulager museum in Newmünchenstein. I pored over the solo show of American artist, writer, and activist Paul Chan—whose work included a map of the 2004 Republican National Convention for protesters and an animation starring the likenesses of filmmaker Pier Paolo Pasolini and rapper Biggie Smalls that wove together Francisco de Goya’s etchings and a Samuel Beckett play. The careful distinction Chan made between art and activism back in 2008, which had bothered me then, suddenly seemed vital now.
I went home to New York, feeling the buzz of attention and opportunity before slipping into a miasma. I wrote a conspiracy theory with my then-mentee Jane Long, a RISD MFA student. Our theory detailed how the economist Friedrich Hayek had been inspired by his colleague Ludwig von Mises’s Chow Chow to transform the breed into a symbol of neoliberal economics at the inaugural meeting of the Mont Pelerin Society in Geneva in 1947. The text required strenuous research to flesh out the economic theory, history, and policy around the effortless, self-regulating beauty of the Chow—one of the first known dog breeds—and their emergence in ancient times from a spontaneous order possible only through a “free and competitive” wilderness without human intervention. I became frustrated and gave up. I wanted to make things that didn’t require a viewer’s rationalization and instead just haunted them. I started to revel in morbid anxieties and developed quite intuitively a new type of work—objects—centered on questions of absence, inaccessibility, and bodily traces.
The following spring, right before leaving New York for a six-month fellowship premised on a continuation of my Google-related work, I went to MoMA PS1 for Simon Denny's show "The Innovator's Dilemma." Several projects—three years' worth of work—appeared within the modular architecture of a tech-industry tradeshow. Ken Johnson described the show in the New York Times as an attack on the "irrational exuberance about technology" executed with "sardonic verve." It "indirectly damn[ed] the high-end art market's own inflationary mania." In the "Disruptive Berlin" section of the show, the most fetishistic of custom computer cases were assembled as intended, except for a few visual embellishments to emphasize components that looked particularly exotic. They displayed the iconography and slogans of "top" Berlin startups like Soundcloud and Sociomantic and were platformed on sleek flat-panel TVs supported by plinths that were actually the boxes of the custom computer cases. Nearby sat empty server racks like the ones that Denny would use, later that year, as display cases for digital files rematerialized from NSA servers in the New Zealand pavilion at the Venice Biennale.
The show felt like a Best Buy feels. I tried to rationalize why those objects were in that museum, and why they were arranged in that manner. Because these containers, meant to encase flows of information, could also serve as framing devices for a materialization of the aforementioned branding? And this is, conveniently, what conceptual art looks like five generations later? Or was it that these massively distributed forms, through their customization, are now rendered as unique objects for another market—one oriented around materiality and a connoisseur’s possession—and “critical” participation is often measured only in terms of how self-conscious of it you are?
I sat on a corporate event platform and looked at large stretched canvas prints of speakers and pull-quotes from Berlin’s 2012 Digital-Life-Design conference, all presented to look like the user interface of Apple’s iOS 6. The work was asking me to process it as knowledge, and I felt as though I was one of the few thousands on this Earth trained to read it holistically. But the reading didn’t seem oriented towards my experience of it, or where this might take me, but rather towards the author, the innovator, the successful artist-as-anthropologist. It seemed that if one actually cared about the politics of information—how digital files both matter and materialize conditions that exclude other ones from mattering—one might get more out of the work of Laura Poitras, who exploits the popular documentary format to generously deliver information of such urgency at much higher stakes. Feeling as if I had spent too long of an evening after work in a big box retail store, I waded through the crowd of art professionals towards the exit.
An installation view of Simon Denny's exhibition "The Innovator's Dilemma" at MoMA PS1. Photo: Pablo Enriquez.
Outside PS1, another male artist praised the work for its nondidactic qualities and how these allowed the viewer to form their own opinions. My eyes rolled, gesturing towards the VW Dome with the flail of a lanyard cobranded by Denny and Genius, the trendy “online knowledge” startup. “Such affection you have for an ambitious male artist opportunistically piggybacking on the tech sector to tell an already-initiated audience ‘the artist is kind of like a brand!’” Inside the dome an integrated advertising spectacle unfolded through live annotation demos and a panel that included artist/creative director Ryder Ripps and artist/Instagram-personality Nightcoregirl. My companion seemed surprised at my contempt; we had shared enthusiasm for Denny’s work in the past. “Damn. Does this mean you’re giving up a career in corporate art?” he joked. I softened. “It just seems like Simon’s state-and-finance-capital-sanctioned urges to stage his subjects as documentary have suppressed what he’s actually quite good at, which is sculpture.” My friend seemed relieved by this substitution of formalism for vitriol. We reminisced over the strangeness of Denny’s generatively dumb Deep Sea Vaudeo work and that bonkers show in Aachen with the nautical rope.
I landed in Germany for my residency at Akademie Schloss Solitude, which is situated in an eighteenth-century Rococo Schloss in the forest on the outskirts of Stuttgart. Castle rent was covered and I was to be paid €1200 per month on top of a €4200 production budget for whatever I wanted to make. I began to breed mosquitos, write a letter to Bill and Melinda Gates, and create a 3-D model of Baby Sinclair from Jim Henson’s animatronic family sitcom Dinosaurs. My entire production budget would go towards a video celebrating the existence of 3-D models of a mosquito, an oil derrick, and a syringe in a manner similar to the Romantic ekphrasis of John Keats’s “Ode on a Grecian Urn.”
I’ve been trying to articulate what I want out of art since dropping the varied endgames of 21st century social realism. It seems to me that the good gets going through a constant “evolution” of attitudes via experimentation, literally like the evolution without “progress” of webbed feet in ducks. There's no teleology there, as webbed feet weren’t arrived at for any sort of reason; it was an accident. Marx wrote fanmail to Darwin about this. So perhaps a progressive approach to commercial processes would be more like Death taking you by the hand at the best Sheryl Crow concert you’ve ever been to and realizing that it’s hard to hold on because Ring Pops adorn each finger bone. And then figuring out a way to renegotiate the conditions.
Andrew Norman Wilson, Ode to Seekers, 2012.
I’m not trying to say I feel particularly liberated as an artist with ideas like this. I’m still romping around in the same hollow plastic Little Tikes play version of society (the art world), staffed largely by delusional incompetents and monitored by horny, neglectful dads. I’ve thought about buying a boat and learning how to fish so that I could eat the sea and drink the rain, free from the obligations of a rented apartment and an occupation. I’ve thought about investing in my future by saving and owning, instead of sleeping in living rooms and unfamiliar beds all just to display things that no one can use. But I keep waking up with the feeling that there’s something to that uselessness. Not a point really—more like a knot. If being a person means being paranoid that you might be a puppet of some other force, like economic networks or algorithms or genetic coding, then being an artist means making things that defy that paranoia. It’s not that there’s no reason; ideally art takes a step beyond reason, towards what ought to be. To create disturbances in the seemingly natural order of things and unwind our counterfeit intuitions.
Andrew Norman Wilson is an artist based in New York. Recent and forthcoming exhibitions include the Gwangju Biennial (2016), the Berlin Biennial (2016), the Bucharest Biennial (2016), "Bread and Roses" at the Museum of Modern Art Warsaw (2016), and "On Sweat, Paper and Porcelain" at CCS Bard in Annandale-on-Hudson, New York (2015). He has lectured at Oxford University, Harvard University, Universität der Künste Berlin, and CalArts. His work has been featured in Aperture, Art in America, Artforum, Buzzfeed, Frieze, Gizmodo/Gawker, The New Yorker, and Wired.
© 2016 e-flux and the author
#include <bits/stdc++.h>
using namespace std;
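// Starting from node 1, repeatedly follow the successor pointer a[i] and
// count the jumps until node 2 is reached. If a node repeats before node 2
// is found, the walk is stuck in a cycle and -1 is printed.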
int main() {
int n, temp;
cin >> n;
vector<int> a{0};
vector<bool> b{false};
vector<bool> already{false};
for(int i=0;i<n;i++) {
cin >> temp;
a.push_back(temp);
b.push_back(false);
already.push_back(false);
}
int last = 1, ans = 0;
b.at(1) = true;
while(!b.at(2)) {
if (already.at(last)) {
cout << "-1\n";
return 0;
}
b.at(last) = false;
already.at(last) = true;
last = a.at(last);
b.at(last) = true;
ans++;
}
cout << ans << '\n';
return 0;
}
from stardb.btree import BTree
from stardb.btree import IndexNode
from stardb.btree import LeafNode
from stardb.util import unpack, bytesToString
from io import BytesIO
class LeafInputStream:
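    """File-like reader over the chained blocks that make up a leaf node.

    A leaf's payload may span several fixed-size blocks; the last four bytes
    of each block hold a big-endian pointer to the next block in the chain
    (-1 if there is none). read() follows that chain transparently.
    """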
def __init__(self, blockStorage, buff):
self.blockStorage = blockStorage
self.blockBuffer = buff
def read(self, size):
# TODO: Try profiling this with a pre-allocated buffer
data = BytesIO()
blockDataSize = self.blockStorage.blockSize - 4 # Size of block excluding next pointer
bytesToRead = size
while bytesToRead > 0:
endOfBlock = False
# If there's enough room in the current block to read 'bytesToRead', just read
# it straight into the output.
if (self.blockBuffer.tell() + bytesToRead) < blockDataSize:
data.write(self.blockBuffer.read(bytesToRead))
bytesToRead = 0
# Otherwise, read all the data we can
else:
bytesAvailable = blockDataSize - self.blockBuffer.tell()
data.write(self.blockBuffer.read(bytesAvailable))
bytesToRead -= bytesAvailable
endOfBlock = True
# If we've reached the end of the block and we've still got more data to read,
# load the next block and begin the process again.
if endOfBlock and bytesToRead > 0:
nextBlockPointer = unpack('>i', self.blockBuffer.read(4))
if nextBlockPointer != -1:
self.blockBuffer = self.blockStorage.readBlock(nextBlockPointer)
magic = bytesToString(self.blockBuffer.read(2))
if magic != BTreeDatabase.LeafMagic:
raise Exception('Incorrect leaf block signature')
else:
raise Exception('Insufficient leaf data')
# TODO: This is probably an expensive copy
return data.getvalue()
class BTreeDatabase(BTree):
FileIdentifier = 'BTreeDB4'
IndexMagic = 'II'
LeafMagic = 'LL'
def __init__(self, blockStorage):
super().__init__()
self.blockStorage = blockStorage
self.indexCache = {}
def readRoot(self):
rootData = self.blockStorage.readUserData(28, 14)
# TODO: Figure out what's going on here
unknownBool = unpack('?', rootData.read(1))
rootData.seek(1, 1)
if unknownBool:
rootData.seek(8, 1)
self.rootPointer = unpack('>i', rootData.read(4))
self.rootIsLeaf = unpack('?', rootData.read(1))
def open(self):
self.blockStorage.open()
userData = self.blockStorage.readUserData(0, 28)
fileID = bytesToString(userData.read(12))
if fileID != self.FileIdentifier:
raise Exception(
'DB file identifier does not match expected value of "{0}" (Got {1})'.format(
self.FileIdentifier,
fileID
)
)
contentID = bytesToString(userData.read(12))
if contentID != self.getContentIdentifier():
raise Exception(
'DB content identifier does not match expected value of "{0}" (Got {1})'.format(
self.getContentIdentifier(),
contentID
)
)
keySize = unpack('>I', userData.read(4))
if keySize != self.getKeySize():
raise Exception(
'DB content key size does not match expected value of "{0}" (Got {1})'.format(
self.getKeySize(),
keySize
)
)
self.readRoot()
def readIndex(self, pointer):
index = IndexNode()
buff = self.blockStorage.readBlock(pointer)
self.currentBuffer = buff
magic = bytesToString(buff.read(2))
if magic != self.IndexMagic:
raise Exception('Incorrect index block signature.')
index.selfPointer = pointer
index.level = unpack('B', buff.read(1))
numChildren = unpack('>i', buff.read(4))
index.beginPointer = unpack('>i', buff.read(4))
for i in range(numChildren):
key = self.readKey(buff)
pointer = unpack('>i', buff.read(4))
index.addPointer(key, pointer)
return index
def loadIndex(self, pointer):
if pointer not in self.indexCache:
index = self.readIndex(pointer)
self.indexCache[pointer] = index
return index
else:
return self.indexCache[pointer]
def loadLeaf(self, pointer):
leaf = LeafNode()
self.currentLeafBlock = pointer
buff = self.blockStorage.readBlock(pointer)
magic = bytesToString(buff.read(2))
if magic != self.LeafMagic:
            raise Exception('Incorrect leaf block signature')
leaf.selfPointer = pointer
leafInput = LeafInputStream(self.blockStorage, buff)
count = unpack('>i', leafInput.read(4))
for i in range(count):
key = self.readKey(leafInput)
data = self.readData(leafInput)
leaf.addElement(key, data)
return leaf
def getDebugInfo(self):
return 'Root Is Leaf: {0}\nRoot Pointer: {1}\nIndex Cache Size: {2}'.format(
self.rootIsLeaf,
self.rootPointer,
len(self.indexCache)
)
def getKeySize(self):
raise NotImplementedError
def getContentIdentifier(self):
raise NotImplementedError
def readKey(self, buff):
raise NotImplementedError
def readData(self, buff):
raise NotImplementedError
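# A minimal sketch (not part of the original module) of how a concrete
# database class is expected to fill in the abstract hooks above. The 8-byte
# key and the length-prefixed value layout are illustrative assumptions, not
# the real on-disk formats.
class ExampleDatabase(BTreeDatabase):
    def getContentIdentifier(self):
        # Must match the 12-byte content identifier stored in the file header.
        return 'ExampleDB001'

    def getKeySize(self):
        return 8

    def readKey(self, buff):
        # Keys are fixed-size, so read exactly getKeySize() bytes.
        return buff.read(self.getKeySize())

    def readData(self, buff):
        # Assume values are stored as a big-endian length prefix plus payload.
        size = unpack('>i', buff.read(4))
        return buff.read(size)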
/**
 * Returns an iterator over the spaces in this row. The iteration order
 * follows {@code flipRow}: ascending from index 0 when it is false,
 * descending from index 7 when it is true.
 *
 * @return An iterator over this row's spaces
 */
@Override
public Iterator<Space> iterator() {
Iterator<Space> it = new Iterator<Space>() {
private int currentIndex = flipRow ? 7 : 0;
private int nextIndex() {
if (flipRow) {
return currentIndex--;
} else {
return currentIndex++;
}
}
@Override
public boolean hasNext() {
return 0 <= currentIndex && currentIndex <= 7 && space[currentIndex] != null;
}
@Override
public Space next() {
return space[nextIndex()];
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
return it;
    }
Is The Covid-19 Vaccine Halal? Revisiting The Role of National Pharmaceutical Regulatory Agency (NPRA) and JAKIM
The urge to be vaccinated increased rapidly during the outbreak of the COVID-19 pandemic. Resorting to vaccine products is seen as the only way to break the chain of COVID-19 transmission and eventually end the pandemic. Faced with this dire need, many consumers confront the dilemma of whether to be vaccinated or to opt out of the vaccination program when safety concerns about vaccine products are widely circulated. Muslim consumers, in particular, experience a double worry over both the safety and the halal status of the vaccine product. Owing to the emergency call to bring the vaccine to market as soon as possible, innovation and production were carried out in an expedited manner, and the step of having the vaccine certified as halal was bypassed. When news that vaccine products use non-halal ingredients reached the Muslim community in Malaysia, they were taken aback and demanded immediate answers from the authorities. In addressing this issue, this study critically analyses the role of the NPRA and JAKIM in ensuring the safety and halal status of a product, with the aim of suggesting a review of their existing roles. The article adopts a qualitative research methodology in which interviews support a library-based and doctrinal study.
Introduction
The global health crisis declared a pandemic by the WHO in 2020 required the transformation of norms for the world population (Jamaluddin, MA, 2021). It affected norms of living, working, innovating, social communication, celebration, education, and many more. For pharmaceutical companies, the uncontrolled spread of the COVID-19 virus, now in its second year, is a call to innovate vaccines that can reduce the effects of fatal infection, prevent infection, and eventually reduce morbidity. Rapid activities to innovate and produce COVID-19 vaccines have taken place within the pharmaceutical industry, and it is within this urgent call for vaccine production that many ethical and legal issues arise. Safety issues have led the charge: the majority of consumers who receive information questioning the safety and effectiveness of the vaccine face a dilemma in deciding whether to opt in to or out of the vaccination program. For Muslim consumers, the concern doubles into worry over both the safety and the halal status of the vaccine. The Muslim population is estimated at 1.9 billion globally (refer to Table 1) (World Population Review, 2021); in Malaysia, Muslims make up 61.3% of the population and form the majority (Diamant, 2019). The global halal market was valued at [...] million USD in 2018 and is projected to reach [...] million USD by the end of 2025, growing at a steady CAGR over 2018–2025 (2 News, 2021). Significantly, Malaysia is recognised as having the potential to champion halal pharmaceutical manufacturing, owing to its membership in the Organisation of Islamic Cooperation (OIC) and its status as the only OIC country accepted as a member of the Pharmaceutical Inspection Co-operation Scheme (PIC/S). Through its PIC/S membership, Malaysia has already established its standing in the global pharmaceutical industry. In responding to this opportunity, the government has paid notable attention to developing the halal pharmaceutical industry, as is clearly evident in the naming of the pharmaceutical, cosmetic, and personal care industry as one of the focus areas under Malaysia's Third Industrial Master Plan (IMP-3) (Leonard Ariff Shatar, 2011).
With this existing recognition, having an appropriate legal framework on the halal vaccine would boost Malaysian pharmaceutical products in the global market. The NPRA is the main regulatory body for the pharmaceutical industry in Malaysia; however, there are several issues regarding its role in monitoring and developing halal pharmaceuticals. This article therefore reviews the existing roles of the NPRA and JAKIM in governing the safety, efficacy, quality, and halal status of vaccine products, with the aim of suggesting a revision of these roles to ensure the halal verification of vaccine products.
A dilemma arises when numerous vaccine products flood the market in this act of rescue, inviting manipulation and abuse of the existing laws. This relates to the fact that Muslims form a large share of the world's population and that their awareness of the need to consume halal products has escalated. Technology has increased consumer awareness and has resulted in demands to know the actual contents of these products and the drug-making processes carried out on manufacturing premises. For Muslims, halal verification and authentication of these products are highly required. For this reason, the halal industry has become an important area of the market nowadays. To protect the consumer, the halal pharmaceutical industry is administered by two authorities: the National Pharmaceutical Regulatory Agency (NPRA), the Malaysian national compliance monitoring authority appointed by the Ministry of Health to ensure the quality, efficacy, and safety of pharmaceuticals through a licensing scheme, and JAKIM, which governs halal certification and monitoring.
Methodology
This is socio-legal research to analyse and discuss the role of the NPRA and JAKIM in assuring the safety and halal status of the COVID-19 vaccine. For that purpose, the research was designed around primary and secondary data, as well as an investigation of the social problems and questions essential to providing a solution to the issue. It is important to highlight that this paper does not focus on empirical study; it aims instead at the doctrinal and theoretical discussion that best serves the suggestions put forward.
For the purpose of classification, an academic review was conducted and subjected to content analysis, the most prominent method for qualitative research (Elo and Kyngäs, 2007; Lacy et al., 2015). The method involves an accurate, comprehensive, and organised examination of important materials in selected paragraphs and sentences.
Primary data for this research were obtained through interviews with stakeholders who are directly or indirectly involved in the regulation and halal certification of pharmaceuticals in Malaysia, including officers of the relevant authorities, academicians, and experts in this field. In addition, secondary data were collected to provide better analysis and a comprehensive discussion of the research. For this purpose, a thorough examination was conducted of halal-related law, the sale of drugs legal regime, and related by-laws. Other secondary data include relevant books, published articles, and information from the internet.
Understanding Halal
From the literature (Baran, 2020; Sue Penny, 2006; Mian N Riaz et al., 2003; Yusof Al Qardawi, 2009; Halal Journal, 2007), it can be summarised that the halal concept is wide and applies to all facets of life. The Arabic word carries the meaning of lawful or permitted (Johan Fischer, 2005). Nevertheless, the common usage of the term relates to food products, meat products, cosmetics, personal care products, food ingredients, and food contact materials (Mohd Cholil, 2020; Noreen Noor, 2015; Utusan Malaysia, 2003). Under Shariah law, the products that we consume must be hygienic, pure, clean, and of quality, also known as 'halalan toiyyibban' (Rokshana Sirin, 2018; Nik Maheran et al., 2009; Mian N. Riaz, 2004). The narrow understanding of 'halal' is that a product must be free from non-halal ingredients or substances containing pork and alcohol (Mohd Cholil, 2020). In contrast, the true concept of halal ensures that the whole process of preparing the product meets Shariah rites. Halal demands not only that what we consume be halal (following the approved Shariah procedure) but also that the product be 'toyyib' (wholesome).
Ascertaining wholesomeness entails assurance throughout the production of the product; wholesomeness also anticipates that what is consumed can promote and prolong one's healthy life. The word halal appears in many verses of the Al-Quran (2:172) and the Al-Hadith (Mohd Cholil, 2020; Mian N Riaz et al., 2003). As far as foods are concerned, this term is always accompanied by the word thoyyiban. Hence, these denote foods that are permissible, good, pure, clean, wholesome, fit, etc. For example, Allah s.w.t. says to the effect: O ye people! Eat of what is on earth, lawful and good; and do not follow the footsteps of the evil one, for he is to you an avowed enemy. (Al-Baqarah 2:168) Nevertheless, in Islamic law, the term halal is not restricted to food consumption per se. The issue of halal encompasses a wide spectrum of human life, in line with the goal of Shariah as a way of life. Hence, it also covers matters regarding finance, investment, fashion, entertainment, and so forth (Baran, 2020; Mohd Cholil, 2020). For instance, in respect of financial matters, Allah s.w.t. has clearly allowed buying and selling but strictly prohibited riba (al-Baqarah 2:275).
Rules of Necessity under Islam
Darurah in the Arabic language is derived from darar, which means an injury that cannot be avoided (Saim Kayadibi, 2010). Abdur Rashid extends the definition of darar to mean to harm, impair, damage, or hurt, which is the opposite of benefit (Mansour Z. Al-Mutairi, 1997). Thus, the Arabs call anyone who has lost his sight darir because of his obvious injury (Mansour Z. Al-Mutairi, 1997). Muslim jurists believe that the principle gains its legality from certain explicit textual evidence. Although these sources deal only with life-and-death situations, such as the case of starvation, jurists further argue that this specific permission to eat otherwise unlawful food can be extended to other emergency cases (Yasmin Hanani, 2010).
Several writers (Abdul Rahman, 2021; Harmy Mohd Yusoff et al., 2011; Aznan Hasan, 2012; H.M Hasballah Thaib, 2004) who have researched and written on halal and haram acknowledge that necessity lifts the prohibition on using and consuming haram goods; thus, necessity permits prohibited things. However, Islamic jurists are divided on whether non-halal medicine may be taken in cases of necessity. Eminent Muslim scholars such as Yusuf al-Qaradawi state that some jurists do not consider non-halal medicine to be a necessity in any emergency. He further states that these jurists cite the following hadith to support their argument.
The Prophet Muhammad S.A.W. says, "Allah has not made things that are unlawful for you to consume to be your medicine." (al-Bayhaqi) On the other hand, other jurists maintain that in cases of emergency the rule on non-halal is lifted (Jamaluddin, MA, 2021). They base their argument on the ground that both food and medicine preserve man's life. These jurists support their argument with a hadith of the Prophet Muhammad S.A.W. narrated through Anas r.a.
They said that although male Muslims are not allowed to wear silk garments, the Prophet Muhammad S.A.W. allowed both 'Abd al-Rahman ibn 'Awf and al-Zubayr ibn al-'Awwam to wear them because of necessity: the two men were suffering from allergies (Shaykh Mufti Muhammad ibn Adam, 2002).
Another authority on these matters can be found in the report of Sayyidina Abu al-Darda' (Allah be pleased with him), who quotes the Prophet Muhammad S.A.W. as saying: "Indeed, Allah has sent down both illness and its cure, and he has assigned a cure for every illness, so treat yourselves medically, but use nothing unlawful." (Sahih al-Bukhari and Muslim; Syekh 'Abd Shukur Rahimy Ma'mur Dawud, 2003) Imam Abu Dawud, Imam Nasa'i, and Imam Tirmidhi (Allah have mercy on them) relate in the Sunan collections that when one of the companions, Arfaja ibn Aswad (Allah be pleased with him), had his nose damaged in the battle of Kulab, he replaced it first with silver and then with a nose made of gold, with the order and consent of the Messenger of Allah (S.A.W.), despite gold being unlawful for men (Shaykh Mufti Muhammad ibn Adam, 2019).
All these jurists based their findings on the divine words of Allah (SWT) in the al-Qur'an. The following verses become the authorities for necessity:

"Why should ye not eat of (meats) on which Allah's name hath been pronounced, when He hath explained to you in detail what is forbidden to you, except under compulsion of necessity?" (Al-An'am, 6:119)

"He has only forbidden you dead meat, and blood, and the flesh of swine, and that on which any other name has been invoked besides that of Allah. But if one is forced by necessity, without wilful disobedience, nor transgressing due limits, then is he guiltless. For Allah is Oft-Forgiving, Most Merciful." (Al-Baqarah, 2:173)

"But if any is forced by hunger, with no inclination to transgression, Allah is indeed Oft-Forgiving, Most Merciful." (Al-Ma'idah, 5:3)

Even though the writers have agreed on the general rule of necessity that permits the usage of prohibited materials, they are not in consensus on the interpretation of darurat and the usage of prohibited material during necessity. Relating this to the usage of the COVID-19 vaccine during the spread of the fatal virus, it can be said that humans bear a huge duty when it comes to health, for which they will be held accountable to God on the Day of Judgment. Vaccination is thus required because it respects the ideals of the sanctity of human life and the prevention of harm (mafsadah), such as death from the virulent coronavirus's effects.
The Terminology and Concept of Vaccine
Pharmaceutical and halal are two words that must be defined separately. The merging of these two words is addressed at the end of this subheading, with the aim of clarifying the meaning intended within this study.
The word pharmaceutical derives from the Latin pharmaceuticus, itself from the Greek pharmakeuein, which means to administer drugs (Merriam-Webster Dictionary, 2012). This later developed into many other usages of the word pharmaceutical, which can be divided into persons involved in the pharmaceutical industry and pharmaceutical as a medication product. As an adjective, it carries the meaning of anything engaged in pharmacy or in the manufacture and sale of pharmaceuticals.
However, usage in the context of a noun highlights the word pharmaceutical as a consumable product, interpreted as any substance used in the treatment of disease, inclusive of drug, medicament, medication, or medicine (Education Yahoo, 2012). Its synonyms include the words cure, drug, medicament, physic, and remedy. Under the legal dictionary (Legal-Dictionary, 2012), these entries are extended to include alterant, anaesthetic agent, antibiotic, chemical substance, curative preparation, and medical component.
There is also a definition that uses the word pharmaceutical as a noun but inserts into the definition the place of preparation; it states that the drugs or medicines are those prepared or dispensed in pharmacies (Mayer Brown, 2009). Analogously, some interpret this word by the process involved in its production, stating that pharmaceutical means of or pertaining to the knowledge or art of pharmacy, or to the art of preparing medicines according to the rules or formulas of pharmacy (Webster-Dictionary, 2012). The Encyclopaedia Britannica (2012) has extended this word to highlight the functions of pharmaceutical products, explaining that a pharmaceutical is any substance used in the diagnosis, treatment or prevention of disease and for restoring, correcting or modifying organic functions.
This definition method is in line with the definition given by the medical glossary (2012), which defines a pharmaceutical as any drug intended for human or veterinary use, presented in its finished dosage form. Included here are materials used in the preparation and formulation of the finished dosage form. It is further elaborated that pharmaceuticals are generally classified by chemical group according to the way they operate in the body, which depends on the pharmacological effect. They are sometimes categorised by their therapeutic use. For example, pharmaceuticals made from natural substances are known as alkaloids, e.g. quinine, nicotine, cocaine, morphine, while those derived from animals include insulin and hormones.
The Urgent Demand for Covid 19 Vaccine
The Malaysian Prime Minister has encouraged the food industries in Malaysia to be strictly halal compliant so as to penetrate the global food market (Nurulhuda Noordin et al., 2009). Over this decade, the focus has expanded beyond the halal food industry, and the government has concentrated on several main areas of halal development. By January 2012, eleven guidelines had been created to assist and govern the halal industry as a whole, inclusive of the MS 2424: 2012 Halal Pharmaceuticals - General Guidelines. On January 30, 2013, Malaysia marked another milestone when it became the first country in the world to accord halal certification to a pharmaceutical product in accordance with the Halal Pharmaceuticals - General Guidelines (Halal Malaysia Portal, 2013).
An individual is protected against the virus when the immune system responds to the antigens and accordingly produces antibodies that fight the antigen. This process happens when an individual is vaccinated, where a weakened or killed pathogen, or a toxoid extracted from bacteria, is injected into the body of that individual. Along with this, a group of memory cells is also produced, which results in a faster response if the individual is exposed to the same antigen again. Hence, it can be concluded that the vaccine is a device of preventive medicine. Another concern that influences hesitancy among the public is the halal status of the ingredients in the vaccine, given that Muslims are the majority of the population (Wan Rohani et al., 2017). In Malaysia, the long-term safety effects of vaccines are observed. Furthermore, before a license is given, all vaccines used by the Ministry of Health are required to be registered under the Drug Control Authority (DCA). It is important to highlight that before a vaccine can be registered, it must go through three phases of clinical trials to guarantee its safety. Phase 1 clinical trials are conducted to assess the immune response elicited by a vaccine and to confirm the vaccine's safety, followed by Phase 2, which involves hundreds of volunteers divided into a disease-acquired group and a control group.
Finally, Phase 3 aims to evaluate the potency and safety of the vaccine among tens of thousands of volunteers and to observe any unwanted effects. Ultimately, the vaccine produced is safe to use, as it has gone through a strict screening process before a license can be granted.
The year 2011 marked a conspicuous development in the halal pharmaceutical industry, when the government introduced MS2424: 2010(p): Halal Pharmaceutical: General Guidelines to better monitor halal pharmaceutical production within the industry. The new guideline was established on 2 March 2011 (Halal Media Admin, 2011). In support of the guideline, the government of Malaysia appointed two Compliance Monitoring Authorities within the pharmaceutical industry: the National Pharmaceutical Control Bureau (NPCB) and the Department of Standards Malaysia. The emergence of this specific guideline has shed some light on the halal pharmaceutical industry.
The development shows that, like the halal food industry, the halal pharmaceutical industry has become a commodity that contributes to Malaysian economic growth. Halal labelling of pharmaceutical products has been said (Bernama, 2012) to add value to the product. The global market for pharmaceuticals grew 4% from 2009 to 2010, to a value exceeding USD 820 billion (CCM Press Release, 2011). Thus, export opportunities could well be considered a potential revenue-earning sector.
In responding to the government's vision, many of the important players, consisting of people in the industry, policymakers, related agencies and consumer organisations, have positively embarked on activities to promote the expansion of the halal industry. For example, the Halal Industry Development Corporation (HDC), as the central implementation agency, has undertaken various activities to uphold the development of the halal industry. The Halal Industry Partners Programme (HIPP) was established by HDC to foster the involvement of companies and related associations in promoting halal industry activities. A satisfying effort was made by HDC in 2011, when it published a comprehensive halal reference known as the Halal Index.
Concurrently, the Chemical Company of Malaysia Berhad (CCM) has pioneered the halal pharmaceutical manufacturing industry. CCM is also the authorised body to analyse evidence in cases of abuse of halal certification. In 2011, CCM was reported to have made a commendable attempt to export halal generic medicines. Several works, such as those by Jasni, Wan Maseni and Eyas, acknowledge that halal has penetrated the pharmaceutical industry worldwide. With these rapid activities being carried out within the developing halal pharmaceutical industry, there is an urgent need to strengthen the legal framework.
The Discoveries of Non-Halal Vaccine Product
The issue of halal products has invited debate in various aspects of our lives. These days, pharmaceutical products are associated with debates on the hazardous effects resulting from their consumption, the insertion of the debated ingredients of Genetically Modified Organisms (GMO) within the production process, and undisclosed non-halal ingredients. All of these issues fall within the prohibition under the true concept of 'Halalan Toyibban'. Halal and haram are universal terms that apply to all facets of life. The halal supply chain involves the entire circle of production, distribution, and marketing processes by which consumers receive their requested product (Suhaiza Zailani et al., 2010).
Recent development shows a diversion of demand towards halal pharmaceutical certification. Society's demand was ignited by variously reported discoveries from 2006 to 2011 of non-halal content used in pharmaceutical products. Among the complaints (Mohd Amri, 2011) are those concerning the widely used drugs known as Clexane and Fraxipane, which were said to contain porcine material, that is, material derived from pigs, which is commonly used in medication production or in biotechnology processes. These drugs are usually used by doctors to thin the blood of patients suffering from irregular heartbeats and during operations.
Abdul Rahman, in his book, quoted an email sent to JAKIM in 2006 complaining about this matter; JAKIM confirmed that, as of that date, its halal certification did not extend to pharmaceutical products. In the same year, porcine material was also discovered in another pharmaceutical product, known as Drixoral, which is used to treat influenza. Later, in 2008, pilgrimage candidates were taken aback by the fact that the vaccine injection they received to prevent meningococcal meningitis, ACYW 135-Menomune, was reported to contain amino acids derived from duck, swine, and bovine sources. The revelations continued: in 2010, Imodium capsules used to treat diarrhoea were said to contain the same non-halal substance, which resulted in an immediate revocation of the product's license by the Ministry of Health (Ikhwan Ideris, 2012).
News published by The Star on 2 March 2021, entitled "New vaccine shows promise", informed readers that recent studies from the United Kingdom suggested a single dose of the Pfizer vaccine can provide strong protection against Covid-19. Research by Juan (2021) shows that Covid-19 vaccines such as Pfizer-BioNTech, which are made up of messenger ribonucleic acid (mRNA) molecules, lipids, salts, and sugar, are safe, efficacious, and free from any impermissible ingredients in preventing symptomatic Covid-19 disease, and can thus minimise vaccination refusal among Muslim communities that question the halal status of Covid-19 vaccines.
The Force of Elucidating the Dire Discoveries
In response to these worrying discoveries of non-halal pharmaceutical products, the National Fatwa Council has stepped in to decide on the religious standing of the debatable pharmaceutical products. A fatwa is a collection of decisions or legal opinions made jointly by the Muftis appointed as members of the National Fatwa Council (see section 34 of the Administration of Islamic Law (Federal Territories) Act 1993). Various decisions relating to halal pharmaceuticals have been made by this Council, i.e. at the 94th, 81st, 73rd and 53rd meetings of the National Fatwa Council. The making of religious rulings by the National Fatwa Council on halal pharmaceutical ingredients is an indication that significant focus is now directed to the halal pharmaceutical industry. However, a fatwa is not binding unless it is inserted in the State Islamic Laws, in which case its jurisdiction is confined to the State.
Dharurah: The Rule of Necessity
Darurah in the Arabic language is derived from darar, which means an injury that cannot be avoided (Saim Kayadibi, Istihsan, 2010). Abdur Rashid extends the definition of darar to mean to harm, impair, damage, or hurt, which is the opposite of benefit (Mansour Z. Al-Mutairi, 1997). As discussed above, writers who have researched halal and haram (Harmy Mohd Yusoff et al., 2011) acknowledge that necessity lifts the prohibition on using and consuming haram goods, although the jurists remain divided on whether non-halal medicine may be taken in cases of necessity: some rely on the hadith reported by al-Bayhaqi that Allah has not made the unlawful a medicine, while others rely on the hadith narrated through Anas r.a., in which the Prophet Muhammad S.A.W. permitted 'Abd al-Rahman ibn 'Awf and al-Zubayr ibn al-'Awwam to wear silk because of their allergies.
In the book Durar al-Ahkam, dharurat is defined as a situation that forces a person to perform an act prohibited by sharia (Ali Haidar, 1996). When there is an excessive need or difficulty that no one can withstand, the concept of dharurah can be applied. However, dharurah should not be used arbitrarily, especially in determining an action or transaction that is clearly illegal, for one's own benefit. Islam is for convenience, never for hardship (the Prophet Muhammad S.A.W., as recorded in Bukhari, Tirmidhi and Abu Daud). In daily life, there are times when unexpected things and emergencies occur; hence, avoiding things that are forbidden becomes very difficult.
There is a narration from Anas radhiyallahu 'anhu who mentions that Rasulullah shalallahu 'alaihi wasallam gave relief (rukhsah) to Zubair bin Al-'Awwam and Abdurrahman bin Auf to wear silk cloth to treat their skin disease. However, the relief given for treatment purposes using forbidden means is not absolute and is subject to certain conditions. Rasulullah S.A.W. also once said, which means: "Indeed, Allah does not send down disease unless He also sends down its cure" (Abdul Aziz 1999; al-Bukhari 2003). As Muslim consumers, the issues related to the halal and haram status of medicine are very much emphasised so that we obtain the quality of hygiene prescribed by Allah S.W.T. Nevertheless, it is undeniable that the current Muslim community lacks experts in the field of pharmaceuticals, which is being pioneered by non-Muslims. Medicines are inseparable from general human use, and their sources consist of several groups of materials, namely human, animal, plant, soil, and water resources. Nevertheless, through modern technology, most medicines today use synthetic substances that are closely related to alcohol, gelatine, and drugs (Department of Islamic Development Malaysia, 2004).
Among the forbidden substances in the pharmaceutical field are gelatine, alcohol, and drugs. Gelatine is used in the manufacture of hard capsules, soft capsules, tablets, replacement serums and injections. The 8th Muzakarah of the Fatwa Committee of the National Council for Islamic Religious Affairs of Malaysia, which convened on 24-25 September 1984, decided that the use of gelatine in medicine is, at present, permissible on grounds of emergency.
Meanwhile, there is no doubt that alcohol consumption can cause damage to organs such as the liver and can cause cancer, while methanol has an even more harmful effect that can cause blindness and death. Prof. Dr. Yusuf al-Qaradhawi (2017) states that alcohol resulting from processes other than fermentation is pure (not unclean). The Malaysian National Fatwa Council also endorsed the opinion that alcohol produced by such processes is not unclean, a stand also taken by the Fatwa Lajnah al-Azhar and the Fatwa Lajnah Daimah of Saudi Arabia.
Like alcohol, drugs are intoxicating, can cause hallucinations, and can cause addiction in the user. However, the benefits of drugs in the medical world are undeniable. Opium, for example, is used in medicine as a painkiller to relieve severe pain, cancer pain, and labour pains. Examples of commonly used drugs are morphine and pethidine, as well as codeine (cough medicine). Cannabis was once used as a painkiller, while cocaine was used as an anaesthetic and to stop bleeding (Bahagian Kawalan Penyakit, 2013).
It can be summarised that the rule of necessity is applicable only where the following can be ascertained. Firstly, there is a real and actual necessity; in other words, there is no alternative means of ending the emergency. Secondly, the consumption of non-halal food or drugs must be proportionate, sufficient only to remove the emergency; once the emergency diminishes, the Muslim is disqualified from continuing to consume non-halal drugs or food. Thirdly, in giving treatment or medication advice, the adviser (doctor) must be a Muslim who has knowledge in both the medical and religious fields.
NPRA in Monitoring Halal Vaccine
In October 1978, the government, under the Ministry of Health, set up the National Pharmaceutical Control Laboratory to carry out the quality control activities of the Pharmacy and Supply Programme (NPCB, 2013). The task was extended in 1985 when this unit, under its new name, the National Pharmaceutical Control Bureau (NPCB), was given the sole authority to ensure the quality, efficacy, and safety of pharmaceuticals through the registration and licensing scheme. Its jurisdiction covers medicinal products and cosmetics.
Under the ASEAN Technical Co-operation among Developing Countries (ASEAN TCDC) Programme, NPCB has been chosen and recognised by the ASEAN countries as the regional training centre for quality control of pharmaceuticals (NPCB, 2012). In accomplishing this task, NPCB has been receiving trainees from ASEAN and other countries, including Myanmar, Bangladesh, Vietnam, Pakistan, the Philippines, Indonesia, India, Sri Lanka, Thailand, Macao, Singapore, Hong Kong, Laos, Cambodia, and Mongolia. This has positioned Malaysia as a global training centre for pharmaceutical governance.
Simultaneously, NPCB officers have been sent to Sri Lanka, Mongolia, and Vietnam as consultants. In view of its technical expertise and training capabilities, NPCB received recognition as a "WHO Collaborating Centre in the Regulatory Control of Pharmaceuticals" on 10 May 1996 (BPFK, 2012). The designation of NPCB as a WHO Collaborating Centre for the Regulatory Control of Pharmaceuticals is effective for a new period of four years from 1 August 2011.
It can be seen that NPCB plays an important role at the local and international levels in safeguarding the pharmaceutical industry. In contrast, its role in promoting halal pharmaceuticals cannot be seen on paper, though there is an undeniable implied contribution. The principles of safety, quality and efficacy are in line with the halal concept of 'thoyibban', or wholesomeness. This means that NPCB has been safeguarding the Islamic dietary principle when entrusted to ensure wholesomeness within the pharmaceutical industry. Nonetheless, the Islamic dietary principle requires consumption to be both halal and thoyibban, and it is within this combination that the role of NPCB is viewed as incomplete: safety, efficacy, and quality do not guarantee that pharmaceutical products are halal. Being the main regulatory authority within the pharmaceutical industry, NPCB holds a vital position to help promote and govern halal pharmaceuticals.
The Halal Industry Development Corporation (HDC) is working together with the Chemical Company of Malaysia (CCM) and Standards Malaysia on the development of the Malaysian Standard for Halal Pharmaceuticals, MS2424: 2010. This guideline works together with Good Manufacturing Practice (GMP) and the Pharmaceutical Inspection Co-operation Scheme (PIC/S), which strictly regulate and monitor the sector. The role of HDC is conspicuous in promoting halal pharmaceuticals, in concordance with the underlying reason for its establishment as the sole agency carrying out halal-related activities. In the development of halal pharmaceuticals, HDC works alongside JAKIM and CCM, while NPCB, an agency under the Ministry of Health, plays a significant supporting role.
It can be seen that the monitoring of the pharmaceutical industry was the main consideration in structuring the NPCB department. NPCB is divided into seven main centres: the Centre for Product Registration, the Centre for Post-Registration of Products, the Centre for Investigational New Products, the Centre for Compliance and Licensing, the Centre for Organisational Development, the Quality Control Department, and the Administration Centre (Halal Media, 2011).
Currently, halal monitoring is not delegated to any of the existing centres. For halal pharmaceuticals, the relevant authorities are JAKIM, as the halal governance body, aided by the Ministry of Domestic Trade, Co-operatives and Consumerism and the State Islamic Authority Councils (the Trade Description (Certification and Marking of Halal) Order 2011). In comparison, NPCB's jurisdiction is confined to the pharmaceutical industry. As the regulatory body for the pharmaceutical industry, NPCB is in the most suitable position to safeguard the halal pharmaceutical industry. Thus, for the development of halal pharmaceuticals, the legal framework should give NPCB a bigger role in the industry.
JAKIM in Monitoring Halal Vaccine
Starting from the introduction of MS1500:2004, JAKIM has been authorised as the reference centre and distinguished certification body. In addition, JAKIM was also authorised to certify raw materials and semi-finished products from foreign countries. Currently, there are no specific halal guidelines for vaccines, but MS2424: 2012 General Guidelines for Halal Pharmaceuticals covers vaccines. Some studies (Zainaba C. P Veeravu, 2010) have commented that there is currently an inadequate number of officials undertaking enforcement and monitoring activities. This worrying scenario led to the creation of an improved halal logo with increased security features, aimed at preventing its abuse. Consequently, through collaboration between the DSM and JAKIM, MS1500:2009 was formulated (MS 1500:2009) with an audit system in line with the international standard (ISO/IEC Guide 65). Before the introduction of MS2424:2012: General Guidelines for Halal Pharmaceuticals, halal pharmaceuticals had been governed by this guideline.
In relation to halal matters, JAKIM is the main agency entrusted by the government to govern halal certification. Certification assures the consumer that the halal-certified product is indeed halal. Thus, certification is subject to the laws and administrative regulations that ensure the parties who receive certification comply with the halal regulations. Monitoring and auditing are the administrative mechanisms used to ensure compliance with the halal pharmaceutical guidelines both before and after certification. Thus, the governance of halal vaccines is the same as the governance of the rest of the halal industry.
The responsibility for granting halal certification in Malaysia lies solely in the hands of JAKIM (Zaina et al., 2015). Generally, before a medicinal product can be marketed, it must first obtain approval from the Drug Control Authority (DCA) through the National Pharmaceutical Regulatory Agency (NPRA). The product is inspected for quality, safety and potency based on the Drug Registration Guidance Document (DRGD), 2nd Edition, September 2016, published by the Director of Pharmaceutical Services under Regulation 29 of the Control of Drugs and Cosmetics Regulations 1984 (Ministry of Health (MOH), 2019).
Next, the product is audited by JAKIM through a Halal Pharmaceutical Audit, which is based on the MS 2424:2012 standard. The product evaluation considers the opinion given by the National Pharmaceutical Regulatory Agency (NPRA) regarding its production process and formulation. Halal certification can be granted once all these processes have been completed successfully (Jabatan Kemajuan Islam Malaysia (JAKIM), 2014).
The Role and Relevance of IFANCA and HFCE Certification
Halal products are also in huge demand in countries that are not Muslim-dominant, especially major exporting countries such as the United States of America, Europe, and Australia (Latif et al., 2014). Accordingly, these countries have come up with their own halal certification bodies, including the Islamic Food and Nutrition Council of America (IFANCA) and the Halal Food Council of Europe (HFCE), which provide third-party halal certification. This effort has favourably affected health-conscious non-Muslim communities (Islamic Food and Nutrition Council of America, 2017; Halal Food Council of Europe, 2017). Imported products in Malaysia may be certified by halal agencies recognised by JAKIM, which simply means they are no longer required to be certified by JAKIM before being put on the market. However, by virtue of the ruling prescribed in MS 2424:2012, this exemption is not applicable to vaccines (Department of Standards Malaysia, 2012).
Fatwa
A fatwa (the Administration of Islamic Law (Federal Territories) Act 1993 (Act 505)) is considered part of the source of law for halal pharmaceutical products. All enforcement activities and prosecutions of cases of false labelling of halal pharmaceutical products depend on the rulings made by the National Fatwa Council (Fatwa Committee of the National Council for Islamic Religious Affairs Malaysia, 1997).
In Malaysia, fatwa can be made by two authoritative councils: the National Fatwa Council and the State Fatwa Councils. The rulings of the former apply throughout Malaysia, while those of the latter are confined within the state boundary. A fatwa does not become binding unless it is inserted in the State Shari'ah Enactments (Baig, A. A, 2009). The Office of the Mufti of the Federal Territories (PMWP) agreed to adopt the resolution of the Muzakarah (discussion) of the Fatwa Committee of the National Council for Islamic Religious Affairs of 3 December 2020, as reported by Bernama (2021) in the New Straits Times on 14 February 2021, in which the meeting decided that the usage of the COVID-19 vaccine is permissible and compulsory for groups identified by the government. Unquestionably, this resolution is in line with the preservation of life among the objectives of Shariah.
The Mufti of the Federal Territories, Sahibus Samahah Datuk Dr. Luqman Abdullah, said that from the perspective of consumerism, only vaccines identified as halal and tayyiban will be used for the National Covid-19 Immunisation Programme (Bernama, 2021).
Conclusion and Future Recommendations
In summary, it can be said that, in supporting the government's wish to become a halal hub country, there is a need to obtain world recognition for the governance system within the halal industry. The halal pharmaceutical industry has become a vital part of this role, and preparing a good monitoring system for reliable halal pharmaceuticals would aid this attempt.
NPCB is currently uncertain of its role in governing the halal pharmaceutical industry. It is recommended that the existing divisions within NPCB be extended to include a specific department for halal drugs. Extension always involves increased cost; to address this, it may be time to consider giving NPCB the power to certify halal pharmaceutical products so as to allow minimum fees to be charged to applicants. This recommendation would be effective if the officers assigned to this halal drugs department were certified with knowledge of both halal matters and pharmaceuticals. With many institutions currently offering Halal Diplomas in various fields, there is a real possibility of making this a reality.
Another suggestion is to create a consultant-based service, whereby a pharmaceutical company interested in certifying its products as halal could employ the services of NPCB officers for a fixed period to certify and monitor ongoing production. These NPCB officers must be the ones assigned to the halal department as proposed above. This may be an area for future research.
"""Subpackage with dockerfiles for experiments."""
|
// java.util imports added for completeness; the formula types (Formula, FType,
// FormulaFactory, Variable, Literal, Assignment, Substitution) come from the
// surrounding formula library and are assumed to be on the classpath.
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Stream;

/**
* Super class for Boolean constants.
* @version 2.0.0
* @since 1.0
*/
public abstract class Constant extends Formula {
private static final SortedSet<Variable> EMPTY_VARIABLE_SET = Collections.unmodifiableSortedSet(new TreeSet<>());
private static final SortedSet<Literal> EMPTY_LITERAL_SET = Collections.unmodifiableSortedSet(new TreeSet<>());
private static final Iterator<Formula> ITERATOR = new Iterator<Formula>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public Formula next() {
throw new NoSuchElementException();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
/**
* Constructor.
* @param type the constant type
* @param factory the factory which created this instance
*/
Constant(final FType type, final FormulaFactory factory) {
super(type, factory);
}
@Override
public long numberOfAtoms() {
return 1L;
}
@Override
public long numberOfNodes() {
return 1L;
}
@Override
public int numberOfOperands() {
return 0;
}
@Override
public boolean isConstantFormula() {
return true;
}
@Override
public boolean isAtomicFormula() {
return true;
}
@Override
public SortedSet<Variable> variables() {
return EMPTY_VARIABLE_SET;
}
@Override
public SortedSet<Literal> literals() {
return EMPTY_LITERAL_SET;
}
@Override
public boolean containsVariable(final Variable variable) {
return false;
}
@Override
public Formula restrict(final Assignment assignment) {
return this;
}
@Override
public boolean containsNode(final Formula formula) {
return this == formula;
}
@Override
public Formula substitute(final Substitution substitution) {
return this;
}
@Override
public Formula nnf() {
return this;
}
@Override
public Iterator<Formula> iterator() {
return ITERATOR;
}
@Override
public Stream<Formula> stream() {
return Stream.empty();
}
} |
import { random } from 'faker';
import { evalWithScope } from '@/parser/sandbox/main';
describe('parser/sandbox/main', () => {
it('Not allow visit out scope variables', () => {
const words = random.words();
const val = evalWithScope({ foo: { bar: words } }, 'foo.bar');
expect(val).toBe(words);
try {
evalWithScope({}, 'foo.bar');
} catch (error) {
expect(error).toBeInstanceOf(ReferenceError);
}
});
it('Eval use key not in scope, throw error', () => {
expect.assertions(1);
const scope = {};
try {
evalWithScope(scope, 'logger');
} catch (error) {
expect(error).toBeInstanceOf(Error);
}
});
it('Allow visit global variable', () => {
const scope = {};
const res = evalWithScope(scope, 'Array.isArray([])');
expect(res).toBeTruthy();
});
it('Allow function', () => {
const resultStr = 'hello eval';
const fn = `
function test(){
return '${resultStr}';
}
`;
const res = evalWithScope({}, fn) as () => void;
expect(res()).toBe(resultStr);
});
});
|
Transferability of microsatellite markers from Brachypodium distachyon to Miscanthus sinensis, a potential biomass crop.
Miscanthus sinensis has high biomass yield and contributed two of the three genomes in M. x giganteus, a bioenergy crop widely studied in Europe and North America, and thus is a potential biomass crop and an important germplasm for Miscanthus breeding. Molecular markers are essential for germplasm evaluation, genetic analyses and new cultivar development in M. sinensis. In the present study, we reported transferability of simple sequence repeat (SSR) markers from Brachypodium distachyon to M. sinensis. A set of 57 SSR markers evenly distributed across the B. distachyon genome were deliberately designed. Out of these B. distachyon SSR markers, 86.0% are transferable to M. sinensis. The SSR loci amplified in M. sinensis were validated by re-sequencing the amplicons. The polymorphism information content (PIC) of the transferable SSR markers varied from 0.073 to 0.375 with a mean of 0.263, assessed based on 21 M. sinensis genotypes. Phylogenetic tree based on 162 alleles detected by 49 SSR markers could unambiguously distinguish B. distachyon from M. sinensis, and cluster 21 M. sinensis genotypes into three groups that are basically in coincidence with their geographical distribution and ecotype classifications. The markers developed by the comparative genomic approach could be useful for germplasm evaluation, genetic analysis, and marker-assisted breeding in Miscanthus. |
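For reference, the PIC values reported above are conventionally computed from allele frequencies with the formula of Botstein et al. (1980). The short Python sketch below illustrates the calculation; the function name and the three-allele frequencies are illustrative, not data from this study.

def pic(freqs):
    """Polymorphism information content (Botstein et al., 1980)."""
    hom = sum(p * p for p in freqs)  # expected homozygosity term
    correction = sum(
        2 * freqs[i] ** 2 * freqs[j] ** 2
        for i in range(len(freqs))
        for j in range(i + 1, len(freqs))
    )
    return 1.0 - hom - correction

print(round(pic([0.5, 0.3, 0.2]), 3))  # hypothetical three-allele locus -> 0.548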
package com.rainbowcard.client.common.utils;
import android.util.Log;
import java.lang.reflect.Field;
/**
* Created by gc on 14/10/20.
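 * Builds a "ClassName{field='value', ...}" dump of an object's declared fields via reflection.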
*/
public class ObjectUtils {
public static String toString(Object object) {
Field[] fields = object.getClass().getDeclaredFields();
StringBuilder sb = new StringBuilder();
sb.append(object.getClass().getSimpleName()).append('{');
boolean firstRound = true;
for (Field field : fields) {
if (!firstRound) {
sb.append(", ");
}
firstRound = false;
field.setAccessible(true);
try {
final Object fieldObj = field.get(object);
final String value;
if (null == fieldObj) {
value = "null";
} else {
value = fieldObj.toString();
}
sb.append(field.getName()).append('=').append('\'')
.append(value).append('\'');
} catch (IllegalAccessException ignore) {
DLog.e(Log.getStackTraceString(ignore));
//this should never happen
}
}
sb.append('}');
return sb.toString();
}
}
|
import fs from 'fs';
type Bit = '0' | '1';
const INPUT_FILE_PATH = 'src/day3/input.txt';
const inputText = fs.readFileSync(INPUT_FILE_PATH).toString('utf-8');
const inputValues = inputText.split('\n');
function findMostCommonBit(values: string[], position: number): Bit {
const { zeros: numZeros, ones: numOnes } = values.reduce(({ zeros, ones }, inputValue) => {
const bit = inputValue[position];
return {
zeros: bit === '0' ? zeros + 1 : zeros,
ones: bit === '1' ? ones + 1 : ones,
};
}, { zeros: 0, ones: 0 });
// If equal, prefer 1
return numZeros > numOnes ? '0' : '1';
}
function findLeastCommonBit(values: string[], position: number): Bit {
const mostCommonBit = findMostCommonBit(values, position);
return flipBit(mostCommonBit);
}
function flipBit(bit: Bit): Bit {
return bit === '0' ? '1' : '0';
}
// https://adventofcode.com/2021/day/3
const numPositions = inputValues[0].length;
let gammaRate = '';
for (let position = 0; position < numPositions; position++) {
gammaRate += findMostCommonBit(inputValues, position);
}
const epsilonRate = gammaRate
.split('')
.map((bit) => flipBit(bit as Bit))
.join('');
const partOneOutput = parseInt(gammaRate, 2) * parseInt(epsilonRate, 2);
console.log(`PART 1: ${partOneOutput}`);
// https://adventofcode.com/2021/day/3#part2
function filterToOneValue(
values: string[],
getBitToKeep: (values: string[], position: number) => Bit,
): string {
let possibleValues = values;
for (let position = 0; position < values[0].length; position++) {
const bitToKeep = getBitToKeep(possibleValues, position);
possibleValues = possibleValues.filter(
(value) => value[position] === bitToKeep,
)
if (possibleValues.length < 2) {
break;
}
}
return possibleValues[0];
}
const oxygenGeneratorRating = filterToOneValue(inputValues, findMostCommonBit);
const co2ScrubberRating = filterToOneValue(inputValues, findLeastCommonBit);
const partTwoOutput = parseInt(oxygenGeneratorRating, 2) * parseInt(co2ScrubberRating, 2);
console.log(`PART 2: ${partTwoOutput}`); |
from server import app, DBSession
from flask import Blueprint, request, session, send_file, make_response, jsonify
from utils import captcha, cmparePswd, invalid, invalidate
from flask_jwt_extended import jwt_required, jwt_optional, create_access_token, get_jwt_identity, get_raw_jwt
import io
from model import *
import datetime
bp = Blueprint('order',__name__)
@bp.route("/all", methods=['GET'])
@jwt_required
def allOrder():
sess = DBSession()
current_user = get_jwt_identity()
if current_user:
user = sess.query(User).filter_by(id=current_user).first()
if user.isOperator:
storehouse = sess.query(Storehouse).filter_by(operator_id=current_user).first()
virtual_orders = sess.query(Order).filter_by(virtual=True).all()
else:
virtual_orders = sess.query(Order).filter_by(creator_id=current_user,virtual=True).all()
orders=[]
for virorder in virtual_orders:
if virorder.virtual:
if user.isOperator:
_orders = sess.query(Order).filter_by(belonging_id=virorder.id,virtual=False).all()
else:
_orders = sess.query(Order).filter_by(belonging_id=virorder.id,virtual=False).all()
suborders = []
for order in _orders:
suborders.append({
'id': order.product_id,
'count': order.count,
                        # NOTE: assumes Order.fill() stored the unit price on the order;
                        # the original multiplied product_id by count, which appears to be a bug.
                        'cost': str(order.price * order.count),
})
# addr=sess.query(Address).filter_by(id=virorder.address_id).first()
# orders.append((virorder.id, virorder.createTime, suborders, addr.receiver, addr.phonenumber, addr.address, status))
#addr = sess.query(Address).filter_by(owner_id=virorder.creator_id).first()
orders.append({
'orderid': virorder.id,
'create_time': virorder.createTime,
'products': suborders,
'status': virorder.status(),
'receiver': virorder.receiver,
'phonenumber': virorder.phonenumber,
'address': virorder.address,
'total_cost': str(virorder.cost()),
})
orders.sort(key=lambda x:x['create_time'],reverse=True)
return jsonify(orders), 200
else:
return jsonify({"msg": "Please login"}), 401
@bp.route("/cancel", methods=['POST'])
@jwt_required
def cancelOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify(result=False,msg="Missing JSON in request"), 400
order_id = request.json.get('orderid')
if not order_id:
return jsonify(result=False,msg="Missing orderid parameter"), 400
user = sess.query(User).filter_by(id=current_user).first()
order = sess.query(Order).filter_by(id=order_id,cancelled=False,virtual=True).first()
if not order:
return jsonify(result=False,msg="Bad orderid"), 401
order.cancelled = True
sess.commit()
return jsonify(result=True), 200
@bp.route("/pay", methods=['POST'])
@jwt_required
def payOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
order_id = request.json.get('orderid')
if not order_id:
return jsonify({"msg": "Missing orderid parameter"}), 400
user = sess.query(User).filter_by(id=current_user).first()
order = sess.query(Order).filter_by(id=order_id,paid=False,virtual=True).first()
if not order:
return jsonify(result=False,msg="Bad order_id"), 401
order.paid = True
sess.commit()
return jsonify(result=True), 200
@bp.route("/accept", methods=['POST'])
@jwt_required
def acceptOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
order_id = request.json.get('orderid')
if not order_id:
return jsonify({"msg": "Missing orderid parameter"}), 400
user = sess.query(User).filter_by(id=current_user).first()
order = sess.query(Order).filter_by(id=order_id,accepted=False,virtual=True).first()
if not order:
return jsonify(result=False,msg="Bad orderid"), 401
order.accepted = True
sess.commit()
return jsonify(result=True), 200
@bp.route("/deliver", methods=['POST'])
@jwt_required
def deliverOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
user = sess.query(User).filter_by(id=current_user).first()
order_id = request.json.get('orderid')
if not order_id:
return jsonify({"msg": "Missing orderid parameter"}), 400
order = sess.query(Order).filter_by(id=order_id,delivered=False,virtual=True).first()
if not order:
return jsonify({"msg": "Bad orderid"}), 401
order.delivered = True
sess.commit()
return jsonify(result=True), 200
@bp.route("/create", methods=['POST'])
@jwt_required
def createOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
user = sess.query(User).filter_by(id=current_user).first()
ids = request.json.get('ids')
if not ids:
return jsonify({"msg": "Missing ids parameter"}), 400
receiver = request.json.get('receiver', '')
phonenumber = request.json.get('phonenumber', '')
address = request.json.get('address', '')
carts = sess.query(Cart).filter_by(creator_id=current_user,removed=False).all()
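    # Build a parent "virtual" order first; each per-cart sub-order created below links back to it via belonging_id.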
vir = Order(current_user)
vir.setAddress(address, receiver, phonenumber)
sess.add(vir)
sess.commit()
#rders = []
created = []
for cart in carts:
if cart.product_id in ids:
product = sess.query(Product).filter_by(id=cart.product_id,shelved=True).first()
# 限购暂未实现
#print(product.remain, cart.count)
#if (not product) or (product.remain < cart.count):
# orders.append([False,cart.id])
# continue
#product.remain = product.remain - cart.count
order = Order(current_user,False)
order.fill(cart.product_id,cart.count,product.price,vir.id)
sess.add(order)
cart.removed = True
sess.commit()
created.append(cart.product_id)
#orders.append([True,cart.id,cart.product_id,cart.count,product.price])
#return jsonify(orders=orders,price=vir.cost()), 200
return jsonify(result=True,created=created), 200
'''
@bp.route("/pay", methods=['POST'])
@jwt_required
def payOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
if current_user:
user = sess.query(User).filter_by(id=current_user).first()
order_id = request.json.get('order_id')
if not order_id:
return jsonify({"msg": "Missing order_id parameter"}), 400
order = sess.query(Order).filter_by(id=order_id,paid=False,accepted=False,delivered=False,virtual=True).first()
if not order:
return jsonify({"msg": "Bad order_id"}), 401
if user.isOperator:
order.paid = True
sess.commit()
return jsonify(result=True), 200
else:
if order.creator_id == current_user:
order.paid = True
sess.commit()
return jsonify(result=True), 200
return jsonify({"msg": "No Permission"}), 403
else:
return jsonify({"msg": "Please login"}), 401
@bp.route("/cancel", methods=['POST']) #
@jwt_required
def cancelOrder():
sess = DBSession()
current_user = get_jwt_identity()
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
if current_user:
user = sess.query(User).filter_by(id=current_user).first()
order_id = request.json.get('order_id')
if not order_id:
return jsonify({"msg": "Missing order_id parameter"}), 400
order = sess.query(Order).filter_by(id=order_id,accepted=True,delivered=False,virtual=True).first()
if not order:
return jsonify({"msg": "Bad order_id"}), 401
if user.isOperator:
order.cancelled = True
sess.commit()
return jsonify(result=True), 200
else:
if order.creator_id == current_user:
order.cancelled = True
sess.commit()
return jsonify(result=True), 200
return jsonify({"msg": "No Permission"}), 403
else:
return jsonify({"msg": "Please login"}), 401
''' |
/*
** Obtain a read-lock on database version identified by the combination
** of snapshot iLsm and tree iTree. Return LSM_OK if successful, or
** an LSM error code otherwise.
*/
int lsmReadlock(lsm_db *db, i64 iLsm, u32 iShmMin, u32 iShmMax){
int rc = LSM_OK;
ShmHeader *pShm = db->pShmhdr;
int i;
assert( db->iReader<0 );
assert( shm_sequence_ge(iShmMax, iShmMin) );
if( db->bRoTrans ){
db->iReader = 0;
return LSM_OK;
}
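  /* Pass 1: look for a slot that already holds this exact snapshot/tree-header
  ** pair and take a SHARED lock on it. */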
for(i=0; db->iReader<0 && rc==LSM_OK && i<LSM_LOCK_NREADER; i++){
ShmReader *p = &pShm->aReader[i];
if( p->iLsmId==iLsm && p->iTreeId==iShmMax ){
rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0);
if( rc==LSM_OK && p->iLsmId==iLsm && p->iTreeId==iShmMax ){
db->iReader = i;
}else if( rc==LSM_BUSY ){
rc = LSM_OK;
}
}
}
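  /* Pass 2: no matching slot found; try to claim one by taking its EXCL lock,
  ** writing the identifiers, then downgrading to SHARED. */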
for(i=0; db->iReader<0 && rc==LSM_OK && i<LSM_LOCK_NREADER; i++){
rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_EXCL, 0);
if( rc==LSM_BUSY ){
rc = LSM_OK;
}else{
ShmReader *p = &pShm->aReader[i];
p->iLsmId = iLsm;
p->iTreeId = iShmMax;
rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0);
assert( rc!=LSM_BUSY );
if( rc==LSM_OK ) db->iReader = i;
}
}
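  /* Pass 3: fall back to sharing any slot whose contents cover the requested
  ** range of tree headers. */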
for(i=0; db->iReader<0 && rc==LSM_OK && i<LSM_LOCK_NREADER; i++){
ShmReader *p = &pShm->aReader[i];
if( slotIsUsable(p, iLsm, iShmMin, iShmMax) ){
rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0);
if( rc==LSM_OK && slotIsUsable(p, iLsm, iShmMin, iShmMax) ){
db->iReader = i;
}else if( rc==LSM_BUSY ){
rc = LSM_OK;
}
}
}
if( rc==LSM_OK && db->iReader<0 ){
rc = LSM_BUSY;
}
return rc;
} |
# Signal-style handler (its decorator and imports are not shown in this snippet):
# re-checks exam authorizations whenever a final grade is saved.
def update_exam_authorization_final_grade(sender, instance, **kwargs):
    authorize_user_for_schedulable_exam_runs(instance.user, instance.course_run)
/**
* <p>
* Overrides {@link java.io.FilterInputStream#read()} to update the
* <code>IProgressMonitor</code> after the read operation.
* </p>
*
* @return the value read or <b>-1</b> if the end of the stream has been
* reached
* @throws IOException
 * if an I/O error occurred during the read operation
*/
public int read() throws IOException {
int c = this.in.read();
if (c >= 0) {
this.monitor.worked(1);
this.bytesRead++;
}
if (this.monitor.isCanceled()) {
InterruptedIOException exc = new InterruptedIOException(
"Operation canceled");
exc.bytesTransferred = this.bytesRead;
throw (exc);
}
return (c);
} |
/**
* Return the string representation of a {@link ServiceEvent} type
*/
public static String serviceEvent(int eventType) {
String retType = "[" + eventType + "]";
if (ServiceEvent.REGISTERED == eventType)
retType = "REGISTERED";
else if (ServiceEvent.UNREGISTERING == eventType)
retType = "UNREGISTERING";
else if (ServiceEvent.MODIFIED == eventType)
retType = "MODIFIED";
else if (ServiceEvent.MODIFIED_ENDMATCH == eventType)
retType = "MODIFIED_ENDMATCH";
return retType;
} |
// String marshals a keymanager kind to a string value.
func (k Kind) String() string {
switch k {
case Derived:
return "derived"
case Imported:
return "direct"
case Remote:
return "remote"
default:
return fmt.Sprintf("%d", int(k))
}
} |
export const Scripts: BattleScriptsData = {
init: function() {
this.modData('Learnsets', 'thundurus').learnset.spikes = ['8L1'];
this.modData('Learnsets', 'starmie').learnset.mysticalfire = ['8L1'];
this.modData('Learnsets', 'golisopod').learnset.morningsun = ['8L1'];
this.modData('Learnsets', 'mudsdale').learnset.shoreup = ['8L1'];
},
};
|
"""Parse data files from the International Earth Rotation Service.
See:
https://datacenter.iers.org/eop.php
ftp://cddis.gsfc.nasa.gov/pub/products/iers/readme.finals2000A
"""
import numpy as np
import re
from ..constants import DAY_S
inf = float('inf')
_R = re.compile(rb'^......(.........) . '
                rb'(.\d.......)......... '
                rb'(.\d.......)......... '
                rb'.(.\d........)', re.M)
def parse_x_y_dut1_from_finals_all(f):
data = np.fromregex(f, _R, [
('mjd_utc', np.float32),
('x', np.float32),
('y', np.float32),
('dut1', np.float32),
])
return data['mjd_utc'], data['x'], data['y'], data['dut1']
def _build_timescale_arrays(mjd_utc, dut1):
big_jumps = np.diff(dut1) > 0.9
leap_second_mask = np.concatenate([[False], big_jumps])
tt_minus_utc = np.cumsum(leap_second_mask) + 32.184 + 12.0
tt_jd = mjd_utc + tt_minus_utc / DAY_S + 2400000.5
delta_t = tt_minus_utc - dut1
delta_t_recent = np.array([tt_jd, delta_t])
leap_dates = 2400000.5 + np.concatenate([
[-inf], [41317.0, 41499.0, 41683.0], mjd_utc[leap_second_mask], [inf],
])
leap_offsets = np.arange(8.0, len(leap_dates) + 8.0)
leap_offsets[0] = leap_offsets[1] = 10.0
return delta_t_recent, leap_dates, leap_offsets
# Compatibility with older Skyfield versions:
def parse_dut1_from_finals_all(f):
v = parse_x_y_dut1_from_finals_all(f)
return v[0], v[3]
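
# Usage sketch (not part of the original module): assumes a locally downloaded
# IERS 'finals2000A.all' file; the call to the private helper
# _build_timescale_arrays is shown purely for illustration.
if __name__ == '__main__':
    with open('finals2000A.all', 'rb') as f:
        mjd_utc, x, y, dut1 = parse_x_y_dut1_from_finals_all(f)
    delta_t_recent, leap_dates, leap_offsets = _build_timescale_arrays(mjd_utc, dut1)
    print(delta_t_recent.shape)  # (2, N): TT Julian dates and matching delta-T values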
|
r, g, b = sorted(map(int, input().split(' ')))
if ((g - r) % 3 == 2) and ((b - r) % 3 == 2):
if r == 0:
print((g // 3) + (b // 3))
else:
print(r - 1 + ((g - r + 1) // 3) + ((b - r + 1) // 3))
else:
print(r + ((g - r) // 3) + ((b - r) // 3))
|
struct x {
int a;
int b;
};
x y = { 1, 2 };
int z[] = { 2, 3, 4 }; // should result in an int[]
|
The Russian developer President Donald Trump teamed up with in 2015 to try to build a tower in the heart of Moscow had a shoddy business record, the Wall Street Journal reported Friday.
Longtime Trump associate Felix Sater brought the Trump Organization a proposal to license the President’s name to IC Expert, a firm that has been faulted in Moscow court rulings for missed deadlines and construction difficulties, according to the Journal. IC Expert is headed by Andrei Rozov, whom Sater had previously worked with.
People familiar with the proposal told the newspaper that both Donald Trump and Rozov signed a non-binding letter of intent to construct the project in October 2015, well after the presidential primary campaign got underway.
Rozov did not respond to the Journal’s request for comment, and a representative for IC Expert claimed to the newspaper that discussions for a Trump-branded project had never occurred. Yet a person familiar with the House Intelligence Committee’s investigation into Russia’s interference in the 2016 election told the Journal that the letter of intent, which was among the documents that Trump’s longtime friend and former personal lawyer, Michael Cohen, turned over to the committee this week, had Rozov’s name on it.
Sater confirmed his work on the project to the newspaper, saying he wanted to help Trump build “the tallest building in Moscow.”
This was the Trump Organization’s third attempt to do so. Trump and his adult children, Donald Jr. and Ivanka, worked with Sater on an earlier Trump Moscow project in 2005. Trump also discussed a project in 2013 with Russian developer Aras Agalarov, who brought the Miss Universe pageant to Russia that year.
In a statement to the House panel obtained by the New York Times, Cohen said that he discussed the 2015 effort to build in Moscow with Trump three separate times, and had emailed Russian President Vladimir Putin’s personal spokesman for assistance moving it forward. He and Sater also exchanged emails about their dual effort to get Trump elected and complete the project, but it ultimately went nowhere.
Signing a letter of intent with Rozov is just another example of the Trump Organization’s teaming up with developers and financiers with less-than-stellar records. Bloomberg reported Friday that mass protests are expected this weekend outside one IC Expert development on the outskirts of Moscow that residents say the company failed to construct on time, leaving them unable to move in.
Sater met Rozov when both men served on the board of a company run by Sergei Polonsky, a Russian property tycoon who earned billions from his real estate empire until the financial crisis landed him deep in debt, according to the Journal. Polonsky was recently found guilty of defrauding buyers in Russia, but did not serve his sentence of five years in prison because the statute of limitations had expired, per the report.
There’s no evidence Polonsky was involved in the Moscow project Sater and Cohen explored for the Trump Organization. |
// CalcDigest calculates the digest of the object using the specified algorithm
// (md5 or sha256), using ranged downloads of the specified size.
func CalcDigest(obj Object, downloadRangeSize int64, algorithm string) ([]byte, error) {
h, err := newHash(algorithm)
if err != nil {
return nil, err
}
_, err = Download(obj, downloadRangeSize, h)
if err != nil {
return nil, err
}
digest := h.Sum(nil)
return digest, nil
} |
Crews battled a four-alarm fire at a farmers market in Burlington County, New Jersey Tuesday afternoon.
The fire broke out just after 2:00 p.m. inside building 4 within the landmark Columbus Farmers Market complex, located off Route 206 in Columbus.
Firefighters had a lot on their hands with today's high winds fanning the inferno. But they attacked the flames from all sides with over 150 firefighters on hand.
"We were hampered by the winds; cold weather and winds were really a big issue on this. They gave the fire a good head start. Fortunately, the building was built with fire walls in place, which helped us tremendously," Fire Chief John Trout said.
Springfield Township Mayor Dennis McDaniel tells Action News, "There are fire walls between that section and the adjoining sections. So I understand that the damage has been pretty well contained to that building 4."
Billed as the oldest and largest flea market in the Delaware Valley, officials estimate that about 10 of the 65 businesses were destroyed.
The co-owners of Not Just Bagels say their store was among those lost, and it could not have happened at a worse time, just before the busy Thanksgiving holiday.
"From what the fire companies have told us, the building we're located in was just entirely gone, so everything we have is gone," co-owner Mike Charnik said.
"Busiest day of the year, so we don't know what we're going to do," co-owner Holly Newell said.
Officials say it's fortunate that the market was closed today, because had it been open, there would have been a lot of people inside shopping.
"Had it been a busy Thursday or Thanksgiving, it would have been a tremendous amount of people there; it would have been sheer chaos," Trout said.
The cause has not yet been determined.
It remains unclear if the market will be able to, at least partially, reopen on Thanksgiving Day, which is normally a very busy shopping day.
According to their website, the Columbus Farmers Market is "The Delaware Valley's oldest and largest flea market with thousands of outdoor flea market items and over 65 inside retail stores".
Material Preparation and Geotechnical Properties of Transparent Cemented Soil for Physical Modeling
The preparation of transparent materials suitable for simulating different rock and soil masses is the foundation for image-based physical modeling tests in studying deformation and failure mechanisms in geotechnical media. A transparent cemented soil (TCS) with geotechnical properties similar to those of natural soil and soft rock was prepared using fused quartz as the skeleton, hydrophobic fumed silica powder as the cement and a mixed mineral oil of 15# white oil and n-dodecane as the pore fluid. Eleven groups of TCS samples with different shear strengths were synthesized by adjusting the content or mass ratio of the cement and the particle size or gradation of the skeleton. Contrasting unconsolidated-undrained triaxial compression tests were carried out and the mechanical characteristics of TCS were analyzed, showing that the stress-strain relationship, shear strength and failure mode of TCS are similar to those of natural soil. The mechanical parameters of TCS undergo complex variation with these factors, and the mesoscopic mechanism of the changes was revealed with the help of optical microscope photos. The similarity ratio of TCS to soft rock was derived according to the geometries and stress conditions of laboratory model tests, demonstrating the feasibility of using TCS as a similar material to soft rock. Moreover, empirical formulas for the change of shear strength parameters with the factors were fitted to facilitate the preparation of TCS with target shear strength in the future. The findings can provide a basis for preparing transparent similar materials to natural soil and soft rock in physical modeling tests.
INTRODUCTION
Observing the deformation and failure of rock and soil masses through laboratory model tests is important for revealing catastrophic evolution mechanisms in geotechnical engineering; however, traditional model tests based on natural geotechnical materials do not allow observation of the internal catastrophic evolution processes, which hinders the development of model tests. To solve this problem, researchers have developed a set of technologies for visual tests based on transparent soils (Mannheimer and Oswald, 1993; Pincus et al., 1994). This technology allows visualization of deformation and failure evolution in a rock and soil mass (Sadek et al., 2003; White et al., 2003) by replacing natural geotechnical materials with synthetic transparent soils in model tests and combining this with optical image processing technologies, such as digital image correlation (DIC) and particle image velocimetry (PIV) (White et al., 2001; Take and Bolton, 2002). One of the key points of this technology is to prepare transparent soils with mechanical properties similar to those of conventional rock and soil masses.
Transparent soil used in the technology is a two-phase medium consisting of both skeleton and pore fluid components (Iskander et al., 2015; Ganiyu et al., 2016). Materials representing the skeleton mainly include fused quartz, fused silica, fumed silica, amorphous silica and silica gel (Ganiyu et al., 2016). A mixed liquid with the same refractive index as the skeleton is used as the pore fluid; the commonly used pore fluids include mixed mineral oil containing n-dodecane and 15# white oil, and a solution formed by dissolving solid calcium bromide in water (Kong et al., 2016). At present, two conventional methods are used to synthesize transparent soil.
The first method is to synthesize transparent soil by using (micron-scale) amorphous silica (Pincus et al., 1994; Iskander, 1998; Iskander et al., 2002a) or fumed silica (Gill and Lehane, 2001; Hird and Stanier, 2010) as the skeleton: such soil has similar geotechnical properties to soft clay. Researchers (Pincus et al., 1994; Iskander, 1998; Iskander et al., 2002a) prepared transparent soil using amorphous silica with four different particle sizes (1.4, 10, 25, and 175 μm) and proved that the transparent soil has similar stress-strain responses, consolidation behavior, and hydraulic conductivity to common clay as evinced by triaxial test data. Afterwards, by changing the particle size and gradation of amorphous silica, further studies of the mechanical properties of such transparent soil (Lei et al., 2019; Wu et al., 2020b) indicated that the cohesion and angle of internal friction of such transparent clay are about 10 kPa and 14-18°, respectively. Such transparent clay is mainly used in physical modeling tests, such as pile sinking in soft clay (McKelvey et al., 2004; Hird and Stanier, 2010) and consolidation and drainage of soft clay foundations (Beckett and Augarde, 2011).
Furthermore, Kong et al. (2020) and Wu et al. (2020a) recently synthesized a variety of new transparent soil with the same strength, compression, and consolidation characteristics as natural silt by using materials such as Ultrez10, NaOH powder, Aristoflex AVC, and purified water. Such transparent soil has been rarely used in model tests due to the cost of the raw materials or unsatisfactory geotechnical properties (such as a low angle of internal friction).
Defects reducing the strength (the angle of internal friction or cohesion) mean that the aforementioned transparent soil can only be used to simulate a few specific rock and soil masses and makes it difficult to form physical models in more complex shapes, such as steep slopes and underground caverns. Therefore, it is important to prepare transparent soil with strength akin to that of common geotechnical materials for performing relevant model tests. By combining the advantages of the above two conventional methods for preparing transparent soil, Wei et al. (2019) and Lanting et al. (2020) prepared a transparent cemented soil (TCS) by adding fumed silica powder (cement) into fused quartz-based transparent sand and proved that its mechanical and hydraulic properties make it suitable for simulating common clay. There is a lack of further research into the quantitative mixing ratio of each component, the particle size and gradation of the skeleton, and the compactness of the soil mass in TCS, so it is difficult to apply it directly to physical models.
By referring to the idea of Wei et al. (2019) and Lanting et al. (2020) for preparing TCS, a nano-scale hydrophobic fumed silica powder was added as cement to the preparation of transparent sand described in previous studies (Ezzein and Bathurst, 2011; Guzman et al., 2014), and a new TCS was prepared. By controlling the contents and mass ratios of fused quartz and silica powder as well as the particle size distribution of fused quartz, TCS showed geotechnical properties akin to those of common clay or soft rock. In view of the factors affecting the geotechnical properties, 11 groups of test schemes with different mixing ratios of TCS were designed, and the influences of the factors on the geotechnical parameters (such as cohesion, angle of internal friction, and Young's modulus) of TCS and their correlations were assessed through unconsolidated-undrained (UU) triaxial shear tests. Moreover, the influence mechanism of the factors on the strength was analyzed from a mesoscopic perspective, and the feasibility of using TCS as a substitute for natural clay and as a similar material to soft rock was discussed. This could provide a basis for the use of transparent similar materials for clay and soft rock in physical modeling in geotechnical engineering.
Raw Materials
Fused quartz was used as the skeleton of TCS. Iskander and Liu (2010) first used fused quartz as the material representing the skeleton of a transparent sand. Ezzein and Bathurst (2011) found that fused quartz is similar to natural sand in terms of structure and shape and can replace sand in meso-structural terms. Other experimental studies (Kong et al., 2013; Guzman et al., 2014) show that fused quartz has similar mechanical properties to sand and can replace sand in terms of mechanical properties. The fused quartz used (purchased from Xinyi Wanhe Mining Co., Ltd, and shown in Figure 1A) was classified into three groups according to particle sizes of 0.2-0.5, 0.5-1.0, and 1.0-2.0 mm. The specific gravity and refractive index of the particles are 2.2 and 1.4585, respectively, in each group. In the natural state of accumulation, the dry densities of the three groups of fused quartz are 1.40, 1.25, and 1.10 g/cm³, and their void ratios are 36, 43, and 50%, respectively.
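As a quick consistency check (ours, not from the paper), the quoted void percentages coincide with the porosity computed from each dry density and the specific gravity of 2.2:

# Porosity n = 1 - rho_d / (G_s * rho_w) for the three gradings above.
G_s = 2.2      # specific gravity of fused quartz
rho_w = 1.0    # density of water, g/cm^3
for rho_d in (1.40, 1.25, 1.10):
    n = 1.0 - rho_d / (G_s * rho_w)
    print(f"dry density {rho_d} g/cm^3 -> porosity {n:.0%}")
# prints 36%, 43%, and 50%, matching the values quoted in the text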
Mixed mineral oil containing n-dodecane and 15# white oil was used as the pore fluid of TCS. To prevent refraction of light between the pores and skeleton to ensure high transparency of TCS, it is necessary to prepare a pore fluid whose refractive index is consistent with that of fused quartz. At 26°C, the refractive indexes of n-dodecane and 15# white oil (both were purchased from Guangdong Wengjiang Chemical Reagent Co., Ltd) are 1.424 and 1.469, respectively. The refractive index of mixed mineral oil increases with increasing temperature. In general, when the mass ratio of n-dodecane to 15# white oil is 1:3.5 to 1:8, the refractive indexes of mixed mineral oil and fused quartz are consistent or similar. The refractive index of mixed mineral oil can be reduced by adding n-dodecane when it is higher than that of fused quartz and can be increased by adding 15# white oil as it is lower than that of fused quartz.
Nano-scale hydrophobic fumed silica powder was utilized as the cement of TCS. The method of preparing transparent soil by adding (micron-scale) silica powder into mixed mineral oil was proposed by Stanier et al. (2014), and it has been proved that the transparent soil has similar mechanical properties to natural soft clay. Limited by the inconsistent refractive indexes of silica powder and fused quartz, the combination of fused quartz and transparent clay is opaque. By contrast, the nano-scale hydrophobic fumed silica powder used in the study (purchased from Bengbu Jingxi Glass Products Co., Ltd, and shown in Figure 1B) is a modified fumed silica, which is a white powder with particle sizes of 15 nm and finer, a density of about 0.07 g/cm³ in its natural state, a high specific surface area, and good dispersibility. Most importantly, the refractive index of this silica powder is similar to that of fused quartz, which means that the mixture of silica powder, fused quartz and mixed mineral oil has high transparency. Moreover, such silica powder is hydrophobic (lipophilic) and can adsorb the mixed mineral oil; it is therefore adsorbed onto the surface of fused quartz after mixing with fused quartz and mixed mineral oil. The fused quartz particles can bind to each other through the silica powder adsorbed on their surfaces, thus manifesting the properties of clay particles.
Preparation Process
The process of preparing TCS can be divided into the following five steps:

1) The mixed mineral oil of n-dodecane and 15# white oil was prepared so that its refractive index was 1.4585. The fused quartz (Figure 1A) was cleaned and dried to remove impurities and water on the surface. The mixed mineral oil and the fused quartz were sealed for later use.

2) The mixing ratios of fused quartz (Figure 1A), silica powder (Figure 1B), and mixed mineral oil were determined. Multiple tests show that the mass ratio of silica powder to fused quartz in TCS should be 2-20%; it is difficult to prepare TCS if the mass ratio of silica powder is less than 2%. To facilitate uniform mixing and compaction, the mass m_s of mixed mineral oil can be determined from the mass m_q of fused quartz and the mass m_p of silica powder:

m_s = 0.05 m_q + 2.5 m_p    (1)

where 0.05 m_q of mixed mineral oil represents the minimum amount of oil required for wetting the dried fused quartz (as measured), and 2.5 m_p indicates the minimum amount required for saturation of the silica powder (a small worked example of this proportioning follows the list).

3) The fused quartz, silica powder, and mixed mineral oil were blended, so that the silica powder was adsorbed onto the surface of the fused quartz particles. The fused quartz particles were bound to each other through the silica powder to form block structures of different sizes. At this stage, the mixture was light white in color (Figure 1C).

4) The soil blocks were tamped layer by layer. After each layer was tamped, its surface was scraped before tamping the next layer. The height of each tamped layer should not exceed 30 mm. On this basis, cemented soil with an air-void ratio (the ratio of the volume of air voids to the total volume of solids and fluid voids) of 5-19% could be prepared. At this stage, the saturation of the cemented soil was 75-85%, and it was light milky white or colorless and semi-transparent due to incomplete saturation (Figure 1D).

5) The cemented soil obtained in the previous step was placed in a vacuum box for 5-6 h to remove trapped air, and then an appropriate amount of mixed mineral oil was added until the cemented soil was completely saturated. The cemented soil was transparent after saturation (Figure 1E).
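A minimal sketch of the proportioning in step 2 using Formula 1 (the masses match test group G1 reported later: 132 g of fused quartz at a 2% powder mass ratio; the function name is our own):

# Formula 1: minimum mixed mineral oil mass for a given mix.
def mineral_oil_mass(m_q, m_p):
    """m_s in grams: 0.05*m_q wets the dried fused quartz and
    2.5*m_p saturates the silica powder."""
    return 0.05 * m_q + 2.5 * m_p

m_q = 132.0        # fused quartz mass, g
m_p = 0.02 * m_q   # silica powder at a 2% mass ratio -> 2.64 g
print(mineral_oil_mass(m_q, m_p))  # 6.6 + 6.6 = 13.2 g of mixed oil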
TEST PROCEDURES AND RESULTS OF TCS

Test Methods and Schemes
To ascertain the effects of the content and mass ratio of silica powder, particle size and gradation of fused quartz on the geotechnical properties of TCS, 11 groups of TCS with different shear strengths were synthesized and UU triaxial shear tests were conducted by using SJ-1A.G triaxial shear apparatus. The test scheme and basic information pertaining to the TCS are summarized in Table 1, where the amount of mixed mineral oil in the specimens was determined by Formula 1. The particle size distribution in each group of TCS is shown in Figure 2. The proportioned TCS was placed into a geotechnical instrument for specimen preparation according to a certain mass, and specimens measuring 39.1 mm in diameter and 80 mm in height were prepared with an air-void ratio of 8-19% and saturation of about 80%. The confining pressures set in triaxial shear tests were 50, 100, and 200 kPa considering the low stress state in the model tests, and the rate of vertical compression displacement was 0.368 mm/min.
Stress-Strain Relationship
Effects of the Content and Mass Ratio of Silica Powder

Figure 3 shows the stress-strain responses of specimens with different amounts of silica powder (the content of fused quartz is constant). Two groups of data obtained under the conditions that the mass ratios of silica powder are 2 and 3% correspond to test groups G1 and G2 in Table 1. In the two groups, the contents of fused quartz are 132 g (1.375 g/cm³), while the contents of silica powder are 2.64 g (0.028 g/cm³) and 3.96 g (0.041 g/cm³), respectively. The group of data obtained under the condition that the mass ratio of silica powder is 0%, recorded as group G0, is extracted from the research of Wei et al. (2019). The test conditions of the three groups are generally consistent; the main difference lies in the content of silica powder. Therefore, the three groups form contrasting groups with the content of silica powder as a single variable under a constant mass of fused quartz. As shown in Figure 3, under a constant content of fused quartz, the stress-strain curves of TCS containing silica powder (G1 and G2) have consistent morphologies and inflection points, which show significant differences from TCS without silica powder (G0). Furthermore, such differences become more significant with increasing confining pressure. Under the same strain conditions, the stress on specimens containing more silica powder (G2) is greater, which is particularly significant at the low confining pressure of 50 kPa (Figure 3A). Under a high confining pressure (200 kPa), the stress-strain curves of groups G1 and G2 are similar, indicating that the geotechnical properties of TCS containing silica powder with mass ratios of 2 and 3% are consistent under high stress (Figure 3C).

Figure 4 illustrates the stress-strain responses of TCS with different mass ratios of silica powder (the air-void ratios are constant). The four groups of data obtained at mass ratios of silica powder of 5, 7, 10, and 15% correspond to test groups G3 to G6 in Table 1. In the four groups, the air-void ratio of the specimens is 8-10%. Although the densities of the specimens and the amounts of mixed mineral oil in the four groups are different, they do not change independently, but vary with the mass ratio of silica powder. Therefore, the tests on the four groups can still be considered contrasting tests with the mass ratio of silica powder as a single variable. Similarly, to ensure a consistent air-void ratio in each group, the mass ratio of fused quartz correspondingly decreases as the mass ratio of silica powder increases. As shown in Figure 4, at a constant air-void ratio, the increase in the mass ratio of silica powder has no influence on the morphology of the stress-strain curves of TCS, while the inflection points occur at a slightly lower strain as the mass ratio of silica powder increases. Under the same strain conditions, the stress acting on TCS with large mass ratios of silica powder is small, indicating that an increase in the mass ratio of silica powder softens TCS.

Effects of the Particle Size and Gradation of Fused Quartz

Figure 5 shows the stress-strain responses of TCS with different particle sizes of fused quartz under the conditions of a constant mass ratio of silica powder and a constant air-void ratio. The three groups of data obtained from TCS with fused quartz particle sizes of 0.5-1.0, 0.2-0.5 and 1.0-2.0 mm correspond to test groups G5, G7 and G8 in Table 1.
The three groups have the same masses of fused quartz and silica powder but different particle sizes of fused quartz, so they are contrasting groups with the particle size of fused quartz as a single variable. As demonstrated in Figure 5, the stress-strain curves of TCS with the three different particle sizes show basically the same morphologies, while the inflection points appear slightly earlier with increasing particle size. Under the same strain conditions, TCS with the particle sizes of 0.5-1.0, 0.2-0.5, and 1.0-2.0 mm are ranked in descending order according to stress.

Figure 6 shows the stress-strain responses of TCS with different particle gradations of fused quartz when the mass ratio of silica powder and the air-void ratio are constant. Three groups of data obtained when using fused quartz with gradations of 0.2-1.0, 0.2-2.0 and 0.5-2.0 mm correspond to test groups G9 to G11 in Table 1, respectively. The three groups of TCS have identical masses of fused quartz and silica powder but different particle gradations of fused quartz, so tests on the groups are contrasting tests with the gradation of fused quartz as a single variable. Figure 6 demonstrates that the morphologies and inflection points of the stress-strain curves of TCS with different particle gradations are similar. Under the same strain conditions, TCS with the particle gradations of 0.2-1.0, 0.5-2.0, and 0.2-2.0 mm are ranked in descending order according to stress. Such differences in stress are significant at the lower confining pressure of 50 kPa, while negligible at the higher confining pressure of 200 kPa.
Changes in Mechanical Parameters and the Mesoscopic Mechanism
Effects of the Content and Mass Ratio of Silica Powder

Figure 7 shows the changes in the geotechnical parameters (the cohesion and angle of internal friction were obtained by drawing the Mohr-Coulomb strength envelopes) of TCS with different contents of silica powder in test groups G0 to G2. As illustrated, when the amount of fused quartz is constant, with the increase in the amount of silica powder (Figure 7C), the cohesion of the specimens increases significantly, while the angle of internal friction increases slowly (Figure 7A). Young's modulus of the specimens increases significantly with the increase of the confining pressure, consistent with natural rock and soil material behavior. Under the same confining pressure, Young's modulus gradually increases with increasing amount of silica powder (Figure 7B).

Figure 8 shows the changes in the geotechnical parameters of TCS with different mass ratios of silica powder in test groups G3 to G6. This shows that with the increase of the mass ratio of silica powder (accompanied by a decrease in the mass ratio of fused quartz, as shown in Figure 8C), the cohesion and angle of internal friction tend to decrease (Figure 8A). Young's modulus of TCS increases significantly with confining pressure. At the same confining pressure, Young's modulus decreases significantly with increasing mass ratio of silica powder (Figure 8B).

Figure 9 shows the microscopic characteristics of TCS with different mass ratios of silica powder. Figures 9A-C are photos taken with an optical microscope, and Figures 9D-F are schematic diagrams of the microscopic photos. When the mass ratio of silica powder is 5% (Figures 9A,D), the particles of fused quartz in TCS are interlocked; unconnected pores among the particles are formed and are filled with the mixture of silica powder and mixed mineral oil. In this case, the shear strength of TCS is high due to the interlocking effect of the fused quartz particles. As the mass ratio of silica powder increases to 10%, the number of pores among the fused quartz particles increases, and connected pores partially form (Figures 9B,E). At this time, the interlocking effect of the fused quartz particles is decreased, as a result of which the shear strength of TCS is decreased. When the mass ratio of silica powder increases to 15%, the number of connected pores increases, causing some fused quartz particles to be suspended in the mixture of silica powder and mixed mineral oil (Figures 9C,F). At this time, the interlocking effect among the fused quartz particles is greatly reduced, and accordingly, the shear strength of TCS is significantly reduced.
Effects of the Particle Size and Gradation of Fused Quartz
The changes in the geotechnical parameters of TCS with different particle sizes of fused quartz in test groups G5, G7, and G8 are shown in Figure 10. In the three groups of TCS with different particle sizes of fused quartz, the group with the particle size of 0.5-1.0 mm has the maximum cohesion, angle of internal friction, and Young's modulus, followed by that with a particle size of 0.2-0.5 mm, while the minimum values are found when using a particle size of 1.0-2.0 mm. The particle size exerts a significant influence on the cohesion and Young's modulus, while having little influence on the angle of internal friction of TCS in the three groups.

Figure 11 shows changes in the geotechnical parameters of TCS with different particle gradations of fused quartz in test groups G9 to G11. Among the three groups of TCS with different particle gradations of fused quartz, the group with the gradation of 0.2-1.0 mm has the largest cohesion and the smallest angle of internal friction. The cohesion of the group with the gradation of 0.2-2.0 mm is lower, and the minimum cohesion and the maximum angle of internal friction are found in the group with the gradation of 0.5-2.0 mm. The three groups of TCS with different gradations exhibit small differences in Young's modulus under a high confining pressure of 200 kPa. Under a low confining pressure of 50 kPa, the groups are ranked in the same order in terms of Young's moduli as they are for the apparent cohesion. In general, different gradations of fused quartz significantly affect the cohesion, while only slightly influencing the angle of internal friction and Young's modulus of TCS in the three groups.

Figure 12 illustrates the mesoscopic characteristics of TCS with different gradations of fused quartz. When the content of fine particles of fused quartz is low (Figures 12A,D), the coarse particles of fused quartz are strongly interlocked. The interlocked coarse particles bear the main part of the shear force acting on TCS, which makes TCS display high shear strength. As the content of fine particles increases (accompanied by a decrease of coarse particles) (Figures 12B,E), the pores among coarse particles are filled by the fine particles, so that the interlocking effect of coarse particles is weakened, resulting in a decrease in the shear strength of TCS. When the amount of fine particles dominates in TCS (Figures 12C,F), coarse particles are surrounded by fine particles and appear, from the mesoscopic view, to be suspended in TCS. In this case, the interlocking effect is greatly decreased due to the suspended state of the coarse particles, and accordingly, the shear strength of TCS is significantly decreased.
Failure Characteristics Under Triaxial Compression
Effects of the Mass Ratio of Silica Powder

Figure 13 shows the stress-strain responses of the specimens with different mass ratios of silica powder in test groups G2, G3, and G5 under a confining pressure of 200 kPa (Figure 13A) and their corresponding failure modes (Figures 13B-D). As shown: 1) as the mass ratio of silica powder increases from 3% (Figure 13B) to 5% (Figure 13C) and 10% (Figure 13D), the cohesion of the specimens increases significantly. In this case, the compressed specimens are less likely to disintegrate, and the integrity of the specimens is improved. 2) When the mass ratio of silica powder is less than 10%, the specimens disintegrate to different degrees after the latex films around them are removed (Figures 13B,C). In particular, when the mass ratio of silica powder is less than 3%, the specimens have very low cohesion and disintegrate after compression and removal of the latex film. 3) It can be seen from the more intact specimens that shear failure mainly occurs in the specimens and that the failure characteristics are similar to those of clay or soft rock (Figure 13D). This phenomenon verifies, from the perspective of failure, the feasibility of using TCS in model tests in geotechnical engineering.

Effects of the Particle Size and Gradation of Fused Quartz

Figures 14, 15 show the stress-strain responses of the specimens with different particle sizes (specimens in groups G5, G7, and G8 in Figure 14) and gradations (specimens in groups G9 to G11 in Figure 15) of fused quartz under a confining pressure of 200 kPa and their corresponding failure modes. As shown: 1) shear failure occurs in specimens with different particle sizes and gradations of fused quartz after compression, which is similar to natural clay or soft rock. 2) Among the specimens with three different particle sizes of fused quartz, the specimen with larger particles is less intact after compression; there is basically no clear shear band in the specimen with a fused quartz particle size of 0.2-0.5 mm after compression (Figure 14B), while significant shear failure occurs in the specimen with particle sizes of 1.0-2.0 mm after compression (Figure 14D). 3) Among the specimens with three different gradations of fused quartz, the specimen with the fused quartz gradation of 0.2-1.0 mm is more intact after compression (Figure 15B), with no clear shear band being found; however, shear failure can be seen in the specimens with gradations of 0.2-2.0 and 0.5-2.0 mm after compression (Figures 15C,D).
DISCUSSION OF PREPARATION, APPLICATION AND LIMITATION OF TCS
A variety of transparent soils have been developed in the past two decades, such as transparent sand (Iskander et al., 2002b; Sadek et al., 2002; Iskander and Liu, 2010; Cao et al., 2011; Ezzein and Bathurst, 2011; Guzman and Iskander, 2013; Kashuk et al., 2014; Sun and Liu, 2014) and transparent clay (Pincus et al., 1994; Iskander, 1998; Gill and Lehane, 2001; Iskander et al., 2002a; Hird and Stanier, 2010; Wu et al., 2020a; Kong et al., 2020). However, there are some defects in the shear strength of these transparent soils, such as the low cohesion of transparent sand and the low angle of internal friction of transparent clay, making it difficult to form physical models in more complex shapes.

FIGURE 10 | Variations in geotechnical parameters of TCS with different particle sizes of fused quartz.
Preparation of TCS With Target Shear Strength
Previous studies have proved that particle characteristics affect the shear strength of soils (Ganju et al., 2021; Lu et al., 2021). For example, the angle of internal friction was found to increase with increasing particle size in natural sand through triaxial compression tests using sand with different particle sizes (Li, 2013; Vangla and Latha, 2015). Moreover, various shear strength expressions for sandy soils were fitted based on the correspondence between the gradation parameters (such as the coefficient of uniformity C_u and the coefficient of curvature C_c) and the shear strength (Belkhatir et al., 2012; Bayat and Bayat, 2013; Sezer, 2013; Havaee et al., 2015). It is clear from these studies that the particle characteristics are an indicator of the shear strength of soils and can be used to predict soil shear strength.
Since the mechanical properties of TCS are significantly affected by gradation characteristics (Section 4.1.2), establishing expressions for how the mechanical properties vary with the gradation will be helpful for preparing TCS with target mechanical properties. Figure 16 illustrates the variations of cohesion and internal friction angle of TCS with the different gradations in Table 1. As suggested in previous studies (Belkhatir et al., 2012; Bayat and Bayat, 2013; Sezer, 2013; Havaee et al., 2015), the coefficient of uniformity, C_u, is employed as an indicator of the gradation characteristics of TCS, and it is calculated using the following equation:

C_u = D_60 / D_10    (2)

where D_10 and D_60 are the particle diameters at 10 and 60% passing, respectively. The particle diameters required to calculate the coefficients of uniformity in Figure 16 are determined by reading the particle size distribution curves in Figure 2. As shown, the cohesion c (Figure 16A) and internal friction angle φ (Figure 16B) of TCS gradually decrease with an increasing logarithm of the coefficient of uniformity, log(C_u). The variations of the cohesion c and internal friction angle φ are fitted by the following equations:

c = −102.34 × log(C_u) + 91.33    (3)

φ = −53.61 × log(C_u) + 62.06    (4)

Based on Eqs 2-4, TCS with a target strength can be prepared by adjusting the particle gradation in the future.
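A minimal numerical sketch of this prediction (the helper name and the D_10/D_60 values are illustrative assumptions, not data read from Figure 2):

import math

def shear_strength_from_gradation(d10, d60):
    cu = d60 / d10                         # coefficient of uniformity, Eq 2
    c = -102.34 * math.log10(cu) + 91.33   # cohesion in kPa, Eq 3
    phi = -53.61 * math.log10(cu) + 62.06  # friction angle in degrees, Eq 4
    return cu, c, phi

cu, c, phi = shear_strength_from_gradation(d10=0.3, d60=0.9)  # hypothetical PSD
print(f"Cu = {cu:.2f}, c = {c:.1f} kPa, phi = {phi:.1f} deg")
# -> Cu = 3.00, c = 42.5 kPa, phi = 36.5 deg, within the ranges reported below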
Application of TCS as the Substitute for Clay
The similarity of the stress-strain relationship is the basis for using TCS as a substitute for natural clay. It can be seen from Figures 3-6 that the shape of the stress-strain curves of TCS is of the ideal elastoplastic type or the strain-softening type, which is similar to that of natural clay (Huang et al., 2016). Figure 17 illustrates the envelopes of the stress-strain curves of TCS prepared according to the 11 schemes in Table 1. As shown: (1) under different particle sizes and gradations of fused quartz and different mass ratios of silica powder, the stress-strain curves of TCS change significantly. By adjusting the particle size of fused quartz and the mass ratio of silica powder, the stress-strain curves of TCS can be matched to those of natural clay. (2) Compared with several existing typical transparent geomaterials used in model tests, such as transparent granular soil (Li et al., 2020), TCS (Wei et al., 2019), transparent glass soil (Kong and Lu, 2014), transparent glass sand (Kong et al., 2013) and transparent silica gel (Iskander et al., 2002b), the TCS prepared in the present work has a similar stress-strain relationship. The difference is that the stress-strain curves of the TCS used in this research can be controlled across a wider range, so TCS has stronger applicability in simulating different natural clays.
The changes in geotechnical parameters (Figures 7, 8, 10, and 11) show that the geotechnical parameters of TCS change over large ranges: cohesion varies from 5 to 65 kPa, the angle of internal friction varies from 25 to 44°, and Young's modulus from 5 to 17 MPa (σ_3 = 50 kPa), 6-22 MPa (σ_3 = 100 kPa), and 9-42 MPa (σ_3 = 200 kPa). In addition, by changing the air-void ratio or bulk density of TCS, it can be predicted that the range of geotechnical parameters of TCS will increase further. In terms of the ranges of parameters obtained through testing, they cover those of most mechanical parameters of clay (Huang et al., 2016; Chen and Guo, 2019), such as loess, expansive soil, soft soil, frozen soil, red clay, saline soil, etc. Therefore, such TCS can be used as a substitute for natural clay in model tests.
The failure of the TCS specimens under triaxial compression (Figures 13-15) is mainly due to shearing, and the distribution, number, and dip angle of the shear zones in the specimens are similar to those in natural clay (Huang et al., 2016), so TCS can be used to simulate failure process of natural clay.
Application of TCS as Similar Material to Soft Rock
TCS differs from rock in terms of its mechanical properties, so it cannot be directly used as a substitute for rock. To expand the applicability of TCS in model tests, the feasibility of using TCS as similar materials to soft rock in model tests was explored based on the principle of similar mechanical properties of hard soil and soft rock. Subscripts P and M separately represent the prototype and model, and η denotes the similarity ratio of physical quantities between the prototype and the model. The conditions for TCS as similar material to soft rock must satisfy the requirements delineated below.
Ignoring the self-weight of a rock mass, the main physical similarity constants of TCS are as follows:

η_σ = σ_P / σ_M    (5)

η_ε = ε_P / ε_M    (6)

η_E = E_P / E_M    (7)

where σ, ε, and E denote the stress, strain, and Young's modulus, respectively; η_σ, η_ε, and η_E separately represent the similarity constants of stress, strain, and Young's modulus. Based on the requirement that the stress-strain relationships in the prototype and the model should be expressed by the same equation, the following formula is derived:

σ_P = E_P ε_P    (8)

By substituting Formulae 5-7 into Formula 8, Formula 8 can be rewritten as follows:

σ_M = (η_E η_ε / η_σ) E_M ε_M    (9)

σ_M = E_M ε_M    (10)

If Formulae 9, 10 are equal, the similarity index is:

η_σ / (η_E η_ε) = 1    (11)

and as ε is dimensionless and the similarity constant of strain is η_ε = 1, then

η_σ = η_E    (12)

The similarity constant of stress can be selected at will without considering the self-weight.
When considering the self-weight of both rock and soil, the similarity constants of the materials should also include

η_γ = γ_P / γ_M    (13)

where γ and η_γ represent the bulk density and its similarity constant, respectively. Based on principles of elastic mechanics and similarity, the similarity index can be calculated as follows:

η_σ = η_γ η_L    (14)

η_E = η_γ η_L    (15)

where η_L represents the similarity constant of size (length) and η_L = L_P / L_M (L_P and L_M separately indicate the sizes of the prototype and the model). Meanwhile, Formula 12 should be satisfied.
In similarity model tests, besides Formulae 12, 14, and 15 governing the main similarity constants, the requirement for similar strength should also be met. The strength of TCS is expressed as the Mohr-Coulomb shear strength, whose parameters are the cohesion and the angle of internal friction. The shear strength has the dimension of stress. When selecting the strength index of simulated materials, it can be converted according to the following formulae:

η_c = c_P / c_M    (16)

η_c = η_σ    (17)

where c_P and c_M denote the apparent cohesions of the prototype and the model, respectively. The angle of internal friction φ is dimensionless, so the similarity index should satisfy:

η_φ = φ_P / φ_M = 1    (18)

where η_φ represents the similarity constant of the angle of internal friction, and φ_P and φ_M represent the angles of internal friction of the prototype and the model, respectively.
In conclusion, in the similarity model tests of TCS, the similarity index must satisfy:

η_σ = η_E = η_c = η_γ η_L,  η_ε = η_φ = 1    (19)

In accordance with Formula 19, if the geometric similarity constant is 100, the geotechnical parameters of the prototype materials corresponding to TCS materials can be obtained (Table 2), from which it can be seen that TCS is applicable as a similar material to soft rock, such as dolomite (Liu et al., 2021), marl (Ferrero and Migliazza, 2009), shale (Geng et al., 2016), siltstone (Su et al., 2007), or claystone (Hu et al., 2014).
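As a worked illustration of this scaling (a minimal sketch: the model parameters are assumed TCS values within the ranges reported above, not entries from Table 2, and the bulk-density ratio is taken as η_γ = 1):

# Scale TCS (model) parameters up to prototype values per Formula 19.
def prototype_parameters(c_M, E_M, phi_M, eta_L=100.0, eta_gamma=1.0):
    eta_sigma = eta_gamma * eta_L  # stress scale, Formulae 14-15
    return {
        "cohesion_kPa": eta_sigma * c_M,        # Formulae 16-17
        "youngs_modulus_MPa": eta_sigma * E_M,  # eta_E = eta_sigma (Formula 12)
        "friction_angle_deg": phi_M,            # eta_phi = 1 (Formula 18)
    }

print(prototype_parameters(c_M=40.0, E_M=20.0, phi_M=35.0))
# -> 4000 kPa (4 MPa) cohesion and 2000 MPa (2 GPa) stiffness, with the
#    friction angle unchanged: values in the range of soft rocks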
Limitations to the Present Study of TCS
TCS is usually required to reach a fully saturated state to realize its high transparency in physical model tests; therefore, measuring the mechanical properties of saturated TCS is a prerequisite for developing physical model tests using TCS (Iskander et al., 2015; Ganiyu et al., 2016). When measuring the shear strength parameters of saturated soil specimens, consolidated undrained (CU) or consolidated drained (CD) triaxial compression tests should be conducted; however, during the consolidation or drainage of saturated TCS specimens, the pore fluid (mixed mineral oil) drained from the specimens corrodes rubber products (such as the latex membrane and rubber O-ring seals), resulting in failure of the test and even instrument damage. Therefore, CU or CD tests of fully saturated TCS specimens were not performed in this study; as an alternative, UU tests on specimens with a saturation ratio of about 80% were conducted. There is a certain difference in mechanical properties between TCS with saturation ratios of about 80 and 100% (a study of unsaturated silty clay (Kererat, 2019) with a micro-structure similar to that of TCS shows that the difference is within 15% in terms of strength parameters), but this small difference does not affect the investigation of mechanical characteristics such as the strength, stiffness, and failure of TCS.
The cementation arising from the use of silica powder is the key element that distinguishes TCS from transparent sand, and work on the cementation mechanism is significant for further study of the mechanical behavior of TCS. Although Yang et al. (2020) used scanning electron microscopy to study the micro-structural characteristics of TCS and ascertained the changes in the cement and skeleton of TCS before and after compression, the cementation mechanism of silica powder remains unclear; the mechanism by which silica powder adsorbs mixed mineral oil warrants further study.
CONCLUSION
A type of TCS was prepared by using fused quartz as the skeleton, nano-scale hydrophobic fumed silica powder as the cement, and mixed mineral oil containing n-dodecane and 15# white oil as the pore fluid; the recipe and preparation methods of TCS were also validated. By conducting 11 groups of triaxial shear tests to assess the effects of four main factors of the skeleton and cement on the strength characteristics of TCS, changes in the stress-strain curves, geotechnical parameters, mesoscopic structures, and failure characteristics of TCS were analyzed. Finally, the feasibility of using TCS as a substitute for clay and as a similar material to soft rock in model tests was proved, and the optimum similarity ratio of TCS was determined. The results indicated that:

1) By properly grading fused quartz with particles of 0.2-0.5, 0.5-1.0, and 1.0-2.0 mm and adding 2-15% of silica powder and the corresponding mixed mineral oil, TCS with a cohesion of 5-65 kPa, an angle of internal friction of 25-44°, and a Young's modulus of 5-42 MPa (when σ_3 = 50 to 200 kPa) could be prepared.

2) When the particle size, gradation and content of fused quartz are constant, the strength and stiffness of TCS increase with an increasing amount of silica powder; the cohesion and Young's modulus increase significantly, while the angle of internal friction rises less. As the mass ratio of silica powder increases (accompanied by a decrease in the mass ratio of fused quartz), the strength and stiffness of TCS gradually decrease, and the cohesion, angle of internal friction, and Young's modulus decrease to a significant extent.

3) At a constant mass ratio of silica powder, the strength and stiffness of TCS change with the particle size of fused quartz as follows: TCS values with fused quartz particle sizes of 0.5-1.0, 0.2-0.5, and 1.0-2.0 mm are ranked in descending order. In the three groups of TCS with fused quartz gradations of 0.2-1.0, 0.2-2.0, and 0.5-2.0 mm, the gradation of fused quartz affects the cohesion to a significant extent, while only slightly affecting the angle of internal friction and Young's modulus.

4) The stress-strain relationship, geotechnical parameters, and shear failure mode of TCS are similar to those of clay and soft rock. Accurate control can be realized by adjusting the content and mass ratio of silica powder and the particle size and gradation of fused quartz; therefore, TCS can be used as a substitute for natural clay and as a similar material to soft rock in visual physical modeling tests based on transparent soil.
DATA AVAILABILITY STATEMENT
The original contributions presented in the study are included in the article/Supplementary Material, further inquiries can be directed to the corresponding author. |
/**
* A test case for Entity Centre DSL generated top level actions.
* @author TG Team
*
*/
public class EntityCentreBuilderTopLevelActionsTest {
@Test
public void top_level_actions_should_be_present_in_configuration_with_appropriate_groups_and_order() {
final EntityActionConfig topActionStub = action(FunctionalEntity.class).withContext(context().withCurrentEntity().withSelectionCrit().build()).build();
final EntityCentreConfig<TgWorkOrder> config = centreFor(TgWorkOrder.class)
.addTopAction(topActionStub)
.also()
.beginTopActionsGroup("group1")
.addGroupAction(topActionStub)
.addGroupAction(topActionStub)
.endTopActionsGroup()
.also()
.addTopAction(topActionStub)
.also()
.beginTopActionsGroup("group2")
.addGroupAction(topActionStub)
.addGroupAction(topActionStub)
.endTopActionsGroup()
.addProp("desc").build();
assertTrue(config.getTopLevelActions().isPresent());
assertEquals(6, config.getTopLevelActions().get().size());
assertFalse(config.getTopLevelActions().get().get(0).getValue().isPresent());
assertEquals("group1", config.getTopLevelActions().get().get(1).getValue().get());
assertEquals("group1", config.getTopLevelActions().get().get(2).getValue().get());
assertFalse(config.getTopLevelActions().get().get(3).getValue().isPresent());
assertEquals("group2", config.getTopLevelActions().get().get(4).getValue().get());
assertEquals("group2", config.getTopLevelActions().get().get(5).getValue().get());
}
@Test
public void top_level_actions_may_not_exists() {
final EntityCentreConfig<TgWorkOrder> config = centreFor(TgWorkOrder.class).addProp("desc").build();
assertFalse(config.getTopLevelActions().isPresent());
}
} |
mod table_result;
pub use self::table_result::TableResult;
#[cfg(test)]
mod tests;
use core::{fmt, slice::Iter};
use crate::{Column, Dice, Result, Rows, Row};
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_derive::{Deserialize, Serialize};
/// A `Table` that can be rolled on
#[derive(Debug, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Table {
#[doc(hidden)]
dice: Dice,
#[doc(hidden)]
heading: Column,
#[doc(hidden)]
results: Rows,
}
impl Table {
    /// Create a new `Table`
pub fn new<C, R>(dice: Dice, heading: C, results: R) -> Result<Self>
where
C: Into<Column>,
R: Into<Rows>,
{
let heading: Column = heading.into();
let results: Rows = results.into();
results.validate(&dice)?;
Ok(Table {
dice,
heading,
results,
})
}
/// Perform a roll on this `Table`
pub fn roll(&self) -> TableResult {
let roll = self.dice.roll();
match self
.rows()
.enumerate()
.find(|(_, row)| **row == roll)
.map(|(i, _)| TableResult::new(roll, i))
{
Some(t) => t,
            None => unreachable!("Table was created without all possible rolls!"),
}
}
/// Iterate over every `Row` in this `Table`
pub fn rows(&self) -> Iter<Row> {
self.results.iter()
}
}
impl<'de> Deserialize<'de> for Table {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
enum Field {
Dice,
Heading,
Results,
        }
struct TableVisitor;
impl<'de> Visitor<'de> for TableVisitor {
type Value = Table;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Table")
}
fn visit_seq<V>(self, mut seq: V) -> std::result::Result<Table, V::Error>
where
V: SeqAccess<'de>,
{
let dice: Dice = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let heading: Column = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let results: Rows = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
Table::new(dice, heading, results).map_err(de::Error::custom)
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Table, V::Error>
where
V: MapAccess<'de>,
{
let mut dice = None;
let mut heading = None;
let mut results = None;
while let Some(key) = map.next_key()? {
match key {
Field::Dice => {
if dice.is_some() {
return Err(de::Error::duplicate_field("dice"));
}
dice = Some(map.next_value()?);
}
Field::Heading => {
if heading.is_some() {
return Err(de::Error::duplicate_field("heading"));
}
heading = Some(map.next_value()?);
}
Field::Results => {
if results.is_some() {
return Err(de::Error::duplicate_field("results"));
}
results = Some(map.next_value()?);
}
}
}
let dice: Dice = dice.ok_or_else(|| de::Error::missing_field("dice"))?;
let heading: Column = heading.ok_or_else(|| de::Error::missing_field("heading"))?;
let results: Rows = results.ok_or_else(|| de::Error::missing_field("results"))?;
Table::new(dice, heading, results).map_err(de::Error::custom)
}
}
const FIELDS: &'static [&'static str] = &["dice", "heading", "results"];
deserializer.deserialize_struct("Table", FIELDS, TableVisitor)
}
}
|
/**
* @param element
* with concrete class name as its tag. This class is expected to
* be in the same package as referenceType, unless the field is
* annotated with the package name. A special attribute name
* "_class", if present would be the class name
* @param field
* to which this is destined for. Used to check for parent class
* as annotation
* @param referenceType
* a class whose package may be shared with this class
*
* @param parentObject
* parent of this object
* @return
* @throws XmlParseException
*/
private static Object elementToSubclass(Element element, Field field,
Class<?> referenceType, Object parentObject)
throws XmlParseException {
Object thisObject = null;
String elementName = element.getTagName();
try {
String className = element.getAttribute(CLASS_NAME);
if (className == null || className.length() == 0) {
String packageName = null;
MapDetails ant = field.getAnnotation(MapDetails.class);
if (ant != null) {
packageName = ant.packgaeName();
} else {
packageName = referenceType.getPackage().getName();
}
/*
* we take package name either from annotation on the field or
* from the reference type
*/
className = packageName + '.'
+ elementName.substring(0, 1).toUpperCase()
+ elementName.substring(1);
			}
			// Instantiate using the resolved class name, whether it came from
			// the explicit "_class" attribute or was derived from the element tag.
			thisObject = Class.forName(className).newInstance();
elementToObject(element, thisObject);
return thisObject;
} catch (Exception e) {
e.printStackTrace();
throw new XmlParseException("error while parsing " + elementName
+ " element as a wrapped-element\n " + e.getMessage());
}
} |
package com.kissthinker.javabean;
import java.beans.PropertyChangeSupport;
/**
* @author <NAME>
*
*/
public interface PropertyChangeSupporter
{
/**
*
* @return PropertyChangeSupport
*/
PropertyChangeSupport propertyChangeSupport();
} |
Buzz: A novel programming language for heterogeneous robot swarms
A new programming language designed specifically for robot swarms, Buzz is based on the idea that a developer must be allowed to pick the most comfortable approach to behavioral design – whether that’s bottom-up or top-down.
Designing swarm robotics systems
Swarm robotics is a branch of robotics that studies the coordination of large teams of robots. Swarm robotics systems have the potential to offer solutions for large-scale application scenarios that require reliable, scalable and autonomous behaviors.
Designing the behavior of robot swarms is difficult. The number of interactions among the robots increases steeply with the swarm size, making it difficult to predict the dynamics of the group and to pinpoint the causes of errors.
There are two general approaches to the design of swarm behaviors: the first is bottom-up, focusing on individual behaviors and low-level interactions; the second is top-down, meaning that the developer treats the swarm as a unique entity.
Both approaches have strengths and weaknesses. The bottom-up approach ensures full control of the swarm, but it also exposes the developer to unnecessary details, making development slow and error-prone. Conversely, the top-down approach allows for fast prototyping, but prevents developers from fine-tuning swarm behaviors.
Buzz concepts
Buzz is a new programming language we designed specifically for robot swarms. Buzz is based on the idea that an effective language for robot swarms must allow the developer to pick the most comfortable approach to behavior development – bottom-up or top-down.
The syntax and semantics of Buzz are inspired by well-known programming languages such as JavaScript, Python and Lua. We made this decision to allow for a short learning curve. Analogously to these languages, Buzz provides familiar constructs such as branching conditions, loops and function declarations.
Buzz also includes a number of constructs specifically designed for swarm-level development.
The “swarm” construct allows a developer to split the robots into multiple groups and assign a specific task to each. Swarms can be created, disbanded, and modified dynamically.
The “neighbors” construct captures an important concept in swarm systems: locality. In nature, individuals interact directly and only with nearby swarm-mates. Interactions include communication, obstacle avoidance or leader following. The neighbors construct provides functions to mimic these mechanisms.
Buzz also offers a construct that allows an entire robot swarm to agree on a set of (key, value) pairs. This construct is named “virtual stigmergy,” after the environment-mediated interaction process displayed by nest-building insect colonies.
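To make these constructs concrete, here is a short sketch in Buzz syntax; the construct names follow the published Buzz documentation, but the exact signatures, data fields and swarm ids used here are illustrative assumptions:

# Split the robots into a swarm and give it a task
s = swarm.create(1)
s.select(id % 2 == 0)            # even-id robots join swarm 1
s.exec(function() {
  log("robot ", id, " working for swarm 1")
})

# Interact only with nearby robots
neighbors.foreach(function(rid, data) {
  log("neighbor ", rid, " at distance ", data.distance)
})
neighbors.broadcast("ping", 1)   # situated, local communication

# Swarm-wide agreement on (key, value) pairs
v = stigmergy.create(5)          # virtual stigmergy with id 5
v.put("object_color", "red")
x = v.get("object_color")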
The run-time platform of Buzz is designed to be lightweight (it occupies just 12KB), efficient, and extensible. In particular, developers can interface the Buzz run-time platform with other frameworks such as the Robot Operating System (ROS) and add new commands and constructs to fit diverse robots.
The video shows a swarm of Spiri robots engaged in a detection task. The environment presents two colored objects – one red, one blue – that the Spiris must find using their frontal camera. Some Spiris can see the objects directly, while others cannot, either because their view is obstructed by other robots or because they are too far from the objects. The aim of this behavior is to divide the robots into two swarms, one for each object to detect. The experiment is run using the ARGoS multi-robot simulator.
In the first phase, the robots take off. This phase ends when all robots have reached the target altitude. The robots use virtual stigmergy to inform the others when they are ready.
In the second phase, the robots individually search for the objects by rotating on the spot. When a robot finds an object, the simulator draws a cyan line. Virtual stigmergy is used again by the robots to inform the others whether or not an object was found after a complete rotation.
In the third phase, the robots that saw an object use virtual stigmergy to share information about it, such as distance and color. The robots that saw no objects use this information to pick one of the two objects.
In the fourth phase, two swarms are created – one for each object. The “neighbors structure” is used to separate the robots according to their swarm membership.
Conclusion
The possibility of expressing algorithms both in a bottom-up and in a top-down fashion allows Buzz developers to encode complex autonomous swarm behaviors in a concise way.
For this reason, Buzz has the potential to become an enabler for future research on real-world swarm robotics systems. Currently, no standardized platform exists that allows researchers to compare, share and reuse swarm behaviors. Inescapably, development involves a certain amount of re-coding of recurring swarm behaviors. The design of Buzz is motivated and encouraged by the necessity to overcome this state of affairs. We hope that Buzz will aid the growth of the swarm robotics field.
Future work on Buzz will involve several activities. First, we will integrate the run-time into multiple robotics platforms of different kinds, such as ground-based and aerial robots. Second, we will create a library of well-known swarm behaviors, which will be offered open-source to practitioners as part of the Buzz distribution. Finally, we will tackle the design of general approaches to swarm behavior, debugging and fault detection.
Buzz is released as open-source software under the MIT license. It can be downloaded at http://the.swarming.buzz/.
If you liked this post, you may also be interested in:
CoCoRo: New video series tracks dev’t of collective behaviour in autonomous underwater swarm
Evolving robot swarm behaviour suggests forgetting may be important to cultural evolution
Surgical micro-robot swarms: Science fiction, or realistic prospect?
Swarmbots: James McLurkin on why, how, and when we will see swarm robotics in practice
Thousand-robot swarm self-assembles into arbitrary shapes
See all the latest robotics news on Robohub, or sign up for our weekly newsletter. |
/**
* Keep-alive cluster member group integration tests.
*/
public class SimpleKeepAliveClusterMemberGroupIntegrationTest {
private static int testInvokedCounter;
private ClusterMemberGroup memberGroup;
@Before
public void beforeTest() {
memberGroup = ClusterMemberGroupUtils.newBuilder()
.setClusterMemberGroupInstanceClassName(SimpleKeepAliveClusterMemberGroup.class.getName())
.setStorageEnabledCount(1)
.setLogLevel(0)
.setFastStartJoinTimeoutMilliseconds(100)
.setOverrideConfiguration("littlegrid/littlegrid-fast-start-coherence-override.xml")
.buildAndConfigureForStorageDisabledClient();
CacheFactory.getCache(KNOWN_TEST_CACHE);
testInvokedCounter++;
}
@After
public void afterTest() {
final SimpleKeepAliveClusterMemberGroup instance = (SimpleKeepAliveClusterMemberGroup) memberGroup;
assertThat(instance.getCurrentUsageCount(), is(Integer.MAX_VALUE));
assertThat(instance.getPeakUsageCount(), is(testInvokedCounter));
assertThat(instance.getTotalUsageCount(), is(testInvokedCounter));
final boolean shutdownInvoked =
ClusterMemberGroupUtils.shutdownCacheFactoryThenClusterMemberGroups(memberGroup);
assertThat(shutdownInvoked, is(false));
}
@Test
public void test1() {
}
@Test
public void test2() {
}
@Test
public void test3() {
}
@Test
public void test4() {
}
} |
The leaker who allegedly got hold of an Xbox One dev kit and released a slew of information to the Internet has warned that if he is arrested today, he’ll spill everything he knows.
The individual known as Superdae told Gizmodo that he expects to be charged today, after several months of investigation.
If he is arrested, and therefore can't log into his FTP server this evening (Perth time), an automated system will give out its IP, username and password via Twitter, offering read-only access to an alleged collection of top-secret info.
Gizmodo says the server is packed with “juicy” files, and Superdae claims to have information about the Xbox One that Microsoft does not want leaked.
In a later report on Kotaku, the server was said to contain software development kits for the PlayStation 4, Xbox One and Wii U; information pulled from hacks of Epic Games, Blizzard, United Front Games and THQ; and unfinished versions of unreleased games like Company of Heroes 2 and WWE 14, as well as mysterious Epic projects known only as Kilo, Lima and Orion.
Superdae gave out plenty of info regarding the console ahead of its reveal, much of which seems to have been correct.
Superdae’s attempt to sell a couple of Durango dev kits on eBay resulted in a visit from the FBI and West Australia police, which Microsoft has denied any involvement with.
Thanks, Kotaku. |
/*
** List media currently mounted on a printer or printers.
*/
static void ppop_media(const char command[])
{
const char function[] = "ppop_media";
char *destname;
int destid;
int x, y;
int group = FALSE;
#ifdef DEBUG_PPOPINT
debug("%s(\"%s\")", function, command);
#endif
if(gu_sscanf(command, "f %S", &destname) != 1)
{
error("%s(): invalid \"f\" command", function);
return;
}
if(strcmp(destname, "all") == 0)
{
destid = -1;
}
else
{
if((destid = destid_by_name(destname)) == -1)
{
fprintf(reply_file, "%d\n", EXIT_BADDEST);
fprintf(reply_file, _("The destination \"%s\" does not exist.\n"), destname);
return;
}
if(destid_is_group(destid))
group = TRUE;
}
fprintf(reply_file, "%d\n", EXIT_OK_DATA);
for(x=0; x < printer_count; x++)
{
if(destid == -1 || x==destid || (group && destid_get_member_offset(destid,x)!=-1) )
{
fprintf(reply_file, "%s %d\n", printers[x].name, printers[x].nbins);
for(y=0; y < printers[x].nbins; y++)
{
DODEBUG_MEDIA(("x=%d, y=%d, media=%d",x,y,printers[x].media[y]));
fprintf(reply_file, "%s %s\n", printers[x].bins[y], get_media_name(printers[x].media[y]));
}
}
}
} |
import legate.numpy as np
import timeit
# BiCG-style kernel (matches the PolyBench "bicg" benchmark layout):
# computes s = A^T r (as r @ A) and q = A p in a single call.
def kernel(A, p, r):
    return r @ A, A @ p
def init_data(M, N, datatype):
A = np.empty((N, M), dtype=datatype)
s = np.empty((M, ), dtype=datatype)
q = np.empty((N, ), dtype=datatype)
p = np.empty((M, ), dtype=datatype)
r = np.empty((N, ), dtype=datatype)
# for i in range(M):
# p[i] = (i % M) / M
# for i in range(N):
# r[i] = (i % N) / N
# for j in range(M):
# A[i, j] = (i * (j + 1) % N) / N
p[:] = np.random.randn(M)
r[:] = np.random.randn(N)
A[:] = np.random.randn(N, M)
return A, s, q, p, r
if __name__ == "__main__":
# Initialization
M, N = 2000, 1000
A, s, q, p, r = init_data(M, N, np.float64)
# First execution
lg_s, lg_q = kernel(A, p, r)
# Benchmark
time = timeit.repeat("kernel(A, p, r)",
setup="pass",
repeat=20,
number=1,
globals=globals())
print("Legate median time: {}".format(np.median(time)))
|
Learning modular policies for robotics
A promising idea for scaling robot learning to more complex tasks is to use elemental behaviors as building blocks to compose more complex behavior. Ideally, such building blocks are used in combination with a learning algorithm that is able to learn to select, adapt, sequence and co-activate the building blocks. While there has been a lot of work on approaches that support one of these requirements, no learning algorithm exists that unifies all these properties in one framework. In this paper we present our work on a unified approach for learning such a modular control architecture. We introduce new policy search algorithms that are based on information-theoretic principles and are able to learn to select, adapt and sequence the building blocks. Furthermore, we developed a new representation for the individual building block that supports co-activation and principled ways for adapting the movement. Finally, we summarize our experiments for learning modular control architectures in simulation and with real robots.
Keywords: robotics, policy search, modularity, movement primitives, motor control, hierarchical reinforcement learning
INTRODUCTION
Robot learning approaches such as policy search methods (Kober and Peters, 2010; Kormushev et al., 2010; Theodorou et al., 2010) have been very successful. Kormushev et al. (2010) learned to flip pancakes and Kober and Peters (2010) learned the game ball-in-a-cup. Despite these impressive applications, robot learning still offers many challenges due to the inherent high-dimensional continuous state and action spaces, the high cost of generating new data with the real robot, the partial observability of the environment and the risk of damaging the robot through overly aggressive exploration strategies. These challenges have, so far, prevented robot learning methods from scaling to more complex real-world tasks.
However, many motor tasks are heavily structured. Exploiting such structure may well be the key to scaling robot learning to more complex real-world domains. One of the most common structures of a motor task is modularity. Many motor tasks can be decomposed into elemental movements or movement primitives (Khansari-Zadeh and Billard, 2011; Rozo et al., 2013) that are used as building blocks in a modular control architecture. For example, playing tennis can be decomposed into single stroke-based movements, such as a forehand and a backhand stroke. To this end, we need a learning architecture that learns to select, improve, adapt, sequence and co-activate the elemental building blocks. Adaptation is needed as such building blocks are only useful if they can be reused for a wide range of situations; hence, a building block needs to be adapted to the current situation. For example, in tennis, the ball will always approach the player slightly differently. Furthermore, we need to learn how to sequence such parametrized building blocks. Taking up our tennis example, we need to execute a sequence of strokes such that the opponent cannot return the ball in the long run. For sequencing the building blocks, we ideally want to be able to continuously switch from one building block to the next to avoid abrupt transitions, also called "blending" of building blocks. Finally, co-activation of the building blocks would considerably increase the expressiveness of the control architecture. Coming back to the tennis example, co-activating primitives that are responsible for the upper-body movement, i.e., the stroke, and primitives that are responsible for the movement of the lower body, i.e., making a side step or a forward step, would significantly reduce the number of required building blocks.
In this paper we present an overview of our work on learning such modular control architectures by reinforcement learning. We developed new policy search methods that can select and adapt the individual building blocks to the current situation, learn and improve a large number of different building blocks, and learn how to sequence building blocks to solve a complex task. Our learning architecture is based on an information-theoretic policy search algorithm called Relative Entropy Policy Search (REPS), proposed by Peters et al. (2010). The main insight used by REPS is that the relative entropy between the trajectory distributions of two subsequent policies during policy search should be bounded. This bound is particularly useful in robotics as it can cope with many of the mentioned challenges of robot learning. It decreases the danger of damaging the robot as the policy updates stay close to the "data" generated by the old policy and do not perform wild exploration. Moreover, it results in a smooth learning process and prevents the algorithm from getting stuck prematurely in local minima, even for the high-dimensional parameter spaces that are typical in robotics (Peters and Schaal, 2008; Daniel et al., 2012a). While there are several other policy search approaches which can learn either the selection (da Silva et al., 2012), adaptation (Kober et al., 2010b; Ude et al., 2010) or sequencing of individual building blocks, to the best of our knowledge, our approach offers the first framework that unifies all these properties in a principled way.
A common way to implement the building blocks is to use movement primitives (MPs). Movement primitives provide a compact representation of elemental movements by parameterizing either the trajectory (Neumann, 2011; Rozo et al., 2013), muscle activation profiles (d'Avella and Pai, 2010) or directly the control policy (Khansari-Zadeh and Billard, 2011). All of these representations offer several advantages, such as the ability to learn the MP from demonstration (Rozo et al., 2013), global stability properties, co-activation of multiple primitives (d'Avella and Pai, 2010), or adaptability of the representation via hyper-parameter tuning (Rozo et al., 2013). However, none of these approaches unifies all the desirable properties of a MP in one framework. We therefore introduced a new MP representation that is particularly well suited to be used in a modular control architecture. Our MP representation is based on distributions over trajectories and is called the Probabilistic Movement Primitive (ProMP). It can, therefore, represent the variance profile of the resulting trajectories, which allows us to encode the importance of time points as well as represent optimal behavior in stochastic systems (Todorov and Jordan, 2002). However, the most important benefit of a probabilistic representation is that we can apply probabilistic operators to trajectory distributions, i.e., conditioning for adaptation of the MP and a product of distributions for co-activation and blending of MPs. Yet, such a probabilistic representation is of little use if we cannot use it to control the robot. Therefore, we showed that a stochastic time-varying feedback controller can be obtained analytically, enabling us to use the probabilistic movement primitive approach as a promising future representation of a building block in modular control architectures. We will present experiments on several real robot tasks such as playing tetherball and shooting a hockey puck. The robots used for the experiments are illustrated in Figure 1.
Movement representations
Different elemental movement representations have been proposed in the literature. The most prominent one is the dynamic movement primitive (DMP) approach (Schaal et al., 2003). DMPs encode a movement in a parametrized dynamical system. The dynamical system is implemented as a second-order spring-damper system which is perturbed by a non-linear forcing function $f$. The forcing function depends non-linearly on the phase variable $z_t$, which denotes a clock for the movement. The evolution of the phase variable can be made faster or slower by the temporal scaling factor $\tau$, which also changes the execution speed of the movement. The forcing function is linearly parametrized by a parameter vector $\mathbf{w}$ and can easily be learned from demonstrations. In addition to the high-dimensional parameters $\mathbf{w}$, we can adjust meta-parameters of the DMPs such as the goal attractor $g$ of the spring-damper system and the temporal scaling factor. In Kober et al. (2010a), the DMPs have been extended to include the final desired velocity in their meta-parameters. DMPs have several advantages. They are easy to learn from demonstrations and by reinforcement learning, they can be used for rhythmic and stroke-based movements and they have built-in stability guarantees. However, they also suffer from some disadvantages. They cannot represent optimal behavior in a stochastic environment. In addition, the generalization to a new end position is based on heuristics and not learned from demonstrations, and it is not clear how DMPs can be combined simultaneously. Several other movement primitive representations have been proposed in the literature. Some of them are based on DMPs to overcome their limitations (Calinon et al., 2007; Rozo et al., 2013), but none of them overcomes all the limitations in one framework. Rozo et al. (2013) estimate a time-varying feedback controller for the DMPs; however, how this feedback controller is obtained is based on heuristics. They also implement a combination of primitives as a product of GMMs, which is similar to the work presented here on the probabilistic movement primitives. However, this approach lacks a principled way of determining a feedback controller that exactly matches the trajectory distribution. Therefore, it is not clear what the result of this product is if we apply the resulting controller on the robot.
Most movement representations explicitly depend on time (Neumann and Peters, 2009; Paraschos et al., 2013; Rozo et al., 2013). For time-dependent representations, a linear controller is often sufficient to model complex behavior as the non-linearity is induced by the time dependency. In contrast, time-independent models such as the Stable Estimator of Dynamical Systems (SEDS) approach (Khansari-Zadeh and Billard, 2011) directly estimate a state-dependent policy that is independent of time. Such models require more complex, non-linear controllers. For example, the SEDS approach uses a GMM to model the policy. The GMM is estimated such that the resulting policy is provably stable. Due to the simplicity of the policy, time-dependent representations can easily be scaled up to higher dimensions, as shown by Ijspeert and Schaal (2003). Due to their increased complexity, time-independent models are typically used for lower-dimensional movements, such as modeling the movement directly in task space. Yet, a time-independent model is the more general representation as it does not require knowledge of the current time step. In this paper, we will nevertheless concentrate on time-dependent movement representations.
Policy search
The most common reinforcement learning approach to learn the parameters of an elemental movement representation such as a DMP is policy search (Williams, 1992; Peters and Schaal, 2008; Kober and Peters, 2010; Kober et al., 2010a). The goal of policy search is to find a parameter vector of the policy such that the resulting policy optimizes the expected long-term reward. Many policy search methods use a stochastic policy for exploration. They can be coarsely categorized according to their policy update strategy. Policy gradient methods (Williams, 1992; Peters et al., 2003) are one of the earliest policy update strategies applied to motor primitive representations. They estimate the gradient of the expected long-term reward with respect to the policy parameters (Williams, 1992) and update the policy parameters in the direction of this gradient. The main disadvantages of policy gradient methods are the need for a hand-tuned learning rate, the poor learning speed, and that typically many samples are required to obtain a new policy without sample re-use. More recent approaches rely on probabilistic methods. These methods typically base their derivation on the expectation-maximization algorithm (Vlassis et al., 2009; Kober and Peters, 2010) and formulate the policy search problem as an inference problem by transforming the reward into an improper probability distribution, i.e., the transformed reward is required to be always positive. Such a transformation is typically achieved by an exponential transformation with a hand-tuned temperature. The resulting policy update can be formulated as a weighted model-fitting task where each sample is weighted by its transformed long-term reward (Kober and Peters, 2010). Computing the policy update by probabilistic model fitting has the important advantage that we can use a large toolbox of algorithms for estimating structured probabilistic models, such as the expectation-maximization algorithm (Dempster et al., 1977) or variational inference (Neal and Hinton, 1998). Additionally, it does not require a user-specified learning rate. These approaches typically explore directly in the parameter space of the policy by estimating a distribution over the policy parameters. Such an approach works well if we have a moderate number of parameters.
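To make the weighted model-fitting view concrete, the following minimal sketch (not taken from any of the cited papers; all names are illustrative) performs one EM-style update of a Gaussian search distribution over policy parameters, using an exponential reward transformation with a hand-tuned temperature beta:

    import numpy as np

    def weighted_ml_update(thetas, returns, beta=1.0):
        """One reward-weighted ML update of a Gaussian over policy parameters.
        thetas: (N, d) sampled parameter vectors; returns: (N,) episode returns."""
        # The exponential transformation turns returns into improper
        # probabilities; subtracting the max is purely for numerical stability.
        w = np.exp(beta * (returns - returns.max()))
        w /= w.sum()
        # Weighted maximum-likelihood fit of mean and covariance.
        mu = w @ thetas
        diff = thetas - mu
        cov = diff.T @ (diff * w[:, None])
        return mu, cov

Sampling new parameters from N(mu, cov) and repeating this loop gives the basic EM-like policy search scheme described above; beta trades off greediness against exploration.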
Another algorithm that has recently gained a lot of attention is the policy improvement by path integrals (PI$^2$) algorithm (Theodorou et al., 2010; Stulp and Sigaud, 2012). Path integral theory allows computing the globally optimal trajectory distribution along with the optimal controls without requiring a value function, as opposed to traditional dynamic programming approaches. However, the current algorithm is limited to learning open-loop policies (Theodorou et al., 2010; Stulp and Sigaud, 2012) and may not be able to adapt the variance of the exploration policy (Theodorou et al., 2010).
Generalization of skills
An important requirement in a modular control architecture is that we can adapt a building block to the current situation or task. We describe a task or a situation with a context vector $s$. The context vector can contain the objectives of the agent, e.g., throwing a ball to a desired target location, or physical properties of the environment, e.g., the mass of the ball to throw. Ude et al. (2010) use supervised learning to generalize movement primitives from a set of demonstrations. Such an approach is well suited to generalize a set of demonstrations to new situations, but it cannot be used to improve the skills beyond the demonstrations. To alleviate this limitation, da Silva et al. (2012) combine low-dimensional subspace extraction for generalization with policy search methods for policy improvement. Finding such low-dimensional sub-spaces is an interesting idea that can considerably improve the generalization of the skills. Yet, there is one important limitation of the approach presented in da Silva et al. (2012): the algorithms for policy improvement and skill generalization work almost independently of each other. The only way they interact is that the generalization is used as initialization for the policy search algorithm when a new task needs to be learned. As a consequence, the method needs to create many roll-outs for the same task/context in order to improve the skill for this context. This limitation is relaxed by contextual policy search methods (Kober et al., 2010b; Neumann, 2011). Contextual policy search methods explicitly learn a policy that chooses the control parameters $\boldsymbol\theta$ in accordance with the context vector $s$. Therefore, a different context can be used for each roll-out. Kober et al. (2010b) use a Gaussian Process (GP) for generalization. While GPs have good generalization properties, they are of limited use for policy search as they typically learn an uncorrelated exploration policy. The approach in Neumann (2011) can use a directed exploration strategy, but it suffers from high computational demands.
Sequencing of skills
Another requirement is to learn to sequence the building blocks. Standard policy search methods typically choose a single parameter vector per episode. Hence, such methods can be used to learn the parameters of a single building block. In order to sequence building blocks, we have to learn how to choose multiple parameter vectors per episode. The first approach (Neumann and Peters, 2009) for learning to sequence primitives was based on value-function approximation techniques, which restricted its application to a rather small set of parameters for each primitive. More recently, the path integral approach to policy search has been adapted to sequence movement primitives. Other approaches (Morimoto and Doya, 2001; Ghavamzadeh and Mahadevan, 2003) use hand-specified sub-tasks to learn the sequencing of elemental skills. Such an approach limits the flexibility of the resulting policy, and the sub-tasks are typically not easy to define manually.
Segmentation and modular imitation learning
Segmentation (Kulic et al., 2009; Álvarez et al., 2010; Meier et al., 2011) and modular imitation learning (Niekum et al., 2012) address the important and challenging problem of autonomously extracting the structure of the modular control policy from demonstrations. In Meier et al. (2011) and Álvarez et al. (2010), the segmentation is based on parameter changes in the dynamical system that is supposed to have created the motion. In Chiappa and Peters (2010), Bayesian methods are used to construct a library of building blocks. Repeated skills are modeled as being generated by one of the building blocks, rescaled and perturbed by noise. Based on the segmentation of the demonstrations, we can infer the individual building blocks from the data by clustering the segments. One approach that integrates clustering and segmentation is to use Hidden Markov Models (HMMs). Williams and Storkey (2007) used an HMM to extract movement primitives from hand-writing data. While this is a very general approach, it has only been applied to rather low-dimensional data, i.e., 2-D movements. Niekum et al. (2012) use a beta-process auto-regressive HMM to estimate the segmentation, which has the advantage that the number of building blocks can also be inferred from data. DMPs are used to represent the policy of the single segments. Butterfield et al. (2010) use an HMM to directly estimate the policy. For each hidden state, they fit a Gaussian Process model to represent the policy of this hidden state. The advantage of these imitation learning approaches is that we can also estimate the temporal structure of the modular control policy, i.e., when to switch from one building block to the next. So far, such imitation learning approaches have not been integrated in a reinforcement learning framework, which seems to be a very interesting direction. For example, in current reinforcement learning approaches, the duration of the building blocks is specified by a single parameter. Estimating the duration of the building blocks from the given trajectory data seems to be a fruitful and more general approach.
INFORMATION THEORETIC POLICY SEARCH FOR LEARNING MODULAR CONTROL POLICIES
In this section we will sequentially introduce our information theoretic policy search framework used for learning modular control policies. We start our discussion with the adaptation of a single building block. Subsequently, we discuss how to learn to select a building block and, finally, we will discuss sequencing of building blocks.
After introducing each component of our framework, we briefly discuss related experiments on real robots and in simulation. In this paper, we can only give a brief overview over the experiments. For more details, we refer to the corresponding papers. In our experiments with our information theoretic policy search framework, we used Dynamic Movement Primitives (DMP) introduced in Schaal et al. (2003) as building blocks in our modular control architecture. In all our experiments, we used the hyper-parameters of a DMP as parameters of the building blocks, such as the final positions and velocities of the joints (Kober et al., 2010a) as well as the temporal scaling factor of the DMPs for changing the execution speed of the movement.
LEARNING TO ADAPT THE INDIVIDUAL BUILDING BLOCKS
We formulate the learning of the adaptation of the building blocks as a contextual policy search problem (Kober et al., 2010b; Neumann, 2011; Daniel et al., 2012a), where we will for now assume that we want to execute only a single building block. Adaptation of a building block is implemented by an upper-level policy $\pi(\boldsymbol\theta|s)$ that chooses the parameter vector $\boldsymbol\theta$ of the building block according to the current context vector $s$. The context describes the task. It might contain objectives of the agent or properties of the environment, for example, the incoming velocity of a tennis ball. After choosing the parameters $\boldsymbol\theta$, the lower-level policy $\mathbf u_t = f(\mathbf x_t, \boldsymbol\theta)$ of the building block takes over and is used to control the robot. Note that we use the symbol $\mathbf x_t$ to denote the state of the robot. The state $\mathbf x_t$ typically contains the joint angles $\mathbf q_t$ and joint velocities $\dot{\mathbf q}_t$ of the robot and should not be confused with the context vector $s$, which describes the task and contains higher-level objectives of the agent. For example, such a lower-level policy can be defined by a trajectory tracking controller that tracks the desired trajectory of a dynamic movement primitive (DMP).
Our aim is to learn an upper-level policy that maximizes the expected reward
$$J(\pi) = \int \mu(s) \int \pi(\boldsymbol\theta|s)\, R(s, \boldsymbol\theta)\, d\boldsymbol\theta\, ds, \qquad R(s, \boldsymbol\theta) = \int p(\tau|s, \boldsymbol\theta)\, r(\tau, s)\, d\tau, \qquad (1)$$
where $R(s, \boldsymbol\theta)$ is the expected reward of the resulting trajectory $\tau$ when using parameters $\boldsymbol\theta$ in context $s$, and $\mu(s)$ denotes the distribution over the contexts that is specified by the learning problem. The distribution $p(\tau|s, \boldsymbol\theta)$ denotes the probability of a trajectory given $s$ and $\boldsymbol\theta$, and $r(\tau, s)$ is a user-specified reward function that depends on the trajectory $\tau$ and on the context $s$. We use the Relative Entropy Policy Search (REPS) algorithm as the underlying policy search method. The basic idea of REPS is to bound the relative entropy between the old and the new parameter distribution. Here, we consider the episode-based contextual formulation of REPS (Daniel et al., 2012a; Kupcsik et al., 2013) that is tailored to learning such an upper-level policy. The policy update step is defined as a constrained optimization problem where we want to find the distribution $p(s, \boldsymbol\theta) = \mu(s)\pi(\boldsymbol\theta|s)$ that maximizes the average reward given in Equation (1) with respect to $p(s, \boldsymbol\theta)$ and simultaneously satisfies several constraints. We will first discuss these constraints and show how to compute $p(s, \boldsymbol\theta)$. Subsequently, we will explain how to obtain the upper-level policy $\pi(\boldsymbol\theta|s)$ from $p(s, \boldsymbol\theta)$. Generally, we initialize any policy search (PS) method with an initial policy $q_0(s, \boldsymbol\theta) = \mu(s)\, q_0(\boldsymbol\theta|s)$, either obtained through learning from demonstration or by manually setting a distribution over the parameters. The variance of the initial distribution $q_0(s, \boldsymbol\theta)$ defines the exploration region. Policy search is an iterative process: given the sampling distribution $q_0(s, \boldsymbol\theta)$, we obtain a new distribution $p_1(s, \boldsymbol\theta)$. Subsequently, $p_1$ is used as the new sampling policy $q_1$ and the process is repeated.
PS methods need to find a trade-off between keeping the initial exploration and contracting the policy to a (typically local) optimum. In REPS, this trade-off is realized via the Kullback-Leibler (KL) divergence. REPS maximizes the reward under the constraint that the KL divergence to the old exploration policy is bounded, i.e.,
$$\text{KL}\big(p(s, \boldsymbol\theta)\,\big\|\,q(s, \boldsymbol\theta)\big) = \int\!\!\int p(s, \boldsymbol\theta) \log \frac{p(s, \boldsymbol\theta)}{q(s, \boldsymbol\theta)}\, ds\, d\boldsymbol\theta \le \epsilon.$$
Due to this bound, we can choose between exploiting with the greedy policy (large KL bound) or continuing to explore with the old exploration policy (very small KL bound). The KL divergence in REPS bounds not only the conditional probability $\pi(\boldsymbol\theta|s)$, i.e., the differences in the policies, but also the joint state-action probabilities $p(s, \boldsymbol\theta)$, which ensures that the observed state-action region does not change rapidly over iterations; this is paramount for a real robot learning algorithm. Using the (asymmetric) KL divergence $\text{KL}(p(s,\boldsymbol\theta)\,\|\,q(s,\boldsymbol\theta))$ allows us to find a closed-form solution. Such a closed form would not be possible with the opposite KL divergence, i.e., $\text{KL}(q(s,\boldsymbol\theta)\,\|\,p(s,\boldsymbol\theta))$.
We also have to consider that the context distribution $p(s) = \int p(s, \boldsymbol\theta)\, d\boldsymbol\theta$ cannot be freely chosen by the agent as it is specified by the learning problem and given by $\mu(s)$. Hence, we need to add the constraints $\forall s: p(s) = \mu(s)$ to match the given context distribution $\mu(s)$. However, for a continuous context vector $s$, we would end up with infinitely many constraints. Therefore, we resort to matching feature averages instead of single probability values, i.e., $\int p(s)\, \boldsymbol\phi(s)\, ds = \hat{\boldsymbol\phi}$, where $\boldsymbol\phi(s)$ is a feature vector describing the context and $\hat{\boldsymbol\phi}$ is the mean observed feature vector.
The resulting constrained optimization problem is given by
$$\max_{p} \int\!\!\int p(s, \boldsymbol\theta)\, R(s, \boldsymbol\theta)\, ds\, d\boldsymbol\theta \quad \text{s.t.} \quad \text{KL}\big(p \,\|\, q\big) \le \epsilon, \quad \int p(s)\, \boldsymbol\phi(s)\, ds = \hat{\boldsymbol\phi}, \quad \int\!\!\int p(s, \boldsymbol\theta)\, ds\, d\boldsymbol\theta = 1.$$
It can be solved by the method of Lagrangian multipliers and yields a closed-form solution for $p$ that is given by
$$p(s, \boldsymbol\theta) \propto q(s, \boldsymbol\theta) \exp\!\left(\frac{R(s, \boldsymbol\theta) - V(s)}{\eta}\right), \qquad (4)$$
where $V(s) = \boldsymbol\phi(s)^T \mathbf v$ is a context-dependent baseline that is subtracted from the reward signal. The scalar $\eta$ and the vector $\mathbf v$ are Lagrangian multipliers that can be found by optimizing the dual function $g(\eta, \mathbf v)$ (Daniel et al., 2012a). It can be shown that $V(s)$ can be interpreted as a value function and, hence, estimates the mean performance of the new policy in context $s$. The optimization defined by the REPS algorithm is only performed on a discrete set of samples $\mathcal D = \{s^{[i]}, \boldsymbol\theta^{[i]}, R^{[i]}\}_{i=1\ldots N}$, where $R^{[i]}$ denotes the return obtained by the $i$th rollout. The resulting probabilities $p(s^{[i]}, \boldsymbol\theta^{[i]})$, see Equation (4), of these samples are used to weight the samples. In order to obtain the weight $p^{[i]}$ for each sample, we need to divide $p(s^{[i]}, \boldsymbol\theta^{[i]})$ by the sampling distribution $q(s^{[i]}, \boldsymbol\theta^{[i]})$ to account for the sampling probability (Kupcsik et al., 2013), i.e., $p^{[i]} \propto \exp\big((R^{[i]} - V(s^{[i]}))/\eta\big)$. Hence, being able to sample from $q$ is sufficient and $q$ is not needed in its analytical form. The upper-level policy $\pi(\boldsymbol\theta|s)$ is subsequently obtained by performing a weighted maximum-likelihood (ML) estimate. We use a linear-Gaussian model $\pi(\boldsymbol\theta|s) = \mathcal N(\boldsymbol\theta \,|\, \mathbf a + \mathbf A s, \boldsymbol\Sigma)$ to represent the upper-level policy of the building block, where the parameters $\mathbf a$, $\mathbf A$ and $\boldsymbol\Sigma$ are obtained through the ML estimation. As a building block is typically reused only for similar contexts $s$, a linear model is sufficient in most cases. Figure 2 shows an illustration of how a linear model can adapt the trajectories generated by a DMP.

FIGURE 2 | The figure illustrates the joint trajectories that can be generated when using a linear Gaussian to adapt the DMP parameters according to a one-dimensional context variable. In this illustration, we show the color coding for the context variable in the color bar on the right and show how the generated trajectories change in the main plot. For this plot, we assumed no exploration noise and adapted ten basis functions of the DMP. As we can see, complex behavior can emerge already with a linear adaptation model due to the high-dimensionality of the parameter space.

In practice, we still need an initial policy $q$. This initial policy can either be obtained through learning from demonstration or by selecting reasonable parameters and variance if the experimenter has sufficient task knowledge.
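The sample-based policy update can be sketched in a few lines. The code below is a simplified illustration, not the authors' implementation: it numerically optimizes the episodic REPS dual for eta and v and returns the sample weights used in the subsequent weighted ML fit, assuming linear context features:

    import numpy as np
    from scipy.optimize import minimize

    def reps_weights(R, Phi, epsilon=1.0):
        """REPS weights for samples {s_i, theta_i, R_i}.
        R: (N,) returns; Phi: (N, k) context features phi(s_i)."""
        phi_hat = Phi.mean(axis=0)
        k = Phi.shape[1]

        def dual(x):
            eta, v = x[0], x[1:]
            adv = R - Phi @ v                      # R(s, theta) - V(s)
            m = adv.max()                          # log-sum-exp stabilization
            return (eta * epsilon + v @ phi_hat
                    + eta * np.log(np.mean(np.exp((adv - m) / eta))) + m)

        res = minimize(dual, np.ones(k + 1), method="L-BFGS-B",
                       bounds=[(1e-6, None)] + [(None, None)] * k)
        eta, v = res.x[0], res.x[1:]
        adv = R - Phi @ v
        w = np.exp((adv - adv.max()) / eta)
        return w / w.sum()

The returned weights realize Equation (4) for samples drawn from q; a weighted linear regression of theta on s with these weights then yields the new linear-Gaussian upper-level policy.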
In Kupcsik et al. (2013), we further improved the data-efficiency of our contextual policy search algorithm by learning probabilistic forward models of the real robot and its environment. With these forward models, we can predict the reward $R(s, \boldsymbol\theta)$ for unseen context-parameter pairs $s$ and $\boldsymbol\theta$ and use these additional samples for computing the policy update. The data-efficiency of our method could be improved by up to two orders of magnitude using the learned forward models. As we used Gaussian Processes (GPs) (Rasmussen and Williams, 2006) to represent the forward models, this extension of our method is called GPREPS. The forward models were used to generate additional data points for the policy update. For each of these virtual data points, we generated 15 trajectories with the learned forward models and used the average reward of these predicted trajectories as the reward in the REPS optimization. We used sparse GPs (Snelson and Ghahramani, 2006) to deal with the high number of data points within a reasonable computation time.
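The following sketch illustrates the virtual-sample idea in strongly simplified form. Unlike GPREPS, which chains learned forward models and averages the rewards of 15 predicted trajectories per virtual sample, this illustration fits a single GP directly from context-parameter pairs to rewards (the variant the comparison below found inferior) and mimics the trajectory averaging by averaging posterior draws; all names are assumptions:

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor

    def virtual_samples(S, Th, R, policy_sample, n_virtual=500, n_traj=15):
        """Generate artificial (s, theta, R) triples from a learned model.
        S: (N, ds) contexts; Th: (N, dt) parameters; R: (N,) rewards;
        policy_sample(s) draws theta from the current upper-level policy."""
        gp = GaussianProcessRegressor(normalize_y=True)
        gp.fit(np.hstack([S, Th]), R)
        S_virt = S[np.random.randint(len(S), size=n_virtual)]
        Th_virt = np.array([policy_sample(s) for s in S_virt])
        X = np.hstack([S_virt, Th_virt])
        # Average several posterior draws, mimicking the averaging over
        # n_traj trajectories predicted with the learned forward models.
        R_virt = gp.sample_y(X, n_samples=n_traj).mean(axis=1)
        return S_virt, Th_virt, R_virt

The virtual triples are simply appended to the real samples before computing the REPS weights, which is what makes the approach so sample-efficient on the real system.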
Experimental evaluation of the adaptation of building blocks - robot hockey target shooting
In this task we used GPREPS with learned forward models to learn how to adapt the building blocks such that the robot can shoot hockey pucks to different locations. The objective was to make a target puck move for a specified distance by shooting a second hockey puck at the target puck. The context $s$ was composed of the initial location $[b_x, b_y]^T$ of the target puck and the distance $d^*$ that the target puck had to be shot, i.e., $s = [b_x, b_y, d^*]^T$. We chose the initial position of the target puck to be uniformly distributed relative to the robot's base, with the displacements $b_x$ and $b_y$ drawn from fixed intervals. The desired displacement context parameter $d^*$ is also uniformly distributed over a fixed interval.
The reward function consists of two terms with equal weighting. The first term penalizes missing the target puck located at position $\mathbf b = [b_x, b_y]^T$, where the control puck trajectory is $\mathbf x_{1:T}$. The second term penalizes the error in the desired displacement of the target puck, where $d_T$ is the resulting displacement of the target puck after the shot. The parameters $\boldsymbol\theta$ define the weights and goal position of the DMP. The policy in this experiment was a linear-Gaussian policy. The simulated robot task is depicted in Figure 3. GPREPS first learned a forward model to predict the initial position and velocity of the first puck after contact with the racket and a travel distance of 20 cm. Subsequently, GPREPS learned the free dynamics model of both pucks and the contact model of the pucks. We assumed that we know the geometry of the pucks to detect a contact. If there is a contact, we used the contact model to predict the state of both pucks after the contact given their state before the contact. From this state, we predicted the final puck positions after they came to a stop with a separate GP model.
We compared GPREPS in simulation to directly predicting the reward $R(s, \boldsymbol\theta)$, to model-free REPS, and to CrKR (Kober et al., 2010b), a state-of-the-art model-free contextual policy search method. The resulting learning curves are shown in Figure 3 (middle). GPREPS learned the task after only 120 interactions with the environment while the model-free version of REPS needed approximately 10,000 interactions. Directly predicting the rewards from the parameters $\boldsymbol\theta$ using a single GP model resulted in faster convergence, but the resulting policies still showed poor performance (GP direct). The results show that CrKR could not compete with model-free REPS. The learned movement is shown in Figure 3 for a specific context. After 100 evaluations, GPREPS placed the target puck accurately at the desired distance with an error of at most 5 cm.
Finally, we evaluated the performance of GPREPS on the hockey task using a real KUKA lightweight arm. The learning curve of this experiment is shown in Figure 3 (right) and confirms that GPREPS can find high-quality policies within a small number of interactions with the environment.
LEARNING TO SELECT THE BUILDING BLOCKS
In order to select between several building blocks $o$, we add an additional level of hierarchy on top of the upper-level policies of the individual building blocks. We assume that each building block shares the same parameter space. The parameters are now selected by first choosing the building block to execute with a gating policy $\pi_G(o|s)$ and, subsequently, the upper-level parameter policy $\pi_P(\boldsymbol\theta|s, o)$ of the chosen building block $o$ selects the parameters $\boldsymbol\theta$. Hence, $\pi(\boldsymbol\theta|s)$ can be written as the hierarchical policy
$$\pi(\boldsymbol\theta|s) = \sum_o \pi_G(o|s)\, \pi_P(\boldsymbol\theta|s, o).$$
In this model, the gating policy composes a complex, non-linear parameter selection strategy out of the simpler upper-level policies of the building blocks. Moreover, it can learn multiple solutions for the same context, which also increases the versatility of the learned motor skill (Daniel et al., 2012b). While a similar decomposition into a gating policy and option policies has been presented in da Silva et al. (2012), their framework was not integrated into a reinforcement learning algorithm; hence, generalization and improvement of the building blocks are performed by two independent algorithms, resulting in sample-inefficient policy updates.
To incorporate multiple building blocks, we now bound the Kullback-Leibler divergence between $q(s, \boldsymbol\theta, o)$ and $p(s, \boldsymbol\theta, o)$. As we are interested in versatile solutions, we also want to avoid that several building blocks concentrate on the same solution. Hence, we want to limit the "overlap" between building blocks in the parameter space. To do so, we bound the expected entropy of the conditional distribution $p(o|s, \boldsymbol\theta)$, i.e.,
$$-\int\!\!\int p(s, \boldsymbol\theta) \sum_o p(o|s, \boldsymbol\theta) \log p(o|s, \boldsymbol\theta)\, ds\, d\boldsymbol\theta \le \kappa.$$
A low entropy of $p(o|s, \boldsymbol\theta)$ ensures that our building blocks do not overlap in parameter space and, thus, represent individual and clearly separated solutions (Daniel et al., 2012a). The new optimization program results in the hierarchical version of REPS, denoted HiREPS. We can again determine a closed-form solution for $p(s, \boldsymbol\theta, o)$, which is given in Daniel et al. (2012a). As in the previous section, the optimization problem is only solved for a given set of samples generated from the distribution $q(s, \boldsymbol\theta)$. Subsequently, the parameters of the gating policy and the upper-level policies are obtained by weighted ML estimates. We use a Gaussian gating policy and an individual linear-Gaussian policy $\pi(\boldsymbol\theta|s, o) = \mathcal N(\boldsymbol\theta \,|\, \mathbf a_o + \mathbf A_o s, \boldsymbol\Sigma_o)$ for each building block. As we use a linear upper-level policy and the used DMPs produce only locally valid controllers, our architecture might require a large number of building blocks.
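A minimal sketch of how such a hierarchical policy is executed, and how the responsibilities $p(o|s,\boldsymbol\theta)$ entering the weighted ML updates can be computed, is given below; the gating is assumed to be available as a function returning option probabilities, and all names are illustrative:

    import numpy as np
    from scipy.stats import multivariate_normal

    def sample_hierarchical(s, gating_probs, options):
        """options[o] = (a_o, A_o, Sigma_o) are linear-Gaussian sub-policies."""
        p = gating_probs(s)                    # (O,) probabilities, sum to 1
        o = np.random.choice(len(p), p=p)      # pick a building block
        a, A, Sigma = options[o]
        theta = np.random.multivariate_normal(a + A @ s, Sigma)
        return o, theta

    def responsibilities(s, theta, gating_probs, options):
        """p(o|s, theta), used to weight the per-option ML updates."""
        p = gating_probs(s)
        lik = np.array([multivariate_normal.pdf(theta, mean=a + A @ s, cov=S)
                        for (a, A, S) in options])
        post = p * lik
        return post / post.sum()

Low-entropy responsibilities correspond exactly to the non-overlap condition bounded by kappa above: each sampled parameter vector is then clearly attributed to a single building block.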
Experimental evaluation of the selection of building blocks - robot tetherball
In robot tetherball, the robot has to shoot a ball that is fixed with a string to the ceiling such that it winds around a pole. The robot obtains a reward proportional to the speed of the ball winding around the pole. There are two different solutions: winding the ball around the left or the right side of the pole. Two successful hitting movements of the real robot are shown in Figure 5. We decompose our movement into a swing-in motion and a hitting motion. As we used the non-sequential algorithm for this experiment, we represented the two motions by a single set of parameters and jointly learned the parameters $\boldsymbol\theta$ of the two DMPs. We started the policy search algorithm with 15 options whose parameters were sampled from a Gaussian distribution around the parameters of the initial demonstration. We use a higher number of building blocks to increase the probability of finding both solutions: if we used only two randomly initialized building blocks, the probability that both cover the same solution would be quite high. We delete unused building blocks that have a very small probability of being chosen, i.e., $p(o) < 0.001$. The learning curve is shown in Figure 4 (left). The noisy reward signal is mostly due to the vision system and partly due to real-world effects such as friction. Two resulting movements of the robot are shown in Figure 5. The robot learned a versatile strategy that contained building blocks that wind the ball around the left and building blocks that wind the ball around the right side of the pole.
FIGURE 4 | Average rewards for learning tetherball on the real robot. Mean and standard deviation of three trials. In all three trials, the robot found solutions to wind the ball around the pole on either side after 50 iterations.
LEARNING TO SEQUENCE THE BUILDING BLOCKS
To execute multiple building blocks in a sequence, we reformulate the problem of sequencing building blocks as a Markov Decision Process (MDP). Each building block defines a transition probability $p(s'|s, \boldsymbol\theta)$ over future contexts and an immediate reward function $R(s, \boldsymbol\theta)$. It is executed until its termination condition $t_o(s, \boldsymbol\theta)$ is satisfied. However, in our experiments, we used a fixed duration for each building block. Note that traditional reinforcement learning methods, such as TD-learning, cannot deal with such MDPs as the action space is high-dimensional and continuous.
We concentrate on the finite-horizon case, i.e., each episode consists of $K$ decision steps where each step is defined as the execution of an individual building block. For clarity, we will only discuss the sequencing of a single building block; however, the selection among multiple building blocks at each decision step can easily be incorporated.
In the finite-horizon formulation of REPS we want to find the probabilities $p_k(s, \boldsymbol\theta) = p_k(s)\,\pi_k(\boldsymbol\theta|s)$, $k \le K$, and $p_{K+1}(s)$ that maximize the expected long-term reward
$$J = \sum_{k=1}^{K} \int\!\!\int p_k(s, \boldsymbol\theta)\, R_k(s, \boldsymbol\theta)\, ds\, d\boldsymbol\theta + \int p_{K+1}(s)\, R_{K+1}(s)\, ds,$$
where $R_{K+1}(s_{K+1})$ denotes the final reward for ending up in state $s_{K+1}$ after executing the last building block. As in the previous case, the initial context distribution is given by the task, i.e., $\forall s: p_1(s) = \mu_1(s)$. Furthermore, the context distributions at future decision steps $k > 1$ need to be consistent with the past distribution $p_{k-1}(s, \boldsymbol\theta)$ and the transition model $p(s'|s, \boldsymbol\theta)$, i.e.,
$$\forall s', k > 1: \quad p_k(s') = \int\!\!\int p_{k-1}(s, \boldsymbol\theta)\, p(s'|s, \boldsymbol\theta)\, ds\, d\boldsymbol\theta,$$
for each decision step of the episode. These constraints connect the policies for the individual decision steps and result in a policy $\pi_k(\boldsymbol\theta|s)$ that optimizes the long-term reward instead of the immediate ones. As in the previous sections, these constraints are again implemented by matching feature averages.
The closed-form solution for the joint distribution $p_k(s, \boldsymbol\theta)$ yields
$$p_k(s, \boldsymbol\theta) \propto q_k(s, \boldsymbol\theta) \exp\!\left(\frac{A_k(s, \boldsymbol\theta)}{\eta}\right), \qquad A_k(s, \boldsymbol\theta) = R_k(s, \boldsymbol\theta) + \mathbb E_{p(s'|s, \boldsymbol\theta)}\big[V_{k+1}(s')\big] - V_k(s).$$
We can see that the reward $R_k(s, \boldsymbol\theta)$ is transformed into an advantage function $A_k(s, \boldsymbol\theta)$, where the advantage now also depends on the expected value of the next state $\mathbb E_{p(s'|s, \boldsymbol\theta)}[V_{k+1}(s')]$. This term ensures that we do not just optimize the immediate reward but the long-term reward.
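Given value-function parameters obtained from the dual optimization, the per-step sample weighting can be sketched as follows. This is a simplified, model-free illustration in which the expectation over next contexts is replaced by the single observed successor; eta, v_k and v_next come from optimizing the dual, which is not shown:

    import numpy as np

    def step_weights(R_k, Phi_k, Phi_next, v_k, v_next, eta):
        """Sample weights for decision step k in the sequential setting.
        R_k: (N,) immediate rewards; Phi_k, Phi_next: (N, k) context features
        of the current and observed next contexts."""
        # A_k(s, theta) = R_k + E[V_{k+1}(s')] - V_k(s)
        A = R_k + Phi_next @ v_next - Phi_k @ v_k
        w = np.exp((A - A.max()) / eta)
        return w / w.sum()

As in the episodic case, a weighted ML fit of the step-k upper-level policy follows from these weights.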
Experimental evaluation of sequencing of building blocks - sequential robot hockey
We used the sequential robot hockey task to evaluate our sequential motor skill learning framework. The robot has to move the target puck into one of three target areas by sequentially shooting a control puck at the target puck. The target areas are defined by a specified distance to the robot, see Figure 6 (left). The robot gets rewards of 1, 2, and 3 for reaching zone 1, 2, or 3, respectively. After each shot, the control puck is returned to the robot. The target puck, however, is only reset after every third shot. The 2-dimensional position of the target puck defines the context $s$ of the task and the parameter vector $\boldsymbol\theta$ defines the goal positions of the DMPs that define the desired trajectory of the robot's joints. After performing one shot, the agent observes the new context to plan the subsequent shot. In order to give the agent an incentive to shoot at the target puck, we punished the agent with the negative minimum distance of the control puck to the target puck after each shot. While this reward was given after every step, the zone reward was only given at the end of the episode (every third step) as $R_{K+1}(s_{K+1})$.

FIGURE 5 | Time series of a successful swing of the robot. The robot first has to swing the ball to the pole and, subsequently, when the ball has swung backwards, can arc the ball around the pole. The movement is shown for a shot to the left and to the right of the pole.

FIGURE 6 | (Left) The sequential robot hockey task. The robot has two pucks, the pink control puck and the yellow target puck. The task is to shoot the yellow target puck into one of the colored reward zones. Since the best reward zone is too far away from the robot to be reached with only one shot, each episode consists of three strikes. After each strike, the control puck is returned to the robot, but the target puck is only reset after one episode is concluded. (Middle) Comparison of sequential motor primitive learning to the episodic learning setup on the simulated robot hockey task. The sequential motor primitive learning framework was able to find a good strategy to place the puck in the third reward zone in most of the cases while the episodic learning scenario failed to learn such a strategy. (Right) One trial of the real robot hockey task. The robot starts with a negative initial reward and learns to achieve an average reward of 2.5 after 300 episodes.
We compared our sequential motor primitive learning method with its episodic variant in a realistic simulation. For the episodic variant we used one extended parameter vector $\tilde{\boldsymbol\theta}$ that contained the parameters for all three hockey shots. The comparison of both methods can be seen in Figure 6 (middle). Due to the high-dimensional parameter space, the episodic learning setup failed to learn a proper policy while our sequential motor primitive learning framework could learn policies of much higher quality.
On the real robot, we could reproduce the simulation results. The robot learned a strategy which could move the target puck to the highest reward zone in most of the cases after 300 episodes. The learning curve is shown in Figure 6 (right).
PROBABILISTIC MOVEMENT PRIMITIVES
In the second part of this paper, we investigate new representations for the individual building blocks that are particularly suited to be used in a modular control architecture. In all experiments with our modular policy search framework, we have so far used the Dynamic Movement Primitive (DMP) approach. DMPs are widely used; however, when used in our modular control architecture, they suffer from severe limitations as they support neither co-activation nor blending of building blocks. In addition, DMPs use heuristics for the adaptation of the motion. Hence, we focus the discussion of our new movement primitive (MP) representation (Paraschos et al., 2013) on these two important properties.
We use trajectories $\tau = \{q_t\}_{t=0\ldots T}$, defined by the joint angles $q_t$ over time, to model a single movement. We use a probabilistic representation of a movement, which we call the probabilistic movement primitive (ProMP), where a movement primitive describes several ways to execute a movement (Paraschos et al., 2013). Hence, the movement primitive is given as a distribution $p(\tau)$ over trajectories. A probabilistic representation offers several advantages that make it particularly suitable for a modular control architecture. Most importantly, it offers principled ways to adapt as well as to co-activate movement primitives. Yet, these advantages of a probabilistic trajectory representation are of little use if we cannot use it to control the robot. Therefore, we derive a stochastic feedback controller in closed form that can exactly reproduce a given trajectory distribution; hence, trajectory distributions can be used directly for robot control.
In this section, we present two experiments that we performed with the ProMP approach. As we focused on the representation of the individual building blocks, we evaluated the new representation without the use of reinforcement learning and learned the ProMPs by imitation. In our experiments, we illustrate how to use conditioning as well as co-activation of the building blocks.
PROBABILISTIC TRAJECTORY REPRESENTATION
In the imitation learning setup, we assume that we are given several demonstrations in terms of trajectories τ i . In our probabilistic approach we want to learn a distribution of these trajectories. We will first explain the basic representation of a trajectory distribution and subsequently cover the two new operations that are now available in our probabilistic framework, i.e., conditioning and co-activation. Finally, we will explain in Section 3.3 how to control the robot with a stochastic feedback controller that exactly reproduces the given trajectory distribution.
We use a weight vector $\mathbf w$ to compactly represent a single trajectory $\tau$. The probability of observing a trajectory $\tau$ given the weight vector $\mathbf w$ is given by the linear basis function model
$$p(\tau|\mathbf w) = \prod_t \mathcal N\big(\mathbf y_t \,\big|\, \boldsymbol\Psi_t^T \mathbf w, \boldsymbol\Sigma_y\big),$$
where $\mathbf y_t = [q_t, \dot q_t]^T$ contains the joint position $q_t$ and joint velocity $\dot q_t$, $\boldsymbol\Psi_t$ defines the time-dependent basis matrix, and the observation noise is zero-mean i.i.d. Gaussian with covariance $\boldsymbol\Sigma_y$.
As a probabilistic MP represents multiple ways to execute an elemental movement, we also need multiple demonstrations to learn the distribution $p(\mathbf w; \boldsymbol\theta)$. The parameters $\boldsymbol\theta = \{\boldsymbol\mu_w, \boldsymbol\Sigma_w\}$ can be learned by maximum likelihood estimation, for example, by using the expectation-maximization algorithm (Lazaric and Ghavamzadeh, 2010). For multi-dimensional systems, we can also learn the coupling between the joints. Coupling is typically represented by the covariance of the joint positions and velocities. We can learn this covariance by maintaining a parameter vector $\mathbf w_i$ for each dimension $i$ and learning a distribution over the combined weight vector $\mathbf w = [\mathbf w_1^T, \ldots, \mathbf w_n^T]^T$.
To be able to adapt the execution speed of the movement, we introduce a phase variable $z$ to decouple the movement from the time signal. The phase can be any function $z(t)$ that increases monotonically with time. The basis functions $\psi_t$ are now decoupled from the time and depend on the phase, such that $\psi_t = \psi(z_t)$ and $\dot\psi_t = \psi'(z_t)\,\dot z_t$. The choice of the basis functions depends on whether we want to model rhythmic movements, where we use normalized Von-Mises basis functions that are periodic in the phase, or stroke-based movements, where we use normalized Gaussian basis functions
$$b_i(z) = \exp\!\left(-\frac{(z - c_i)^2}{2h}\right).$$
The parameter $h$ defines the width of the basis and $c_i$ the center of the $i$th basis function. We normalize the basis functions, i.e., $\psi_i(z) = b_i(z)/\sum_j b_j(z)$.
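A small sketch of this representation follows (illustrative names, a single degree of freedom, stroke-based case): each demonstration is first regressed onto the basis functions to obtain its weight vector, and the per-time-step marginal is what appears as the shaded trajectory distributions in Figure 7.

    import numpy as np

    def gaussian_bases(z, centers, h):
        """Normalized Gaussian basis functions evaluated at phase z."""
        b = np.exp(-(z - centers) ** 2 / (2.0 * h))
        return b / b.sum()

    def fit_promp(W):
        """ML fit of p(w) = N(mu_w, Sigma_w) from demonstration weights W: (D, d)."""
        return W.mean(axis=0), np.cov(W, rowvar=False)

    def marginal(Psi_t, mu_w, Sigma_w, Sigma_y):
        """Marginal p(y_t) = N(Psi_t^T mu_w, Psi_t^T Sigma_w Psi_t + Sigma_y)."""
        return Psi_t.T @ mu_w, Psi_t.T @ Sigma_w @ Psi_t + Sigma_y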
NEW PROBABILISTIC OPERATORS FOR MOVEMENT PRIMITIVES
The probabilistic formulation of MPs enables us to use new probabilistic operators on our movement primitive representation. Adaptation of the movement can be accomplished by conditioning on desired positions or velocities at time step $t$. Co-activation and blending of MPs can be implemented as a product of two trajectory distributions.
Adaptation of the building blocks by conditioning
For efficient adaptation, our building blocks should support the modulation of hyper-parameters of the movements, such as the desired final joint positions or the joint positions at given via-points. For example, DMPs allow for the adaptation of the final position by modulating the point attractor of the system. However, how the final position modulates the trajectory is hard-coded in the DMP framework and cannot be learned from data. This adaptation mechanism might also violate other task constraints.
In our probabilistic formulation, such adaptation operations can be described by conditioning the MP to reach a certain state $\mathbf y_t^*$ at time step $t$. Conditioning is performed by adding a new desired observation $\mathbf x_t^* = [\mathbf y_t^*, \boldsymbol\Sigma_y^*]$ to our probabilistic model, where $\mathbf y_t^*$ represents the desired position and velocity vector at time $t$ and $\boldsymbol\Sigma_y^*$ specifies the accuracy of the desired observation. By applying Bayes' theorem, we obtain a new distribution over $\mathbf w$, i.e.,
$$p(\mathbf w | \mathbf x_t^*) \propto \mathcal N\big(\mathbf y_t^* \,\big|\, \boldsymbol\Psi_t^T \mathbf w, \boldsymbol\Sigma_y^*\big)\, p(\mathbf w).$$
As $p(\mathbf w|\boldsymbol\theta)$ is Gaussian, the conditional distribution $p(\mathbf w|\mathbf y_t^*)$ is also Gaussian and can be computed analytically. We illustrate conditioning a ProMP on different target states in Figure 7A. As we can see, the modulation of a target state is also learned from demonstration, i.e., the ProMP will choose a new trajectory distribution that goes through the target state and, at the same time, is similar to the learned trajectory distribution.
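The conditioning update is a standard Gaussian posterior and can be written in a few lines (illustrative sketch; Psi_t is the basis matrix at the conditioning time step):

    import numpy as np

    def promp_condition(mu_w, Sigma_w, Psi_t, y_star, Sigma_star):
        """Condition the weight distribution on observing y*_t at time t."""
        S = Psi_t.T @ Sigma_w @ Psi_t + Sigma_star
        K = Sigma_w @ Psi_t @ np.linalg.inv(S)        # Kalman-style gain
        mu_new = mu_w + K @ (y_star - Psi_t.T @ mu_w)
        Sigma_new = Sigma_w - K @ Psi_t.T @ Sigma_w
        return mu_new, Sigma_new

Setting Sigma_star small forces the trajectory distribution to pass through the via-point almost exactly, while a larger Sigma_star only nudges it toward the target.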
Combination and blending by multiplying distributions
In our probabilistic representation, a single MP represents a whole family of movements. Co-activating two MPs should return a new set of movements which are contained in both MPs. Such an operation can be performed by multiplying the two distributions. We also want to weight the activation of each primitive $o_i$ by a time-varying activation factor $\alpha_i(t)$, for example, to continuously blend the movement execution from one primitive to the next. The activation factors can be implemented by taking the distributions of the individual primitives to the power of $\alpha_i(t)$. Hence, the co-activation of ProMPs yields
$$p^*(\tau) \propto \prod_t \prod_i p_i(\mathbf y_t)^{\alpha_i(t)}.$$
As the $p_i(\mathbf y_t)$ are Gaussian, the resulting distribution $p^*(\mathbf y_t)$ is again Gaussian and we can obtain its mean $\boldsymbol\mu_t^*$ and variance $\boldsymbol\Sigma_t^*$ analytically. Both terms are required to obtain the stochastic feedback controller that is finally used to control the robot. We illustrate co-activating two ProMPs in Figure 7B and blending of two ProMPs in Figure 7C.
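Per time step, the weighted product of Gaussians has the usual precision-weighted closed form; a minimal sketch (illustrative names):

    import numpy as np

    def coactivate(mus, Sigmas, alphas):
        """p*(y_t) proportional to prod_i N(y_t | mu_i, Sigma_i)^alpha_i at one time step."""
        precs = [a * np.linalg.inv(S) for a, S in zip(alphas, Sigmas)]
        Sigma_star = np.linalg.inv(sum(precs))
        mu_star = Sigma_star @ sum(P @ m for P, m in zip(precs, mus))
        return mu_star, Sigma_star

Blending corresponds to sliding the alpha_i(t) over time, e.g., from (1, 0) to (0, 1), so that the product smoothly hands over from one primitive to the other.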
USING TRAJECTORY DISTRIBUTIONS FOR ROBOT CONTROL
In order to use a trajectory distribution $p(\tau|\boldsymbol\theta)$ for robot control, we have to obtain a controller which can exactly reproduce the given distribution. As we show in Paraschos et al. (2013), such a controller can be obtained in closed form if we know the system dynamics $\dot{\mathbf y} = f(\mathbf y, \mathbf u) + \boldsymbol\epsilon_y$ of the robot. (Alternatively, we can assume that we use inverse dynamics control on the robot, and, hence, the idealized dynamics of the robot are given by a linear system. Such an approach is, for example, followed by the DMPs, which also assume that the underlying dynamical system representing the robot is linear.) We model the controller as a time-varying stochastic linear feedback controller, i.e., $\mathbf u_t = \mathbf k_t + \mathbf K_t \mathbf y_t + \boldsymbol\epsilon_u$, where $\mathbf k_t$ denotes the feed-forward gains, $\mathbf K_t$ the feedback gains and $\boldsymbol\epsilon_u \sim \mathcal N(\mathbf 0, \boldsymbol\Sigma_u)$ the controller noise. Hence, the controller is determined by $\mathbf k_t$, $\mathbf K_t$ and $\boldsymbol\Sigma_u$ for each time point. All these terms can be obtained analytically by predicting the distribution $p_{\text{model}}(\mathbf y_{t+dt})$ from $p(\mathbf y_t|\boldsymbol\theta)$ with the known model of the system dynamics and subsequently matching the moments of $p(\mathbf y_{t+dt}|\boldsymbol\theta)$ and the moments of the predicted distribution $p_{\text{model}}(\mathbf y_{t+dt})$. The resulting controller exactly reproduces the given trajectory distribution $p(\tau|\boldsymbol\theta)$ (Paraschos et al., 2013).

While the ProMP approach has many similarities to the approach introduced in Rozo et al. (2013) by Calinon and colleagues, there are also important differences. They also learn a trajectory distribution, which is modeled with a GMM, where the output variables are the joint angles and the time step $t$. The probability of the joint angles at time step $t$ is then obtained by conditioning on $t$. However, it is unclear how to condition on being at a certain state $\mathbf q_t^*$ at time step $t$, which is very different from just conditioning on being at time step $t$. In this case, the mixture components need to be changed such that the trajectory distribution passes through $\mathbf q_t^*$ at time step $t$. How to implement this change with a GMM is an open problem. Note that the ProMP approach is very different from a GMM. It uses a linear basis function model and learns the correlation of the parameters of the basis functions for the different movements. Time is not modeled as a random variable but as a conditional variable right away. Due to the learned correlations, we can condition on reaching $\mathbf q_t^*$ at time step $t$ and the trajectory distribution smoothly passes through $\mathbf q_t^*$ with high accuracy. Furthermore, a trajectory distribution alone is not sufficient to control a robot as it requires a feedback controller that determines the control actions. How to obtain this feedback controller from the trajectory distribution is based on heuristics in Rozo et al. (2013), i.e., when we apply the feedback controller on the real robot, we will not reproduce the learned trajectory distribution. The produced trajectory distribution might be similar, but we do not know how similar. Therefore, for all operations performed on the trajectory distributions (i.e., a combination of distributions by a product), it is hard to quantify the effect of this operation on the resulting motions that are obtained from the heuristic feedback controller. In contrast, the ProMPs come with a feedback controller that exactly matches the trajectory distribution. Hence, for a combination of distributions, we know that the feedback controller will exactly follow the product of the two distributions.

FIGURE 7 | (A) Conditioning on different target states. The blue shaded area represents the learned trajectory distribution. We condition on different target positions, indicated by the "x"-markers. The produced trajectories exactly reach the desired targets while keeping the shape of the demonstrations. (B) Combination of two ProMPs. The trajectory distributions are indicated by the blue and red shaded areas. Both primitives have to reach via-points at different points in time, indicated by the "x"-markers. We co-activate both primitives with the same activation factor. The trajectory distribution generated by the resulting feedback controller now goes through all four via-points. (C) Blending of two ProMPs. We smoothly blend from the red primitive to the blue primitive. The activation factors are shown at the bottom. The resulting movement (green) first follows the red primitive and, subsequently, switches to following the blue primitive.
Experimental evaluation of the combination of objectives at different time-points
In this task, a seven-link planar robot has to reach different target positions in end-effector space at the final time point $t_T$ and at a via-point $t_v$. We generated the demonstrations for learning the MPs with an optimal control law (Toussaint, 2009), adding noise to the control outputs. In the first set of demonstrations, the robot reached a via-point at $t_1 = 0.25$ s with its end-effector. We used ten normalized Gaussian basis functions per joint, resulting in a 70-dimensional weight vector. As we learned a single distribution over all joints of the robot, we can also model the correlations between the joints. These correlations are required to learn to reach a desired via-point in task space. The reproduced behavior with the ProMPs is illustrated in Figure 8 (top). The ProMP exactly reproduced the via-points in task space. Moreover, the ProMP exhibited the same variability in between the time points of the via-points. It also reproduced the coupling of the joints from the optimal control law, which can be seen by the small variance of the end-effector in comparison to the rather large variance of the single joints at the via-points. We also used a second set of demonstrations where the first via-point was located at time step $t_2 = 0.75$ s, which is illustrated in Figure 8 (middle). We co-activated the ProMPs learned from both demonstrations. The robot could accurately reach both via-points at $t_1 = 0.25$ s and $t_2 = 0.75$ s, see Figure 8 (bottom).
Experimental evaluation of the combination of simultaneous objectives -robot hockey
In this task, the robot again has to shoot a hockey puck in different directions and for different distances. The task setup can be seen in Figure 9A. We recorded two different sets of demonstrations, one that contains straight shots with varying distances, while the second set contains shots with a varying shooting angle and almost constant distance. Both data sets contained ten demonstrations each. The demonstrations were generated by an optimal control law.

FIGURE 8 | The ProMP approach exactly reproduces the coupling of the joints from the demonstrations. The combination of both learned ProMPs is shown at the bottom; the resulting movement reaches both via-points with high accuracy.
FIGURE 9 | Robot hockey. The robot shoots a hockey puck. The setup is shown in (A). We demonstrate ten straight shots for varying distances and ten shots for varying angles. The pictures show samples from the ProMP model for straight shots (B) and angled shots (C). Learning from the combined data set yields a model that represents variance in both distance and angle (D). Multiplying the individual models leads to a model that only reproduces shots where both models had probability mass, in the center at medium distance (E). The last picture shows the effect of conditioning on only left or right angles; the robot does not shoot in the center any more (F).
Sampling from the two models generated by the different data sets yields shots that exhibit the demonstrated variance in either angle or distance, as shown in Figures 9B,C. When combining the data sets of both primitives and learning a new primitive, we get a movement that exhibits variance in both dimensions, i.e., angle and distance, see Figure 9D. When the two individual primitives are combined by a product of MPs, the resulting model shoots only at the center at medium distance, i.e., the intersection of both MPs, see Figure 9E.
CONCLUSION AND FUTURE WORK
Using structured, modular control architectures is a promising concept for scaling robot learning to more complex real-world tasks. In such a modular control architecture, elemental building blocks, such as movement primitives, need to be adapted, sequenced or co-activated simultaneously. In this paper, we presented a unified, data-efficient policy search framework that exploits such control architectures for robot learning. Our policy search framework can learn to select, adapt and sequence parametrized building blocks such as movement primitives while coping with the main challenges of robot learning, i.e., high-dimensional, continuous state and action spaces and the high cost of generating data. Moreover, we presented a new probabilistic representation of the individual building blocks which shows several beneficial properties. Most importantly, it supports efficient and principled ways of adapting a building block to the current situation and allows us to co-activate several of these building blocks.
Future work will concentrate on integrating the new ProMP approach into our policy search framework. Interestingly, the upper-level policy would in this case directly specify the trajectory distribution, and the lower-level control policy is automatically given by this trajectory distribution. We will also explore incorporating the co-activation of individual building blocks into our policy search framework. Additional future work will concentrate on incorporating perceptual feedback into the building blocks and on using more complex hierarchies in policy search.
/**
* Generic container for abstract information in a tuple-based static analysis.
*
* @author damiano
*
*/
public abstract class Tuple implements Comparable {
private Object elem;
public Object getElem() {
return elem;
}
public abstract Tuple clone();
} |
PAGO PAGO, American Samoa -- Vice President Mike Pence is shortening his stay in Hawaii to a few hours so that he could fly back to Washington in what promises to be a very busy week for the administration and Congress.
Pence’s office said he would depart Hawaii on Monday afternoon after meeting with U.S. Pacific Command leaders and troops stationed in Honolulu. Plans for a Tuesday visit to the USS Arizona in Pearl Harbor have been postponed, Pence’s office said.
Congress has until Friday to pass a new spending package or the government will shut down. The White House is also planning to unveil a tax reform plan this week.
In American Samoa on Monday, Pence thanked American service members based there, citing "challenging times" for the military in the Asia-Pacific. Completing a visit to the region and en route back to the United States, he addressed some 200 soldiers during a refueling stop in Pago Pago. He told the troops the Trump administration was seeking a large increase in military funding.
During his stop, Pence also dedicated a sign that will greet visitors at a veterans clinic. He met with American Samoan officials and troops and then flew to Hawaii, the last stop on a tour that included a visit to the Demilitarized Zone separating North and South Korea.
The trip offered evidence that Pence has become one of President Donald Trump’s chief emissaries on the world stage, patching up relations, reassuring allies still wondering what to expect from Trump and diving into international crises like North Korea.
Pence’s trip to Asia was planned weeks ago. But it dropped him in South Korea just in time to deliver North Korea a stern warning from the U.S.: that “all options are on the table” when it comes to curbing the North’s nuclear ambitions, and that the Trump administration will seek support from its allies to pressure Pyongyang to give up its nuclear weapons and ballistic missile programs.
His foray into the DMZ and his meetings with South Korean and Japanese leaders allowed Pence to shape a key American foreign policy issue, presenting a new challenge for a politician whose prior foreign policy experience was limited to trips to the Middle East as a congressman and trade missions to Japan, China, Israel and Europe as Indiana’s governor.
Pence’s early foreign travel schedule contrasts sharply with a mostly homebound Trump, who is not scheduled to travel overseas until late May for NATO meetings in Belgium and a gathering of the Group of Seven major industrial nations in Italy. Pence partly covered that ground when he visited Germany and Belgium in February.
Trump’s predecessor, Barack Obama, had visited nine countries by late April 2009, his first three months in office, checking in with allies such as Canada, Britain and Germany. The last first-term president to wait until May to take his first foreign trip was Jimmy Carter in 1977.
Enter Pence, whose still-evolving diplomatic playbook includes several components, all steeped in humility, personal ties and his religious faith.
In some ways, Pence is the advance team: His earlier trip to Europe and his Asia trip that ends Tuesday are partly laying the foundation for journeys being planned for Trump. In other ways, Pence is the face of reassurance, offering in-person outreach to world leaders Trump has clashed with or who have doubted Trump’s commitment to them at the start of his presidency.
In meetings with his counterparts, Pence frequently passed along “greetings” from Trump and told his hosts how much America valued their alliance, language that’s commonplace in diplomacy but understated compared to the more free-wheeling Trump.
On Thursday, for example, Pence told Indonesian President Joko “Jokowi” Widodo how “proud” he and Trump were to partner with him and spoke of their hopes of working together. |
import cv2
import numpy as np

# Project-local dependencies. The exact import paths below are assumptions;
# they are not visible in this snippet.
import blending
from augmentation_engine import AugmentationEngine
from custom_backend import CustomBackend

class BlendedImageSet:
"""
@class BlendedImageSet represents a set of blended images, given a fg/bg pair.
"""
def __init__(self, bg, fgs):
self.bg = bg
self.fgs = fgs
self.blended_images = {}
self.flying_distractors = [] # Holds pairs of [image, mask]
def add_flying_distractor(self, bg, fg, aug_dic):
"""
@brief Add an artifact to the image with the shape of a tool and the texture of a
background.
@param[in] bg Background object.
@param[in] fg Foreground object.
@returns nothing.
"""
fg_im = fg.frame.raw
fg_mask = fg.seg.raw
# Get dictionary of custom augmentations applied to the foreground
custom_backend_dic = { k: aug_dic[k] for k in aug_dic if k in
AugmentationEngine.BACKENDS['custom'] }
# Remove those augmentations that are ok for tools but not suitable for distractors
custom_backend_dic.pop('tool_gray', None)
# Apply custom backend augmentations
cb = CustomBackend(custom_backend_dic)
fg_im, fg_mask = cb.augment(fg_im, fg_mask)
# Collect Keras options and perform augmentations on the flying distractor
keras_backend_dic = { k : aug_dic[k] for k in aug_dic if k in
AugmentationEngine.BACKENDS['keras'] }
fg_im, fg_mask = AugmentationEngine.keras_backend(keras_backend_dic, fg_im, fg_mask)
# FIXME: Apply albumentations that are applied to foreground tools
# Resize the flying image and mask to the standard size
h = self.bg.raw.shape[0]
w = self.bg.raw.shape[1]
flying_image = cv2.resize(bg.raw, (w, h))
flying_mask = cv2.resize(fg_mask, (w, h), interpolation=cv2.INTER_NEAREST)
# Add pair to the list of flying distractors
self.flying_distractors.append([flying_image, flying_mask])
def blend(self, blend_modes):
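"""
@brief Blend all foregrounds (and any flying distractors) onto the
       background, once per requested blending mode.
@param[in] blend_modes List of blending mode identifiers understood by
           the blending module.
@returns nothing. Results are stored in self.blended_images as a
         dictionary mapping mode -> [blended image, label mask].
"""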
cy = int(round(.5 * self.bg.raw.shape[0]))
cx = int(round(.5 * self.bg.raw.shape[1]))
for mode in blend_modes:
blend = self.bg.raw.copy()
label = np.zeros((self.bg.raw.shape[0], self.bg.raw.shape[1]), dtype=np.uint8)
# Blend flying distractors on the background
for [fl_im, fl_mask] in self.flying_distractors:
blend = blending.blend(fl_im, fl_mask, blend.copy(), cy, cx, mode)
# Blend instruments
for fg in self.fgs:
blend = blending.blend(fg.frame.raw.copy(), fg.seg.raw.copy(), blend.copy(), cy, cx, mode)
label = cv2.bitwise_or(label, fg.seg.raw)
self.blended_images[mode] = [blend, label]
# return self.blended_images
def augment(self, dic_of_aug,
same_for_all_modes=['blend_border', 'albu_blend']):
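"""
@brief Augment every blended image. Augmentations listed in
       same_for_all_modes are applied identically across all blending
       modes by freezing and restoring the RNG state between modes;
       per-mode augmentations are not applied yet (see the TODO below).
"""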
# Prepare the dictionary with those augmentations that must be the same for all
# blending modes
same_aug = {k: dic_of_aug[k] for k in dic_of_aug if k in same_for_all_modes}
# Set random state that will be used to enforce the same augmentations across modes
rs = np.random.RandomState(None)
# Prepare backends to perform the same augmentations on all modes
cb = CustomBackend(same_aug, rs)
# Apply custom augmentations, identical for all blending modes
for mode in self.blended_images:
new_image = self.blended_images[mode][0]
new_label = self.blended_images[mode][1]
# Apply augmentation freezing the random state so that the
# augmentations of each mode are the same
st0 = cb.rs.get_state()
new_image, new_label = cb.augment(new_image, new_label)
cb.rs.set_state(st0)
# Save blended images
self.blended_images[mode] = [new_image, new_label]
# Apply albumentations, identical for all blending modes
blended_images_for_albu = { k: self.blended_images[k][0] for k in self.blended_images }
new_blended_images, new_label = AugmentationEngine.albumentations_backend(same_aug,
blended_images_for_albu, new_label)
self.blended_images = { k: [new_blended_images[k], new_label] for k in new_blended_images }
# TODO: Apply augmentations that can be different in each blending mode
'''
# Apply augmentation
rs=np.random.RandomState(None)
blend_border_param = None
if 'blend_border' in dic_of_augmentations:
for mode in self.blended_images:
im = self.blended_images[mode][0]
label = self.blended_images[mode][1]
st0 = rs.get_state()
new_image, new_label = cb.blend_border(dic_of_augmentations['blend_border'], im, label, rs=rs)
rs.set_state(st0)
self.blended_images[mode] = [new_image, new_label]
blend_border_param = dic_of_augmentations.pop('blend_border')
# Custom backend augmentation is applied to each of the blended images
new_blended_images = {}
for mode in self.blended_images:
im = self.blended_images[mode][0]
label = self.blended_images[mode][1]
new_image, new_label = cb.augment(im, label)
new_blended_images[mode] = [new_image, new_label]
self.blended_images = new_blended_images
if blend_border_param is not None:
dic_of_augmentations['blend_border'] = blend_border_param
''' |
/**
* Check if this living entity has a gun of this type already in possession
* @param entity the entity to check
* @return true if already in inventory, false otherwise.
*/
public boolean hasGun(LivingEntity entity) {
if (entity == null || !enabled)
return false;
ItemStack[] inv;
if (entity instanceof InventoryHolder) {
InventoryHolder holder = (InventoryHolder) entity;
inv = holder.getInventory().getContents();
} else {
inv = entity.getEquipment().getArmorContents();
}
if (inv != null) {
for (ItemStack item : inv) {
if (isGun(item)) {
return true;
}
}
}
return false;
} |
/// <reference path="chomp.d.ts"/>
export function pxToPt(px : number) : number {
return px * 3 / 4;
}
export function ptToPx(pt : number) : number {
return pt * 4 / 3;
}
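/**
 * Probability of at least one success in `runs` independent trials, each
 * with success probability `chance` (complement rule: 1 - P(no success)).
 */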
export function probability(chance: number, runs: number) : number {
return 1 - Math.pow((1 - chance), runs);
}
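/**
 * Renders a description into `host`: a plain string becomes the host's
 * text content, while an array of descriptors ({tag, attributes, text})
 * is created and appended recursively.
 */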
export function description(data: string | Array<ChompDescriptionElement>, host: Element) {
if(!Array.isArray(data)) {
host.textContent = data;
return;
}
for (const elementData of data) {
const element = document.createElement(elementData.tag);
if(elementData.attributes != null) {
for(const attributeKey in elementData.attributes) {
const attr = document.createAttribute(attributeKey);
attr.value = elementData.attributes[attributeKey];
element.attributes.setNamedItem(attr);
}
}
description(elementData.text, element);
host.appendChild(element);
}
}
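/**
 * Converts a hex color string (with or without a leading '#') to HSL,
 * returned as [hue 0-359, saturation 0-100, lightness 0-100].
 */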
export function hex2hsl(hex: string) : Array<number> {
const color = parseInt(hex.startsWith("#") ? hex.substring(1) : hex, 16);
const r = (color >> 16) / 0xFF;
const g = ((color >> 8) & 0xFF) / 0xFF;
const b = (color & 0xFF) / 0xFF;
const max = Math.max(r, g, b);
const min = Math.min(r, g, b);
let h = (max + min) / 2;
let s = h;
let l = h;
if (max == min) {
h = s = 0; // achromatic
} else {
const d = max - min;
s = l > 0.5 ? d / (2 - max - min) : d / (max + min);
switch (max) {
case r:
h = (g - b) / d + (g < b ? 6 : 0);
break;
case g:
h = (b - r) / d + 2;
break;
case b:
h = (r - g) / d + 4;
break;
}
h /= 6;
}
return [Math.floor(h * 360), Math.floor(s * 100), Math.floor(l * 100)];
}
export function hslShift(color: string, adjH: number, adjS: number, adjL: number) : string {
const hsl = hex2hsl(color);
hsl[0] = (hsl[0] + adjH) % 360;
hsl[1] = Math.min(100, hsl[1] + adjS);
hsl[2] = Math.min(100, hsl[2] * adjL);
return `hsl(${hsl[0]}, ${hsl[1]}%, ${hsl[2]}%)`;
}
export function hslSetL(color: string, adjH: number, adjS: number, adjL: number) : string {
const hsl = hex2hsl(color);
hsl[0] = (hsl[0] + adjH) % 360;
hsl[1] = Math.min(100, hsl[1] + adjS);
hsl[2] = adjL;
return `hsl(${hsl[0]}, ${hsl[1]}%, ${hsl[2]}%)`;
}
|
#include <bits/stdc++.h>
using namespace std;
int main()
{
ios::sync_with_stdio(0);
cin.tie(0);
cout.tie(0);
// freopen("input.txt","r",stdin);
// freopen("output.txt","w",stdout);
int n, u;
scanf("%d %d", &n, &u);
vector<int> e(n);
for(int i = 0; i < n; i++) {
scanf("%d", &e[i]);
}
double ans = 0;
for(int i = 0; i < n-2; i++) {
int t = upper_bound(e.begin(),e.end(), e[i]+u)-e.begin()-1;
//printf("%d\n", t);
if(t-i >= 2) {
ans = max(ans, 1.0*(e[t]-e[i+1])/(e[t]-e[i]));
}
}
if(ans == 0) {
printf("-1");
} else {
printf("%0.9lf", ans);
}
return 0;
} |
/**
* Given an incomplete structure or union entity and a consistent, complete
* structure or union type node, completes the entity using the information
* provided by the node.
*
* @param structureOrUnionType
*            an incomplete structure or union entity (non-<code>null</code>)
* @param node
* a complete structure or union type node consistent with the
* <code>structureOrUnion</code> (non-<code>null</code>)
* @throws SyntaxException
* if a field is declared with a non-object type or bit width is
* specified with a non-constant expression
* @see {@link #checkConsistency(TaggedEntity, StructureOrUnionTypeNode)}
*/
private void completeStructOrUnion(
StructureOrUnionType structureOrUnionType,
StructureOrUnionTypeNode node) throws SyntaxException {
SequenceNode<FieldDeclarationNode> fieldDecls = node
.getStructDeclList();
List<Field> fieldList = new LinkedList<>();
structureOrUnionType.setDefinition(node);
for (FieldDeclarationNode decl : fieldDecls) {
TypeNode fieldTypeNode = decl.getTypeNode();
ExpressionNode bitWidthExpression = decl.getBitFieldWidth();
Value bitWidth;
ObjectType fieldType;
Field field;
if (fieldTypeNode == null)
fieldType = null;
else {
Type tempType = processTypeNode(fieldTypeNode);
if (!(tempType instanceof ObjectType))
throw error("Non-object type for structure or union member",
fieldTypeNode);
fieldType = (ObjectType) tempType;
}
if (bitWidthExpression == null) {
bitWidth = null;
} else {
if (!bitWidthExpression.isConstantExpression())
throw error(
"Non-constant expression used for bit width in field declaration",
bitWidthExpression);
this.entityAnalyzer.expressionAnalyzer
.processExpression(bitWidthExpression);
bitWidth = nodeFactory.getConstantValue(bitWidthExpression);
}
field = typeFactory.newField(decl, fieldType, bitWidth);
decl.setEntity(field);
if (decl.getIdentifier() != null)
decl.getIdentifier().setEntity(field);
fieldList.add(field);
}
structureOrUnionType.complete(fieldList);
} |
// src/controllers/users.controller.ts
import { Router, Response } from "express";
import { Auth } from "../guardians/auth";
import telkit from 'terminal-kit';
import bodyParser from 'body-parser';
import { UsersSerivce } from "../services/Users.service";
import { ORMHelper } from "../helper/orm.helper";
import { GeneralHelper } from "../helper/general.helper";
import { ParamsHelper } from "../helper/params.helper";
import { CarsSerivce } from "../services/cars.service";
import { EncryptHelper } from "../helper/encrypt.helper";
class UsersController {
private auth = new Auth();
private jsonParser = bodyParser.json();
private usersService = new UsersSerivce();
private carsService = new CarsSerivce();
private ormHelper = new ORMHelper();
private generalHelper = new GeneralHelper();
private paramsHelper = new ParamsHelper();
private encryptionHelper = new EncryptHelper();
public router: Router;
constructor() {
this.router = Router();
this.init();
}
private init() {
this.router.post('/session', this.jsonParser, this.paramsHelper.validateParams, (req: any, res: Response) => this.login(req, res));
this.router.post('/register', this.jsonParser, this.paramsHelper.validateParams, (req: any, res: Response) => this.register(req, res));
this.router.get('/cars', this.jsonParser, this.auth.authenticateToken, (req: any, res: Response) => this.getUserCars(req, res));
this.router.get('/trips', this.jsonParser, this.auth.authenticateToken, (req: any, res: Response) => this.login(req, res));
}
private async login(req: any, res: Response) {
try {
const params = this.ormHelper.formatParamsForWhere(req.body);
params.where.password = await this.encryptionHelper.encryptPassword(req.body.password);
const response = await this.usersService.getUser(params);
if (!response) return res.status(404).json({ error: 'no user found' });
const { name, lastname, id } = response;
const token = await this.auth.generateToken({ name, lastname, id });
return res.status(200).json({ auth: true, response, token });
} catch (e) {
telkit.terminal(e);
}
}
private async register(req: any, res: Response) {
try {
const user = await this.ormHelper.getUserBasics(req.body);
const response = await this.usersService.postUser(user);
const { name, lastname, id } = response;
const token = await this.auth.generateToken({ name, lastname, id });
return res.status(200).json({ success: true, token, response });
} catch (e) {
telkit.terminal(e);
}
}
private async getUserCars(req: any, res: Response) {
try {
const id = req.user_id;
const params = this.ormHelper.formatParamsForWhere({ driver_id: id });
const response = await this.carsService.getUserCars(params);
return res.status(200).json({ response });
} catch (e) {
return res.status(400).json({ error: e });
}
}
}
const userController = new UsersController();
export default userController.router; |
The traditional media are so petrified of being called "liberal" that they are prepared to allow the Breitbarts of the world to become their assignment editors. Mainstream journalists regularly criticize themselves for not jumping fast enough or high enough when the Fox crowd demands coverage of one of their attack lines.
Thus did Andrew Alexander, The Washington Post's ombudsman, ask why the paper had been slow to report on, as he put it, "the Justice Department's decision to scale down a voter-intimidation case against members of the New Black Panther Party."
Never mind that this is a story about a tiny group of crackpots who stopped no one from voting. It was aimed at doing what the doctored video Breitbart posted set out to do: persuade Americans that the Obama administration favors blacks over whites.
And never mind that, to her great credit, Abigail Thernstrom, a conservative George W. Bush appointee to the U.S. Civil Rights Commission, dismissed the case and those pushing it. "This doesn't have to do with the Black Panthers," she told Politico's Ben Smith. "This has to do with their fantasies about how they could use this issue to topple the (Obama) administration."
Instead, the media are supposed to take seriously the charges of J. Christian Adams, who served in the Bush Justice Department. He's a Republican activist going back to the Bill Clinton era. His party services included time as a Bush poll watcher in Florida in 2004, when on one occasion he was involved in a controversy over whether a black couple could cast a regular ballot.
Now, Adams is accusing the Obama Justice Department of being "motivated by a lawless hostility toward equal enforcement of the law."
This is racially inflammatory, politically motivated nonsense—and it's nonsense even if Sean Hannity and Rush Limbaugh talk about it 1,000 times a day. When an outlandish charge for which there is no evidence is treated as an on-the-one-hand-on-the-other-hand issue, the liars win.
The Sherrod case should be the end of the line. If Obama hates the current media climate, he should stop overreacting to it. And the mainstream media should stop being afraid of insisting upon the difference between news and propaganda.
E.J. Dionne's e-mail address is ejdionne(at)washpost.com.
E.J. Dionne, Jr. is the author of the recently published Souled Out: Reclaiming Faith and Politics After the Religious Right. He is a Washington Post columnist, a senior fellow at the Brookings Institution, and a professor at Georgetown University.
(c) 2010, Washington Post Writers Group |
Chris Lane: Chancey Luna, 17, found guilty of murdering Australian baseballer in Duncan
US teenager Chancey Luna has been found guilty of murdering Australian baseball player Chris Lane and sentenced to life in prison without parole.
Lane, originally from Melbourne, was a student at East Central University in the town of Ada and was shot in the back while he was jogging on a street in Duncan, Oklahoma, on August 16, 2013.
The jury only took a little over an hour to find 17-year-old Luna guilty of first-degree murder, after a four-day trial.
The jury also sentenced Luna to life in prison without parole.
Lane's father Peter said he felt no joy over the verdict, but was relieved that the trial was over.
"There's no sense of joy. Something is behind us, something has moved. The trial was fair," he said.
"He's gone for life. He's now 17 and will not see the free light of day in the free world ever."
He said he and his family took comfort in the fact that the three boys involved in the shooting would be off the streets.
"I don't ever think there was ever a motive. Whatever reasons and logics — the senselessness of the whole thing is what permeates," he said.
"The kid [Lane] was out for a run. He didn't offend anybody, he didn't hurt anybody, he didn't argue with anybody ... he just got shot.
"There was no reason. It just happened ... if they'd got away with Chris maybe there was somebody else next."
Lane's mother Donna said the actions of the teenagers do not represent the Duncan community.
"These three naughty boys, they're not part of Duncan, Oklahoma," she said.
"And this naughty boy [Luna], he's now never going to do this to any other family."
Donna says the time spent in the US for the trial has been difficult for the entire Lane family, who will now return to Melbourne.
"It's been incredibly tough. Tough on lots [of time] away from home, tough to see my three daughters go through this. This is so unfamiliar to us being in a court room."
Luna did not show remorse or provide an explanation for his actions before he was sentenced.
The district attorney said Luna also expressed no emotion at the verdict, nor sentence.
Jones, Edwards to face jail time
During the trial, prosecutors said Luna, along with Michael Jones and James Edwards Junior, decided to kill someone "for the fun of it", but their lawyers denied that accusation.
Luna's lawyers admitted he fired the shot but said that it was not intended to kill.
They had sought a charge lesser than first-degree murder.
In Oklahoma, the prosecution does not have to prove pre-meditation in terms of targeting or planning, as the law says "intent can be formed in an instant".
Last month, Jones entered a guilty plea to second-degree murder.
Jones was the driver of the car from which the bullet was fired.
He will not be able to apply for parole until after 38 years, at which point he will be 56.
An appeal has been sought for Edwards, who was charged as an adult after the crime.
The appeal aims to have Edwards, now 17, charged as a child for turning state witness, which would see him face 20 years in jail with another 20 years' probation.
Edwards alleges he was only in the car for a short few minutes before the incident happened and had no knowledge of there being a gun in the car.
There were more than 50 bullets found in the car, along with the revolver used to shoot Lane and a shotgun in the boot.
Emergency call details final minutes of Lane's life
Lane was on a baseball scholarship at the university and was two weeks away from celebrating his 23rd birthday.
Earlier this week, Lane's parents Peter and Donna sat surrounded by the family of his then girlfriend Sarah Harper as they heard several hours of gut-wrenching testimony.
Local resident Joy Smith recalled how she tried to save Lane's life but could not find a pulse.
Richard Rhodes, who had been painting a house at the time, said he heard a pop that sounded like a gunshot then a black car came around the corner and sped off.
Outside the courtroom he spoke of his motivation for coming forward.
"I just wanted to come and make justice for that young man. That's all I've got to say," Mr Rhodes said.
// chrisgradl/react-navigation-playground
import { createSlice, PayloadAction } from "@reduxjs/toolkit";
import {RootState} from "./types";
export interface SelectedInspector {
type: "Screen" | "Navigator" | "Theme" | "Debug";
navigatorId: string;
screenId?: string;
}
const slice = createSlice({
initialState: {
type: "Navigator",
navigatorId: "1",
screenId: undefined,
} as SelectedInspector,
name: "SelectedInspector",
reducers: {
setSelectedInspector: (state, action: PayloadAction<SelectedInspector>) =>
action.payload,
},
});
export const { setSelectedInspector } = slice.actions;
export const selectNavigator = (state: RootState) =>
state.navigators[state.inspector.navigatorId];
export const selectScreen = (state: RootState) =>
state.navigators[state.inspector?.navigatorId]?.screens[
state.inspector?.screenId
];
export default slice.reducer;
|
// crates/nu-protocol/src/value/evaluate.rs
use crate::value::{Primitive, UntaggedValue, Value};
use indexmap::IndexMap;
use nu_errors::ShellError;
use query_interface::{interfaces, vtable_for, Object, ObjectHash};
use serde::{Deserialize, Serialize};
use std::cmp::{Ord, Ordering, PartialOrd};
use std::fmt::Debug;
#[derive(Debug)]
pub struct Scope {
pub it: Value,
pub vars: IndexMap<String, Value>,
}
impl Scope {
pub fn new(it: Value) -> Scope {
Scope {
it,
vars: IndexMap::new(),
}
}
}
impl Scope {
pub fn empty() -> Scope {
Scope {
it: UntaggedValue::Primitive(Primitive::Nothing).into_untagged_value(),
vars: IndexMap::new(),
}
}
pub fn it_value(value: Value) -> Scope {
Scope {
it: value,
vars: IndexMap::new(),
}
}
}
#[typetag::serde(tag = "type")]
pub trait EvaluateTrait: Debug + Send + Sync + Object + ObjectHash + 'static {
fn invoke(&self, scope: &Scope) -> Result<Value, ShellError>;
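/// Object-safe stand-in for `Clone`: trait objects cannot require the
/// `Clone` trait directly, so implementors hand back a fresh `Evaluate`
/// wrapping a copy of themselves.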
fn clone_box(&self) -> Evaluate;
}
interfaces!(Evaluate: dyn ObjectHash);
#[typetag::serde]
impl EvaluateTrait for Evaluate {
fn invoke(&self, scope: &Scope) -> Result<Value, ShellError> {
self.expr.invoke(scope)
}
fn clone_box(&self) -> Evaluate {
self.expr.clone_box()
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Evaluate {
expr: Box<dyn EvaluateTrait>,
}
impl Evaluate {
pub fn new(evaluate: impl EvaluateTrait) -> Evaluate {
Evaluate {
expr: Box::new(evaluate),
}
}
}
impl std::hash::Hash for Evaluate {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.expr.obj_hash(state)
}
}
impl Clone for Evaluate {
fn clone(&self) -> Evaluate {
self.expr.clone_box()
}
}
impl Ord for Evaluate {
fn cmp(&self, _: &Self) -> Ordering {
Ordering::Equal
}
}
impl PartialOrd for Evaluate {
fn partial_cmp(&self, _: &Evaluate) -> Option<Ordering> {
Some(Ordering::Equal)
}
}
impl PartialEq for Evaluate {
fn eq(&self, _: &Evaluate) -> bool {
true
}
}
impl Eq for Evaluate {}
|
#include <iostream>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <set>
#include <deque>
#include <queue>
#include <algorithm>
#include <stack>
#include <vector>
#include <map>
#include <list>
#include <bits/stdc++.h>
typedef long long ll;
typedef unsigned long long ull;
typedef double db;
#define fake int
#define get() getchar()
#define size 666
using namespace std;
int read(){
int x=0;
char ch=get();
while(ch<'0'||ch>'9') ch=get();
while(ch<='9'&&ch>='0'){
x=(x<<1)+(x<<3)+(ch-'0');
ch=get();
}
return x;
}
int n,m,k;
int main(){
// freopen("Time to Run.in","r",stdin);
n=read();m=read();k=read();
if(k>4*m*n-2*m-2*n){
printf("NO");
return 0;
}
printf("YES\n");
if(k<=m-1){
printf("1\n");
printf("%d R",k);
return 0;
}
if(k<=2*(m-1)){
printf("2\n");
printf("%d R\n",m-1);
printf("%d L",k-(m-1));
return 0;
}
if(k<=4*m*n-2*m-3*n+1){
k-=2*(m-1);
int sum;
if(m>1) sum=2+k/(4*m-3)*3;
else sum=k;
int q=k%(4*m-3);
if(q>=1) sum++;
if(q>1) sum++;
q-=m;
if(q>0){
if(q/3>0) sum++;
if(q%3>0) sum++;
}
q+=m;
printf("%d\n",sum);
if(m-1>0){
printf("%d R\n",m-1);
printf("%d L\n",m-1);
}
int cm=k/(4*m-3);
for(int i=1;i<=cm;i++)
if(m-1>0)printf("1 D\n%d R\n%d UDL\n",m-1,m-1);
else printf("1 D\n");
if(q==0) return 0;
printf("1 D\n");
q--;
if(q<=0) return 0;
printf("%d R\n",min(q,m-1));
q-=m-1;
if(q<=0) return 0;
int yy=q/3;
if(yy>0) printf("%d UDL\n",yy);
int qq=q%3;
if(qq==1) printf("1 U");
if(qq==2) printf("1 UD");
return 0;
}
if(m>1)printf("%d\n",(n-1)*3+3);
else printf("%d\n",n);
k-=2*(m-1);
if(m-1>0){
printf("%d R\n",m-1);
printf("%d L\n",m-1);
}
for(int i=1;i<=n-1;i++)
if(m>1)printf("1 D\n%d R\n%d UDL\n",m-1,m-1);
else printf("1 D\n");
k-=(4*m-3)*(n-1);
printf("%d U",k);
return 0;
}
|
import { Injectable } from '@nestjs/common';
import { ChurchService } from 'src/church/church.service';
import { CreateEventDto } from './dto/create-event.dto';
import { CreateScheduleDto } from './dto/create-schedule.dto';
import { CreateWeeklyEventDto } from './dto/create-weekly-event.dto';
import { UpdateEventDto } from './dto/update-event.dto';
import { UpdateScheduleDto } from './dto/update-schedule.dto';
import { EventsRepository } from './events.repository';
import { ScheduleRepository } from './schedule.respository';
import { WeeklyEventsRepository } from './weekly_events.repository';
@Injectable()
export class EventsService {
constructor(
private readonly eventWeekly:WeeklyEventsRepository,
private readonly event:EventsRepository,
private readonly church: ChurchService,
private readonly schedule:ScheduleRepository
){}
async createWeeklyEvent(id, createEventWeekly: CreateWeeklyEventDto){
let churchFound = await this.church.getById(id);
let weekEvent = await this.eventWeekly.create(createEventWeekly);
weekEvent.church = churchFound;
return this.eventWeekly.save(weekEvent);
}
async updateWeeklyEvent(id, id_weekly_event, updateEventWeekly){
let weekEvent = await this.eventWeekly.createQueryBuilder('weekly_events')
.where("weekly_events.id_weekly_events = :id_weekly_event AND weekly_events.churchIdChurch = :id ", {id_weekly_event, id})
.getOne();
this.eventWeekly.merge(weekEvent, updateEventWeekly);
return await this.eventWeekly.save(weekEvent);
}
async listWeeklyEvent(id){
return await this.eventWeekly.createQueryBuilder('weekly_events')
.where("weekly_events.churchIdChurch = :id",{id}).getMany();
}
async deleteWeeklyEvent(id,id_event){
try{
const churchFound = await this.church.getById(id);
return await this.eventWeekly.delete({church:churchFound,id_weekly_events:id_event});
}
catch(e){
throw e;
}
}
async createEvent(id,createEvent:CreateEventDto){
let churchFound = await this.church.getById(id);
let evento = this.event.create(createEvent);
evento.church = churchFound;
return this.event.save(evento);
}
async editEvent(id,id_event,updateEvent:UpdateEventDto){
let evento = await this.getEvent(id,id_event);
await this.event.merge(evento,updateEvent);
return this.event.save(evento);
}
async deleteEvent(id,id_event){
try{
let churchFound = await this.church.getById(id);
return await this.event.delete({id_event:id_event,church:churchFound})
}
catch(e){
throw e;
}
}
async listEvents(offset,limit,id){
let churchFound = await this.church.getById(id);
const [result, total] = await this.event.findAndCount({
order:{created_at:"ASC"},
where:{church:churchFound},
take:limit,
skip:offset
})
return {
data:result,
count:total
}
}
async createSchedule(id,id_event,createSchedule:CreateScheduleDto){
try{
let evento = await this.getEvent(id,id_event);
let sche = await this.schedule.create(createSchedule)
sche.event=evento;
return this.schedule.save(sche);
}catch(e){
throw e;
}
}
async getEvent(id,id_event){
return await this.event.createQueryBuilder('event')
.where('event.id_event = :id_event AND event.churchIdChurch = :id',{id_event,id})
.getOne();
}
async updateSchedule(id,id_event,id_schedule,updateSchedule:UpdateScheduleDto){
try{
let evento = await this.getEvent(id,id_event);
let sche = await this.findSchedule(id_schedule, evento);
this.schedule.merge(sche,updateSchedule);
return this.schedule.save(sche);
}catch(e){
throw e;
}
}
async deleteSchedule(id,id_event,id_schedule){
try{
let evento = await this.getEvent(id,id_event);
return this.schedule.delete({id_schedule,event:evento})
}
catch(e){
throw e;
}
}
async listSchedule(id,id_event){
try{
let evento = await this.getEvent(id,id_event)
return this.schedule.find({event:evento})
}catch(e){
throw e;
}
}
async getSchedule(id,id_event,id_schedule){
try{
let evento = await this.getEvent(id,id_event);
return this.schedule.findOne({id_schedule,event:evento})
}catch(e){
throw e;
}
}
async findSchedule(id_schedule, evento){
return this.schedule.findOne({id_schedule, event: evento});
}
}
|
def validate(self):
try:
Configuration(**self.serialize())
except dbt.exceptions.ValidationException as e:
raise DbtProjectError(str(e)) |
//===--- app.h ----------------------------------------------------------===//
// Copyright (c) 2017 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//===--------------------------------------------------------------------===//
#ifndef __JIG_APP_H__
#define __JIG_APP_H__
#include "clipboard.h"
#include "documentlist.h"
#include "figmanager.h"
#include "selectmodehandler.h"
#include "timeutils.h"
#include "ui.h"
namespace jig {
class App {
public:
enum class Mode {
NORMAL,
SELECT,
};
static constexpr char PROGRAM_NAME[] = "jig";
static constexpr char DISPLAY_NAME[] = "Jig";
static constexpr int VERSION_MAJOR = 1;
static constexpr int VERSION_MINOR = 0;
static constexpr int VERSION_BUILD = 0;
static App &getInstance();
DocumentList &getDocumentList() { return m_DocumentList; }
UI &getUI() { return m_UI; }
Clipboard &getClipboard() { return m_Clipboard; }
SelectModeHandler &getSelectModeHandler() { return m_SelectModeHandler; }
Mode getCurrentMode() const { return m_CurrentMode; }
void setCurrentMode(Mode mode) { m_CurrentMode = mode; }
Fig *getFig() { return m_FigManager.getCurrentFig(); }
const Fig *getFig() const { return m_FigManager.getCurrentFig(); }
const char *getExecName() const { return m_ExecName; }
const char *getProgramName() const { return m_ProgramName; }
void setKeepRunning(bool keepRunning) { m_KeepRunning = keepRunning; }
int run(int argc, char **argv);
private:
App() = default;
void setProgramName();
DocumentList m_DocumentList;
UI m_UI;
time::Timer m_Timer;
Clipboard m_Clipboard;
SelectModeHandler m_SelectModeHandler;
Mode m_CurrentMode;
FigManager m_FigManager;
const char *m_ExecName;
const char *m_ProgramName;
bool m_KeepRunning = true;
};
} // namespace jig
#endif // __JIG_APP_H__
|
// helix-core/src/main/java/com/linkedin/helix/messaging/handling/HelixStateTransitionHandler.java
/**
* Copyright (C) 2012 LinkedIn Inc <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.helix.messaging.handling;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import org.apache.log4j.Logger;
import com.linkedin.helix.HelixDataAccessor;
import com.linkedin.helix.HelixException;
import com.linkedin.helix.HelixManager;
import com.linkedin.helix.NotificationContext;
import com.linkedin.helix.PropertyKey;
import com.linkedin.helix.PropertyKey.Builder;
import com.linkedin.helix.ZNRecordBucketizer;
import com.linkedin.helix.ZNRecordDelta;
import com.linkedin.helix.ZNRecordDelta.MergeOperation;
import com.linkedin.helix.model.CurrentState;
import com.linkedin.helix.model.Message;
import com.linkedin.helix.participant.statemachine.StateModel;
import com.linkedin.helix.participant.statemachine.StateModelParser;
import com.linkedin.helix.participant.statemachine.StateTransitionError;
import com.linkedin.helix.util.StatusUpdateUtil;
public class HelixStateTransitionHandler extends MessageHandler
{
public static class HelixStateMismatchException extends Exception
{
public HelixStateMismatchException(String info)
{
super(info);
}
}
private static Logger logger =
Logger.getLogger(HelixStateTransitionHandler.class);
private final StateModel _stateModel;
StatusUpdateUtil _statusUpdateUtil;
private final StateModelParser _transitionMethodFinder;
private final CurrentState _currentStateDelta;
volatile boolean _isTimeout = false;
private final HelixTaskExecutor _executor;
public HelixStateTransitionHandler(StateModel stateModel,
Message message,
NotificationContext context,
CurrentState currentStateDelta,
HelixTaskExecutor executor)
{
super(message, context);
_stateModel = stateModel;
_statusUpdateUtil = new StatusUpdateUtil();
_transitionMethodFinder = new StateModelParser();
_currentStateDelta = currentStateDelta;
_executor = executor;
}
private void prepareMessageExecution(HelixManager manager, Message message) throws HelixException,
HelixStateMismatchException
{
if (!message.isValid())
{
String errorMessage =
"Invalid Message, ensure that message: " + message
+ " has all the required fields: "
+ Arrays.toString(Message.Attributes.values());
_statusUpdateUtil.logError(message,
HelixStateTransitionHandler.class,
errorMessage,
manager.getHelixDataAccessor());
logger.error(errorMessage);
throw new HelixException(errorMessage);
}
// DataAccessor accessor = manager.getDataAccessor();
HelixDataAccessor accessor = manager.getHelixDataAccessor();
String partitionName = message.getPartitionName();
String fromState = message.getFromState();
// Verify the fromState and current state of the stateModel
String state = _currentStateDelta.getState(partitionName);
if (fromState != null && !fromState.equals("*") && !fromState.equalsIgnoreCase(state))
{
String errorMessage =
"Current state of stateModel does not match the fromState in Message"
+ ", Current State:" + state + ", message expected:" + fromState
+ ", partition: " + partitionName + ", from: " + message.getMsgSrc()
+ ", to: " + message.getTgtName();
_statusUpdateUtil.logError(message,
HelixStateTransitionHandler.class,
errorMessage,
accessor);
logger.error(errorMessage);
throw new HelixStateMismatchException(errorMessage);
}
}
void postExecutionMessage(HelixManager manager,
Message message,
NotificationContext context,
HelixTaskResult taskResult,
Exception exception)
{
String partitionKey = message.getPartitionName();
String resource = message.getResourceName();
String sessionId = message.getTgtSessionId();
String instanceName = manager.getInstanceName();
HelixDataAccessor accessor = manager.getHelixDataAccessor();
Builder keyBuilder = accessor.keyBuilder();
int bucketSize = message.getBucketSize();
ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);
// Lock the helix manager so that the session id will not change when we update
// the state model state. for zk current state it is OK as we have the per-session
// current state node
synchronized (manager)
{
if (!message.getTgtSessionId().equals(manager.getSessionId()))
{
logger.warn("Session id has changed. Skip postExecutionMessage. Old session "
+ message.getExecutionSessionId() + " , new session : "
+ manager.getSessionId());
return;
}
if (taskResult.isSucess())
{
// String fromState = message.getFromState();
String toState = message.getToState();
_currentStateDelta.setState(partitionKey, toState);
if (toState.equalsIgnoreCase("DROPPED"))
{
// for "OnOfflineToDROPPED" message, we need to remove the resource key record
// from the current state of the instance because the resource key is dropped.
// In the state model it will be stayed as "OFFLINE", which is OK.
ZNRecordDelta delta =
new ZNRecordDelta(_currentStateDelta.getRecord(), MergeOperation.SUBTRACT);
// Don't subtract simple fields since they contain stateModelDefRef
delta._record.getSimpleFields().clear();
List<ZNRecordDelta> deltaList = new ArrayList<ZNRecordDelta>();
deltaList.add(delta);
_currentStateDelta.setDeltaList(deltaList);
}
else
{
// if the partition is not to be dropped, update _stateModel to the TO_STATE
_stateModel.updateState(toState);
}
}
else
{
if (exception instanceof HelixStateMismatchException)
{
// if fromState mismatch, set current state on zk to stateModel's current state
logger.warn("Force CurrentState on Zk to be stateModel's CurrentState. partitionKey: "
+ partitionKey
+ ", currentState: "
+ _stateModel.getCurrentState()
+ ", message: " + message);
_currentStateDelta.setState(partitionKey, _stateModel.getCurrentState());
}
else
{
StateTransitionError error =
new StateTransitionError(ErrorType.INTERNAL, ErrorCode.ERROR, exception);
if (exception instanceof InterruptedException)
{
if (_isTimeout)
{
error =
new StateTransitionError(ErrorType.INTERNAL,
ErrorCode.TIMEOUT,
exception);
}
else
{
// State transition interrupted but not caused by timeout. Keep the current
// state in this case
logger.error("State transition interrupted but not timeout. Not updating state. Partition : "
+ message.getPartitionName() + " MsgId : " + message.getMsgId());
return;
}
}
_stateModel.rollbackOnError(message, context, error);
_currentStateDelta.setState(partitionKey, "ERROR");
_stateModel.updateState("ERROR");
}
}
}
try
{
// Update the ZK current state of the node
PropertyKey key = keyBuilder.currentState(instanceName,
sessionId,
resource,
bucketizer.getBucketName(partitionKey));
if (!_message.getGroupMessageMode())
{
accessor.updateProperty(key, _currentStateDelta);
}
else
{
_executor._groupMsgHandler.addCurStateUpdate(_message, key, _currentStateDelta);
}
}
catch (Exception e)
{
logger.error("Error when updating the state ", e);
StateTransitionError error =
new StateTransitionError(ErrorType.FRAMEWORK, ErrorCode.ERROR, e);
_stateModel.rollbackOnError(message, context, error);
_statusUpdateUtil.logError(message,
HelixStateTransitionHandler.class,
e,
"Error when update the state ",
accessor);
}
}
public HelixTaskResult handleMessageInternal(Message message,
NotificationContext context)
{
synchronized (_stateModel)
{
HelixTaskResult taskResult = new HelixTaskResult();
HelixManager manager = context.getManager();
HelixDataAccessor accessor = manager.getHelixDataAccessor();
_statusUpdateUtil.logInfo(message,
HelixStateTransitionHandler.class,
"Message handling task begin execute",
accessor);
message.setExecuteStartTimeStamp(new Date().getTime());
Exception exception = null;
try
{
prepareMessageExecution(manager, message);
invoke(accessor, context, taskResult, message);
}
catch (HelixStateMismatchException e)
{
// Simply log error and return from here if State mismatch.
// The current state of the state model is intact.
taskResult.setSuccess(false);
taskResult.setMessage(e.toString());
taskResult.setException(e);
exception = e;
// return taskResult;
}
catch (Exception e)
{
String errorMessage =
"Exception while executing a state transition task "
+ message.getPartitionName();
logger.error(errorMessage, e);
if (e.getCause() != null && e.getCause() instanceof InterruptedException)
{
e = (InterruptedException) e.getCause();
}
_statusUpdateUtil.logError(message,
HelixStateTransitionHandler.class,
e,
errorMessage,
accessor);
taskResult.setSuccess(false);
taskResult.setMessage(e.toString());
taskResult.setException(e);
taskResult.setInterrupted(e instanceof InterruptedException);
exception = e;
}
postExecutionMessage(manager, message, context, taskResult, exception);
return taskResult;
}
}
private void invoke(HelixDataAccessor accessor,
NotificationContext context,
HelixTaskResult taskResult,
Message message) throws IllegalAccessException,
InvocationTargetException,
InterruptedException
{
_statusUpdateUtil.logInfo(message,
HelixStateTransitionHandler.class,
"Message handling invoking",
accessor);
// by default, we invoke state transition function in state model
Method methodToInvoke = null;
String fromState = message.getFromState();
String toState = message.getToState();
methodToInvoke =
_transitionMethodFinder.getMethodForTransition(_stateModel.getClass(),
fromState,
toState,
new Class[] { Message.class,
NotificationContext.class });
if (methodToInvoke != null)
{
methodToInvoke.invoke(_stateModel, new Object[] { message, context });
taskResult.setSuccess(true);
}
else
{
String errorMessage =
"Unable to find method for transition from " + fromState + " to " + toState
+ "in " + _stateModel.getClass();
logger.error(errorMessage);
taskResult.setSuccess(false);
_statusUpdateUtil.logError(message,
HelixStateTransitionHandler.class,
errorMessage,
accessor);
}
}
@Override
public HelixTaskResult handleMessage()
{
return handleMessageInternal(_message, _notificationContext);
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type)
{
// All internal error has been processed already, so we can skip them
if (type == ErrorType.INTERNAL)
{
logger.error("Skip internal error " + e.getMessage() + " " + code);
return;
}
HelixManager manager = _notificationContext.getManager();
HelixDataAccessor accessor = manager.getHelixDataAccessor();
Builder keyBuilder = accessor.keyBuilder();
String instanceName = manager.getInstanceName();
String partition = _message.getPartitionName();
String resourceName = _message.getResourceName();
CurrentState currentStateDelta = new CurrentState(resourceName);
StateTransitionError error = new StateTransitionError(type, code, e);
_stateModel.rollbackOnError(_message, _notificationContext, error);
// if the transition is not canceled, it should go into error state
if (code == ErrorCode.ERROR)
{
currentStateDelta.setState(partition, "ERROR");
_stateModel.updateState("ERROR");
accessor.updateProperty(keyBuilder.currentState(instanceName,
_message.getTgtSessionId(),
resourceName),
currentStateDelta);
}
}
@Override
public void onTimeout()
{
_isTimeout = true;
}
};
|
# -*- coding: utf-8 -*-
import logging
import sys
import time
import boto3
import lumbermill.utils.DictUtils as DictUtils
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class SQS(BaseThreadedModule):
"""
Read messages from amazon sqs service.
aws_access_key_id: Your AWS id.
aws_secret_access_key: Your AWS password.
region: The region in which to find your sqs service.
queue: Queue name.
attribute_names: A list of attributes that need to be returned along with each message.
message_attribute_names: A list of message attributes that need to be returned.
poll_interval_in_secs: How often should the queue be checked for new messages.
batch_size: Number of messages to retrieve in one call.
Configuration template:
- input.SQS:
aws_access_key_id: # <type: string; is: required>
aws_secret_access_key: # <type: string; is: required>
region: # <type: string; is: required; values: ['us-east-1', 'us-west-1', 'us-west-2', 'eu-central-1', 'eu-west-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'sa-east-1', 'us-gov-west-1', 'cn-north-1']>
queue: # <type: string; is: required>
attribute_names: # <default: ['All']; type: list; is: optional>
message_attribute_names: # <default: ['All']; type: list; is: optional>
poll_interval_in_secs: # <default: 1; type: integer; is: optional>
batch_size: # <default: 10; type: integer; is: optional>
receivers:
- NextModule
"""
module_type = "input"
"""Set module type"""
can_run_forked = True
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.configure(self, configuration)
# Set boto log level.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
self.sqs_queue_name = self.getConfigurationValue('queue')
self.attribute_names = self.getConfigurationValue('attribute_names')
self.message_attribute_names = self.getConfigurationValue('message_attribute_names')
self.poll_interval = self.getConfigurationValue('poll_interval_in_secs')
self.batch_size = self.getConfigurationValue('batch_size')
try:
self.sqs_client = boto3.client('sqs', region_name=self.getConfigurationValue('region'),
api_version=None,
use_ssl=True,
verify=None,
endpoint_url=None,
aws_access_key_id=self.getConfigurationValue('aws_access_key_id'),
aws_secret_access_key=self.getConfigurationValue('aws_secret_access_key'),
aws_session_token=None,
config=None)
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not connect to sqs service. Exception: %s, Error: %s." % (etype, evalue))
self.lumbermill.shutDown()
try:
self.sqs_queue_url = self.sqs_client.get_queue_url(QueueName=self.sqs_queue_name)['QueueUrl']
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not get queue url for sqs queue %s. Exception: %s, Error: %s." % (self.sqs_queue_name, etype, evalue))
self.lumbermill.shutDown()
def run(self):
while self.alive:
messages_to_delete = []
response = self.sqs_client.receive_message(QueueUrl=self.sqs_queue_url,
MaxNumberOfMessages=self.batch_size,
AttributeNames=self.attribute_names,
MessageAttributeNames=self.message_attribute_names)
if 'Messages' not in response:
time.sleep(self.poll_interval)
continue
for message in response['Messages']:
event = DictUtils.getDefaultEventDict({"data": message['Body']}, caller_class_name="Sqs")
event['sqs'] = {'attributes': message['Attributes'],
'id': message['MessageId'],
'md5_of_body': message['MD5OfBody'],
'md5_of_message_attributes': message.get('MD5OfMessageAttributes', None),
'message_attributes': message.get('MessageAttributes', None)}
messages_to_delete.append({'Id': message['MessageId'],
'ReceiptHandle': message['ReceiptHandle']})
self.sendEvent(event)
self.sqs_client.delete_message_batch(QueueUrl=self.sqs_queue_url, Entries=messages_to_delete)
self.lumbermill.shutDown()
|
// ParseFields parses a string to fetch fields
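// Placeholders have the form {$name} (required) or {$name:default}
// (optional). For example, "deploy {$env:prod} to {$region}" yields an
// optional field "env" with default "prod" and a required field "region".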
func (c *Caller) ParseFields(data string) map[string]Field {
var ita []string
var key string
var value string
m := regexp.MustCompile(`{\$(.*?)}`)
items := m.FindAllString(data, -1)
fields := make(map[string]Field)
for _, item := range items {
if !strings.HasPrefix(item, `{$`) || !strings.HasSuffix(item, "}") {
continue
}
if !strings.Contains(item, ":") {
item = strings.Replace(item, "$", "", -1)
item = strings.Replace(item, "{", "", -1)
item = strings.Replace(item, "}", "", -1)
fields[item] = Field{
Prompt: fmt.Sprintf(`$%s%s (default=''):`, item, Red("*")),
IsOptional: false,
Default: "",
}
continue
}
m = regexp.MustCompile(`{\$(.*?):`)
ita = m.FindAllString(item, -1)
key = strings.TrimPrefix(ita[0], `{$`)
key = strings.TrimSuffix(key, `:`)
m = regexp.MustCompile(`:(.*?)}`)
ita = m.FindAllString(item, -1)
value = strings.TrimPrefix(ita[0], `:`)
value = strings.TrimSuffix(value, `}`)
fields[key] = Field{
Prompt: fmt.Sprintf(`$%s (default='%s'):`, key, Yellow(value)),
IsOptional: true,
Default: value,
}
}
return fields
} |
// Trace print sql message.
func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
if l.LogLevel <= gormlogger.Silent {
return
}
elapsed := time.Since(begin)
switch {
case err != nil && l.LogLevel >= gormlogger.Error && (!errors.Is(err, gormlogger.ErrRecordNotFound) || !l.IgnoreRecordNotFoundError):
sql, rows := fc()
l.ZapLogger.Error("trace", zap.Error(err), zap.Duration("elapsed", elapsed), zap.Int64("rows", rows), zap.String("sql", sql))
case elapsed > l.SlowThreshold && l.SlowThreshold != 0 && l.LogLevel >= gormlogger.Warn:
sql, rows := fc()
slowLog := fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold)
l.ZapLogger.Warn("trace", zap.Duration("elapsed", elapsed), zap.Int64("rows", rows), zap.String("sql", sql), zap.String("slow sql", slowLog))
case l.LogLevel == gormlogger.Info:
sql, rows := fc()
l.ZapLogger.Info("trace", zap.Duration("elapsed", elapsed), zap.Int64("rows", rows), zap.String("sql", sql))
}
} |
// This works just like http.ListenAndServeTLS but certificates are loaded into
// a wrapper struct that reloads certificates from disk when a SIGHUP is
// received.
func ListenAndServeTLS(addr, certFile, keyFile string, handler http.Handler) error {
server := &http.Server{Addr: addr, Handler: handler}
keypair, err := NewKeypairReloader(certFile, keyFile)
if err != nil {
return err
}
server.TLSConfig = &tls.Config{GetCertificate: keypair.GetCertificateFunc()}
return server.ListenAndServeTLS("", "")
} |
# fengshen/models/auto/__init__.py
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_torch_available
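# Symbols exposed by this package. Torch-dependent model classes are added
# below only when torch is available, and all imports are resolved lazily
# on first attribute access via _LazyModule.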
_import_structure = {
"auto_factory": ["get_values"],
"configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
"tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
}
if is_torch_available():
_import_structure["modeling_auto"] = [
"AutoModel",
"AutoModelForMaskedLM",
"AutoModelForMultipleChoice",
"AutoModelForPreTraining",
"AutoModelForQuestionAnswering",
"AutoModelForSequenceClassification",
"AutoModelForTokenClassification",
]
if TYPE_CHECKING:
from .auto_factory import get_values
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
if is_torch_available():
from .modeling_auto import (
AutoModel,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
|
export const METRIC_NAMES = {
viewHome: "loaded screen: home",
};
|
// 11Zero/DemoBCG
//*******************************************************************************
// COPYRIGHT NOTES
// ---------------
// This is a part of the BCGControlBar Library
// Copyright (C) 1998-2014 BCGSoft Ltd.
// All rights reserved.
//
// This source code can be used, distributed or modified
// only under terms and conditions
// of the accompanying license agreement.
//*******************************************************************************
//
// BCGPComboBox.cpp : implementation file
//
#include "stdafx.h"
#include "BCGPComboBox.h"
#include "BCGPDlgImpl.h"
#ifndef _BCGSUITE_
#include "BCGPToolBarImages.h"
#include "BCGPToolbarComboBoxButton.h"
#endif
#include "BCGPVisualManager.h"
#include "BCGPDrawManager.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
/////////////////////////////////////////////////////////////////////////////
// CBCGPComboBox
IMPLEMENT_DYNAMIC(CBCGPComboBox, CComboBox)
CBCGPComboBox::CBCGPComboBox()
{
m_bVisualManagerStyle = FALSE;
m_bOnGlass = FALSE;
m_bIsDroppedDown = FALSE;
m_bIsButtonHighlighted = FALSE;
m_rectBtn.SetRectEmpty ();
m_bTracked = FALSE;
m_clrPrompt = (COLORREF)-1;
m_clrErrorText = (COLORREF)-1;
m_bDefaultPrintClient = FALSE;
}
CBCGPComboBox::~CBCGPComboBox()
{
}
BEGIN_MESSAGE_MAP(CBCGPComboBox, CComboBox)
//{{AFX_MSG_MAP(CBCGPComboBox)
ON_WM_NCPAINT()
ON_WM_PAINT()
ON_WM_MOUSEMOVE()
ON_WM_CANCELMODE()
ON_WM_LBUTTONDOWN()
ON_WM_KILLFOCUS()
ON_WM_CREATE()
ON_WM_SETFOCUS()
//}}AFX_MSG_MAP
ON_CONTROL_REFLECT_EX(CBN_EDITUPDATE, OnEditupdate)
ON_CONTROL_REFLECT_EX(CBN_SELCHANGE, OnSelchange)
ON_CONTROL_REFLECT_EX(CBN_CLOSEUP, OnCloseup)
ON_CONTROL_REFLECT_EX(CBN_DROPDOWN, OnDropdown)
ON_REGISTERED_MESSAGE(BCGM_ONSETCONTROLVMMODE, OnBCGSetControlVMMode)
ON_REGISTERED_MESSAGE(BCGM_ONSETCONTROLAERO, OnBCGSetControlAero)
ON_MESSAGE(WM_SETTEXT, OnSetText)
ON_MESSAGE(WM_PRINTCLIENT, OnPrintClient)
END_MESSAGE_MAP()
/////////////////////////////////////////////////////////////////////////////
// CBCGPComboBox message handlers
LRESULT CBCGPComboBox::OnBCGSetControlVMMode (WPARAM wp, LPARAM)
{
m_bVisualManagerStyle = (BOOL) wp;
return 0;
}
//**************************************************************************
void CBCGPComboBox::SubclassEditBox()
{
if (GetSafeHwnd() == NULL)
{
return;
}
if (m_wndEdit.GetSafeHwnd () == NULL && (GetStyle () & CBS_DROPDOWN))
{
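		// Walk the combo's child windows to locate its internal edit control.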
CWnd* pWndChild = GetWindow (GW_CHILD);
while (pWndChild != NULL)
{
ASSERT_VALID (pWndChild);
if (CWnd::FromHandlePermanent (pWndChild->GetSafeHwnd ()) == NULL)
{
#define MAX_CLASS_NAME 255
#define EDIT_CLASS _T("Edit")
TCHAR lpszClassName [MAX_CLASS_NAME + 1];
::GetClassName (pWndChild->GetSafeHwnd (), lpszClassName, MAX_CLASS_NAME);
CString strClass = lpszClassName;
if (strClass == EDIT_CLASS)
{
m_wndEdit.SubclassWindow (pWndChild->GetSafeHwnd ());
m_wndEdit.m_bOnGlass = m_bOnGlass;
m_wndEdit.m_bVisualManagerStyle = m_bVisualManagerStyle;
break;
}
}
pWndChild = pWndChild->GetNextWindow ();
}
}
}
//**************************************************************************
LRESULT CBCGPComboBox::OnBCGSetControlAero (WPARAM wp, LPARAM)
{
m_bOnGlass = (BOOL) wp;
if (m_bOnGlass)
{
SubclassEditBox();
}
return 0;
}
//**************************************************************************
void CBCGPComboBox::OnNcPaint()
{
#ifndef _BCGSUITE_
if (globalData.bIsWindows9x)
{
Default();
}
#endif
}
//**************************************************************************
void CBCGPComboBox::OnPaint()
{
#ifndef _BCGSUITE_
if (globalData.bIsWindows9x)
{
Default();
return;
}
#endif
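	// 0x0003 masks the combo style bits (CBS_SIMPLE/CBS_DROPDOWN/CBS_DROPDOWNLIST);
	// simple combos keep the default painting.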
if ((GetStyle () & 0x0003L) == CBS_SIMPLE)
{
Default ();
return;
}
BOOL bDrawPrompt = FALSE;
if (!m_strPrompt.IsEmpty() || !m_strErrorMessage.IsEmpty())
{
BOOL bTextIsEmpty = GetWindowTextLength() == 0;
if (m_wndEdit.GetSafeHwnd () != NULL)
{
if (!m_strErrorMessage.IsEmpty())
{
m_wndEdit.SetErrorMessage(m_strErrorMessage, m_clrErrorText);
}
else
{
m_wndEdit.SetPrompt(bTextIsEmpty ? m_strPrompt : _T(""), m_clrPrompt);
}
}
else
{
bDrawPrompt = bTextIsEmpty || !m_strErrorMessage.IsEmpty();
}
}
if (!m_bVisualManagerStyle && !m_bOnGlass && !bDrawPrompt)
{
Default ();
return;
}
CPaintDC dc(this); // device context for painting
OnDraw(&dc, bDrawPrompt);
}
//**************************************************************************
void CBCGPComboBox::OnDraw(CDC* pDCIn, BOOL bDrawPrompt)
{
ASSERT_VALID(pDCIn);
BYTE alpha = 0;
if (m_bOnGlass)
{
alpha = 255;
}
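	// Paint into an off-screen memory DC to avoid flicker.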
CBCGPMemDC memDC(*pDCIn, this, alpha);
CDC* pDC = &memDC.GetDC ();
CRect rectClient;
GetClientRect (rectClient);
CBCGPDrawManager dm (*pDC);
dm.DrawRect (rectClient, m_bVisualManagerStyle ? globalData.clrBarHilite : globalData.clrWindow, (COLORREF)-1);
BOOL bDefaultDraw = TRUE;
if (bDrawPrompt)
{
COLORREF clrText = !m_strErrorMessage.IsEmpty() ? m_clrErrorText : m_clrPrompt;
if (clrText == (COLORREF)-1)
{
#ifndef _BCGSUITE_
clrText = m_bVisualManagerStyle ? CBCGPVisualManager::GetInstance ()->GetToolbarEditPromptColor() : globalData.clrPrompt;
#else
clrText = globalData.clrGrayedText;
#endif
}
pDC->SetTextColor(clrText);
pDC->SetBkMode(TRANSPARENT);
CFont* pOldFont = pDC->SelectObject (GetFont());
CRect rectText;
GetClientRect(rectText);
rectText.left += 4;
if ((GetStyle () & WS_BORDER) != 0 || (GetExStyle () & WS_EX_CLIENTEDGE) != 0)
{
rectText.DeflateRect (1, 1);
}
UINT nFormat = DT_LEFT | DT_SINGLELINE | DT_VCENTER;
const CString& str = !m_strErrorMessage.IsEmpty() ? m_strErrorMessage : m_strPrompt;
if (m_bOnGlass)
{
CBCGPVisualManager::GetInstance ()->DrawTextOnGlass(pDC, str, rectText, nFormat, 0, clrText);
}
else
{
pDC->DrawText(str, rectText, nFormat);
}
pDC->SelectObject (pOldFont);
}
else
{
#ifndef _BCGSUITE_
if ((GetStyle() & CBS_OWNERDRAWFIXED) == 0 && (GetStyle() & CBS_OWNERDRAWVARIABLE) == 0)
{
bDefaultDraw = !CBCGPVisualManager::GetInstance ()->OnDrawComboBoxText(pDC, this);
if (m_bVisualManagerStyle && bDefaultDraw)
{
CString strText;
GetWindowText(strText);
CRect rect;
GetClientRect(rect);
BOOL bIsFocused = GetSafeHwnd() == CWnd::GetFocus()->GetSafeHwnd();
COLORREF clrText = CBCGPVisualManager::GetInstance ()->OnFillComboBoxItem(pDC, this, GetCurSel(), rect, bIsFocused, bIsFocused);
if (!IsWindowEnabled())
{
clrText = CBCGPVisualManager::GetInstance ()->GetToolbarDisabledTextColor();
}
rect.left += 4;
CFont* pOldFont = pDC->SelectObject(GetFont());
ASSERT_VALID(pOldFont);
int nOldBkMode = pDC->SetBkMode(TRANSPARENT);
COLORREF clrTextOld = pDC->SetTextColor(clrText);
const int cxDropDown = ::GetSystemMetrics (SM_CXVSCROLL);
rect.right -= cxDropDown;
UINT nFormat = DT_SINGLELINE | DT_VCENTER | DT_LEFT;
if (m_bOnGlass)
{
CBCGPVisualManager::GetInstance ()->DrawTextOnGlass(pDC, strText, rect, nFormat, 0, clrText);
}
else
{
pDC->DrawText(strText, rect, nFormat);
}
pDC->SelectObject(pOldFont);
pDC->SetBkMode(nOldBkMode);
pDC->SetTextColor(clrTextOld);
bDefaultDraw = FALSE;
}
}
#endif
if (bDefaultDraw)
{
m_bDefaultPrintClient = TRUE;
SendMessage (WM_PRINTCLIENT, (WPARAM) pDC->GetSafeHdc (), (LPARAM) PRF_CLIENT);
m_bDefaultPrintClient = FALSE;
}
}
if ((GetStyle() & CBS_OWNERDRAWFIXED) != 0 || (GetStyle() & CBS_OWNERDRAWVARIABLE) != 0)
{
pDC->SelectClipRgn (NULL);
}
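	// Lay out and draw the drop-down arrow button at the right edge.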
const int cxDropDown = ::GetSystemMetrics (SM_CXVSCROLL) + 4;
m_rectBtn = rectClient;
m_rectBtn.left = m_rectBtn.right - cxDropDown;
m_rectBtn.DeflateRect (2, 2);
CBCGPDrawOnGlass dog (m_bOnGlass);
CBCGPToolbarComboBoxButton buttonDummy;
#ifndef _BCGSUITE_
buttonDummy.m_bIsCtrl = TRUE;
CBCGPVisualManager::GetInstance ()->OnDrawComboDropButton (
pDC, m_rectBtn, !IsWindowEnabled (), m_bIsDroppedDown,
m_bIsButtonHighlighted,
&buttonDummy);
if (bDefaultDraw)
{
dm.DrawRect (rectClient, (COLORREF)-1, globalData.clrBarShadow);
}
else
{
CBCGPVisualManager::GetInstance ()->OnDrawControlBorder (pDC, rectClient, this, m_bOnGlass);
}
#else
CMFCVisualManager::GetInstance ()->OnDrawComboDropButton (
pDC, m_rectBtn, !IsWindowEnabled (), m_bIsDroppedDown,
m_bIsButtonHighlighted,
&buttonDummy);
dm.DrawRect (rectClient, (COLORREF)-1, globalData.clrBarShadow);
#endif
rectClient.DeflateRect (1, 1);
dm.DrawRect (rectClient, (COLORREF)-1, m_bVisualManagerStyle ? globalData.clrBarHilite : globalData.clrWindow);
}
//**************************************************************************
BOOL CBCGPComboBox::OnCloseup()
{
m_bIsDroppedDown = FALSE;
m_bIsButtonHighlighted = FALSE;
RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
return FALSE;
}
//**************************************************************************
BOOL CBCGPComboBox::OnDropdown()
{
if (m_bTracked)
{
ReleaseCapture ();
m_bTracked = FALSE;
}
m_bIsDroppedDown = TRUE;
m_bIsButtonHighlighted = FALSE;
RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
return FALSE;
}
//**************************************************************************
void CBCGPComboBox::OnMouseMove(UINT nFlags, CPoint point)
{
if ((nFlags & MK_LBUTTON) == 0)
{
BOOL bIsButtonHighlighted = m_bIsButtonHighlighted;
m_bIsButtonHighlighted = m_rectBtn.PtInRect (point);
if (bIsButtonHighlighted != m_bIsButtonHighlighted)
{
if (!m_bTracked)
{
if (m_bIsButtonHighlighted)
{
SetCapture ();
m_bTracked = TRUE;
}
}
else
{
if (!m_bIsButtonHighlighted)
{
ReleaseCapture ();
m_bTracked = FALSE;
}
}
RedrawWindow(m_rectBtn, NULL, RDW_INVALIDATE | RDW_UPDATENOW);
}
}
CComboBox::OnMouseMove(nFlags, point);
}
//*****************************************************************************************
void CBCGPComboBox::OnCancelMode()
{
CComboBox::OnCancelMode();
if (m_bTracked)
{
ReleaseCapture ();
m_bIsButtonHighlighted = FALSE;
m_bTracked = FALSE;
RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
}
//**************************************************************************
void CBCGPComboBox::OnLButtonDown(UINT nFlags, CPoint point)
{
if (m_bTracked)
{
ReleaseCapture ();
m_bIsButtonHighlighted = FALSE;
m_bTracked = FALSE;
RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
CComboBox::OnLButtonDown(nFlags, point);
}
//**************************************************************************
void CBCGPComboBox::OnKillFocus(CWnd* pNewWnd)
{
CComboBox::OnKillFocus(pNewWnd);
BOOL bDrawPrompt = (!m_strPrompt.IsEmpty() && GetWindowTextLength() == 0) || !m_strErrorMessage.IsEmpty();
if (bDrawPrompt)
{
m_bIsButtonHighlighted = FALSE;
}
if (m_bVisualManagerStyle || bDrawPrompt)
{
RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
}
//**************************************************************************
void CBCGPComboBox::SetPrompt(LPCTSTR lpszPrompt, COLORREF clrText, BOOL bRedraw)
{
ASSERT_VALID (this);
CString strOldPrompt = m_strPrompt;
BOOL bColorWasChanged = m_clrPrompt != clrText;
m_strPrompt = (lpszPrompt == NULL) ? _T("") : lpszPrompt;
m_clrPrompt = clrText;
if (!m_strPrompt.IsEmpty())
{
SubclassEditBox();
}
if (m_wndEdit.GetSafeHwnd () != NULL)
{
m_wndEdit.SetPrompt(lpszPrompt, clrText, bRedraw);
}
if (bRedraw && GetSafeHwnd() != NULL && (bColorWasChanged || m_strPrompt != strOldPrompt))
{
RedrawWindow(NULL, NULL, RDW_FRAME | RDW_INVALIDATE | RDW_ERASE | RDW_UPDATENOW);
}
}
//**************************************************************************
void CBCGPComboBox::SetErrorMessage(LPCTSTR lpszPrompt, COLORREF clrText, BOOL bRedraw)
{
ASSERT_VALID (this);
CString strOldPrompt = m_strErrorMessage;
BOOL bColorWasChanged = m_clrErrorText != clrText;
m_strErrorMessage = (lpszPrompt == NULL) ? _T("") : lpszPrompt;
m_clrErrorText = clrText;
if (!m_strErrorMessage.IsEmpty())
{
SubclassEditBox();
}
if (m_wndEdit.GetSafeHwnd () != NULL)
{
m_wndEdit.SetErrorMessage(lpszPrompt, clrText, bRedraw);
}
if (bRedraw && GetSafeHwnd() != NULL && (bColorWasChanged || m_strErrorMessage != strOldPrompt))
{
RedrawWindow(NULL, NULL, RDW_FRAME | RDW_INVALIDATE | RDW_ERASE | RDW_UPDATENOW);
}
}
//**************************************************************************
void CBCGPComboBox::PreSubclassWindow()
{
CComboBox::PreSubclassWindow();
if (!m_strPrompt.IsEmpty())
{
SubclassEditBox();
if (m_wndEdit.GetSafeHwnd () != NULL)
{
m_wndEdit.SetPrompt(m_strPrompt, m_clrPrompt, FALSE);
}
}
}
//**************************************************************************
int CBCGPComboBox::OnCreate(LPCREATESTRUCT lpCreateStruct)
{
if (CComboBox::OnCreate(lpCreateStruct) == -1)
return -1;
if (!m_strPrompt.IsEmpty())
{
SubclassEditBox();
if (m_wndEdit.GetSafeHwnd () != NULL)
{
m_wndEdit.SetPrompt(m_strPrompt, m_clrPrompt, FALSE);
}
}
return 0;
}
//**************************************************************************
void CBCGPComboBox::OnSetFocus(CWnd* pOldWnd)
{
CComboBox::OnSetFocus(pOldWnd);
BOOL bDrawPrompt = (!m_strPrompt.IsEmpty() && GetWindowTextLength() == 0) || !m_strErrorMessage.IsEmpty();
	if (bDrawPrompt)
	{
		m_bIsButtonHighlighted = TRUE;
		RedrawWindow (NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
	}
}
//**************************************************************************
BOOL CBCGPComboBox::OnEditupdate()
{
if (!m_strErrorMessage.IsEmpty())
{
SetErrorMessage(NULL, m_clrErrorText);
}
return FALSE;
}
//**************************************************************************
BOOL CBCGPComboBox::OnSelchange()
{
if (!m_strErrorMessage.IsEmpty())
{
SetErrorMessage(NULL, m_clrErrorText);
}
if (m_bVisualManagerStyle)
{
RedrawWindow(NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
return FALSE;
}
//**************************************************************************
LRESULT CBCGPComboBox::OnSetText (WPARAM, LPARAM)
{
LRESULT lRes = Default();
if (m_bVisualManagerStyle)
{
RedrawWindow(NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
return lRes;
}
//**************************************************************************
LRESULT CBCGPComboBox::WindowProc(UINT message, WPARAM wParam, LPARAM lParam)
{
LRESULT lRes = CComboBox::WindowProc(message, wParam, lParam);
if (message == CB_SETCURSEL && m_bVisualManagerStyle)
{
RedrawWindow(NULL, NULL, RDW_INVALIDATE | RDW_FRAME | RDW_UPDATENOW | RDW_ALLCHILDREN);
}
return lRes;
}
//*******************************************************************************
LRESULT CBCGPComboBox::OnPrintClient(WPARAM wp, LPARAM lp)
{
if ((lp & PRF_CLIENT) == PRF_CLIENT)
{
if (m_bDefaultPrintClient)
{
return Default();
}
CDC* pDC = CDC::FromHandle((HDC) wp);
ASSERT_VALID(pDC);
if ((GetStyle () & 0x0003L) == CBS_SIMPLE)
{
return Default ();
}
BOOL bDrawPrompt = FALSE;
if (!m_strPrompt.IsEmpty() || !m_strErrorMessage.IsEmpty())
{
BOOL bTextIsEmpty = GetWindowTextLength() == 0;
if (m_wndEdit.GetSafeHwnd () == NULL)
{
bDrawPrompt = bTextIsEmpty || !m_strErrorMessage.IsEmpty();
}
}
if (!m_bVisualManagerStyle && !bDrawPrompt)
{
return Default();
}
OnDraw(pDC, bDrawPrompt);
}
return 0;
}
|
type Snippet = { body: string[], description: string, prefix: string, functionType: 'arrow' | 'function' | 'both' }
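// Snippet bodies use VS Code snippet syntax: ${n} are tab stops, \t indents.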
const snippets: Snippet[] = [
{
prefix: "before",
functionType: 'arrow',
body: [
"before(() => {",
"\t${1}",
"});"
],
description: "Mocha::Before "
},
{
prefix: "fbefore",
functionType: 'function',
body: [
"before(function () {",
"\t${1}",
"});"
],
description: "Mocha::Before::Function"
},
{
prefix: "beforeNamed",
functionType: 'arrow',
body: [
"before(function ${1}() {",
"\t${2}",
"});"
],
description: "Mocha::Before with Named Function"
},
{
prefix: "beforeDescription",
functionType: 'arrow',
body: [
"before('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Before with Description"
},
{
prefix: "fbeforeDescription",
functionType: 'function',
body: [
"before('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Before with Description::Function"
},
{
prefix: "beforeEach",
functionType: 'arrow',
body: [
"beforeEach(() => {",
"\t${1}",
"});"
],
description: "Mocha::Before Each"
},
{
prefix: "fbeforeEach",
functionType: 'function',
body: [
"beforeEach(function () {",
"\t${1}",
"});"
],
description: "Mocha::Before Each::Function"
},
{
prefix: "beforeEachNamed",
functionType: 'arrow',
body: [
"beforeEach(function ${1}() {",
"\t${2}",
"});"
],
description: "Mocha::Before Each with Named Function"
},
{
prefix: "beforeEachDescription",
functionType: 'arrow',
body: [
"beforeEach('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Before Each with Description"
},
{
prefix: "fbeforeEachDescription",
functionType: 'function',
body: [
"beforeEach('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Before Each with Description::Function"
},
{
prefix: "after",
functionType: 'arrow',
body: [
"after(() => {",
"\t${1}",
"});"
],
description: "Mocha::After"
},
{
prefix: "fafter",
functionType: 'function',
body: [
"after(function () {",
"\t${1}",
"});"
],
description: "Mocha::After::Function"
},
{
prefix: "afterNamed",
functionType: 'arrow',
body: [
"after(function ${1}() {",
"\t${2}",
"});"
],
description: "Mocha::After with Named Function"
},
{
prefix: "afterDescription",
functionType: 'arrow',
body: [
"after('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::After with Description"
},
{
prefix: "fafterDescription",
functionType: 'function',
body: [
"after('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::After with Description::Function"
},
{
prefix: "afterEach",
functionType: 'arrow',
body: [
"afterEach(() => {",
"\t${1}",
"});"
],
description: "Mocha::After Each"
},
{
prefix: "fafterEach",
functionType: 'function',
body: [
"afterEach(function () {",
"\t${1}",
"});"
],
description: "Mocha::After Each::Function"
},
{
prefix: "afterEachNamed",
functionType: 'arrow',
body: [
"afterEach(function ${1}() {",
"\t${2}",
"});"
],
description: "Mocha::After Each with Named Function"
},
{
prefix: "afterEachDescription",
functionType: 'arrow',
body: [
"afterEach('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::After Each with Description"
},
{
prefix: "fafterEachDescription",
functionType: 'function',
body: [
"afterEach('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::After Each with Description::Function"
},
{
prefix: "describeAndIt",
functionType: 'arrow',
body: [
"describe('${1}', () => {",
"\tit('${2}', () => {",
"\t\t${3}",
"\t});",
"});"
],
description: "Mocha::Describe with It"
},
{
prefix: "fdescribeAndIt",
functionType: 'function',
body: [
"describe('${1}', function () {",
"\tit('${2}', function () {",
"\t\t${3}",
"\t});",
"});"
],
description: "Mocha::Describe with It::Function"
},
{
prefix: "describe",
functionType: 'arrow',
body: [
"describe('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Describe"
},
{
prefix: "fdescribe",
functionType: 'function',
body: [
"describe('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Describe::Function"
},
{
prefix: "contextAndIt",
functionType: 'arrow',
body: [
"context('${1}', () => {",
"\tit('${2}', () => {",
"\t\t${3}",
"\t});",
"});"
],
description: "Mocha::Context with It"
},
{
prefix: "fcontextAndIt",
functionType: 'function',
body: [
"context('${1}', function () {",
"\tit('${2}', function () {",
"\t\t${3}",
"\t});",
"});"
],
description: "Mocha::Context with It::Function"
},
{
prefix: "context",
functionType: 'arrow',
body: [
"context('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Context"
},
{
prefix: "fcontext",
functionType: 'function',
body: [
"context('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Context::Function"
},
{
prefix: "it",
functionType: 'arrow',
body: [
"it('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::It"
},
{
prefix: "fit",
functionType: 'function',
body: [
"it('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::It::Function"
},
{
prefix: "suite",
functionType: 'arrow',
body: [
"suite('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Suite"
},
{
prefix: "fsuite",
functionType: 'function',
body: [
"suite('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Suite::Function"
},
{
prefix: "suiteSetup",
functionType: 'arrow',
body: [
"suiteSetup(() => {",
"\t${1}",
"});"
],
description: "Mocha::SuiteSetup"
},
{
prefix: "fsuiteSetup",
functionType: 'function',
body: [
"suiteSetup(function () {",
"\t${1}",
"});"
],
description: "Mocha::SuiteSetup::Function"
},
{
prefix: "setup",
functionType: 'arrow',
body: [
"setup(() => {",
"\t${1}",
"});"
],
description: "Mocha::Setup"
},
{
prefix: "fsetup",
functionType: 'function',
body: [
"setup(function () {",
"\t${1}",
"});"
],
description: "Mocha::Setup::Function"
},
{
prefix: "suiteTeardown",
functionType: 'arrow',
body: [
"suiteTeardown(() => {",
"\t${1}",
"});"
],
description: "Mocha::SuiteTeardown"
},
{
prefix: "fsuiteTeardown",
functionType: 'function',
body: [
"suiteTeardown(function () {",
"\t${1}",
"});"
],
description: "Mocha::SuiteTeardown::Function"
},
{
prefix: "teardown",
functionType: 'arrow',
body: [
"teardown(() => {",
"\t${1}",
"});"
],
description: "Mocha::Teardown"
},
{
prefix: "fteardown",
functionType: 'function',
body: [
"teardown(function () {",
"\t${1}",
"});"
],
description: "Mocha::Teardown::Function"
},
{
prefix: "test",
functionType: 'arrow',
body: [
"test('${1}', () => {",
"\t${2}",
"});"
],
description: "Mocha::Test"
},
{
prefix: "ftest",
functionType: 'function',
body: [
"test('${1}', function () {",
"\t${2}",
"});"
],
description: "Mocha::Test::Function"
},
{
prefix: "entireSuite",
functionType: 'arrow',
body: [
"suite('${1}', () => {",
"",
"\tsuiteSetup(() => { });",
"",
"\ttest('${2}', () => {",
"\t\t${3}",
"\t});",
"",
"\tsuiteTeardown(() => { });",
"});"
],
description: "Mocha::EntireSuite"
},
{
prefix: "fentireSuite",
functionType: 'function',
body: [
"suite('${1}', function () {",
"",
"\tsuiteSetup(function () { });",
"",
"\ttest('${2}', function () {",
"\t\t${3}",
"\t});",
"",
"\tsuiteTeardown(function () { });",
"});"
],
description: "Mocha::EntireSuite::Function"
},
{
prefix: "exportsSuite",
functionType: 'arrow',
body: [
"exports.${1} = {",
"\t'${2}': {",
"\t\t'${3}': () => {",
"\t\t\t${4}",
"\t\t},",
"\t}",
"};"
],
description: "Mocha::ExportsSuite"
},
{
prefix: "fexportsSuite",
functionType: 'function',
body: [
"exports.${1} = {",
"\t'${2}': {",
"\t\t'${3}': function () {",
"\t\t\t${4}",
"\t\t},",
"\t}",
"};"
],
description: "Mocha::ExportsSuite::Function"
}
]
export default snippets
|
import paho.mqtt.client as mqtt
from settings import settings
ldr_value = 100  # Last LDR value received
light_stat = False  # Last light state commanded from here
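# Control logic: switch the light on when the PIR reports motion and the LDR
# reading is at or below the configured darkness threshold; switch it off
# again when motion stops.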
def turn_light(stat):
"""Switch ON/OFF the light"""
global light_stat
if stat:
payload = "ON"
light_stat = True
else:
payload = "OFF"
light_stat = False
client.publish(settings.light_cmd_topic, payload)
print("Switched Light", payload)
def on_connect(*args):
"""Callback to execute when MQTT connects"""
print("MQTT Connected!")
client.subscribe(settings.ldr_topic)
client.subscribe(settings.pir_topic)
def on_message(*args):
"""Callback to execute when MQTT receives new message"""
global ldr_value
msg: mqtt.MQTTMessage = next(a for a in args if isinstance(a, mqtt.MQTTMessage))
topic = msg.topic
payload = msg.payload.decode()
print(f"Rx @ {topic}: {payload}")
if topic == settings.ldr_topic:
ldr_value = float(payload)
elif topic == settings.pir_topic:
if payload == "ON" and not light_stat and ldr_value <= settings.ldr_threshold:
turn_light(True)
elif payload == "OFF" and light_stat:
turn_light(False)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
if __name__ == "__main__":
client.connect(settings.broker, settings.port)
try:
client.loop_forever()
except (KeyboardInterrupt, InterruptedError):
pass
print("Bye!")
|
/** Actions to perform on entering S_GROW_CACHE state. */
static int grow_cache_st_in(struct m0_sm *mach)
{
struct m0_conf_fetch_resp *resp;
int rc;
struct m0_confc_ctx *ctx = mach_to_ctx(mach);
struct m0_rpc_item *item = ctx->fc_rpc_item;
struct m0_rpc_machine *rmach = item->ri_rmachine;
M0_ENTRY("mach=%p ctx=%p", mach, ctx);
M0_PRE(item != NULL && item->ri_error == 0 && item->ri_reply != NULL &&
rmach != NULL);
resp = m0_fop_data(m0_rpc_item_to_fop(item->ri_reply));
rc = resp->fr_rc;
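	/*
	 * The first successful reply pins the confc cache version; any later
	 * version mismatch is treated as a protocol error.
	 */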
if (*confc_cache_ver(ctx) == M0_CONF_VER_UNKNOWN)
*confc_cache_ver(ctx) = resp->fr_ver;
else if (*confc_cache_ver(ctx) != resp->fr_ver)
rc = M0_ERR(-EPROTO);
if (rc == 0)
rc = cache_grow(ctx->fc_confc, resp);
m0_rpc_machine_lock(rmach);
m0_rpc_item_put(item->ri_reply);
m0_rpc_item_put(item);
m0_rpc_machine_unlock(rmach);
ctx->fc_rpc_item = NULL;
mach->sm_rc = rc;
M0_LEAVE("rc=%d retval=%s", rc, rc == 0 ? "S_CHECK" : "S_FAILURE");
return rc == 0 ? S_CHECK : S_FAILURE;
}
|
/*
* Copyright 2018 Akashic Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.yggdrash.core;
import com.google.gson.JsonObject;
import io.yggdrash.common.Sha3Hash;
import io.yggdrash.contract.Contract;
import io.yggdrash.core.event.BranchEventListener;
import io.yggdrash.core.exception.DuplicatedException;
import io.yggdrash.core.exception.FailedOperationException;
import io.yggdrash.core.store.StateStore;
import io.yggdrash.core.store.TransactionReceiptStore;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class BranchGroup {
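    // Thread-safe registry of block chains, keyed by branch id.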
private final Map<BranchId, BlockChain> branches = new ConcurrentHashMap<>();
public void addBranch(BranchId branchId, BlockChain blockChain,
BranchEventListener branchEventListener) {
if (branches.containsKey(branchId)) {
throw new DuplicatedException(branchId.toString() + " duplicated");
}
blockChain.addListener(branchEventListener);
blockChain.init();
branches.put(branchId, blockChain);
}
public BlockChain getBranch(BranchId branchId) {
return branches.get(branchId);
}
public Collection<BlockChain> getAllBranch() {
return branches.values();
}
public TransactionHusk addTransaction(TransactionHusk tx) {
if (branches.containsKey(tx.getBranchId())) {
return branches.get(tx.getBranchId()).addTransaction(tx);
}
return tx;
}
public long getLastIndex(BranchId id) {
return branches.get(id).getLastIndex();
}
public Collection<TransactionHusk> getRecentTxs(BranchId branchId) {
return branches.get(branchId).getRecentTxs();
}
public TransactionHusk getTxByHash(BranchId branchId, String id) {
return getTxByHash(branchId, new Sha3Hash(id));
}
TransactionHusk getTxByHash(BranchId branchId, Sha3Hash hash) {
return branches.get(branchId).getTxByHash(hash);
}
public void generateBlock(Wallet wallet) {
for (BlockChain blockChain : branches.values()) {
blockChain.generateBlock(wallet);
}
}
public BlockHusk addBlock(BlockHusk block) {
if (branches.containsKey(block.getBranchId())) {
return branches.get(block.getBranchId()).addBlock(block, true);
}
return block;
}
public BlockHusk getBlockByIndex(BranchId branchId, long index) {
return branches.get(branchId).getBlockByIndex(index);
}
public BlockHusk getBlockByHash(BranchId branchId, String hash) {
return branches.get(branchId).getBlockByHash(hash);
}
public int getBranchSize() {
return branches.size();
}
public StateStore<?> getStateStore(BranchId branchId) {
return branches.get(branchId).getRuntime().getStateStore();
}
public TransactionReceiptStore getTransactionReceiptStore(BranchId branchId) {
return branches.get(branchId).getRuntime().getTransactionReceiptStore();
}
public List<TransactionHusk> getUnconfirmedTxs(BranchId branchId) {
return branches.get(branchId).getUnconfirmedTxs();
}
Contract getContract(BranchId branchId) {
return branches.get(branchId).getContract();
}
public JsonObject query(JsonObject query) {
try {
BranchId branchId = BranchId.of(query.get("address").getAsString());
BlockChain chain = branches.get(branchId);
return chain.getRuntime().query(chain.getContract(), query);
} catch (Exception e) {
throw new FailedOperationException(e);
}
}
public long countOfTxs(BranchId branchId) {
return branches.get(branchId).countOfTxs();
}
}
|
package io.github.ztkmkoo.dss.core.message.rest;
import akka.actor.typed.ActorRef;
import io.github.ztkmkoo.dss.core.network.rest.enumeration.DssRestContentType;
import io.github.ztkmkoo.dss.core.network.rest.enumeration.DssRestMethodType;
import lombok.Builder;
import lombok.Getter;
import java.util.Objects;
/**
* Project: dss
* Created by: @ztkmkoo(<EMAIL>)
* Date: 20. 3. 3. 오후 10:07
*/
@Getter
public class DssRestMasterActorCommandRequest implements DssRestMasterActorCommand {
private static final long serialVersionUID = 6046370337632314401L;
private final String channelId;
private final ActorRef<DssRestChannelHandlerCommand> sender;
private final DssRestMethodType methodType;
private final DssRestContentType contentType;
private final String path;
private final String content;
@Builder
private DssRestMasterActorCommandRequest(
String channelId,
ActorRef<DssRestChannelHandlerCommand> sender,
DssRestMethodType methodType,
DssRestContentType contentType,
String path,
String content) {
Objects.requireNonNull(channelId);
Objects.requireNonNull(sender);
Objects.requireNonNull(methodType);
Objects.requireNonNull(path);
this.channelId = channelId;
this.sender = sender;
this.methodType = methodType;
this.contentType = contentType;
this.path = path;
this.content = content;
}
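    // Copy constructor so subclasses can wrap an existing request.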
protected DssRestMasterActorCommandRequest(DssRestMasterActorCommandRequest request) {
this(
request.getChannelId(),
request.getSender(),
request.getMethodType(),
request.getContentType(),
request.getPath(),
request.getContent()
);
}
@Override
public String toString() {
return "DssRestMasterActorCommandRequest{" +
"channelId: '" + channelId + "', " +
"sender: '" + (Objects.nonNull(sender)? sender.path().name() : "null") + "', " +
"methodType: '" + methodType + "', " +
"contentType: '" + contentType + "', " +
"path: '" + path + "', " +
"content: '" + content + "'" +
"}";
}
}
|
import { Handler } from "../../../../../../../shared/command-bus";
import { SCHEDULE_JOB_COMMAND_TYPE, ScheduleJobCommand } from "../commands/schedule-job.command";
import { Scheduler } from "../../../../scheduler/producer/scheduler.types";
export interface ScheduleJobHandlerProps {
scheduler: Scheduler;
}
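// Handles SCHEDULE_JOB_COMMAND_TYPE commands from the command bus by
// delegating job creation to the injected scheduler producer.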
export default class ScheduleJobHandler implements Handler<ScheduleJobCommand> {
public commandType: string = SCHEDULE_JOB_COMMAND_TYPE;
constructor(private dependencies: ScheduleJobHandlerProps) {}
async execute(command: ScheduleJobCommand) {
const { scheduler } = this.dependencies;
return scheduler.createJob(command.payload);
}
}
|