// protolambda/go-beacon-transition: eth2/beacon/altair/sync_contribution.go
package altair
import (
"errors"
"fmt"
blsu "github.com/protolambda/bls12-381-util"
"github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/ztyp/codec"
"github.com/protolambda/ztyp/tree"
. "github.com/protolambda/ztyp/view"
)
type SyncCommitteeContribution struct {
// Slot to which this contribution pertains
Slot common.Slot `yaml:"slot" json:"slot"`
// Block root for this contribution
BeaconBlockRoot common.Root `yaml:"beacon_block_root" json:"beacon_block_root"`
// The subcommittee this contribution pertains to out of the broader sync committee
SubcommitteeIndex Uint64View `yaml:"subcommittee_index" json:"subcommittee_index"`
// A bit is set if a signature from the validator at the corresponding
// index in the subcommittee is present in the aggregate `signature`.
AggregationBits SyncCommitteeSubnetBits `yaml:"aggregation_bits" json:"aggregation_bits"`
// Signature by the validator(s) over the block root of `slot`
Signature common.BLSSignature `yaml:"signature" json:"signature"`
}
func SyncCommitteeContributionType(spec *common.Spec) *ContainerTypeDef {
return ContainerType("SyncCommitteeContribution", []FieldDef{
{"slot", common.SlotType},
{"beacon_block_root", RootType},
{"subcommittee_index", Uint64Type},
{"aggregation_bits", SyncCommitteeSubnetBitsType(spec)},
{"signature", common.BLSSignatureType},
})
}
func (sc *SyncCommitteeContribution) Deserialize(spec *common.Spec, dr *codec.DecodingReader) error {
return dr.FixedLenContainer(
&sc.Slot,
&sc.BeaconBlockRoot,
&sc.SubcommitteeIndex,
spec.Wrap(&sc.AggregationBits),
&sc.Signature,
)
}
func (sc *SyncCommitteeContribution) Serialize(spec *common.Spec, w *codec.EncodingWriter) error {
return w.FixedLenContainer(
&sc.Slot,
&sc.BeaconBlockRoot,
&sc.SubcommitteeIndex,
spec.Wrap(&sc.AggregationBits),
&sc.Signature,
)
}
func (sc *SyncCommitteeContribution) ByteLength(spec *common.Spec) uint64 {
return codec.ContainerLength(
&sc.Slot,
&sc.BeaconBlockRoot,
&sc.SubcommitteeIndex,
spec.Wrap(&sc.AggregationBits),
&sc.Signature,
)
}
func (sc *SyncCommitteeContribution) FixedLength(spec *common.Spec) uint64 {
return codec.ContainerLength(
&sc.Slot,
&sc.BeaconBlockRoot,
&sc.SubcommitteeIndex,
spec.Wrap(&sc.AggregationBits),
&sc.Signature,
)
}
func (sc *SyncCommitteeContribution) HashTreeRoot(spec *common.Spec, hFn tree.HashFn) common.Root {
return hFn.HashTreeRoot(
&sc.Slot,
&sc.BeaconBlockRoot,
&sc.SubcommitteeIndex,
spec.Wrap(&sc.AggregationBits),
&sc.Signature,
)
}
func (sc *SyncCommitteeContribution) VerifySignature(spec *common.Spec, subcommitteePubkeys []*common.CachedPubkey, domFn common.BLSDomainFn) error {
pubkeys := make([]*blsu.Pubkey, 0, len(subcommitteePubkeys))
for i, pub := range subcommitteePubkeys {
if sc.AggregationBits.GetBit(uint64(i)) {
p, err := pub.Pubkey()
if err != nil {
return fmt.Errorf("found invalid pubkey in cache")
}
pubkeys = append(pubkeys, p)
}
}
dom, err := domFn(common.DOMAIN_SYNC_COMMITTEE, spec.SlotToEpoch(sc.Slot))
if err != nil {
return err
}
signingRoot := common.ComputeSigningRoot(sc.BeaconBlockRoot, dom)
sig, err := sc.Signature.Signature()
if err != nil {
return fmt.Errorf("failed to deserialize and sub-group check sync committee contribution signature: %v", err)
}
if !blsu.Eth2FastAggregateVerify(pubkeys, signingRoot[:], sig) {
return errors.New("could not verify BLS signature for sync committee contribution")
}
return nil
}
type SyncCommitteeContributionView struct {
*ContainerView
}
func AsSyncCommitteeContribution(v View, err error) (*SyncCommitteeContributionView, error) {
c, err := AsContainer(v, err)
return &SyncCommitteeContributionView{c}, err
}
|
/*
* Physics.h
*
* Created on: Apr 15, 2014
* Author: david
*/
#ifndef PHYSICS_H_
#define PHYSICS_H_
#include "Basic.h"
class material_c;
class dryMaterial_c;
class wetMaterial_c;
class physics_c;
/*----------------------------------------------------------------------------------------------------
*
* Physics-Material
*
----------------------------------------------------------------------------------------------------*/
class material_c {
public:
std::string material;
numeric_t MolarDensity; //[kg/mol]
numeric_t density; //[kg/m^3]
material_c(const std::string & s="Air", const numeric_t & md=0.02897, const numeric_t & d=1.204)
:material(s), MolarDensity(md), density(d) {}
friend std::istream & operator >> (std::istream &is , material_c & m);
friend std::ostream & operator << (std::ostream & os, const material_c & m);
};
//----------------------------------------------------------------------------------------------------
class dryMaterial_c:public material_c {
public:
numeric_t dynamicViscosity;
dryMaterial_c(const std::string & s="Air", const numeric_t & md=0.02897, const numeric_t & d=1.204
, const numeric_t & dv=1.813e-5)
: material_c(s, md, d)
, dynamicViscosity(dv) {}
friend std::istream & operator >> (std::istream & is, dryMaterial_c & m);
friend std::ostream & operator << (std::ostream & os, const dryMaterial_c & m);
};
//----------------------------------------------------------------------------------------------------
class wetMaterial_c:public dryMaterial_c {
public:
numeric_t massDiffusivity; //[m^2/s]
numeric_t contactAngle; //[Radians]
numeric_t surfaceTension; //[N/m]
numeric_t saturatedConcentration; //[mol/m^3]
wetMaterial_c(const std::string & s="Water", const numeric_t & md=0.01802, const numeric_t & d=998.2
, const numeric_t & dv=1.002e-3
, const numeric_t & mdi=2.119e-5, const numeric_t & cav=0, const numeric_t & stv=7.266e-2, const numeric_t & scv=Csat)
: dryMaterial_c(s, md, d, dv)
, massDiffusivity(mdi), contactAngle(cav), surfaceTension(stv), saturatedConcentration(scv){}
friend std::istream & operator >> (std::istream & is, wetMaterial_c & m);
friend std::ostream & operator << (std::ostream & os, const wetMaterial_c & m);
};
//----------------------------------------------------------------------------------------------------
class physics_c {
public:
physicsModel_e phyModel;
bool filmEffect, viscosityEffect;
dryMaterial_c Dry;
wetMaterial_c Wet;
numeric_t RC; //Radius of Corner, decided by manufacturing technology
numeric_t RefLength, RefM, RefDensity, RefViscosity, RefMassDiffusivity, RefSurfaceTension, RefConcentration;
numeric_t RefTime, RefVelocity, RefPressure, RefMass, RefMassFlux;
numeric_t Re, Pe, Ca; //used for calculation
numeric_t FeatureLength, FeatureDensity, FeatureVelocity, FeaturePressure, FeatureViscosity;
numeric_t ReFeature, PeFeature, CaFeature; //used to show the features
numeric_t EnvironmentConcentration;
numeric_t SaturatedConcentration, LiquidConcentration;
algorithm_e AlgorithmFlowfield, AlgorithmEvaporation;
scheme_e SchemeFlowfield, SchemeEvaporation;
bool ImplicitFlowfield, ImplicitEvaporation;
size_t FlowfieldMaxStep, EvaporationMaxStep;
numeric_t RelativeConvergeCriteria, FlowCriteria, DryCriteria;
size_t EvaporationSubStep;
size_t FlowOutputFrequency, DryOutputFrequency;
numeric_t UVRelaxFactor, PRelaxFactor, CRelaxFactor;
size_t FilmApproximationType;
numeric_t FilmApproximationCoefficient;
bool InletFlowDeveloped;
physics_c(const physicsModel_e & pm=isothermalEvaporation
, const bool & feff=false, const bool & veff=false
, const dryMaterial_c & dry=dryMaterial_c(), const wetMaterial_c & wet=wetMaterial_c()
, const numeric_t & rcv=0
, const numeric_t & rl=1
, const numeric_t & flv=1, const numeric_t & fvv=0, const numeric_t & fpv=0
, const numeric_t & ec=0
, const algorithm_e & afl=SIMPLER , const scheme_e & schfl=Hybrid, const bool & imf=true
, const algorithm_e & ae=OperatorSplitting, const scheme_e & sche=Hybrid, const bool & ime=true
, const size_t & fms=1000, const size_t & ems=1000
, const numeric_t & rcc=1e-4, const numeric_t & fc=1e-4, const numeric_t & dc=1e-4, const numeric_t & emt=1
, const size_t & ess=1
, const size_t & fof=1, const size_t & dof=1
, const numeric_t & uvrf=1, const numeric_t & prf=1, const numeric_t & crf=1
, const size_t & fatv=1, const numeric_t & facv=3
, const bool & ifdv=true)
: phyModel(pm), filmEffect(feff), viscosityEffect(veff)
, Dry(dry), Wet(wet)
, RC(rcv)
, RefLength(rl)
, RefM(wet.MolarDensity), RefDensity(wet.MolarDensity*wet.saturatedConcentration), RefViscosity(wet.dynamicViscosity), RefMassDiffusivity(wet.massDiffusivity), RefSurfaceTension(wet.surfaceTension)
, RefConcentration(wet.saturatedConcentration)
, RefTime(0), RefVelocity(0), RefPressure(0)
, RefMass(0), RefMassFlux(0)
, Re(0), Pe(0), Ca(0)
, FeatureLength(flv), FeatureDensity(dry.density), FeatureVelocity(fvv), FeaturePressure(fpv), FeatureViscosity(dry.dynamicViscosity)
, ReFeature(0), PeFeature(0), CaFeature(0)
, EnvironmentConcentration(ec)
, SaturatedConcentration(Csat), LiquidConcentration(Cliquid)
, AlgorithmFlowfield(afl), AlgorithmEvaporation(ae), SchemeFlowfield(schfl), SchemeEvaporation(sche), ImplicitFlowfield(imf), ImplicitEvaporation(ime)
, FlowfieldMaxStep(fms), EvaporationMaxStep(ems), RelativeConvergeCriteria(rcc), FlowCriteria(fc), DryCriteria(dc)
, EvaporationSubStep(ess)
, FlowOutputFrequency(fof), DryOutputFrequency(dof)
, UVRelaxFactor(uvrf), PRelaxFactor(prf), CRelaxFactor(crf)
, FilmApproximationType(fatv), FilmApproximationCoefficient(facv)
, InletFlowDeveloped(ifdv) {
RefTime=RefDensity*RefLength*RefLength/RefMassDiffusivity/RefM/RefConcentration;
RefVelocity=RefLength/RefTime;
RefPressure=RefDensity*RefVelocity*RefVelocity;
RefMass=RefDensity*pow(RefLength, 3);
RefMassFlux=RefMass/RefTime;
Re=RefDensity*RefVelocity*RefLength/RefViscosity;
Pe=RefVelocity*RefLength/RefMassDiffusivity;
Ca=RefViscosity*RefVelocity/RefSurfaceTension;
if(FeatureVelocity!=0 && FeaturePressure==0) {
FeaturePressure=FeatureDensity*FeatureVelocity*FeatureVelocity;
} else if(FeatureVelocity==0 && FeaturePressure!=0) {
FeatureVelocity=sqrt(FeaturePressure/FeatureDensity);
} else {
//do nothing
}
ReFeature=FeatureDensity*FeatureVelocity*FeatureLength/FeatureViscosity;//All these Feature Properties are used to calculate the External Flow Field
PeFeature=FeatureVelocity*FeatureLength/RefMassDiffusivity;
CaFeature=RefViscosity*FeatureVelocity/RefSurfaceTension;//Almost no use, does not reflect any phenomena
}
friend std::istream & operator >> (std::istream & is, physics_c & p);
friend std::ostream & operator << (std::ostream & os, const physics_c & p);
bool nondimensionalize();
};
#endif /* PHYSICS_H_ */
|
/**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.camelk.test.crud;
import io.fabric8.camelk.client.CamelKClient;
import io.fabric8.camelk.v1.Integration;
import io.fabric8.camelk.v1.IntegrationBuilder;
import io.fabric8.camelk.v1.IntegrationList;
import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
@EnableKubernetesMockClient(crud = true)
class IntegrationCrudTest {
CamelKClient client;
@Test
void shouldReturnEmptyList() {
IntegrationList list = client.v1().integrations().inNamespace("ns1").list();
assertNotNull(list);
assertTrue(list.getItems().isEmpty());
}
@Test
void shouldListAndGetIntegration() {
Integration integration2 = new IntegrationBuilder().withNewMetadata().withName("integration2").endMetadata().build();
client.v1().integrations().inNamespace("ns2").create(integration2);
IntegrationList integrationList = client.v1().integrations().inNamespace("ns2").list();
assertNotNull(integrationList);
assertEquals(1, integrationList.getItems().size());
Integration integration = client.v1().integrations().inNamespace("ns2").withName("integration2").get();
assertNotNull(integration);
assertEquals("integration2", integration.getMetadata().getName());
}
@Test
void shouldDeleteAIntegration() {
Integration integration3 = new IntegrationBuilder().withNewMetadata().withName("integration3").endMetadata().build();
client.v1().integrations().inNamespace("ns3").create(integration3);
boolean deleted = client.v1().integrations().inNamespace("ns3").withName("integration3").delete().size() == 1;
assertTrue(deleted);
}
@Test
void shouldLoadAIntegrationWithParams() {
String definition = String.join("\n", Arrays.asList(
"apiVersion: camel.apache.org/v1alpha1",
"kind: Integration",
"metadata:",
" name: integration4",
"spec:",
" flows:",
" - from:",
" parameters:",
" period: \"1000\"",
" steps:",
" - set-body:",
" constant: Hello Camel K from yaml",
" - to: log:info",
" uri: timer:yaml",
" traits:",
" container:",
" configuration:",
" requestCPU: \"1\""));
Integration i = client.v1().integrations().inNamespace("ns4").load(new ByteArrayInputStream(definition.getBytes()))
.createOrReplace();
assertNotNull(i);
}
}
|
Nothing attracts news organizations like Facebook. And nothing makes them more nervous.
With 1.4 billion users, the social media site has become a vital source of traffic for publishers looking to reach an increasingly fragmented audience glued to smartphones. In recent months, Facebook has been quietly holding talks with at least half a dozen media companies about hosting their content inside Facebook rather than making users tap a link to go to an external site.
Such a plan would represent a leap of faith for news organizations accustomed to keeping their readers within their own ecosystems, as well as accumulating valuable data on them. Facebook has been trying to allay their fears, according to several of the people briefed on the talks, who spoke on condition of anonymity because they were bound by nondisclosure agreements.
Facebook intends to begin testing the new format in the next several months, according to two people with knowledge of the discussions. The initial partners are expected to be The New York Times, BuzzFeed and National Geographic, although others may be added since discussions are continuing. The Times and Facebook are moving closer to a firm deal, one person said.
To make the proposal more appealing to publishers, Facebook has discussed ways for publishers to make money from advertising that would run alongside the content. |
/**
* Number of entries in Market Data message
*/
public static class NoMDEntriesDecoder
implements Iterable<NoMDEntriesDecoder>, java.util.Iterator<NoMDEntriesDecoder>
{
public static final int HEADER_SIZE = 3;
private final SnapshotFullRefreshOrderBook53Decoder parentMessage;
private DirectBuffer buffer;
private int count;
private int index;
private int offset;
private int blockLength;
NoMDEntriesDecoder(final SnapshotFullRefreshOrderBook53Decoder parentMessage)
{
this.parentMessage = parentMessage;
}
public void wrap(final DirectBuffer buffer)
{
if (buffer != this.buffer)
{
this.buffer = buffer;
}
index = -1;
final int limit = parentMessage.limit();
parentMessage.limit(limit + HEADER_SIZE);
blockLength = (int)(buffer.getShort(limit + 0, java.nio.ByteOrder.LITTLE_ENDIAN) & 0xFFFF);
count = (int)((short)(buffer.getByte(limit + 2) & 0xFF));
}
public static int sbeHeaderSize()
{
return HEADER_SIZE;
}
public static int sbeBlockLength()
{
return 29;
}
public int actingBlockLength()
{
return blockLength;
}
public int count()
{
return count;
}
public java.util.Iterator<NoMDEntriesDecoder> iterator()
{
return this;
}
public void remove()
{
throw new UnsupportedOperationException();
}
public boolean hasNext()
{
return (index + 1) < count;
}
public NoMDEntriesDecoder next()
{
if (index + 1 >= count)
{
throw new java.util.NoSuchElementException();
}
offset = parentMessage.limit();
parentMessage.limit(offset + blockLength);
++index;
return this;
}
public static int orderIDId()
{
return 37;
}
public static int orderIDSinceVersion()
{
return 0;
}
public static int orderIDEncodingOffset()
{
return 0;
}
public static int orderIDEncodingLength()
{
return 8;
}
public static String orderIDMetaAttribute(final MetaAttribute metaAttribute)
{
switch (metaAttribute)
{
case EPOCH: return "";
case TIME_UNIT: return "";
case SEMANTIC_TYPE: return "int";
case PRESENCE: return "required";
}
return "";
}
public static long orderIDNullValue()
{
return 0xffffffffffffffffL;
}
public static long orderIDMinValue()
{
return 0x0L;
}
public static long orderIDMaxValue()
{
return 0xfffffffffffffffeL;
}
public long orderID()
{
return buffer.getLong(offset + 0, java.nio.ByteOrder.LITTLE_ENDIAN);
}
public static int mDOrderPriorityId()
{
return 37707;
}
public static int mDOrderPrioritySinceVersion()
{
return 0;
}
public static int mDOrderPriorityEncodingOffset()
{
return 8;
}
public static int mDOrderPriorityEncodingLength()
{
return 8;
}
public static String mDOrderPriorityMetaAttribute(final MetaAttribute metaAttribute)
{
switch (metaAttribute)
{
case EPOCH: return "";
case TIME_UNIT: return "";
case SEMANTIC_TYPE: return "int";
case PRESENCE: return "optional";
}
return "";
}
public static long mDOrderPriorityNullValue()
{
return 0xffffffffffffffffL;
}
public static long mDOrderPriorityMinValue()
{
return 0x0L;
}
public static long mDOrderPriorityMaxValue()
{
return 0xfffffffffffffffeL;
}
public long mDOrderPriority()
{
return buffer.getLong(offset + 8, java.nio.ByteOrder.LITTLE_ENDIAN);
}
public static int mDEntryPxId()
{
return 270;
}
public static int mDEntryPxSinceVersion()
{
return 0;
}
public static int mDEntryPxEncodingOffset()
{
return 16;
}
public static int mDEntryPxEncodingLength()
{
return 8;
}
public static String mDEntryPxMetaAttribute(final MetaAttribute metaAttribute)
{
switch (metaAttribute)
{
case EPOCH: return "";
case TIME_UNIT: return "";
case SEMANTIC_TYPE: return "Price";
case PRESENCE: return "required";
}
return "";
}
private final PRICE9Decoder mDEntryPx = new PRICE9Decoder();
/**
* Order Price
*
* @return PRICE9Decoder : Order Price
*/
public PRICE9Decoder mDEntryPx()
{
mDEntryPx.wrap(buffer, offset + 16);
return mDEntryPx;
}
public static int mDDisplayQtyId()
{
return 37706;
}
public static int mDDisplayQtySinceVersion()
{
return 0;
}
public static int mDDisplayQtyEncodingOffset()
{
return 24;
}
public static int mDDisplayQtyEncodingLength()
{
return 4;
}
public static String mDDisplayQtyMetaAttribute(final MetaAttribute metaAttribute)
{
switch (metaAttribute)
{
case EPOCH: return "";
case TIME_UNIT: return "";
case SEMANTIC_TYPE: return "Qty";
case PRESENCE: return "required";
}
return "";
}
public static int mDDisplayQtyNullValue()
{
return -2147483648;
}
public static int mDDisplayQtyMinValue()
{
return -2147483647;
}
public static int mDDisplayQtyMaxValue()
{
return 2147483647;
}
public int mDDisplayQty()
{
return buffer.getInt(offset + 24, java.nio.ByteOrder.LITTLE_ENDIAN);
}
public static int mDEntryTypeId()
{
return 269;
}
public static int mDEntryTypeSinceVersion()
{
return 0;
}
public static int mDEntryTypeEncodingOffset()
{
return 28;
}
public static int mDEntryTypeEncodingLength()
{
return 1;
}
public static String mDEntryTypeMetaAttribute(final MetaAttribute metaAttribute)
{
switch (metaAttribute)
{
case EPOCH: return "";
case TIME_UNIT: return "";
case SEMANTIC_TYPE: return "char";
case PRESENCE: return "required";
}
return "";
}
public MDEntryTypeBook mDEntryType()
{
return MDEntryTypeBook.get(buffer.getByte(offset + 28));
}
public String toString()
{
return appendTo(new StringBuilder(100)).toString();
}
public StringBuilder appendTo(final StringBuilder builder)
{
builder.append('(');
//Token{signal=BEGIN_FIELD, name='OrderID', referencedName='null', description='Unique Order ID', id=37, version=0, deprecated=0, encodedLength=8, offset=0, componentTokenCount=3, encoding=Encoding{presence=REQUIRED, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='int'}}
//Token{signal=ENCODING, name='uInt64', referencedName='null', description='uInt64', id=-1, version=0, deprecated=0, encodedLength=8, offset=0, componentTokenCount=1, encoding=Encoding{presence=REQUIRED, primitiveType=UINT64, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='int'}}
builder.append("orderID=");
builder.append(orderID());
builder.append('|');
//Token{signal=BEGIN_FIELD, name='MDOrderPriority', referencedName='null', description='Order priority for execution on the order book', id=37707, version=0, deprecated=0, encodedLength=8, offset=8, componentTokenCount=3, encoding=Encoding{presence=OPTIONAL, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='int'}}
//Token{signal=ENCODING, name='uInt64NULL', referencedName='null', description='uInt64 optional', id=-1, version=7, deprecated=0, encodedLength=8, offset=8, componentTokenCount=1, encoding=Encoding{presence=OPTIONAL, primitiveType=UINT64, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=-1, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='int'}}
builder.append("mDOrderPriority=");
builder.append(mDOrderPriority());
builder.append('|');
//Token{signal=BEGIN_FIELD, name='MDEntryPx', referencedName='null', description='Order Price', id=270, version=0, deprecated=0, encodedLength=8, offset=16, componentTokenCount=6, encoding=Encoding{presence=REQUIRED, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='Price'}}
//Token{signal=BEGIN_COMPOSITE, name='PRICE9', referencedName='null', description='Price with constant exponent -9', id=-1, version=9, deprecated=0, encodedLength=8, offset=16, componentTokenCount=4, encoding=Encoding{presence=REQUIRED, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='Price'}}
builder.append("mDEntryPx=");
mDEntryPx().appendTo(builder);
builder.append('|');
//Token{signal=BEGIN_FIELD, name='MDDisplayQty', referencedName='null', description='Visible order qty', id=37706, version=0, deprecated=0, encodedLength=4, offset=24, componentTokenCount=3, encoding=Encoding{presence=REQUIRED, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='Qty'}}
//Token{signal=ENCODING, name='Int32', referencedName='null', description='int32', id=-1, version=0, deprecated=0, encodedLength=4, offset=24, componentTokenCount=1, encoding=Encoding{presence=REQUIRED, primitiveType=INT32, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='Qty'}}
builder.append("mDDisplayQty=");
builder.append(mDDisplayQty());
builder.append('|');
//Token{signal=BEGIN_FIELD, name='MDEntryType', referencedName='null', description='Market Data entry type', id=269, version=0, deprecated=0, encodedLength=1, offset=28, componentTokenCount=9, encoding=Encoding{presence=REQUIRED, primitiveType=null, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='char'}}
//Token{signal=BEGIN_ENUM, name='MDEntryTypeBook', referencedName='null', description='null', id=-1, version=0, deprecated=0, encodedLength=1, offset=28, componentTokenCount=7, encoding=Encoding{presence=REQUIRED, primitiveType=CHAR, byteOrder=LITTLE_ENDIAN, minValue=null, maxValue=null, nullValue=null, constValue=null, characterEncoding='null', epoch='null', timeUnit=null, semanticType='char'}}
builder.append("mDEntryType=");
builder.append(mDEntryType());
builder.append(')');
return builder;
}
} |
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* (C) 2010 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
/* This test is a simplification of the one in perf/manyrma.c that tests
for correct handling of the case where many RMA operations occur between
synchronization events.
This is one of the ways that RMA may be used, and is used in the
reference implementation of the graph500 benchmark.
*/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpitest.h"
#define MAX_COUNT 65536*4/16
#define MAX_RMA_SIZE 2 /* 16 in manyrma performance test */
#define MAX_RUNS 8
#define MAX_ITER_TIME 5.0 /* seconds */
typedef enum { SYNC_NONE = 0,
SYNC_ALL = -1, SYNC_FENCE = 1, SYNC_LOCK = 2, SYNC_PSCW = 4
} sync_t;
typedef enum { RMA_NONE = 0, RMA_ALL = -1, RMA_PUT = 1, RMA_ACC = 2, RMA_GET = 4 } rma_t;
/* Note GET not yet implemented */
/* By default, run only a subset of the available tests, to keep the
total runtime reasonably short. Command line arguments may be used
to run other tests. */
sync_t syncChoice = SYNC_FENCE;
rma_t rmaChoice = RMA_ACC;
static int verbose = 0;
void RunAccFence(MPI_Win win, int destRank, int cnt, int sz);
void RunAccLock(MPI_Win win, int destRank, int cnt, int sz);
void RunPutFence(MPI_Win win, int destRank, int cnt, int sz);
void RunPutLock(MPI_Win win, int destRank, int cnt, int sz);
void RunAccPSCW(MPI_Win win, int destRank, int cnt, int sz,
MPI_Group exposureGroup, MPI_Group accessGroup);
void RunPutPSCW(MPI_Win win, int destRank, int cnt, int sz,
MPI_Group exposureGroup, MPI_Group accessGroup);
int main(int argc, char *argv[])
{
int arraysize, i, cnt, sz, maxCount = MAX_COUNT, *arraybuffer;
int wrank, wsize, destRank, srcRank;
MPI_Win win;
MPI_Group wgroup, accessGroup, exposureGroup;
int maxSz = MAX_RMA_SIZE;
double start, end;
MTest_Init(&argc, &argv);
for (i = 1; i < argc; i++) {
if (strcmp(argv[i], "-put") == 0) {
if (rmaChoice == RMA_ALL)
rmaChoice = RMA_NONE;
rmaChoice |= RMA_PUT;
} else if (strcmp(argv[i], "-acc") == 0) {
if (rmaChoice == RMA_ALL)
rmaChoice = RMA_NONE;
rmaChoice |= RMA_ACC;
} else if (strcmp(argv[i], "-fence") == 0) {
if (syncChoice == SYNC_ALL)
syncChoice = SYNC_NONE;
syncChoice |= SYNC_FENCE;
} else if (strcmp(argv[i], "-lock") == 0) {
if (syncChoice == SYNC_ALL)
syncChoice = SYNC_NONE;
syncChoice |= SYNC_LOCK;
} else if (strcmp(argv[i], "-pscw") == 0) {
if (syncChoice == SYNC_ALL)
syncChoice = SYNC_NONE;
syncChoice |= SYNC_PSCW;
} else if (strcmp(argv[i], "-maxsz") == 0) {
i++;
maxSz = atoi(argv[i]);
} else if (strcmp(argv[i], "-maxcount") == 0) {
i++;
maxCount = atoi(argv[i]);
} else {
fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
fprintf(stderr,
"%s [ -put ] [ -acc ] [ -lock ] [ -fence ] [ -pscw ] [ -maxsz msgsize ]\n",
argv[0]);
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
MPI_Comm_size(MPI_COMM_WORLD, &wsize);
destRank = wrank + 1;
while (destRank >= wsize)
destRank = destRank - wsize;
srcRank = wrank - 1;
if (srcRank < 0)
srcRank += wsize;
/* Create groups for PSCW */
MPI_Comm_group(MPI_COMM_WORLD, &wgroup);
MPI_Group_incl(wgroup, 1, &destRank, &accessGroup);
MPI_Group_incl(wgroup, 1, &srcRank, &exposureGroup);
MPI_Group_free(&wgroup);
arraysize = maxSz * MAX_COUNT;
#ifdef USE_WIN_ALLOCATE
MPI_Win_allocate(arraysize * sizeof(int), (int) sizeof(int), MPI_INFO_NULL,
MPI_COMM_WORLD, &arraybuffer, &win);
if (!arraybuffer) {
fprintf(stderr, "Unable to allocate %d words\n", arraysize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
#else
arraybuffer = (int *) malloc(arraysize * sizeof(int));
if (!arraybuffer) {
fprintf(stderr, "Unable to allocate %d words\n", arraysize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Win_create(arraybuffer, arraysize * sizeof(int), (int) sizeof(int),
MPI_INFO_NULL, MPI_COMM_WORLD, &win);
#endif
if (maxCount > MAX_COUNT) {
fprintf(stderr, "MaxCount must not exceed %d\n", MAX_COUNT);
MPI_Abort(MPI_COMM_WORLD, 1);
}
if ((syncChoice & SYNC_FENCE) && (rmaChoice & RMA_ACC)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Accumulate with fence, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunAccFence(win, destRank, cnt, sz);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
if ((syncChoice & SYNC_LOCK) && (rmaChoice & RMA_ACC)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Accumulate with lock, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunAccLock(win, destRank, cnt, sz);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
if ((syncChoice & SYNC_FENCE) && (rmaChoice & RMA_PUT)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Put with fence, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunPutFence(win, destRank, cnt, sz);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
if ((syncChoice & SYNC_LOCK) && (rmaChoice & RMA_PUT)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Put with lock, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunPutLock(win, destRank, cnt, sz);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
if ((syncChoice & SYNC_PSCW) && (rmaChoice & RMA_PUT)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Put with pscw, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunPutPSCW(win, destRank, cnt, sz, exposureGroup, accessGroup);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
if ((syncChoice & SYNC_PSCW) && (rmaChoice & RMA_ACC)) {
for (sz = 1; sz <= maxSz; sz = sz + sz) {
if (wrank == 0 && verbose)
printf("Accumulate with pscw, %d elements\n", sz);
for (cnt = 1; cnt <= maxCount; cnt *= 2) {
start = MPI_Wtime();
RunAccPSCW(win, destRank, cnt, sz, exposureGroup, accessGroup);
end = MPI_Wtime();
if (end - start > MAX_ITER_TIME)
break;
}
}
}
MPI_Win_free(&win);
#ifndef USE_WIN_ALLOCATE
free(arraybuffer);
#endif
MPI_Group_free(&accessGroup);
MPI_Group_free(&exposureGroup);
MTest_Finalize(0);
return 0;
}
void RunAccFence(MPI_Win win, int destRank, int cnt, int sz)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_fence(0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Accumulate(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, MPI_SUM, win);
j += sz;
}
MPI_Win_fence(0, win);
}
}
void RunAccLock(MPI_Win win, int destRank, int cnt, int sz)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_lock(MPI_LOCK_SHARED, destRank, 0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Accumulate(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, MPI_SUM, win);
j += sz;
}
MPI_Win_unlock(destRank, win);
}
}
void RunPutFence(MPI_Win win, int destRank, int cnt, int sz)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_fence(0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Put(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, win);
j += sz;
}
MPI_Win_fence(0, win);
}
}
void RunPutLock(MPI_Win win, int destRank, int cnt, int sz)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_lock(MPI_LOCK_SHARED, destRank, 0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Put(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, win);
j += sz;
}
MPI_Win_unlock(destRank, win);
}
}
void RunPutPSCW(MPI_Win win, int destRank, int cnt, int sz,
MPI_Group exposureGroup, MPI_Group accessGroup)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_post(exposureGroup, 0, win);
MPI_Win_start(accessGroup, 0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Put(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, win);
j += sz;
}
MPI_Win_complete(win);
MPI_Win_wait(win);
}
}
void RunAccPSCW(MPI_Win win, int destRank, int cnt, int sz,
MPI_Group exposureGroup, MPI_Group accessGroup)
{
int k, i, j, one = 1;
for (k = 0; k < MAX_RUNS; k++) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_post(exposureGroup, 0, win);
MPI_Win_start(accessGroup, 0, win);
j = 0;
for (i = 0; i < cnt; i++) {
MPI_Accumulate(&one, sz, MPI_INT, destRank, j, sz, MPI_INT, MPI_SUM, win);
j += sz;
}
MPI_Win_complete(win);
MPI_Win_wait(win);
}
}
|
/**
* Finds an executable file.
*
* @param name
* The file name of the executable without a path, e.g. "dlabpro".
* The Windows executable file suffix ".exe" is appended
* automatically in Windows and ignored automatically on other
* platforms.
* @return The executable file.
* @throws FileNotFoundException
* If the executable has not been found.
*/
public static File findExecutable(String name) throws FileNotFoundException
{
String path = System.getenv("PATH");
if (name.endsWith(".exe")) name=name.substring(0,name.length()-4);
final String exeName = name;
for (String dir : path.split(";"))
{
File f = new File(dir);
String exes[] = f.list(new FilenameFilter()
{
public boolean accept(File dir, String name)
{
File f = new File(dir,name);
if (!f.canExecute()) return false;
if (exeName.equals(name)) return true;
if ((exeName+".exe").equals(name)) return true;
return false;
}
});
if (exes!=null && exes.length>0)
return (new File(f,exes[0]));
}
throw new FileNotFoundException(exeName+" not found in "+path);
} |
// demo/workwx.go
package demo
import (
"context"
"encoding/json"
"fmt"
"github.com/go-redis/redis/v8"
"github.com/zsmhub/workweixin/apis"
"log"
"time"
)
var ctx = context.Background()
var redisDb = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
// Redis-backed storage implementation for the WeCom (WeChat Work) access_token
type DcsTokenByRedis struct{}
var _ apis.DcsToken = DcsTokenByRedis{}
func (DcsTokenByRedis) Get(cacheKey string) apis.TokenInfo {
var tokenInfo apis.TokenInfo
result, err := redisDb.Get(ctx, cacheKey).Bytes()
if err == nil {
_ = json.Unmarshal(result, &tokenInfo)
} else if err != redis.Nil {
log.Println("获取 access_token 失败:", err)
}
return tokenInfo
}
func (DcsTokenByRedis) Set(cacheKey string, tokenInfo apis.TokenInfo, ttl time.Duration) error {
data, _ := json.Marshal(tokenInfo)
err := redisDb.Set(ctx, cacheKey, string(data), ttl).Err()
if err != nil {
log.Println("保存 access_token 失败:", err)
}
return err
}
func (DcsTokenByRedis) Del(cacheKey string) error {
return redisDb.Del(ctx, cacheKey).Err()
}
func (DcsTokenByRedis) Lock(cacheKey string, ttl time.Duration) bool {
if ok, _ := redisDb.SetNX(ctx, cacheKey, 1, ttl).Result(); ok {
return true
}
return false
}
func (DcsTokenByRedis) Unlock(cacheKey string) error {
return redisDb.Del(ctx, cacheKey).Err()
}
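// A minimal usage sketch, not part of the original demo: it shows how the
// Redis-backed token store above and its SetNX-based lock might be exercised
// directly. The cache key and TTL values are illustrative assumptions, not
// constants from the workweixin SDK.
func demoAccessTokenStoreUsage() {
	store := DcsTokenByRedis{}
	const key = "demo:workwx:access_token" // hypothetical cache key
	// Take the refresh lock so that only one process refreshes the token at a time.
	if store.Lock(key+":lock", 10*time.Second) {
		defer store.Unlock(key + ":lock")
		var info apis.TokenInfo // zero value; real code would fill this from the WeCom API response
		if err := store.Set(key, info, 2*time.Hour); err != nil {
			log.Println("failed to cache access_token:", err)
		}
	}
	cached := store.Get(key)
	_ = cached // the cached token info would be used for subsequent API calls
}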
// Redis-backed storage implementation for the WeCom suite_ticket
type DcsAppSuiteTicketByRedis struct{}
var _ apis.DcsAppSuiteTicket = DcsAppSuiteTicketByRedis{}
func (DcsAppSuiteTicketByRedis) Get(cacheKey string) string {
suiteTicket, err := redisDb.Get(ctx, cacheKey).Result()
if err != nil {
return ""
}
return suiteTicket
}
func (DcsAppSuiteTicketByRedis) Set(cacheKey, suiteTicket string, ttl time.Duration) {
err := redisDb.Set(ctx, cacheKey, suiteTicket, ttl).Err()
if err != nil {
log.Println("保存 suite_ticket 失败:", err)
}
}
// Logger implementation; adapt as needed
type Logger struct{}
var _ apis.Logger = Logger{}
func (Logger) Info(args ...interface{}) {
fmt.Println(args...)
}
func (Logger) Infof(template string, args ...interface{}) {
fmt.Printf(template, args...)
}
func (Logger) Error(args ...interface{}) {
fmt.Println(args...)
}
func (Logger) Errorf(template string, args ...interface{}) {
fmt.Printf(template, args...)
}
// Fetch the authorized corp data for a third-party app
func GetThirdAppAuthCorpToSdk(corpId, appSuiteId string) (apis.AuthCorp, error) {
// todo
return apis.AuthCorp{PermanentCode: "xxx", AgentId: 1}, nil
}
// Fetch the authorized corp data for a customized app (self-built app developed via a third party)
func GetCustomizedAppAuthCorpToSdk(corpId, appSuiteId string) (apis.AuthCorp, error) {
// todo
return apis.AuthCorp{PermanentCode: "xxx", AgentId: 2}, nil
} |
package mekanism.common.recipe.ingredient.chemical;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import javax.annotation.Nonnull;
import mekanism.api.JsonConstants;
import mekanism.api.annotations.NonNull;
import mekanism.api.chemical.Chemical;
import mekanism.api.chemical.ChemicalStack;
import mekanism.api.chemical.ChemicalTags;
import mekanism.api.recipes.ingredients.ChemicalStackIngredient;
import mekanism.common.recipe.ingredient.chemical.ChemicalIngredientDeserializer.IngredientType;
import net.minecraft.network.FriendlyByteBuf;
import net.minecraft.tags.TagKey;
import net.minecraftforge.registries.tags.ITag;
public abstract class TaggedChemicalStackIngredient<CHEMICAL extends Chemical<CHEMICAL>, STACK extends ChemicalStack<CHEMICAL>>
implements ChemicalStackIngredient<CHEMICAL, STACK> {
@Nonnull
private final ITag<CHEMICAL> tag;
private final long amount;
protected TaggedChemicalStackIngredient(@Nonnull ChemicalTags<CHEMICAL> tags, @Nonnull TagKey<CHEMICAL> tag, long amount) {
this(tags.getManager().map(manager -> manager.getTag(tag)).orElseThrow(), amount);
}
protected TaggedChemicalStackIngredient(@Nonnull ITag<CHEMICAL> tag, long amount) {
this.tag = tag;
this.amount = amount;
}
protected abstract ChemicalIngredientInfo<CHEMICAL, STACK> getIngredientInfo();
@Override
public boolean test(@Nonnull STACK chemicalStack) {
return testType(chemicalStack) && chemicalStack.getAmount() >= amount;
}
@Override
public boolean testType(@Nonnull STACK chemicalStack) {
return testType(Objects.requireNonNull(chemicalStack).getType());
}
@Override
public boolean testType(@Nonnull CHEMICAL chemical) {
return tag.contains(Objects.requireNonNull(chemical));
}
@Nonnull
@Override
public STACK getMatchingInstance(@Nonnull STACK chemicalStack) {
if (test(chemicalStack)) {
//Our chemical is in the tag, so we make a new stack with the given amount
return getIngredientInfo().createStack(chemicalStack, amount);
}
return getIngredientInfo().getEmptyStack();
}
@Override
public long getNeededAmount(@Nonnull STACK stack) {
return testType(stack) ? amount : 0;
}
@Override
public boolean hasNoMatchingInstances() {
return tag.isEmpty();
}
@Nonnull
@Override
public List<@NonNull STACK> getRepresentations() {
ChemicalIngredientInfo<CHEMICAL, STACK> ingredientInfo = getIngredientInfo();
//TODO: Can this be cached somehow
List<@NonNull STACK> representations = new ArrayList<>();
for (CHEMICAL chemical : tag) {
representations.add(ingredientInfo.createStack(chemical, amount));
}
return representations;
}
/**
* For use in recipe input caching.
*/
public Iterable<CHEMICAL> getRawInput() {
return tag;
}
@Override
public void write(FriendlyByteBuf buffer) {
buffer.writeEnum(IngredientType.TAGGED);
buffer.writeResourceLocation(tag.getKey().location());
buffer.writeVarLong(amount);
}
@Nonnull
@Override
public JsonElement serialize() {
JsonObject json = new JsonObject();
json.addProperty(JsonConstants.AMOUNT, amount);
json.addProperty(JsonConstants.TAG, tag.getKey().location().toString());
return json;
}
} |
/**
* Format an absolute wallclock system time string.
* May be called up to 4 times in a single printf() statement.
* Displays microsecond resolution.
*
* @return buffer to a formatted system time string.
*/
const char *
olsr_wallclock_string(void)
{
static char buf[4][sizeof("00:00:00.000000")];
static int idx = 0;
char *ret;
struct timeval now;
int sec, usec;
ret = buf[idx];
idx = (idx + 1) & 3;
gettimeofday(&now, NULL);
sec = (int)now.tv_sec + olsr_get_timezone();
usec = (int)now.tv_usec;
snprintf(ret, sizeof(buf) / 4, "%02u:%02u:%02u.%06u", (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60, usec);
return ret;
} |
On Three Series of my Sculpture
I like to work with new types of materials and to employ modern techniques, for they are representative of the kind of life I know. Someday I hope to visit the Moon; however, my 1968 series of sculpture called 'Moon Projects' (Fig. 1) is not proposed for installation there. Instead, the purpose of these sculptures is to create the feeling of something extraterrestrial on Earth. I do not continue to work long with any particular idea. Whenever I find that I am getting accustomed to one direction, I stop and seek another one. My 'Moon Projects' series was followed by 'Flashes and Sounds', a series of sculpture, each accompanied by synchronized flashing lights and sounds which seem to produce a kind of hypnotic effect on the viewer. Then I made a series called 'Rotating Objects' (Fig. 2). These are meshing forms of contrasting |
/**
* Starts the service by launching a daemon worker thread.
*
* @exception java.lang.Exception Description of Exception
*/
public void start()
throws java.lang.Exception
{
running = true;
worker = new Thread(Connection.getThreadGroup(), this, "OILClientILService-" +threadNumber++);
worker.setDaemon(true);
worker.start();
} |
/**
* Test the {@link JsonContentUtil#handle(JsonContentHandler, List)}
* method with a list with a <code>null</code> element and <code>fragment</code>
* set to <code>true</code>.
*/
@Test
public void testHandleListWithNullElementAsFragment() {
List<Object> testList = new LinkedList<Object>();
testList.add(null);
JsonContentUtil.handle(mockJsonContentHandler, testList, true);
InOrder inOrder = inOrder(mockJsonContentHandler);
inOrder.verify(mockJsonContentHandler).startArray();
inOrder.verify(mockJsonContentHandler).primitive(null);
inOrder.verify(mockJsonContentHandler).endArray();
verifyNoMoreInteractions(mockJsonContentHandler);
} |
Comparative efficacy of the front-line anti-HBV drugs in nucleos(t)ide analogue-naive chronic hepatitis B
Abstract Background: During the COVID-19 period there has been a large gap between East and West in how masks are understood, and views also differ on the mechanism of masks and their effect after use. The objective of this meta-analysis is to systematically evaluate the efficacy of masks against influenza in the community. Methods: Web of Science, PubMed, The Cochrane Library, EMBASE, and Clinical Trials will be searched electronically to collect randomized controlled trials on the efficacy of masks against influenza in the community published through Apr 2020. Two researchers will independently screen and evaluate the retrieved studies and extract the outcome indexes. RevMan 5.3 software will be used for the meta-analysis. Results: The outbreak is continuing, and we need to be prepared for a long fight. If masks are effective, we need to promote their use as soon as possible; if they are ineffective, strong evidence should be provided. This is an urgent task and our team will finish it as soon as possible. Conclusion: This review will provide stronger evidence on whether or not we should wear masks right now.
1. Introduction
Hepatitis B (HB) is an infectious disease of the liver caused by the hepatitis B virus (HBV). Chronic hepatitis B (CHB) is the most common form of HB. Its clinical manifestations include asthenia, loss of appetite, nausea, abdominal distension, liver pain, and other symptoms; the liver may be enlarged, moderately firm, and tender. Severe cases can be accompanied by signs of chronic liver disease, spider nevi, palmar erythema, and abnormal liver function. As of 2017, at least 391 million people, about 5% of the world's population, had chronic HBV infection, and more than 1 million patients were newly diagnosed. Over 750,000 people die of CHB each year, about 300,000 of them from liver cancer, so CHB remains a thorny problem, especially in less developed countries. Currently, the front-line drugs for CHB are mainly entecavir (ETV) and tenofovir; tenofovir is further divided into tenofovir disoproxil fumarate (TDF) and tenofovir alafenamide fumarate (TAF). Suppressing the hepatitis B virus is nearly a lifelong undertaking, so for untreated (nucleos(t)ide analogue-naive) patients it is important to choose a first drug that is right for them. In China, the prices of all 3 drugs are close, so the choice rests mainly on the efficacy and safety of these drugs. At present, there is still a lack of systematic research on the efficacy and safety of these 3 drugs in nucleos(t)ide analogue-naive patients with CHB, and it is difficult for patients to choose which drug to take: is the newer one better, or the more expensive one? In this study, the efficacy and safety of ETV, TDF, and TAF in nucleos(t)ide analogue-naive CHB patients will be compared to provide a basis for patients to choose the more appropriate anti-viral drug.
2. Methods
The purpose of this study is to compare the efficacy and safety of TAF, TDF, and ETV. However, if only direct pairwise comparisons were conducted, few studies would be retrieved. Therefore, lamivudine (LAM), adefovir (ADV), and placebo (PLA) will also be introduced into this study as comparators, and combining direct and indirect comparisons will make the conclusions more convincing. TAF, TDF, and ETV will nevertheless remain the focus of the analysis.
2.1. Design and registration
A network meta-analysis will be conducted to evaluate the efficacy of ETV, TDF, and TAF in nucleos(t)ide analogue-naive CHB. This protocol has been registered on the International Prospective Register of Systematic Reviews (PROSPERO); the registration number is CRD42019143233 (https://www.crd.york.ac.uk/PROSPERO). No ethical approval is required since this study uses only data that are already in the public domain.
2.2. Study selection
2.2.1. Study type.
The study type is randomized controlled trials (RCTs).
2.2.2. Study object.
Patients with definite CHB and no prior experience with nucleos(t)ide analogue therapy will be included.
The following patients will be excluded: patients who are infected with HIV or other hepatotropic viruses; those who have drug-induced liver disease, alcoholic liver disease, or autoimmune liver disease, tumors, or serious complications of the heart, kidney, brain, or other organs; and patients who are pregnant or lactating.
2.2.3. Intervention measures.
ETV group: the enrolled patients will be given the conventional dose of entecavir, 0.5 mg/day orally. TDF group: the enrolled patients will be given the conventional dose of TDF, 300 mg/day orally. TAF group: the enrolled patients will be given the conventional dose of TAF, 25 mg/day orally. LAM group: the enrolled patients will be given the conventional dose of LAM, 100 mg/day orally.
ADV group: the enrolled patients will be given the conventional dose of ADV, 10 mg/day orally.
PLA group: the enrolled patients will be given placebo once daily orally.
2.2.4. Outcome indicators.
The following outcomes will be assessed and compared among the ETV, TDF, and TAF groups; in particular, we define the virological response as undetectable HBV-DNA by PCR.
2.2.5. Exclusion criteria.
Studies whose data cannot be extracted or used, animal studies, and literature reviews will be excluded.
2.3. Data sources and searches
We will search English- and Chinese-language publications through Apr 2020 using the following databases: Web of Science, PubMed, The Cochrane Library, EMBASE, and Clinical Trials. The search terms include "Tenofovir", "Entecavir", and "Hepatitis B, Chronic". In Figure 1, we use the PubMed database as an example.
2.4. Study screening, data extraction, and risk assessment of bias
Data will be collected independently by 2 researchers. The unqualified studies will be eliminated, and the qualified ones will be selected after reading the title, abstract, and full text. Then, the research data will be extracted and checked, and disagreements will be discussed or a decision will be made by the authors. The extracted data include the following: 1. basic information of the study, including title, author, and year of publication; 2. characteristics of the included study, consisting of the study duration, the sample size of the test group and the control group, and the intervention measures; 3. the outcome indicators and data; 4. the information needed to assess the risk of bias.
The risk of bias in the included studies will be assessed using the RCT bias risk assessment tool recommended in the Cochrane Handbook for Systematic Reviews of Interventions (5.1.0). This work will also be done independently by 2 researchers.
2.5. Statistical analysis
A Bayesian hierarchical model will be used in this study, and ADDIS 1.16.8 software will be used for the network meta-analysis. Dichotomous variables will be expressed as the relative risk (RR), with the point estimate and 95% confidence interval (CI) reported as the effect statistics. The significance level is set at α = 0.05. A heterogeneity test will be conducted on the results of the included studies. If there is no statistical heterogeneity among the results (I² ≤ 50%), the network meta-analysis will be performed directly. If there is statistical heterogeneity (I² > 50%), the source of heterogeneity will be sought; if it cannot be identified, only a descriptive analysis will be performed. A consistency test is also needed for the network meta-analysis: if P > .05, there is no statistically significant difference between the direct and indirect comparisons and the two are consistent, so the consistency model will be used; otherwise, the inconsistency model will be used. After the interventions have been compared, the ranking probability table will be used to rank their relative merits. STATA 16 software will be used to draw a network diagram of the interventions, showing the direct and indirect comparisons between them. A funnel plot will be drawn for a qualitative assessment of publication bias.
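For reference, the standard textbook definitions behind the quantities named above (these formulas are not stated in the protocol itself; a and b denote the event and non-event counts in a treatment arm, c and d those in the control arm, Q is Cochran's heterogeneity statistic and df its degrees of freedom):
\[
RR = \frac{a/(a+b)}{c/(c+d)}, \qquad
95\%\,\mathrm{CI} = \exp\!\left(\ln RR \pm 1.96\sqrt{\frac{1}{a} - \frac{1}{a+b} + \frac{1}{c} - \frac{1}{c+d}}\right), \qquad
I^2 = \max\!\left(0,\ \frac{Q - \mathrm{df}}{Q}\right) \times 100\%.
\]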
2.6. Subgroup analysis
We will explore whether treatment effects for our primary outcomes are robust in subgroup analyses using the following characteristics: sex, age, race, nationality, duration of medication, etc.
2.7. Assessment of publication bias
If more than 15 articles are available for quantitative analysis, we will generate funnel plots to assess publication bias. A symmetrical distribution of the funnel plot data indicates no publication bias; otherwise, we will analyze the possible causes and give a reasonable interpretation of the asymmetric funnel plot.
2.8. Confidence in cumulative evidence
The GRADE system will be used to assess the quality of our evidence. According to this grading system, the level of evidence will be rated as high, moderate, low, or very low.
3. Discussion
As front-line drugs for the treatment of CHB, ETV, TDF, and TAF should offer assured efficacy and safety. Nevertheless, for a drug that may require lifelong use, patients should be given more information to help them make a judgment, which in turn helps them control liver inflammation and suppress viral replication more effectively. TAF has the same mechanism of action as TDF and is a nucleotide reverse transcriptase inhibitor: tenofovir diphosphate, the active metabolite of tenofovir, inhibits the viral polymerase by competing directly with the natural deoxyribonucleotide substrate and, once incorporated into the growing DNA strand, terminating the chain. Entecavir is a guanosine nucleoside analogue, and its anti-viral pharmacological action is similar to that of tenofovir. For the selection of outcomes for quantitative analysis, we pay more attention to indicators of liver injury and the amount of virus in serum, because these outcomes are most relevant to a functional cure of CHB. Other outcomes, such as HBeAg clearance, HBeAg seroconversion, and adverse effects, will be analyzed descriptively.
When including studies, a trial that compares any 2 of ETV, TDF, and TAF will enter the quantitative analysis, and a trial that compares any of these 3 drugs with placebo will also be included in the quantitative analysis. To make the research more credible, we will additionally analyze LAM and ADV quantitatively as control-group treatments, and a placebo control group will also be included; LAM and ADV were commonly used anti-hepatitis B drugs before these 3 drugs became available. There are therefore 6 interventions in this study: ETV, TDF, TAF, LAM, ADV, and PLA, although eligible placebo-controlled trials may turn out not to exist.
This study will conduct a network meta-analysis of related RCTs, provide evidence on the efficacy and safety of ETV, TDF, and TAF in CHB treatment, and compare the advantages and disadvantages of ETV, TDF, and TAF, so as to better guide clinical practice.
Author contributions
Mao-bing Chen and Hua Wang proposed the concept of this study and designed this systematic review. Mao-bing Chen registered the protocol of the systematic review and metaanalysis. Mao-bing Chen, Qi-han Zheng, Xu-wen Zheng and Hua Wang were responsible for the collection, collation and statistical processing of the literature. All authors participated in the drafting of the first draft of the paper. Mao-bing Chen reviewed and proofread the paper. All authors agree to publish the paper publicly. Conceptualization: Mao-bing Chen, Hua Wang. Data curation: Mao-bing Chen, Qi-han Zheng, Xu-wen Zheng, Hua Wang. Methodology: Mao-bing Chen, Wei-yan Cui. |
/// Iterate over the item pairs of all children of a given item.
pub fn children_of(&self, parent: DefId) -> Option<impl Iterator<Item = (DefId, DefId)> + '_> {
self.child_mapping
.get(&parent)
.map(|m| m.iter().map(move |old| (*old, self.internal_mapping[old])))
} |
import { Command, flags } from "@oclif/command";
import { PublicKey } from "@solana/web3.js";
import {
clusterFlag,
gatekeeperKeyFlag,
gatekeeperNetworkPubkeyFlag,
} from "../util/oclif/flags";
import { getTokenUpdateProperties } from "../util/oclif/utils";
export default class Freeze extends Command {
static description = "Freeze a gateway token";
static examples = [
`$ gateway refresh EzZgkwaDrgycsiyGeCVRXXRcieE1fxhGMp829qwj5TMv 54000
Refreshed
`,
];
static flags = {
help: flags.help({ char: "h" }),
gatekeeperKey: gatekeeperKeyFlag(),
gatekeeperNetworkKey: gatekeeperNetworkPubkeyFlag(),
cluster: clusterFlag(),
};
static args = [
{
name: "gatewayToken",
required: true,
description: "The gateway token to freeze",
parse: (input: string) => new PublicKey(input),
},
{
name: "expiry",
description:
"The new expiry time in seconds for the gateway token (default 15 minutes)",
default: 15 * 60, // 15 minutes
parse: (input: string) => Number(input),
},
];
async run() {
const { args, flags } = this.parse(Freeze);
const { gatewayToken, gatekeeper, service } =
await getTokenUpdateProperties(args, flags);
this.log(`Refreshing:
${gatewayToken.toBase58()}
by gatekeeper ${gatekeeper.publicKey.toBase58()}`);
const token = await service.updateExpiry(
gatewayToken,
args.expiry + Math.floor(Date.now() / 1000)
);
this.log("Refreshed");
}
}
|
/**
* Creates an item and inserts it into the player's inventory.
*/
public class ItemProvider extends Provider
{
public ItemProvider (PlayerObject user, Good good, Object[] args)
throws InvocationException
{
super(user, good, args);
_item = createItem();
if (!_item.allowsDuplicates() && user.holdsEquivalentItem(_item)) {
throw new InvocationException("m.already_owned");
}
}
@Override // documentation inherited
protected String persistentAction ()
throws PersistenceException
{
// we check here as well as on the dobj thread because another server may have
// created the item
return (_itemrepo.insertItem(_item) ? null : "m.already_owned");
}
@Override // documentation inherited
protected void rollbackPersistentAction ()
throws PersistenceException
{
_itemrepo.deleteItem(_item, "item_provider_rollback");
}
@Override // documentation inherited
protected void actionCompleted ()
{
_user.addToInventory(_item);
super.actionCompleted();
}
/** Creates the item that will be delivered by this provider. */
protected Item createItem ()
throws InvocationException
{
return _good.createItem(_user.playerId);
}
/** The item that will be delivered. */
protected Item _item;
// depends
@Inject protected ItemRepository _itemrepo;
} |
Michael Nagle/Getty Images
For those who were wondering and anticipating the details of the new digital UFC network, some of those questions have been answered.
As of now, UFC Fight Pass will cost $9.99 per month, which for a full year's service, will run you about $120.
The first three fight cards to be shown in real time are UFC in Singapore, UFC Fight Night in Macau and UFC Fight Night in London, the latter of which is headlined by Alexander Gustafsson and Jimi Manuwa.
For those across the world hoping to see these fight cards on television, they will need to check with their local cable provider. In North America, fans who do not wish to purchase the digital network, and are willing to wait for the televised tape delay, should be in luck.
As clarified in a report from mmajunkie.com, "Events will continue to air on pay-per-view, FOX, FOX Sports 1 and FOX Sports 2 in 2014, of course, but some of the UFC’s international events are geared toward region-specific audiences."
Other features that will be on UFC Fight Pass are the preliminary fights that usually air on Facebook, event replays, fight libraries (UFC, WEC, Strikeforce, Pride), UFC television programming and some original content.
In looking at this, there are bound to be mixed reactions to the digital network the UFC plans to launch.
On one hand, you have an entire library of content that could have you busy on a rainy day for hours. You could relive the Pride days, catch a card you may have missed in the past or watch some in-depth interviews with top UFC athletes.
On the other hand, if you're like me, this is $120 a year to watch a small Fight Night card each month and the preliminary fights I used to be able to watch on Facebook for free.
The idea of the digital network is great, but expecting fans not only to pay for pricey pay-per-views roughly once a month ($45-55 per show) but also to drop $10 a month for a digital network may be too much.
We will see how this experiment goes. That's most of what is known for now, but stay tuned in case more details emerge from UFC Fight Pass. |
import {
ADD_EDUCATION,
EDIT_EDUCATION,
DELETE_EDUCATION,
} from "./models/action";
import { IEducationDetails } from "./models/educationDetails";
export const add_education = (payload: IEducationDetails[]) => ({
type: ADD_EDUCATION,
payload,
});
export const edit_education = (payload: IEducationDetails[]) => ({
type: EDIT_EDUCATION,
payload,
});
export const delete_education = (payload: IEducationDetails[]) => ({
type: DELETE_EDUCATION,
payload,
});
|
/**
* Checks if the usb cable is physically connected or not
* Note: the intent here is a sticky intent so registerReceiver is actually a synchronous call and doesn't register a receiver on each call
*
* @param context a context instance
* @return boolean value that represents whether the usb cable is physically connected or not
*/
public static boolean isUSBCableConnected(Context context) {
Intent intent = context.registerReceiver(null, new IntentFilter(Intent.ACTION_BATTERY_CHANGED));
if (intent == null) {
return false;
}
int plugged = intent.getIntExtra(BatteryManager.EXTRA_PLUGGED, -1);
return plugged == BatteryManager.BATTERY_PLUGGED_AC || plugged == BatteryManager.BATTERY_PLUGGED_USB;
} |
Story highlights Abdel Fattah El-Sisi: Egypt will "rise" to "correct the mistakes of the past"
Ex-military chief takes the oath of office for a four-year term
El-Sisi won 96% of the vote in last month's presidential election
Egypt's first democratically elected president removed in coup last year
Egypt's former military chief Abdel Fattah el-Sisi was sworn in Sunday as President, vowing to lead the country through important changes.
In its next phase, Egypt "will witness a total rise on both internal and external fronts, to compensate what we have missed and correct the mistakes of the past," he said.
Despite the political upheaval Egypt has faced in recent years, el-Sisi celebrated the transition from interim President Adly Mansour. "In the long history that goes back thousands of years, our homeland did not witness democratic transfer of power. Now, for the first time, the President-elect shakes hands with the outgoing President, and together they sign a power transfer document in an unprecedented occasion," he said in an address in front of the Supreme Constitutional Court's General Assembly in Cairo.
El-Sisi won 96% of the vote in last month's presidential election for a four-year term. When he was declared the winner last week, a boisterous celebration erupted in Cairo's Tahrir Square, filled with fireworks and balloons bearing his image. Military and security personnel watched from the edges as crowds danced and sang.
Washington is looking forward to working with el-Sisi "to advance our strategic partnership and the many interests shared by the United States and Egypt," the White House press secretary said last week.
The election was called amid political turbulence that saw Mohamed Morsy -- the country's first democratically elected President after the ouster of longtime leader Hosni Mubarak -- removed from power in a July military coup.
El-Sisi, who was army chief at the time, stepped down from his military post this year to run for President.
The White House said while it is pleased that international observers were allowed to participate in the election, "we also share concerns raised by observation groups about the restrictive political environment in which this election took place."
"As Egypt looks toward parliamentary elections later this year, we urge the government to consider the recommendations of the observer groups on ways to improve the administration of future elections," the White House said.
El-Sisi's sole opponent, Hamdeen Sabahy, received 3.9% of the vote, the country's election commission said. Sabahy conceded defeat but raised questions about the political process.
Allegations were made that Sabahy campaign representatives were attacked and detained, and that el-Sisi's representatives were allowed inside polling stations, Egypt's state-run Ahram Online news agency has reported.
"We cannot give any credibility or ratification to the announced numbers of turnout or results," Sabahy said last month. "The announced results are an insult to the intelligence of the Egyptians." |
export { default as FormField } from './FormField';
|
package com.iyb.retry.annotation;
import java.lang.annotation.*;
/**
* BackOff
*
* @author 2020/3/26 17:45 created by iyb-wangyanbing
* @version 1.0.0
* @modifier:
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface BackOff {
/**
     * Retries once per second by default.
     * @return the retry interval in milliseconds (used when delay == 0)
*/
long value() default 1000L;
/**
*
     * @return the initial delay in milliseconds (effective when > 0)
*/
long delay() default 0;
/**
*
     * @return the maximum delay in milliseconds (effective when > 0)
*/
long maxDelay() default 0;
/**
     * Multiplier applied to the retry interval.
     *
     * @return the backoff multiplier (effective when > 0)
*/
double multiplier() default 0;
}
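/*
 * Usage sketch (illustrative, not from the original repository): the retry framework
 * that consumes @BackOff is not shown here, so the class names below and the way an
 * interceptor would read these attributes are assumptions. The attribute semantics
 * follow the javadoc above: value() is the fixed interval used while delay == 0;
 * otherwise delay/maxDelay/multiplier describe an exponential backoff.
 */
// Fixed interval: retry once every 2 seconds (delay == 0, so value() applies).
@BackOff(2000L)
class FixedIntervalClient { }

// Exponential backoff: start at 500 ms, double each attempt, cap at 10 seconds.
@BackOff(delay = 500, maxDelay = 10_000, multiplier = 2.0)
class ExponentialBackoffClient { }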
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from ... import factories
from ... import models
class Command(BaseCommand):
help = 'Create a sample of Offers Aspects'
def handle(self, *args, **options):
offers = models.Offer.objects.all()
for offer in offers:
op_instance = factories.OfferPriceFactory(
offer=offer)
print "OfferPriceFactory: %s :: title: %s" % (
op_instance,
op_instance.title)
onfo_instance = factories.OfferNForOneFactory(
offer=offer)
print "OfferNForOneFactory: %s :: title: %s" % (
onfo_instance,
onfo_instance.title)
# Calling save so we trigger the collection
# serialization
onfo_instance.offeraspect_ptr.save()
od_instance = factories.OfferDiscountFactory(
offer=offer)
print "OfferDiscountFactory: %s :: title: %s" % (
od_instance,
od_instance.title)
od_instance.offeraspect_ptr.save()
|
from enum import Enum
class Modifier(Enum):
static = "static"
abstract = "abstract"
override = "override"
none = ""
|
/**
* Cascades fields and their values from ancestral containers into current record.
*/
public void cascadeFromParent() {
if (parent.toCascadeToChild != null) {
return;
}
parent.toCascadeToChild = parent.cascadePairList.stream()
.map(fv -> new Pair<>(
String.format("%s.%s", toPrefixedTag(parent.recordName),
fv.getKey()), fv.getVal()))
.collect(Collectors.toList());
parent.toCascadeToChild.addAll(parent.parent.toCascadeToChild);
} |
#include "python/pyOffMeshLink.h"
#include "python/pyOffMeshLink_doc_en.h"
#include "python/pySceneObject.h"
#include "components/navigation/OffMeshLink.h"
#include "scene/SceneObject.h"
#include "scene/Scene.h"
#include "utils/PyxieHeaders.h"
using namespace pyxie;
#include <pyVectorMath.h>
#include <pythonResource.h>
namespace ige::scene
{
void OffMeshLink_dealloc(PyObject_OffMeshLink *self)
{
if (self && self->component)
{
self->component = nullptr;
}
PyObject_Del(self);
}
PyObject *OffMeshLink_str(PyObject_OffMeshLink *self)
{
return PyUnicode_FromString("C++ OffMeshLink object");
}
// Radius
PyObject *OffMeshLink_getRadius(PyObject_OffMeshLink *self)
{
return PyFloat_FromDouble(self->component->getRadius());
}
int OffMeshLink_setRadius(PyObject_OffMeshLink *self, PyObject *value)
{
if (PyFloat_Check(value))
{
float val = (float)PyFloat_AsDouble(value);
self->component->setRadius(val);
return 0;
}
return -1;
}
//! Mask
PyObject *OffMeshLink_getMask(PyObject_OffMeshLink *self)
{
return PyLong_FromLong(self->component->getMask());
}
int OffMeshLink_setMask(PyObject_OffMeshLink *self, PyObject *value)
{
if (PyLong_Check(value))
{
auto val = (uint32_t)PyLong_AsLong(value);
self->component->setMask(val);
return 0;
}
return -1;
}
//! AreaId
PyObject *OffMeshLink_getAreaId(PyObject_OffMeshLink *self)
{
return PyLong_FromLong(self->component->getAreaId());
}
int OffMeshLink_setAreaId(PyObject_OffMeshLink *self, PyObject *value)
{
if (PyLong_Check(value))
{
auto val = (uint32_t)PyLong_AsLong(value);
self->component->setAreaId(val);
return 0;
}
return -1;
}
//! Endpoint
PyObject *OffMeshLink_getEndPoint(PyObject_OffMeshLink *self)
{
auto obj = PyObject_New(PyObject_SceneObject, &PyTypeObject_SceneObject);
obj->sceneObject = self->component->getEndPoint().get();
return (PyObject*)obj;
}
int OffMeshLink_setEndPoint(PyObject_OffMeshLink *self, PyObject *value)
{
if (PyUnicode_Check(value))
{
const char* uuid = PyUnicode_AsUTF8(value);
auto sceneObject = self->component->getOwner()->getScene()->findObjectByUUID(std::string(uuid));
if (sceneObject)
{
self->component->setEndPoint(sceneObject);
return 0;
}
}
else if (value->ob_type == &PyTypeObject_SceneObject)
{
auto sceneObj = (PyObject_SceneObject*)value;
auto sceneObject = self->component->getOwner()->getScene()->findObjectByUUID(std::string(sceneObj->sceneObject->getUUID()));
self->component->setEndPoint(sceneObject);
return 0;
}
return -1;
}
//! Bidirectional
PyObject *OffMeshLink_isBidirectional(PyObject_OffMeshLink *self)
{
return PyBool_FromLong(self->component->isBidirectional());
}
int OffMeshLink_setBidirectional(PyObject_OffMeshLink *self, PyObject *value)
{
if (PyLong_Check(value))
{
auto val = (uint32_t)PyLong_AsLong(value);
self->component->setBidirectional(val);
return 0;
}
return -1;
}
PyGetSetDef OffMeshLink_getsets[] = {
{"radius", (getter)OffMeshLink_getRadius, (setter)OffMeshLink_setRadius, OffMeshLink_radius_doc, NULL},
{"mask", (getter)OffMeshLink_getMask, (setter)OffMeshLink_setMask, OffMeshLink_mask_doc, NULL},
{"areaId", (getter)OffMeshLink_getAreaId, (setter)OffMeshLink_setAreaId, OffMeshLink_areaId_doc, NULL},
{"endpoint", (getter)OffMeshLink_getEndPoint, (setter)OffMeshLink_setEndPoint, OffMeshLink_endpoint_doc, NULL},
{"bidirectional", (getter)OffMeshLink_isBidirectional, (setter)OffMeshLink_setBidirectional, OffMeshLink_bidirectional_doc, NULL},
{NULL, NULL}};
PyTypeObject PyTypeObject_OffMeshLink = {
PyVarObject_HEAD_INIT(NULL, 1) "igeScene.OffMeshLink", /* tp_name */
sizeof(PyObject_OffMeshLink), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)OffMeshLink_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
(reprfunc)OffMeshLink_str, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
OffMeshLink_getsets, /* tp_getset */
&PyTypeObject_Component, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0, /* tp_free */
};
} // namespace ige::scene
|
/// Returns a new `IpcMemoryMapper` instance.
///
/// # Arguments
///
/// * `request_tx` - A tube to send `TranslateRequest` to another process.
/// * `response_rx` - A tube to receive `Option<Vec<MemRegion>>`
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
Self {
request_tx,
response_rx,
endpoint_id,
}
} |
/**
* @brief Create a new local resource and update parent
* @param url - The bucket, path, and name of the resource
* @retval TRUE This was added and is a new item
* @retval FALSE This was not added because the url wasn't valid
*/
bool DirManCoreBase::HostNewDir(const faodel::ResourceURL &url){
faodel::ResourceURL tmp_url = url;
if(tmp_url.bucket==BUCKET_UNSPECIFIED) {
F_ASSERT(!strict_checking, "HostNewDir given a url with a null bucket");
tmp_url.bucket=default_bucket;
}
tmp_url.reference_node = my_node;
return HostNewDir(DirectoryInfo(tmp_url));
} |
def add_node(rf, text, hidden_dim, num_layers = 3, num_node_type = NUM_NODES_TYPES + 1):
with tf.variable_scope('add_node'):
graph_embedding = get_graph_embedding(rf, hidden_dim)
out = tf.concat([graph_embedding, text], axis = -1)
for layer_idx in range(num_layers):
out = dense(out, 'layer_{}'.format(layer_idx), hidden_dim)
        # tf.random.categorical expects unnormalized log-probabilities (logits),
        # so sample from the projection logits rather than their softmax.
        logits = dense(out, 'projection', num_node_type)
        out = tf.random.categorical(logits, 1)
return out |
package release
import (
"log"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/rest"
deployv1 "github.com/nilebox/k8s-deploy/pkg/apis/v1"
strategy "github.com/nilebox/k8s-deploy/pkg/release/strategy"
)
// ReleaseEventHandler can handle notifications for events to a Release resource
type ReleaseEventHandler struct {
client *rest.RESTClient
canary *strategy.Canary
}
func NewHandler(client *rest.RESTClient, clientset kubernetes.Interface) *ReleaseEventHandler {
return &ReleaseEventHandler{
client: client,
canary: &strategy.Canary{
Clientset: clientset,
},
}
}
func (h *ReleaseEventHandler) OnAdd(obj interface{}) {
release := obj.(*deployv1.Release)
log.Printf("[HANDLER] OnAdd %s", release.Metadata.SelfLink)
if release.Metadata.Name == "" {
log.Printf("ERROR Release name is empty!")
return
}
h.handle(release)
}
func (h *ReleaseEventHandler) OnUpdate(oldObj, newObj interface{}) {
oldRelease := oldObj.(*deployv1.Release)
newRelease := newObj.(*deployv1.Release)
log.Printf("[HANDLER] OnUpdate oldObj: %s", oldRelease.Metadata.SelfLink)
log.Printf("[HANDLER] OnUpdate newObj: %s", newRelease.Metadata.SelfLink)
}
func (h *ReleaseEventHandler) OnDelete(obj interface{}) {
release := obj.(*deployv1.Release)
log.Printf("[HANDLER] OnDelete %s", release.Metadata.SelfLink)
}
func (h *ReleaseEventHandler) handle(release *deployv1.Release) {
log.Printf("Processing new release %s", release.Metadata.Name)
var err error
switch release.Spec.Strategy.Type {
case "Canary":
log.Printf("Starting Canary deployment")
err = h.canary.Run(release)
case "BlueGreen":
log.Printf("Starting BlueGreen deployment")
case "":
log.Printf("Starting default deployment (Canary)")
err = h.canary.Run(release)
default:
log.Printf("Unknown deployment strategy: %s", release.Spec.Strategy.Type)
return
}
if err != nil {
log.Printf("[HANDLER] ERROR Failed to process Release object: ")
return
}
// TODO: Use Patch instead of Put? At least create a deep copy before update
	// Update the status to signal that the Release has finished its update and is ready
// releasePatch := deployv1.Release{
// Status: deployv1.ReleaseStatus{
// State: deployv1.ReleaseStateReady,
// },
// }
release.Status.State = deployv1.ReleaseStateReady
var result deployv1.Release
// err = h.client.Patch(types.MergePatchType).
// Resource(deployv1.ReleaseResourcePath).
// Namespace(api.NamespaceDefault).
// Name(release.Metadata.Name).
// Body(release).
// Do().Into(&result)
err = h.client.Put().
Resource(deployv1.ReleaseResourcePath).
Namespace(api.NamespaceDefault).
Name(release.Metadata.Name).
Body(release).
Do().Into(&result)
if err != nil {
panic(err)
}
log.Printf("PATCHED: %#v\n", result)
}
|
def plot_trajectories(self, ax, trajectories, dont_unflatten=False, jitter_scale=1):
if not dont_unflatten:
trajectories_unflat = list(self.unflat_trajectories(trajectories))
else:
trajectories_unflat = trajectories
for trajectory_unflattened in trajectories_unflat:
x, y = list(zip(*trajectory_unflattened))
x = np.array(x) + jitter_scale * np.random.rand(len(x)) / (2 * self.size)
y = np.array(y) + jitter_scale * np.random.rand(len(x)) / (2 * self.size)
ax.plot(x, y)
return ax |
import * as crypto from "crypto";
import {FileAndConsoleLogger, Locality, LocalityMap, LogConfig, SHARED_TYPES} from "bugfinder-framework";
import {Commit, GitFile} from "bugfinder-localityrecorder-commit";
import {inject, optional} from "inversify";
import {Logger} from "ts-log";
import {PredecessorDefault, PredecessorDelegation, PredecessorsUnique} from "./Predecessors";
import {PostdecessorsDefault, PostdecessorsDelegation, PostdecessorsUnique} from "./Postdecessors";
/**
* If you want logging in static methods you need to set
* CommitPath.logger to some Logger manually as inversify does not
* support static injections
*/
export class CommitPath implements Locality {
// no static injection possible
//@optional() @inject(SHARED_TYPES.logger)
static _logger?: Logger
static set logger(logger: Logger) {
CommitPath._logger = logger
CommitPath.predecessorDelegation = new PredecessorDefault(logger)
}
static get logger(): Logger {
return CommitPath._logger
}
/**
* Map of Commit.key to Commit. Used to normalize CommitPaths and reduce redundancy
* It is not a common use case to change anything in this map!
*/
public static _commitMap = new Map<string, Commit>();
/**
* All Commits of all CommitPaths known.
* It is not a common use case to change this array. Usually only CommitPath is using this
* to normalize CommitPaths to Commits and the Paths of CommitPaths
*/
public static _commits: Commit[] = [];
/**
* Delegation to calculate predecessors with different strategies
* @private
*/
private static predecessorDelegation: PredecessorDelegation = new PredecessorDefault()
/**
* Delegation to calculate postdecessors with different strategies
* @private
*/
private static postdecessorDelegation: PostdecessorsDelegation = new PostdecessorsDefault()
/**
* Set the predecessorDelegation to change the method of calculating predecessors
* @param predecessorDelegation
*/
static setPredecessorDelegation(predecessorDelegation: PredecessorDelegation) {
CommitPath.predecessorDelegation = predecessorDelegation
}
/**
     * Set the postdecessorDelegation to change the method of calculating postdecessors (progeny)
* @param postdecessorDelegation
*/
static setPostdecessorDelegation(postdecessorDelegation: PostdecessorsDelegation) {
CommitPath.postdecessorDelegation = postdecessorDelegation
}
/**
* To change method of calculating predecessors @see CommitPath.setPredecessorDelegation
     * Performance-optimized wrapper around CommitPath.getNPostdecessors
* Returns up to n postdecessors (progeny CommitPaths) for each CommitPath of localities
* @param localities
* @param n
* @param upToN
* @param uniqueMode
* @param allLocalities
*/
static getNPostdecessorMap(localities: CommitPath[], n: number, upToN: boolean, uniqueMode: boolean, allLocalities: CommitPath[]):
LocalityMap<CommitPath, CommitPath[]> {
if (n == 0) {
const val = upToN? []: null
const ret = new LocalityMap<CommitPath, CommitPath[]>()
for (const loc of localities) {
ret.set(loc, val)
}
return ret
}
CommitPath.postdecessorDelegation = uniqueMode ?
new PostdecessorsUnique(this.logger) : new PostdecessorsDefault(this.logger)
return CommitPath.postdecessorDelegation.getNPostdecessorsMap(localities, n, upToN, allLocalities)
}
/**
* To change method of calculating predecessors @see CommitPath.setPredecessorDelegation
     * Returns up to n postdecessor CommitPaths of locality. Postdecessors match the path of locality
     * Returns null on finding less than n postdecessors if upToN is false
* Set initMode after first call to false to achieve performance optimization
* @param locality
* @param n
* @param upToN also return predecessors if less than n predecessors are found. False: return null if less than
* n predecessors are found
* @param allLocalities
* @param initMode initializes map over allLocalities. If you want to call this function many times with same
* allLocalities you can set this to false after first call!
* This will achieve huge performance advantages.
*/
static getNPostdecessors(locality: CommitPath, n: number, upToN: boolean, allLocalities: CommitPath[],
initMode: any): CommitPath[] {
return CommitPath.postdecessorDelegation.getNPostdecessors(locality, n, upToN, allLocalities, initMode)
}
/**
* Returns the next postdecessor CommitPath, returns null if all localities until maxOrder were searched
* and no match was found
* @param path of the CommitPath of which the predecessor should be returned
* @param orderedLocalities a map of order (of all localities: CommitPath[]) to CommitPath[] with that order
* @param beginOrder order of the CommitPath of which the predecessor should be returned
* @param maxOrder min order of allLocalities
* @param allLocalities
*/
static getNextPostdecessor(path: string,
orderedLocalities: Map<number, CommitPath[]>,
beginOrder: number,
maxOrder: number,
allLocalities: CommitPath[]): CommitPath {
return CommitPath.predecessorDelegation.getNextPredecessor(path, orderedLocalities, beginOrder,
maxOrder, allLocalities)
}
/**
* To change method of calculating predecessors @see CommitPath.setPredecessorDelegation
     * Performance-optimized wrapper around CommitPath.getNPredecessors
* Returns up to n predecessors for each CommitPath of localities
* @param localities
* @param n
* @param upToN
* @param uniqueMode
* @param allLocalities
*/
static getNPredecessorsMap(localities: CommitPath[], n: number, upToN: boolean, uniqueMode: boolean,
allLocalities: CommitPath[]):
LocalityMap<CommitPath, CommitPath[]> {
if (n == 0) {
const val = upToN? []: null
const ret = new LocalityMap<CommitPath, CommitPath[]>()
for (const loc of localities) {
ret.set(loc, val)
}
return ret
}
CommitPath.predecessorDelegation = uniqueMode ?
new PredecessorsUnique(this.logger) : new PredecessorDefault(this.logger)
return CommitPath.predecessorDelegation.getNPredecessorsMap(localities, n, upToN, allLocalities)
}
/**
* To change method of calculating predecessors @see CommitPath.setPredecessorDelegation
* Returns up to n predecessor CommitPaths of locality. Predecessors match the path of locality
* Returns null on finding less than n predecessors if upToN is false
* Set initMode after first call to false to achieve performance optimization
* @param locality
* @param n
* @param upToN also return predecessors if less than n predecessors are found. False: return null if less than
* n predecessors are found
* @param allLocalities
* @param initMode initializes map over allLocalities. If you want to call this function many times with same
* allLocalities you can set this to false after first call!
* This will achieve huge performance advantages.
*/
static getNPredecessors(locality: CommitPath, n: number, upToN: boolean, allLocalities: CommitPath[], initMode: any)
: CommitPath[] {
return CommitPath.predecessorDelegation.getNPredecessors(locality, n, upToN, allLocalities, initMode)
}
/**
* Returns the next predecessor CommitPath, returns null if all localities until minOrder were searched
* and no match was found
* @param path of the CommitPath of which the predecessor should be returned
* @param orderedLocalities a map of order (of all localities: CommitPath[]) to CommitPath[] with that order
* @param beginOrder order of the CommitPath of which the predecessor should be returned
* @param minOrder min order of allLocalities
* @param allLocalities
*/
static getNextPredecessor(path: string,
orderedLocalities: Map<number, CommitPath[]>,
beginOrder: number,
minOrder: number,
allLocalities: CommitPath[]): CommitPath {
return CommitPath.predecessorDelegation.getNextPredecessor(path, orderedLocalities, beginOrder, minOrder, allLocalities)
}
/**
* To achieve normalization und reduce redundancy commits
* are stored static and received functional with getter method
* of CommitPath objects. All commits need to be stored once.
* Push every commit which is referenced in a CommitPath instance.
* @param commit
*/
static pushCommit(commit: Commit) {
const commitKey = commit.key();
if (CommitPath._commitMap.get(commitKey) == null) {
CommitPath._commits.push(commit);
CommitPath._commitMap.set(commitKey, commit);
}
}
/**
* Returns all commits handled by static CommitPath
*/
static get commits(): Commit[] {
return CommitPath._commits;
}
/**
* Returns a map of commit.key to commits. Used to normalize CommitPaths and reduce redundancy.
*/
static get commitMap(): Map<string, Commit> {
return CommitPath._commitMap;
}
public static removeFromMap(locality: CommitPath, map: Map<number, CommitPath[]>) {
const curOrder = locality.commit.order
const cps = map.get(curOrder)
const newCPs = []
for (const cp of cps) {
// dont push pred -> will be removed
if (cp.is(locality)) continue
newCPs.push(cp)
}
if (newCPs.length == 0) {
map.set(curOrder, undefined)
} else {
map.set(curOrder, newCPs)
}
}
/**
* Removing locality from array
* @param locality
* @param array
* @private
*/
public static removeFromCPArray(locality: CommitPath, array: CommitPath[]): CommitPath[] {
const newCPs = []
for (const cp of array) {
// dont push pred -> will be removed
if (cp.is(locality)) continue
newCPs.push(cp)
}
return newCPs
}
constructor(commit?: Commit, path?: GitFile) {
if (commit == null) return;
CommitPath.pushCommit(commit);
this.parentKey = commit.key();
this.path = path;
}
/**
* Normalizes CommitPaths so that no duplicate Commits are stored.
* All commitPaths are mapped to their commitKey and path and all unique commits are collected
* @param commitPaths
*/
static normalize(commitPaths: CommitPath[])
: { commitPaths: { parentKey: string, path: GitFile }[], commits: Commit[] } {
const cps = commitPaths.map(cp => {
return {parentKey: cp.parentKey, path: cp.path}
})
const commits: Commit[] = [];
const commitMap = new Map<string, Commit>();
commitPaths.forEach(cp => {
const cp_commit = cp.commit;
if (commitMap.get(cp_commit.key()) != null) {
return;
}
commitMap.set(cp_commit.key(), cp_commit)
commits.push(cp_commit);
})
return {
commitPaths: cps,
commits: commits
}
}
/**
* Returns an array of all commits within the commitPaths given
* @param commitPaths
*/
static getCommits(commitPaths: CommitPath[]): Commit[] {
const map = this.getCommitsMap(commitPaths);
const commits: Commit[] = [];
for (const key of map.keys()) {
const commitPath = map.get(key)[0];
const commit = commitPath.commit;
commits.push(commit);
}
return commits;
}
/**
* Returns a map of commit hashes to CommitPaths which belong to that commit(-hash)
* @param commitPaths
*/
static getCommitsMap(commitPaths: CommitPath[]): Map<string, CommitPath[]> {
const map = new Map<string, CommitPath[]>();
commitPaths.forEach((commitPath, i) => {
const commit = CommitPath._commitMap.get(commitPath.parentKey);
const val = map.get(commit.hash);
const commitPathsWithHash = val == null ? [] : val;
commitPathsWithHash.push(commitPath);
map.set(commit.hash, commitPathsWithHash);
})
return map;
}
/**
* Return an array of Commits containing each CommitPath. Array of commits is ordered in same order as
     * commitPaths given as a parameter
* @param commitPaths
*/
static getCommitsOrdered(commitPaths: CommitPath[]): Array<CommitPath[]> {
const commits: Map<string, CommitPath[]> = CommitPath.getCommitsMap(commitPaths);
const orderedCommits = new Array<CommitPath[]>();
const visited = new Map<string, boolean>();
commitPaths.forEach(commitPath => {
const parent = commitPath.commit;
if (!visited.get(parent.hash)) orderedCommits.push(commits.get(parent.hash))
visited.set(parent.hash, true);
})
return orderedCommits;
}
/**
* Gets the n predecessors of the cur CommitPath containing the CommitPaths which have the cur.hash.
* If there are less than n predecessors all predecessors are returned.
* All CommitPaths are needed to reconstruct the Commit-History.
* Strategy: Branch-Nodes are always the nearest historic nodes. @See default: git log
* @param cur
* @param all
* @param n
*/
static getPredecessorCommitPaths(cur: CommitPath, all: CommitPath[], n: number): Array<CommitPath[]> {
const commitMap = CommitPath.getCommitsMap(all);
const commits: Commit[] = [];
for (const key of commitMap.keys()) {
const commitPath: CommitPath = commitMap.get(key)[0];
const parent = commitPath.commit;
commits.push(parent);
}
// @formatter:off
const commit: Commit = cur.commit;
const curCommitPath: CommitPath = commitMap.get(commit.hash)[0];
const parentCommit: Commit = curCommitPath.commit;
// @formatter:on
const predecessorHashes = Commit.getPredecessorCommits(parentCommit, commits, n)
.map(predecessor => {
return predecessor.hash
});
const predecessors = [];
predecessorHashes.forEach(hash => {
const commitPaths = commitMap.get(hash);
predecessors.push(commitPaths);
})
return predecessors;
}
is(other: CommitPath) {
const parent: Commit = CommitPath._commitMap.get(this.parentKey);
const otherParent: Commit = other.commit;
return this.path ?
parent.is(otherParent) && this.path.path === other.path.path
: parent.is(otherParent);
}
key(): string {
const string = this.path ? this.parentKey + this.path.path : this.parentKey;
return crypto.createHash("sha1").update(string).digest("hex");
}
setMethods(localityDTO: CommitPath) {
/**
         * TODO: Reconsider whether the constructor could somehow be called here and the values set afterwards.
         * The current approach is extremely hacky and not entirely clean; who knows what else TypeScript sets when objects are created.
         * Alternative: create an empty CommitPath, iterate over the DTO via Object.keys and copy over everything that is known? deepClone?
         * Drawback: performance.
*/
// @formatter:off
localityDTO.is = CommitPath.prototype.is;
localityDTO.key = CommitPath.prototype.key;
localityDTO.setMethods = CommitPath.prototype.setMethods;
const commitPropertyDescriptors = Object.getOwnPropertyDescriptors(CommitPath.prototype).commit;
Object.defineProperty(localityDTO, "commit", {
get: commitPropertyDescriptors.get,
set: commitPropertyDescriptors.set
});
// @formatter:on
}
get commit(): Commit {
return CommitPath.commitMap.get(this.parentKey);
}
set commit(commit: Commit) {
this.parentKey = commit.key();
CommitPath.pushCommit(commit);
}
/**
* (file)path of a commit which should be measured and annotated
* file can be undefined if commit does not affect a file. These commitsPaths are needed to
* reconstruct Commit-History. example for undeinfed files: certain merge commits
*/
path?: GitFile;
parentKey: string;
}
|
/**
* Initiate a call, and join a conference
*
* This is a separate thread so that it monitor the call status.
*
* This class handles a single party (joining a conference)
* as well as two party calls.
*
* There is also support to try alternate gateways if one
* gateway can't handle a new call.
*/
public class OutgoingCallHandler extends CallHandler implements CallEventListener {
private CallEventListener csl;
private Integer callInitiatedLock = new Integer(0);
private Integer stateChangeLock = new Integer(0);
private Integer waitCallAnswerLock = new Integer(0);
private Integer waitCallEstablishedLock = new Integer(0);
private boolean lastGateway = false;
private boolean onlyOneGateway = false;
public OutgoingCallHandler(CallEventListener callEventListener, CallParticipant cp) {
addCallEventListener(this);
csl = callEventListener;
this.cp = cp;
setName("Outgoing CallHandler for " + cp);
}
public CallEventListener getRequestHandler() {
return csl;
}
/*
* Thread to start a new call and join a conference.
*/
private static int nCalls = 0; // for debugging two gateways
public void run() {
/*
* Join an existing conference or create a new one.
*/
synchronized (ConferenceManager.getConferenceList()) {
conferenceManager = ConferenceManager.getConference(cp);
if (conferenceManager == null) {
Logger.error("Couldn't start conference " + cp.getConferenceId());
sendCallEventNotification( new CallEvent(CallEvent.CANT_START_CONFERENCE));
return;
}
try {
member = conferenceManager.joinConference(cp);
memberSender = member.getMemberSender();
memberReceiver = member.getMemberReceiver();
} catch (IOException e) {
CallEvent callEvent =
new CallEvent(CallEvent.CANT_CREATE_MEMBER);
callEvent.setInfo(e.getMessage());
sendCallEventNotification(callEvent);
removeCallEventListener(this);
return;
}
}
addCall(this); // add to list of active calls
lastGateway = false;
onlyOneGateway = false;
/*
* Start the call (INVITE) and wait for it to end (BYE).
*/
ArrayList voIPGateways = SipServer.getVoIPGateways();
String gateway = cp.getVoIPGateway();
if (gateway != null) {
/*
* User specified a specific gateway. Use that one only.
*/
Logger.println("Call " + this + ": Using gateway specified for the call: " + gateway);
lastGateway = true;
onlyOneGateway = true;
placeCall();
} else if (voIPGateways.size() > 0) {
if (voIPGateways.size() == 1) {
onlyOneGateway = true;
}
lastGateway = true;
placeCall();
} else if (cp.getPhoneNumber() != null && SipServer.clientRegistrations.containsKey(cp.getPhoneNumber())) {
placeCall(); // no gateway involved, client registration extension call
} else if (cp.getPhoneNumber() != null && cp.getPhoneNumber().indexOf("sip:") == 0) {
placeCall(); // no gateway involved, direct SIP call
} else if (cp.getProtocol() != null && ("Speaker".equals(cp.getProtocol()) || "WebRtc".equals(cp.getProtocol()) || "Rtmfp".equals(cp.getProtocol()) || "Multicast".equals(cp.getProtocol()))) {
placeCall(); // WebRtc call
} else {
Logger.error("Couldn't place call " + cp);
sendCallEventNotification( new CallEvent(CallEvent.CANT_START_CONFERENCE));
}
conferenceManager.leave(member); // Remove member from conference.
removeCall(this); // remove call from active call list
removeCallEventListener(this);
done = true;
}
private void placeCall() {
String protocol = Bridge.getDefaultProtocol();
if (cp.getProtocol() != null) {
protocol = cp.getProtocol();
}
if (protocol.equalsIgnoreCase("SIP")) {
csa = new SipTPCCallAgent(this);
} else if (protocol.equalsIgnoreCase("NS")) {
csa = new NSOutgoingCallAgent(this);
} else if (protocol.equalsIgnoreCase("Speaker")) {
csa = new SpeakerCallAgent(this);
} else if (protocol.equalsIgnoreCase("Multicast")) {
csa = new MulticastCallAgent(this);
} else {
//csa = new H323TPCCallAgent(this);
reasonCallEnded =
CallEvent.getEventString(CallEvent.H323_NOT_IMPLEMENTED);
sendCallEventNotification(
new CallEvent(CallEvent.H323_NOT_IMPLEMENTED));
Logger.println("Call " + cp + ": " + reasonCallEnded);
return;
}
try {
csa.initiateCall();
synchronized (callInitiatedLock) {
callInitiatedLock.notifyAll();
}
synchronized(stateChangeLock) {
if (reasonCallEnded == null) {
//if (protocol.equalsIgnoreCase("SIP") == false) {
// /*
// * Leave Conference and rejoin with the right local media parameters
// * XXX Need to somehow get the socket from the h323 stack!
// */
// member.getMemberReceiver().setReceiveSocket();
// conferenceManager.transferMember(conferenceManager, member);
//}
try {
stateChangeLock.wait(); // wait for call to end
} catch (InterruptedException e) {
}
}
}
} catch (IOException e) {
synchronized (callInitiatedLock) {
callInitiatedLock.notifyAll();
}
if (reasonCallEnded == null) {
cancelRequest(e.getMessage());
}
Logger.println("Call " + this + " Exception " + e.getMessage());
}
}
public boolean routeIncomingSIP(CallParticipant cp)
{
return false;
}
/*
     * This method is called when there is new status information.
* Status can be a state change, dtmf key pressed,
* or speaking not speaking notification.
*/
public void callEventNotification(CallEvent callEvent) {
if (Logger.logLevel >= Logger.LOG_INFO) {
Logger.println("Notification: " + callEvent);
}
if (callEvent.equals(CallEvent.STATE_CHANGED)) {
if (callEvent.getCallState().equals(CallState.ANSWERED)) {
/*
* For two party calls
*/
synchronized(waitCallAnswerLock) {
waitCallAnswerLock.notify();
}
} else if (callEvent.getCallState().equals(CallState.ESTABLISHED)) {
/*
* For migrating calls
*/
synchronized(waitCallEstablishedLock) {
waitCallEstablishedLock.notify();
}
} else if (callEvent.getCallState().equals(CallState.ENDING)) {
CallHandler callHandler =
CallHandler.findMigratingCall(cp.getCallId());
if (callHandler == this) {
/*
* If it's a gateway error and it's not the last gateway,
* don't end the call. It will be retried with the
* alternate gateway.
*/
if (callEvent.getInfo().indexOf("gateway error") >= 0 &&
lastGateway == false) {
return;
}
callEvent = new CallEvent(CallEvent.MIGRATION_FAILED);
callEvent.setInfo("Migration failed: " + getReasonCallEnded());
sendCallEventNotification(callEvent);
}
} else if (callEvent.getCallState().equals(CallState.ENDED)) {
reasonCallEnded = callEvent.getInfo();
synchronized(waitCallAnswerLock) {
waitCallAnswerLock.notify();
}
if (reasonCallEnded.indexOf("gateway error") >= 0 &&
lastGateway == false) {
CallHandler callHandler =
CallHandler.findMigratingCall(cp.getCallId());
if (callHandler == this) {
synchronized(stateChangeLock) {
/*
* Let the outgoing call handler know so
* it can try another gateway.
*/
stateChangeLock.notify();
}
return; // don't tell the migrator yet
}
}
synchronized(waitCallEstablishedLock) {
waitCallEstablishedLock.notify();
}
synchronized(stateChangeLock) {
stateChangeLock.notify(); // the call has ended
}
/*
* If it's a gateway error and not the last gateway,
* don't end the call. It will be retried with the
* alternate gateway.
*/
if (reasonCallEnded.indexOf("gateway error") >= 0 &&
lastGateway == false) {
return;
}
cancelRequest(reasonCallEnded);
}
}
if (suppressEvent(cp, callEvent) == false) {
Application.outgoingCallNotification(callEvent);
if (csl != null) csl.callEventNotification(callEvent);
}
}
/*
* This method is called by a CallSetupAgent once the endpoint
* address is known. The endpoint address is the address from which
* we expect to receive RTP packets and to which we will send RTP packets.
*/
//public void setEndpointAddress(InetSocketAddress isa, byte mediaPayload,
// byte receivePayload, byte telephoneEventPayload) {
//
// member.initialize(this, isa, mediaPayload, receivePayload, telephoneEventPayload);
//}
/*
* To make call migration and automatic retries to alternate gateways
* transparent to the facilitator, we need to suppress certain
* status messages.
*/
private boolean suppressEvent(CallParticipant cp, CallEvent callEvent) {
/*
* Suppress status from migrated calls so the facilitator
* doesn't see CALL_ENDED from the previous call.
*
* XXX Not sure about this. I think we want the status to go through.
* The receiver of the status will see "migrated" in the message
* and can decide what to do.
*
* For the new call, we allow "No Answer". Once the call is answered
* we clear the migrateCall flag so that CALL_ENDING and CALL_END
* will be delivered to the client
*/
if (suppressStatus == true) {
if (callEvent.getInfo() != null &&
callEvent.getInfo().indexOf("No Answer") >= 0 ||
callEvent.equals(CallEvent.BUSY_HERE) ||
callEvent.equals(CallEvent.CALL_ANSWER_TIMEOUT) ||
callEvent.equals(CallEvent.MIGRATED) ||
callEvent.equals(CallEvent.MIGRATION_FAILED) ||
callEvent.equals(CallEvent.JOIN_TIMEOUT)) {
return false;
}
return true;
}
/*
* We automatically retry calls with an alternate gateway
* when there is a gateway error. The status sent to the
* socket should make the switch to the alternate gateway transparent.
* We don't want to send CALL_ENDING or CALL_ENDED until
* we've tried the alternate gateway.
* We also want to suppress CALL_PARTICIPANT_INVITED when
* trying the alternate gateway.
*/
if (lastGateway == false) {
/*
* Suppress gateway errors from default gateway
*/
if (callEvent.getInfo().indexOf("gateway error") >= 0) {
return true;
}
return false;
}
/*
* Suppress CALL_PARTICIPANT_INVITED message from alternate gateway
*/
if (onlyOneGateway == false && callEvent.equals(CallEvent.STATE_CHANGED) &&
callEvent.getCallState().equals(CallState.INVITED)) {
return true;
}
/*
* No need to suppress this message.
*/
return false;
}
/*
* terminate a call.
*/
public void cancelRequest(String reason) {
done = true;
if (csa != null) {
CallHandler migratingCall =
CallHandler.findMigratingCall(cp.getCallId());
if (migratingCall == this) {
Logger.println("Failed to Migrate: " + reason);
}
csa.cancelRequest(reason);
}
synchronized(waitCallAnswerLock) {
waitCallAnswerLock.notify();
}
CallHandler otherCall = this.otherCall;
this.otherCall = null;
if (otherCall != null) {
Logger.println("otherCall is " + otherCall.getCallParticipant());
otherCall.cancelRequest("Two party call ended");
}
}
public String getSdp() {
synchronized (callInitiatedLock) {
while (csa == null && !done && reasonCallEnded == null) {
try {
callInitiatedLock.wait();
} catch (InterruptedException e) {
}
}
return csa.getSdp();
}
}
/*
* For two party calls.
*
* When one party hangs up, the other call should be terminated as well.
*/
//private OutgoingCallHandler otherCall;
public void setOtherCall(OutgoingCallHandler otherCall) {
this.otherCall = otherCall;
}
/*
* For two party calls, we wait until the first party answers
* before calling the second party.
*
* When the first party answers, the second party is called and
* the treatment is played to the first party.
*
* When the second party answers, the treatment to the first party
* is stopped.
*/
public boolean waitForCallToBeAnswered() {
String protocol = Bridge.getDefaultProtocol();
if (cp.getProtocol() != null) {
protocol = cp.getProtocol();
}
if (protocol.equalsIgnoreCase("WebRtc") || protocol.equalsIgnoreCase("Rtmfp") || protocol.equalsIgnoreCase("Speaker") || protocol.equalsIgnoreCase("Multicast")) {
return true;
}
synchronized(waitCallAnswerLock) {
if (done || reasonCallEnded != null) {
return false;
}
try {
waitCallAnswerLock.wait();
} catch (InterruptedException e) {
}
}
if (done || reasonCallEnded != null) {
return false;
}
return true;
}
public boolean waitForCallToBeEstablished() {
if (cp.getProtocol().equalsIgnoreCase("WebRtc") || cp.getProtocol().equalsIgnoreCase("Rtmfp") || cp.getProtocol().equalsIgnoreCase("Speaker") || cp.getProtocol().equalsIgnoreCase("Multicast")) {
return true;
}
synchronized(waitCallEstablishedLock) {
if (done || reasonCallEnded != null) {
return false;
}
try {
waitCallEstablishedLock.wait();
} catch (InterruptedException e) {
}
}
if (done || reasonCallEnded != null) {
return false;
}
return true;
}
/*
* Cancel all calls started by the specified requestHandler
*/
public static void hangup(CallEventListener callEventListener, String reason) {
ArrayList<CallHandler> callsToCancel = new ArrayList();
synchronized(activeCalls) {
/*
* Make a list of all the calls we want to cancel, then cancel them.
* We have to cancel them while not synchronized or
* we could deadlock.
*/
for (int i = 0; i < activeCalls.size(); i++) {
CallHandler call = (CallHandler)activeCalls.elementAt(i);
if (call.getRequestHandler() == callEventListener) {
callsToCancel.add(call);
}
}
}
cancel(callsToCancel, reason);
}
private static void cancel(ArrayList<CallHandler> callsToCancel, String reason) {
while (callsToCancel.size() > 0) {
CallHandler call = callsToCancel.remove(0);
call.cancelRequest(reason);
}
}
/**
* String representation of this OutgoingCallHandler
* @return the string representation of this OutgoingCallHandler
*/
public String toString() {
return cp.toString();
}
} |
Multi-Lingual Radio in South Australia
However, SBS radio services are not, at present, available outside the 2EA and 3EA listening areas (Sydney/Newcastle/ Wollongong and Melbourne/Geelong respectively). In all other areas of NSW and Victoria and in all other states, multilingual broadcasting is provided exclusively by public and commercial stations. The two most notable examples are the public stations 4EB (Brisbane) and 5EBI-FM (Adelaide) which are full-time "ethnic" stations. These stations are operated by volunteer broadcasters elected from among the members of the Ethnic Broadcasting Association of Queensland (in the case of 4EB) and at public meetings of the relevant speech communities (in the case of 5EBI). The various management committees of the two stations are also elected. |
/// Consumes the builder and constructs a [`DescribeExperimentOutput`](crate::output::DescribeExperimentOutput)
pub fn build(self) -> crate::output::DescribeExperimentOutput {
crate::output::DescribeExperimentOutput {
experiment_name: self.experiment_name,
experiment_arn: self.experiment_arn,
display_name: self.display_name,
source: self.source,
description: self.description,
creation_time: self.creation_time,
created_by: self.created_by,
last_modified_time: self.last_modified_time,
last_modified_by: self.last_modified_by,
}
} |
package pilikino
import (
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"regexp"
"strings"
"time"
"github.com/araddon/dateparse"
"github.com/blevesearch/bleve"
"github.com/yuin/goldmark"
meta "github.com/yuin/goldmark-meta"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/text"
)
type noteData struct {
Filename string `json:"filename"`
Date time.Time `json:"date"`
ModTime time.Time `json:"modified"`
Title string `json:"title"`
Tags []string `json:"tags"`
Links []string `json:"links"`
Content string `json:"content"`
ParseErrors []string `json:"errors"`
}
func (note *noteData) AddLink(dest string) {
note.Links = append(note.Links, dest)
}
func (note *noteData) AddParseError(err string) {
note.ParseErrors = append(note.ParseErrors, err)
}
var tagSep = regexp.MustCompile("[^-a-z0-9_]+")
func resolveLink(index *Index, from, to string) (string, error) {
toURI, err := url.Parse(to)
if err != nil {
return "", err
}
if toURI.Host != "" {
return "", nil
}
return index.ResolveLink(from, to)
}
func extractLinks(index *Index, note *noteData, file []byte) func(n ast.Node, entering bool) (ast.WalkStatus, error) {
return func(n ast.Node, entering bool) (ast.WalkStatus, error) {
if entering && n.Kind() == ast.KindLink {
link, ok := n.(*ast.Link)
if !ok {
return ast.WalkStop, errors.New("node KindLink is not ast.Link")
}
dest := string(link.Destination)
resolved, err := resolveLink(index, note.Filename, dest)
if err != nil {
note.AddParseError(fmt.Sprintf("%v: %#v", err, dest))
} else {
note.AddLink(resolved)
}
}
return ast.WalkContinue, nil
}
}
func parseMarkdownNote(index *Index, note *noteData, file []byte) error {
markdown := goldmark.New(goldmark.WithExtensions(meta.Meta))
context := parser.NewContext()
reader := text.NewReader(file)
doc := markdown.Parser().Parse(reader, parser.WithContext(context))
frontmatter := meta.Get(context)
if titleVal, ok := frontmatter["title"]; ok {
if titleStr, ok := titleVal.(string); ok {
note.Title = titleStr
} else {
note.AddParseError("title is not string")
}
}
if ctimeVal, ok := frontmatter["date"]; ok {
if ctimeStr, ok := ctimeVal.(string); ok {
ctime, err := dateparse.ParseLocal(ctimeStr)
if err != nil {
note.AddParseError(fmt.Sprintf("ctime unrecognized: %v", err))
} else {
note.Date = ctime
}
}
}
if tagField, ok := frontmatter["tags"]; ok {
var tagList []string
if tagStr, ok := tagField.(string); ok {
tagList = tagSep.Split(tagStr, -1)
} else if tagListVar, ok := tagField.([]interface{}); ok {
tagList = make([]string, len(tagListVar))
for i, v := range tagListVar {
tagList[i], _ = v.(string)
}
}
note.Tags = []string{}
for _, tag := range tagList {
tag = strings.TrimSpace(tag)
if tag != "" {
note.Tags = append(note.Tags, tag)
}
}
}
if err := ast.Walk(doc, extractLinks(index, note, file)); err != nil {
note.AddParseError(err.Error())
}
return nil
}
func indexNote(index *Index, batch *bleve.Batch, path string, info os.FileInfo) error {
if !strings.HasSuffix(path, ".md") {
return nil
}
content, err := ioutil.ReadFile(path)
if err != nil {
return err
}
note := noteData{
Filename: path,
ModTime: info.ModTime(),
Title: path,
Content: string(content),
}
	err = parseMarkdownNote(index, &note, content)
	if indexErr := batch.Index(path, note); indexErr != nil {
		note.ParseErrors = append(note.ParseErrors, indexErr.Error())
}
return err
}
|
def echoview(Sv, idim, jdim,
             thr=-70, mincan=(3,10), maxlink=(3,15), minsho=(3,15)):
    """
    Mask shoal-like features in an Sv echogram with an Echoview-style algorithm:
    threshold Sv at `thr`, drop candidates smaller than `mincan` (height, width),
    link candidates closer than `maxlink`, drop linked shoals smaller than
    `minsho`, and flag the edge region that cannot be fully evaluated.
    Returns the feature mask and the edge mask.
    """
if np.isnan(idim).any():
raise Exception('Can not proceed with NAN values in i dimension')
if np.isnan(jdim).any():
raise Exception('Can not proceed with NAN values in j dimension')
mask = np.ma.masked_greater(Sv, thr).mask
if isinstance(mask, np.bool_):
mask = np.zeros_like(Sv, dtype=bool)
candidateslabeled= nd.label(mask, np.ones((3,3)))[0]
candidateslabels = pd.factorize(candidateslabeled[candidateslabeled!=0])[1]
for cl in candidateslabels:
candidate = candidateslabeled==cl
idx = np.where(candidate)[0]
jdx = np.where(candidate)[1]
candidateheight = idim[max(idx+1)] - idim[min(idx)]
candidatewidth = jdim[max(jdx+1)] - jdim[min(jdx)]
if (candidateheight<mincan[0]) | (candidatewidth<mincan[1]):
mask[idx, jdx] = False
linked = np.zeros(mask.shape, dtype=int)
shoalslabeled = nd.label(mask, np.ones((3,3)))[0]
shoalslabels = pd.factorize(shoalslabeled[shoalslabeled!=0])[1]
for fl in shoalslabels:
shoal = shoalslabeled==fl
i0 = min(np.where(shoal)[0])
i1 = max(np.where(shoal)[0])
j0 = min(np.where(shoal)[1])
j1 = max(np.where(shoal)[1])
i00 = np.nanargmin(abs(idim-(idim[i0]-(maxlink[0]+1))))
i11 = np.nanargmin(abs(idim-(idim[i1]+(maxlink[0]+1))))+1
j00 = np.nanargmin(abs(jdim-(jdim[j0]-(maxlink[1]+1))))
j11 = np.nanargmin(abs(jdim-(jdim[j1]+(maxlink[1]+1))))+1
around = np.zeros_like(mask, dtype=bool)
around[i00:i11,j00:j11] = True
neighbours = around & mask
neighbourlabels = pd.factorize(shoalslabeled[neighbours])[1]
neighbourlabels = neighbourlabels[neighbourlabels!=0]
neighbours = np.isin(shoalslabeled, neighbourlabels)
if (pd.factorize(linked[neighbours])[1]==0).all():
linked[neighbours] = np.max(linked)+1
else:
formerlabels = pd.factorize(linked[neighbours])[1]
minlabel = np.min(formerlabels[formerlabels!=0])
linked[neighbours] = minlabel
for fl in formerlabels[formerlabels!=0]:
linked[linked==fl] = minlabel
linkedlabels = pd.factorize(linked[linked!=0])[1]
for ll in linkedlabels:
linkedshoal = linked==ll
idx = np.where(linkedshoal)[0]
jdx = np.where(linkedshoal)[1]
linkedshoalheight = idim[max(idx+1)] - idim[min(idx)]
linkedshoalwidth = jdim[max(jdx+1)] - jdim[min(jdx)]
if (linkedshoalheight<minsho[0]) | (linkedshoalwidth<minsho[1]):
mask[idx, jdx] = False
mask_ = np.ones(mask.shape, dtype=bool)
edgeheight = np.max([mincan[0], maxlink[0], minsho[0]])
edgewidth = np.max([mincan[1], maxlink[1], minsho[1]])
i0 = np.where((idim-idim[ 0]) - edgeheight >= 0)[0][ 0]
i1 = np.where((idim-idim[-1]) + edgeheight < 0)[0][-1]+1
j0 = np.where((jdim-jdim[ 0]) - edgewidth >= 0)[0][ 0]
j1 = np.where((jdim-jdim[-1]) + edgewidth < 0)[0][-1]+1
mask_[i0:i1, j0:j1] = False
return mask, mask_ |
/**
* Process categories that were moved in MY program, but are not
* conflicts, i.e., not renamed, moved, or deleted in LATEST.
* @param id category ID
*/
private void processCategoryMoved(long id) {
Category myCat = dtms[MY].getCategory(id);
if (myCat == null) {
return;
}
if (categoryWasMoved(id, dtms[MY])) {
Category resultCat = dtms[RESULT].getCategory(id);
if (resultCat != null) {
Category myParent = myCat.getParent();
Category resultNewParent = dtms[RESULT].getCategory(myParent.getCategoryPath());
if (resultNewParent == null) {
resultNewParent = dtms[RESULT].createCategory(myParent.getCategoryPath());
}
moveCategory(resultNewParent, resultCat);
}
}
} |
from functools import wraps


def In(container, msg=None):
    @wraps(In)
    def validator(value):
        if value not in container:
raise Invalid(msg or 'value is not allowed')
return value
return validator |
/**
* Return true if a tap at (x, y) should trigger a flip to the next page.
*/
protected boolean hitsNextPage(float x, float y) {
if (isLayoutRtl()) {
return (x < getViewportOffsetX() + getPaddingLeft() + mPageSpacing);
}
return (x > (getViewportOffsetX() + getViewportWidth() -
getPaddingRight() - mPageSpacing));
} |
def showChannelNames(self, visible=True):
self.channelNamesVisible = visible
if self.viewFrame is not None:
self.viewFrame.showChannelNames(visible) |
Before Devin Patrick Kelley carried out the deadliest mass shooting in Texas history on Sunday, the former airman had displayed a pattern of violent and disturbing behavior.
Domestic violence. Sexual assault accusations. Animal cruelty. Escape from a mental health facility. Threatening text messages. An obsession with guns and mass shootings.
The signs may seem obvious now, though psychiatrists caution against what they call hindsight bias.
But in the moment, none of it prevented Kelley from buying the semi-automatic rifle he used to kill 25 people and an unborn child at First Baptist Church in Sutherland Springs.
As investigators piece together a portrait of the dead gunman, questions remain about what set him off and what possibly could have been done to prevent the massacre, renewing fierce debates over gun control.
President Donald Trump called the mass shooting a "mental health problem at the highest level," not a "guns situation." Though Kelley's mental health history has not been fully disclosed, experts cautioned against focusing on that factor to the exclusion of others. No single factor on its own, including a diagnosis of mental illness, can predict how a person will act. But, certain indicators carry more weight than others, said Duke University professor Jeffrey Swanson.
"A history of violent behavior is a far better predictor of future violence than mental illness," said Swanson, a professor in psychiatry and behavioral sciences at Duke University who specializes in gun violence and mental illness. Though prevention is elusive, he said, "we could have better criteria for buying guns based on actual risk, and legal tools to remove guns based on risk."
'Somebody really dropped the ball'
Certain measures exist to prevent people like Kelley from obtaining firearms. His domestic violence record alone should have barred him under Texas law from purchasing four guns between 2014 and 2017. But his name did not show up in the federal database that licensed gun dealers are required to check before selling someone a firearm.
"Somebody really dropped the ball," former Air Force chief prosecutor Col. Don Christensen told CNN.
The Air Force acknowledged Monday it did not appropriately relay Kelley's court-martial conviction for domestic assault to civilian law enforcement, preventing it from appearing in three databases, including the FBI's National Instant Criminal Background Check System (NICS).
"Had his information been in the database, it should have prevented gun sales to Kelley," the Air Force said in a statement.
The Air Force said the Air Force inspector general is conducting an investigation into what happened.
In total, the shooter bought four weapons -- two in Colorado and two in Texas, the Bureau of Alcohol, Tobacco, Firearms and Explosives said.
Two handguns were found in his vehicle. A Ruger AR-556 rifle was found in front of the church, where Kelley dropped it during a standoff with a local resident.
Kelley bought the rifle in April 2016 from an Academy Sports + Outdoors store in San Antonio, a law enforcement official told CNN. When Kelley filled out the background check, he did not check the box indicating he had a disqualifying criminal history, the official said. He listed an address in Colorado Springs, Colorado, when he bought the rifle, the official said.
At one point, the shooter tried to get a license to carry a gun in Texas, but the state denied it, officials said. In Texas, a permit is not required to buy a handgun or a long gun, and gun owners do not have to be licensed.
Loopholes notwithstanding, convictions for misdemeanor domestic violence and felonies are entered into the NICS databases. So are domestic violence restraining orders and dishonorable discharges from the military. But Kelley did not receive a dishonorable discharge for his assault convictions. After serving 12 months in a military prison, he received a bad conduct discharge in 2014.
A history of domestic violence
Kelley served in logistics readiness at Holloman Air Force Base in New Mexico from 2010 until his discharge for assaulting his then-wife and stepson. The two married in 2010 and divorced in 2012, according to court records.
While he was awaiting his court martial in June 2012, he escaped from Peak Behavioral Health Systems in Santa Teresa, New Mexico, according to documents from the El Paso Police Department obtained by CNN affiliate KVIA. Officers were told that Kelley "suffered from mental disorders and was a danger to himself," having previously been caught sneaking firearms onto the base, the documents said. Kelley had made death threats toward his chain of command.
When officers found Kelley, he did not resist or make any comments about harming himself or others to the officers, the documents said.
Kelley initially faced multiple charges, according to military records: assault and battery against his spouse, aggravated assault against his stepson and four charges involving firearms, including two of pointing a loaded firearm at his wife and two of pointing an unloaded firearm.
Military prosecutors dropped the firearms charges before trial in an agreement in which Kelley pleaded guilty to aggravated assault against the child and assault against his wife.
As part of his plea, Kelley admitted to hitting his stepson on the head and body "with a force likely to produce death or grievous bodily harm." As for his then-wife, Kelley admitted to hitting and kicking her, choking her and pulling her hair.
Kelley repeatedly shook the boy, leading to injuries that fractured the young boy's skull and caused internal bleeding, Christensen told CNN.
The nature of the crimes suggested a propensity for violence that should have been taken more seriously, Christensen said. But the military justice system doesn't offer adequate treatment or services for transitioning to civilian life, leaving Kelley's history of violence unaddressed, he said.
Allegations of animal cruelty and sexual assault
After his release in 2014, Kelley landed in an RV park in Colorado Springs, Colorado. While there, a neighbor told police he saw him punch a dog.
Kelley denied the allegation but eventually pleaded guilty in October 2014 to an animal cruelty charge, according to court records. He was ordered to pay more than $500 in fines and restitution and complete an animal cruelty evaluation as part of a deferred sentence of 18 months of unsupervised probation. In March 2016, after Kelley met the terms, the guilty plea was withdrawn and the case was dismissed, the records show.
Meanwhile, in his hometown of New Braunfels, Texas, Kelley had started dating Danielle Shields, who would become his second wife. The couple married in April 2014, when she was 19 and he was 23.
About two months before their marriage, a friend of hers contacted the Comal County Sheriff's Office to report that Shields said her boyfriend was abusing her, according to law enforcement records. When members of the sheriff's office showed up, they were told it was a "misunderstanding and teenage drama," according to notes from the incident.
It was not Kelley's first contact with the Comal County Sheriff's Office. In October 2013, he was investigated for an alleged sexual assault. By then, he was divorced from his first wife, but she was listed as a witness. "The alleged sexual assault investigation stalled sometime in October 2013," the sheriff's office said, leaving its status unclear.
CNN has been unable to reach the women who accused Kelley of sexual assault.
Co-worker: 'He wasn't chatty'
Kelley was supposed to show up Sunday at his security guard job at the Summit Vacation Resort in New Braunfels, manager Claudia Varjabedian told CNN. His shift started at 4 p.m. No one had heard from him when they turned on the news.
"We couldn't get our mouths to close. We were all shocked," Varjabedian said.
He had only worked at the family resort for five weeks, she said, describing him as quiet. "He wasn't chatty with people, but he was polite."
He never let on to his plans, she said. On Friday, he asked for clarification on a policy related to vehicles on the property.
"He didn't seem like he had any plans to leave or do anything," she said. "I wish I could help by telling other people what to look out for to prevent this in the future."
In June, Kelley was registered as a noncommissioned security officer, affiliated with Schlitterbahn Waterpark and Resort in New Braunfels, where he lived, according to the state Department of Public Safety. In order to complete the registration, Kelley would have submitted fingerprints and fees.
Kelley worked at the waterpark for 5½ weeks this summer as a seasonal unarmed night security guard before he was terminated, Schlitterbahn representative Winter Prosapio told CNN. The park has not provided a reason for his termination.
Troubling Facebook posts
Kelley was consumed by a dispute with his mother-in-law, investigators said. He sent her threatening text messages and texted her as recently as Sunday morning -- not long before he opened fire in the church. He fled the scene after an exchange of gunfire with a local resident and was found dead in his car a few miles down the road. He had shot himself.
Kelley's social media posts suggested a fascination with mass shootings, according to a law enforcement official.
In addition to "non-God beliefs, atheism," his recent Facebook posts included "a lot of gun violence" and weapons that he was into," high school classmate Christopher Leo Longoria said.
Kelley had been launching personal attacks against friends on Facebook, too, Longoria said, leading him to unfriend Kelley about a month ago. He didn't want to see the posts on his Facebook feed.
A screenshot of an October 29 Facebook post, ostensibly from Kelley, shows a picture of a rifle. In the status field above the picture is the message, "She's a bad bitch."
Kelley's Facebook page was taken down Sunday, hours after the shooting. But people in the Sutherland Springs area confirmed its existence and contents with CNN.
A gun expert told CNN that the weapon in the picture could be a Ruger AR-556, the type of gun used in Sunday's shooting. It is not clear whether the rifle in the picture was the gun from the massacre.
Neighbor heard target practice
Robert Gonzalez lives near Kelley's residence in New Braunfels, about 35 miles north of Sutherland Springs. In the week before the shooting, he heard gunfire from Kelley's property every morning.
"A load of rounds that would always be going off this time," he told CNN. "I was concerned because it was so close to our house."
Gonzalez said his time in the armed forces helps him identify guns by their sound. What he heard from his neighbor's house could have been a .45 or an assault rifle, a burst of rapid fire all of a sudden, he said.
It's not unusual for people in the area to practice firing on targets, said Gonzalez. This time, though, it was an unusual amount of gunfire.
Correction: A previous version of this story misidentified the style of rifle Kelley used. |
/**
* A FeatureType class for Feathr's TENSOR feature type.
*/
public class TensorFeatureType extends FeatureType {
private final TensorType _tensorType;
private TensorFeatureType(TensorType tensorType) {
super(BasicType.TENSOR);
_tensorType = Objects.requireNonNull(tensorType);
}
public static TensorFeatureType withTensorType(TensorType tensorType) {
return new TensorFeatureType(tensorType);
}
public TensorType getTensorType() {
return _tensorType;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TensorFeatureType that = (TensorFeatureType) o;
return _tensorType.equals(that._tensorType);
}
@Override
public int hashCode() {
return Objects.hash(_tensorType);
}
@Override
public String toString() {
return "TensorFeatureType{" + "_tensorType=" + _tensorType + '}';
}
} |
import {Observable} from 'data/observable';
import {AppIconChanger} from 'nativescript-app-icon-changer';
export class HelloWorldModel extends Observable {
private appIconChanger: AppIconChanger;
public showFeedback: boolean = true;
constructor() {
super();
this.appIconChanger = new AppIconChanger();
console.log("Current alternate icon: " + this.appIconChanger.currentAlternateIcon());
}
public changeIconRed(): void {
this.changeIcon('icon-red');
}
public changeIconBlue(): void {
this.changeIcon('icon-blue');
}
public changeIconGreen(): void {
this.changeIcon('icon-green');
}
public changeIconYellow(): void {
this.changeIcon('icon-yellow');
}
public changeIconPurple(): void {
this.changeIcon('icon-purple');
}
public changeIconDefault(): void {
this.changeIcon(null);
}
private changeIcon(name: string | null): void {
this.appIconChanger.changeIcon({
iconName: name,
suppressUserNotification: !this.showFeedback // default true
}).then(() => {
console.log(`Icon changed to ${name}.`);
}, (error: any) => {
console.log(`Error code: ${error.code}`);
console.log(`Error message: ${error.message}`);
});
}
} |
<filename>StudentWork/Lama Alyousef/Java/src/com/company/animal/Dog.java
package com.company.animal;
public class Dog implements Animal{
private String name;
public Dog(String name){
this.name=name;
}
@Override
public void eat() {
System.out.println("nom,nom, mo,");
}
@Override
public void talk(String voice) {
System.out.println("sip,sip,sip");
}
}
/* public class Dog extends Animal{
public Dog(){
setName("mutt");
}
public Dog(String name){
setName(name);
}
// Accessor
//Override
//Animal.getName / Dog.getName
public String getName() {
System.out.println("Inside Dog.getName");
return super.getName();
}
public void talk(){
System.out.println("bark");
//System.out.println("name: " + name);
}
// Override talk method but this with parameter
public void talk(String voice){
System.out.println(voice);
}
} */
|
Solitary fibrous tumor of the orbit presenting in pregnancy

A 32-year-old woman, three months pregnant, reported with the complaint of protrusion of the right eye for six months. She gave history of rapid protrusion of eyeball for the last two months along with the history of double vision for the last one month. Computer tomography (CT) scan revealed a well-defined mass lesion in the intraconal space of the right orbit which was excised through a lateral orbitotomy approach. Histological examination and immunohistochemistry revealed a solitary fibrous tumor, which showed a rapid progression in pregnancy.
traumatic cataract if any and monitor its progression. In our case, the fact that the vitreous face was intact, there was no lens matter in the vitreous and the edges of the PCT were fibrosed allowed the surgeon to proceed with phacoemulsification. The size and shape of the PCT allowed the surgeon to assess that a posterior chamber (PC) IOL could be implanted. Additionally, the absence of any vitreous prolapse was a good prognostic indicator.
Recently, Por et al. suggested that blunt trauma-induced blowout PCT in children occurs due to a combination of forces: equatorial stretching pulls on the zonule and stretches the capsule and this anterior-posterior force tends to push it back, thereby increasing the probability of the posterior capsule giving way. It usually occurs in young children where the lens matter is soft and elastic and the zonules are strong. The vitreous face maintains its integrity and the lens matter bulging through this tear in the posterior capsule gives an erroneous clinical profile of posterior lenticonus, a term we suggest as posterior pseudo-lenticonus.
Previously, such cases have been managed by a pars plana lensectomy. Management using a clear corneal incision, phacoaspiration and PCIOL implantation in the capsular bag has also been well established now. This report highlights the use of Scheimpflug imaging in visualizing and quantifying the PCT. While slit-lamp examination does illustrate the defect, the primary advantage of the rotating Scheimpflug camera is that it allows accurate and objective quantification of the PCT. Additionally, changes in the dimensions of the tear may be followed in cases where the surgeon decides to delay the surgery. The centration and tilt of the IOL can also be objectively documented following surgery. Using similar advanced imaging techniques could better help elucidate the pathogenesis of such injuries.
Jayanta K Das, Angshuman Sen Sharma, Akshay Ch Deka, Dipankar Das
Solitary fibrous tumor (SFT) is a rare spindle-cell neoplasm usually found in the pleura but has been recently described in extra-pleural sites including the orbit. We report an orbital SFT presenting in a 32-year-old lady with rapid progression during pregnancy.
Case Report
A 32-year-old woman, three months pregnant, reported with the complaint of protrusion of the right eye for six months, which had progressed rapidly for the last two months. She also gave history of double vision for the last one month. Extraocular movements were grossly restricted in the right eye. Anterior segment examination was unremarkable. The intraocular pressure measured with applanation tonometer was 26 mm Hg in the right eye and 14 mm Hg in the left eye. Visual acuity (Snellen chart) in the right eye was 20/120, N12 and 20/20, N6 in the left eye. There was an axial proptosis of 14 mm in the left eye. Posterior segment examination revealed a few choroidal folds in the posterior pole of the right fundus.
Computer tomography (CT) scan revealed a well-defined mass lesion in the intraconal space of the right orbit, which measured 37 mm × 25 mm × 22 mm, with globe wall flattening in the posterolateral aspect. The optic nerve was displaced superomedially. CT scan impression was suggestive of cavernous hemangioma (right orbit). Considering her hyperglycemic status (fasting blood sugar 148 mg/dl, post prandial blood sugar 270 mg/dl) and amenorrhea, opinion from obstetrician and endocrinologist was sought.
As per the advice of her obstetrician, she underwent medical termination of pregnancy (MTP), due to her high-risk obstetric history (twice post caesarean section, poorly controlled hyperglycemic state) and because the pregnancy was the result of a failed contraception. Following MTP and normalization of glycemic state, uneventful surgical excision of the mass (measuring 36 mm × 26 mm × 21 mm) through a lateral orbitotomy approach was done and the specimen was subjected to histological examination.
Hematoxylin and Eosin stain of multiple sections showed spindle cells arranged in hypercellular and hypocellular pattern with rich vascularity. Large numbers of capillaries containing erythrocytes were also noted in the periphery. There was no evidence of mitotic figures or necrosis. With trichrome staining, the tumor exhibited predominantly collagen production. Immunohistochemistry reactivity to CD34 confirmed the diagnosis of an orbital SFT. Subsequently, there was improvement in visual acuity from 20/120 to 20/20 in the right eye and normalization of IOP (14 mm Hg). No recurrence was noted over the two years of follow-up.
Discussion
SFT are rare spindle-cell neoplasms normally found associated with serosal surfaces, especially the pleurae. Only recently, they have been recognized in extra-serosal sites such as lungs, liver, paranasal sinuses, salivary glands, adrenals, dural sheath and the orbit. Diagnosis of SFT can only be performed by histological examination, as clinical signs and radiological features are not conclusive. The classic histopathological features of SFT such as thick bands of collagen, alternating hypercellular and hypocellular areas, and a hemangiopericytoma-like pattern of vascularity are consistent with our findings.
Often from microscopic features alone it may be difficult to differentiate SFT from other spindle-shaped cell tumors of the orbit such as fibrous histiocytoma, hemangiopericytoma, meningioma and Schwannoma. The reason behind the low incidence of orbital SFT is probably because it histologically mimics other spindle-cell tumors and hence was first reported only in 1994. The key to differentiating SFT is its immunohistochemical reactivity. SFT shows strong and diffuse positivity with vimentin, BCL2 and CD34+. CD34 is an antigen expressed on the surface of the endothelium and hematopoietic progenitor cells. CD34 immunoreactivity was consistently found in SFT and this marker facilitates histopathological differentiation of this lesion from other recognized spindle cell tumors of the orbit. SFT have demonstrated strong CD34+ reactivity in 79-100% of cases. In our case, immunohistochemical positivity to CD34+ confirmed the diagnosis of SFT.
Though the association of orbital SFT with pregnancy is rare, the focal expression of progesterone receptors in the tumor cells may be related to pregnancy. Recently, it has been described that steroid hormone receptors, progesterone receptors in particular, are expressed by extra-pleural SFT. In addition, progesterone may participate as a growth factor in many CD34(+) neoplasms, which express low levels of the hormone receptors. In our case, the rapid growth of the tumor during pregnancy adds another dimension to the behavior of the tumor and is probably due to the presence of hormone receptors in orbital SFT. Similar progression of cavernous hemangioma of the orbit in pregnancy might be misleading in the clinical diagnosis of this entity. |
// newScanner creates a new scanner for a given destination IP address, using
// router to determine how to route packets to that IP.
func newScanner(ip net.IP, router routing.Router) (*scanner, error) {
s := &scanner{
dst: ip,
opts: gopacket.SerializeOptions{
FixLengths: true,
ComputeChecksums: true,
},
buf: gopacket.NewSerializeBuffer(),
}
iface, gw, src, err := router.Route(ip)
if err != nil {
return nil, err
}
log.Printf("scanning ip %v with interface %v, gateway %v, src %v", ip, iface.Name, gw, src)
s.gw, s.src, s.iface = gw, src, iface
handle, err := pcap.OpenLive(iface.Name, 65536, true, pcap.BlockForever)
if err != nil {
return nil, err
}
s.handle = handle
return s, nil
} |
A Better Dipole
We present a dipole BSSRDF with modified diffusion asymptotics, improved exitance calculations and consistent boundary conditions. This BSSRDF improves significantly upon the classical dipole model, requires negligible extra evaluation cost and is very easy to adapt in new and existing subsurface scattering algorithms. While not as accurate as a diffusion model using an extended source, such as the recent quantized diffusion method, this dipole model is far easier to implement and is of potential interest in some rendering and measurement applications. We include a comprehensive quantitative comparison between the classical dipole and the "better" dipole as well as quantized diffusion and benchmark Monte Carlo results for reference. |
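For orientation, here is a minimal Python sketch of the classical dipole diffuse reflectance R_d(r) (Jensen et al. 2001) that the abstract compares against. The function and variable names are ours, the boundary term uses the common polynomial approximation of the diffuse Fresnel reflectance, and none of the modified asymptotics, improved exitance calculation or consistent boundary conditions described above are included.

from math import exp, pi, sqrt

def classical_dipole_rd(r, sigma_a, sigma_s_prime, eta=1.3):
    """Classical dipole diffuse reflectance R_d(r) for a semi-infinite medium.
    r, sigma_a and sigma_s_prime must be expressed in consistent length units."""
    sigma_t_prime = sigma_a + sigma_s_prime         # reduced extinction coefficient
    alpha_prime = sigma_s_prime / sigma_t_prime     # reduced albedo
    sigma_tr = sqrt(3.0 * sigma_a * sigma_t_prime)  # effective transport coefficient
    # Diffuse Fresnel reflectance approximation and the resulting boundary factor A.
    f_dr = -1.440 / eta ** 2 + 0.710 / eta + 0.668 + 0.0636 * eta
    a_boundary = (1.0 + f_dr) / (1.0 - f_dr)
    z_r = 1.0 / sigma_t_prime                       # depth of the real point source
    z_v = z_r * (1.0 + 4.0 * a_boundary / 3.0)      # depth of the mirrored virtual source
    d_r = sqrt(r * r + z_r * z_r)                   # distance from real source to surface point
    d_v = sqrt(r * r + z_v * z_v)                   # distance from virtual source to surface point
    def source(z, d):
        return z * (sigma_tr * d + 1.0) * exp(-sigma_tr * d) / d ** 3
    return alpha_prime / (4.0 * pi) * (source(z_r, d_r) + source(z_v, d_v))

The "better" dipole described above keeps this overall two-source structure while changing how the diffusion quantities and the exitance are computed, which is consistent with the abstract's claim of negligible extra evaluation cost.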
/**
* Adds the given JPQL fragment as a select item.
*
* @param jpqlFragment The portion of a JPQL query that represents a select expression
* @param resultVariable The result variable identifying the select expression
* @return The newly created {@link ResultVariableStateObject}
*/
public ResultVariableStateObject addItemAs(String jpqlFragment, String resultVariable) {
ResultVariableStateObject item = new ResultVariableStateObject(this);
item.addAs();
item.parse(jpqlFragment);
item.setResultVariable(resultVariable);
addItem(item);
return item;
} |
/**
* Created by CovertJaguar on 2/20/2019 for Railcraft.
*
* @author CovertJaguar <http://www.railcraft.info>
*/
public class ChargeLogic extends Logic {
private final Charge network;
public ChargeLogic(Adapter adapter, Charge network) {
super(adapter);
this.network = network;
}
public IBatteryBlock getBattery() {
Optional<? extends IBatteryBlock> battery = access().getBattery();
assert battery.isPresent();
return battery.get();
}
public Charge.IAccess access() {
return network.network(theWorldAsserted()).access(getPos());
}
} |
def update_flags_from_ymlfile(self, yml_filename):
self.flags.read_flags_from_yml(yml_filename)
self.flags.update_flags({"VIEW_batchmode": True}) |
<reponame>Martin91/gofixtures<filename>collection.go
package fixtures
import (
"database/sql"
"fmt"
"github.com/pkg/errors"
)
// Collection maps to a table
type Collection struct {
DbName string `yaml:"db"`
TableName string `yaml:"table_name"`
Rows map[string]*Fixture `yaml:"rows"`
}
func (c *Collection) getTableName() string {
if c.DbName != "" {
return fmt.Sprintf("`%s`.`%s`", c.DbName, c.TableName)
}
return fmt.Sprintf("`%s`", c.TableName)
}
func (c *Collection) insertData(db *sql.DB) error {
for name, fixture := range c.Rows {
if name == "DEFAULT" {
continue
}
if err := fixture.insertRow(db, c.getTableName()); err != nil {
return errors.WithMessagef(err, "create fixture: %s", name)
}
}
return nil
}
|
// Fold Conj into Transpose or ConjugateTranspose.
class FoldConjugateIntoTranspose : public ArithmeticOptimizerStage {
public:
explicit FoldConjugateIntoTranspose(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("FoldConjugateIntoTranspose", ctx, ctx_ext) {}
~FoldConjugateIntoTranspose() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsConj(*node) || IsTranspose(*node) || IsConjugateTranspose(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
const NodeScopeAndName matmul = ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(matmul);
if (ctx().node_map->NodeExists(optimized_node_name)) return Status::OK();
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
const NodeDef* transpose_op = node->op() == "Conj" ? input : node;
const NodeDef* conj_op = node->op() == "Conj" ? node : input;
if ((IsTranspose(*transpose_op) || IsConjugateTranspose(*transpose_op)) &&
IsConj(*conj_op)) {
NodeDef* new_op = AddCopyNode(optimized_node_name, transpose_op);
new_op->set_op(transpose_op->op() == "Transpose" ? "ConjugateTranspose"
: "Transpose");
new_op->set_input(0, input->input(0));
ctx().node_map->UpdateInput(new_op->name(), node->name(),
input->input(0));
ForwardControlDependencies(new_op, {node, input});
*simplified_node_name = new_op->name();
}
return Status::OK();
}
}; |
<reponame>Eikosa/super-productivity
import { Injectable } from '@angular/core';
import { Actions, createEffect, ofType } from '@ngrx/effects';
import { select, Store } from '@ngrx/store';
import { concatMap, filter, first, map, switchMap, take, tap } from 'rxjs/operators';
import {
addProject,
addProjects,
addToProjectBreakTime,
archiveProject,
deleteProject,
loadProjectRelatedDataSuccess,
moveProjectTaskDownInBacklogList,
moveProjectTaskInBacklogList,
moveProjectTaskToBacklogList,
moveProjectTaskToBacklogListAuto,
moveProjectTaskToTodayList,
moveProjectTaskToTodayListAuto,
moveProjectTaskUpInBacklogList,
unarchiveProject,
updateProject,
updateProjectAdvancedCfg,
updateProjectIssueProviderCfg,
updateProjectOrder,
updateProjectWorkEnd,
updateProjectWorkStart,
upsertProject,
} from './project.actions';
import { PersistenceService } from '../../../core/persistence/persistence.service';
import { BookmarkService } from '../../bookmark/bookmark.service';
import { NoteService } from '../../note/note.service';
import { SnackService } from '../../../core/snack/snack.service';
import { getWorklogStr } from '../../../util/get-work-log-str';
import {
addTask,
addTimeSpent,
convertToMainTask,
deleteMainTasks,
deleteTask,
moveToArchive,
moveToOtherProject,
restoreTask,
updateTaskTags,
} from '../../tasks/store/task.actions';
import { ReminderService } from '../../reminder/reminder.service';
import { ProjectService } from '../project.service';
import { GlobalConfigService } from '../../config/global-config.service';
import { T } from '../../../t.const';
import {
moveTaskDownInTodayList,
moveTaskInTodayList,
moveTaskUpInTodayList,
} from '../../work-context/store/work-context-meta.actions';
import { WorkContextType } from '../../work-context/work-context.model';
import { setActiveWorkContext } from '../../work-context/store/work-context.actions';
import { Project } from '../project.model';
import { TaskService } from '../../tasks/task.service';
import { Task, TaskArchive, TaskState } from '../../tasks/task.model';
import { unique } from '../../../util/unique';
import { TaskRepeatCfgService } from '../../task-repeat-cfg/task-repeat-cfg.service';
import { TODAY_TAG } from '../../tag/tag.const';
import { EMPTY, Observable, of } from 'rxjs';
import { TaskRepeatCfg } from '../../task-repeat-cfg/task-repeat-cfg.model';
import { projectSelectors } from './project.selectors';
import { addNote, deleteNote, updateNoteOrder } from '../../note/store/note.actions';
@Injectable()
export class ProjectEffects {
syncProjectToLs$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(
addProject.type,
addProjects.type,
upsertProject.type,
deleteProject.type,
updateProject.type,
updateProjectAdvancedCfg.type,
updateProjectIssueProviderCfg.type,
updateProjectWorkStart.type,
updateProjectWorkEnd.type,
addToProjectBreakTime.type,
updateProjectOrder.type,
archiveProject.type,
unarchiveProject.type,
moveProjectTaskInBacklogList.type,
moveProjectTaskToBacklogList.type,
moveProjectTaskToTodayList.type,
moveProjectTaskUpInBacklogList.type,
moveProjectTaskDownInBacklogList.type,
moveProjectTaskToBacklogListAuto.type,
moveProjectTaskToTodayListAuto.type,
),
switchMap((a) => {
// exclude ui only actions
if (
[updateProjectWorkStart.type, updateProjectWorkEnd.type].includes(
a.type as any,
)
) {
return this.saveToLs$(false);
} else {
return this.saveToLs$(true);
}
}),
),
{ dispatch: false },
);
updateProjectStorageConditionalNote$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(updateNoteOrder, addNote, deleteNote),
switchMap((a) => {
let isChange = false;
switch (a.type) {
case updateNoteOrder.type:
isChange = a.activeContextType === WorkContextType.PROJECT;
break;
case addNote.type:
isChange = !!a.note.projectId;
break;
case deleteNote.type:
isChange = !!a.projectId;
break;
}
return isChange ? of(a) : EMPTY;
}),
switchMap(() => this.saveToLs$(true)),
),
{ dispatch: false },
);
updateProjectStorageConditionalTask$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(
addTask,
deleteTask,
moveToOtherProject,
restoreTask,
moveToArchive,
convertToMainTask,
),
switchMap((a) => {
let isChange = false;
switch (a.type) {
case addTask.type:
isChange = !!a.task.projectId;
break;
case deleteTask.type:
isChange = !!a.task.projectId;
break;
case moveToOtherProject.type:
isChange = !!a.task.projectId;
break;
case moveToArchive.type:
isChange = !!a.tasks.find((task) => !!task.projectId);
break;
case restoreTask.type:
isChange = !!a.task.projectId;
break;
case convertToMainTask.type:
isChange = !!a.task.projectId;
break;
}
return isChange ? of(a) : EMPTY;
}),
switchMap(() => this.saveToLs$(true)),
),
{ dispatch: false },
);
updateProjectStorageConditional$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(moveTaskInTodayList, moveTaskUpInTodayList, moveTaskDownInTodayList),
filter((p) => p.workContextType === WorkContextType.PROJECT),
switchMap(() => this.saveToLs$(true)),
),
{ dispatch: false },
);
updateWorkStart$: any = createEffect(() =>
this._actions$.pipe(
ofType(addTimeSpent),
filter(({ task }) => !!task.projectId),
concatMap(({ task }) =>
this._projectService.getByIdOnce$(task.projectId as string).pipe(first()),
),
filter((project: Project) => !project.workStart[getWorklogStr()]),
map((project) => {
return updateProjectWorkStart({
id: project.id,
date: getWorklogStr(),
newVal: Date.now(),
});
}),
),
);
updateWorkEnd$: Observable<unknown> = createEffect(() =>
this._actions$.pipe(
ofType(addTimeSpent),
filter(({ task }) => !!task.projectId),
map(({ task }) => {
return updateProjectWorkEnd({
id: task.projectId as string,
date: getWorklogStr(),
newVal: Date.now(),
});
}),
),
);
onProjectIdChange$: Observable<unknown> = createEffect(() =>
this._actions$.pipe(
ofType(setActiveWorkContext),
filter(({ activeType }) => activeType === WorkContextType.PROJECT),
switchMap((action) => {
const projectId = action.activeId;
return Promise.all([this._bookmarkService.loadStateForProject(projectId)]).then(
() => projectId,
);
}),
map((projectId) => {
return loadProjectRelatedDataSuccess({ projectId });
}),
),
);
// TODO a solution for orphaned tasks might be needed
deleteProjectRelatedData: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(deleteProject.type),
tap(async ({ id }) => {
await this._persistenceService.removeCompleteRelatedDataForProject(id);
this._reminderService.removeRemindersByWorkContextId(id);
this._removeAllTasksForProject(id);
this._removeAllArchiveTasksForProject(id);
this._removeAllRepeatingTasksForProject(id);
// we also might need to account for this unlikely but very nasty scenario
const misc = await this._globalConfigService.misc$.pipe(take(1)).toPromise();
if (id === misc.defaultProjectId) {
this._globalConfigService.updateSection('misc', { defaultProjectId: null });
}
}),
),
{ dispatch: false },
);
archiveProject: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(archiveProject.type),
tap(async ({ id }) => {
await this._persistenceService.archiveProject(id);
this._reminderService.removeRemindersByWorkContextId(id);
this._snackService.open({
ico: 'archive',
msg: T.F.PROJECT.S.ARCHIVED,
});
}),
),
{ dispatch: false },
);
unarchiveProject: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(unarchiveProject.type),
tap(async ({ id }) => {
await this._persistenceService.unarchiveProject(id);
this._snackService.open({
ico: 'unarchive',
msg: T.F.PROJECT.S.UNARCHIVED,
});
}),
),
{ dispatch: false },
);
// PURE SNACKS
// -----------
snackUpdateIssueProvider$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(updateProjectIssueProviderCfg.type),
tap(({ issueProviderKey }) => {
this._snackService.open({
type: 'SUCCESS',
msg: T.F.PROJECT.S.ISSUE_PROVIDER_UPDATED,
translateParams: {
issueProviderKey,
},
});
}),
),
{ dispatch: false },
);
snackUpdateBaseSettings$: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(updateProject.type),
tap(() => {
this._snackService.open({
type: 'SUCCESS',
msg: T.F.PROJECT.S.UPDATED,
});
}),
),
{ dispatch: false },
);
onProjectCreatedSnack: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(addProject.type),
tap(({ project }) => {
this._snackService.open({
ico: 'add',
type: 'SUCCESS',
msg: T.F.PROJECT.S.CREATED,
translateParams: { title: (project as Project).title },
});
}),
),
{ dispatch: false },
);
showDeletionSnack: Observable<unknown> = createEffect(
() =>
this._actions$.pipe(
ofType(deleteProject.type),
tap(() => {
this._snackService.open({
ico: 'delete_forever',
msg: T.F.PROJECT.S.DELETED,
});
}),
),
{ dispatch: false },
);
moveToTodayListOnAddTodayTag: Observable<unknown> = createEffect(() =>
this._actions$.pipe(
ofType(updateTaskTags),
filter(
({ task, newTagIds }) => !!task.projectId && newTagIds.includes(TODAY_TAG.id),
),
concatMap(({ task, newTagIds }) =>
this._projectService.getByIdOnce$(task.projectId as string).pipe(
map((project) => ({
project,
task,
newTagIds,
})),
),
),
filter(({ project }) => !project.taskIds.includes(TODAY_TAG.id)),
map(({ task, newTagIds, project }) =>
moveProjectTaskToTodayListAuto({
projectId: project.id,
taskId: task.id,
isMoveToTop: false,
}),
),
),
);
// @Effect()
// moveToBacklogOnRemoveTodayTag: Observable<unknown> = this._actions$.pipe(
// ofType(updateTaskTags),
// filter((action: UpdateTaskTags) =>
// task.projectId &&
// oldTagIds.includes(TODAY_TAG.id)
// ),
// concatMap((action) => this._projectService.getByIdOnce$(task.projectId).pipe(
// map((project) => ({
// project,
// p: action.payload,
// }))
// )),
// filter(({project}) => !project.taskIds.includes(TODAY_TAG.id)),
// map(({p, project}) => moveTaskToTodayList({
// workContextId: project.id,
// taskId: p.task.id,
// newOrderedIds: [p.task.id, ...project.backlogTaskIds],
// src: 'DONE',
// target: 'BACKLOG'
// })),
// );
constructor(
private _actions$: Actions,
private _store$: Store<any>,
private _snackService: SnackService,
private _projectService: ProjectService,
private _persistenceService: PersistenceService,
private _bookmarkService: BookmarkService,
private _noteService: NoteService,
private _globalConfigService: GlobalConfigService,
private _reminderService: ReminderService,
// private _workContextService: WorkContextService,
private _taskService: TaskService,
private _taskRepeatCfgService: TaskRepeatCfgService,
) {}
private async _removeAllTasksForProject(projectIdToDelete: string): Promise<any> {
const taskState: TaskState = await this._taskService.taskFeatureState$
.pipe(
filter((s) => s.isDataLoaded),
first(),
)
.toPromise();
const nonArchiveTaskIdsToDelete = taskState.ids.filter((id) => {
const t = taskState.entities[id] as Task;
if (!t) {
throw new Error('No task');
}
// NOTE sub tasks are accounted for in DeleteMainTasks action
return t.projectId === projectIdToDelete && !t.parentId;
});
console.log(
'TaskIds to remove/unique',
nonArchiveTaskIdsToDelete,
unique(nonArchiveTaskIdsToDelete),
);
this._taskService.removeMultipleMainTasks(nonArchiveTaskIdsToDelete);
}
private async _removeAllArchiveTasksForProject(
projectIdToDelete: string,
): Promise<any> {
const taskArchiveState: TaskArchive =
await this._persistenceService.taskArchive.loadState();
// NOTE: task archive might not exist if there never was a day completed
const archiveTaskIdsToDelete = !!taskArchiveState
? (taskArchiveState.ids as string[]).filter((id) => {
const t = taskArchiveState.entities[id] as Task;
if (!t) {
throw new Error('No task');
}
// NOTE sub tasks are accounted for in DeleteMainTasks action
return t.projectId === projectIdToDelete && !t.parentId;
})
: [];
console.log(
'Archive TaskIds to remove/unique',
archiveTaskIdsToDelete,
unique(archiveTaskIdsToDelete),
);
// remove archive
await this._persistenceService.taskArchive.execAction(
deleteMainTasks({ taskIds: archiveTaskIdsToDelete }),
);
}
private async _removeAllRepeatingTasksForProject(
projectIdToDelete: string,
): Promise<any> {
const taskRepeatCfgs: TaskRepeatCfg[] =
await this._taskRepeatCfgService.taskRepeatCfgs$.pipe(first()).toPromise();
const allCfgIdsForProject = taskRepeatCfgs.filter(
(cfg) => cfg.projectId === projectIdToDelete,
);
const cfgsIdsToRemove: string[] = allCfgIdsForProject
.filter((cfg) => !cfg.tagIds || cfg.tagIds.length === 0)
.map((cfg) => cfg.id as string);
if (cfgsIdsToRemove.length > 0) {
this._taskRepeatCfgService.deleteTaskRepeatCfgsNoTaskCleanup(cfgsIdsToRemove);
}
const cfgsToUpdate: string[] = allCfgIdsForProject
.filter((cfg) => cfg.tagIds && cfg.tagIds.length > 0)
.map((taskRepeatCfg) => taskRepeatCfg.id as string);
if (cfgsToUpdate.length > 0) {
this._taskRepeatCfgService.updateTaskRepeatCfgs(cfgsToUpdate, { projectId: null });
}
}
private saveToLs$(isSyncModelChange: boolean): Observable<unknown> {
return this._store$.pipe(
// tap(() => console.log('SAVE')),
select(projectSelectors),
take(1),
switchMap((projectState) =>
this._persistenceService.project.saveState(projectState, { isSyncModelChange }),
),
);
}
}
|
// Main starts the main loop of an immediate-mode UI, calling fn for each frame.
func Main(fn func(frame *Frame)) {
if err := termbox.Init(); err != nil {
log.Fatal(err)
}
defer termbox.Close()
var frame Frame
termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
loop:
for {
switch ev := termbox.PollEvent(); ev.Type {
case termbox.EventResize:
ev.Key = 0
fallthrough
case termbox.EventKey:
if ev.Key == termbox.KeyCtrlC {
break loop
}
frame.Key = ev.Key
frame.Draw.Invalidate = true
for frame.Draw.Invalidate {
frame.Draw.BeforeFrame()
termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
fn(&frame)
termbox.Flush()
if frame.Focus.AfterFrame() {
frame.Draw.Invalidate = true
}
frame.Key = 0
}
}
}
} |
// New creates a sql.DB with rollback support.
func New(t *testing.T) *sql.DB {
t.Helper()
if testing.Short() {
t.Skip("skip store test because short mode")
}
db, err := sql.Open("txdb", t.Name())
if err != nil {
t.Fatalf("can't open db: %s", err)
}
t.Cleanup(func() {
db.Close()
})
return db
} |
/**
* <br/>
*
* @author Charles Prud'homme
* @since 05/06/2014
*/
public class ColorVariablesPanel extends APanel implements IMonitorOpenNode, IMonitorInitialize {
int size;
int psize = 10;
BufferedImage image;
JLabel theLabel;
ImageIcon icon;
IntVar[] ivars;
float[] ids;
public ColorVariablesPanel(GUI frame) {
super(frame);
solver.plugMonitor(this);
size = (int) Math.ceil(Math.sqrt(model.getNbVars()));
psize = 600 / size;
image = new BufferedImage(size * psize, size * psize, BufferedImage.TYPE_INT_ARGB);
solver.plugMonitor(this);
icon = new ImageIcon(image);
theLabel = new JLabel(new ImageIcon(image));
add(theLabel);
afterInitialize();
}
@Override
public void paintComponent(Graphics g) {
super.paintComponent(g);
for(int i = 0 ; i < ivars.length; i++){
int x = i / size;
int y = i % size;
float[] rgb = rgb(ivars[i].getDomainSize() / ids[i]);
Color color = new Color(rgb[0], rgb[1], rgb[2]);
for (int j = 0; j < psize; j++)
for (int k = 0; k < psize; k++)
image.setRGB(x * psize + j, y * psize + k, color.getRGB());
}
}
private static float[] rgb(float value){
int aR = 0; int aG = 255; int aB=0; // RGB for our 1st color (green in this case).
int bR = 0; int bG = 0; int bB=255; // RGB for our 2nd color (blue in this case).
float[] rgb = new float[3];
rgb[0] = ((bR - aR) * value + aR) / 255f; // Evaluates as 0 for these endpoints.
rgb[1] = ((bG - aG) * value + aG) / 255f; // Evaluates as (255 - 255*value) / 255.
rgb[2] = ((bB - aB) * value + aB) / 255f; // Evaluates as (255*value) / 255.
return rgb;
}
@Override
public void plug(JTabbedPane tabbedpanel) {
super.plug(tabbedpanel);
tabbedpanel.addTab("Color map", this);
}
@Override
public void afterInitialize() {
ivars = model.retrieveIntVars(true);
ids = new float[ivars.length];
for(int i = 0 ; i < ivars.length; i++){
ids[i] = 1.f * ivars[i].getDomainSize();
}
}
@Override
public void beforeOpenNode() {
}
@Override
public void afterOpenNode() {
if (frame.canUpdate() && activate) {
repaint();
}
}
} |
<gh_stars>1-10
import datetime
from typing import Tuple, Any, List
from idact.core.config import ClusterConfig
from idact.core.synchronized_deployments import SynchronizedDeployments
from idact.detail.deployment_sync.dask_deployments. \
materialize_dask_deployment import materialize_dask_deployment
from idact.detail.deployment_sync.deployment_definitions import \
DeploymentDefinitions
from idact.detail.deployment_sync.jupyter_deployments. \
materialize_jupyter_deployment import materialize_jupyter_deployment
from idact.detail.deployment_sync.nodes.materialize_nodes import \
materialize_nodes
from idact.detail.deployment_sync.synchronized_deployments_impl import \
SynchronizedDeploymentsImpl
from idact.detail.log.get_logger import get_logger
from idact.detail.nodes.node_internal import NodeInternal
# pylint: disable=bad-continuation
def sorted_by_expiration_date(
to_sort: List[Tuple[Any, datetime.datetime]]) -> List[Any]: # noqa
"""Returns the values sorted by expiration date, discarding the date."""
return [first for (first, expiration_date)
in sorted(to_sort, key=lambda value: value[1])]
def report_pulled_deployments(deployments: SynchronizedDeploymentsImpl):
"""Prints pulled deployments.
:param deployments: Deployments to report.
"""
log = get_logger(__name__)
for node in deployments.nodes:
log.info("Pulled allocation deployment: %s", node)
for jupyter in deployments.jupyter_deployments:
log.info("Pulled Jupyter deployment: %s", jupyter)
for dask in deployments.dask_deployments:
log.info("Pulled Dask deployment: %s", dask)
# pylint: disable=bad-continuation
def materialize_deployments(
config: ClusterConfig,
access_node: NodeInternal,
deployments: DeploymentDefinitions) -> SynchronizedDeployments: # noqa
"""Creates deployment objects from definitions.
:param config: Cluster config.
:param access_node: Cluster access node.
:param deployments: Definitions to materialize.
"""
log = get_logger(__name__)
nodes_index = 0
jupyter_index = 1
dask_index = 2
deployments_by_date = ([], [], [])
for uuid, definition in deployments.nodes.items():
try:
materialized_nodes = materialize_nodes(config=config,
access_node=access_node,
uuid=uuid,
definition=definition)
deployments_by_date[nodes_index].append(
(materialized_nodes, definition.expiration_date))
except RuntimeError:
log.warning("Discarding a synchronized allocation deployment,"
" unable to materialize: %s", uuid)
log.debug("Exception", exc_info=1)
for uuid, definition in deployments.jupyter_deployments.items():
try:
materialized_jupyter = materialize_jupyter_deployment(
config=config,
uuid=uuid,
definition=definition)
deployments_by_date[jupyter_index].append(
(materialized_jupyter, definition.expiration_date))
except RuntimeError:
log.warning("Discarding a Jupyter deployment,"
" unable to materialize: %s", uuid)
log.debug("Exception", exc_info=1)
for uuid, definition in deployments.dask_deployments.items():
try:
materialized_dask = materialize_dask_deployment(
config=config,
uuid=uuid,
definition=definition)
deployments_by_date[dask_index].append(
(materialized_dask, definition.expiration_date))
except RuntimeError:
log.warning("Discarding a Dask deployment,"
" unable to materialize: %s", uuid)
log.debug("Exception", exc_info=1)
deployments_sorted = tuple(map(sorted_by_expiration_date,
deployments_by_date))
synchronized_deployments = SynchronizedDeploymentsImpl(
nodes=deployments_sorted[nodes_index],
jupyter_deployments=deployments_sorted[jupyter_index],
dask_deployments=deployments_sorted[dask_index])
report_pulled_deployments(deployments=synchronized_deployments)
return synchronized_deployments
|
#include<stdio.h>
#include<stdlib.h>
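/*
 * Decodes a message that was typed with both hands shifted one key to the
 * left ('L') or to the right ('R') on the three QWERTY letter rows: for each
 * character of the typed message the program prints the neighbouring key in
 * the opposite direction on the same row.
 */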
int main(){
char l[]="qwertyuiop";
char s[]="asdfghjkl;";
char t[]="zxcvbnm,./";
char a;
char msg[100];
scanf("%c",&a);
scanf("%s",msg);
int i=0;
if(a=='L'){
while (msg[i]){
int j=0;
while(l[j]){
if (msg[i]==l[j]){
printf("%c",l[j+1]);
}
else if(msg[i]==s[j]){
printf("%c",s[j+1]);
}
else if(msg[i]==t[j]){
printf("%c",t[j+1]);;
}
j++;
}
i++;
}
}
else if(a=='R') {
while (msg[i]){
int j=0;
while(l[j]){
if (msg[i]==l[j]){
printf("%c",l[j-1]);
}
else if(msg[i]==s[j]){
printf("%c",s[j-1]);
}
else if(msg[i]==t[j]){
printf("%c",t[j-1]);;
}
j++;
}
i++;
}
}
printf("\n");
} |
def input2(
prompt,
*,
autocomplete=None,
autocomplete_fuzzy=False,
choices=None,
normalize=True,
type=None,
verify=None
):
if choices is not None and verify is not None:
raise ValueError("`choices` and `verify` may not both be specified")
if isinstance(autocomplete, Sequence):
autocomplete = sequence_to_autocomplete(autocomplete, fuzzy=autocomplete_fuzzy)
while True:
if autocomplete is not None:
with Autocomplete(sys.stdout, sys.stdin, autocomplete) as ac:
response = ac.input(prompt)
else:
response = input(prompt)
if normalize:
response = response.strip()
if type is not None:
try:
response = type(response)
except Exception:
continue
if choices is not None and response not in choices:
continue
if verify is not None and not verify(response):
continue
return response |
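A brief, hypothetical usage sketch based only on the signature above; the prompts, option lists and validation predicate are invented for illustration.

# Free-form answer with fuzzy autocompletion over a fixed word list.
color = input2(
    "Favorite color? ",
    autocomplete=["red", "green", "blue"],
    autocomplete_fuzzy=True,
)

# Typed, validated answer: re-prompts until int() succeeds and the predicate holds.
count = input2("How many? ", type=int, verify=lambda n: n > 0)

# Restricted answer: only values present in `choices` are accepted.
mode = input2("Mode? ", choices={"fast", "safe"})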
/** Helper methods for working with IDs. */
public final class Id {
private static final String UNIQUE_ID = "id-";
private static int counter = 0;
/** Creates an identifier guaranteed to be unique within this document. This is useful for allocating element IDs. */
public static String unique() {
String id;
do {
id = UNIQUE_ID + counter; // no Ids.build(ELEMENTO_UID, counter) for performance reasons
counter++;
} while (document.getElementById(id) != null);
return id;
}
/** Creates an identifier guaranteed to be unique within this document. The unique part comes last. */
public static String unique(String id, String... additionalIds) {
return build(id, additionalIds) + "-" + unique();
}
public static String build(String id, String... additionalIds) {
return build(id, '-', additionalIds);
}
static String build(String id, char separator, String... additionalIds) {
if (id == null || id.trim().length() == 0) {
throw new IllegalArgumentException("ID must not be null or empty.");
}
List<String> ids = new ArrayList<>();
ids.add(id);
if (additionalIds != null) {
for (String additionalId : additionalIds) {
if (additionalId != null && additionalId.trim().length() != 0) {
ids.add(additionalId);
}
}
}
return ids.stream().map(Id::asId).filter(Objects::nonNull).collect(joining(String.valueOf(separator)));
}
/**
* Turns a string which can contain whitespace and upper/lower case characters into an all lowercase id separated by
* "-".
*/
static String asId(String text) {
String[] parts = text.split("[-\\s]");
List<String> sanitized = new ArrayList<>();
for (String part : parts) {
if (part != null) {
String s = part.replaceAll("\\s+", "");
s = s.replaceAll("[^a-zA-Z0-9-_]", "");
s = s.replace('_', '-');
if (s.length() != 0) {
sanitized.add(s);
}
}
}
if (sanitized.isEmpty()) {
return null;
} else {
return sanitized.stream()
.filter(s -> s != null && s.trim().length() != 0)
.map(String::toLowerCase)
.collect(joining("-"));
}
}
private Id() {
}
} |
<gh_stars>0
// SPDX-FileCopyrightText: 2021 <NAME>
// SPDX-License-Identifier: MIT
#include <Jolt.h>
#include <Physics/Constraints/PathConstraint.h>
#include <Physics/Body/Body.h>
#include <Core/StringTools.h>
#include <Core/StatCollector.h>
#include <ObjectStream/TypeDeclarations.h>
#include <Core/StreamIn.h>
#include <Core/StreamOut.h>
#ifdef JPH_DEBUG_RENDERER
#include <Renderer/DebugRenderer.h>
#endif // JPH_DEBUG_RENDERER
namespace JPH {
JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL(PathConstraintSettings)
{
JPH_ADD_BASE_CLASS(PathConstraintSettings, TwoBodyConstraintSettings)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mPath)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mPathPosition)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mPathRotation)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mPathFraction)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mMaxFrictionForce)
JPH_ADD_ATTRIBUTE(PathConstraintSettings, mPositionMotorSettings)
JPH_ADD_ENUM_ATTRIBUTE(PathConstraintSettings, mRotationConstraintType)
}
void PathConstraintSettings::SaveBinaryState(StreamOut &inStream) const
{
ConstraintSettings::SaveBinaryState(inStream);
mPath->SaveBinaryState(inStream);
inStream.Write(mPathPosition);
inStream.Write(mPathRotation);
inStream.Write(mPathFraction);
inStream.Write(mMaxFrictionForce);
inStream.Write(mRotationConstraintType);
mPositionMotorSettings.SaveBinaryState(inStream);
}
void PathConstraintSettings::RestoreBinaryState(StreamIn &inStream)
{
ConstraintSettings::RestoreBinaryState(inStream);
PathConstraintPath::PathResult result = PathConstraintPath::sRestoreFromBinaryState(inStream);
if (!result.HasError())
mPath = result.Get();
inStream.Read(mPathPosition);
inStream.Read(mPathRotation);
inStream.Read(mPathFraction);
inStream.Read(mMaxFrictionForce);
inStream.Read(mRotationConstraintType);
mPositionMotorSettings.RestoreBinaryState(inStream);
}
TwoBodyConstraint *PathConstraintSettings::Create(Body &inBody1, Body &inBody2) const
{
return new PathConstraint(inBody1, inBody2, *this);
}
PathConstraint::PathConstraint(Body &inBody1, Body &inBody2, const PathConstraintSettings &inSettings) :
TwoBodyConstraint(inBody1, inBody2, inSettings)
{
// Copy properties
mMaxFrictionForce = inSettings.mMaxFrictionForce;
mPositionMotorSettings = inSettings.mPositionMotorSettings;
mRotationConstraintType = inSettings.mRotationConstraintType;
// Calculate transform that takes us from the path start to center of mass space of body 1
mPathToBody1 = Mat44::sRotationTranslation(inSettings.mPathRotation, inSettings.mPathPosition - inBody1.GetShape()->GetCenterOfMass());
SetPath(inSettings.mPath, inSettings.mPathFraction);
}
void PathConstraint::SetPath(const PathConstraintPath *inPath, float inPathFraction)
{
mPath = inPath;
mPathFraction = inPathFraction;
if (mPath != nullptr)
{
// Get the point on the path for this fraction
Vec3 path_point, path_tangent, path_normal, path_binormal;
mPath->GetPointOnPath(mPathFraction, path_point, path_tangent, path_normal, path_binormal);
// Construct the matrix that takes us from the closest point on the path to body 2 center of mass space
Mat44 closest_point_to_path(Vec4(path_tangent, 0), Vec4(path_binormal, 0), Vec4(path_normal, 0), Vec4(path_point, 1));
Mat44 inv_transform2 = mBody2->GetInverseCenterOfMassTransform();
Mat44 path_to_world = mBody1->GetCenterOfMassTransform() * mPathToBody1;
mPathToBody2 = inv_transform2 * path_to_world * closest_point_to_path;
// Calculate initial orientation
if (mRotationConstraintType == EPathRotationConstraintType::FullyConstrained)
mInvInitialOrientation = RotationQuatConstraintPart::sGetInvInitialOrientation(*mBody1, *mBody2);
}
}
void PathConstraint::CalculateConstraintProperties(float inDeltaTime)
{
// Get transforms of body 1 and 2
Mat44 transform1 = mBody1->GetCenterOfMassTransform();
Mat44 transform2 = mBody2->GetCenterOfMassTransform();
// Get the transform of the path transform as seen from body 1 in world space
Mat44 path_to_world_1 = transform1 * mPathToBody1;
// Get the transform of from the point on path that body 2 is attached to in world space
Mat44 path_to_world_2 = transform2 * mPathToBody2;
// Calculate new closest point on path
Vec3 position2 = path_to_world_2.GetTranslation();
Vec3 position2_local_to_path = path_to_world_1.InversedRotationTranslation() * position2;
mPathFraction = mPath->GetClosestPoint(position2_local_to_path);
// Get the point on the path for this fraction
Vec3 path_point, path_tangent, path_normal, path_binormal;
mPath->GetPointOnPath(mPathFraction, path_point, path_tangent, path_normal, path_binormal);
// Calculate R1 and R2
path_point = path_to_world_1 * path_point;
mR1 = path_point - mBody1->GetCenterOfMassPosition();
mR2 = position2 - mBody2->GetCenterOfMassPosition();
// Calculate U = X2 + R2 - X1 - R1
mU = position2 - path_point;
// Calculate world space normals
mPathNormal = path_to_world_1.Multiply3x3(path_normal);
mPathBinormal = path_to_world_1.Multiply3x3(path_binormal);
// Calculate slide axis
mPathTangent = path_to_world_1.Multiply3x3(path_tangent);
// Prepare constraint part for position constraint to slide along the path
mPositionConstraintPart.CalculateConstraintProperties(*mBody1, transform1.GetRotation(), mR1 + mU, *mBody2, transform2.GetRotation(), mR2, mPathNormal, mPathBinormal);
// Check if closest point is on the boundary of the path and if so apply limit
if (!mPath->IsLooping() && (mPathFraction <= 0.0f || mPathFraction >= mPath->GetPathMaxFraction()))
mPositionLimitsConstraintPart.CalculateConstraintProperties(inDeltaTime, *mBody1, mR1 + mU, *mBody2, mR2, mPathTangent);
else
mPositionLimitsConstraintPart.Deactivate();
// Prepare rotation constraint part
switch (mRotationConstraintType)
{
case EPathRotationConstraintType::Free:
// No rotational limits
break;
case EPathRotationConstraintType::ConstrainAroundTangent:
mHingeConstraintPart.CalculateConstraintProperties(*mBody1, transform1.GetRotation(), mPathTangent, *mBody2, transform2.GetRotation(), path_to_world_2.GetAxisX());
break;
case EPathRotationConstraintType::ConstrainAroundNormal:
mHingeConstraintPart.CalculateConstraintProperties(*mBody1, transform1.GetRotation(), mPathNormal, *mBody2, transform2.GetRotation(), path_to_world_2.GetAxisZ());
break;
case EPathRotationConstraintType::ConstrainAroundBinormal:
mHingeConstraintPart.CalculateConstraintProperties(*mBody1, transform1.GetRotation(), mPathBinormal, *mBody2, transform2.GetRotation(), path_to_world_2.GetAxisY());
break;
case EPathRotationConstraintType::ConstaintToPath:
// We need to calculate the inverse of the rotation from body 1 to body 2 for the current path position (see: RotationQuatConstraintPart::sGetInvInitialOrientation)
// RotationBody2 = RotationBody1 * InitialOrientation <=> InitialOrientation^-1 = RotationBody2^-1 * RotationBody1
// We can express RotationBody2 in terms of RotationBody1: RotationBody2 = RotationBody1 * PathToBody1 * RotationClosestPointOnPath * PathToBody2^-1
// Combining these two: InitialOrientation^-1 = PathToBody2 * (PathToBody1 * RotationClosestPointOnPath)^-1
mInvInitialOrientation = mPathToBody2.Multiply3x3RightTransposed(mPathToBody1.Multiply3x3(Mat44(Vec4(path_tangent, 0), Vec4(path_binormal, 0), Vec4(path_normal, 0), Vec4::sZero()))).GetQuaternion();
[[fallthrough]];
case EPathRotationConstraintType::FullyConstrained:
mRotationConstraintPart.CalculateConstraintProperties(*mBody1, transform1.GetRotation(), *mBody2, transform2.GetRotation(), mInvInitialOrientation);
break;
}
// Motor properties
switch (mPositionMotorState)
{
case EMotorState::Off:
if (mMaxFrictionForce > 0.0f)
mPositionMotorConstraintPart.CalculateConstraintProperties(inDeltaTime, *mBody1, mR1 + mU, *mBody2, mR2, mPathTangent);
else
mPositionMotorConstraintPart.Deactivate();
break;
case EMotorState::Velocity:
mPositionMotorConstraintPart.CalculateConstraintProperties(inDeltaTime, *mBody1, mR1 + mU, *mBody2, mR2, mPathTangent, -mTargetVelocity);
break;
case EMotorState::Position:
{
// Calculate constraint value to drive to
float c;
if (mPath->IsLooping())
{
float max_fraction = mPath->GetPathMaxFraction();
c = fmod(mPathFraction - mTargetPathFraction, max_fraction);
float half_max_fraction = 0.5f * max_fraction;
if (c > half_max_fraction)
c -= max_fraction;
else if (c < -half_max_fraction)
c += max_fraction;
}
else
c = mPathFraction - mTargetPathFraction;
mPositionMotorConstraintPart.CalculateConstraintProperties(inDeltaTime, *mBody1, mR1 + mU, *mBody2, mR2, mPathTangent, 0.0f, c, mPositionMotorSettings.mFrequency, mPositionMotorSettings.mDamping);
break;
}
}
}
void PathConstraint::SetupVelocityConstraint(float inDeltaTime)
{
CalculateConstraintProperties(inDeltaTime);
}
void PathConstraint::WarmStartVelocityConstraint(float inWarmStartImpulseRatio)
{
// Warm starting: Apply previous frame impulse
mPositionMotorConstraintPart.WarmStart(*mBody1, *mBody2, mPathTangent, inWarmStartImpulseRatio);
mPositionConstraintPart.WarmStart(*mBody1, *mBody2, mPathNormal, mPathBinormal, inWarmStartImpulseRatio);
mPositionLimitsConstraintPart.WarmStart(*mBody1, *mBody2, mPathTangent, inWarmStartImpulseRatio);
switch (mRotationConstraintType)
{
case EPathRotationConstraintType::Free:
// No rotational limits
break;
case EPathRotationConstraintType::ConstrainAroundTangent:
case EPathRotationConstraintType::ConstrainAroundNormal:
case EPathRotationConstraintType::ConstrainAroundBinormal:
mHingeConstraintPart.WarmStart(*mBody1, *mBody2, inWarmStartImpulseRatio);
break;
case EPathRotationConstraintType::ConstaintToPath:
case EPathRotationConstraintType::FullyConstrained:
mRotationConstraintPart.WarmStart(*mBody1, *mBody2, inWarmStartImpulseRatio);
break;
}
}
bool PathConstraint::SolveVelocityConstraint(float inDeltaTime)
{
// Solve motor
bool motor = false;
if (mPositionMotorConstraintPart.IsActive())
{
switch (mPositionMotorState)
{
case EMotorState::Off:
{
float max_lambda = mMaxFrictionForce * inDeltaTime;
motor = mPositionMotorConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2, mPathTangent, -max_lambda, max_lambda);
break;
}
case EMotorState::Velocity:
case EMotorState::Position:
motor = mPositionMotorConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2, mPathTangent, inDeltaTime * mPositionMotorSettings.mMinForceLimit, inDeltaTime * mPositionMotorSettings.mMaxForceLimit);
break;
}
}
// Solve position constraint along 2 axis
bool pos = mPositionConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2, mPathNormal, mPathBinormal);
// Solve limits along path axis
bool limit = false;
if (mPositionLimitsConstraintPart.IsActive())
{
if (mPathFraction <= 0.0f)
limit = mPositionLimitsConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2, mPathTangent, 0, FLT_MAX);
else
{
JPH_ASSERT(mPathFraction >= mPath->GetPathMaxFraction());
limit = mPositionLimitsConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2, mPathTangent, -FLT_MAX, 0);
}
}
// Solve rotational constraint
// Note, this is not entirely correct, we should apply a velocity constraint so that the body will actually follow the path
// by looking at the derivative of the tangent, normal or binormal but we don't. This means the position constraint solver
// will need to correct the orientation error that builds up, which in turn means that the simulation is not physically correct.
bool rot = false;
switch (mRotationConstraintType)
{
case EPathRotationConstraintType::Free:
// No rotational limits
break;
case EPathRotationConstraintType::ConstrainAroundTangent:
case EPathRotationConstraintType::ConstrainAroundNormal:
case EPathRotationConstraintType::ConstrainAroundBinormal:
rot = mHingeConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2);
break;
case EPathRotationConstraintType::ConstaintToPath:
case EPathRotationConstraintType::FullyConstrained:
rot = mRotationConstraintPart.SolveVelocityConstraint(*mBody1, *mBody2);
break;
}
return motor || pos || limit || rot;
}
bool PathConstraint::SolvePositionConstraint(float inDeltaTime, float inBaumgarte)
{
// Update constraint properties (bodies may have moved)
CalculateConstraintProperties(inDeltaTime);
// Solve position constraint along 2 axis
bool pos = mPositionConstraintPart.SolvePositionConstraint(*mBody1, *mBody2, mU, mPathNormal, mPathBinormal, inBaumgarte);
// Solve limits along path axis
bool limit = false;
if (mPositionLimitsConstraintPart.IsActive())
{
if (mPathFraction <= 0.0f)
limit = mPositionLimitsConstraintPart.SolvePositionConstraint(*mBody1, *mBody2, mPathTangent, mU.Dot(mPathTangent), inBaumgarte);
else
{
JPH_ASSERT(mPathFraction >= mPath->GetPathMaxFraction());
limit = mPositionLimitsConstraintPart.SolvePositionConstraint(*mBody1, *mBody2, mPathTangent, mU.Dot(mPathTangent), inBaumgarte);
}
}
// Solve rotational constraint
bool rot = false;
switch (mRotationConstraintType)
{
case EPathRotationConstraintType::Free:
// No rotational limits
break;
case EPathRotationConstraintType::ConstrainAroundTangent:
case EPathRotationConstraintType::ConstrainAroundNormal:
case EPathRotationConstraintType::ConstrainAroundBinormal:
rot = mHingeConstraintPart.SolvePositionConstraint(*mBody1, *mBody2, inBaumgarte);
break;
case EPathRotationConstraintType::ConstaintToPath:
case EPathRotationConstraintType::FullyConstrained:
rot = mRotationConstraintPart.SolvePositionConstraint(*mBody1, *mBody2, mInvInitialOrientation, inBaumgarte);
break;
}
return pos || limit || rot;
}
#ifdef JPH_STAT_COLLECTOR
void PathConstraint::CollectStats() const
{
string prefix = "Constraint." + mBody1->GetDebugName() + "-" + mBody2->GetDebugName();
JPH_STAT_COLLECTOR_ADD(prefix + ".PathFraction", mPathFraction);
JPH_STAT_COLLECTOR_ADD(prefix + ".DualAxis.TotalLambda0", mPositionConstraintPart.GetTotalLambda()[0]);
JPH_STAT_COLLECTOR_ADD(prefix + ".DualAxis.TotalLambda1", mPositionConstraintPart.GetTotalLambda()[1]);
switch (mRotationConstraintType)
{
case EPathRotationConstraintType::Free:
// No rotational limits
break;
case EPathRotationConstraintType::ConstrainAroundTangent:
case EPathRotationConstraintType::ConstrainAroundNormal:
case EPathRotationConstraintType::ConstrainAroundBinormal:
JPH_STAT_COLLECTOR_ADD(prefix + ".Hinge.TotalLambda0", mHingeConstraintPart.GetTotalLambda()[0]);
JPH_STAT_COLLECTOR_ADD(prefix + ".Hinge.TotalLambda1", mHingeConstraintPart.GetTotalLambda()[1]);
break;
case EPathRotationConstraintType::ConstaintToPath:
case EPathRotationConstraintType::FullyConstrained:
JPH_STAT_COLLECTOR_ADD(prefix + ".Rotation.TotalLambda0", mRotationConstraintPart.GetTotalLambda()[0]);
JPH_STAT_COLLECTOR_ADD(prefix + ".Rotation.TotalLambda1", mRotationConstraintPart.GetTotalLambda()[1]);
JPH_STAT_COLLECTOR_ADD(prefix + ".Rotation.TotalLambda2", mRotationConstraintPart.GetTotalLambda()[2]);
break;
}
if (mPositionLimitsConstraintPart.IsActive())
JPH_STAT_COLLECTOR_ADD(prefix + ".PositionLimit.TotalLambda", mPositionLimitsConstraintPart.GetTotalLambda());
if (mPositionMotorConstraintPart.IsActive())
JPH_STAT_COLLECTOR_ADD(prefix + ".PositionMotor.TotalLambda", mPositionMotorConstraintPart.GetTotalLambda());
}
#endif // JPH_STAT_COLLECTOR
#ifdef JPH_DEBUG_RENDERER
void PathConstraint::DrawConstraint(DebugRenderer *inRenderer) const
{
// Draw the path in world space
Mat44 path_to_world = mBody1->GetCenterOfMassTransform() * mPathToBody1;
mPath->DrawPath(inRenderer, path_to_world);
// Draw anchor point of both bodies in world space
Vec3 x1 = mBody1->GetCenterOfMassPosition() + mR1;
Vec3 x2 = mBody2->GetCenterOfMassPosition() + mR2;
inRenderer->DrawMarker(x1, Color::sYellow, 0.1f);
inRenderer->DrawMarker(x2, Color::sYellow, 0.1f);
inRenderer->DrawArrow(x1, x1 + mPathTangent, Color::sBlue, 0.1f);
inRenderer->DrawArrow(x1, x1 + mPathNormal, Color::sRed, 0.1f);
inRenderer->DrawArrow(x1, x1 + mPathBinormal, Color::sGreen, 0.1f);
inRenderer->DrawText3D(x1, StringFormat("%.1f", (double)mPathFraction));
// Draw motor
switch (mPositionMotorState)
{
case EMotorState::Position:
{
// Draw target marker
Vec3 position, tangent, normal, binormal;
mPath->GetPointOnPath(mTargetPathFraction, position, tangent, normal, binormal);
inRenderer->DrawMarker(path_to_world * position, Color::sYellow, 1.0f);
break;
}
case EMotorState::Velocity:
{
Vec3 position = mBody2->GetCenterOfMassPosition() + mR2;
inRenderer->DrawArrow(position, position + mPathTangent * mTargetVelocity, Color::sRed, 0.1f);
break;
}
case EMotorState::Off:
break;
}
}
#endif // JPH_DEBUG_RENDERER
void PathConstraint::SaveState(StateRecorder &inStream) const
{
TwoBodyConstraint::SaveState(inStream);
mPositionConstraintPart.SaveState(inStream);
mPositionLimitsConstraintPart.SaveState(inStream);
mPositionMotorConstraintPart.SaveState(inStream);
mHingeConstraintPart.SaveState(inStream);
mRotationConstraintPart.SaveState(inStream);
inStream.Write(mMaxFrictionForce);
inStream.Write(mPositionMotorSettings);
inStream.Write(mPositionMotorState);
inStream.Write(mTargetVelocity);
inStream.Write(mTargetPathFraction);
inStream.Write(mPathFraction);
}
void PathConstraint::RestoreState(StateRecorder &inStream)
{
TwoBodyConstraint::RestoreState(inStream);
mPositionConstraintPart.RestoreState(inStream);
mPositionLimitsConstraintPart.RestoreState(inStream);
mPositionMotorConstraintPart.RestoreState(inStream);
mHingeConstraintPart.RestoreState(inStream);
mRotationConstraintPart.RestoreState(inStream);
inStream.Read(mMaxFrictionForce);
inStream.Read(mPositionMotorSettings);
inStream.Read(mPositionMotorState);
inStream.Read(mTargetVelocity);
inStream.Read(mTargetPathFraction);
inStream.Read(mPathFraction);
}
} // JPH |
/*
* Copyright (c) 2000-2008, 2011, 2017, Juniper Networks, Inc.
* All rights reserved.
* This SOFTWARE is licensed under the LICENSE provided in the
* ../Copyright file. By downloading, installing, copying, or otherwise
* using the SOFTWARE, you agree to be bound by the terms of that
* LICENSE.
*/
#ifndef LIBPSU_PSUSTRING_H
#define LIBPSU_PSUSTRING_H
#include <stdlib.h>
#include <string.h>	/* memcpy/memmove/strlen/strchr/strncmp used by the inline helpers below */
/**
* @brief
* Produces output into a dynamically-allocated character string buffer
* according to a format specification string and an appropriate number
* of arguments.
*
* @param[in] fmt
* Format string (see sprintf(3) for a description of the format)
* @param[in] ...
* Arguments sufficient to satisfy the format string
*
* @return
* A pointer to the resultant string.
*/
char *strdupf (const char *fmt, ...) PSU_PRINTFLIKE(1, 2);
/**
* @brief
* Safe form of snprintf(3) that returns the number of characters written,
* rather than the number of characters that would have been written.
*
* @param[out] out
* Pointer to the output buffer
* @param[in] outsize
* Size of the output buffer, in bytes
* @param[in] fmt
* Format string (see sprintf(3) for a description of the format)
* @param[in] ...
* Arguments sufficient to satisfy the format string
*
* @return
* The number of characters written to the output buffer.
*/
size_t snprintf_safe (char *out, size_t outsize, const char *fmt, ...)
PSU_PRINTFLIKE(3, 4);
/*
* memdup(): allocates sufficient memory for a copy of the
* buffer buf, does the copy, and returns a pointer to it. The pointer may
* subsequently be used as an argument to the function free(3).
*/
static inline void *
memdup (const void *buf, size_t size)
{
void *vp = malloc(size);
if (vp) memcpy(vp, buf, size);
return vp;
}
#ifndef HAVE_STRNSTR
static inline char *
strnstr (char *s1, const char *s2, size_t n)
{
char first = *s2++;
size_t s2len;
char *cp, *np;
if (first == '\0') /* Empty string means immediate match */
return s1;
s2len = strlen(s2); /* Does not count first */
for (cp = s1; *cp; cp = np + 1) {
	np = strchr(cp, first);
	if (np == NULL || (size_t) (np - s1) >= n) /* No match within the first n characters */
	    return NULL;
	if (s2len == 0)		/* s2 is only one character long */
	    return np;
	if (n - (np - s1) < s2len + 1) /* The full match must fit within the first n characters */
return NULL;
if (strncmp(np + 1, s2, s2len) == 0)
return np;
}
return NULL;
}
#endif /* HAVE_STRNSTR */
#ifndef HAVE_STRLCPY
/*
* strlcpy, for those that don't have it
*/
static inline size_t
strlcpy (char *dst, const char *src, size_t sz)
{
    size_t len = strlen(src);

    if (sz != 0) {
	size_t copy = (len < sz) ? len : sz - 1; /* Leave room for the trailing NUL */
	memmove(dst, src, copy);
	dst[copy] = '\0';
    }
    return len;
}
#endif /* HAVE_STRLCPY */
#endif /* LIBPSU_PSUSTRING_H */
|
// packages/web/src/components/table/Table.tsx
import React from 'react';
const Table: React.FC<React.TableHTMLAttributes<HTMLTableElement>> = ({ children, ...rest }) => {
return (
<table className="w-full table-auto rounded-md border-collapse border border-slate-500" {...rest}>
{children}
</table>
);
};
export default Table;
|
/**
* This job attempts to install a set of exported plug-ins or
* features into the current runtime.
*/
public class RuntimeInstallJob extends Job {
private FeatureExportInfo fInfo;
private ProvisioningUI ui;
/**
* Creates a new job that will install exported plug-ins. For a
 * successful install, specific options in the feature export info
 * object need to be set before the export operation; see
* {@link #modifyInfoForInstall(FeatureExportInfo)}
*
* @param jobName the name to use for this job
* @param info the info object describing what is being exported
*/
public RuntimeInstallJob(String jobName, FeatureExportInfo info) {
super(jobName);
fInfo = info;
// This provisioning UI manages the currently running profile.
ui = ProvisioningUI.getDefaultUI();
ui.manageJob(this, ProvisioningJob.RESTART_OR_APPLY);
}
/**
* Sets the export options required to make the export installable.
* This method should be called before the export operation takes
* place.
*
* @param info the feature info object that will be modified
*/
public static void modifyInfoForInstall(FeatureExportInfo info) {
info.exportSource = false;
info.useJarFormat = true;
info.exportMetadata = true;
info.qualifier = QualifierReplacer.getDateQualifier();
}
@Override
protected IStatus run(IProgressMonitor monitor) {
try {
ProvisioningSession session = ui.getSession();
SubMonitor subMonitor = SubMonitor.convert(monitor, PDEUIMessages.RuntimeInstallJob_Job_name_installing, 12 + (2 * fInfo.items.length));
// p2 needs to know about the generated repos
URI destination = new File(fInfo.destinationDirectory).toURI();
ui.loadArtifactRepository(destination, false, subMonitor.split(1));
IMetadataRepository metaRepo = ui.loadMetadataRepository(destination, false, subMonitor.split(1));
IProfileRegistry profileRegistry = (IProfileRegistry) session.getProvisioningAgent().getService(IProfileRegistry.SERVICE_NAME);
if (profileRegistry == null) {
return new Status(IStatus.ERROR, PDEPlugin.getPluginId(), PDEUIMessages.RuntimeInstallJob_ErrorCouldntOpenProfile);
}
IProfile profile = profileRegistry.getProfile(IProfileRegistry.SELF);
if (profile == null) {
return new Status(IStatus.ERROR, PDEPlugin.getPluginId(), PDEUIMessages.RuntimeInstallJob_ErrorCouldntOpenProfile);
}
			List<IInstallableUnit> toInstall = new ArrayList<>();
for (int i = 0; i < fInfo.items.length; i++) {
if (subMonitor.isCanceled()) {
return Status.CANCEL_STATUS;
}
subMonitor.subTask(NLS.bind(PDEUIMessages.RuntimeInstallJob_Creating_installable_unit, fInfo.items[i].toString()));
//Get the installable unit from the repo
String id = null;
String version = null;
if (fInfo.items[i] instanceof IPluginModelBase) {
id = ((IPluginModelBase) fInfo.items[i]).getPluginBase().getId();
version = ((IPluginModelBase) fInfo.items[i]).getPluginBase().getVersion();
} else if (fInfo.items[i] instanceof IFeatureModel) {
					id = ((IFeatureModel) fInfo.items[i]).getFeature().getId() + ".feature.group"; //$NON-NLS-1$
version = ((IFeatureModel) fInfo.items[i]).getFeature().getVersion();
}
if (id == null && version == null) {
return new Status(IStatus.ERROR, PDEPlugin.getPluginId(), NLS.bind(PDEUIMessages.RuntimeInstallJob_ErrorCouldNotGetIdOrVersion, fInfo.items[i].toString()));
}
// Use the same qualifier replacement as the export operation used
version = QualifierReplacer.replaceQualifierInVersion(version, id, null, null);
// Check if the right version exists in the new meta repo
Version newVersion = Version.parseVersion(version);
IQueryResult<?> queryMatches = metaRepo.query(QueryUtil.createIUQuery(id, newVersion), monitor);
if (queryMatches.isEmpty()) {
return new Status(IStatus.ERROR, PDEPlugin.getPluginId(), NLS.bind(PDEUIMessages.RuntimeInstallJob_ErrorCouldNotFindUnitInRepo, new String[] { id, version }));
}
IInstallableUnit iuToInstall = (IInstallableUnit) queryMatches.iterator().next();
// Find out if the profile already has that iu installed
queryMatches = profile.query(QueryUtil.createIUQuery(id), subMonitor.split(1));
if (queryMatches.isEmpty()) {
// Just install the new iu into the profile
toInstall.add(iuToInstall);
} else {
// There is an existing iu that we need to replace using an installable unit patch
IInstallableUnit existingIU = (IInstallableUnit) queryMatches.iterator().next();
toInstall.add(createInstallableUnitPatch(existingIU, newVersion, profile, subMonitor.split(1)));
}
subMonitor.worked(2);
}
if (toInstall.size() > 0) {
InstallOperation operation = ui.getInstallOperation(toInstall, new URI[] { destination });
operation.resolveModal(subMonitor.split(5));
IStatus status = operation.getResolutionResult();
if (status.getSeverity() == IStatus.CANCEL || !(status.isOK() || status.getSeverity() == IStatus.INFO)) {
return status;
}
ProvisioningJob job = operation.getProvisioningJob(null);
status = job.runModal(subMonitor.split(5));
return status;
}
if (subMonitor.isCanceled()) {
return Status.CANCEL_STATUS;
}
return Status.OK_STATUS;
} catch (ProvisionException e) {
return e.getStatus();
}
}
/**
* Creates an installable unit patch that will change the version of
* existing requirements with the given version.
*
* @param existingIU an existing plug-in that this patch will replace, used to generate lifecycle
* @param newVersion the new version to require
* @param profile the profile we are installing in
* @param monitor progress monitor
* @return an installable unit patch
*/
private IInstallableUnitPatch createInstallableUnitPatch(IInstallableUnit existingIU, Version newVersion, IProfile profile, IProgressMonitor monitor) {
InstallableUnitPatchDescription iuPatchDescription = new MetadataFactory.InstallableUnitPatchDescription();
String id = existingIU.getId();
		iuPatchDescription.setId(id + ".patch"); //$NON-NLS-1$
iuPatchDescription.setProperty(IInstallableUnit.PROP_NAME, NLS.bind(PDEUIMessages.RuntimeInstallJob_installPatchName, id));
iuPatchDescription.setProperty(IInstallableUnit.PROP_DESCRIPTION, PDEUIMessages.RuntimeInstallJob_installPatchDescription);
Version patchVersion = Version.createOSGi(1, 0, 0, QualifierReplacer.getDateQualifier());
iuPatchDescription.setVersion(patchVersion);
iuPatchDescription.setUpdateDescriptor(MetadataFactory.createUpdateDescriptor(iuPatchDescription.getId(), new VersionRange(Version.createOSGi(0, 0, 0), true, patchVersion, false), 0, null));
		ArrayList<IProvidedCapability> list = new ArrayList<>(1);
list.add(MetadataFactory.createProvidedCapability(IInstallableUnit.NAMESPACE_IU_ID, iuPatchDescription.getId(), iuPatchDescription.getVersion()));
iuPatchDescription.addProvidedCapabilities(list);
IRequirement applyTo = MetadataFactory.createRequirement(IInstallableUnit.NAMESPACE_IU_ID, id, null, null, false, false);
IRequirement newValue = MetadataFactory.createRequirement(IInstallableUnit.NAMESPACE_IU_ID, id, new VersionRange(newVersion, true, newVersion, true), null, false, false);
iuPatchDescription.setRequirementChanges(new IRequirementChange[] { MetadataFactory.createRequirementChange(applyTo, newValue) });
iuPatchDescription.setApplicabilityScope(new IRequirement[0][0]);
// Locate IU's that appoint the existing version of the IU that we are patching.
// Add lifecycle requirement on a changed bundle, if it gets updated, then we should uninstall the patch
		IQueryResult<?> queryMatches = profile.query(QueryUtil.createMatchQuery("requirements.exists(rc | $0 ~= rc)", new Object[] { existingIU }), monitor); //$NON-NLS-1$
if (!queryMatches.isEmpty()) {
IInstallableUnit lifecycleUnit = (IInstallableUnit) queryMatches.iterator().next();
iuPatchDescription.setLifeCycle(MetadataFactory.createRequirement(IInstallableUnit.NAMESPACE_IU_ID, lifecycleUnit.getId(), new VersionRange(lifecycleUnit.getVersion(), true, lifecycleUnit.getVersion(), true), null, false, false, false));
}
iuPatchDescription.setProperty(InstallableUnitDescription.PROP_TYPE_PATCH, Boolean.TRUE.toString());
return MetadataFactory.createInstallableUnitPatch(iuPatchDescription);
}
} |
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE InstanceSigs #-}
module Main where
import Control.Monad.State.Lazy
data MockState = MockState { outputs :: [String]} deriving (Show, Eq)
type SimpleMockedIO = State MockState
runMockIO :: SimpleMockedIO a -> MockState -> (a, MockState)
runMockIO = runState
type NewIO = IO
class Monad m => MonadCustomLogger m where
logDebug :: String -> m ()
instance MonadCustomLogger IO where
logDebug x = putStrLn ("Debug!: " <> x)
instance MonadCustomLogger SimpleMockedIO where
logDebug msg = do
curState <- get
let newState = curState { outputs = msg:outputs curState}
put newState
return ()
main :: MonadCustomLogger m => m ()
main = logDebug "hello world"
test :: MockState
test = do
let env = MockState []
let internalTest :: MonadCustomLogger m => m ()
internalTest = do
logDebug "1"
logDebug "2"
logDebug "3"
let (res, finalState) = runMockIO internalTest env
finalState
|
t = int(input())
cases = []
for case in range(t):
ncaves = int(input())
caves = []
for c in range(ncaves):
caves.append(list(map(int, input().split()))[1:])
cases.append(caves)
def sort_by_max(max_size):
return max_size[0]
def solve(case):
    # Offset every value in a cave by its index and keep the cave's
    # maximum offset value together with the cave's length.
    max_and_size_of_caves = []
    for cave in case:
        for i in range(len(cave)):
            cave[i] -= i
        max_and_size_of_caves.append([max(cave), len(cave)])
    # Visit caves in increasing order of their maximum offset value,
    # tracking the power accumulated from the caves cleared so far.
    max_and_size_of_caves.sort(key=sort_by_max)
    run = []
    power = 0
    for max_size in max_and_size_of_caves:
        run.append(max_size[0] - power)
        power += max_size[1]
    # The answer is the largest shortfall plus one.
    return max(run) + 1
for case in cases:
print(solve(case))
|
The use of loop inductances in signal integrity modeling
For many years, partial inductances have been used in circuit models for signal integrity. However, the author's experience has indicated that partial inductances have more drawbacks than benefits for such work. This paper describes these drawbacks, and proposes a different method of including inductive effects in signal integrity modeling, based on loop inductances. A discussion of the theory and an example are given. |
use crate::prelude::*;
use rome_formatter::write;
use crate::FormatNodeFields;
use rome_js_syntax::JsTemplate;
use rome_js_syntax::JsTemplateFields;
impl FormatNodeFields<JsTemplate> for FormatNodeRule<JsTemplate> {
fn fmt_fields(node: &JsTemplate, f: &mut JsFormatter) -> FormatResult<()> {
let JsTemplateFields {
tag,
type_arguments,
l_tick_token,
elements,
r_tick_token,
} = node.as_fields();
write![
f,
[
tag.format(),
type_arguments.format(),
line_suffix_boundary(),
l_tick_token.format(),
elements.format(),
r_tick_token.format()
]
]
}
}
|
package service
import (
"context"
"errors"
"fmt"
"github.com/KoteiIto/wire-sample/pkg/repository"
)
type AuthenticateInput struct {
Email string
Password string
}
type AuthenticateOutput struct {
UserId int64
OnetimeToken string
}
type IAuthService interface {
Authenticate(ctx context.Context, input *AuthenticateInput) (*AuthenticateOutput, error)
}
type AuthService struct {
userRepository repository.IUserRepository
onetimeTokenRepository repository.IOnetimeTokenRepository
}
var _ IAuthService = (*AuthService)(nil)
func NewAuthService(
userRepository *repository.UserRepository,
onetimeTokenRepository *repository.OnetimeTokenRepository,
) *AuthService {
return &AuthService{
userRepository: userRepository,
onetimeTokenRepository: onetimeTokenRepository,
}
}
func NewAuthServiceFromInterface(
userRepository repository.IUserRepository,
onetimeTokenRepository repository.IOnetimeTokenRepository,
) *AuthService {
return &AuthService{
userRepository: userRepository,
onetimeTokenRepository: onetimeTokenRepository,
}
}
func NewAuthServiceWithCleanup(
userRepository *repository.UserRepository,
onetimeTokenRepository *repository.OnetimeTokenRepository,
) (*AuthService, func()) {
cleanup := func() {
fmt.Println("clean up end")
}
return &AuthService{
userRepository: userRepository,
onetimeTokenRepository: onetimeTokenRepository,
}, cleanup
}
func (s *AuthService) Authenticate(ctx context.Context, input *AuthenticateInput) (*AuthenticateOutput, error) {
user, err := s.userRepository.FindByEmail(ctx, input.Email)
if err != nil {
return nil, err
}
if input.Password != user.Password {
return nil, errors.New("passwordが一致しません")
}
token, err := s.onetimeTokenRepository.Issue(user)
if err != nil {
return nil, err
}
return &AuthenticateOutput{
UserId: user.UserId,
OnetimeToken: token,
}, nil
}
|
def add_associated_tools(tool_list):
for tool in tool_list:
if tool.name in rejected_tools:
continue
for technique in tool.techniques:
curr_id = technique.id.split('.')[0]
if ('deprecated' not in technique.description) and (tool.name not in keyword_dict[curr_id]):
keyword_dict[curr_id].append(tool.name)
if tool.additional_names is not None:
for name in tool.additional_names:
                if name in rejected_tools:
continue
keyword_dict[technique.id.split('.')[0]].append(name) |
n = int(input())
for _ in range(n):
s_str, c_str = input().split()
s, c = list(s_str), list(c_str)
arranged = list(sorted(s))
idx = -1
while idx + 1 < len(s) and s[idx + 1] == arranged[idx + 1]:
idx += 1
idx += 1
if idx < len(s):
last_idx = s_str.rindex(arranged[idx])
s[idx], s[last_idx] = s[last_idx], s[idx]
if ''.join(s) < ''.join(c):
print(''.join(s))
else:
print("---")
|
Who are the richest rappers alive today? We spent the last week doing research and we are very excited to release the results! We started off with more than 50 artists in the hip hop industry, then narrowed the list down to the top 20 richest rappers alive today. You may recognize a lot of these names and faces, but you are going to be shocked by how much cash the richest rappers have. Some of these rappers have been in the game for more than 20 years, while others are relatively new and already banking big. The moguls on this list control over $3 billion. The vast majority of the people on this list are African-American, which unfortunately is rare for a list on Celebrity Net Worth. One common theme among these richest rappers is that they all have leveraged their popularity to launch new business ventures. Many of these artists made far more money outside of hip hop than they ever did selling records. There are actually 22 artists on this list thanks to a handful of ties; we also included three bonus celebrities who are not rappers but have had a major influence on the genre. The secret to being one of the richest rappers seems to be in clothing lines, owning your own record label and investing in commercial products you sell to the mass public. So if you are planning on being a rapper or a hip hop mogul, follow the trail these men have blazed and you will be very happy and very rich!
***Please note that this article was published in 2012! Here is our more recent list of the richest rappers alive today: The Richest Rappers In The World 2014***
#20 Rick Ross – Net Worth $25 Million
#19 Tie between T-Pain and T.I – Net Worth $30 Million
#18 Nelly – Net Worth $55 Million
#17 Busta Rhymes – Net Worth $60 Million
#16 Ludacris – Net Worth $65 Million
#15 Beastie Boys – Net Worth $75 Million Each
#14 Timbaland – Net Worth $75 Million
#13 Pharrell Williams – Net Worth $77.5 Million
#12 Tie between LL Cool J and Akon – Net Worth $80 Million
#11 Kanye West – Net Worth $90 Million
#10 Lil Wayne – Net Worth $95 Million
#9 Ice Cube – Net Worth $100 Million
#8 Snoop Dogg – Net Worth $110 Million
#7 Birdman – Net Worth $115 Million
#6 Eminem – Net Worth $120 Million
#5 50 Cent – Net Worth $130 Million
#4 Dr. Dre – Net Worth $260 Million
#3 Master P – Net Worth $350 Million
#2 Jay-Z – Net Worth $475 Million
#1 Diddy – Net Worth $500 Million |
/**
* Adds annotations to the merged annotation storage of this interface.
* Merged annotations are passed to all output slots.
*
* @param annotations the annotations
* @param strategy strategy to apply on merging existing values
*/
public void addMergedTextAnnotations(Map<String, String> annotations, JIPipeTextAnnotationMergeMode strategy) {
for (Map.Entry<String, String> entry : annotations.entrySet()) {
JIPipeTextAnnotation existing = this.mergedTextAnnotations.getOrDefault(entry.getKey(), null);
if (existing == null) {
this.mergedTextAnnotations.put(entry.getKey(), new JIPipeTextAnnotation(entry.getKey(), entry.getValue()));
} else {
String newValue = strategy.merge(existing.getValue(), entry.getValue());
this.mergedTextAnnotations.put(entry.getKey(), new JIPipeTextAnnotation(entry.getKey(), newValue));
}
}
} |
Loot Box Updates
We always want the experience of opening an in-game loot box to feel exciting and rewarding, and in our latest patch we’re working to improve that experience in two key ways. First, we’re drastically reducing the amount of duplicates players will receive when opening loot boxes. Second, to compensate for this reduction of duplicate items, we’re also increasing the overall amount of credits players will receive from loot boxes. On average, players should be earning just as many credits, if not slightly more, from loot boxes than they did prior to these changes.
To help us test this update, all players who log in to the PTR this patch cycle will receive five (5) PTR Loot Boxes. These are standard loot boxes that will only be available on the PTR. Any items earned from loot boxes or unlocked via credits on the PTR will not transfer over to your live account.
Hey everyone,

We recently announced that major improvements are on the way for in-game loot boxes and have made those updates available for testing with our latest PTR patch (1.13.0). Since that announcement, we’ve seen several of you noting that you’ll be stocking up on loot boxes (or holding off on opening the ones you already have) in preparation for this update going live. Before you do that, we ask that you please keep the following functionality in mind:

This means that any loot box you earn or purchase now—or at any point before our loot box update is released—will not be affected by the changes listed below. Only those loot boxes that are earned or purchased after patch 1.13.0 is live and playable on your gaming platform will be eligible for the following improvements.

To read the full PTR patch notes, click here. Please note that we don’t have an ETA for when patch 1.13.0 will be released. Stay tuned to playoverwatch.com for related game news and updates.

Thank you!
Using a Morphological Database to Increase the Accuracy in POS Tagging
We experiment with extending the dictionaries used by three open-source part-of-speech taggers, by using data from a large Icelandic morphological database. We show that the accuracy of the taggers can be improved significantly by using the database. The reason is that the unknown word ratio reduces dramatically when adding data from the database to the taggers' dictionaries. For the best performing tagger, the overall tagging accuracy increases from the base tagging result of 92.73% to 93.32%, when the unknown word ratio decreases from 6.8% to 1.1%. When we add reliable frequency information to the tag profiles for some of the words originating from the database, we are able to increase the accuracy further to 93.48% - this is equivalent to 10.3% error reduction compared to the base tagger.
Introduction
In general, part-of-speech (PoS) taggers can be categorised into two types. First, data-driven taggers, i.e. taggers that are trained on pre-tagged corpora and are both language and tagset independent, e.g. (Brants, 2000; Toutanova et al., 2003; Shen et al., 2007). Second, linguistic rule-based taggers, which are developed "by hand" using linguistic knowledge, with the purpose of tagging a specific language using a particular tagset, e.g. (Karlsson et al., 1995; Loftsson, 2008).
All taggers use a particular tagset T and rely on a dictionary D containing the tag profile (ambiguity class) T_w for each word w. A tag profile T_w indicates which tags are assignable to w, thus T_w ⊂ T. Essentially, for each word w, a tagger disambiguates T_w by selecting (or removing all but) one tag from it with regard to context. The dictionary D is derived by a data-driven tagger during training, and derived or built during development of a linguistic rule-based tagger.
When tagging new text, PoS taggers frequently encounter words that are not in D, i.e. so-called unknown words. An unknown word u can be quite problematic for a tagger, because the tag profile for u needs to be guessed. In most cases, PoS taggers therefore contain a special module, called an unknown word guesser, to generate the tag profile for unknown words. Frequently, the guessing of the tag profile for unknown words is incorrect and therefore the tagging accuracy for these words is considerably lower than the tagging accuracy for known words. To increase the overall tagging accuracy of PoS taggers, one might therefore try to refine the underlying unknown word guessers. Another approach is simply to try to minimise the ratio of unknown words by extending the dictionaries used by the taggers.
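To make the dictionary lookup concrete, the following sketch (ours, not from the paper; the words, tags and toy suffix rules are illustrative assumptions) shows how a tagger might retrieve the tag profile T_w for a known word and fall back to an ending-based guess for an unknown word.

# Toy tag-profile dictionary with an unknown-word fallback (illustrative only).
TAG_DICT = {
    "konu": ["nveþ", "nveo", "nvee"],    # tag profile T_w, most frequent tag first
    "hestur": ["nken"],
}

SUFFIX_GUESSES = {                        # naive ending-based guesser
    "ur": ["nken"],
    "u": ["nveþ", "nveo", "nvee"],
}

def tag_profile(word):
    if word in TAG_DICT:                  # known word: return its stored profile
        return TAG_DICT[word]
    for suffix, tags in SUFFIX_GUESSES.items():   # unknown word: guess from the ending
        if word.endswith(suffix):
            return tags
    return ["nken"]                       # last resort: a default open-class tag

print(tag_profile("konu"))                # known word
print(tag_profile("bollu"))               # unknown word, guessed from its ending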
In this paper, we use the latter approach. We experiment with extending the dictionaries used by three PoS taggers for Icelandic with data from a large morphological database (Bjarnadóttir, 2005). Our logical assumption is that the overall tagging accuracies of the taggers can be increased by this method, but we are also interested in how extended dictionaries affect the accuracy for unknown words and known words separately.
The three taggers used in our experiments are: i) the linguistic rule-based tagger IceTagger (Loftsson, 2008); ii) TriTagger, a re-implementation of the statistical tagger TnT by Brants (2000); and iii) a serial combination of the two (Loftsson et al., 2009).
The morphological database does not contain any frequency information for the tags in the tag profile for each word, but, nevertheless, we show that the tagging accuracy of the taggers can be improved significantly by using the database. The reason is that when we add most of the data from the database to the taggers' dictionaries the unknown word ratio decreases dramatically, from 6.8% to 1.1%. In that case, the overall tagging accuracy of the best performing tagger, the serial combination of IceTagger and TriTagger, increases from the base tagging result of 92.73% to 93.32%. When we add reliable frequency information, derived from a corpus, to the tag profiles for a part of the words originating from the database, we are able to increase the accuracy further to 93.48% -this is equivalent to 10.3% error reduction compared to the base tagger.
Interestingly, it seems that very few papers exist in the literature regarding extensions of the dictionaries used by PoS taggers. In (Rupnik et al., 2008), a dictionary derived from training is essentially extended by using a backup lexicon extracted from a large corpus (which is different from the training corpus). In contrast, we use a morphological database to extend a tagger's dictionary, but use a corpus for deriving frequency information for part of the dictionary entries. In (Tufis et al., 2008), an unknown word u, and its tag profile and lemma obtained by a tagger when tagging new texts, is used by a morphological generator to generate tag profiles for new word forms that are morphologically related to u. The dictionary is thus extended incrementally, each time new text is tagged. In contrast, since we have access to a large morphological database, we extend a tagger's dictionary once and for all.
The morphological database
At the Árni Magnússon Institute for Icelandic Studies, a comprehensive full form database of modern Icelandic inflections has been developed (Bjarnadóttir, 2005). Its Icelandic abbreviation is BÍN ("Beygingarlýsing íslensks nútímamáls"), and henceforth we use that term. BÍN contains about 280,000 paradigms, with over 5.8 million inflectional forms. The output from the database used in this project contains lemma, word form, word class, and morphological features for common nouns, proper nouns, adjectives, verbs, and adverbs. It is important to note that the database does, however, not contain any frequency information for the word forms.
A web interface for BÍN is available at http://bin.arnastofnun.is, from where a text file in the format used in this project can be downloaded. Below are 16 lines from the file, demonstrating entries for the lemma "hestur" 'horse': The exact meaning of the data in each column is not important for our discussion, but we point out that the lemma is in the first column, gender is in the third column ("kk"=masculine), the word form is in the fifth column, and the morphological features case, number and definiteness are in the last column (for example, "NF"=nominative, "ET"=singular, "gr"=definite article).
The corpus and the taggers used
The Icelandic Frequency Dictionary (IFD) corpus (Pind et al., 1991) has been used to train and test taggers for Icelandic (Helgadóttir, 2005;Loftsson, 2008;Dredze and Wallenberg, 2008;Loftsson et al., 2009). The corpus contains about 590,000 tokens, and its underlying tagset about 700 tags, of which 639 tags actually appear in the corpus. The tags are character strings where each character has a particular function. The first character denotes the word class. For each word class there is a predefined number of additional characters (at most six), which describe morphological features, like gender, number and case for nouns; degree and declension for adjectives; voice, mood and tense for verbs, etc. To illustrate, consider the word form "hestur" 'horse'. The corresponding tag is "nken", denoting noun (n), masculine (k), singular (e), and nominative (n) case.
As mentioned in Section 1, we use one linguistic rule-based tagger (IceTagger), one data-driven tagger (TriTagger), and a serial combination of the two in our experiments. Both IceTagger and TriTagger are implemented in Java and are part of the open-source IceNLP toolkit.
IceTagger is reductionistic in nature, i.e. it removes inappropriate tags from the tag profile T_w for a specific word w in a given context. IceTagger first applies local rules for initial disambiguation and then uses a set of heuristics (global rules) for further disambiguation. The tag profile for each word used by IceTagger is ordered by the frequency of the tags - the first tag listed is the most frequent one and the last tag is the least frequent one. If a word is still ambiguous after the application of the heuristics, the default heuristic is simply to choose the most frequent tag (the first tag) for the word. An important part of IceTagger is its unknown word guesser, IceMorphy. It guesses the tag profile for unknown words by applying morphological analysis and ending analysis. In addition, IceMorphy can fill in the tag profile gaps in the dictionary for words belonging to certain morphological classes (Loftsson, 2008).
TriTagger is a re-implementation of the well known Hidden Markov Model (HMM) tagger TnT by Brants (2000). TriTagger uses a trigram model to find the sequence of tags for words in a sentence which maximises the product of contextual probabilities P(t_i | t_{i-2}, t_{i-1}) and lexical probabilities P(w_i | t_i):

\arg\max_{t_1 \ldots t_n} \prod_{i=1}^{n} P(t_i \mid t_{i-2}, t_{i-1}) \, P(w_i \mid t_i)    (1)

In the above equation, w_i denotes word i in a sentence of length n (1 ≤ i ≤ n) and t_i denotes the tag for w_i. The probabilities are derived using maximum likelihood estimation based on the frequencies of tags found during training.
HMM taggers handle unknown words by setting tag probabilities according to words' suffixes. The term suffix is here defined as a final sequence of characters of a word. TnT, and thus TriTagger, generate probability distributions for suffixes of various lengths. The distribution for particular suffixes is based on words in the training data that share the same suffix. The reader is referred to (Brants, 2000) for the details of suffix handling.
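As a rough illustration of how the product in Equation 1 is evaluated for a candidate tag sequence (a sketch of ours, not the TnT/TriTagger implementation; the probability tables, example words and smoothing floor are invented), consider:

import math

# Hypothetical maximum-likelihood estimates; a real tagger derives these from
# training-corpus counts, smooths them, and estimates unknown words from suffixes.
TRIGRAM_P = {("<s>", "<s>", "nken"): 0.4, ("<s>", "nken", "sfg3en"): 0.3}
LEXICAL_P = {("hestur", "nken"): 0.9, ("hleypur", "sfg3en"): 0.05}

def score(words, tags, floor=1e-8):
    # Log of prod_i P(t_i | t_{i-2}, t_{i-1}) * P(w_i | t_i), cf. Equation 1.
    history = ["<s>", "<s>"]
    log_p = 0.0
    for w, t in zip(words, tags):
        contextual = TRIGRAM_P.get((history[-2], history[-1], t), floor)
        lexical = LEXICAL_P.get((w, t), floor)
        log_p += math.log(contextual) + math.log(lexical)
        history.append(t)
    return log_p

print(score(["hestur", "hleypur"], ["nken", "sfg3en"]))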
Below, we exemplify the tag profiles stored in the dictionaries for IceTagger and TriTagger for a specific word "konu" 'woman':

konu nveþ nveo nvee
konu 122 nveþ 44 nveo 42 nvee 36

The first tag profile is stored in the dictionary for IceTagger. The possible tags are "nveþ", "nveo", and "nvee" (denoting noun, feminine, singular, dative/accusative/genitive), sorted by decreasing frequency. The second tag profile is stored in the dictionary for TriTagger. It contains similar information, but, additionally, frequency information is attached to both the word itself and each possible tag.
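A small parsing sketch for the two entry formats above (ours; the actual IceNLP dictionary readers are not shown in the paper):

def parse_icetagger_entry(line):
    # "konu nveþ nveo nvee" -> word plus its frequency-ordered tag profile
    word, *tags = line.split()
    return word, tags

def parse_tritagger_entry(line):
    # "konu 122 nveþ 44 nveo 42 nvee 36" -> word, word frequency, {tag: frequency}
    fields = line.split()
    word, word_freq = fields[0], int(fields[1])
    tag_freqs = {fields[i]: int(fields[i + 1]) for i in range(2, len(fields), 2)}
    return word, word_freq, tag_freqs

print(parse_icetagger_entry("konu nveþ nveo nvee"))
print(parse_tritagger_entry("konu 122 nveþ 44 nveo 42 nvee 36"))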
Base tagging results
We have previously shown (Loftsson et al., 2009) that a significant improvement in tagging accuracy is obtainable by running a serial combination of IceTagger and a HMM tagger (TriTagger). Specifically, the best result was obtained by making the HMM perform initial disambiguation only with regard to the word class (the first letter of a tag), then running IceTagger, and finally by making the HMM disambiguate words that IceTagger was not able to fully disambiguate. This tagger is called HMM+Ice+HMM.
In our current experiments, we use 10-fold cross-validation on the exact same training and test splits of the so-called corrected version of the IFD corpus used by Loftsson et al. (2009). Each test corpus contains about 10% of the tokens from the IFD, while the corresponding training corpus contains about 90% of the tokens. The average unknown word ratio using this data split is about 6.8%.
We use a version of the corrected IFD corpus in which type information for proper nouns (named-entity classification) has been removed, and additionally we only use one tag for numerical constants. The reason for these changes is to make the tagset of the corpus comparable to tagsets for other languages. These changes reduce the size of the tagset from about 700 tags to about 600 tags, and the number of tags actually appearing in the IFD reduces from 639 tags to 567. Table 1 shows the average accuracy of the three taggers. In this table (and in all the ones that follow), the average accuracy is based on testing using the first nine test corpora, because the tenth one was used for developing IceTagger. We consider the accuracy figures in Table 1 base tagging results - in the experiments described in the next section we try to improve on these figures.
The experiments
In this section, we describe the setup and results of two experiments. First, we extend the dictionaries used by the three taggers by using data from the morphological database BÍN. Second, we add reliable frequency information to some of the dictionary entries (tag profiles).
Extending the dictionaries
This experiment is in two parts. First, we generate a file F_1 by extracting only lemmata from the database output described in Section 2. F_1 contains about 280,000 lemmata. To clarify, only the first line in the example output shown in Section 2 is then included in F_1. Second, we drop the lemmata condition and generate a file F_2 by selecting most of the word forms from the database output. F_2 contains about 5.3 million rows.
To generate an extended dictionary for a tagger (classifier) C using data from F_1, we perform the following (the same procedure applies when using F_2):

1. Derive a dictionary from F_1, containing words and their corresponding tag profiles. Symbols denoting morphological features in F_1 are mapped to the symbols used in the IFD tagset. We call the resulting dictionary D_BIN.

2. Combine D_BIN with the dictionary D generated by a tagger C during training (the number of entries in D is about 55,000, on the average). The result is a new dictionary D_EXT. If a word exists in both D and D_BIN then only the entry from D appears in D_EXT.

3. Test tagger C using dictionary D_EXT.

Table 2: Average tagging accuracy (%) using dictionaries extended with lemmata only from BÍN. Average ratio of unknown words in testing is about 5.3%.
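As a sketch of step 1 above (ours; the column layout follows the description in Section 2, but the semicolon separator, the contents of the second and fourth columns, and the feature-to-tag mapping are simplifying assumptions covering only a small fragment of the tagset):

# Turn one BÍN row into an IFD-style dictionary entry (illustrative fragment only;
# e.g. the definite-article marker "gr" and non-noun word classes are not handled).
GENDER = {"kk": "k", "kvk": "v", "hk": "h"}            # masculine / feminine / neuter
NUMBER = {"ET": "e", "FT": "f"}                        # singular / plural
CASE = {"NF": "n", "ÞF": "o", "ÞGF": "þ", "EF": "e"}   # nom / acc / dat / gen

def bin_row_to_entry(row):
    lemma, _id, gender, _domain, word_form, features = row.split(";")
    case, number = features[:-2], features[-2:]        # e.g. "NFET" -> "NF", "ET"
    tag = "n" + GENDER[gender] + NUMBER[number] + CASE[case]
    return word_form, tag

print(bin_row_to_entry("hestur;12345;kk;alm;hestur;NFET"))   # -> ('hestur', 'nken')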
The above description holds when generating an extended dictionary for IceTagger, a tagger which does not need frequency information in the tag profile for words. In the case of TriTagger, we simply assume a uniform distribution, i.e. we mark each tag in the tag profile T_w for word w with the frequency 1. Note that for TriTagger, extending the dictionary only affects the lexical probabilities from Equation 1 - the contextual probabilities remain unchanged.
Recall (from Section 3) that HMM taggers handle unknown words by generating probability distributions for suffixes of various lengths using the words in the training data. We want the generation of these probability distributions to depend only on the data from D (from the IFD corpus), not on D_BIN as well. The reason is twofold. First, the IFD corpus is large enough for deriving reliable suffix probability distributions. Second, using all the words from a very large dictionary (like D_EXT) to generate the distributions significantly slows down the tagging process. This issue demonstrates the importance of having access to open-source software. We simply changed the loading module of TriTagger such that it does not use all dictionary entries for suffix handling. If the loading module finds a special entry in the dictionary (essentially a specially marked comment) it does not use the succeeding entries for suffix handling. We put the special entry into D_EXT after the last entry from D and thus before the first entry from D_BIN.
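A minimal sketch of the merge and the marker trick (ours; the file format and the marker string are assumptions, not the actual IceNLP format):

def write_extended_dictionary(d_entries, d_bin_entries, out_path,
                              marker="[no-suffix-training-below]"):
    # d_entries / d_bin_entries map a word form to its tag-profile line.
    # Entries from D are written first and win over D_BIN; everything after the
    # marker is skipped by the loader when building suffix distributions.
    with open(out_path, "w", encoding="utf-8") as out:
        for word, profile in d_entries.items():
            out.write(f"{word} {profile}\n")
        out.write(marker + "\n")
        for word, profile in d_bin_entries.items():
            if word not in d_entries:        # D takes precedence over D_BIN
                out.write(f"{word} {profile}\n")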
Let us first consider the case of using file F_1 for extending the dictionaries, i.e. when only extracting lemmata from the database output. In that case, the resulting D_BIN contains about 260,000 entries. Table 2 shows the accuracy of the taggers when using this version of the extended dictionary.
Comparing the results from Tables 2 and 1, we note the following: • The average unknown word ratio decreases by about 1.5% (from about 6.8% to about 5.3%).
• The accuracy for known words decreases in the three taggers. The most probable reason is that the tag profile for some of the lemmata entries coming from D_BIN contains gaps (see Section 3). This can be attributed to the fact that only a single line from the database output is selected when extracting the lemmata, but in many cases a lemma can have multiple analyses (tags). Note that this decrease in accuracy for known words is considerably higher in TriTagger (0.65 percentage points) than in IceTagger (0.24 percentage points). This is because the unknown word guesser IceMorphy, used by IceTagger, can fill in the tag profile gaps for certain morphological classes, as mentioned in Section 3.
• The accuracy for unknown words increases in all the three taggers - the highest gain (3.42 percentage points) is obtained by IceTagger. For the case of IceTagger the reason is that IceMorphy first applies morphological analysis to unknown words (before trying ending analysis). For an unknown word u, IceMorphy searches for a morphologically related word (a known word) to u in its dictionary, i.e. a word containing the same stem but a different morphological suffix. The added lemmata entries can thus serve as related words for unknown words and since the morphological analysis module of IceTagger is quite accurate (Loftsson, 2008), the added lemmata entries help to increase the tagging accuracy of unknown words.
• The accuracy for all words increases in both IceTagger and HMM+Ice+HMM, but only by 0.20 and 0.16 percentage points, respectively. Obviously, the decreased accuracy for known words "cuts back" the gain obtained in the accuracy for unknown words. TriTagger's relatively large reduction in accuracy for known words is to blame for the reduction in its accuracy for all words.
Let us now consider the second case, when using file F_2 for extending the dictionaries. F_2 contains most of the entries from the database and the resulting D_BIN contains about 2.6 million entries.

Table 3: Average tagging accuracy (%) using dictionaries extended with most of the data from BÍN. Average ratio of unknown words in testing is 1.1%.

Table 3 shows the accuracy of the taggers when using this large version of the extended dictionary. Comparing the results from Tables 3 and 1, we note the following:

• The average unknown word ratio drops down to 1.1%. Concurrently, the accuracy for unknown words decreases substantially in all the three taggers. This is because the unknown word ratio drops dramatically and only "hard" unknown words remain - mostly proper nouns and foreign words.
• The accuracy for known words decreases in the three taggers by 0.15-0.22 percentage points. This is a lower decrease than when using only lemmata entries from BÍN (see Table 2) and can be explained by the fact that in this case the added entries from BÍN should not contain tag profile gaps. Why do we then see a slight decrease in accuracy for known words? Recall that BÍN does not contain any frequency information and therefore, for the added dictionary entries, we had to: i) assume a uniform distribution of tags in the tag profile for TriTagger, and ii) assume no specific order for the tags in the tag profile for IceTagger (see the discussion on the order of the tags in Section 3). This is the most probable reason for the slight reduction in the tagging accuracy of known words.
• The accuracy for all words increases significantly in all the three taggers, about 0.4-0.8 percentage points. This result confirms our logical assumption that the tagging accuracy can be increased by extending the dictionaries of taggers -even in the absence of reliable frequency information.
Adding frequency information
Recall from Section 3 that the tag profile in the dictionary used by IceTagger is assumed to be sorted. When a word cannot be fully disambiguated, this enables IceTagger to select the most frequent tag (the first tag) in the tag profile for the word. On the other hand, when frequency information is missing, as is the case for the BÍN data, the first tag of the remaining tags in the tag profile may or may not be the most frequent tag. Thus, when IceTagger applies the default heuristic and chooses the first tag, that choice may be arbitrary. For a HMM tagger, the lack of reliable frequency information in a tag profile for a word can also cause problems. This follows directly from Equation 1, i.e. the term P(w_i | t_i) stands for lexical probabilities which are computed using maximum likelihood estimation from a dictionary containing frequency information for each tag in the tag profiles for words.
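For instance, lexical probabilities can be estimated from a frequency-annotated entry such as the TriTagger "konu" line in Section 3 (a sketch of ours; the per-tag corpus totals are invented and smoothing is omitted):

# Maximum-likelihood lexical probabilities P(w | t) from a frequency-annotated entry.
TAG_TOTALS = {"nveþ": 9000, "nveo": 8000, "nvee": 5000}     # invented corpus counts
ENTRY = {"word": "konu", "tags": {"nveþ": 44, "nveo": 42, "nvee": 36}}

def lexical_probability(entry, tag):
    return entry["tags"].get(tag, 0) / TAG_TOTALS[tag]

for tag in ENTRY["tags"]:
    print(tag, round(lexical_probability(ENTRY, tag), 5))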
In order to get reliable frequency information for the BÍN data, we use a tagged corpus named MÍM ("Mörkuð íslensk málheild"; http://mim.hi.is) which is being developed at the Árni Magnússon Institute for Icelandic Studies. The final size of the MÍM corpus will be 25 million tokens, but the version that we use contains about 17 million tokens.
Recall from Section 4.1 that D_BIN denotes a dictionary derived from BÍN. From the MÍM corpus, we derive a frequency dictionary D_MIM. We then create a new dictionary D_NEW (based on D_BIN) in which frequency information for some of its tag profiles comes from D_MIM. Specifically, we use the following procedure:

1. Each word w in D_BIN is looked up in D_MIM. If w is not found in D_MIM, then w and its tag profile are copied to D_NEW. Each tag in the tag profile for w is given the frequency 1 (i.e. a uniform distribution is assumed). If w is found in D_MIM, proceed to step 2.

2. Order the tags in the tag profile for w in D_BIN according to the frequencies of the tags in the tag profile for w in D_MIM. If a tag t for a word w is found in D_MIM but not in D_BIN, then t does not become a part of the tag profile for w in D_NEW. The reason is that the dictionary D_MIM is derived from a tagged corpus which has not been manually inspected and thus contains tagging errors. In other words, the tag profile from D_BIN is considered more reliable than the one in D_MIM.

3. Combine the new dictionary D_NEW with the dictionary D used by a tagger C as explained in step 2 in Section 4.1.

Table 4: Average tagging accuracy (%) using dictionaries extended with most of the data from BÍN and with arranged tag profiles for some of the words. Average ratio of unknown words in testing is 1.1%.
To illustrate, consider the following three tag profiles for the word "skögultennur" 'buckteeth':

skögultennur nvfn nvfo
skögultennur nvfo nken nvfn
skögultennur nvfo nvfn

The first tag profile appears in D_BIN. The tags "nvfn" and "nvfo" appear in alphabetic order. The second tag profile appears in D_MIM (shown here without the frequency numbers for each tag). The tag profile is sorted in ascending order of frequency of the tags. Note that the second tag profile contains the tag "nken" (resulting from a tagging error in MÍM) which does not appear in the first tag profile. When generating the resulting tag profile for D_NEW - the third line in the illustration above - the tag "nken" thus does not appear.
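A sketch of the arranging step for a single word (ours; the dictionary representations and the MÍM frequencies are assumptions):

def arrange_profile(bin_tags, mim_tag_freqs):
    # Order a BÍN tag profile by its MÍM frequencies, most frequent tag first,
    # as IceTagger expects. Tags seen only in MÍM are ignored (possible tagging
    # errors); tags missing from MÍM keep a default frequency of 0.
    return sorted(bin_tags, key=lambda t: mim_tag_freqs.get(t, 0), reverse=True)

# The "skögultennur" example from the text, with invented frequencies:
print(arrange_profile(["nvfn", "nvfo"], {"nvfo": 7, "nken": 2, "nvfn": 3}))
# -> ['nvfo', 'nvfn']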
Future work
In Section 4.2, we showed that the accuracies of the three taggers can be improved significantly by arranging the tag profiles of the taggers using frequency information from the MÍM corpus. We used about 17 million tokens from the corpus, but once it has been extended to its final size of 25 million tokens, we would like to repeat this part of the experiment, thus using more data, to see if the accuracy increases further.
Note that we have only been able to arrange part of the tag profiles (about 10%) in the extended dictionaries by using frequency information from MÍM. In future work, we would also like to experiment with arranging the remainder of the tag profiles according to unigram tag frequencies (for example, derived from the IFD corpus), i.e. tag frequenies that are not associated with individual words. We would then be seeking an answer to the question whether assigning unigram tag frequencies to the tag profiles of words, for which we do not have reliable frequency information, results in higher tagging accuracy compared to assigning a uniform distribution to the tag profiles (i.e. giving each tag the frequency 1 as we have done).
Conclusion
We have experimented with adding data from a large morphological database to the dictionaries used by three open-source PoS taggers for Icelandic. Our results show that the tagging accuracy improves significantly when extending the dictionaries, and even further improvement in accuracy can be obtained by adding frequency information to some of the dictionary entries (tag profiles).
Our best performing tagger, a serial combination of a linguistic rule-based tagger and a statistical tagger, obtains a state-of-the-art tagging accuracy of 93.48% when using extended dictionaries and added frequency information. This is equivalent to 10.3% error reduction compared to the best base tagger. |
// clamp the 3 channels of the input vector to the allowable range based on FORMAT
// note that each channel is a float storing the allowable range as a bit pattern converted to float
// that is, for unsigned f16 say, we would clamp each channel to the range [0, F16MAX]
void Utils::clamp(Vector3 &v)
{
for (int i=0; i<3; ++i)
{
switch(Utils::FORMAT)
{
case UNSIGNED_F16:
if (v.component[i] < 0.0) v.component[i] = 0;
else if (v.component[i] > F16MAX) v.component[i] = F16MAX;
break;
case SIGNED_F16:
if (v.component[i] < -F16MAX) v.component[i] = -F16MAX;
else if (v.component[i] > F16MAX) v.component[i] = F16MAX;
break;
default:
nvUnreachable();
}
}
} |
# Excerpt from a larger module: geo and match_candidates_by_distance are defined elsewhere.
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple


def preempt_candidates(
images_ref: List[str],
images_cand: List[str],
exifs: Dict[str, Any],
reference: geo.TopocentricConverter,
max_gps_neighbors: int,
max_gps_distance: float,
) -> Tuple[Dict[str, list], Set[str]]:
preempted_cand = {im: images_cand for im in images_ref}
if max_gps_distance > 0 or max_gps_neighbors > 0:
gps_pairs = match_candidates_by_distance(
images_ref,
images_cand,
exifs,
reference,
max_gps_neighbors,
max_gps_distance,
)
preempted_cand = defaultdict(list)
for p in gps_pairs:
if p[0] in images_ref:
preempted_cand[p[0]].append(p[1])
if p[1] in images_ref:
preempted_cand[p[1]].append(p[0])
need_load = set(preempted_cand.keys())
for k, v in preempted_cand.items():
need_load.update(v)
need_load.add(k)
return preempted_cand, need_load |
Enzymatic and histopathologic biomarkers as indicators of contaminant exposure and effect in Asian clam (Potamocorbula amurensis).
Enzymatic and histopathologic alterations of the digestive gland, gill, gonad, and kidney were studied in Asian clam (Potamocorbula amurensis) in April, 1997 from each of four United States Geological Survey (USGS) stations in the San Francisco Estuary. Stations were selected based on differing body burdens of metallic contaminants in clams (Stn 4.1 > 6.1 > 8.1 > 12.5) observed over 7 years. Because no pristine sites are known within the estuary and because no laboratory-reared stocks of P. amurensis were available, clams from station 12.5 served as reference animals. Histopathologic analysis revealed no lesions in clams collected from station 12.5. Mild digestive gland atrophy and moderate distal kidney tubular vacuolation were seen in clams collected from station 8.1. Mild digestive gland atrophy, moderate kidney tubular atrophy, and moderate gill inflammation were seen in clams collected from station 6.1. Lesions found only in clams from station 4.1 were: (1) severe inflammation and moderate atrophy of primary ducts and diverticula, and decreased numbers of heterophagosomes and heterolysosomes in diverticula of the digestive gland; (2) severe gill inflammation; (3) severe kidney tubular atrophy; (4) severe ovarian and testicular inflammation and necrosis; (5) decreased numbers of mature ova; and (6) decreased number of glycogen storage cells in the ovary and testis. Localization of specific enzymes including adenosine triphosphatase (ATP), acid phosphatase (ACP), alkaline phosphatase (ALKP), gamma-glutamyl transpeptidase (GGT), and glucose-6-phosphate dehydrogenase (G6PDH) was performed and correlated, in serial sections, with glycogen (PAS) and haematoxylin and eosin stains. Enzymatic analysis revealed: (1) increased digestive diverticula ATP in stations 6.1 and 4.1; (2) decreased digestive diverticula ACP in stations 6.1 and 4.1 and proximal kidney tubular ACP deficiency in station 4.1; (3) no ALKP differences among stations; (4) increased distal kidney tubular GGT at station 12.5 and decreased distal kidney tubular GGT at station 4.1; (5) decreased digestive diverticula G6PDH in all stations except 12.5 and decreased proximal kidney tubular G6PDH in stations 8.1 and 6.1. It is possible that other anthropogenic and natural stressors may have affected the results in this study. However, the prevalence and increased severity of lesions in clams with highest metal body burden suggests a contaminant-associated etiology. Enzymatic and histopathologic biomarker alterations identified in this study were positively correlated with the metal body burden. Clams with the higher prevalence of diseases and enzyme alterations also showed a lower condition index and glycogen content in the month when histopathological assessment was performed. Further study will seek to develop enzymatic and histopathologic biomarkers for use in controlled laboratory conditions to help validate the field study.
// Generate a descriptive text for this item, to put in browser & window
// titles. Warning: the buffer used is overwritten each time!
const char* FluidType::title() {
#define MAXLABEL 128
static char buffer[MAXLABEL];
const char* t1 = type_name();
const char* type = 0;
if (is_widget()) type = t1 = ((WidgetType*)this)->subclass();
const char* name = this->name();
bool quoted = false;
if (!name || !*name) {
name = label();
if (!name || !*name) return t1;
quoted = true;
}
char* e = buffer+MAXLABEL-1; if (quoted) e--;
char* p = buffer;
if (type) {
while (p < e && *type) *p++ = *type++;
if (p >= e-4) return name;
*p++ = ' ';
}
if (quoted) *p++ = '"';
while (p < e && (*name&~31)) *p++ = *name++;
if (*name) {
if (p > e-3) p = e-3;
strcpy(p, quoted ? "...\"" : "...");
} else {
if (quoted) *p++ = '"';
*p++ = 0;
}
return buffer;
} |
<reponame>MsrTian/jsh_erp<gh_stars>0
package com.jsh.service.materials;
import com.jsh.base.BaseIService;
import com.jsh.model.po.DepotItem;
import com.jsh.util.JshException;
import com.jsh.util.PageUtil;
import net.sf.json.JSONArray;
import java.io.InputStream;
public interface DepotItemIService extends BaseIService<DepotItem> {
void findByType(PageUtil<DepotItem> depotItem, String type, Integer ProjectId, Long MId, String MonthTime, Boolean isPrev) throws JshException;
void findByTypeAndMaterialId(PageUtil<DepotItem> depotItem, String type, Long MId) throws JshException;
void findDetailByTypeAndMaterialId(PageUtil<DepotItem> depotItem, Long MId) throws JshException;
void findPriceByType(PageUtil<DepotItem> depotItem, String type, Integer ProjectId, Long MId, String MonthTime, Boolean isPrev) throws JshException;
void buyOrSale(PageUtil<DepotItem> depotItem, String type, String subType, Long MId, String MonthTime, String sumType) throws JshException;
void findGiftByType(PageUtil<DepotItem> depotItem, String subType, Integer ProjectId, Long MId, String type) throws JshException;
/**
     * Export information to Excel
*
* @return
*/
InputStream exmportExcel(String isAllPage, JSONArray dataArray) throws JshException;
}
|
// NewOpenchainServer creates a new instance of the ServerOpenchain.
func NewOpenchainServer() (*ServerOpenchain, error) {
ledger, err := ledger.GetLedger()
if err != nil {
return nil, err
}
s := &ServerOpenchain{ledger: ledger}
return s, nil
} |
package services
import (
"github.com/golang/glog"
)
// Service defines the operations an application must implement: processing
// requests, validating their format, distinguishing read-only requests, and
// marshalling/unmarshalling its state as a JSON snapshot.
type Service interface {
Process(req string) string
CheckFormat(req string) bool
CheckRead(req string) bool
MarshalJSON() ([]byte, error)
UnmarshalJSON(snap []byte) error
}
// StartService constructs the Service named by config ("kv-store" or "dummy");
// it terminates the program on an unrecognised name.
func StartService(config string) Service {
var serv Service
switch config {
case "kv-store":
serv = newStore()
case "dummy":
serv = newDummy()
default:
glog.Fatal("No valid service specified")
}
return serv
}
// GetInteractiveText returns the interactive help text for the given service type.
func GetInteractiveText(config string) string {
var s string
switch config {
case "kv-store":
s =
`The following commands are available:
get [key]: to return the value of a given key
exists [key]: to test if a given key is present
update [key] [value]: to set the value of a given key, if key already exists then overwrite
delete [key]: to remove a key value pair if present
count: to return the number of keys
print: to return all key value pairs
`
case "dummy":
s =
`The following commands are available:
ping: ping dummy application
`
}
return s
}
// Parse reports whether request is well formed for the given service type and,
// if so, whether it is a read-only request.
func Parse(config string, request string) (bool, bool) {
serv := StartService(config)
if !serv.CheckFormat(request) {
return false, false
}
return true, serv.CheckRead(request)
}
|
<reponame>monax/release
package main
import (
"fmt"
"github.com/monax/relic/v2/project"
)
func main() {
fmt.Println(project.History.MustChangelog())
}
|
def swap_target_tensor_for(self, new_target_tensor: np.ndarray):
self._state._swap_target_tensor_for(new_target_tensor) |
def IsGranted(perm):
pass |
<reponame>openyard/ebics
// Generated with goxc v0.1.13 - rev f5cc87998c35abe9b532e49b5672e8667bcbd00c
package w3c
// GYear is a string-based wrapper for the XML Schema gYear base type.
type GYear string
func NewGYear(value string) *GYear {
me := (*GYear)(&value)
return me
}
|
import numpy as np
import torch


def process(x, mu=0.4, sigma=0.224):
    """Normalize x with the given mean/std and move the channel axis first (HWC -> CHW)."""
    x = (x - mu) / sigma
    if isinstance(x, torch.Tensor):
        return x.transpose(-1, -2).transpose(-2, -3)
    else:
        return np.moveaxis(x, -1, -3) |
/**
* A {@link Document} performing auto completion on the inserted text. This
* document can be used on any {@link JTextComponent}.
* <p>
* The completion will only happen for inserts, that is, when characters are
* typed. If characters are erased, no new completion is suggested until a new
* character is typed.
*
* @see CompletionService
*
* @author Samuel Sjoberg, http://samuelsjoberg.com
* @version 1.0.0
*/
public class AutoCompleteDocument extends PlainDocument {
/** Default serial version UID. */
private static final long serialVersionUID = 1L;
/** Completion service. */
private final CompletionService<?> completionService;
/** The document owner. */
private final JTextComponent documentOwner;
/**
* Create a new <code>AutoCompletionDocument</code>.
*
* @param service
* the service to use when searching for completions
* @param documentOwner
* the document owner
*/
public AutoCompleteDocument(CompletionService<?> service,
JTextComponent documentOwner) {
this.completionService = service;
this.documentOwner = documentOwner;
}
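    // A minimal usage sketch (illustrative only, not part of the original source):
    // attach the document to any JTextComponent together with some
    // CompletionService implementation, e.g.
    //
    //   JTextField field = new JTextField();
    //   CompletionService<String> service = ...; // any CompletionService
    //   field.setDocument(new AutoCompleteDocument(service, field));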
/**
* Look up the completion string.
*
* @param str
* the prefix string to complete
     * @return the completion or <code>null</code> if no completion was found.
*/
protected String complete(String str) {
Object o = completionService.autoComplete(str);
return o == null ? null : o.toString();
}
@Override
public void insertString(int offs, String str, AttributeSet a)
throws BadLocationException {
if (str == null || str.length() == 0) {
return;
}
String text = getText(0, offs); // Current text.
String completion = complete(text + str);
int length = offs + str.length();
if (completion != null && text.length() > 0) {
str = completion.substring(length - 1);
super.insertString(offs, str, a);
documentOwner.select(length, getLength());
} else {
super.insertString(offs, str, a);
}
}
} |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
labelmaker.py is a tool that reads key:value pairs from a json file, and
labels the running instance and all attached drives accordingly. It
is designed to run on boot in a startup-script or userdata.
"""
from __future__ import print_function
import sys
import json
from operator import itemgetter
import requests
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
def label(self_link, access_token, data):
""" Format and make label request
Args:
self_link -- Resource uri
access_token -- auth token set in header
data -- label data as dict
Returns:
Http status code of request
"""
headers = {
"Authorization": "Bearer %s" % access_token,
"Content-Type": "application/json"
}
try:
req = requests.post("%s/setLabels" % self_link, headers=headers,
data=json.dumps(data))
req.raise_for_status()
except requests.exceptions.HTTPError as err:
print(err, file=sys.stderr)
if req.status_code == requests.codes.ok:
print("Labels set on %s" % ':'.join(self_link.split('/')[-2:]))
return req.status_code
def get_metadata():
""" Read metadata from local instance
Args:
None
Returns:
Dict of instance metadata
"""
metadata_server = "http://metadata/computeMetadata/v1/instance"
headers = {"Metadata-Flavor": "Google"}
return json.loads(requests.get("%s/?recursive=true" % metadata_server,\
headers=headers).text)
def label_merge(current, fingerprint, new):
""" Merge label dicts, and apply fingerprint
Args:
current -- current label object
        fingerprint -- fingerprint of current label object
new -- request labels
Returns:
Merged Dict of labels
"""
labels = current.copy()
labels.update(new)
return {"labels": labels, "labelFingerprint": fingerprint}
def main(argv):
# Load label file
try:
new_lables = json.load(open(argv[1]))
except IndexError:
print("%s <lables.json> required!" % __file__, file=sys.stderr)
sys.exit(1)
except ValueError as err:
print("%s invalid json: %s" % (sys.argv[1], err), file=sys.stderr)
sys.exit(1)
# Pull defaults from metadata
metadata = get_metadata()
project, zone = itemgetter(1, 3)(metadata['zone'].split("/"))
instance_name = metadata['name']
# Google Creds
creds = GoogleCredentials.get_application_default()
# Describe Instance
conn = discovery.build('compute', 'beta', credentials=creds)
instance = conn.instances().get(project=project, zone=zone,
instance=instance_name).execute()
# Label Instance
label(instance['selfLink'], creds.get_access_token().access_token,
label_merge(instance['labels'] if 'labels' in instance else {},
instance["labelFingerprint"], new_lables))
# Label Disks
for i in instance['disks']:
# Skip local disk
if 'source' not in i:
continue
disk = conn.disks().get(project=project, zone=zone,
disk=i['source'].split('/')[-1]).execute()
label(disk['selfLink'], creds.get_access_token().access_token,
label_merge(disk['labels'] if 'labels' in disk else {},
disk["labelFingerprint"], new_lables))
if __name__ == '__main__':
main(sys.argv)
|
<filename>src/Python/701-800/743.NetworkDelayTime.py<gh_stars>1-10
from queue import PriorityQueue
import collections
class Solution:
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
INF = 0x7FFFFFFF
adj = collections.defaultdict(list)
visit = [0] * (N + 1)
dist = [INF] * (N + 1)
q = PriorityQueue()
for u, v, w in times:
adj[u].append((v, w))
dist[K] = 0
q.put((dist[K], K))
while not q.empty():
MIN, u = q.get()
if visit[u] == 0:
visit[u] = 1
else:
continue
for v, w in adj[u]:
if MIN + w < dist[v]:
dist[v] = MIN + w
q.put((dist[v], v))
return max(dist[1:]) if visit.count(0) == 1 else -1
if __name__ == '__main__':
    # Dijkstra single-source shortest path algorithm
solution = Solution()
print(solution.networkDelayTime([[2,1,1],[2,3,1],[3,4,1]], 4, 2))
print(solution.networkDelayTime([[1,2,1],[2,3,2],[1,3,4]], 3, 1))
else:
pass
|
/**
 * Synchronizes existing local records: CREATE | WRITE | UNLINK
* @param context
* @throws Exception
*/
public synchronized void synchronize(Context context) throws Exception {
final String TAG = "MerlinService";
for (T item : Merlin.where(tClass).find()) {
switch (item.getFlag()) {
case Flag.NEED_CREATE:
T newItem = onCreate(context, item);
newItem.setFlag(Flag.IDLE);
Logging.i(TAG, "Updating local flag to idle.[01]");
newItem.save();
break;
case Flag.NEED_WRITE:
try {
onWrite(context, item);
item.setFlag(Flag.IDLE);
Logging.i(TAG, "Updating local flag to idle.[11]");
item.save();
} catch (RecordNotFound e) {
Logging.w(TAG, e.message);
Logging.i(TAG, "Deleting from local.[11]");
item.delete();
}
break;
case Flag.NEED_UNLINK:
try {
onUnlink(context, item);
Logging.i(TAG, "Deleting from local.[21]");
item.delete();
} catch (RecordNotFound e) {
Logging.w(TAG, e.message);
Logging.i(TAG, "Deleting from local.[22]");
item.delete();
}
break;
}
}
} |
def configure(self, install_cfg):
for comp, comp_data in install_cfg.items():
install_prefix = comp_data.get('prefix')
install_dir = comp_data.get('install_dir')
if install_dir and not os.path.isabs(install_dir):
install_dir = os.path.join(install_prefix, install_dir)
self.announce(f"Installing {comp}", level=3)
self.spawn(["cmake",
"--install", CMAKE_BUILD_DIR,
"--prefix", install_prefix,
"--component", comp_data.get('name')])
if sys.platform != "win32" and comp_data.get('rpath'):
lib_pattern = "*.so" if sys.platform == "linux" else "*.dylib"
for path in Path(install_dir).glob(lib_pattern):
set_rpath(comp_data['rpath'], path) |
// AOJ Volume 1 Problem 0105
#include <stdio.h>
#include <string.h>
typedef struct {
char word[31];
int page[1001];
} INDEX;
int main(void)
{
INDEX index[100];
char word[31];
INDEX *index_p[100], *tmp;
int page;
int n;
int i, j;
int flag;
memset(index, 0, sizeof(index));
for (i = 0; i < 100; i++){
index_p[i] = &index[i];
}
n = 0;
while (scanf("%s%d", word, &page) != EOF){
for (i = 0; i < n; i++){
if (strcmp(index_p[i]->word, word) == 0){
break;
}
}
if (i == n){
strcpy(index_p[n]->word, word);
n++;
}
index_p[i]->page[page] = 1;
}
for (i = 0; i < n; i++){
for (j = 0; j < n - i - 1; j++){
if (strcmp(index_p[j]->word, index_p[j + 1]->word) > 0){
tmp = index_p[j];
index_p[j] = index_p[j + 1];
index_p[j + 1] = tmp;
}
}
}
for (i = 0; i < n; i++){
printf("%s\n", index_p[i]->word);
flag = 0;
for (j = 1; j <= 1000; j++){
if (index_p[i]->page[j] != 0){
if (flag == 1){
printf(" ");
}
flag = 1;
printf("%d", j);
}
}
printf("\n");
}
return (0);
} |
<reponame>Pioneer18/Rent-A-Car
import { RadiusToMeters } from './radius-to-meters';
describe('RadiusToMeters', () => {
it('should be defined', () => {
expect(new RadiusToMeters()).toBeDefined();
});
});
|
<filename>src/webparts/archivedComments/components/organisms/IArchivedCommentsProps.ts
import { IComment } from '../../../models/IComment';
export interface IArchivedCommentsProps {
Comments: Array<IComment>;
}
|
<reponame>badeball/cypress-configuration
import fs from "fs";
import path from "path";
import util from "util";
import debug from "./debug";
import { assert, assertAndReturn } from "./assertions";
import {
isString,
isStringOrFalse,
isStringOrStringArray,
} from "./type-guards";
function isStringEntry(entry: [any, any]): entry is [string, string] {
return typeof entry[0] === "string" && typeof entry[1] === "string";
}
/**
* This is obviously a non-exhaustive list.
*
 * Definitions can be found in https://github.com/cypress-io/cypress/blob/develop/cli/schema/cypress.schema.json.
*/
export interface ICypressConfiguration {
projectRoot: string;
integrationFolder: string;
fixturesFolder: string | false;
supportFile: string | false;
testFiles: string | string[];
ignoreTestFiles: string | string[];
}
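// For illustration only: a hypothetical configuration object matching the
// defaults applied further down in resolveConfiguration():
// const example: ICypressConfiguration = {
//   projectRoot: "/path/to/project",
//   integrationFolder: "cypress/integration",
//   fixturesFolder: "cypress/fixtures",
//   supportFile: "cypress/support/index.js",
//   testFiles: "**/*.*",
//   ignoreTestFiles: "*.hot-update.js",
// };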
function validateConfigurationEntry(
key: string,
value: unknown
): Partial<ICypressConfiguration> {
switch (key) {
case "projectRoot":
if (!isString(value)) {
throw new Error(
`Expected a string (projectRoot), but got ${util.inspect(value)}`
);
}
return { [key]: value };
case "integrationFolder":
if (!isString(value)) {
throw new Error(
`Expected a string (integrationFolder), but got ${util.inspect(
value
)}`
);
}
return { [key]: value };
case "fixturesFolder":
if (!isStringOrFalse(value)) {
throw new Error(
`Expected a string or false (fixturesFolder), but got ${util.inspect(
value
)}`
);
}
return { [key]: value };
case "supportFile":
if (!isStringOrFalse(value)) {
throw new Error(
`Expected a string or false (supportFile), but got ${util.inspect(
value
)}`
);
}
return { [key]: value };
case "testFiles":
if (!isStringOrStringArray(value)) {
throw new Error(
`Expected a string or array of strings (testFiles), but got ${util.inspect(
value
)}`
);
}
return { [key]: value };
case "ignoreTestFiles":
if (!isStringOrStringArray(value)) {
throw new Error(
`Expected a string or array of strings (ignoreTestFiles), but got ${util.inspect(
value
)}`
);
}
return { [key]: value };
default:
return {};
}
}
function parseJsonFile(filepath: string) {
const content = fs.readFileSync(filepath).toString("utf8");
try {
return JSON.parse(content);
} catch {
throw new Error(`Malformed ${filepath}, expected JSON`);
}
}
export function findLastIndex<T>(
collection: ArrayLike<T>,
predicate: (value: T) => boolean,
beforeIndex = collection.length
): number {
for (let i = beforeIndex - 1; i >= 0; --i) {
if (predicate(collection[i])) {
return i;
}
}
return -1;
}
export function* traverseArgvMatching(
argv: string[],
name: string,
allowEqual: boolean
) {
let beforeIndex = argv.length,
matchingIndex;
while (
(matchingIndex = findLastIndex(
argv,
(arg) => arg.startsWith(name),
beforeIndex
)) !== -1
) {
if (argv[matchingIndex] === name) {
if (argv.length - 1 === matchingIndex) {
debug(`'${name}' argument missing`);
} else {
yield argv[matchingIndex + 1];
}
} else if (allowEqual && argv[matchingIndex][name.length] === "=") {
yield argv[matchingIndex].slice(name.length + 1);
}
beforeIndex = matchingIndex;
}
}
export function* combine<T>(...generators: Generator<T, unknown, unknown>[]) {
for (const generator of generators) {
yield* generator;
}
}
export function findArgumentValue(
argv: string[],
name: string,
allowEqual: boolean
): string | undefined {
for (const value of traverseArgvMatching(argv, name, allowEqual)) {
return value;
}
}
export function toSnakeCase(value: string) {
return value.replace(/[A-Z]/g, (letter) => `_${letter.toLowerCase()}`);
}
export function capitalize(word: string) {
return word.toLowerCase().replace(/\b\w/g, (l) => l.toUpperCase());
}
export function toCamelCase(value: string) {
return value
.split("_")
.map((word, index) =>
index === 0 ? word.toLocaleLowerCase() : capitalize(word)
)
.join("");
}
export function resolveConfiguration(options: {
argv: string[];
env: NodeJS.ProcessEnv;
cwd: string;
}): ICypressConfiguration {
debug(
`attempting to resolve Cypress configuration using ${util.inspect(options)}`
);
const { argv, env } = options;
const projectPath = resolveProjectPath(options);
const cliOrigin: Partial<ICypressConfiguration> = Object.assign(
{},
...Array.from(
combine(
traverseArgvMatching(argv, "--config", true),
traverseArgvMatching(argv, "-c", false)
)
)
.reverse()
.flatMap((argument) => {
const keypairExpr = /(?:^|,)([^=]+)=([^,$]+)/g;
const entries: Partial<ICypressConfiguration>[] = [];
let match;
while ((match = keypairExpr.exec(argument)) !== null) {
entries.push(validateConfigurationEntry(match[1], match[2]));
}
return entries;
})
);
const envPrefixExpr = /^cypress_(.+)/i;
const envOrigin: Partial<ICypressConfiguration> = Object.assign(
{},
...Object.entries(env)
.filter((entry) => {
return envPrefixExpr.test(entry[0]);
})
.filter(isStringEntry)
.map<[string, string]>((entry) => {
const match = entry[0].match(envPrefixExpr);
assert(
match,
"cypress-cucumber-preprocessor: expected match after test, this is likely a bug."
);
return [assertAndReturn(match[1]), entry[1]];
})
.map((entry) => {
return validateConfigurationEntry(
entry[0].includes("_") ? toCamelCase(entry[0]) : entry[0],
entry[1]
);
})
);
let configOrigin: Partial<ICypressConfiguration> = {};
const cypressConfigPath = path.join(
projectPath,
resolveConfigurationFile(options)
);
if (fs.existsSync(cypressConfigPath)) {
const cypressConfig = parseJsonFile(cypressConfigPath);
if (typeof cypressConfig !== "object" || cypressConfig == null) {
throw new Error(`Malformed ${cypressConfigPath}, expected an object`);
}
configOrigin = Object.assign(
{},
...Object.entries(cypressConfig).map((entry) =>
validateConfigurationEntry(...entry)
)
);
}
const configuration = Object.assign(
{
projectRoot: resolveProjectPath(options),
integrationFolder: "cypress/integration",
fixturesFolder: "cypress/fixtures",
supportFile: "cypress/support/index.js",
testFiles: "**/*.*",
ignoreTestFiles: "*.hot-update.js",
},
configOrigin,
envOrigin,
cliOrigin
);
debug(`resolved configuration of ${util.inspect(configuration)}`);
return configuration;
}
export function resolveEnvironment(options: {
argv: string[];
env: NodeJS.ProcessEnv;
cwd: string;
}): Record<string, any> {
debug(
`attempting to resolve Cypress environment using ${util.inspect(options)}`
);
const { argv, env } = options;
const projectPath = resolveProjectPath(options);
const envEntries = Array.from(
combine(
traverseArgvMatching(argv, "--env", true),
traverseArgvMatching(argv, "-e", false)
)
);
if (envEntries.length > 1) {
console.warn(
"You have specified -e / --env multiple times. This is likely a mistake, as only the last one will take affect. Multiple values should instead be comma-separated."
);
}
const cliOrigin: Record<string, string> = Object.fromEntries(
envEntries.slice(0, 1).flatMap((argument) => {
const keypairExpr = /(?:^|,)([^=]+)=([^,$]+)/g;
const entries: [string, string][] = [];
let match;
while ((match = keypairExpr.exec(argument)) !== null) {
entries.push([match[1], match[2]]);
}
return entries;
})
);
const envPrefixExpr = /^cypress_(.+)/i;
const envOrigin: Record<string, string> = Object.fromEntries(
Object.entries(env)
.filter((entry) => {
return envPrefixExpr.test(entry[0]);
})
.filter(isStringEntry)
.map<[string, string]>((entry) => {
const match = entry[0].match(envPrefixExpr);
assert(
match,
"cypress-cucumber-preprocessor: expected match after test"
);
return [assertAndReturn(match[1]), entry[1]];
})
);
const cypressConfigPath = path.join(
projectPath,
resolveConfigurationFile(options)
);
let configOrigin: Record<string, any> = {};
if (fs.existsSync(cypressConfigPath)) {
const content = fs.readFileSync(cypressConfigPath).toString("utf8");
const cypressConfig = JSON.parse(content);
if (cypressConfig.env) {
configOrigin = cypressConfig.env;
}
}
const cypressEnvironmentFilePath = path.join(projectPath, "cypress.env.json");
let cypressEnvOrigin: Record<string, any> = {};
if (fs.existsSync(cypressEnvironmentFilePath)) {
const content = fs
.readFileSync(cypressEnvironmentFilePath)
.toString("utf8");
cypressEnvOrigin = JSON.parse(content);
}
const environment = Object.assign(
{},
cypressEnvOrigin,
configOrigin,
envOrigin,
cliOrigin
);
debug(`resolved environment of ${util.inspect(environment)}`);
return environment;
}
export function resolveConfigurationFile(options: { argv: string[] }): string {
const { argv } = options;
return (
findArgumentValue(argv, "--config-file", true) ||
findArgumentValue(argv, "-C", false) ||
"cypress.json"
);
}
export function resolveProjectPath(options: {
argv: string[];
cwd: string;
}): string {
const { argv, cwd } = options;
const customProjectPath =
findArgumentValue(argv, "--project", true) ||
findArgumentValue(argv, "-P", false);
if (customProjectPath) {
if (path.isAbsolute(customProjectPath)) {
return customProjectPath;
} else {
return path.join(cwd, customProjectPath);
}
} else {
return cwd;
}
}
|
<filename>core/services/pipeline/helpers_test.go
package pipeline
import (
"reflect"
"github.com/jinzhu/gorm"
)
func NewBaseTask(dotID string, t Task, index int32) BaseTask {
return BaseTask{dotID: dotID, outputTask: t, Index: index}
}
func (t *BridgeTask) HelperSetConfigAndTxDB(config Config, txdb *gorm.DB) {
t.config = config
t.txdb = txdb
}
func (t *HTTPTask) HelperSetConfig(config Config) {
t.config = config
}
func (t ResultTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*ResultTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
}
return true
}
func (t MultiplyTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*MultiplyTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
} else if !t.Times.Equal(other.Times) {
return false
}
return true
}
func (t MedianTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*MedianTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
}
return true
}
func (t JSONParseTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*JSONParseTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
} else if !reflect.DeepEqual(t.Path, other.Path) {
return false
}
return true
}
func (t HTTPTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*HTTPTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
} else if t.Method != other.Method {
return false
} else if t.URL != other.URL {
return false
} else if !reflect.DeepEqual(t.RequestData, other.RequestData) {
return false
}
return true
}
func (t BridgeTask) ExportedEquals(otherTask Task) bool {
other, ok := otherTask.(*BridgeTask)
if !ok {
return false
} else if t.Index != other.Index {
return false
} else if t.Name != other.Name {
return false
} else if !reflect.DeepEqual(t.RequestData, other.RequestData) {
return false
}
return true
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class ContactPersonInfo(object):
def __init__(self):
self._contact_email = None
self._contact_mobile = None
self._contact_name = None
self._contact_type = None
@property
def contact_email(self):
return self._contact_email
@contact_email.setter
def contact_email(self, value):
self._contact_email = value
@property
def contact_mobile(self):
return self._contact_mobile
@contact_mobile.setter
def contact_mobile(self, value):
self._contact_mobile = value
@property
def contact_name(self):
return self._contact_name
@contact_name.setter
def contact_name(self, value):
self._contact_name = value
@property
def contact_type(self):
return self._contact_type
@contact_type.setter
def contact_type(self, value):
self._contact_type = value
def to_alipay_dict(self):
params = dict()
if self.contact_email:
if hasattr(self.contact_email, 'to_alipay_dict'):
params['contact_email'] = self.contact_email.to_alipay_dict()
else:
params['contact_email'] = self.contact_email
if self.contact_mobile:
if hasattr(self.contact_mobile, 'to_alipay_dict'):
params['contact_mobile'] = self.contact_mobile.to_alipay_dict()
else:
params['contact_mobile'] = self.contact_mobile
if self.contact_name:
if hasattr(self.contact_name, 'to_alipay_dict'):
params['contact_name'] = self.contact_name.to_alipay_dict()
else:
params['contact_name'] = self.contact_name
if self.contact_type:
if hasattr(self.contact_type, 'to_alipay_dict'):
params['contact_type'] = self.contact_type.to_alipay_dict()
else:
params['contact_type'] = self.contact_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ContactPersonInfo()
if 'contact_email' in d:
o.contact_email = d['contact_email']
if 'contact_mobile' in d:
o.contact_mobile = d['contact_mobile']
if 'contact_name' in d:
o.contact_name = d['contact_name']
if 'contact_type' in d:
o.contact_type = d['contact_type']
return o
|