package com.nergachev.roman.showmebeers.model.json;

import java.util.List;

/**
 * Created by rone on 06/02/16.
 */
public class BreweriesList {
    List<Brewery> data;

    public List<Brewery> getData() {
        return data;
    }
}
import java.util.concurrent.TimeUnit;

import android.util.Log;

import rx.Notification;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;

/**
 * Created by evan on 8/22/14.
 */
public class SampleObservables {
    private static final String TAG = "RxLoader Sample";

    public static Observable<String> delay() {
        return Observable.timer(2, TimeUnit.SECONDS).map(new Func1<Long, String>() {
            @Override
            public String call(Long aLong) {
                Log.d(TAG, "2 second delay!");
                return "Async Complete!";
            }
        });
    }

    public static Func1<String, Observable<String>> inputDelay() {
        return new Func1<String, Observable<String>>() {
            @Override
            public Observable<String> call(final String input) {
                return Observable.timer(2, TimeUnit.SECONDS).map(new Func1<Long, String>() {
                    @Override
                    public String call(Long aLong) {
                        Log.d(TAG, "2 second delay! [" + input + "]");
                        return "Async Complete! [" + input + "]";
                    }
                });
            }
        };
    }

    public static Observable<Long> count() {
        return Observable.interval(100, TimeUnit.MILLISECONDS).doOnEach(new Action1<Notification<? super Long>>() {
            @Override
            public void call(Notification<? super Long> notification) {
                Log.d(TAG, "tick!");
            }
        }).take(100);
    }
}
export * from './SyncReduxToRecoil';
export * from './atomFromRedux';
export * from './selectorFromReselect';
export * from './syncChangesFromRecoil';
export * from './useSyncReduxToRecoil';
export * from './options';

// Danger! Internals shouldn't be used unless you absolutely know what you're doing
export * as _internals from './internals';
<filename>configmanager/pkg/mgmt/configmanager.go<gh_stars>10-100 /* Copyright SecureKey Technologies Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package mgmt import ( "encoding/json" "github.com/hyperledger/fabric-sdk-go/pkg/common/logging" "github.com/hyperledger/fabric/core/chaincode/shim" "github.com/securekey/fabric-snaps/configmanager/api" cfgsnapapi "github.com/securekey/fabric-snaps/configurationsnap/api" "github.com/securekey/fabric-snaps/util/errors" ) var logger = logging.NewLogger("configsnap") const ( // indexOrg is the name of the index to retrieve configurations per org indexMspID = "cfgmgmt-mspid" ) // indexes contains a list of indexes that should be added for configurations var indexes = [...]string{indexMspID} // ConfigManagerImpl implements configuration management functionality type configManagerImpl struct { stub shim.ChaincodeStubInterface } //NewConfigManager returns config manager implementation func NewConfigManager(stub shim.ChaincodeStubInterface) api.ConfigManager { return &configManagerImpl{stub: stub} } // Save saves configuration data in the ledger func (cmngr *configManagerImpl) Save(configData []byte) errors.Error { if len(configData) == 0 { return errors.New(errors.MissingRequiredParameterError, "Configuration must be provided") } //parse configuration request configMessageMap, err := ParseConfigMessage(configData, cmngr.stub.GetTxID()) if err != nil { return err } err1 := cmngr.stub.SetEvent(cfgsnapapi.ConfigCCEventName, nil) if err1 != nil { return errors.Wrap(errors.SystemError, err1, "SetEvent failed") } return cmngr.saveConfigs(configMessageMap) } //saveConfigs saves key&configs to the repository. //also it adds indexes for saved records func (cmngr *configManagerImpl) saveConfigs(configMessageMap map[api.ConfigKey][]byte) errors.Error { for key, value := range configMessageMap { logger.Debugf("Saving configs %v,%s", key, string(value[:])) strkey, err := ConfigKeyToString(key) if err != nil { return err } if e := cmngr.stub.PutState(strkey, value); e != nil { return errors.Wrap(errors.SystemError, e, "PutState has failed") } //add index for saved state if err := cmngr.addIndexes(key); err != nil { return err } } return nil } // Get gets configuration from the ledger using config key func (cmngr *configManagerImpl) Get(configKey api.ConfigKey) ([]*api.ConfigKV, errors.Error) { err := ValidateConfigKey(configKey) if err != nil { //search for all configs by mspID return cmngr.getConfigs(configKey) } if len(configKey.ComponentName) > 0 && len(configKey.ComponentVersion) == 0 { values, getConfigsErr := cmngr.getConfigs(configKey) if getConfigsErr != nil { return nil, getConfigsErr } filterComp := make([]*api.ConfigKV, 0) for _, v := range values { if v.Key.ComponentName == configKey.ComponentName && v.Key.AppName == configKey.AppName { filterComp = append(filterComp, v) } } return filterComp, nil } //search for one config by valid key config, getConfigErr := cmngr.getConfig(configKey) if getConfigErr != nil { return nil, getConfigErr } configKeys := []*api.ConfigKV{{Key: configKey, Value: config}} return configKeys, nil } //getConfig to get config for valid key func (cmngr *configManagerImpl) getConfig(configKey api.ConfigKey) ([]byte, errors.Error) { logger.Debugf("Getting config for %v", configKey) key, codedErr := ConfigKeyToString(configKey) if codedErr != nil { return nil, codedErr } //get configuration for valid key config, err := cmngr.stub.GetState(key) if err != nil { return nil, errors.Wrap(errors.SystemError, err, "GetState 
failed") } if config == nil && len(config) == 0 { logger.Debugf("Nothing there for key %s", key) } return config, nil } //getConfigs to get configs for MspId func (cmngr *configManagerImpl) getConfigs(configKey api.ConfigKey) ([]*api.ConfigKV, errors.Error) { if configKey.MspID == "" { return nil, errors.Errorf(errors.InvalidConfigKey, "Invalid config key %v. MspID is required. ", configKey) } logger.Debugf("Getting configs for %v", configKey) configs, err := cmngr.search(configKey) if err != nil { return nil, err } return configs, nil } func (cmngr *configManagerImpl) deleteConfigs(configKey api.ConfigKey) errors.Error { if configKey.MspID == "" { return errors.Errorf(errors.InvalidConfigKey, "Invalid config key %+v. MspID is required.", configKey) } configs, err := cmngr.getConfigs(configKey) if err != nil { return err } for _, value := range configs { logger.Debugf("Deleting state for key: %+v", value.Key) keyStr, err := ConfigKeyToString(value.Key) if err != nil { return err } if err := cmngr.stub.DelState(keyStr); err != nil { return errors.Wrap(errors.SystemError, err, "DeleteState failed") } } return nil } //Delete deletes configuration from the ledger using config key func (cmngr *configManagerImpl) Delete(configKey api.ConfigKey) errors.Error { err := ValidateConfigKey(configKey) if err != nil { //search for all configs by mspID return cmngr.deleteConfigs(configKey) } if deleteStateErr := cmngr.deleteState(configKey); deleteStateErr != nil { return deleteStateErr } key, configKeyToStringErr := ConfigKeyToString(configKey) if configKeyToStringErr != nil { return configKeyToStringErr } //delete configuration for valid key e := cmngr.stub.DelState(key) if e != nil { return errors.Wrap(errors.SystemError, e, "DelState failed") } return nil } func (cmngr *configManagerImpl) deleteState(configKey api.ConfigKey) errors.Error { if len(configKey.ComponentName) > 0 && len(configKey.ComponentVersion) == 0 { configs, err := cmngr.getConfigs(configKey) if err != nil { return err } for _, value := range configs { logger.Debugf("Deleting state for key: %+v", value.Key) keyStr, err := ConfigKeyToString(value.Key) if err != nil { return err } if value.Key.ComponentName == configKey.ComponentName && value.Key.AppName == configKey.AppName { if err := cmngr.stub.DelState(keyStr); err != nil { return errors.Wrap(errors.SystemError, err, "DeleteState failed") } } } } return nil } //ParseConfigMessage unmarshals supplied config message and returns //map[compositekey]configurationbytes to the caller func ParseConfigMessage(configData []byte, txID string) (map[api.ConfigKey][]byte, errors.Error) { configMap := make(map[api.ConfigKey][]byte) var parsedConfig api.ConfigMessage if err := json.Unmarshal(configData, &parsedConfig); err != nil { return nil, errors.Errorf(errors.UnmarshalError, "Cannot unmarshal config message %s %s", string(configData[:]), err) } //validate config if err := parsedConfig.IsValid(); err != nil { return nil, err } mspID := parsedConfig.MspID for _, config := range parsedConfig.Peers { for _, appConfig := range config.App { key, err := CreateConfigKey(mspID, config.PeerID, appConfig.AppName, appConfig.Version, "", "") if err != nil { return nil, err } configMap[key] = []byte(appConfig.Config) } } configMap, err := parseConfigComponent(parsedConfig, configMap, txID) if err != nil { return nil, err } return configMap, nil } func parseConfigComponent(parsedConfig api.ConfigMessage, configMap map[api.ConfigKey][]byte, txID string) (map[api.ConfigKey][]byte, errors.Error) { var key 
api.ConfigKey var err errors.Error mspID := parsedConfig.MspID for _, app := range parsedConfig.Apps { if len(app.Components) == 0 { key, err = CreateConfigKey(mspID, "", app.AppName, app.Version, "", "") if err != nil { return nil, err } configMap[key] = []byte(app.Config) } else { for _, v := range app.Components { v.TxID = txID key, err = CreateConfigKey(mspID, "", app.AppName, app.Version, v.Name, v.Version) if err != nil { return nil, err } bytes, e := json.Marshal(v) if e != nil { return nil, errors.WithMessage(errors.SystemError, e, "Failed to marshal app component") } configMap[key] = bytes } } } return configMap, nil } //addIndexes for configKey func (cmngr *configManagerImpl) addIndexes(key api.ConfigKey) errors.Error { if err := ValidateConfigKey(key); err != nil { return err } for _, index := range indexes { if addIndexErr := cmngr.addIndex(index, key); addIndexErr != nil { return addIndexErr } } return nil } //addIndex for configKey func (cmngr *configManagerImpl) addIndex(index string, configKey api.ConfigKey) errors.Error { if index == "" { return errors.Errorf(errors.SystemError, "Index is empty") } if err := ValidateConfigKey(configKey); err != nil { return err } fields, err := getFieldsForIndex(index, configKey) if err != nil { return err } strKey, err := ConfigKeyToString(configKey) if err != nil { return err } indexKey, err := cmngr.getIndexKey(index, strKey, fields) if err != nil { return err } logger.Debugf("Adding index [%s]\n", indexKey) e := cmngr.stub.PutState(indexKey, []byte{0x00}) if e != nil { return errors.WithMessage(errors.SystemError, e, "Failed to create index") } return nil } //getIndexKey uses CreateCompositeKey to create key using index, key and fields func (cmngr *configManagerImpl) getIndexKey(index string, key string, fields []string) (string, errors.Error) { if index == "" { return "", errors.New(errors.MissingRequiredParameterError, "Index is empty") } if key == "" { return "", errors.New(errors.MissingRequiredParameterError, "Key is empty") } if len(fields) == 0 { return "", errors.New(errors.MissingRequiredParameterError, "Field list is empty") } attributes := append(fields, key) indexKey, err := cmngr.stub.CreateCompositeKey(index, attributes) if err != nil { return "", errors.Wrapf(errors.SystemError, err, "Error creating comnposite key: %v", err) } return indexKey, nil } //getFieldsForIndex returns collection of fields to be indexed func getFieldsForIndex(index string, key api.ConfigKey) ([]string, errors.Error) { if err := ValidateConfigKey(key); err != nil { return nil, err } switch index { case indexMspID: return []string{key.MspID}, nil default: return nil, errors.Errorf(errors.SystemError, "unknown index [%s]", index) } } func (cmngr *configManagerImpl) search(key api.ConfigKey) ([]*api.ConfigKV, errors.Error) { //verify if key has MspID if key.MspID == "" { return nil, errors.Errorf(errors.InvalidConfigKey, "Invalid config key %+v", key) } index, fields, err := getIndexAndFields(key) if err != nil { return nil, errors.Wrap(errors.GetKeyError, err, "Failed to search Index and Fields for key") } configsMap, err := cmngr.getConfigurations(index, fields) if err != nil { return nil, errors.Wrap(errors.GetKeyError, err, "Failed to get configuration for s key") } return configsMap, nil } //getConfigurations for given index and indexed fields func (cmngr *configManagerImpl) getConfigurations(index string, fields []string) ([]*api.ConfigKV, errors.Error) { it, err := cmngr.stub.GetStateByPartialCompositeKey(index, fields) if err != nil { return 
nil, errors.Errorf(errors.SystemError, "Unexpected error retrieving message statuses with index [%s]: %s", index, err) } defer func() { iteratorErr := it.Close() if iteratorErr != nil { logger.Fatalf("Failed to close iterator : %s", iteratorErr) } }() configKeys := []*api.ConfigKV{} for it.HasNext() { compositeKey, e := it.Next() if e != nil { return nil, errors.WithMessage(errors.SystemError, e, "Failed to get next value from iterator") } _, compositeKeyParts, e := cmngr.stub.SplitCompositeKey(compositeKey.Key) if e != nil { return nil, errors.Wrapf(errors.SystemError, err, "Unexpected error splitting composite key. Key: [%s], Error: %s", compositeKey, err) } configID := compositeKeyParts[len(compositeKeyParts)-1] ck, err := StringToConfigKey(configID) if err != nil { return nil, err } //get config for key config, err := cmngr.getConfig(ck) if err != nil { return nil, err } configKV := api.ConfigKV{Key: ck, Value: config} configKeys = append(configKeys, &configKV) } return configKeys, nil } //getIndexAndFields index and fields for search func getIndexAndFields(key api.ConfigKey) (string, []string, errors.Error) { fields, err := getIndexedFields(key) if err != nil { return "", nil, err } return indexMspID, fields, nil } //getIndexedFields returns fields defined for search criteria func getIndexedFields(key api.ConfigKey) ([]string, errors.Error) { if key.MspID == "" { return nil, errors.Errorf(errors.InvalidConfigKey, "Invalid key %+v", key) } var fields []string fields = append(fields, key.MspID) return fields, nil }
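A minimal usage sketch of the config manager defined above. The mock stub and the payload variable are illustrative assumptions: shim.NewMockStub is Fabric's standard in-memory test double, but the exact payload schema comes from api.ConfigMessage, which is defined elsewhere, not in this file.

// hypothetical test harness, not part of this package
stub := shim.NewMockStub("configsnap", nil)
stub.MockTransactionStart("tx1") // PutState/SetEvent require an open mock transaction
mgr := mgmt.NewConfigManager(stub)
if err := mgr.Save(configJSON); err != nil { // configJSON: a marshaled api.ConfigMessage
	panic(err)
}
// An MspID alone is enough for Get: an incomplete key falls back to an index search.
configs, getErr := mgr.Get(api.ConfigKey{MspID: "Org1MSP"})
stub.MockTransactionEnd("tx1")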
<gh_stars>0 //! Tests targeting auto traits specifically use super::*; #[test] fn auto_semantics() { test! { program { trait Sized { } #[auto] trait Send { } struct TypeA { } struct Ptr<T> { } impl<T> Send for Ptr<T> where T: Send { } struct List<T> { data: T, next: Ptr<List<T>> } } goal { forall<T> { List<T>: Send } } yields { "No possible solution" } goal { forall<T> { if (T: Send) { List<T>: Send } } } yields { "Unique" } goal { List<TypeA>: Send } yields { "Unique" } goal { exists<T> { T: Send } } yields { "Ambiguous" } } } #[test] fn auto_trait_without_impls() { test! { program { #[auto] trait Send { } struct TypeA { } struct Useless<T> { } struct Data<T> { data: T } } goal { TypeA: Send } yields { "Unique" } // No fields so `Useless<T>` is `Send`. goal { forall<T> { Useless<T>: Send } } yields { "Unique" } goal { forall<T> { if (T: Send) { Data<T>: Send } } } yields { "Unique" } } } #[test] fn auto_trait_with_impls() { test! { program { #[auto] trait Send { } struct TypeA { } struct TypeB { } struct Vec<T> { } impl<T> Send for Vec<T> where T: Send { } impl !Send for TypeA { } } goal { TypeA: Send } yields { "No possible solution" } goal { TypeB: Send } yields { "Unique" } goal { Vec<TypeA>: Send } yields { "No possible solution" } goal { Vec<TypeB>: Send } yields { "Unique" } goal { forall<T> { Vec<T>: Send } } yields { "No possible solution" } } } /// This Flounders because auto traits can't be enumerated #[test] fn auto_traits_flounder() { test! { program { struct Foo { } struct Bar { } #[auto] trait Send { } } goal { exists<A> { A: Send } } yields_first[SolverChoice::slg(3, None)] { "Floundered" } } } #[test] fn enum_auto_trait() { test! { program { #[auto] trait Send { } struct Foo { } struct Bar { } impl Send for Foo { } impl !Send for Bar { } enum A { X, Y(Foo), Z { z: Foo, } } enum B { X, Y(Foo), Z { z: Bar, } } enum C { X, Y(Bar), Z { z: Foo, } } } goal { A: Send } yields { "Unique; substitution [], lifetime constraints []" } goal { B: Send } yields { "No possible solution" } goal { C: Send } yields { "No possible solution" } } }
// ParseShardSet parses a shard set from the input string.
func ParseShardSet(s string) (ShardSet, error) {
	ss := make(ShardSet, defaultNumShards)
	if err := ss.ParseRange(s); err != nil {
		return nil, err
	}
	return ss, nil
}
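A hedged usage sketch of the function above; the range literal "0..63" stands in for whatever syntax ShardSet.ParseRange actually accepts, which this file does not show.

shards, err := ParseShardSet("0..63") // hypothetical range literal
if err != nil {
	log.Fatalf("invalid shard set: %v", err)
}
fmt.Printf("parsed %d shards\n", len(shards))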
// PINCache is a modified version of TMCache // Modifications by <NAME> // Copyright (c) 2015 Pinterest. All rights reserved. #import <Foundation/Foundation.h> #import "PINCacheMacros.h" #import "PINCaching.h" #import "PINDiskCache.h" #import "PINMemoryCache.h" NS_ASSUME_NONNULL_BEGIN @class PINCacheService; /** `PINCache` is a thread safe key/value store designed for persisting temporary objects that are expensive to reproduce, such as downloaded data or the results of slow processing. It is comprised of two self-similar stores, one in memory (<PINMemoryCache>) and one on disk (<PINDiskCache>). `PINCache` itself actually does very little; its main function is providing a front end for a common use case: a small, fast memory cache that asynchronously persists itself to a large, slow disk cache. When objects are removed from the memory cache in response to an "apocalyptic" event they remain in the disk cache and are repopulated in memory the next time they are accessed. `PINCache` also does the tedious work of creating a dispatch group to wait for both caches to finish their operations without blocking each other. The parallel caches are accessible as public properties (<memoryCache> and <diskCache>) and can be manipulated separately if necessary. See the docs for <PINMemoryCache> and <PINDiskCache> for more details. @warning when using in extension or watch extension, define PIN_APP_EXTENSIONS=1 */ PIN_SUBCLASSING_RESTRICTED @interface PINCacheService : NSObject <PINCaching, PINCacheObjectSubscripting> #pragma mark - /// @name Core /** Synchronously retrieves the total byte count of the <diskCache> on the shared disk queue. */ @property (readonly) NSUInteger diskByteCount; /** The underlying disk cache, see <PINDiskCache> for additional configuration and trimming options. */ @property (readonly) PINDiskCache *diskCache; /** The underlying memory cache, see <PINMemoryCache> for additional configuration and trimming options. */ @property (readonly) PINMemoryCache *memoryCache; #pragma mark - Lifecycle /// @name Initialization /** A shared cache. @result The shared singleton cache instance. */ @property (class, strong, readonly) PINCacheService *sharedCache; - (instancetype)init NS_UNAVAILABLE; /** Multiple instances with the same name are *not* allowed and can *not* safely access the same data on disk. Also used to create the <diskCache>. @see name @param name The name of the cache. @result A new cache with the specified name. */ - (instancetype)initWithName:(nonnull NSString *)name; /** Multiple instances with the same name are *not* allowed and can *not* safely access the same data on disk. Also used to create the <diskCache>. @see name @param name The name of the cache. @param rootPath The path of the cache on disk. @result A new cache with the specified name. */ - (instancetype)initWithName:(nonnull NSString *)name rootPath:(nonnull NSString *)rootPath; /** Multiple instances with the same name are *not* allowed and can *not* safely access the same data on disk.. Also used to create the <diskCache>. Initializer allows you to override default NSKeyedArchiver/NSKeyedUnarchiver serialization for <diskCache>. You must provide both serializer and deserializer, or opt-out to default implementation providing nil values. @see name @param name The name of the cache. @param rootPath The path of the cache on disk. @param serializer A block used to serialize object before writing to disk. If nil provided, default NSKeyedArchiver serialized will be used. 
@param deserializer A block used to deserialize object read from disk. If nil provided, default NSKeyedUnarchiver serialized will be used. @result A new cache with the specified name. */ - (instancetype)initWithName:(NSString *)name rootPath:(NSString *)rootPath serializer:(nullable PINDiskCacheSerializerBlock)serializer deserializer:(nullable PINDiskCacheDeserializerBlock)deserializer; /** Multiple instances with the same name are *not* allowed and can *not* safely access the same data on disk. Also used to create the <diskCache>. Initializer allows you to override default NSKeyedArchiver/NSKeyedUnarchiver serialization for <diskCache>. You must provide both serializer and deserializer, or opt-out to default implementation providing nil values. @see name @param name The name of the cache. @param rootPath The path of the cache on disk. @param serializer A block used to serialize object before writing to disk. If nil provided, default NSKeyedArchiver serialized will be used. @param deserializer A block used to deserialize object read from disk. If nil provided, default NSKeyedUnarchiver serialized will be used. @param keyEncoder A block used to encode key(filename). If nil provided, default url encoder will be used @param keyDecoder A block used to decode key(filename). If nil provided, default url decoder will be used @result A new cache with the specified name. */ - (instancetype)initWithName:(nonnull NSString *)name rootPath:(nonnull NSString *)rootPath serializer:(nullable PINDiskCacheSerializerBlock)serializer deserializer:(nullable PINDiskCacheDeserializerBlock)deserializer keyEncoder:(nullable PINDiskCacheKeyEncoderBlock)keyEncoder keyDecoder:(nullable PINDiskCacheKeyDecoderBlock)keyDecoder; /** Multiple instances with the same name are *not* allowed and can *not* safely access the same data on disk. Also used to create the <diskCache>. Initializer allows you to override default NSKeyedArchiver/NSKeyedUnarchiver serialization for <diskCache>. You must provide both serializer and deserializer, or opt-out to default implementation providing nil values. @see name @param name The name of the cache. @param rootPath The path of the cache on disk. @param serializer A block used to serialize object before writing to disk. If nil provided, default NSKeyedArchiver serialized will be used. @param deserializer A block used to deserialize object read from disk. If nil provided, default NSKeyedUnarchiver serialized will be used. @param keyEncoder A block used to encode key(filename). If nil provided, default url encoder will be used @param keyDecoder A block used to decode key(filename). If nil provided, default url decoder will be used @param ttlCache Whether or not the cache should behave as a TTL cache. @result A new cache with the specified name. 
*/ - (instancetype)initWithName:(nonnull NSString *)name rootPath:(nonnull NSString *)rootPath serializer:(nullable PINDiskCacheSerializerBlock)serializer deserializer:(nullable PINDiskCacheDeserializerBlock)deserializer keyEncoder:(nullable PINDiskCacheKeyEncoderBlock)keyEncoder keyDecoder:(nullable PINDiskCacheKeyDecoderBlock)keyDecoder ttlCache:(BOOL)ttlCache NS_DESIGNATED_INITIALIZER; @end @interface PINCacheService (Deprecated) - (void)containsObjectForKey:(NSString *)key block:(PINCacheObjectContainmentBlock)block __attribute__((deprecated)); - (void)objectForKey:(NSString *)key block:(PINCacheObjectBlock)block __attribute__((deprecated)); - (void)setObject:(id <NSCoding>)object forKey:(NSString *)key block:(nullable PINCacheObjectBlock)block __attribute__((deprecated)); - (void)setObject:(id <NSCoding>)object forKey:(NSString *)key withCost:(NSUInteger)cost block:(nullable PINCacheObjectBlock)block __attribute__((deprecated)); - (void)removeObjectForKey:(NSString *)key block:(nullable PINCacheObjectBlock)block __attribute__((deprecated)); - (void)trimToDate:(NSDate *)date block:(nullable PINCacheBlock)block __attribute__((deprecated)); - (void)removeAllObjects:(nullable PINCacheBlock)block __attribute__((deprecated)); @end NS_ASSUME_NONNULL_END
export class Node {
    constructor(type: any, defaults: any, options: any, binds: any, config: any, attributes: any);
    type: any;
    _id: string;
    parent: any;
    root: any;
    path: any;
    index: any;
    configure(config: any, attributes: any): any;
    _config: {
        traits: any;
        props: any;
        finals: any;
        freeform: any;
    } | undefined;
    attributes: any;
    dispose(): null;
    _added(parent: any): any;
    _removed(): null;
    _index(index: any, parent: any): any;
    order: number | undefined;
    _encode(path: any): number;
    toString(): string;
    toMarkup(selector: null | undefined, indent: any): any;
    print(selector: any, level: any): any;
}
Do We Still Need Preoperative Autologous Blood Donation? – It Is High Time for a Reappraisal!

Preoperative autologous blood donation (PABD) can be an adequate solution to a patient's problem before nonurgent surgery if a high (>10%) probability of perioperative blood transfusion exists in the individual case and, at the same time, pre-existing alloimmunization or a different ethnic background makes it difficult to supply the patient with compatible red blood cell concentrates. In times of acute blood shortage, when nonurgent surgery might be rescheduled and delayed, and in cases of extremely angst-ridden patients fearing homologous blood transfusion, PABD can be considered in rare individual cases as well. Nonetheless, in all these cases the patient must be eligible for PABD, and the indication as well as potential contraindications have to be weighed against each other in a thorough individual risk-benefit analysis. However, these are relatively rare cases. Apart from these and a few other situations, PABD in our opinion should not be used, because of high costs and logistic effort, high wastage, and considerable risks for the donor/patient on the one hand, set against minuscule benefits on the other. PABD cannot significantly contribute to the blood supply. PABD in error-free use, which is unrealistic, reduces the risk of transmission of known and unknown viral pathogens, but possibly not of bacteria. In real-life situations, the residual risk of transmission of transfusion-relevant viruses such as HIV, HCV or HBV is not diminished by PABD. The same is true for clerical errors and ABO incompatibility or acute hemolytic transfusion reactions due to a mix-up of products. PABD reduces the risk of (allo)immunization and therefore has its indication in special clinical situations. PABD, on the other hand, increases the risks for the donor at the time of donation and afterwards. Autologous blood products seem to have higher rates of bacterial contamination. The risk for the patient of receiving any transfusion perioperatively is increased, due both to lower hemoglobin levels at admission (iatrogenic anemia) and to an inappropriately more liberal transfusion strategy in autologous hemotherapy compared to transfusion of homologous blood. A significant number of patients who would otherwise never receive any perioperative transfusion at all enter the transfusion chain due to PABD procedures, facing the risks of a blood donor as well as the risks of a transfusion recipient.
<reponame>timgates42/trex-core /* hhaim Cisco Systems, Inc. */ /* Copyright (c) 2016-2016 Cisco Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef __TREX_STACK_COUNTERS_H__ #define __TREX_STACK_COUNTERS_H__ #include <string> #include <json/json.h> #include "utl_counter.h" #include "utl_dbl_human.h" class CRxCounters { public: enum { /* TX/RX */ CNT_RX, CNT_TX, CNT_RX_TX_SIZE, }; enum { /* PKT/BYTE*/ CNT_PKT, CNT_BYTE, CNT_TYPE, }; enum { /* type of packet */ CNT_UNICAST, CNT_MULTICAST, CNT_BROADCAST, CNT_UMB_TYPE, }; bool Create(); void Delete(); void clear_counters(); void dump(); void dump_meta(std::string name, Json::Value & json); void dump_values(std::string name, bool zeros, Json::Value & obj); public: uint64_t m_gen_cnt[CNT_RX_TX_SIZE][CNT_TYPE][CNT_UMB_TYPE]; uint64_t m_tx_err_small_pkt; uint64_t m_tx_err_big_9k; uint64_t m_tx_dropped_no_mbuf; uint64_t m_rx_err_invalid_pkt; uint64_t m_rx_bcast_filtered; uint64_t m_rx_mcast_filtered; CPPSMeasure m_rx_pps; CPPSMeasure m_tx_pps; CGTblClmCounters m_clm; /* utility for dump */ CTblGCounters m_tbl; }; #endif /* __TREX_STACK_COUNTERS_H__ */
/* Simple remapper to remap a split parameter to the same expression
   based on a special dummy decl so that edge redirections can detect
   transitive splitting and finish them.  */

static tree
remap_split_decl_to_dummy (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;

  if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
    {
      struct simple_tree_swap_info *swapinfo
	= (struct simple_tree_swap_info *) data;
      if (t == swapinfo->from
	  || (TREE_CODE (t) == SSA_NAME
	      && SSA_NAME_VAR (t) == swapinfo->from))
	{
	  *tp = swapinfo->to;
	  swapinfo->done = true;
	}
      *walk_subtrees = 0;
    }
  else if (TYPE_P (t))
    *walk_subtrees = 0;
  else
    *walk_subtrees = 1;
  return NULL_TREE;
}
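For context, a sketch of how such a callback is typically driven through GCC's generic tree walker. Only walk_tree and the callback above come from real code; the surrounding declarations (expr, old_decl, dummy_decl) are hypothetical.

/* Hypothetical caller: replace every occurrence of OLD_DECL inside
   EXPR with the special dummy DUMMY_DECL.  */
struct simple_tree_swap_info swapinfo;
swapinfo.from = old_decl;   /* the decl being split away  */
swapinfo.to = dummy_decl;   /* the dummy stand-in         */
swapinfo.done = false;
walk_tree (&expr, remap_split_decl_to_dummy, &swapinfo, NULL);
if (swapinfo.done)
  ;  /* at least one occurrence was remapped  */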
import os


# Typically connected to a model's post_delete signal so that the
# underlying file is cleaned up when the owning instance is deleted.
def auto_delete_file_on_delete(sender, instance, **kwargs):
    if instance.value.path:
        try:
            if os.path.isfile(instance.value.path):
                os.remove(instance.value.path)
        except Exception:
            # Fall back to the storage backend, e.g. when the file lives
            # on remote storage rather than the local filesystem.
            storage, name = instance.value.storage, instance.value.name
            storage.delete(name)
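A hedged wiring example: the model name Attachment and its FileField named value are hypothetical stand-ins, but post_delete.connect is the standard Django mechanism for hooking up such a handler.

from django.db.models.signals import post_delete

# hypothetical model with a FileField called `value`
post_delete.connect(auto_delete_file_on_delete, sender=Attachment)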
<reponame>dadkisson/bridgr<filename>internal/bridgr/ruby.go package bridgr import ( "fmt" "os" "path" "reflect" "text/template" "github.com/aztechian/bridgr/internal/bridgr/asset" "github.com/docker/distribution/reference" "github.com/mitchellh/mapstructure" log "unknwon.dev/clog/v2" ) var ( rbImage reference.Named rbGems *template.Template ) const defaultRbSource = "https://rubygems.org" func init() { rbImage, _ = reference.ParseNormalizedNamed(baseImage["ruby"] + ":2-alpine") rbGems = asset.Template("Gemfile") } // Ruby struct is the configuration object specifically for the Ruby section of the config file type Ruby struct { Gems []rubyItem Version rubyVersion Sources []string } // RubyItem is a struct to hold a ruby gem specification type rubyItem struct { Package string Version string } type rubyVersion reference.Named func (ri rubyItem) String() string { if ri.Version != "" { return fmt.Sprintf("%s, %s", ri.Package, ri.Version) } return ri.Package } // BaseDir is the top-level directory name for all objects written out under the Python worker func (r Ruby) dir() string { return BaseDir(r.Name()) } // Image implements the Imager interface func (r *Ruby) Image() reference.Named { if r.Version == nil { return rbImage } return r.Version } // Name returns the name of this Configuration func (r Ruby) Name() string { return "ruby" } func stringToRuby(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { if t == reflect.TypeOf(rubyItem{}) && f.Kind() == reflect.String { return rubyItem{ Package: data.(string), }, nil } return data, nil } func versionToRubyImage(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { if f.Kind() != reflect.String || t != reflect.TypeOf((*rubyVersion)(nil)).Elem() { return data, nil } return reference.ParseAnyReference(baseImage["ruby"] + ":" + data.(string)) } func arrayToRuby(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { if f.Kind() != reflect.Slice || t != reflect.TypeOf(Ruby{}) { return data, nil } var gemList []rubyItem for _, g := range data.([]interface{}) { if pkg, ok := g.(string); ok { gemList = append(gemList, rubyItem{Package: pkg}) } } return Ruby{ Version: rbImage, Sources: []string{defaultRbSource}, Gems: gemList, }, nil } // Hook implements the Parser interface, returns a function for use by mapstructure when parsing config files func (r Ruby) Hook() mapstructure.DecodeHookFunc { return mapstructure.ComposeDecodeHookFunc( stringToRuby, versionToRubyImage, arrayToRuby, ) } // Setup creates the items that are needed to fetch artifacts for the Python worker. It does not actually fetch artifacts. func (r *Ruby) Setup() error { log.Trace("Called Ruby.Setup()") _ = os.MkdirAll(r.dir(), os.ModePerm) gemfile, err := os.Create(path.Join(r.dir(), "Gemfile")) if err != nil { return fmt.Errorf("Unable to create Ruby Gemfile: %s", err) } return asset.RenderFile(rbGems, r, gemfile) } // Run fetches all artifacts for the Python configuration func (r *Ruby) Run() error { log.Trace("Called Ruby.Run()") if err := r.Setup(); err != nil { return err } shell, err := asset.Load("ruby.sh") //no parsing needed, so just Load is fine here if err != nil { return err } batcher := newBatch(r.Image().Name(), r.dir(), path.Join(r.dir(), "Gemfile"), "/Gemfile") return batcher.runContainer("bridgr_ruby", shell) }
Samuel Leighton Dore, seen here at age four, says his home life 'could not have been more ideal'. Instead, childhood sexual trauma, risky health behaviours, smoking, a lack of positive support and negative social interactions pose more of a risk to people's mental health. Samuel Leighton Dore's mother is a sexuality counsellor, and he describes his father as a "sensitive new-age man". "My home life could not have been more ideal," he said. "I come from a very happy, strong, supportive family unit." However, the 25-year-old from Newtown, who edits gay and lesbian website SameSame.com.au, said he was forced to seek psychological help after being "badly" bullied at school because of his feminine traits and long hair. Samuel Leighton Dore (right) with his partner Bradley Tennant. "It's only when [your sexuality] is thrown into doubt by the assistance of other people who start to question it that you do," he said. "If I wasn't told I was supposed to like girls, I wouldn't question my liking of boys." Mr Dore has since struggled with low self-esteem and trusting people as an adult because of the abuse. The study's lead researcher, Dr Richard Burns, backed up Mr Dore's experience, saying gay people were more likely to experience mental health risk factors than heterosexual people. "Initially, we found there was a long-term risk for depression and anxiety among individuals with a bisexual orientation, and there was long-term risk for anxiety among homosexual individuals," he said. "But when we adjusted for these other mental-health risk factors, we found no major risk associated with sexual orientation itself." The findings come as Australia's Parliament wrestles with whether same-sex marriage should be legalised. "We concluded that all things being equal that there is no particular mental health risk for people with a homosexual or bisexual orientation," Dr Burns said. Mr Dore said if there was no stigma associated with being gay, people wouldn't experience the problems he had.
import * as React from 'react'; import { Field, InjectedFormProps, reduxForm } from 'redux-form'; import { renderDatePicker, renderTextField } from '../../helpers/form_helpers'; import { Cohort } from '../../../Types'; // Material UI import { createStyles, Theme, WithStyles, withStyles } from '@material-ui/core'; import Button from '@material-ui/core/Button'; import Icon from '@material-ui/core/Icon'; const styles = (theme: Theme) => createStyles({ button: { marginTop: '2em', }, rightIcon: { marginLeft: theme.spacing.unit, }, }); interface OwnProps { errorMessage: string; cohort: Cohort; } type Props = OwnProps & WithStyles<typeof styles> & InjectedFormProps<Cohort, OwnProps>; class EditCohortForm extends React.Component<Props, {}> { renderAlert() { if (this.props.errorMessage) { return ( <div className="alert alert-danger"> <strong>Oops: </strong> {this.props.errorMessage} </div> ); } } componentWillMount() { this.props.initialize({ ...this.props.cohort, }); } render() { const { handleSubmit } = this.props; return ( <div> {this.renderAlert()} <form onSubmit={handleSubmit}> <Field label="Name" name="name" component={renderTextField} type="text" value={this.props.cohort.name} /> <Field label="Campus City" name="campus" component={renderTextField} type="text" value={this.props.cohort.campus} /> <Field label="Start Date" name="startDate" component={renderDatePicker} value={this.props.cohort.startDate} /> <Field label="End Date" name="endDate" component={renderDatePicker} value={this.props.cohort.endDate} /> <Button variant="contained" color="secondary" className={this.props.classes.button} type="submit" > Edit <Icon className={this.props.classes.rightIcon}>send</Icon> </Button> </form> </div> ); } } export default reduxForm<Cohort, OwnProps>({ form: 'editCohort', })(withStyles(styles)(EditCohortForm));
import Message from "./Message";

export default class GetPartInfoMessage extends Message {
  public id: string;

  constructor(id = "") {
    super();
    this.id = id;
  }
}
A newly released interactive campus map offers a highly-customizable tool for finding specific buildings, discovering accessible routes, and exploring the Virginia Tech campus in new ways. The map is the result of collaboration between Virginia Tech’s Enterprise Geographic Information Systems, University Relations, Facilities Services, and the Office of Equity and Access. The map was designed for use on mobile devices or in standard browsers, and includes a new high-resolution aerial photography layer in addition to the familiar color-coded Virginia Tech campus map. The mapping tool facilitates the location of campus points of interest, and provides rich new features that make it easier to get around by bus, bike, car, on foot, or using assistive devices. The new map complements the static maps that have been available on the Virginia Tech website for several years. Those maps will continue to be available. The interactive campus map also uses many of the same symbols as the static maps to give it a familiar feel. Visitors can search campus street addresses, which were added in June, by accessing the My Location option in the Customize this Map menu. Street addresses will also appear whenever a building is clicked. “The interactive map captures many of the dynamic elements that exist on the Virginia Tech campus,” says John Jackson, director of web communications at Virginia Tech. “It provides a much-needed service for the campus community, visitors, and guests. Today’s introduction is just the beginning; we plan to add more features, information, and functionality to the map.”
/** * * * @author Stefan Meyer <[email protected]> * @version $Id$ */ public class ilServer { private String version = "4.4.0.1"; private String[] arguments; private String command; private static final Logger logger = Logger.getLogger(ilServer.class); /** * @param args */ public ilServer(String[] args) { arguments = args; } /** * @param args */ public static void main(String[] args) { ilServer server = null; BasicConfigurator.configure(); logger.setLevel(Level.INFO); Logger root = Logger.getLogger("org"); root.setLevel(Level.OFF); server = new ilServer(args); server.handleRequest(); } /** * @return success status */ private boolean handleRequest() { if(arguments.length < 1) { logger.error(getUsage()); return false; } if(arguments.length == 1) { command = arguments[0]; if(command.compareTo("version") == 0) { System.out.println("ILIAS java server version \"" + version + "\""); return true; } } command = arguments[1]; if(command.compareTo("start") == 0) { if(arguments.length != 2) { logger.error("Usage: java -jar ilServer.jar PATH_TO_SERVER_INI start"); return false; } return startServer(); } else if(command.compareTo("stop") == 0) { if(arguments.length != 2) { logger.error("Usage: java -jar ilServer.jar PATH_TO_SERVER_INI stop"); return false; } return stopServer(); } else if(command.compareTo("createIndex") == 0) { if(arguments.length != 3) { logger.error("Usage java -jar ilServer.jar PATH_TO_SERVER_INI createIndex CLIENT_KEY"); return false; } return createIndexer(); } else if(command.compareTo("updateIndex") == 0) { if(arguments.length < 3) { logger.error("Usage java -jar ilServer.jar PATH_TO_SERVER_INI updateIndex CLIENT_KEY"); return false; } return updateIndexer(); } else if(command.compareTo("search") == 0) { if(arguments.length != 4) { logger.error("Usage java -jar ilServer.jar PATH_TO_SERVER_INI CLIENT_KEY search QUERY_STRING"); return false; } return startSearch(); } else if(command.compareTo("status") == 0) { if(arguments.length != 2) { logger.error("Usage java -jar ilServer.jar PATH_TO_SERVER_INI status"); return false; } return getStatus(); } else { logger.error(getUsage()); return false; } } /** * @return */ @SuppressWarnings("unchecked") private boolean createIndexer() { XmlRpcClient client; IniFileParser parser; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],true); if(!ClientSettings.exists(arguments[2])) { throw new ConfigurationException("Unknown client given: " + arguments[2]); } client = initRpcClient(); Vector params = new Vector(); params.add(arguments[2]); params.add(false); client.execute("RPCIndexHandler.index",params); return true; } catch (Exception e) { System.err.println(e); logger.fatal(e.getMessage()); System.exit(1); } return false; } /** * @return */ @SuppressWarnings("unchecked") private boolean updateIndexer() { XmlRpcClient client; IniFileParser parser; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],true); if(!ClientSettings.exists(arguments[2])) { throw new ConfigurationException("Unknown client given: " + arguments[2]); } client = initRpcClient(); Vector params = new Vector(); params.add(arguments[2]); params.add(true); client.execute("RPCIndexHandler.index",params); return true; } catch (Exception e) { System.err.println(e); logger.fatal(e.getMessage()); System.exit(1); } return false; } /** * @return */ @SuppressWarnings("unchecked") private boolean startSearch() { XmlRpcClient client; IniFileParser parser; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],true); 
if(!ClientSettings.exists(arguments[2])) { throw new ConfigurationException("Unknown client given: " + arguments[2]); } client = initRpcClient(); Vector params = new Vector(); params.add(arguments[2]); params.add(arguments[3]); params.add(1); String response = (String) client.execute("RPCSearchHandler.search",params); System.out.println(response); return true; } catch (Exception e) { System.err.println(e); logger.fatal(e.getMessage()); System.exit(1); } return false; } /** * Start RPC services */ private boolean startServer() { ServerSettings settings; RPCServer rpc; XmlRpcClient client; IniFileParser parser; String status; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],true); client = initRpcClient(); // Check if server is already running try { status = (String) client.execute("RPCAdministration.status",new Vector()); System.err.println("Server already started. Aborting"); System.exit(1); } catch(XmlRpcException e) { logger.info("No server running. Starting new instance..."); } settings = ServerSettings.getInstance(); logger.info("New rpc server"); rpc = RPCServer.getInstance(settings.getHost(),settings.getPort()); logger.info("Server start"); rpc.start(); client = initRpcClient(); client.execute("RPCAdministration.start",new Vector()); // Check if webserver is alive // otherwise stop execution while(true) { Thread.sleep(3000); if(!rpc.isAlive()) { rpc.shutdown(); break; } } logger.info("WebServer shutdown. Aborting..."); return true; } catch (ConfigurationException e) { //logger.error(e); System.exit(1); return false; } catch (InterruptedException e) { logger.error("VM did not allow to sleep. Aborting!"); } catch (XmlRpcException e) { System.out.println("Error starting server: " + e); System.exit(1); } catch (IOException e) { logger.error("IOException " + e.getMessage()); } catch (Exception e) { logger.error("IOException " + e.getMessage()); } catch(Throwable e) { logger.error("IOException " + e.getMessage()); } return false; } /** * Call RPC stop method, which will stop the WebServer * and after that stop the execution of the main thread * */ @SuppressWarnings("unchecked") private boolean stopServer() { XmlRpcClient client; IniFileParser parser; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],false); client = initRpcClient(); client.execute("RPCAdministration.stop",new Vector()); return true; } catch (ConfigurationException e) { logger.error("Configuration " + e.getMessage()); } catch (XmlRpcException e) { logger.error("XMLRPC " + e.getMessage()); } catch (IOException e) { logger.error("IOException " + e.getMessage()); } return false; } @SuppressWarnings("unchecked") private boolean getStatus() { XmlRpcClient client; IniFileParser parser; ServerSettings settings; String status; try { parser = new IniFileParser(); parser.parseServerSettings(arguments[0],false); settings = ServerSettings.getInstance(); client = initRpcClient(); status = (String) client.execute("RPCAdministration.status",new Vector()); System.out.println(status); return true; } catch (ConfigurationException e) { logger.error("Configuration " + e.getMessage()); } catch (XmlRpcException e) { System.out.println(ilServerStatus.STOPPED); System.exit(1); } catch (IOException e) { System.out.println(ilServerStatus.STOPPED); System.exit(1); } return false; } /** * * @return String usage */ private String getUsage() { return "Usage: java -jar ilServer.jar PATH_TO_SERVER_INI start|stop|createIndex|updateIndex|search PARAMS"; } /** * * @return XmlRpcClient * @throws 
ConfigurationException * @throws MalformedURLException */ private XmlRpcClient initRpcClient() throws ConfigurationException, MalformedURLException { XmlRpcClient client; XmlRpcClientConfigImpl config; ServerSettings settings; settings = ServerSettings.getInstance(); config = new XmlRpcClientConfigImpl(); config.setServerURL(new URL(settings.getServerUrl())); config.setConnectionTimeout(10000); config.setReplyTimeout(0); client = new XmlRpcClient(); client.setTransportFactory(new XmlRpcCommonsTransportFactory(client)); client.setConfig(config); return client; } }
/** * A sample filter that captures Request and Response headers and sends them to * Mantis using the mantis-publish library. */ @Slf4j @Singleton public class CaptureRequestEventFilter implements Filter { private static final String RESPONSE_HEADER_PREFIX = "response.header."; private static final String REQUEST_HEADER_PREFIX = "request.header."; private static final String VALUE_SEPARATOR = ","; @Inject private EventPublisher publisher; @Override public void init(FilterConfig filterConfig) { log.info("Capture Request data filter inited"); } @Override public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException { final HttpServletRequest req = (HttpServletRequest) servletRequest; final HttpServletResponse res = (HttpServletResponse)servletResponse; log.debug("In do filter"); final long startMillis = System.currentTimeMillis(); // Add a wrapper around the Response object to capture headers. final ResponseSpy responseSpy = new ResponseSpy(res); // Send request down the filter chain filterChain.doFilter(servletRequest,responseSpy); // request is complete now gather all the request data and send to mantis. processPostFilter(startMillis, req, responseSpy); } /** * Invoked after the request has been completed. Used to gather all the request and response headers * associated with this request and publish to mantis. * @param startMillis The time processing began for this request. * @param req The servlet request object * @param responseSpy The spy servlet response. */ private void processPostFilter(long startMillis, HttpServletRequest req, ResponseSpy responseSpy) { try { Map<String, Object> event = new HashMap<>(); postProcess(req, responseSpy,event); Event rEvent = new Event(event); final long duration = System.currentTimeMillis() - startMillis; rEvent.set("duration", duration); log.info("sending event {} to stream {}", rEvent); CompletionStage<PublishStatus> sendResult = publisher.publish(rEvent); sendResult.whenCompleteAsync((status,throwable) -> { log.info("Filter send event status=> {}", status); }); } catch (Exception e) { log.error("failed to process event", e); } } /** * Captures the request and response headers associated with this request. * @param httpServletRequest * @param responseSpy * @param event */ private void postProcess(HttpServletRequest httpServletRequest, ResponseSpy responseSpy, Map<String,Object> event) { try { int rdm = ThreadLocalRandom.current().nextInt(); if(rdm < 0) { rdm = rdm * (-1); } event.put("request.uuid", rdm); captureRequestData(event, httpServletRequest); captureResponseData(event, responseSpy); } catch (Exception e) { event.put("exception", e.toString()); log.error("Error capturing data in api.RequestEventInfoCollector filter! uri=" + httpServletRequest.getRequestURI(), e); } } /** * Captures response headers. 
* @param event * @param res */ private void captureResponseData(Map<String, Object> event, ResponseSpy res ) { log.debug("Capturing response data"); // response headers for (String name : res.headers.keySet()) { final StringBuilder valBuilder = new StringBuilder(); boolean firstValue = true; for (String s : res.headers.get(name)) { // only prepends separator for non-first header values if (firstValue) firstValue = false; else { valBuilder.append(VALUE_SEPARATOR); } valBuilder.append(s); } event.put(RESPONSE_HEADER_PREFIX + name, valBuilder.toString()); } // Set Cookies if (!res.cookies.isEmpty()) { Iterator<Cookie> cookies = res.cookies.iterator(); StringBuilder setCookies = new StringBuilder(); while (cookies.hasNext()) { Cookie cookie = cookies.next(); setCookies.append(cookie.getName()).append("=").append(cookie.getValue()); String domain = cookie.getDomain(); if (domain != null) { setCookies.append("; Domain=").append(domain); } int maxAge = cookie.getMaxAge(); if (maxAge >= 0) { setCookies.append("; Max-Age=").append(maxAge); } String path = cookie.getPath(); if (path != null) { setCookies.append("; Path=").append(path); } if (cookie.getSecure()) { setCookies.append("; Secure"); } if (cookie.isHttpOnly()) { setCookies.append("; HttpOnly"); } if (cookies.hasNext()) { setCookies.append(VALUE_SEPARATOR); } } event.put(RESPONSE_HEADER_PREFIX + "set-cookie", setCookies.toString()); } // status of the request int status = res.statusCode; event.put("status", status); } /** * Captures request headers. * @param event * @param req */ private void captureRequestData(Map<String, Object> event, HttpServletRequest req) { // basic request properties String path = req.getRequestURI(); if (path == null) path = "/"; event.put("path", path); event.put("host", req.getHeader("host")); event.put("query", req.getQueryString()); event.put("method", req.getMethod()); event.put("currentTime", System.currentTimeMillis()); // request headers for (final Enumeration<String> names = req.getHeaderNames(); names.hasMoreElements();) { final String name = (String)names.nextElement(); final StringBuilder valBuilder = new StringBuilder(); boolean firstValue = true; for (final Enumeration<String> vals = req.getHeaders(name); vals.hasMoreElements();) { // only prepends separator for non-first header values if (firstValue) firstValue = false; else { valBuilder.append(VALUE_SEPARATOR); } valBuilder.append(vals.nextElement()); } event.put(REQUEST_HEADER_PREFIX + name, valBuilder.toString()); } // request params // HTTP POSTs send a param with a weird encoded name, so we strip them out with this regex if("GET".equals(req.getMethod())) { final Map<String,String[]> params = req.getParameterMap(); for (final Object key : params.keySet()) { final String keyString = key.toString(); final Object val = params.get(key); String valString; if (val instanceof String[]) { final String[] valArray = (String[]) val; if (valArray.length == 1) valString = valArray[0]; else valString = Arrays.asList((String[]) val).toString(); } else { valString = val.toString(); } event.put("param." + key, valString); } } } @Override public void destroy() { } /** * A simple wrapper for {@link HttpServletResponseWrapper} that is used to capture headers * and cookies associated with the response. 
*/ private static final class ResponseSpy extends HttpServletResponseWrapper { int statusCode = 200; final Map<String, List<String>> headers = new ConcurrentHashMap<>(); final List<Cookie> cookies = new ArrayList<>(); private ResponseSpy(HttpServletResponse response) { super(response); } @Override public void setStatus(int sc) { super.setStatus(sc); this.statusCode = sc; } @Override public void addCookie(Cookie cookie) { cookies.add(cookie); super.addCookie(cookie); } @Override public void setHeader(String name, String value) { List<String> values = new ArrayList<>(); values.add(value); headers.put(name, values); super.setHeader(name, value); } @Override public void addHeader(String name, String value) { List<String> values = headers.computeIfAbsent(name, k -> new ArrayList<>()); values.add(value); super.addHeader(name, value); } @Override public void setDateHeader(String name, long date) { List<String> values = new ArrayList<>(); values.add(Long.toString(date)); headers.put(name, values); super.setDateHeader(name, date); } @Override public void setIntHeader(String name, int val) { List<String> values = new ArrayList<>(); values.add(Integer.toString(val)); headers.put(name, values); super.setIntHeader(name, val); } } }
package cn.yjxxclub.springboot.entity; import java.util.Date; import java.util.List; /** * Author: 遇见小星 * Email: <EMAIL> * Date: 17-6-7 * Time: 下午4:49 * Describe: 客户 */ public class Member implements java.io.Serializable{ private Integer id; private String name; private User user; private String phoneNumber; private String address; private BmIndustry bmIndustry; private BmArea bmArea; private BmType bmType; private BmStatus bmStatus; private Contact contact; private List<ContactNotes> contactNotes; private Integer status; private Date updateDate; private String creator; private Date createDate; public List<ContactNotes> getContactNotes() { return contactNotes; } public void setContactNotes(List<ContactNotes> contactNotes) { this.contactNotes = contactNotes; } public Contact getContact() { return contact; } public void setContact(Contact contact) { this.contact = contact; } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public User getUser() { return user; } public void setUser(User user) { this.user = user; } public String getPhoneNumber() { return phoneNumber; } public void setPhoneNumber(String phoneNumber) { this.phoneNumber = phoneNumber; } public String getAddress() { return address; } public void setAddress(String address) { this.address = address; } public BmIndustry getBmIndustry() { return bmIndustry; } public void setBmIndustry(BmIndustry bmIndustry) { this.bmIndustry = bmIndustry; } public BmArea getBmArea() { return bmArea; } public void setBmArea(BmArea bmArea) { this.bmArea = bmArea; } public BmType getBmType() { return bmType; } public void setBmType(BmType bmType) { this.bmType = bmType; } public BmStatus getBmStatus() { return bmStatus; } public void setBmStatus(BmStatus bmStatus) { this.bmStatus = bmStatus; } public Integer getStatus() { return status; } public void setStatus(Integer status) { this.status = status; } public Date getUpdateDate() { return updateDate; } public void setUpdateDate(Date updateDate) { this.updateDate = updateDate; } public String getCreator() { return creator; } public void setCreator(String creator) { this.creator = creator; } public Date getCreateDate() { return createDate; } public void setCreateDate(Date createDate) { this.createDate = createDate; } @Override public String toString() { return "Member{" + "id=" + id + ", name='" + name + '\'' + ", user=" + user + ", phoneNumber='" + phoneNumber + '\'' + ", address='" + address + '\'' + ", bmIndustry=" + bmIndustry + ", bmArea=" + bmArea + ", bmType=" + bmType + ", bmStatus=" + bmStatus + ", status=" + status + ", updateDate=" + updateDate + ", creator='" + creator + '\'' + ", createDate=" + createDate + '}'; } }
/**
 * It's not a best practice to create the same class twice with the same
 * logic inside. Using generics allows the class to take whatever type we
 * pass at creation time.
 *
 * ex: new AnythingCollection<string>(['a', 'b']);
 *     new AnythingCollection<number>([1, 2, 3]);
 */
class NumberCollection {
  constructor(public collection: number[]) {}

  get(index: number): number {
    return this.collection[index];
  }
}

class StringCollection {
  constructor(public collection: string[]) {}

  get(index: number): string {
    return this.collection[index];
  }
}

/**
 * Here we use the generic <T> to give the class a more 'flexible' type.
 */
class AnythingCollection<T> {
  constructor(public collection: T[]) {}

  get(index: number): T {
    return this.collection[index];
  }
}

// Generics Type inference
const arr0 = new AnythingCollection(['a', 'b', 'c']);
const arr1 = new AnythingCollection([1, 2, 3]);

// Generics Type inference errors
// let arr2 = new AnythingCollection([]);
// arr2 = ([1, 2, 3, 4]); // Type 'number[]' is not assignable to type 'AnythingCollection<any>'

/**
 * Likewise, it is not the best implementation to duplicate the same logic
 * across two different functions.
 */
function printString(arr: string[]): void {
  for (let i = 0; i < arr.length; i++) {
    console.log(arr[i]);
  }
}

function printNumber(arr: number[]): void {
  for (let i = 0; i < arr.length; i++) {
    console.log(arr[i]);
  }
}

function printAnything<T>(arr: T[]): void {
  for (let i = 0; i < arr.length; i++) {
    console.log(arr[i]);
  }
}

// Generics Annotation
printAnything<string>(['a', 'b']);
// Generics Inference
printAnything([1, 2, 3]);

// Generics Constraints
class Car {
  print() {
    console.log('im a car');
  }
}

class House {
  print() {
    console.log('im a house');
  }
}

interface IPrintable {
  print(): void;
}

function printAnyClass<T extends IPrintable>(arr: T[]) {
  for (let i = 0; i < arr.length; i++) {
    arr[i].print();
  }
}

// printAnyClass([1, 2, 3]); // Type 'number' is not assignable to type 'IPrintable'
printAnyClass([new Car(), new Car(), new Car(), new House(), new House()]);
/* MIT License
 *
 * Copyright (c) 2018 <NAME> <mitghi[at]gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package main

import (
	"sync"
	"time"

	"github.com/mitghi/protox/client"
	"github.com/mitghi/protox/protobase"
	"github.com/mitghi/protox/protocol"
)

type User struct {
	*client.CLBUser
}

type CustomClient struct {
	sync.RWMutex
	client.Client

	publishing bool
	user       *User
}

func NewUser(opts client.CLBOptions) *User {
	ncl, ok := client.NewCLBUser(opts)
	if !ok {
		return nil
	}
	ret := &User{ncl}
	return ret
}

func NewCustomClient(uid, pid, cid string) *CustomClient {
	return &CustomClient{
		sync.RWMutex{},
		client.Client{Username: uid, Password: <PASSWORD>, ClientId: cid},
		false,
		nil,
	}
}

func (self *CustomClient) Connected(opts protobase.OptionInterface) bool {
	logger.Infof("+ [USER] %s connected with opts %+v.\n", self.Username, opts.(*protocol.ConnackOpts))
	self.user.SetConnected(true)
	self.sendPubs()
	return true
}

func (self *CustomClient) Disconnected(opts protobase.OptCode) {
	logger.Infof("+ [USER] %s disconnected.\n", self.Username)
	// Mark the user as disconnected; the publish loop checks IsRunning and stops.
	self.user.SetConnected(false)
}

func (self *CustomClient) Subscribe(msg protobase.MsgInterface) {
	logger.Infof("+ [USER] %s subscribed to %s.\n", self.Username, msg.Envelope().Route())
}

func (self *CustomClient) Publish(msg protobase.MsgInterface) {
	var (
		dir      protobase.MsgDir               = msg.Dir()
		envelope protobase.MsgEnvelopeInterface = msg.Envelope()
	)
	var (
		topic   string = envelope.Route()
		message []byte = envelope.Payload()
	)
	switch dir {
	case protobase.MDInbound:
		logger.Infof("+ [USER][publish] %s is sending to topic [%s], message [%s].\n", self.Username, topic, string(message))
	case protobase.MDOutbound:
		logger.Infof("+ [USER][publish] %s has received topic [%s], message [%s].\n", self.Username, topic, string(message))
	}
}

func (self *CustomClient) sendPubs() {
	var isp bool
	self.RLock()
	isp = self.publishing
	self.RUnlock()
	if isp {
		return
	}
	self.Lock()
	if !self.publishing {
		self.publishing = true
		go func() {
			ticker := time.NewTicker(time.Second * 2)
			for range ticker.C {
				self.user.Conn.Publish(*pr, []byte(*pm), byte(*qos), pcallback)
				if !self.user.IsRunning() {
					break
				}
			}
			ticker.Stop()
			return
		}()
	}
	self.Unlock()
}
There are people who live in vans by choice. For some, living in a van allows them to travel the country. For others, it lets them save money to pursue a passion like starting a new company. And still others are drawn to van living because they want to maintain a lifestyle of minimalism and self-reliance. Priceonomics thought it would be interesting to explore this subculture of van dwellers. Have these people figured out the best way to live? While the rest of us are blindly chasing the dream of owning huge homes, is living in a 40 square foot vehicle actually the way to go?

Before diving into the van living lifestyle, it's worth noting that we are talking about people who want to live in vans. There are quite a lot of people who turn to living in vehicles because of a financial catastrophe or because they've found themselves in a really bad situation. As we'll later point out, embarking on van dwelling isn't often a feasible backup plan for people on the verge of homelessness.

The Basics of Van Living

Living in a van is different than living in an RV (recreational vehicle). A van is basically a slightly larger car. An RV is basically a house on wheels and isn't particularly inexpensive or minimalist. Because RVs are so large, you're confined to staying in RV parks, rural environments, and Walmart parking lots. Since vans are smaller and more discreet, you can park overnight in a city or in a rural area. RVs also tend to have toilets and showers, while most vans do not.

To give you an example, a very popular van for inhabiting is the VW Westfalia. From the outside, it looks like a regular vehicle you might see anywhere. On the inside, a camper van has some creature comforts you don't find in a normal van. In the Westfalia, the backseat folds back into a bed. There is a propane-powered grill, a fridge, a table, and rotating chairs.

The Westfalia in living room mode. You are looking at the stove directly in front of you with a fridge and storage cabinet under it.

The Westfalia in bedroom mode. Fold down the rear seats into a bed! Raise the pop-top for a second bed.

If you want to get a sense of what it's like to live in one of these things for a while, check out these videos on YouTube by van dwellers (here, here and here). Most notably, you'll find that there is not a lot of space in these camper vans, but they are relatively modular, so they can serve as a vehicle, living room, office or bedroom when called upon.

The Economics of Van Living

You need some amount of initial capital to become a van dweller. This initial capital is what makes van dwelling an infeasible fallback option for someone in financial straits. A used camper van like a Westfalia from the 1980s can cost approximately $10,000. You can likely buy a less expensive van, but doing so will require modifications to make it suitable for sleeping. One online van dweller put his initial modification budget at about $2,000. You'll probably need to buy things like a portable heater, an alternate power source, tinted windows and a solution for when nature calls. Like any vehicle, there is the ever-present risk that your van will break down and you'll need to have a mechanic repair it. So, the less money you spend on the van upfront, the more you need to reserve for future repairs.

Let's say it costs you $12,000 ($10,000 initial cost + $2,000 in repairs or supplies) to launch your new van lifestyle. Are you able to save money versus living in a $1,000 a month apartment?
First off, let's assume you're able to park your van every night for free (an RV would require a $25 a night charge, but with a van you might be able to get away with parking on the street for free). At this rate, if you could last a year in the van, you would be financially better off, right? Actually, you'd break even much sooner because your camper van is an asset. If you bought a used camper van from the 1980s, it's unlikely to lose much more value under your watch. If you fix it up, it may even have appreciated in value when you look to sell it. If you resell it for anywhere near what you paid for it, you break even in just a couple of months. If you found a great deal on a van, you'd break even sooner. If you drove it around a lot and had high fuel costs, you might not.

What happens if you have to go Number 2 and other important questions?

When you hear that someone lives in a van, many questions immediately come to mind. We researched the most common answers to these questions in van living forums, blogs and YouTube videos.

How do you go to the bathroom? The majority of online contributors (many of whom appear to be male) use pee-bottles and public bathrooms for more, eh, substantial bathroom breaks. Some have small chemical toilets which they try to avoid using except for emergencies.

How do you shower? If you live in a van, you can basically kiss your daily shower goodbye. If you plan on living in one city, you can get a gym membership. However, that's not really a good solution if you want to maintain a mobile lifestyle. For the mobile van traveler, truck stops offer showers for $10. Other than that, most people living in vans get by cleaning themselves using wet ones or some variant of dumping water on themselves.

What do you do for internet access? As you dig into the van living community, it's striking how connected they are. While the motivation of many of them is to be physically "off-the-grid", being connected to the internet is critical. Many van dwellers get by on free wifi from coffee shops and libraries. Using your cell phone as a hotspot is also an option, though we didn't find that to be a common answer in the van dwelling forums.

Is this legal? If you pay to stay at a campsite, it's most certainly legal, but then this lifestyle ceases to be inexpensive. If you park on the streets, it's generally illegal, but this varies by city. For people who live in vans, the primary rule they appear to follow is to be inconspicuous - blend into the environment and don't make it look like you're living in a van. Whether that is easy to achieve is another question. The burden of having to move the van constantly and be on the lookout for police appears to be one of the biggest reasons people ultimately abandon van living.

Is it lonely? Yes. Most people appear to be living in vans solo.

I'd like to waste all day reading about living in vans. Where should I start? Here, here, here, here, here and here. Also this story about living in a car in Silicon Valley is neat too.

A real life example - an entrepreneur living in an RV

As luck would have it, this author was introduced to an entrepreneur who recently spent three months living in a van. This van dwelling entrepreneur requested to remain anonymous so he could more freely discuss certain "intimate" parts of vehicle living. Basically, in order to work on his own apps and web projects, he decided to live in a van to save money. At the end of 3 months though, he took a full-time programming job and abandoned the van experiment.
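Before turning to his story, the break-even arithmetic from the economics section above can be made concrete. This is a minimal sketch in Python using the article's figures ($12,000 upfront against a $1,000 a month apartment); the resale values are illustrative assumptions, not numbers from the article.

```python
def breakeven_months(upfront_cost, monthly_rent_saved, resale_value=0.0):
    """Months of van living needed before it beats paying rent."""
    net_cost = upfront_cost - resale_value  # the van is an asset you can sell later
    return net_cost / monthly_rent_saved

# Worst case: the van ends up worthless, so the full $12,000 must be recovered.
print(breakeven_months(12_000, 1_000))          # 12.0 months

# If the van resells near its purchase price, break-even comes much sooner,
# matching the article's "just a couple of months" claim.
print(breakeven_months(12_000, 1_000, 10_000))  # 2.0 months
```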
Technically this entrepreneur lived in an RV, but most of the RV's amenities were broken so it was almost like living in a van.

"So, I had about $2K that I could put towards buying an RV or trying to find a short term apartment rental. Because I have two dogs, finding an apartment was impossible. Even if I found one, that money would have been gone pretty fast."

The plucky entrepreneur was in luck though. His father had recently purchased some land on which to build a house. If he could find a vehicle to live in, he had a safe place to park it each night.

"I went and saw this one RV for $2,500 that was a complete piece of crap and didn't run. I ended up buying that same kind of RV from someone else. It was in good condition and ran well and the guy sold it for $2,000. He had to get rid of it because he was getting so many parking tickets. He had it parked in front of his house and his neighbors kept complaining."

The RV had lots of amenities, none of which worked:

"The RV had a place in the back for me to put a bed down and I put in some new carpet. There was a bathroom, shower, toilet, stove and fridge, but none of that worked at all. It was just taking up space."

"For internet access I just tethered my phone. It was about $60 for 5 gigs of data and it worked really well. I only went over the limit the last month when I started working for my current employer and we had to send lots of images back and forth. I had to give up Netflix and Hulu though."

On productivity in the van:

"The first month I lived in the van, I was really productive. Well, workwise I probably wasn't so productive, but productive enough. Living in a small space though, I didn't waste time watching re-runs of Cheers for three hours on Netflix. I read a lot of books, I worked on my music more. When I lived in a house, I never really used my guitar because it was in another room. When I was in the RV, everything I owned was in the same room so I played my guitar a lot."

Making it work:

"I got by spending less than $600 a month, mostly on food. I was lucky that there was a support network for me because I had family nearby. I didn't have to ever move the van and if I needed a shower, I could go to my Dad's house. I'm not someone who feels the need to shower much, so I really only showered a few times a month. Not having a bathroom really really wore on me after a while."

The weirdest thing:

"I got to be kind of jealous of my dogs. They could just go to the bathroom wherever they wanted! I had to drive to a Starbucks and order a coffee just to use the toilet."

But ultimately, living in a van was not so fun:

"By the second or third month, it was not a very pleasant experience. I was getting pretty lonely and it was pretty boring to have the van parked in one place all the time. I thought about taking it on the road, but I was pretty sure it would break down and then I'd be screwed. Also let's be honest, when you are living in a van, your dating prospects are not good."

Calling it quits on van living:

"So, after three months I took an awesome job doing iOS development and moved out of the van. Now I'm trying to sell the van, but I need to fix the dead battery first. It's just sitting around doing nothing right now."

Conclusion

Van living is a way to save money, but it requires capital, work, and discipline. Most people don't last that long doing this. The two most common reasons for quitting van living are the stress of finding a place to park it each night and the loneliness of the solitary existence.
One van dweller chronicled her life living in a van on an amazing blog. After five years though, she gave up the van life to settle down with her partner; some things just have a stronger pull than the call of the open road.

This post was written by Rohin Dhar.
/**
 * This class represents a rich text dialog field.
 */
@PageObject
public class RichText implements DialogField {

  @FindBy(css = ".coral-RichText")
  private WebElement input;

  @Inject
  private Actions actions;

  /**
   * Deletes the rich text value that is already set and replaces it with a new one.
   *
   * @param value desired string value of the field. \\n sequences are replaced with the RETURN key.
   */
  @Override
  public void setValue(Object value) {
    String text = (String) value;
    // Select all existing content and delete it before typing the new value.
    actions.keyDown(input, CONTROL) //
        .sendKeys("a") //
        .keyUp(CONTROL) //
        .sendKeys(BACK_SPACE);
    List<String> textDividedByLines = Arrays.asList(text.split("\\\\n"));
    for (int i = 0; i < textDividedByLines.size(); i++) {
      if (i != 0) {
        actions.sendKeys(RETURN);
      }
      actions.sendKeys(textDividedByLines.get(i).trim());
    }
    actions.perform();
  }
}
In an unusually strong rebuke, the US accused Israel of breaking its trust over plans for a new West Bank settlement. The White House on Wednesday condemned Israel as plans to construct new settlement homes deep into the West Bank emerged. In its notably forthright response, the Obama administration lashed out at the proposal for some 300 housing units. Spokesman Mark Toner said that the plan undermined hopes for peace, and that it was "another step towards cementing a one-state reality of perpetual occupation." "Such moves will only draw condemnation from the international community, distance Israel from many of its partners, and further call into question Israel's commitment to achieving a negotiated peace," Toner said in a statement. Toner said the plan would see homes built on land "far closer to Jordan than Israel ... and make the possibility of a viable Palestinian state more remote." Washington opposes Israel's policy of building Jewish settlements on land that would be claimed by the Palestinians in any "two-state" peace agreement. "Israelis must ultimately decide between expanding settlements and preserving the possibility of a peaceful two-state solution," Toner said. Question of friendship Press secretary Josh Earnest said the decision called into question the word of Prime Minister Benjamin Netanyahu. "We did receive public assurances from the Israeli government that contradict this announcement," he said. "I guess when we're talking about how good friends treat one another, that's a source of serious concern as well." US officials have adopted a stronger line in their dealings with Israel in recent weeks. Washington claims that Prime Minister Benjamin Netanyahu's government is recklessly accelerating the home-building program despite international concern. The Middle East Quartet - a contact group including the EU, Russia, United States and the United Nations - in July called on Israel to halt settlement building. Airstrikes in Gaza Also on Wednesday, Israeli aircraft attacked Palestinian militant targets in the Gaza Strip on Wednesday, with at least one person wounded. The raid came after a rocket fired from the enclave hit an Israeli border town. Three Hamas training camps and a security complex were targeted in the airstrikes. There were no casualties in the rocket strike on Sderot, Israeli police said. However, Israel has a declared policy of responding to any attack from the Hamas-run Gaza Strip with military force. A poll published on Sunday showed that two-thirds of Israelis believe there will never be a peace agreement with the Palestinians. rc/bw (AFP, AP, dpa, Reuters)
//! Hierarchical Deterministic seed implementing BIP39 use parity_scale_codec::{Decode, Encode}; use chain_core::init::network::{get_bip44_coin_type_from_network, Network}; use client_common::{ErrorKind, PrivateKey, PublicKey, Result, ResultExt}; use crate::hd_wallet::{ ChainPath, DefaultKeyChain, ExtendedPrivKey, ExtendedPubKey, KeyChain, KeyIndex, }; use crate::Mnemonic; /// Hierarchical Deterministic seed #[derive(Debug, Clone, PartialEq, Decode, Encode)] pub struct HDSeed { /// raw data of HDSeed pub bytes: Vec<u8>, } impl From<&Mnemonic> for HDSeed { fn from(mnemonic: &Mnemonic) -> Self { HDSeed { bytes: mnemonic.seed().to_vec(), } } } impl HDSeed { /// Create new HD seed from seed bytes #[inline] pub fn new(bytes: Vec<u8>) -> Self { HDSeed { bytes } } #[inline] /// Returns the seed value as a byte slice pub fn as_bytes(&self) -> &[u8] { &self.bytes } /// Derive HD wallet at specific bip44 path, and returns the key pair pub fn derive_key_pair( &self, network: Network, account_index: u32, index: u32, ) -> Result<(PublicKey, PrivateKey)> { let coin_type = get_bip44_coin_type_from_network(network); let chain_path_string = format!("m/44'/{}'/{}'/0/{}", coin_type, account_index, index); let chain_path = ChainPath::from(chain_path_string); let key_chain = DefaultKeyChain::new( ExtendedPrivKey::with_seed(&self.bytes) .chain(|| (ErrorKind::InternalError, "Invalid seed bytes"))?, ); let (extended_private_key, _) = key_chain.derive_private_key(chain_path).chain(|| { ( ErrorKind::InternalError, "Failed to derive HD wallet private key", ) })?; let private_key = PrivateKey::from(extended_private_key.private_key); let public_key = PublicKey::from(&private_key); Ok((public_key, private_key)) } /// get publickey on specific index pub fn get_pubkey( &self, network: Network, account_index: u32, index: u32, ) -> Result<PublicKey> { let parent_pubkey = self.get_parent_pubkey(network, account_index)?; HDSeed::get_pubkey_from_parent_pubkey(&parent_pubkey, index) } /// get parent pubkey pub fn get_parent_pubkey( &self, network: Network, account_index: u32, ) -> Result<ExtendedPubKey> { let coin_type = get_bip44_coin_type_from_network(network); let chain_path_string = format!("m/44'/{}'/{}'/0", coin_type, account_index); let chain_path = ChainPath::from(chain_path_string); let key_chain = DefaultKeyChain::new( ExtendedPrivKey::with_seed(&self.bytes) .chain(|| (ErrorKind::InternalError, "Invalid seed bytes"))?, ); let (parentkey, _) = key_chain.derive_private_key(chain_path).chain(|| { ( ErrorKind::InternalError, "Failed to derive HD wallet private key", ) })?; let parent_pubkey = ExtendedPubKey::from_private_key(&parentkey); Ok(parent_pubkey) } /// Get publickey from parent extended publickey on specific index pub fn get_pubkey_from_parent_pubkey( parent_pubkey: &ExtendedPubKey, index: u32, ) -> Result<PublicKey> { let pubkey = parent_pubkey .derive_public_key(KeyIndex::Normal(index)) .chain(|| (ErrorKind::InternalError, "Invalid extended pubkey"))? 
.public_key; let public_key = PublicKey::from(pubkey); Ok(public_key) } } #[cfg(test)] mod hd_seed_tests { use super::*; use crate::service::HDAccountType; use secstr::SecUtf8; #[test] fn same_mnemonic_words_should_restore_the_same_hd_seed() { let mnemonic_words = Mnemonic::new(24).expect("get 24 words mnemonics").phrase(); let restored_hd_seed_1 = HDSeed::from( &Mnemonic::from_secstr(&mnemonic_words.clone()) .expect("should restore from mnemonic words"), ); let restored_hd_seed_2 = HDSeed::from( &Mnemonic::from_secstr(&mnemonic_words.clone()) .expect("should restore from mnemonic words"), ); assert_wallet_is_same(&restored_hd_seed_1, &restored_hd_seed_2); } mod derive_key_pair { use super::*; #[test] fn should_derive_at_correct_cro_path() { let mnemonic_words = SecUtf8::from("point shiver hurt flight fun online hub antenna engine pave chef fantasy front interest poem accident catch load frequent praise elite pet remove used"); let mnemonic = Mnemonic::from_secstr(&mnemonic_words) .expect("should create mnemonic from mnemonic words"); let hd_seed = HDSeed::from(&mnemonic); let expected_public_key = hex::decode("0396bb69cbbf27c07e08c0a9d8ac2002ed75a6287a3f2e4cfe11977817ca14fad0") .expect("should decode from public key hex"); let expected_private_key = hex::decode("e92a3a7859600762bca9dff4f3f3dea17b6cf1333218f38ede5b4017b54f30f5") .expect("should decode from private key hex"); let (public_key, private_key) = hd_seed .derive_key_pair(Network::Mainnet, HDAccountType::Transfer.index(), 1) .expect("should derive key pair"); assert_eq!(expected_public_key, public_key.serialize_compressed()); assert_eq!(expected_private_key, private_key.serialize()); let expected_public_key = hex::decode("037f48caf0998415cad9b57e27d9aeaeb498324ceaf8b506eee1df31b92ee5f18b") .expect("should decode from public key hex"); let expected_private_key = hex::decode("0ce8339e5cb4f71903991ed7b1e12b09a7e7904b5926eb22c7f7c0afdddd6f5a") .expect("should decode from private key hex"); let (public_key, private_key) = hd_seed .derive_key_pair(Network::Devnet, HDAccountType::Staking.index(), 1) .expect("should derive key pair"); assert_eq!(expected_public_key, public_key.serialize_compressed()); assert_eq!(expected_private_key, private_key.serialize()); } } fn assert_wallet_is_same(wallet: &HDSeed, other: &HDSeed) { assert_eq!(wallet.as_bytes(), other.as_bytes()); } #[test] fn should_get_publickey_return_correct_key() { let mnemonic_words = SecUtf8::from("point shiver hurt flight fun online hub antenna engine pave chef fantasy front interest poem accident catch load frequent praise elite pet remove used"); let mnemonic = Mnemonic::from_secstr(&mnemonic_words) .expect("should create mnemonic from mnemonic words"); let hd_seed = HDSeed::from(&mnemonic); let expected_public_key = hex::decode("0396bb69cbbf27c07e08c0a9d8ac2002ed75a6287a3f2e4cfe11977817ca14fad0") .expect("should decode from public key hex"); let expected_private_key = hex::decode("e92a3a7859600762bca9dff4f3f3dea17b6cf1333218f38ede5b4017b54f30f5") .expect("should decode from private key hex"); let (public_key, private_key) = hd_seed .derive_key_pair(Network::Mainnet, HDAccountType::Transfer.index(), 1) .expect("should derive key pair"); assert_eq!(expected_public_key, public_key.serialize_compressed()); assert_eq!(expected_private_key, private_key.serialize()); let pubkey = hd_seed .get_pubkey(Network::Mainnet, HDAccountType::Transfer.index(), 1) .expect("get_publickey"); assert_eq!(pubkey.serialize_compressed(), expected_public_key); let parent_pubkey = hd_seed 
.get_parent_pubkey(Network::Mainnet, HDAccountType::Transfer.index()) .unwrap(); for i in 0..256 { let pubkey = HDSeed::get_pubkey_from_parent_pubkey(&parent_pubkey, i) .unwrap() .serialize(); assert_eq!( hd_seed .derive_key_pair(Network::Mainnet, HDAccountType::Transfer.index(), i) .unwrap() .0 .serialize(), pubkey ); } } }
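The Rust module above builds BIP44 derivation paths of the form m/44'/coin'/account'/0/index before deriving keys. As a language-neutral illustration of just the path construction, here is a short sketch in Python; the coin-type value in the example is an assumption made for illustration, not a constant taken from this crate.

```python
def bip44_path(coin_type: int, account: int, index: int) -> str:
    """Build a hardened BIP44 derivation path like the one used above."""
    return f"m/44'/{coin_type}'/{account}'/0/{index}"

# Example: account 0, first address, with a hypothetical coin type of 1
# (1 is conventionally used for testnets; mainnet coin types vary per chain).
assert bip44_path(1, 0, 0) == "m/44'/1'/0'/0/0"
```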
import csv

# Read every row of the NASDAQ listing into memory.
c = csv.reader(open('nasdaq.csv'))
rows = []
for row in c:
    rows.append(row)

# Join the rows into a single string; note that indexing with [0]
# prints only the first character of that string.
listToStr = ' '.join([str(elem) for elem in rows])
print(listToStr[0])
/**
 * Populate the popup with the entries that match the current search text.
 */
private void populatePopup() {
  contextMenu.getItems().clear();
  String text = textField.getText().toLowerCase();
  contextMenu.getItems()
      .addAll(entries.stream().filter(string -> string.toLowerCase().contains(text)).limit(maximumEntries).map(MenuItem::new).collect(Collectors.toList()));
  contextMenu.getItems().forEach(item -> item.setOnAction(a -> {
    textField.setText(item.getText());
    textField.positionCaret(textField.getLength());
  }));
}
Magnetic Resonance Imaging in Cardiac Amyloidosis: Unraveling the Stealth Entity. Amyloidosis is a systemic disease involving many organs. Cardiac involvement is a significant cause of morbidity and mortality in these patients. Diagnosis of cardiac amyloidosis is based on endomyocardial biopsy, which however is invasive and associated with complications. Noninvasive methods of diagnosis include magnetic resonance imaging (MRI), with various methods and sequences involved. Our study aims at describing the MRI features of cardiac amyloidosis, including new imaging sequences, and at prognosticating patients based on imaging features. We included 35 patients with suspected cardiac amyloidosis who underwent MRI at our center over 4 years. All images were retrieved from our archive and assessed by an experienced radiologist. Common morphological features in our patients included increased wall thickness of the left ventricle (LV) (16.1 ± 4.1 mm), right ventricle (RV) (6.3 ± 1.1 mm), and interatrial septum (6.2 ± 0.8 mm). Global late gadolinium enhancement (LGE) (n = 21), including subendocardial or transmural, was the most common pattern, followed by patchy enhancement. Global transmural LGE was associated with worse prognosis. Four types of myocardial nulling patterns were observed on postcontrast time to invert (TI) scout imaging: normal nulling pattern (myocardium nulls after blood and coincident with spleen) and abnormal nulling pattern (ANP), which is further divided into three types: Type 1, myocardium nulls before blood pool but coincident with spleen; Type 2, myocardium nulling coincident with blood but not coincident with spleen; and Type 3, features of both Type 1 and Type 2. Type 3 ANP was the most common (n = 23) nulling pattern in our patients. Cardiac MRI is essential in the noninvasive diagnosis of cardiac amyloidosis. Transmural global LGE serves as a poor prognosticator in these patients. "Three-tier" TI scout imaging is essential to avoid false-negative enhancement results. Type 3 ANP is the most specific nulling pattern in cardiac amyloidosis.
/**
 * Determines if the target is OpenSearch. It appears to be true for OpenDistro as well.
 * Created custom for Data Prepper.
 * TODO: Determine if we need this at all. Can we assume all supported versions of OpenDistro also have the same capabilities?
 * TODO: It may be possible to remove testing against HTTP after decoupling from the OpenSearch test framework Gradle plugin
 */
static boolean isOSBundle() {
    final boolean osFlag = Optional.ofNullable(System.getProperty("tests.opensearch.bundle"))
            .map("true"::equalsIgnoreCase).orElse(false);

    if (osFlag) {
        // A cluster URL is mandatory when testing against a security-enabled OpenSearch bundle.
        if (!Optional.ofNullable(System.getProperty("tests.opensearch.host")).isPresent()) {
            throw new RuntimeException("cluster url should be provided for security enabled OpenSearch testing");
        }
    }

    return osFlag;
}
def newSkelFileEvent(self, selected_file): matching_maskname = results2maskedvideo(selected_file) matching_imgstorename = results2imgstore(selected_file) if (self.fid is None) and (self.isimgstore is False): self.updateSkelAndVideoFileFromSkelName(selected_file) elif self.vfilename in [matching_imgstorename, matching_maskname]: self.updateSkelFile(selected_file) else: msgbox = QMessageBox(self) msgbox.setIcon(QMessageBox.Question) msgbox.setText( ("The results file does not match the open video file\n" "Select an option:") ) open_results_anyway_msg = 'Open results anyway' find_vid_open_both_msg = 'Open with matching video' msgbox.addButton(QMessageBox.Cancel) msgbox.addButton( QPushButton(open_results_anyway_msg), QMessageBox.NoRole) msgbox.addButton( QPushButton(find_vid_open_both_msg), QMessageBox.YesRole) msgbox.setDefaultButton(QMessageBox.Cancel) msgbox.exec_() qres = msgbox.clickedButton() if (qres is None) or (qres.text() == 'Cancel'): pass elif qres.text() == open_results_anyway_msg: self.updateSkelFile(selected_file) elif qres.text() == find_vid_open_both_msg: self.updateSkelAndVideoFileFromSkelName(selected_file) else: print(qres) print(qres.text()) print('This should never be triggered') return
/** * Returns true if the passed object is primitive in respect to V8. */ private static boolean isBasicallyPrimitive(Object object) { return object instanceof V8Value || object instanceof String || object instanceof Boolean || object instanceof Short || object instanceof Integer || object instanceof Long || object instanceof Float || object instanceof Double; }
#include "klee/System/Time.h" #include "gtest/gtest.h" #include "gtest/gtest-death-test.h" #include <cerrno> #include <sstream> int finished = 0; using namespace klee; TEST(TimeTest, TimingFunctions) { auto t = time::getClockInfo(); ASSERT_STRNE(t.c_str(), ""); auto p0 = time::getWallTime(); auto p1 = time::getWallTime(); ASSERT_GT(p0, time::Point()); ASSERT_GT(p1, time::Point()); ASSERT_LE(p0, p1); time::getUserTime(); } TEST(TimeTest, TimeParserNewFormat) { // valid errno = 0; auto s0 = time::Span(""); ASSERT_EQ(s0, time::Span()); ASSERT_EQ(errno, 0); s0 = time::Span("3h10min"); ASSERT_EQ(s0, time::minutes(190)); s0 = time::Span("5min5min"); ASSERT_EQ(s0, time::seconds(600)); s0 = time::Span("3us"); ASSERT_EQ(s0, time::microseconds(3)); s0 = time::Span("1h1min1s1ms1us1ns"); ASSERT_EQ(s0, time::nanoseconds(3661001001001)); s0 = time::Span("1min1min"); ASSERT_EQ(s0, time::minutes(2)); // invalid ASSERT_EXIT(time::Span("h"), ::testing::ExitedWithCode(1), "Illegal number format: h"); ASSERT_EXIT(time::Span("-5min"), ::testing::ExitedWithCode(1), "Illegal number format: -5min"); ASSERT_EXIT(time::Span("3.5h"), ::testing::ExitedWithCode(1), "Illegal number format: 3.5h"); ASSERT_EXIT(time::Span("3mi"), ::testing::ExitedWithCode(1), "Illegal number format: 3mi"); } TEST(TimeTest, TimeParserOldFormat) { // valid errno = 0; auto s0 = time::Span("20"); ASSERT_EQ(s0, time::seconds(20)); s0 = time::Span("3.5"); ASSERT_EQ(s0, time::milliseconds(3500)); s0 = time::Span("0.0"); ASSERT_EQ(s0, time::Span()); s0 = time::Span("0"); ASSERT_EQ(s0, time::Span()); ASSERT_EQ(errno, 0); // invalid ASSERT_EXIT(time::Span("-3.5"), ::testing::ExitedWithCode(1), "Illegal number format: -3.5"); ASSERT_EXIT(time::Span("NAN"), ::testing::ExitedWithCode(1), "Illegal number format: NAN"); ASSERT_EXIT(time::Span("INF"), ::testing::ExitedWithCode(1), "Illegal number format: INF"); ASSERT_EXIT(time::Span("foo"), ::testing::ExitedWithCode(1), "Illegal number format: foo"); } TEST(TimeTest, TimeArithmeticAndComparisons) { auto h = time::hours(1); auto min = time::minutes(1); auto sec = time::seconds(1); auto ms = time::milliseconds(1); auto us = time::microseconds(1); auto ns = time::nanoseconds(1); ASSERT_GT(h, min); ASSERT_GT(min, sec); ASSERT_GT(sec, ms); ASSERT_GT(ms, us); ASSERT_GT(us, ns); ASSERT_LT(min, h); ASSERT_LT(sec, min); ASSERT_LT(ms, sec); ASSERT_LT(us, ms); ASSERT_LT(ns, us); ASSERT_GE(h, min); ASSERT_LE(min, h); auto sec2 = time::seconds(1); ASSERT_EQ(sec, sec2); sec2 += time::nanoseconds(1); ASSERT_LT(sec, sec2); auto op0 = time::seconds(1); auto op1 = op0 / 1000U; ASSERT_EQ(op1, ms); op0 = time::nanoseconds(3); op1 = op0 * 1000U; ASSERT_EQ(op1, 3U*us); op0 = (time::seconds(10) + time::minutes(1) - time::milliseconds(10000)) * 60U; ASSERT_EQ(op0, h); auto p1 = time::getWallTime(); auto p2 = p1; p1 += time::seconds(10); p2 -= time::seconds(15); ASSERT_EQ(p1 - p2, time::seconds(25)); auto s = time::minutes(3); p1 = s + p2; ASSERT_NE(p1, p2); ASSERT_LT(p2, p1); p1 = p1 - s; ASSERT_EQ(p1, p2); s = time::minutes(5); s -= time::minutes(4); ASSERT_EQ(s, time::seconds(60)); } TEST(TimeTest, TimeConversions) { auto t = time::Span("3h14min1s"); auto d = t.toSeconds(); ASSERT_EQ(d, 11641.0); std::uint32_t h; std::uint8_t m, s; std::tie(h, m, s) = t.toHMS(); ASSERT_EQ(h, 3u); ASSERT_EQ(m, 14u); ASSERT_EQ(s, 1u); ASSERT_TRUE((bool)t); ASSERT_FALSE((bool)time::Span()); auto us = t.toMicroseconds(); ASSERT_EQ(us, 11641000000u); t += time::microseconds(42); auto v = (timeval)t; ASSERT_EQ(v.tv_sec, 11641); 
ASSERT_EQ(v.tv_usec, 42); t = std::chrono::seconds(1); ASSERT_EQ(t, time::seconds(1)); auto u = (std::chrono::steady_clock::duration)t; ASSERT_EQ(u, std::chrono::seconds(1)); std::ostringstream os; os << time::Span("2.5"); ASSERT_EQ(os.str(), "2.5s"); } TEST(TimeTest, ImplicitArithmeticConversions) { auto t1 = time::Span("1000s"); t1 *= 2U; auto d = t1.toSeconds(); ASSERT_EQ(d, 2000.0); auto t2 = t1 * 1.5; d = t2.toSeconds(); ASSERT_EQ(d, 3000.0); t2 = 2.5 * t1; d = t2.toSeconds(); ASSERT_EQ(d, 5000.0); t1 = time::Span("1000s"); t1 *= 2.2; d = t1.toSeconds(); ASSERT_EQ(d, 2200.0); }
/** * @brief This function handles System tick timer. */ void SysTick_Handler(void) { HAL_IncTick(); lv_tick_inc(1); }
def initialize_statev(self): numx = getattr(self.material, 'num_sdv', Material.num_sdv) statev = None if numx is None else np.zeros(numx) try: statev = self.material.sdvini(statev) except AttributeError: pass aux_sdv = [] if hasattr(self.material, 'aux_models'): for (name, aux_model) in self.material.aux_models.items(): xv = np.zeros(aux_model.num_sdv) aux_sdv.extend(aux_model.sdvini(xv)) if aux_sdv: if statev is not None: statev = np.append(statev, aux_sdv) else: statev = np.array(aux_sdv) return statev
// SumNeighbors calculates the sum of the cells around a position on the board.
// It looks at each of the position's eight neighbors, guarding against the
// board edges, and sums their values.
func SumNeighbors(board *Board, y int, x int) int {
	sum := 0
	if y < len(board)-1 && x > 0 {
		sum += board[y+1][x-1]
	}
	if y < len(board)-1 {
		sum += board[y+1][x]
	}
	if y < len(board)-1 && x < len(board[y])-1 {
		sum += board[y+1][x+1]
	}
	if x > 0 {
		sum += board[y][x-1]
	}
	if x < len(board[y])-1 {
		sum += board[y][x+1]
	}
	if y > 0 && x > 0 {
		sum += board[y-1][x-1]
	}
	if y > 0 {
		sum += board[y-1][x]
	}
	if y > 0 && x < len(board[y])-1 {
		sum += board[y-1][x+1]
	}
	return sum
}
import { CalculatorInput } from "../models";

export class Calculator {
  add(calculatorInput: CalculatorInput) {
    return calculatorInput.numOne + calculatorInput.numTwo;
  }
}
I. The Finsler-type Extensions of the Classic Riemannian Framework - a Prolific Class of Models in Natural Sciences
1. Basic Differential Geometric Structures on Manifolds: Euclidean, Riemannian, Finslerian. 2. Generalized Structures: Lagrange, Generalized Lagrange, Eisenhart. 3. Specific Geometric O
The classic Euclidean and Riemannian differential geometric structures on manifolds have proved to provide fruitful applicative models for various fields, especially for Physics. These structures admit natural extensions which aim to provide alternative, more elaborate frameworks for the continuously increasing complexity of modeled phenomena. Among the extensions which emphasize the anisotropic dependence on directions and, more generally, on tangent vectors regarded as parameters, the Finsler, Lagrange and Generalized Lagrange first- or higher-order structures and their duals are intensively investigated at present. The talk provides a brief historic overview of these geometric structures and of their basic mathematical objects, concepts and results, and presents numerous applications in Mechanics, General and Special Relativity Theory, Relativistic Electrodynamics, Electromagnetic Field Theory, Ecosystems, Seismology, and Economy. A special interest concerns the basic notion of spray and its KCC stability, locally described by systems of second-order differential equations which naturally extend the equations of geodesics from the Riemannian framework, together with the behavior of solutions and the associated Jacobi theory. Illustrative applications are overviewed.
II. CMC submanifolds in the Riemann-Finsler framework and spectral theory of m-th root structures: main trends and recent applications
© vk.com/clubmashany

Siberian singer Mashani has released a patriotic music video in support of the ruble. The song is addressed to Russian President Vladimir Putin and tells of the heroine's desire to pay "one ruble per dollar and not a kopeck more."

The video was shot amid hay and Russian birch trees, with a balalaika and girls in brightly colored sarafans. The singer herself wears a dress made of coins. In the story, she writes a letter to a certain "boss." The singer's representative explained that the message is addressed to the President of Russia. In the song, Mashani proposes that Putin set the dollar at one ruble, and then do away with foreign currency altogether.

"I pay one ruble per dollar and not a kopeck more. I want one ruble per dollar, and for the euro too. I want one ruble per dollar, and even that is a lot," she sings in the chorus.

In the song she also makes another patriotic proposal: return Alaska to Russia.

"And if the right goods can't be found, we'll easily get by with our own produce. We'll hold a vote on which product, oh, which state, we'd better take. We'll draw up a list and mark our crosses; turnout will be around 95 percent. Not Alaska, of course: for such treatment, slander and disrespect, we should have taken Alaska back long ago anyway," the lyrics say.

The singer first became known a year ago with the release of the video "My Putin." That video reflected the performer's civic position: she believes the Russian president is "the only person who can help Ukraine," and that "Russians and Ukrainians are fraternal peoples who should walk hand in hand."
Benefits and Bear Traps: The EU Defence Directive
DOI: 10.1080/03071840802386166
The 'transatlantic technology gap' has been common parlance in the defence world for a number of years. Talk of 'the gap' reflects the fact that in a number of key areas US military equipment is more advanced than European kit. This does not mean that the Americans are somehow more scientifically or technologically adept than Europeans; rather, it is largely a question of investment. And when it comes to defence research and development (R&D), combined European expenditure is only about one-sixth that of the US. As the old adage goes, 'you get what you pay for'. Technology is the foundation of military capability, and if US R&D spending continues to outstrip European funding so overwhelmingly, the transatlantic technology gap will continue to widen as European countries fall further and further behind. There is a direct correlation between R&D spending and the quality of military equipment.
/* Checks whether the object is present in the left or right array * Parameters: * object - object that needs to be checked if present in either of the array */ @Override public boolean contains(Object object) { try { for (int i = 0; i < numElementLeftArray; i++) { if (leftArray[i].equals(object)) { return true; } } for (int i = 0; i < numElementRightArray; i++) { if (rightArray[i].equals(object)) { return true; } } } catch (NullPointerException exception) { System.out.println("Error: " + exception.getMessage()); } return false; }
/* * Prints Searching, Resting, Paralysis, or 'count' status * Display is always exactly 10 characters wide (see below) * * This function was a major bottleneck when resting, so a lot of * the text formatting code was optimized in place below. */ static void prt_state(player_type *p_ptr) { bool p, s, r; if (p_ptr->paralyzed) { p = TRUE; } else { p = FALSE; } if (p_ptr->searching) { s = TRUE; if (p_ptr->cp_ptr->flags & CF_STEALTH_MODE) s = 2; } else { s = FALSE; } if (p_ptr->resting) { r = TRUE; } else { r = FALSE; } send_indication(p_ptr, IN_STATE, p, s, r); }
import SplitParameters from "./split-parameters"; import Path from "../path"; import {NameParameterArgumentPath} from "../file/string/name-parameter"; export type SplitParameterArgumentEmpty = { empty : boolean } export type SplitParameterArgumentToString = Path & SplitParameterArgumentEmpty export type SplitParameterArgumentPath = NameParameterArgumentPath & SplitParameterArgumentEmpty; export type SplitParameterArgument = SplitParameterArgumentToString | SplitParameterArgumentPath; export default function SplitParameter( argument : SplitParameterArgumentPath ) : string[]; export default function SplitParameter( argument : SplitParameterArgumentToString ) : string[]; export default function SplitParameter( { path, separator = ':/\\', empty = true } : SplitParameterArgument & SplitParameterArgumentToString & SplitParameterArgumentPath ) : string[] { return SplitParameters( path || arguments[0].toString(), separator, empty ) }
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef APP_BIDI_LINE_ITERATOR_H_
#define APP_BIDI_LINE_ITERATOR_H_
#pragma once

#include <string>

#include "unicode/ubidi.h"

#include "base/basictypes.h"

// A simple wrapper class for the bidirectional iterator of ICU.
// This class uses the bidirectional iterator of ICU to split a line of
// bidirectional texts into visual runs in its display order.
class BiDiLineIterator {
 public:
  BiDiLineIterator() : bidi_(NULL) { }
  ~BiDiLineIterator();

  // Initializes the bidirectional iterator with the specified text. Returns
  // whether initialization succeeded.
  UBool Open(const std::wstring& text, bool right_to_left, bool url);

  // Returns the number of visual runs in the text, or zero on error.
  int CountRuns();

  // Gets the logical offset, length, and direction of the specified visual run.
  UBiDiDirection GetVisualRun(int index, int* start, int* length);

  // Given a start position, figure out where the run ends (and the BiDiLevel).
  void GetLogicalRun(int start, int* end, UBiDiLevel* level);

 private:
  UBiDi* bidi_;

  DISALLOW_COPY_AND_ASSIGN(BiDiLineIterator);
};

#endif  // APP_BIDI_LINE_ITERATOR_H_
import { Component, OnInit } from '@angular/core'; import { HttpClient } from '@angular/common/http'; import {NgbModal, ModalDismissReasons} from '@ng-bootstrap/ng-bootstrap'; import { Router } from '@angular/router'; import { MainserviceService } from '../mainservice.service'; @Component({ selector: 'app-portfolio', templateUrl: './portfolio.component.html', styleUrls: ['./portfolio.component.css'] }) export class PortfolioComponent implements OnInit { tickers :any costs :any quantity :any compdesc = [] latsp = [] closeResult = ''; values = '0.00' available :boolean = true spin_it :boolean = true constructor(private router : Router, private http:HttpClient, private modalService: NgbModal, private _mains: MainserviceService) { } open(content) { this.values = '0.00' this.modalService.open(content, {ariaLabelledBy: 'modal-basic-title'}).result.then((result) => { this.closeResult = `Closed with: ${result}`; }, (reason) => { this.closeResult = `Dismissed ${this.getDismissReason(reason)}`; }); } private getDismissReason(reason: any): string { if (reason === ModalDismissReasons.ESC) { return 'by pressing ESC'; } else if (reason === ModalDismissReasons.BACKDROP_CLICK) { return 'by clicking on a backdrop'; } else { return `with: ${reason}`; } } ngOnInit(): void { let sid = document.getElementById('src') let wid = document.getElementById('watch') let pid = document.getElementById('port') sid.style.border = 'none' wid.style.border = 'none' pid.style.border = 'white solid' document.getElementById('plink').style.color = 'white' document.getElementById('wlink').removeAttribute('style') document.getElementById('slink').removeAttribute('style') if(localStorage.getItem('port_tickers') == null || localStorage.getItem('port_tickers').length == 2) { this.available = false } else { this.tickers = JSON.parse(localStorage.getItem('port_tickers')) this.costs = JSON.parse(localStorage.getItem('port_cost')) this.quantity = JSON.parse(localStorage.getItem('port_quantity')) for(let i = 0; i<this.tickers.length; ++i){ this._mains.send_dp(this.tickers[i]).subscribe((data) => this.compdesc[i] = data) this._mains.send_lp(this.tickers[i]).subscribe((data) => this.latsp[i] = data) } setTimeout(() => { this.spin_it = false }, 1000); } } onKey(value, lp) { if(value == ''){ this.values = '0.00' } this.values = this.putco((value*lp).toFixed(2)); } putco(x:any){ return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",") } buyit(ticker, quan, lp) { let index = 0 for(let i = 0; i<this.tickers.length; ++i){ if(this.tickers[i] == ticker){ index = i; break; } } this.costs[index] = (Number(this.costs[index]) + Number(quan)*Number(lp)).toFixed(2) this.quantity[index] = (Number(this.quantity[index]) + Number(quan)).toString() localStorage.removeItem('port_cost') localStorage.removeItem('port_quantity') localStorage.setItem('port_cost',JSON.stringify(this.costs)) localStorage.setItem('port_quantity',JSON.stringify(this.quantity)) for(let i = 0; i<this.tickers.length; ++i){ this._mains.send_dp(this.tickers[i]).subscribe((data) => this.compdesc[i] = data) this._mains.send_lp(this.tickers[i]).subscribe((data) => this.latsp[i] = data) } this.modalService.dismissAll() } sellit(ticker, quan, lp) { let index = 0 for(let i = 0; i<this.tickers.length; ++i){ if(this.tickers[i] == ticker){ index = i; break; } } let avgcost = Number(this.costs[index])/Number(this.quantity[index]) this.quantity[index] = (Number(this.quantity[index]) - Number(quan)).toString() if(this.quantity[index] == 0){ this.tickers.splice(index, 1) this.costs.splice(index, 1) 
this.quantity.splice(index, 1) document.getElementById(ticker).style.display = 'none' if(this.tickers.length != 0){ localStorage.setItem('port_tickers',JSON.stringify(this.tickers)) } else { localStorage.removeItem('port_tickers') localStorage.removeItem('port_cost') localStorage.removeItem('port_quantity') this.available = false this.modalService.dismissAll() return } } else { this.costs[index] = (Number(this.costs[index]) - Number(quan)*avgcost).toFixed(2) } localStorage.removeItem('port_cost') localStorage.removeItem('port_quantity') localStorage.setItem('port_cost',JSON.stringify(this.costs)) localStorage.setItem('port_quantity',JSON.stringify(this.quantity)) for(let i = 0; i<this.tickers.length; ++i){ this._mains.send_dp(this.tickers[i]).subscribe((data) => this.compdesc[i] = data) this._mains.send_lp(this.tickers[i]).subscribe((data) => this.latsp[i] = data) } this.modalService.dismissAll() } getDetails(t){ this.router.navigate(['/details', t]) } numm(vs :string){ return Number(vs) } }
/**
 * Waits until the barrier opens.
 * @param timeout the allowable timeout of the waiting
 * @return the result of the waiting
 */
const bool shared_barrier::wait(const struct timespec& timeout) const {
  knock();
  return expect(timeout);
}
def insert(self, node, data):
    """Recursively insert `data` into the subtree rooted at `node`.

    Returns the (possibly new) subtree root, so callers must assign the
    result back, e.g. ``root = tree.insert(root, data)``. Duplicate
    values are ignored.
    """
    if node is None:
        return self.createNode(data)
    if data < node.data:
        node.left = self.insert(node.left, data)
    elif data > node.data:
        node.right = self.insert(node.right, data)
    return node
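A short usage sketch for the insert method above. The surrounding tree class is not shown in the original, so the Node and Tree wrappers here are assumptions made only so the example runs; the insert logic itself is unchanged. The key point is the reassignment of the root on every call, since the first insert returns a brand-new node.

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None   # values smaller than data
        self.right = None  # values larger than data

class Tree:
    def createNode(self, data):
        return Node(data)

    def insert(self, node, data):
        # Same logic as the method above.
        if node is None:
            return self.createNode(data)
        if data < node.data:
            node.left = self.insert(node.left, data)
        elif data > node.data:
            node.right = self.insert(node.right, data)
        return node

tree = Tree()
root = None
for value in [5, 3, 8, 1]:
    root = tree.insert(root, value)  # reassign: the root may change
assert root.data == 5 and root.left.data == 3 and root.right.data == 8
```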
1 4.000931005326223 0 2 4.000931005326223 17 3 4.000931005326223 49 4 4.000931005326223 19 5 0.1680124573111111 51 6 0.326106272 101 7 4.000931005326223 63 8 4.000931005326223 29 9 4.000931005326223 61 10 4.000931005326223 35 11 4.000931005326223 81 12 4.000931005326223 34 13 4.000931005326223 64 14 4.000931005326223 36 15 4.000931005326223 30 16 0.22397230073600002 74 17 4.000931005326223 65 18 4.000931005326223 32 19 0.18373071057600002 54 20 0.24855207804800006 73 21 0.20115996055466667 98 22 0.198031372 75 23 4.000931005326223 31 24 4.000931005326223 62 25 4.000931005326223 21 26 4.000931005326223 82 27 0.22367160324444443 68 28 0.178688639728 55 29 4.000931005326223 33 30 4.000931005326223 45
import { PVGridColDef } from './PVGrid';

export interface SimpleOptions {
  namespace: string;
  url: string;
  isNeighbour: boolean;
  neighbourNamespace: string;
  dataType?: string;
  colSettings?: PVGridColDef[];
  customFilter?: string;
  filter?: string;
}

export const defaults: SimpleOptions = {
  namespace: 'namespace',
  url: 'http://localhost:18443/gateway/sandbox/pvgdpr_server/home/agrecords',
  isNeighbour: false,
  neighbourNamespace: 'neighbour',
};
// Basic Gura parser usage example
use gura::{dump, parse, GuraType};

fn main() {
    let gura_string = r##"
# This is a Gura document.
title: "Gura Example"

an_object:
    username: "Stephen"
    pass: "<PASSWORD>"

# Line breaks are OK when inside arrays
hosts: [
  "alpha",
  "omega"
]"##;

    // Parse: transforms a Gura string into a dictionary
    let parsed = parse(&gura_string).unwrap();

    // Debug and Display
    // println!("{:#?}", parsed);
    // println!("{}", parsed);

    // Access a specific field
    println!("Title -> {}", parsed["title"]);

    // You can check if object contains a key
    if parsed.contains_key("an_object") {
        println!("\nGura object contains 'an_object' key!");
    }

    // Iterate over structure
    println!("\nHosts:");
    if let GuraType::Array(hosts) = &parsed["hosts"] {
        for host in hosts.iter() {
            println!("Host -> {}", *host);
        }
    }

    // Dump: transforms a dictionary into a Gura string
    let string_again = dump(&parsed);
    println!("\n+++++ Dump result +++++");
    println!("{}", string_again);
}
#include "sprite.h" namespace dream { namespace graphics { // Sprite Sprite::Sprite(float x, float y, float width, float height, unsigned int color) : IRenderable2D(maths::Vector3(x, y, 0), maths::Vector2(width, height), color) { } Sprite::Sprite(float x, float y, float width, float height, Texture * texture) : IRenderable2D(maths::Vector3(x, y, 0), maths::Vector2(width, height), 0xffddffdd) { m_Texture = texture; } } }
Novel colchicine conjugate with unusual effect on the microtubules of cancer cells
Abstract A colchicine derivative bearing a substituted bispidine moiety, namely N-{7-(3,7-di-(tert-butoxycarbonyl)-1,5-dimethyl-3,7-diazabicyclo[3.3.1]nonan-9-yl)-oxy-7-oxoheptanoyl}-N-deacetylcolchicine, was synthesized and tested for its effect on the net of microtubules (MT) in lung cancer cells A549. The compound induced not only MT depolymerization but also stimulated the formation of small tubulin aggregates and long tubulin fibrils, localized mainly around nuclei. The assemblies were morphologically different from the tubulin clusters induced by the structurally related anticancer agent tubuloclustin. The biotest data demonstrate that depolymerization takes place both for pure tubulin and for tubulin in cellulo, while fibrils are formed only in the cells. The structure–activity relationship data for several similar colchicine derivatives synthesized in this work give evidence for the proposition that the initial conjugate may interact not only with tubulin and MT in the cells, but also with MT-associated proteins (MAPs) involved in the process of tubulin polymerization. The ability to affect MAP–tubulin interactions simultaneously opens attractive prospects in the design of novel anticancer agents.
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h> /* needed for abs() */

int main() {
    int t;
    scanf("%d", &t);
    while (t--) {
        char str[100005];
        scanf("%s", str);
        int i, j, flag = 0;
        j = strlen(str);
        /* Two equal adjacent non-'?' characters can never be fixed. */
        for (i = 0; i < j - 1; i++) {
            if (str[i] == str[i + 1] && str[i] != '?') {
                printf("-1\n");
                flag = 1;
                break;
            }
        }
        if (flag != 1) {
            for (i = 0; i < j; i++) {
                if (str[i] == '?' && i != 0 && str[i + 1] != '?') {
                    /* Pick a letter different from both neighbours. */
                    if (abs('a' - str[i - 1]) > 0 && abs('a' - str[i + 1]) > 0)
                        str[i] = 'a';
                    else if (abs('b' - str[i - 1]) > 0 && abs('b' - str[i + 1]) > 0)
                        str[i] = 'b';
                    else if (abs('c' - str[i - 1]) > 0 && abs('c' - str[i + 1]) > 0)
                        str[i] = 'c';
                } else if (str[i] == '?' && i != 0) {
                    /* Next char is also '?': only the left neighbour constrains us,
                       so pick a letter adjacent to it in the alphabet. */
                    if (fabs('a' - str[i - 1]) == 1)
                        str[i] = 'a';
                    else if (fabs('b' - str[i - 1]) == 1)
                        str[i] = 'b';
                    else
                        str[i] = 'c';
                } else if (str[i] == '?' && i == 0) {
                    if (fabs('a' - str[i + 1]) == 1)
                        str[i] = 'a';
                    else
                        str[i] = 'b';
                }
            }
            printf("%s\n", str);
        }
    }
    return 0;
}
// check if asking for an interface that some wrapper in the chain inherits from nsXPCWrappedJS* nsXPCWrappedJS::FindInherited(REFNSIID aIID) { NS_ASSERTION(!aIID.Equals(NS_GET_IID(nsISupports)), "bad call sequence"); for (nsXPCWrappedJS* cur = mRoot; cur; cur = cur->mNext) { bool found; if (NS_SUCCEEDED(cur->GetClass()->GetInterfaceInfo()-> HasAncestor(&aIID, &found)) && found) return cur; } return nsnull; }
import { RPCSubprovider, Web3ProviderEngine } from '@0x/subproviders'
import Web3 from 'web3'

import { CeloProvider } from '../src/transaction-utils'

export class Web3Utils {
  // This web3 has no signing ability.
  static async getWeb3(protocol: string, ipAddress: string, port: number): Promise<Web3> {
    return new Web3(`${protocol}://${ipAddress}:${port}`)
  }

  // This web3 has signing ability.
  static async getWeb3WithSigningAbility(
    protocol: string,
    ipAddress: string,
    port: number,
    privateKey: string
  ): Promise<Web3> {
    const celoProvider = new CeloProvider(privateKey)
    const rpcProvider = new RPCSubprovider(`${protocol}://${ipAddress}:${port}`)

    // Create a Web3 Provider Engine
    const providerEngine = new Web3ProviderEngine()
    // Compose our Providers, order matters
    // Celo provider provides signing
    providerEngine.addProvider(celoProvider)
    // Use an RPC provider to route all other requests
    providerEngine.addProvider(rpcProvider)
    providerEngine.start()

    const web3 = new Web3(providerEngine)
    return web3
  }
}
/** * This publishes the airavata-default-xacml-policy.xml to the PDP via PAP API (of WSO2 Identity Server) */ public class DefaultPAPClient { private final static Logger logger = LoggerFactory.getLogger(DefaultPAPClient.class); private EntitlementPolicyAdminServiceStub entitlementPolicyAdminServiceStub; public DefaultPAPClient(String auhorizationServerURL, String username, String password, ConfigurationContext configCtx) throws AiravataSecurityException { try { String PDPURL = auhorizationServerURL + "EntitlementPolicyAdminService"; entitlementPolicyAdminServiceStub = new EntitlementPolicyAdminServiceStub(configCtx, PDPURL); CarbonUtils.setBasicAccessSecurityHeaders(username, password, true, entitlementPolicyAdminServiceStub._getServiceClient()); } catch (AxisFault e) { logger.error(e.getMessage(), e); throw new AiravataSecurityException("Error initializing XACML PEP client."); } } public boolean isPolicyAdded(String policyName) { try { PolicyDTO policyDTO = entitlementPolicyAdminServiceStub.getPolicy(policyName, false); } catch (RemoteException e) { logger.debug("Error in retrieving the policy.", e); return false; } catch (EntitlementPolicyAdminServiceEntitlementException e) { logger.debug("Error in retrieving the policy.", e); return false; } return true; } public void addPolicy(String policy) throws AiravataSecurityException { new Thread() { public void run() { try { PolicyDTO policyDTO = new PolicyDTO(); policyDTO.setPolicy(policy); entitlementPolicyAdminServiceStub.addPolicy(policyDTO); entitlementPolicyAdminServiceStub.publishToPDP(new String[]{ServerSettings.getAuthorizationPoliyName()}, EntitlementConstants.PolicyPublish.ACTION_CREATE, null, false, 0); //Since policy publishing happens asynchronously, we need to retrieve the status and verify. Thread.sleep(2000); PaginatedStatusHolder paginatedStatusHolder = entitlementPolicyAdminServiceStub. getStatusData(EntitlementConstants.Status.ABOUT_POLICY, ServerSettings.getAuthorizationPoliyName(), EntitlementConstants.StatusTypes.PUBLISH_POLICY, "*", 1); StatusHolder statusHolder = paginatedStatusHolder.getStatusHolders()[0]; if (statusHolder.getSuccess() && EntitlementConstants.PolicyPublish.ACTION_CREATE.equals(statusHolder.getTargetAction())) { logger.info("Authorization policy is published successfully."); } else { throw new AiravataSecurityException("Failed to publish the authorization policy."); } //enable the published policy entitlementPolicyAdminServiceStub.enableDisablePolicy(ServerSettings.getAuthorizationPoliyName(), true); //Since policy enabling happens asynchronously, we need to retrieve the status and verify. Thread.sleep(2000); paginatedStatusHolder = entitlementPolicyAdminServiceStub. 
getStatusData(EntitlementConstants.Status.ABOUT_POLICY, ServerSettings.getAuthorizationPoliyName(), EntitlementConstants.StatusTypes.PUBLISH_POLICY, "*", 1); statusHolder = paginatedStatusHolder.getStatusHolders()[0]; if (statusHolder.getSuccess() && EntitlementConstants.PolicyPublish.ACTION_ENABLE.equals(statusHolder.getTargetAction())) { logger.info("Authorization policy is enabled successfully."); } else { throw new AiravataSecurityException("Failed to enable the authorization policy."); } } catch (RemoteException e) { logger.error(e.getMessage(), e); } catch (InterruptedException e) { logger.error(e.getMessage(), e); } catch (ApplicationSettingsException e) { logger.error(e.getMessage(), e); } catch (AiravataSecurityException e) { logger.error(e.getMessage(), e); } catch (EntitlementPolicyAdminServiceEntitlementException e) { logger.error(e.getMessage(), e); } } }.start(); } }
import json

# NOTE: `k` is assumed to be a kubectl command wrapper supplied by the surrounding
# project (for example `from sh import kubectl as k`); it is not defined in this snippet.

def kg(context, ns, kind, field_selector=None):
    args = f'{kind} --context {context} -o json'.split()
    args += ['-n', ns] if ns else ['--all-namespaces']
    if field_selector is not None:
        args += ['--field-selector=' + field_selector]
    result = k.get(*args)
    return json.loads(str(result.stdout, 'utf-8'))
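A hypothetical invocation of kg; the cluster context, namespace and field selector below are made up for illustration:

# list pods scheduled on one node and print their names
pods = kg('prod-cluster', 'kube-system', 'pods',
          field_selector='spec.nodeName=node-1')
for item in pods['items']:
    print(item['metadata']['name'])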
<filename>cloud-aws/src/test/java/com/sequenceiq/cloudbreak/cloud/aws/connector/resource/AwsUpdateServiceTest.java package com.sequenceiq.cloudbreak.cloud.aws.connector.resource; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import java.util.Collections; import java.util.List; import org.junit.Before; import org.junit.Test; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import com.sequenceiq.cloudbreak.cloud.aws.AwsImageUpdateService; import com.sequenceiq.cloudbreak.cloud.context.AuthenticatedContext; import com.sequenceiq.cloudbreak.cloud.model.CloudResource; import com.sequenceiq.cloudbreak.cloud.model.CloudResourceStatus; import com.sequenceiq.cloudbreak.cloud.model.CloudStack; import com.sequenceiq.cloudbreak.cloud.model.ResourceStatus; import com.sequenceiq.common.api.type.ResourceType; public class AwsUpdateServiceTest { @Mock private AwsImageUpdateService awsImageUpdateService; @Mock private CloudStack stack; @Mock private AuthenticatedContext ac; @InjectMocks private AwsUpdateService underTest; @Before public void setUp() { MockitoAnnotations.initMocks(this); } @Test public void updateCloudFormationTemplateResourceWithImageParameter() { CloudResource cloudResource = CloudResource.builder() .name("cf") .type(ResourceType.CLOUDFORMATION_STACK) .params(Collections.singletonMap(CloudResource.IMAGE, "dummy")) .build(); List<CloudResourceStatus> statuses = underTest.update(ac, stack, Collections.singletonList(cloudResource)); verify(awsImageUpdateService, times(1)).updateImage(ac, stack, cloudResource); assertEquals(1, statuses.size()); assertEquals(ResourceStatus.UPDATED, statuses.get(0).getStatus()); assertEquals(cloudResource, statuses.get(0).getCloudResource()); } @Test public void updateCloudFormationTemplateResourceWithoutImageParameter() { CloudResource cloudResource = CloudResource.builder() .name("cf") .type(ResourceType.CLOUDFORMATION_STACK) .build(); List<CloudResourceStatus> statuses = underTest.update(ac, stack, Collections.singletonList(cloudResource)); verify(awsImageUpdateService, times(0)).updateImage(ac, stack, cloudResource); assertEquals(0, statuses.size()); } @Test public void updateRandomResource() { CloudResource cloudResource = CloudResource.builder() .name("cf") .type(ResourceType.AWS_LAUNCHCONFIGURATION) .params(Collections.singletonMap(CloudResource.IMAGE, "dummy")) .build(); List<CloudResourceStatus> statuses = underTest.update(ac, stack, Collections.singletonList(cloudResource)); verify(awsImageUpdateService, times(0)).updateImage(ac, stack, cloudResource); assertEquals(0, statuses.size()); } }
// // When streaming the database out, we have to write out the same stuff for each media type, // so we split this out to call separately. And some folks may just want to use these // directly to deal with a single media type. // // Similar for formatting out to XML as well. // tCIDLib::TVoid TMediaDB::FormatBin(const tCQCMedia::EMediaTypes eMType , TBinOutStream& strmTar) const { strmTar << tCIDLib::EStreamMarkers::Frame << eMType; tCIDLib::TCard4 c4Count; { const tCQCMedia::TNCImgIdList& colImgs = *m_colImgsById[eMType]; c4Count = colImgs.c4ElemCount(); strmTar << tCQCMedia::EDataTypes::Image << c4Count; for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++) { strmTar << *colImgs[c4Index] << tCIDLib::EStreamMarkers::Frame; } } { const tCQCMedia::TNCCatList& colCats = *m_colCats[eMType]; c4Count = colCats.c4ElemCount(); strmTar << tCQCMedia::EDataTypes::Cat << c4Count; for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++) { strmTar << *colCats[c4Index] << tCIDLib::EStreamMarkers::Frame; } } { const tCQCMedia::TNCItemIdList& colItems = *m_colItemsById[eMType]; c4Count = colItems.c4ElemCount(); strmTar << tCQCMedia::EDataTypes::Item << c4Count; for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++) { strmTar << *colItems[c4Index] << tCIDLib::EStreamMarkers::Frame; } } { const tCQCMedia::TNCColIdList& colCols = *m_colColsById[eMType]; c4Count = colCols.c4ElemCount(); strmTar << tCQCMedia::EDataTypes::Collect << c4Count; for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++) { strmTar << *colCols[c4Index] << tCIDLib::EStreamMarkers::Frame; } } { const tCQCMedia::TNCSetIdList& colSets = *m_colSetsById[eMType]; c4Count = colSets.c4ElemCount(); strmTar << tCQCMedia::EDataTypes::TitleSet << c4Count; for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++) { strmTar << *colSets[c4Index] << tCIDLib::EStreamMarkers::Frame; } } }
<filename>FlashPanel.java
package simpleflashcards;

import javax.swing.*;
import java.awt.*;

public class FlashPanel extends JPanel {

    int firstNumber;
    int secondNumber;
    int answer;
    boolean answerSet;
    String operand;

    public FlashPanel(int firstNumber, int secondNumber, String operand) {
        this.firstNumber = firstNumber;
        this.secondNumber = secondNumber;
        // Compare strings with equals(), not ==; an unrecognised operand leaves the field null.
        if ("+".equals(operand) || "-".equals(operand) || "*".equals(operand) || "/".equals(operand))
            this.operand = operand;
        this.answerSet = false;
    }

    public void setAnswer(int answer) {
        this.answer = answer;
        answerSet = true;
    }

    public int calcOffset(int number) {
        // 24 pixels of right-alignment offset per digit
        int digitOffset = 0;
        for (int i = 0; i < (number + "").length(); i++)
            digitOffset += 24;
        return digitOffset;
    }

    public void paintComponent(Graphics comp) {
        super.paintComponent(comp); // let Swing paint the background first
        Graphics2D comp2D = (Graphics2D) comp;
        comp2D.setFont(new Font("Raleway", Font.PLAIN, 36));
        comp2D.drawString(firstNumber + "", this.getBounds().width - calcOffset(firstNumber), 50);
        comp2D.drawString(secondNumber + "", this.getBounds().width - calcOffset(secondNumber), 100);
        comp2D.drawString(operand, 15, 100);
        if (answerSet)
            comp2D.drawString(answer + "", this.getBounds().width - calcOffset(answer), 150);
    }
}
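A minimal harness (an editor's sketch, not part of the original file) showing how FlashPanel might be hosted in a JFrame; the frame size and card values are illustrative:

package simpleflashcards;

import javax.swing.JFrame;

public class FlashCardDemo {
    public static void main(String[] args) {
        // 12 + 7, with the answer revealed on a third line
        FlashPanel panel = new FlashPanel(12, 7, "+");
        panel.setAnswer(19);

        JFrame frame = new JFrame("Simple Flash Cards");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.add(panel);
        frame.setSize(220, 260);
        frame.setVisible(true);
    }
}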
// NewReleaseListResolver creates a new ReleaseListResolver using the given Lister and Resolver func NewReleaseListResolver(lister Lister, resolver Resolver) *ReleaseListResolver { return &ReleaseListResolver{ lister: lister, resolver: resolver, } }
package com.alibaba.fastjson.serializer; public class SerialContext { public final SerialContext parent; public final Object object; public final Object fieldName; public final int features; public SerialContext(SerialContext parent, Object object, Object fieldName, int features, int fieldFeatures){ this.parent = parent; this.object = object; this.fieldName = fieldName; this.features = features; } public String toString() { if (parent == null) { return "$"; } else { StringBuilder buf = new StringBuilder(); toString(buf); return buf.toString(); } } protected void toString(StringBuilder buf) { if (parent == null) { buf.append('$'); } else { parent.toString(buf); if (fieldName == null) { buf.append(".null"); } else if (fieldName instanceof Integer) { buf.append('['); buf.append(((Integer)fieldName).intValue()); buf.append(']'); } else { buf.append('.'); String fieldName = this.fieldName.toString(); boolean special = false; for (int i = 0; i < fieldName.length(); ++i) { char ch = fieldName.charAt(i); if ((ch >= '0' && ch <='9') || (ch >= 'A' && ch <='Z') || (ch >= 'a' && ch <='z') || ch > 128) { continue; } special = true; break; } if (special) { for (int i = 0; i < fieldName.length(); ++i) { char ch = fieldName.charAt(i); if (ch == '\\') { buf.append('\\'); buf.append('\\'); buf.append('\\'); } else if ((ch >= '0' && ch <='9') || (ch >= 'A' && ch <='Z') || (ch >= 'a' && ch <='z') || ch > 128) { buf.append(ch); continue; } else { buf.append('\\'); buf.append('\\'); } buf.append(ch); } } else { buf.append(fieldName); } } } } /** * @deprecated */ public SerialContext getParent() { return parent; } /** * @deprecated */ public Object getObject() { return object; } /** * @deprecated */ public Object getFieldName() { return fieldName; } /** * @deprecated */ public String getPath() { return toString(); } }
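A short sketch (hypothetical, not from the fastjson sources) of how nested contexts yield a JSONPath-style reference; the objects passed in are throwaway stand-ins for the values being serialized:

SerialContext root  = new SerialContext(null, new Object(), null, 0, 0);
SerialContext users = new SerialContext(root, new Object(), "users", 0, 0);
SerialContext first = new SerialContext(users, new Object(), Integer.valueOf(0), 0, 0);
SerialContext name  = new SerialContext(first, new Object(), "name", 0, 0);
System.out.println(name); // prints: $.users[0].name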
/// @file
/// @brief Contains #using_ keyword.
#pragma once

/// @brief The Switch namespace contains all fundamental classes to access Hardware, Os, System, and more.
namespace Switch {
  /// @brief The specified expression is cleared automatically when the scope is ended.
  /// @par Library
  /// Switch.Core
  /// @par Examples
  /// @code
  /// // sr is released automatically at the closing } of the enclosing scope.
  /// using_($<StreamReader> sr = new_<StreamReader>(path)) {
  ///   Console::WriteLine(sr->ReadToEnd());
  /// }
  /// @endcode
  /// @ingroup Keywords
  #define using_(expression) \
    for (bool __switch_using__ = true; __switch_using__; __switch_using__ = false)\
      for (expression; __switch_using__; __switch_using__ = false)
}

using namespace Switch;
package life.catalogue.tools; import life.catalogue.api.model.Dataset; import life.catalogue.api.model.DatasetSettings; import life.catalogue.api.vocab.DatasetOrigin; import life.catalogue.api.vocab.Setting; import life.catalogue.common.text.CitationUtils; import life.catalogue.dao.*; import life.catalogue.db.MybatisFactory; import life.catalogue.db.PgConfig; import life.catalogue.db.mapper.CitationMapper; import life.catalogue.db.mapper.DatasetMapper; import life.catalogue.db.mapper.DatasetSourceMapper; import life.catalogue.es.NameUsageIndexService; import life.catalogue.matching.NameIndexFactory; import life.catalogue.matching.decision.EstimateRematcher; import life.catalogue.matching.decision.RematcherBase; import life.catalogue.matching.decision.SectorRematchRequest; import life.catalogue.matching.decision.SectorRematcher; import java.util.concurrent.atomic.AtomicInteger; import org.apache.ibatis.session.SqlSession; import org.apache.ibatis.session.SqlSessionFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.zaxxer.hikari.HikariDataSource; public class UpdateReleaseTool implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(UpdateReleaseTool.class); final SqlSessionFactory factory; final HikariDataSource dataSource; final Dataset release; final Dataset project; final DatasetSettings settings; final int userKey; public UpdateReleaseTool(int releaseKey, PgConfig cfg, int userKey) { dataSource = cfg.pool(); this.userKey = userKey; factory = MybatisFactory.configure(dataSource, "tools"); DatasetInfoCache.CACHE.setFactory(factory); try (SqlSession session = factory.openSession()) { DatasetMapper dm = session.getMapper(DatasetMapper.class); release = dm.get(releaseKey); if (release.getOrigin() != DatasetOrigin.RELEASED) { throw new IllegalArgumentException("Dataset key "+releaseKey+" is not a release!"); } project = dm.get(release.getSourceKey()); settings = dm.getSettings(project.getKey()); } } /** * Rebuilds the source metadata from latest patches and templates */ public void rebuildSourceMetadata(){ System.out.printf("%s: %s\n\n", release.getKey(), release.getTitle()); DatasetSourceDao dao = new DatasetSourceDao(factory); show(dao); //update(dao); } /** * Rematches all sector targets for releases */ public void rematchSectorTargets(){ System.out.printf("Matching all sector targets of %s: %s\n\n", release.getKey(), release.getTitle()); NameUsageIndexService indexService = NameUsageIndexService.passThru(); EstimateDao edao = new EstimateDao(factory); NameDao ndao = new NameDao(factory, indexService, NameIndexFactory.passThru()); TaxonDao tdao = new TaxonDao(factory, ndao, indexService); SectorDao sdao = new SectorDao(factory, indexService, tdao); SectorRematchRequest req = new SectorRematchRequest(); req.setAllowImmutableDatasets(true); req.setDatasetKey(release.getKey()); //req.setId(1134); req.setTarget(true); req.setSubject(false); RematcherBase.MatchCounter mc = SectorRematcher.match(sdao, req, userKey); System.out.println("Sectors: " + mc); RematcherBase.MatchCounter mc2 = EstimateRematcher.match(edao, req, userKey); System.out.println("Estimates: " + mc2); } void show(DatasetSourceDao dao){ System.out.printf("%s\n", release.getTitle()); if (settings.has(Setting.RELEASE_TITLE_TEMPLATE)) { String title = CitationUtils.fromTemplate(release, settings.getString(Setting.RELEASE_TITLE_TEMPLATE)); release.setTitle(title); } System.out.printf("%s\n", release.getTitle()); dao.list(release.getKey(), release, true).forEach(d -> { 
System.out.printf("%s: %s\n", d.getKey(), d.getTitle()); }); } void update(DatasetSourceDao dao) { try (SqlSession session = factory.openSession(false)) { DatasetSourceMapper psm = session.getMapper(DatasetSourceMapper.class); var cm = session.getMapper(CitationMapper.class); int cnt = psm.deleteByRelease(release.getKey()); session.commit(); System.out.printf("Deleted %s old source metadata records\n", cnt); AtomicInteger counter = new AtomicInteger(0); dao.list(release.getKey(), release, true).forEach(d -> { counter.incrementAndGet(); System.out.printf("%s: %s\n", d.getKey(), d.getTitle()); psm.create(release.getKey(), d); cm.createRelease(d.getKey(), release.getKey(), d.getAttempt()); }); session.commit(); System.out.printf("Created %s new source metadata records\n", counter); } } public void close() { dataSource.close(); } public static void main(String[] args) { PgConfig cfg = new PgConfig(); cfg.host = ""; cfg.database = "col"; cfg.user = "col"; cfg.password = ""; try (UpdateReleaseTool reg = new UpdateReleaseTool(2230,cfg, 101)) { // 101=markus reg.rebuildSourceMetadata(); } } }
// This is an open source non-commercial project. Dear PVS-Studio, please check it. // PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com // ---------------------------------------------------------------------------- // "THE BEER-WARE LICENSE" (Revision 42): // <<EMAIL>> wrote this file. As long as you retain this notice you // can do whatever you want with this stuff. If we meet some day, and you think // this stuff is worth it, you can buy me a beer in return. <NAME> // ---------------------------------------------------------------------------- // // loop.cpp // #include "loop.hpp" #include "gui/date-time.hpp" #include "gui/display.hpp" #include "gui/music-operator.hpp" #include "gui/sound-manager.hpp" #include "misc/callback.hpp" #include "misc/log-macros.hpp" #include "sfutil/event.hpp" #include "stage/stage-base.hpp" #include <algorithm> #include <iomanip> namespace heroespath { namespace game { Loop::Loop(ActiveStages & stages, IStatusForLoop & iStatus, const ExecuteCommand FLAGS) : stages_(stages) , iStatus_(iStatus) , flags_(FLAGS) , prevKeyStrokeEventType_(sf::Event::EventType::Count) , prevKeyStrokeEventKey_(sf::Keyboard::Key::Unknown) , isMouseHovering_(false) , prevMousePosV_(gui::Display::Instance()->GetMousePosition()) , frameMouseInfo_(false, sf::Vector2i()) , toLogEvents_() , frameCounter_(0) , durationSec_(0.0f) , prevFadeColor_(sf::Color::Transparent) , fadeColorChangeCounter_(0) , componentFramerateTrials_("ComponentFramerate", TimeRes::Micro, true, 200, 0.0) //, componentFrameRateTrialsIndexAudio_(componentFramerateTrials_.AddCollecter("Audio")) //, componentFrameRateTrialsIndexUpdate_(componentFramerateTrials_.AddCollecter("Update")) //, componentFrameRateTrialsIndexDraw_(componentFramerateTrials_.AddCollecter("Draw")) //, componentFrameRateTrialsIndexDisplay_(componentFramerateTrials_.AddCollecter("Display")) { toLogEvents_.reserve(10); iStatus_.SetLoopRunning(true); } Loop::~Loop() { iStatus_.SetLoopRunning(false); } void Loop::Execute() { gui::Display::Instance()->SetMouseCursorVisibility(!flags_.will_hide_mouse); sf::Clock durationClock_; sf::Clock secondClock_; sf::Clock frameClock; try { ConsumeAndIgnoreStrayEvents(); misc::TimeTrials framerateTrials("Framerate", TimeRes::Micro, true, 200, 0.0); const std::size_t FRAMERATE_COLLECTER_INDEX { framerateTrials.AddCollecter( flags_.ToString()) }; durationClock_.restart(); while (!iStatus_.IsLoopStopRequested()) { M_HP_SCOPED_TIME_TRIAL(framerateTrials, FRAMERATE_COLLECTER_INDEX); gui::Display::Instance()->ClearToBlack(); // TODO TEMP REMOVE remove this crap once all testing is in unit tests ExecuteNextTest(); frameMouseInfo_ = UpdateMouseInfo(); StopMouseHover(frameMouseInfo_.has_moved); HandleMouseMove(); HandleEvents(); const auto FRAME_TIME_SEC { frameClock.getElapsedTime().asSeconds() }; frameClock.restart(); UpdateTimeFade(FRAME_TIME_SEC); UpdateTimeAudio(FRAME_TIME_SEC); UpdateTimeStages(FRAME_TIME_SEC); OncePerSecondTasks(secondClock_); Draw(); // this must remain last (just before display) so that any of the code above // can set a popup response and it will always be handled before the loop // exits stages_.HandlePopupResponseCallback(); Display(); ++frameCounter_; } framerateTrials.EndAllContests(); componentFramerateTrials_.EndAllContests(); } catch (const std::exception & EXCEPTION) { M_HP_LOG_FAT( "Exception=\"" << EXCEPTION.what() << "\" thrown during game loop execution, which is " "fatal. 
Re-Throwing to kill the game."); ExecuteCleanup(); throw; } durationSec_ = durationClock_.getElapsedTime().asSeconds(); ExecuteCleanup(); } void Loop::ExecuteCleanup() { StopMouseHoverImpl(); } const MouseThisFrame Loop::UpdateMouseInfo() { const auto NEW_MOUSE_POS_VI { gui::Display::Instance()->GetMousePosition() }; const auto HAS_MOUSE_MOVED { (NEW_MOUSE_POS_VI != prevMousePosV_) }; prevMousePosV_ = NEW_MOUSE_POS_VI; return MouseThisFrame(HAS_MOUSE_MOVED, NEW_MOUSE_POS_VI); } void Loop::StopMouseHover(const bool HAS_MOUSE_MOVED) { if (isMouseHovering_ && HAS_MOUSE_MOVED) { StopMouseHoverImpl(); } } void Loop::StopMouseHoverImpl() { if (!isMouseHovering_) { return; } isMouseHovering_ = false; auto handleStopMouseHover = [](stage::IStagePtr_t iStagePtr) { iStagePtr->SetMouseHover(sf::Vector2f(0.0f, 0.0f), false); return boost::none; }; stages_.ExecuteOnPopupStage(handleStopMouseHover); stages_.ExecuteOnNonPopupStages(handleStopMouseHover); } void Loop::OncePerSecondTasks(sf::Clock & secondClock) { if (secondClock.getElapsedTime().asSeconds() < 1.0f) { return; } secondClock.restart(); OncePerSecondTaskStartMouseHover(); OncePerSecondTaskCheckIfDisplayOpen(); } void Loop::OncePerSecondTaskStartMouseHover() { // don't show mouseovers while fading, if the mouse is moving, or if already showing if (iStatus_.GetFadeStatus().is_fading || frameMouseInfo_.has_moved || isMouseHovering_) { return; } isMouseHovering_ = true; auto handleStartMouseHover = [MOUSE_POS_V = frameMouseInfo_.pos_vf](stage::IStagePtr_t iStagePtr) { iStagePtr->SetMouseHover(MOUSE_POS_V, true); return boost::none; }; stages_.ExecuteOnForegroundStages(handleStartMouseHover); } void Loop::OncePerSecondTaskCheckIfDisplayOpen() { if (gui::Display::Instance()->IsOpen() == false) { iStatus_.LoopStopRequest(); } } void Loop::HandleMouseMove() { if (flags_.will_mouse_ignore || (frameMouseInfo_.has_moved == false)) { return; } auto handleMouseMove = [MOUSE_THIS_FRAME = frameMouseInfo_](stage::IStagePtr_t iStagePtr) { iStagePtr->UpdateMousePos(MOUSE_THIS_FRAME.pos_vi); return boost::none; }; stages_.ExecuteOnForegroundStages(handleMouseMove); } void Loop::UpdateTimeFade(const float FRAME_TIME_SEC) { if (iStatus_.GetFadeStatus().is_fading == false) { return; } const auto NEW_COLOR { gui::Display::Instance()->UpdateFadeColor(FRAME_TIME_SEC) }; iStatus_.SetFadeCurrentColor(NEW_COLOR); if (NEW_COLOR != prevFadeColor_) { ++fadeColorChangeCounter_; prevFadeColor_ = NEW_COLOR; } if (gui::Display::Instance()->IsFadeFinished()) { if (game::FadeDirection::In == iStatus_.GetFadeStatus().direction) { M_HP_LOG("fade in end (after " << fadeColorChangeCounter_ << " color changes)"); } else { M_HP_LOG("fade out end (after " << fadeColorChangeCounter_ << " color changes)"); } iStatus_.SetFadeTargetColorReached(); iStatus_.LoopStopRequest(); stages_.SetIsFadingForAllStages(false); } } void Loop::HandleEvents() { for (const auto & EVENT : gui::Display::Instance()->PollEvents()) { HandleEvent(EVENT); } } void Loop::HandleEvent(const sf::Event & EVENT) { // toLogEvents_.emplace_back(EVENT); if (HandleEventIfWindowClosed(EVENT)) { return; } if ((EVENT.type == sf::Event::KeyPressed) || (EVENT.type == sf::Event::KeyReleased)) { HandleEventKeyStroke(EVENT); return; } if ((EVENT.type == sf::Event::MouseButtonPressed) && (EVENT.mouseButton.button == sf::Mouse::Left)) { HandleEventMouseButtonLeftPressed(); return; } if ((EVENT.type == sf::Event::MouseButtonReleased) && (EVENT.mouseButton.button == sf::Mouse::Left)) { HandleEventMouseButtonLeftReleased(); return; } } 
void Loop::HandleEventKeyStroke(const sf::Event & EVENT) { const auto IS_KEYPRESS { (EVENT.type == sf::Event::KeyPressed) }; const auto IS_KEY_STROKE_EVENT_DIFFERENT_FROM_PREV { (EVENT.type != prevKeyStrokeEventType_) || (EVENT.key.code != prevKeyStrokeEventKey_) }; if (IS_KEY_STROKE_EVENT_DIFFERENT_FROM_PREV) { prevKeyStrokeEventType_ = EVENT.type; prevKeyStrokeEventKey_ = EVENT.key.code; } // allow screenshots even if keystrokes are ignored if (IS_KEY_STROKE_EVENT_DIFFERENT_FROM_PREV && (IS_KEYPRESS == false) && (EVENT.key.code == sf::Keyboard::F12)) { gui::Display::Instance()->TakeScreenshot(); return; } // TEMP TODO REMOVE AFTER TESTING // allow F1 to instant kill the game even if keystrokes are ignored if (IS_KEY_STROKE_EVENT_DIFFERENT_FROM_PREV && (IS_KEYPRESS == false) && (EVENT.key.code == sf::Keyboard::F1)) { iStatus_.GameExitRequest(); iStatus_.LoopStopRequest(); return; } if (flags_.will_keystroke_exit) { iStatus_.LoopStopRequest(); } if (flags_.will_keystroke_ignore) { return; } auto handleKeyPressOrRelease = [EVENT](stage::IStagePtr_t iStagePtr) { if (EVENT.type == sf::Event::KeyPressed) { iStagePtr->KeyPress(EVENT.key); } else { iStagePtr->KeyRelease(EVENT.key); } return boost::none; }; stages_.ExecuteOnForegroundStages(handleKeyPressOrRelease); } void Loop::HandleEventMouseButtonLeftPressed() { if (flags_.will_mouse_click_exit) { iStatus_.LoopStopRequest(); return; } if (flags_.will_mouse_ignore) { return; } auto handleMouseButtonPressed = [MOUSE_POS_VF = frameMouseInfo_.pos_vf](stage::IStagePtr_t iStagePtr) { iStagePtr->UpdateMouseDown(MOUSE_POS_VF); return boost::none; }; stages_.ExecuteOnForegroundStages(handleMouseButtonPressed); } void Loop::HandleEventMouseButtonLeftReleased() { if (flags_.will_mouse_ignore) { return; } auto handleLeftMouseButtonRelease = [MOUSE_POS_VF = frameMouseInfo_.pos_vf](stage::IStagePtr_t iStagePtr) { return iStagePtr->UpdateMouseUp(MOUSE_POS_VF); }; stages_.ExecuteOnForegroundStages(handleLeftMouseButtonRelease); } bool Loop::HandleEventIfWindowClosed(const sf::Event & EVENT) { if (EVENT.type == sf::Event::Closed) { // toLogEvents_.emplace_back(EVENT); iStatus_.GameExitRequest(); iStatus_.LoopStopRequest(); return true; } else { return false; } } void Loop::ConsumeAndIgnoreStrayEvents() { for (const auto & EVENT : gui::Display::Instance()->PollEvents()) { if (HandleEventIfWindowClosed(EVENT)) { break; } } } // I had problems getting smooth mouse wheel motion so I'm pausing this code // void Loop::ProcessMouseWheelRoll(const sf::Event & EVENT, const sf::Vector2i & // NEW_MOUSE_POS) //{ // const sf::Vector2f NEW_MOUSE_POS_F(NEW_MOUSE_POS); // // if (popupStagePtrOpt_) // { // popupStagePtrOpt_.value()->UpdateMouseWheel( // NEW_MOUSE_POS_F, EVENT.mouseWheelScroll.delta); // } // else // { // for (auto & iStageUPtr : stages_.non_popup_uvec) // { // iStageUPtr->UpdateMouseWheel(NEW_MOUSE_POS_F, EVENT.mouseWheelScroll.delta); // } // } //} void Loop::UpdateTimeStages(const float FRAME_TIME_SEC) { // M_HP_SCOPED_TIME_TRIAL(componentFramerateTrials_, componentFrameRateTrialsIndexUpdate_); auto handleTimeUpdate = [FRAME_TIME_SEC](stage::IStagePtr_t iStagePtr) { iStagePtr->UpdateTime(FRAME_TIME_SEC); return boost::none; }; stages_.ExecuteOnNonPopupStages(handleTimeUpdate); stages_.ExecuteOnPopupStage(handleTimeUpdate); } void Loop::Draw() { // M_HP_SCOPED_TIME_TRIAL(componentFramerateTrials_, componentFrameRateTrialsIndexDraw_); auto handleDrawing = [](stage::IStagePtr_t iStagePtr) { gui::Display::Instance()->DrawStage(iStagePtr); return boost::none; }; 
stages_.ExecuteOnNonPopupStages(handleDrawing); if (iStatus_.GetFadeStatus().will_draw_under_popup) { gui::Display::Instance()->DrawFade(); } stages_.ExecuteOnPopupStage(handleDrawing); if (iStatus_.GetFadeStatus().will_draw_under_popup == false) { gui::Display::Instance()->DrawFade(); } } void Loop::Display() { // M_HP_SCOPED_TIME_TRIAL(componentFramerateTrials_, componentFrameRateTrialsIndexDisplay_); gui::Display::Instance()->DisplayFrameBuffer(); } void Loop::ExecuteNextTest() { auto handlePerformNextTest = [](stage::IStagePtr_t iStagePtr) { iStagePtr->PerformNextTest(); return boost::none; }; stages_.ExecuteOnNonPopupStages(handlePerformNextTest); } void Loop::UpdateTimeAudio(const float FRAME_TIME_SEC) { // M_HP_SCOPED_TIME_TRIAL(componentFramerateTrials_, componentFrameRateTrialsIndexAudio_); gui::SoundManager::Instance()->UpdateTime(FRAME_TIME_SEC); } } // namespace game } // namespace heroespath
import h5py import random import numpy as np from PIL import Image from pathlib import Path from torch.utils.data import Dataset from torchvision.transforms import transforms DIV2K_RGB_MEAN = (0.4488, 0.4371, 0.4040) DIV2K_RGB_STD = (1.0, 1.0, 1.0) def augment_five_crop(batch, scale=None): hr_augment_path = None lr_augment_path = None if scale is None: if len(batch['hr']) > 0: scale = get_scale(Image.open(batch['lr'][0]), Image.open(batch['hr'][0])) hr_augment_path = Path(batch['hr'][0]).parent / f'augment_x{scale}' lr_augment_path = Path(batch['lr'][0]).parent / f'augment_x{scale}' hr_augment_path.mkdir(parents=True, exist_ok=True) lr_augment_path.mkdir(parents=True, exist_ok=True) outputs_lr = [] outputs_hr = [] for idx, example in enumerate(batch['hr']): hr_path = Path(example) lr_path = Path(batch['lr'][idx]) hr = Image.open(hr_path).convert('RGB') for aug_idx, hr in enumerate(transforms.FiveCrop(size=(hr.height // 2, hr.width // 2))(hr)): hr = hr.resize(((hr.width // scale) * scale, (hr.height // scale) * scale), resample=Image.BICUBIC) lr = hr.resize((hr.width // scale, hr.height // scale), resample=Image.BICUBIC) hr_file_path = hr_augment_path / f'{hr_path.stem}_{aug_idx}{hr_path.suffix}' lr_file_path = lr_augment_path / f'{lr_path.stem}_{aug_idx}{lr_path.suffix}' hr.save(hr_file_path, 'PNG') lr.save(lr_file_path, 'PNG') outputs_hr.append(hr_file_path.as_posix()) outputs_lr.append(lr_file_path.as_posix()) return { 'lr': outputs_lr, 'hr': outputs_hr } def get_scale_from_dataset(dataset): scale = None if len(dataset) > 0: lr = Image.open(dataset[0]['lr']) hr = Image.open(dataset[0]['hr']) dim1 = round(hr.width / lr.width) dim2 = round(hr.height / lr.height) scale = max(dim1, dim2) return scale def get_scale(lr, hr): dim1 = round(hr.width / lr.width) dim2 = round(hr.height / lr.height) scale = max(dim1, dim2) return scale def resize_image(lr_image, hr_image, scale=None): if scale is None: scale = get_scale(lr_image, hr_image) if lr_image.width * scale != hr_image.width or lr_image.height * scale != hr_image.height: hr_width = lr_image.width * scale hr_height = lr_image.height * scale return hr_image.resize((hr_width, hr_height), resample=Image.BICUBIC) return hr_image class EvalDataset(Dataset): def __init__(self, dataset): super(EvalDataset, self).__init__() self.dataset = dataset self.scale = get_scale_from_dataset(dataset) def __getitem__(self, idx): lr_image = Image.open(self.dataset[idx]['lr']).convert('RGB') hr_image = resize_image(lr_image, Image.open(self.dataset[idx]['hr']).convert('RGB'), scale=self.scale) lr = np.array(lr_image) hr = np.array(hr_image) lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0 hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0 return lr, hr def __len__(self): return len(self.dataset) class EvalDatasetH5(Dataset): def __init__(self, h5_file): super(EvalDatasetH5, self).__init__() self.h5_file = h5_file def __getitem__(self, idx): with h5py.File(self.h5_file, 'r') as f: lr = f['lr'][str(idx)][::].astype(np.float32).transpose([2, 0, 1]) / 255.0 hr = f['hr'][str(idx)][::].astype(np.float32).transpose([2, 0, 1]) / 255.0 return lr, hr def __len__(self): with h5py.File(self.h5_file, 'r') as f: return len(f['lr']) class TrainDataset(Dataset): def __init__(self, dataset, patch_size=64): super(TrainDataset, self).__init__() self.dataset = dataset self.patch_size = patch_size self.scale = get_scale_from_dataset(dataset) @staticmethod def random_crop(lr, hr, size, scale): lr_left = random.randint(0, lr.shape[1] - size) lr_right = lr_left + size lr_top 
= random.randint(0, lr.shape[0] - size) lr_bottom = lr_top + size hr_left = lr_left * scale hr_right = lr_right * scale hr_top = lr_top * scale hr_bottom = lr_bottom * scale lr = lr[lr_top:lr_bottom, lr_left:lr_right] hr = hr[hr_top:hr_bottom, hr_left:hr_right] return lr, hr @staticmethod def random_horizontal_flip(lr, hr): if random.random() < 0.5: lr = lr[:, ::-1, :].copy() hr = hr[:, ::-1, :].copy() return lr, hr @staticmethod def random_vertical_flip(lr, hr): if random.random() < 0.5: lr = lr[::-1, :, :].copy() hr = hr[::-1, :, :].copy() return lr, hr @staticmethod def random_rotate_90(lr, hr): if random.random() < 0.5: lr = np.rot90(lr, axes=(1, 0)).copy() hr = np.rot90(hr, axes=(1, 0)).copy() return lr, hr def __getitem__(self, idx): lr_image = Image.open(self.dataset[idx]['lr']).convert('RGB') hr_image = resize_image(lr_image, Image.open(self.dataset[idx]['hr']).convert('RGB'), scale=self.scale) lr = np.array(lr_image) hr = np.array(hr_image) lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale) lr, hr = self.random_horizontal_flip(lr, hr) lr, hr = self.random_vertical_flip(lr, hr) lr, hr = self.random_rotate_90(lr, hr) lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0 hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0 return lr, hr def __len__(self): return len(self.dataset) class TrainAugmentDatasetH5(Dataset): def __init__(self, h5_file, scale, patch_size=64): super(TrainAugmentDatasetH5, self).__init__() self.h5_file = h5_file self.patch_size = patch_size self.scale = scale @staticmethod def random_crop(lr, hr, size, scale): lr_left = random.randint(0, lr.shape[1] - size) lr_right = lr_left + size lr_top = random.randint(0, lr.shape[0] - size) lr_bottom = lr_top + size hr_left = lr_left * scale hr_right = lr_right * scale hr_top = lr_top * scale hr_bottom = lr_bottom * scale lr = lr[lr_top:lr_bottom, lr_left:lr_right] hr = hr[hr_top:hr_bottom, hr_left:hr_right] return lr, hr @staticmethod def random_horizontal_flip(lr, hr): if random.random() < 0.5: lr = lr[:, ::-1, :].copy() hr = hr[:, ::-1, :].copy() return lr, hr @staticmethod def random_vertical_flip(lr, hr): if random.random() < 0.5: lr = lr[::-1, :, :].copy() hr = hr[::-1, :, :].copy() return lr, hr @staticmethod def random_rotate_90(lr, hr): if random.random() < 0.5: lr = np.rot90(lr, axes=(1, 0)).copy() hr = np.rot90(hr, axes=(1, 0)).copy() return lr, hr def __getitem__(self, idx): with h5py.File(self.h5_file, 'r') as f: lr = f['lr'][str(idx)][::] hr = f['hr'][str(idx)][::] lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale) lr, hr = self.random_horizontal_flip(lr, hr) lr, hr = self.random_vertical_flip(lr, hr) lr, hr = self.random_rotate_90(lr, hr) lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0 hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0 return lr, hr def __len__(self): with h5py.File(self.h5_file, 'r') as f: return len(f['lr'])
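A minimal training-loop sketch (assumed usage; the record list, file paths and batch size are placeholders): TrainDataset indexes a sequence of {'lr': path, 'hr': path} records and yields normalised CHW float tensors.

from torch.utils.data import DataLoader

records = [{'lr': 'data/lr/0001.png', 'hr': 'data/hr/0001.png'}]  # placeholder paths
loader = DataLoader(TrainDataset(records, patch_size=64), batch_size=16, shuffle=True)
for lr, hr in loader:
    # lr: (N, 3, 64, 64); hr: (N, 3, 64 * scale, 64 * scale); both float32 in [0, 1]
    pass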
<reponame>jedzeins/go-chi-webserver<gh_stars>0 package pokemonService import ( "github.com/jedzeins/go-chi-webserver/domains" "github.com/jedzeins/go-chi-webserver/providers/pokemonProviders" ) // service should handle the response func GetPikachu() domains.Pokemon { return pokemonProviders.ProvidePikachu() } func GetAllPokemon() []domains.Pokemon { return pokemonProviders.ProvideAll() }
DNA DAMAGE AND REPAIR IN HUMAN CELLS EXPOSED TO SUNLIGHT Cultured human cells were treated with direct sunlight under conditions which minimised the hypertonic, hyperthermic and fixative effects of solar radiation. Sunlight produced similar levels of DNA strand breaks as equitoxic 254 nm UV in two fibroblast strains and a melanoma cell line, but DNA repair synthesis and inhibition of semiconservative DNA synthesis and of DNA chain elongation were significantly less for sunlight‐exposed cells. DNA breaks induced by sunlight were removed more rapidly. Thus, the repair of solar damage differs considerably from 254 nm UV repair. Glass‐filtered sunlight (> 320 nm) was not toxic to cells and did not induce repair synthesis but gave a low level of short‐lived DNA breaks and some inhibition of DNA chain elongation; thymidine uptake was enhanced. Filtered sunlight slightly enhanced UV‐induced repair synthesis and UV toxicity; photoreactivation of UV damage was not found. Attempts to transform human fibroblasts using sunlight, with or without phorbol ester, were unsuccessful.
<gh_stars>10-100 /** * Copyright 2011-2021 Asakusa Framework Team. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.asakusafw.dmdl.java.emitter.driver; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import com.asakusafw.dmdl.java.emitter.EmitContext; import com.asakusafw.dmdl.java.spi.JavaDataModelDriver; import com.asakusafw.dmdl.java.util.JavaName; import com.asakusafw.dmdl.model.BasicTypeKind; import com.asakusafw.dmdl.model.ModelDefinitionKind; import com.asakusafw.dmdl.semantics.ModelDeclaration; import com.asakusafw.dmdl.semantics.PropertyDeclaration; import com.asakusafw.dmdl.semantics.type.BasicType; import com.asakusafw.utils.java.model.syntax.Attribute; import com.asakusafw.utils.java.model.syntax.FormalParameterDeclaration; import com.asakusafw.utils.java.model.syntax.MethodDeclaration; import com.asakusafw.utils.java.model.syntax.ModelFactory; import com.asakusafw.utils.java.model.syntax.ModelKind; import com.asakusafw.utils.java.model.syntax.Modifier; import com.asakusafw.utils.java.model.syntax.ModifierKind; import com.asakusafw.utils.java.model.syntax.Name; import com.asakusafw.utils.java.model.syntax.SimpleName; import com.asakusafw.utils.java.model.syntax.SingleElementAnnotation; import com.asakusafw.utils.java.model.util.AttributeBuilder; import com.asakusafw.utils.java.model.util.ExpressionBuilder; import com.asakusafw.utils.java.model.util.JavadocBuilder; import com.asakusafw.utils.java.model.util.Models; /** * Implements {@code *AsString} methods. 
*/ public class StringPropertyDriver extends JavaDataModelDriver { private static final BasicType TEXT_TYPE = new BasicType(null, BasicTypeKind.TEXT); @Override public List<MethodDeclaration> getMethods(EmitContext context, ModelDeclaration model) { boolean projective = model.getOriginalAst().kind == ModelDefinitionKind.PROJECTIVE; List<MethodDeclaration> results = new ArrayList<>(); for (PropertyDeclaration property : model.getDeclaredProperties()) { if (isTextType(property) == false) { continue; } if (projective) { ModelFactory f = context.getModelFactory(); results.add(makeInterfaceMethod(f, createStringGetter(context, property))); results.add(makeInterfaceMethod(f, createStringSetter(context, property))); } else { results.add(createStringGetter(context, property)); results.add(createStringSetter(context, property)); } } return results; } private MethodDeclaration createStringGetter(EmitContext context, PropertyDeclaration property) { assert context != null; assert property != null; JavaName name = JavaName.of(property.getName()); name.addFirst("get"); //$NON-NLS-1$ name.addLast("as"); //$NON-NLS-1$ name.addLast("string"); //$NON-NLS-1$ ModelFactory f = context.getModelFactory(); return f.newMethodDeclaration( new JavadocBuilder(f) .text(Messages.getString("StringPropertyDriver.javadocGetter"), //$NON-NLS-1$ context.getDescription(property)) .returns() .text(context.getDescription(property)) .exception(context.resolve(NullPointerException.class)) .text(Messages.getString("StringPropertyDriver.javadocGetterNullPointerException"), //$NON-NLS-1$ context.getDescription(property)) .toJavadoc(), new AttributeBuilder(f) .Public() .toAttributes(), context.resolve(String.class), f.newSimpleName(name.toMemberName()), Collections.emptyList(), Collections.singletonList(new ExpressionBuilder(f, f.newThis()) .field(context.getFieldName(property)) .method("getAsString") //$NON-NLS-1$ .toReturnStatement())); } private MethodDeclaration createStringSetter(EmitContext context, PropertyDeclaration property) { assert context != null; assert property != null; JavaName name = JavaName.of(property.getName()); name.addFirst("set"); //$NON-NLS-1$ name.addLast("as"); //$NON-NLS-1$ name.addLast("string"); //$NON-NLS-1$ ModelFactory f = context.getModelFactory(); SimpleName paramName = context.createVariableName( context.getFieldName(property).getToken()); return f.newMethodDeclaration( new JavadocBuilder(f) .text(Messages.getString("StringPropertyDriver.javadocSetter"), //$NON-NLS-1$ context.getDescription(property)) .param(paramName) .text(Messages.getString("StringPropertyDriver.javadocSetterParameter"), //$NON-NLS-1$ context.getDescription(property)) .toJavadoc(), new AttributeBuilder(f) .annotation( context.resolve(SuppressWarnings.class), Models.toLiteral(f, "deprecation")) //$NON-NLS-1$ .Public() .toAttributes(), context.resolve(void.class), f.newSimpleName(name.toMemberName()), Arrays.asList(new FormalParameterDeclaration[] { f.newFormalParameterDeclaration( context.resolve(String.class), paramName) }), Collections.singletonList(new ExpressionBuilder(f, f.newThis()) .field(context.getFieldName(property)) .method("modify", paramName) //$NON-NLS-1$ .toStatement())); } private boolean isTextType(PropertyDeclaration property) { assert property != null; return property.getType().isSame(TEXT_TYPE); } private MethodDeclaration makeInterfaceMethod(ModelFactory f, MethodDeclaration method) { assert f != null; assert method != null; return f.newMethodDeclaration( method.getJavadoc(), 
filterInterfaceMethodModifiers(method.getModifiers()), method.getTypeParameters(), method.getReturnType(), method.getName(), method.getFormalParameters(), 0, method.getExceptionTypes(), null); } private List<Attribute> filterInterfaceMethodModifiers(List<? extends Attribute> modifiers) { assert modifiers != null; List<Attribute> results = new ArrayList<>(); for (Attribute attribute : modifiers) { if (attribute.getModelKind() == ModelKind.MODIFIER) { ModifierKind kind = ((Modifier) attribute).getModifierKind(); if (kind == ModifierKind.PUBLIC || kind == ModifierKind.ABSTRACT) { continue; } } else if (attribute.getModelKind() == ModelKind.SINGLE_ELEMENT_ANNOTATION) { SingleElementAnnotation an = (SingleElementAnnotation) attribute; Name name = an.getType().getName(); if (name.toNameString().equals(SuppressWarnings.class.getSimpleName())) { continue; } } results.add(attribute); } return results; } }
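For orientation, this is roughly the shape of the method pair the driver emits for a TEXT property named value, reconstructed from the builder calls above (the parameter name is illustrative; the generated Javadoc is omitted):

public String getValueAsString() {
    return this.value.getAsString();
}

@SuppressWarnings("deprecation")
public void setValueAsString(String value0) {
    this.value.modify(value0);
}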
<reponame>astrotycoon/bold-utils
//===- OwningPtrTest.cpp --------------------------------------------------===//
//
//                            The Bold Project
//
//  This file is distributed under the New BSD License.
//  See LICENSE for details.
//
//===----------------------------------------------------------------------===//
#include "OwningPtrTest.h"
#include <bold/ADT/OwningPtr.h>

using namespace bold;
using namespace bold::test;

//===----------------------------------------------------------------------===//
// OwningPtr
//===----------------------------------------------------------------------===//
// Constructor can do set-up work for all tests here.
OwningPtrTest::OwningPtrTest()
{
}

// Destructor can do clean-up work that doesn't throw exceptions here.
OwningPtrTest::~OwningPtrTest()
{
}

// SetUp() will be called immediately before each test.
void OwningPtrTest::SetUp()
{
}

// TearDown() will be called immediately after each test.
void OwningPtrTest::TearDown()
{
}

//===----------------------------------------------------------------------===//
// Testcases
//===----------------------------------------------------------------------===//
namespace {

class TestObj {
};

} // anonymous namespace

PAT_C( OwningPtrTest, trivial )
{
  bold::OwningPtr<TestObj> ptr;
  ASSERT_TRUE(NULL == ptr.give());
  ASSERT_TRUE(NULL == ptr.get());
}

PAT_C( OwningPtrTest, simple )
{
  bold::OwningPtr<TestObj> ptr(new TestObj());
  ASSERT_FALSE(NULL == ptr.get());
  ASSERT_FALSE(NULL == ptr.give());
  ASSERT_TRUE(NULL == ptr.get());
}
#include "pluginmanager.h" #include <QDir> #include <QDebug> #include <QFileInfo> #include <QApplication> #include <iostream> #include "../QtUtil/Utils/Log.h" PluginManager::PluginManager(QObject *parent) : ConfigObject(parent), _pluginPath("") { Init(); } PluginManager::~PluginManager() { } /** * @函数名 * @功能描述 * @参数 * @date 2018.9.11 */ void PluginManager::Init() { } /** * @函数名 * @功能描述 加载所有qt插件 * @参数 * @date 2018.9.11 */ void PluginManager::LoadPluginAllQt() { // QJsonObject obj = Get("PluginsQt").toObject(); foreach (QString plugkey, obj.keys()) { #ifdef _MACOS #ifdef QT_NO_DEBUG QString plugin = _pluginPath + "lib" + obj.value(plugkey).toString() + ".dylib"; #else QString plugin = _pluginPath + "lib" + obj.value(plugkey).toString() + "d.dylib"; #endif #else #ifdef _WINDOWS #ifdef QT_NO_DEBUG QString plugin = _pluginPath + obj.value(plugkey).toString() + ".dll"; #else QString plugin = _pluginPath + obj.value(plugkey).toString() + "d.dll"; #endif #else #ifdef QT_NO_DEBUG QString plugin = _pluginPath + "lib" + obj.value(plugkey).toString() + ".so"; #else QString plugin = _pluginPath + "lib" + obj.value(plugkey).toString() + "d.so"; #endif #endif #endif QFileInfo file(plugin); qDebug() << "plugin name: " << plugin; QString filepath = file.absoluteFilePath(); if (file.exists()) { std::cout << "plugin has been found: " << filepath.toStdString() << std::endl; QPluginLoader *loader = new QPluginLoader(plugin); _pluginRegisterQt.insert(plugkey, loader); } else { std::cout << "plugin file is not exists... path: " << filepath.toStdString() << std::endl; } } } /** * @函数名 * @功能描述 加载qt插件 * @参数 * @date 2018.9.11 */ bool PluginManager::LoadPluginQt(const QString &key, const QString &PluginAllPath) { QFileInfo file(PluginAllPath); if (file.exists()) { QPluginLoader *loader = new QPluginLoader(PluginAllPath); if (loader->instance()) { _pluginRegisterQt.insert(key, loader); return true; } else { return false; } } else { return false; } } /** * @函数名 * @功能描述 卸载说有插件 * @参数 * @date 2018.9.11 */ void PluginManager::UnloadPluginAllQt() { foreach (QPluginLoader *loader, _pluginRegisterQt.values()) { loader->unload(); delete loader; } _pluginRegisterQt.clear(); } /** * @函数名 * @功能描述 根据关键字卸载插件 * @参数 * @date 2018.9.11 */ bool PluginManager::UnloadPluginQt(const QString &key) { QPluginLoader *loader = _pluginRegisterQt.value(key); if (nullptr != loader) { bool result = loader->unload(); _pluginRegisterQt.remove(key); delete loader; return result; } return false; } /** * @函数名 * @功能描述 获取插件实例 * @参数 * @date 2018.9.11 */ QObject *PluginManager::GetPluginInstanceQt(const QString &key) { // 如果插件没有加载,则加载插件 if (_pluginRegisterQt.find(key) == _pluginRegisterQt.end()) { QJsonObject obj = Get("PluginsQt").toObject(); #ifdef _MACOS #ifdef QT_NO_DEBUG QString plugin = _pluginPath + "lib" + obj.value(key).toString() + ".dylib"; #else QString plugin = _pluginPath + "lib" + obj.value(key).toString() + "d.dylib"; #endif #else #ifdef _WINDOWS #ifdef QT_NO_DEBUG QString plugin = _pluginPath + obj.value(key).toString() + ".dll"; #else QString plugin = _pluginPath + obj.value(key).toString() + "d.dll"; #endif #else #ifdef QT_NO_DEBUG QString plugin = _pluginPath + "lib" + obj.value(key).toString() + ".so"; #else QString plugin = _pluginPath + "lib" + obj.value(key).toString() + "d.so"; #endif #endif #endif if (!LoadPluginQt(key, plugin)) { return nullptr; } } if (_pluginRegisterQt.value(key)) { QObject *plugin = _pluginRegisterQt.value(key)->instance(); if (plugin) { return plugin; } else { std::string strErr = 
_pluginRegisterQt.value(key)->errorString().toStdString(); std::cout << "error load: " << strErr << std::endl; return NULL; } } else { return NULL; } } /** * @函数名 * @功能描述 * @参数 * @date 2018.9.27 */ std::shared_ptr<QMap<QString, QObject *> > PluginManager::GetAllPluginInstanceQt() const { std::shared_ptr<QMap<QString, QObject *> > plugins(new QMap<QString, QObject *>); foreach (QString pluginName, _pluginRegisterQt.keys()) { QPluginLoader *pluginloader = _pluginRegisterQt.value(pluginName); if (pluginloader->instance()) { plugins->insert(pluginName, pluginloader->instance()); } } return plugins; } /** * @函数名 * @功能描述 设置插件相对应用程序路径 * @参数 * @date 2018.9.27 */ void PluginManager::setPluginPath(const QString &path) { _pluginPath = path; }
# Adapted from: # https://realpython.com/testing-third-party-apis-with-mock-servers/ import unittest from unittest.mock import patch from http.server import HTTPServer, SimpleHTTPRequestHandler from http import HTTPStatus import socket from threading import Thread import os import sys from typing import ClassVar, List import logging import pytest from deadseeker.action import run_action DIRECTORY = os.path.join(os.path.dirname(__file__), "mock_server") class MockServerRequestHandler(SimpleHTTPRequestHandler): def translate_path(self, path): return DIRECTORY + path def do_HEAD(self): if self.path.endswith('.ico'): self.send_response(HTTPStatus.METHOD_NOT_ALLOWED) self.end_headers() return if not self.check_error(): super().do_HEAD() def do_GET(self): if not self.check_error(): super().do_GET() def check_error(self): if self.path.endswith('/page4.html'): self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR) self.end_headers() return True return False def get_free_port(): s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) s.bind(('localhost', 0)) address, port = s.getsockname() s.close() return port @pytest.mark.integrationtest class TestIntegration(unittest.TestCase): url: ClassVar[str] mock_server: ClassVar[HTTPServer] mock_server_thread: ClassVar[Thread] @classmethod def setUpClass(cls): # Configure mock server. port = get_free_port() cls.mock_server = HTTPServer( ('localhost', port), MockServerRequestHandler) # Start running mock server in a separate thread. # Daemon threads automatically shut down when the main process exits. cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever) cls.mock_server_thread.setDaemon(True) cls.mock_server_thread.start() cls.url = f'http://localhost:{port}' def setUp(self): self.logger = logging.getLogger('deadseeker.loggingresponsehandler') self.env = { "INPUT_WEBSITE_URL": self.url, "INPUT_MAX_RETRIES": "3", "INPUT_MAX_RETRY_TIME": "30", "INPUT_VERBOSE": "true" } self.exit_patch = patch.object(sys, 'exit') self.exit = self.exit_patch.start() def tearDown(self): self.exit_patch.stop() def test_works(self): self.env['INPUT_EXCLUDE_URL_PREFIX'] = \ 'https://www.google.com,/page3.html,/page4.html' with patch.dict(os.environ, self.env): run_action() self.exit.assert_not_called() def test_exit_1_on_any_failure(self): self.env['INPUT_EXCLUDE_URL_PREFIX'] = \ 'https://www.google.com' with patch.dict(os.environ, self.env): run_action() self.exit.assert_called_with(1) def test_messagesLogged(self): self.env['INPUT_EXCLUDE_URL_PREFIX'] = \ 'https://www.google.com' with \ patch.dict(os.environ, self.env),\ patch.object(self.logger, 'error') as error_mock,\ patch.object(self.logger, 'info') as info_mock: run_action() expected_errors = [ f'::error ::ClientResponseError: 500 - {self.url}/page4.html', f'::error ::ClientResponseError: 404 - {self.url}/page3.html' ] actual_errors: List[str] = [] for call in error_mock.call_args_list: args, kwargs = call actual_errors.append(args[0]) self.assertEqual(expected_errors, actual_errors) expected_info_prefixes = [ f'200 - {self.url} - ', f'200 - {self.url}/favicon.ico - ', f'200 - {self.url}/page1.html - ', f'200 - {self.url}/subpages/subpage1.html - ', f'200 - {self.url}/page2.html - ', f'200 - {self.url}/subpages/subpage2.html - ', f'200 - {self.url}/index.html - ' ] actual_infos: List[str] = [] for call in info_mock.call_args_list: args, kwargs = call actual_infos.append(args[0]) self.assertEqual(len(expected_info_prefixes), len(actual_infos)) for expected_prefix in expected_info_prefixes: found = False for 
actual in actual_infos:
                if actual.startswith(expected_prefix):
                    actual_infos.remove(actual)
                    found = True
                    break
            self.assertTrue(
                found,
                f'Did not find actual result beginning'
                f' with "{expected_prefix}"')
        self.assertFalse(
            actual_infos,
            f'Unexpected actual responses: {actual_infos}')


if __name__ == '__main__':
    unittest.main()
// RunE executes cluster list command func (c *ClusterListCommand) RunE() error { if c.Format != TableOutputFormat && c.Format != "name" { return phaseerrors.ErrInvalidOutputFormat{RequestedFormat: c.Format} } cfg, err := c.Factory() if err != nil { return err } helper, err := NewHelper(cfg) if err != nil { return err } clusterMap, err := helper.ClusterMap() if err != nil { return err } err = clusterMap.Write(c.Writer, clustermap.WriteOptions{Format: c.Format}) if err != nil { return err } return nil }
Azure Data Lake Service for Big Data Analyses Now Available

Microsoft this week released Azure Data Lake as a generally available (GA), production-ready service, backed by Microsoft's 99.9 percent service-level agreement.

Azure Data Lake is a service for "Big Data" massively parallel types of analyses, with the ability to tap into pools of structured and unstructured data without limits. The service has been at the preview stage since November of 2015, according to a Microsoft Channel 9 presentation, so it's taken one year to arrive fully baked.

Microsoft is marketing the Azure Data Lake service as enabling "Big Cognition." The idea is to glean insights from multiple inputs of various data types. It's about "joining all the extracted cognitive data with other types of data, so you can do some really powerful analytics with it," according to a Microsoft announcement.

Azure Data Lake Components

Azure Data Lake is composed of three Azure services, according to the presentation. It has HDInsight, which is Microsoft's Hadoop-based Big Data service. Another component is the new Data Lake Store (GA this week), a repository for structured and unstructured data that can scale to meet developer needs. Lastly, there's the new Data Lake Analytics (GA this week), which permits users to run "massively parallel data transformation and processing programs in U-SQL, R, Python and .NET over petabytes of data," per the announcement. The overall system is based on the open Apache Hadoop Distributed File System.

Microsoft illustrates the Azure Data Lake components in the following diagram:

Azure Data Lake components. Source: Microsoft blog post.

The U-SQL query language is frequently mentioned in Microsoft's Azure Data Lake announcements. It may seem new, but Microsoft has been using U-SQL internally since 2008, according to the Channel 9 presentation. U-SQL is supported in Azure Data Lake Tools for Visual Studio Code at the preview stage and "combines the declarative advantage of T-SQL and extensibility of C#."

A Reddit Ask Me Anything session conducted by Microsoft Azure team members this week offered an additional definition of U-SQL, as follows:

It [U-SQL] has an official meaning, which is that it unifies:

structured and unstructured data processing

declarative SQL with user code (written in C#, Python, R etc.)

querying data in Azure Data Lake with querying data from Windows Azure Blob Store and SQL Server in Azure

The inofficial meaning is that you need a submarine to explore the depth of your data lake and discover your treasures. And in German and Swedish, submarines are called U-Boot :). It is not true that it is called U-SQL because the U comes after the T.

Developers love the U-SQL query language and pick it up very fast, according to Microsoft's presentation. Microsoft offers a tutorial here (a short illustrative script appears at the end of this article).

Spinning up an Azure Data Lake workload takes "30 seconds," Microsoft claims. Essentially, Microsoft takes care of managing the cluster for developers or data scientists. Additionally, Data Lake Store is designed to have "no limits" on the data size or the number of files or objects used in the analysis. No repartitioning of the data is required to run analyses. Developers don't have to define a schema up front. In addition, Azure Data Lake is integrated with Azure Active Directory. It has role-based access controls over the Data Lake Store via "POSIX-based ACLs for all data" or "Apache Ranger in HDInsight," Microsoft's announcement explained.
Organizations have single sign-on and multifactor authentication access options. Data are encrypted at rest via the service or Azure Key Vault. Data are encrypted in motion using the Secure Sockets Layer protocol. Azure HDInsight Support Microsoft also announced some additions to Azure HDInsight this week. One addition is R Server for HDInsight, which is now generally available. It's Microsoft's implementation of the R programming language "integrated with Spark clusters created from HDInsight," which can process terabytes of data. When it is run on Apache Spark, R Server "enables handling up to 1000x more data and up to 50x faster speeds than open source R," Microsoft claimed. A new capability is the ability to work with Spark SQL data sources. It's supported for developers and data scientists with an included R Studio Server Community Edition, but they can also use R Tools for Visual Studio. Microsoft also issued a preview of the open source Kafka for HDInsight. It's designed to enable the ingestion of "massive amounts of real-time data." It can be used for operations such as "fraud detection, click-stream analysis, financial alerts, or social analytics solutions," the announcement indicated. It's also designed to work with Storm for HDInsight or Spark Stream for HDInsight stream analytics solutions.
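To give a flavour of the U-SQL described above, here is a minimal script in the style of Microsoft's public tutorial; the schema and file paths are illustrative, not taken from Microsoft's announcements. It extracts a tab-separated log from the store, aggregates it with declarative SQL, and writes the result back out:

@searchlog =
    EXTRACT UserId int,
            Start  DateTime,
            Region string
    FROM "/Samples/Data/SearchLog.tsv"
    USING Extractors.Tsv();

@result =
    SELECT Region,
           COUNT(*) AS Total
    FROM @searchlog
    GROUP BY Region;

OUTPUT @result
    TO "/output/SearchLogByRegion.csv"
    USING Outputters.Csv();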
Asking pupils as young as six to work on pre-test drills until midnight obviously perverts the purpose of education, and even amounts to child abuse. However, the Education Bureau has no intention of binning the much-criticised Territory-wide System Assessment (TSA) at Primary 3, for students aged 9-10. Parents, teachers, pupils and politicians are convulsing again.

Starting this year, the test will be shrouded in a new name: the Basic Competency Assessment (BCA) Research Study. Specific new features include: (1) improved assessment papers and question design, (2) better school reports, (3) strengthened and diversified professional support measures, and (4) a questionnaire survey on pupils’ learning attitude and motivation. These initiatives, the government argue, will effectively mitigate the harmful effects of the test, thus “removing the incentives for over-drilling caused by TSA; alleviating stakeholders’ concerns about the stakes involved; and deepening mutual trust among schools, parents and stakeholders to enhance assessment literacy.”

Hold on. Aren’t these the sort of lofty visions that led to the birth of TSA at the very beginning? Let’s dust off the reform proposal submitted by the Education Commission in 2000 and see how familiar these words sound: ‘…the major function of assessment is to help teachers and parents understand the learning, progress and needs of their students, as well as their strengths and weaknesses. …For this purpose, we propose to put in place Basic Competency Assessments in Chinese, English and Mathematics at various stages of basic education. …We also recommend to use multiple modes of assessment, …to minimise the amount of quantitative evaluation…Excessive dictations, mechanical drilling, tests and examinations should be avoided so that students would have more time to participate in useful learning activities.’ (p.46)

The last sentence still applies today, doesn’t it? Even John Tsang’s CE election platform, rolled out on Monday, expresses the same determination – “to abolish all TSA/BCA tests to arrest the practice of intensive drilling.”

Enormous energy has been devoted to the doomed task of wiping out drilling and spoon-feeding in Hong Kong schools. It is a doomed task because, as history shows, the more we battle against it, the worse it gets.

In 1978, the notorious Secondary School Entrance Examination at Primary 6 was abolished. It was replaced by the now defunct Hong Kong Academic Aptitude Test (HKAAT) (like the ’11-plus’ in the UK), which governed admission to secondary school. This standardised test was warmly welcomed at the beginning. It measured P6 pupils’ logical and quantitative reasoning skills, and test-takers were not required to regurgitate large chunks of information as in the previous exam. However, as time went by, homework and learning activities in P5 and P6 across all schools morphed into “drill and kill” practices. Teaching to the test dominated the classroom. In 2000 the government finally abolished the aptitude test.

The Llewellyn Report of 1982 had expressed disapproval of the HKAAT and the sheer number of examinations in a pupil’s educational career. In the 1980s, the maximum number of public examinations a pupil went through could reach up to eight: the HKAAT, HKCEE, A-level, GCE (A and O Levels), and Higher Level exams.
‘The very frequency of examinations,” wrote Sir John Llewellyn, “is in itself disconcerting.” Now that these tests are history and we have the HKDSE as the only high-stakes public exam, shouldn’t we celebrate and stop whinging? If TSA is a low-stakes standardised test that has nothing to do with secondary school places allocation, why do primary schools still drill their pupils? The deep roots of this drilling madness lie not in the design of tests or the lack of government action. It is the rampant “culture of fear”, as the British sociologist Frank Furedi argues, that saturates our education system. The motivation to drill pupils for TSA is hidden in the Secondary School Places Allocation (SSPA) System. Since the academic year 2005/06, results of the Pre-Secondary One Hong Kong Attainment Test (Pre-S1 HKAT), which replaces the HKAAT, have been used for working out individual P6 pupils’ allocation bands for school selection. The allocation band is the product of a complex equation involving a P6 pupil’s internal school results in P5 and P6, and the weighted scores through scaling the sampled results of Pre-S1 HKAT. In other words, a pupil’s performance in Pre-S1 HKAT doesn’t have a direct influence on the allocation, but it will have an indirect bearing on the entire cohort’s allocation results. The government never publishes the scaling formula. So here comes the element of risk and uncertainty: as you never know which score will be selected for the sample, to avoid the impact of a lazy pupil’s result upon the entire group, let’s drill them all, and start as early as possible. Results of the TSA are not related to the school allocation mechanism, but we’d better train them with test-tackling skills early so that they will perform well in the Pre-S1 HKAT. Controversy over the BCA/TSA just manifests our incapacity to control the uncontrollable. What if we abolish the TSA/BCA altogether? As I mentioned earlier, upending the culture of drilling through name change (from TSA to BCA) or ditching it right away from our system is a doomed task, because any systemic change will be accompanied by fear. Parents fear that their children will “lose at the starting line.” How to calm ourselves down? Drill our kids. Teachers are worried about whether the TSA scores will tank compared with previous performance. The easy way is – drill our kids. School leaders quail at the demographic time bomb, budget cuts, and the risk of closure. What can be done to save the school? Well, drill our kids. Drilling is somehow palatable to our belief that number is all that matters. It gives us a false sense of hope but fulfils our simplistic thirst for certainty. We all lie with scores somehow: after all, a benchmark score is only an arbitrary number.
São Paulo in five words
“Effervescent chaos of mashed cultures.”

The sound of the city
São Paulo is known as the land of drizzle. Whether harmless showers or heavy storms, it rains quite a bit here. Sometimes it is a problem and some neighbourhoods flood; sometimes it is a blessing, because our water system is archaic and we rely on the rain to fill our reservoirs.

What everyone is tuning in to
Although she’s from Rio, Jout Jout is getting people from São Paulo (and all over Brazil) hooked on her super simple yet humorous and smart videoblog. In it she talks about frivolities alongside more serious issues, such as abusive relationships and professional dissatisfaction.

The best venue
Inside Farol: ‘one of the Centro neighbourhood’s most collaborative artistic hubs.’ Photograph: Farol
In the Centro neighbourhood, there are many beautiful but neglected buildings that have become creative and artistic hubs in the past few years. Farol is one of the most complete and collaborative of these hubs. On different floors of the same building you can find Eduqativo, an institute created to develop new ways of collective education; Líquen, a space for designers finding balance through digital and manual work; Fluxo, a journalism staffroom dedicated to raw news covered across different platforms; and Balsa, a place for hanging out that has a lovely view from the terrace.

Top of the playlist
Terno Rei is a rising band in São Paulo’s music scene. Their sound is part gloomy, part mellow, and they often write about urban loneliness – a subject most Paulistanos can relate to. Later this month they will perform at the Primavera Sound festival in Barcelona.

Favourite local artist
Marina Rheingantz’s Brejo, 2014. Photograph: Eduardo Ortega/Galeria Fortes Vilaça
Marina Rheingantz is a young painter who pictures dark and odd landscapes in an almost abstract way. She finds inspiration in memories and images that already exist, from photographic work to the films of Wim Wenders.

The big talking point
In São Paulo, water is an increasingly important issue. In 2014, the city suffered its most severe drought since 1930. Even though it rains a lot – and last February was the wettest month in 20 years – the reservoirs still haven’t filled to more than 5% of capacity.

The look on the street
The look on the street is as diverse as the city itself, but on the whole it’s cosmopolitan and smart, with some lightness and freshness to it – it is a tropical country we live in, after all. And because there can be sun, rain and cold wind in the same day, Paulistanos know how to work their layers.

The best Instagram account
Lane Marinho is a visual artist and crafter with an eye for São Paulo’s best features, capturing beautiful shapes and colours under a dreamy light.

What São Paulo does better than anywhere else …
Immigrant festivals. Whether Italian or Japanese, there are many big traditional festivities held in the city. There is even one festival organised by the Immigration Museum, where immigrant citizens from 20 different countries exhibit their traditional cuisine and culture in order to celebrate São Paulo’s diversity.

The big cultural moment
Most people say the city’s greatest cultural moment was the Brazilian Music festival, which was televised in the late 1960s and has recently inspired a documentary called A Night in ’67. The festival showcased artists from one of the most important and subversive movements in Brazilian cultural history, Tropicália, including Caetano Veloso, Gilberto Gil and Os Mutantes – artists who were later censored or exiled.

The best street art
Deconstruction of advertising ... a mural by Daniel Melim. Photograph: Daniel Melim, Galeria Choque Cultural
Street art is pretty strong in São Paulo. Whether graffiti or just straightforward pixação – as we call our spiky hieroglyphic tags – you can find it anywhere. Local artist Daniel Melim’s Mural da Luz, as eye-catching and interesting as it is, may not be the most significant in content – a sort of pop-art deconstruction of an advertisement. But, because of its location and integration with the city’s landscape, it has become one of the most memorable and engaging pieces of street art around.

From me
Photograph: Luísa Graça
Luísa Graça is a journalist and art director from São Paulo, Brazil.

Five to follow
Verena Smit
Aldo the Band
Feira Plana
Os Gêmeos
Amem!Amém
import {
  GeneratorTypes,
  getGeneratorValue,
  IGeneratorsState,
} from "../models/generators";

// Recalculates the total energy-per-second rate by summing the
// contribution of each generator type in the current state.
export const getRecalculatedRates = ({
  [GeneratorTypes.Basic]: basic,
  [GeneratorTypes.AlphaOne]: alphaOne,
}: IGeneratorsState) => {
  let energyPerSecond = 0;
  energyPerSecond += getGeneratorValue(basic);
  energyPerSecond += getGeneratorValue(alphaOne);
  return energyPerSecond;
};
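A minimal usage sketch of the selector above. The real shapes of IGeneratorsState and getGeneratorValue live in ../models/generators, which is not shown here, so the stand-ins below (a generator modeled as { count, baseValue }, with a rate of count * baseValue) are assumptions for illustration only, not the module's actual implementation.

// Hypothetical stand-ins for ../models/generators -- the real module may differ.
enum GeneratorTypes {
  Basic = "basic",
  AlphaOne = "alphaOne",
}

interface IGenerator {
  count: number; // how many of this generator type are owned (assumed field)
  baseValue: number; // energy/second produced by one unit (assumed field)
}

type IGeneratorsState = Record<GeneratorTypes, IGenerator>;

// Assumed semantics: a generator type's total value is count * baseValue.
const getGeneratorValue = (g: IGenerator): number => g.count * g.baseValue;

const getRecalculatedRates = ({
  [GeneratorTypes.Basic]: basic,
  [GeneratorTypes.AlphaOne]: alphaOne,
}: IGeneratorsState): number =>
  getGeneratorValue(basic) + getGeneratorValue(alphaOne);

// Three basic generators at 1 energy/s each, plus one alphaOne at 5 energy/s:
const state: IGeneratorsState = {
  [GeneratorTypes.Basic]: { count: 3, baseValue: 1 },
  [GeneratorTypes.AlphaOne]: { count: 1, baseValue: 5 },
};

console.log(getRecalculatedRates(state)); // 8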
package hex.tree.xgboost.matrix;

/**
 * Sparse matrix representation for XGBoost, stored in CSR
 * (Compressed Sparse Row) format.
 *
 * CSR example:
 *   long[] rowHeaders = new long[] {0, 2, 4, 7};              // row offsets
 *   float[] data      = new float[] {1f,2f, 4f,3f, 3f,1f,2f}; // non-zeros across each row
 *   int[]   colIndex  = new int[]  {0, 2, 0, 3, 0, 1, 2};     // col index for each non-zero
 */
public final class SparseMatrix {

    /**
     * Maximum size of one dimension of a SPARSE matrix with data.
     * A sparse matrix is a square matrix.
     */
    public static int MAX_DIM = Integer.MAX_VALUE - 10;

    public final float[][] _sparseData;
    public final long[][] _rowHeaders;
    public final int[][] _colIndices;

    /**
     * Constructs a {@link SparseMatrix} instance.
     *
     * @param sparseData Non-zero data of a sparse matrix
     * @param rowIndices Offsets into sparseData at which each row's elements begin
     * @param colIndices Column indices of elements in sparseData
     */
    public SparseMatrix(final float[][] sparseData, final long[][] rowIndices, final int[][] colIndices) {
        _sparseData = sparseData;
        _rowHeaders = rowIndices;
        _colIndices = colIndices;
    }
}
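To make the CSR layout in the class comment concrete, here is a short illustrative traversal of the example arrays from that comment (written in TypeScript to stay self-contained; the walk is language-agnostic and is a sketch, not code from the XGBoost sources). Row r's non-zeros occupy data[rowHeaders[r]] through data[rowHeaders[r + 1] - 1], and colIndex records each value's column.

// CSR traversal of the example matrix from the comment above.
const rowHeaders = [0, 2, 4, 7]; // row r spans indices [rowHeaders[r], rowHeaders[r + 1])
const data = [1, 2, 4, 3, 3, 1, 2]; // non-zero values, row by row
const colIndex = [0, 2, 0, 3, 0, 1, 2]; // column of each non-zero

for (let row = 0; row < rowHeaders.length - 1; row++) {
  for (let i = rowHeaders[row]; i < rowHeaders[row + 1]; i++) {
    console.log(`(${row}, ${colIndex[i]}) = ${data[i]}`);
  }
}
// Output: (0,0)=1 (0,2)=2 (1,0)=4 (1,3)=3 (2,0)=3 (2,1)=1 (2,2)=2,
// i.e. the dense matrix
//   1 0 2 0
//   4 0 0 3
//   3 1 2 0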
On Feb. 26, jihadist social media sites reported that Abu al Khayr al Masri, one of al Qaeda’s top leaders, had been killed in a US drone strike in Idlib, Syria. Images of Masri’s car were first posted online by Pieter Van Ostaeyen, a PhD candidate who closely tracks the fighting in Iraq and Syria. One of the pictures, which Mr. Van Ostaeyen obtained from an administrator at the jihadi outfit Al Maqalaat, can be seen above.

Masri’s death was subsequently confirmed in a joint statement issued by Al Qaeda in the Arabian Peninsula (AQAP) and Al Qaeda in the Islamic Maghreb (AQIM) on Mar. 2. The two al Qaeda branches described Masri as a “wise leader” and “teacher,” who instructed young Muslims how to live “with honor” and die as martyrs.

According to AQAP and AQIM, Masri was a “hero” who was killed in a “Crusader drone” strike. The two al Qaeda groups claim that the airstrike is just “one of America’s and the Crusader coalition’s crimes against Islam” and all Muslims. They also offered their condolences to Masri’s family, the Islamic ummah [community of Muslims around the globe], and “our sheikh and emir” Ayman al Zawahiri. AQAP and AQIM addressed America and its allies directly, saying that the deaths of their “martyrs” only increase their determination to “avenge them.”

From the Egyptian Islamic Jihad to al Qaeda deputy

In July 2016, Masri was identified as Ayman al Zawahiri’s “general deputy.” By that time, US counterterrorism officials had been tracking him for years.

The US Treasury Department designated Masri as a terrorist in Oct. 2005, saying that his real name was Abdullah Muhammad Rajab Abd al Rahman and that he was born in Kafr al Shaykh, Egypt on Nov. 3, 1957. Masri “was responsible for coordinating al Qaeda’s work with other terrorist organizations,” Treasury noted at the time. US intelligence identified Masri as the chairman of al Qaeda’s management council, according to the Washington Post. Masri also previously served as al Qaeda’s “chief of foreign relations,” and in that capacity he was a “liaison to the Taliban.” A dated photo of Masri, first disseminated by the US government, can be seen on the right.**

Masri was originally a senior member of the Egyptian Islamic Jihad (EIJ). In fact, he belonged to the EIJ’s elite shura (or consultation) council. The EIJ, led by Ayman al Zawahiri, effectively merged with Osama bin Laden’s operation during the 1990s, when the two worked hand-in-hand in a series of terrorist plots, including the Aug. 1998 US Embassy bombings.

Masri was tried and convicted in absentia by an Egyptian court as part of the “returnees from Albania case” in the late 1990s. That case was among the largest terror trials to be held during Hosni Mubarak’s tenure as Egyptian president. According to Arabic press accounts of the court proceedings, Masri was identified as one of Zawahiri’s closest advisers back then.

In Dec. 2003, Al-Sharq al-Awsat, a London-based Arabic publication, reported that Masri had served as the head of al Qaeda’s “Sudan station” in 1993. Bin Laden, Zawahiri and their men were stationed in Sudan at the time, and Masri was reportedly tasked with forging paperwork for jihadists who wanted to travel around the globe. Masri followed Zawahiri on journeys through Sudan, Yemen and ultimately to Afghanistan in the 1990s. As one of al Qaeda’s most senior managers, Masri likely had foreknowledge of the 9/11 attacks.*

Sometime after 9/11, Masri fled to Iran. It appears he was held under some form of detention for years. Then, in Sept. 2015, Masri and several other senior al Qaeda figures were reportedly freed from Iranian custody as part of a hostage swap negotiated by AQAP.

As FDD’s Long War Journal reported at the time, however, the details of the al Qaeda leaders’ detention inside Iran are murky. Masri and some of the others were still in Iranian custody as of mid-2010, according to files recovered in Osama bin Laden’s compound. Another letter delivered to bin Laden, authored in early Apr. 2011, noted that Masri’s sons had relocated to Baluchistan. But Masri’s status was a bit of a mystery. There were several reports in the years since mid-2010 indicating that Masri and the others had either already been released, or that the terms of their detention had been relaxed, allowing them even to travel. It is not clear what transpired, exactly, but Masri was certainly integrated back into al Qaeda’s hierarchy by late 2015. And while Iran held some al Qaeda operatives under house arrest, others inside Iran have been allowed to oversee the terror organization’s primary facilitation hub. [See FDD’s Long War Journal report, Senior al Qaeda leaders reportedly released from custody in Iran.]

Then, in July 2016, Masri oversaw one of al Qaeda’s most important moves in Syria to date.

Abu al Khayr al Masri sought to “unify” the rebels’ “ranks”

On July 28, 2016, Al Nusrah Front’s propaganda arm released a short audio message from Masri. Al Nusrah publicly identified Masri as Ayman al Zawahiri’s “general deputy” for the first time. Masri’s speech paved the way for Abu Muhammad al Julani, Al Nusrah’s emir, to announce that his group was being relaunched as Jabhat Fath al Sham (JFS). Julani also claimed that JFS wouldn’t be tied to any “external entity,” a phrase that some interpreted as indicating a split from al Qaeda. As FDD’s Long War Journal assessed at the time, however, much of al Qaeda’s own leadership had been moved to Syria, meaning al Qaeda itself isn’t “external” to the country. Abu al Khayr al Masri’s death in an American drone strike in Syria late last month underscores that point.

Julani also did not renounce his bayat (oath of allegiance) to Zawahiri. Moreover, al Qaeda has sought to hide its organizational affiliations and the extent of its influence in the Syrian uprising since 2011. In many ways, Al Nusrah’s rebranding was a return to al Qaeda’s original strategy for the Syrian war. That strategy was interrupted by the rise of Abu Bakr al Baghdadi’s Islamic State. Julani was forced to publicly reveal his loyalty to Zawahiri only after Baghdadi threatened to subsume Al Nusrah Front.

A closer examination of Masri’s message demonstrates that Al Nusrah’s rebranding was indeed the result of a decision made by al Qaeda’s most senior leadership. Masri emphasized that the jihadist ideology had taken root inside Syria. And he explained that al Qaeda wanted to protect the jihadists’ project, as it could lead to the creation of a truly Islamic state, or emirate. This has been al Qaeda’s main goal in Syria for years.

At the outset of his message, Masri claimed that the world was witnessing a “blessed stage of renaissance” in the Islamic ummah. In this “stage of revival,” Masri said, the people are now waging a “jihad of word and weapon” in their revolt against rulers such as Bashar al Assad. Most importantly, from al Qaeda’s perspective, the jihadist ideology was becoming more widely accepted among the people. Masri claimed that “a new generation” of Muslims was being “raised on the jurisprudence” of Islamic law and was learning that jihad is necessary “to preserve” their “religion.” The ummah has “reached a phase where jihad is expanding and entering Muslim society,” Masri said, according to a translation obtained by FDD’s Long War Journal. “The concept of jihad has changed from being a jihad of an elite few to a jihad of the ummah.”

Masri said that al Qaeda had studied both the “military and political aspects” of “the situation” in the Levant and determined that “every effort” should be made to “continue the jihad.” But they had to prevent their enemies from dividing “the mujahideen” from the “Sunni people.” The “mujahideen” had become a major “force,” administering the “liberated areas” through sharia courts. In addition, Masri explained, the jihadists had set up “public service bureaus” to attend to the people’s affairs, and in the “next phase” they would “establish an entity,” or government.

Masri then delivered his key line. “Based on my responsibility, we ask the leadership of Al Nusrah Front to proceed with that which safeguards the interests of Islam and Muslims, and protects the jihad of the people of the Levant,” Masri said, according to a translation obtained by FDD’s Long War Journal. Al Nusrah’s leadership should “take the appropriate steps with regard to this issue.”

Masri explained that this “step” was taken as part of al Qaeda’s call on “all mujahideen factions” to “unify” their “ranks to protect our people and our land.” Masri made it clear that unification was necessary to achieve their goal of building “a rightly-guided Islamic government.” In the event that a Taliban-style government was established, Masri said, al Qaeda’s leaders would be the “first to support this government.”

Masri told listeners that his boss, Dr. Ayman al Zawahiri (“our emir and sheikh”), had confirmed this pledge in his own statement. The audio of Masri’s speech then cut to an excerpt from a message delivered by Zawahiri in Jan. 2014. In it, Zawahiri addressed all of the insurgents fighting against Bashar al Assad’s regime, saying they are the best “hope” for establishing an Islamic state in the heart of the Levant, as well as for “liberating Jerusalem.”

Al Qaeda respects and admires “all of you,” Zawahiri said, addressing all of the factions as “brothers.” “We have always considered that your unity” is “more valued and cherished by us than any other organizational bonds,” Zawahiri said. “Your unity and the unity of your ranks are more important for us than organizational belonging and partisan fanaticism.” Zawahiri implored the mujahideen to “let go” of their “partisan fanaticism,” as their infighting distracts them from the war against their true enemies, including Shiite forces, Russia, and China, all of whom are supposedly colluding with the “Crusader campaign.”

Shortly after Masri’s speech, Abu Muhammad al Julani delivered his own. Echoing Masri’s message, Julani said JFS (formerly Al Nusrah) hoped “to form a unified body, whose basis is Al Shura [consultation],” and which was capable of “uniting the masses of the people of Al Sham.”

Julani, who was dressed in garb similar to that worn by Osama bin Laden, heaped praise on Masri and Zawahiri. “We thank them for their stance, whereby they gave priority to the interests of the people of Al Sham, their Jihad, their revolution, as well as their proper assessment of the general benefits [of] the Jihad,” Julani said. “This noble stance will be recorded in the annals of history. Their blessed leadership has, and shall continue to be, an exemplar of putting the needs of the community and their higher interests before the interest of any individual group.”

Julani added that Masri and Zawahiri had “practically implemented the words of Sheikh Osama bin Laden.” Julani then quoted bin Laden as saying: “The interests of the ummah take precedence over the interest of any state; the interests of the state take precedence over the interest of any Jama’ah (group); the interests of the Jama’ah (group) take precedence over any individual.”

Hay’at Tahrir al Sham: An attempt to “unify” the rebels’ ranks

As Julani, Masri, and Zawahiri all made clear, al Qaeda has sought to “unify” the rebel groups in Syria. By merging with other groups, al Qaeda could further mask its operations and portray any outside intervention, especially by the US and the West, as part of a conspiracy against Syria’s Sunni population.

Indeed, this is exactly how the jihadists have sought to characterize targeted airstrikes against al Qaeda veterans. While the US government says drone strikes and other bombings are necessary to suppress the al Qaeda threat to the West, the jihadists claim such operations are really intended to disrupt the anti-Assad insurgency. Julani himself said that Al Nusrah’s relaunch as JFS was intended, in part, “to expose the deceptions of the international community, the leaders being the US and Russia, in their relentless bombardment and displacement of the Muslim masses of Al Sham under the pretense of targeting Jabhat al Nusrah, an al Qaeda affiliate.”

In this regard, Al Nusrah’s relaunch as JFS was just the first step in al Qaeda’s plan as of mid-2016. After offering some deliberately vague language about Al Nusrah disassociating from any “external [foreign] entity,” al Qaeda quickly moved to unite JFS with other groups. However, it wasn’t until Jan. 2017 that a new joint venture, Hay’at Tahrir al Sham (HTS), the “Assembly for the Liberation of the Levant (or Syria),” was announced. HTS includes JFS and several of its allies.

It is likely that JFS and the other groups wanted to announce the creation of HTS just days after Masri’s and Julani’s announcements on July 28, 2016. For example, FDD’s Long War Journal observed that official social media accounts associated with HTS began posting messages in early Aug. 2016. A screen shot of one HTS Twitter account can be seen above. The jihadists operating this account began tweeting on Aug. 3, meaning there was a clear expectation that HTS was going to be announced. Some of the tweets documented the battles then raging in Aleppo.

The delay in the launch of HTS may have been caused by disagreements between JFS and its battlefield partners. In early 2016, jihadists on social media and others reported that Julani had already floated a unification proposal in closed-door meetings. But some within Ahrar al Sham, which has modeled itself after the Afghan Taliban, didn’t think Julani’s initiative was sufficient, and so they rejected it.

When HTS was announced in January, a powerful faction within Ahrar al Sham, as well as a number of Ahrar’s fighting battalions, defected to the joint venture. These jihadists were led by Abu Jaber, the former head of Ahrar al Sham, who was named the first overall leader of HTS. (Julani is the organization’s military chief.) Abu Jaber has adopted the same population-centric approach to waging jihad as Masri and other senior al Qaeda leaders. He is seeking to make jihad even more “popular” in Syria.

However, other factions in Ahrar al Sham refused to join HTS. And in the weeks leading up to the merger in late January, there were multiple reports of infighting between JFS and other rebel groups in northern Syria, including parts of Ahrar al Sham. These disagreements demonstrate that, thus far, there are limits to al Qaeda’s ability to absorb other rebel factions under a common banner. Still, HTS continues to cooperate with Free Syrian Army (FSA)-branded groups, the reconstituted Ahrar al Sham and other Islamist factions. Zawahiri has long advised al Qaeda’s men to adopt this approach in Syria.

Veteran Egyptian jihadists have held key positions in al Qaeda

Nearly thirty years after al Qaeda was founded, veteran Egyptian jihadists who have long been allied with Ayman al Zawahiri continue to play a major role in the organization.

Saif al Adel and Abu Mohammed al Masri, both of whom were also senior figures in the Egyptian Islamic Jihad (EIJ), were reportedly freed by Iran alongside Abu al Khayr al Masri in 2015. Like Abu al Khayr, the circumstances of their detention inside Iran prior to that time are shrouded in uncertainty. Since late 2015, one or both of them may have operated inside Syria.

Another EIJ official and Zawahiri loyalist, Ibrahim al Banna, is a co-founder of Al Qaeda in the Arabian Peninsula. Al Banna was sent to Yemen in the early 1990s to build up al Qaeda’s tribal relations. He was designated as a terrorist by the US State Department earlier this year.

The US air campaign in Syria has targeted legacy EIJ and Gama’at al-Islamiyya (Islamic Group, or IG) leaders who have played major roles in al Qaeda. In Apr. 2016, the US killed Rifai Ahmed Taha Musa in an airstrike in Idlib. Taha was once the overall leader of the IG. He was a close ally of bin Laden and Zawahiri in the 1990s. Taha and another Egyptian jihadi, Mohammed Islambouli, were top leaders in al Qaeda’s so-called Khorasan Group. Islambouli is still alive and has lived in Turkey for some time.

Ahmed Salama Mabrouk, one of Masri’s and Zawahiri’s old school comrades in the EIJ, sat on Julani’s right hand side as he announced Al Nusrah’s rebranding in July 2016. Mabrouk was killed in early Oct. 2016. The Pentagon explained at the time that he was “one of Al Qaeda’s most senior leaders.”

In early February, the Defense Department announced that still another Egyptian, Hani Haykal (also known as Abu Hani al Masri), was struck down near Idlib. The Pentagon said that Haykal was “one of the founders of Egyptian Islamic Jihad.” Haykal swore an oath of allegiance to bin Laden, and went on to become a top military leader in Ahrar al Sham. Haykal wasn’t the only Egyptian al Qaeda veteran to join Ahrar al Sham, either.

Al Qaeda has tried to introduce some of these Egyptians – specifically Saif al Adel, Abu Mohammed al Masri, and Abu al Khayr al Masri – to younger jihadis. One of the ways al Qaeda has done this is by emphasizing the role they played in mentoring prominent figures. Nasser bin Ali al Ansi, a senior AQAP leader and deputy general manager of al Qaeda, was killed in Apr. 2015. In his eulogy for al Ansi, the al Qaeda veteran Khalid Saeed Batarfi specifically mentioned that this trio of Egyptians had groomed him. Hamza bin Laden, Osama’s son, also raised their profile in an audio message released in Aug. 2015. Hamza called for the three, along with others, to be released from custody. But again, they may have already had some freedom by that point.

Abu al Khayr al Masri’s death is a significant tactical blow to al Qaeda. But as the roster above shows, longtime talent continues to lead the organization. And they’ve had ample time to raise a “new generation” of jihadists as well.

*The Defense Department released a transcript of a video recording of bin Laden talking about the 9/11 hijackings in Nov. 2001. “We had notification since the previous Thursday that the event would take place that day,” bin Laden said. “We had finished our work that day and had the radio on. It was 5:30 p.m. our time. I was sitting with Dr. Ahmad Abu-al-((Khair)).” It is possible that the Dr. Ahmad in question is Abu al Khayr al Masri, as Ahmad Hasan Abu al Khayr is one of his known aliases. “Immediately,” bin Laden continued, “we heard the news that a plane had hit the World Trade Center. We turned the radio station to the news from Washington. The news continued and no mention of the attack until the end. At the end of the newscast, they reported that a plane just hit the World Trade Center.” If the man sitting next to bin Laden was in fact Abu al Khayr al Masri, then he was with the al Qaeda founder when news of the 9/11 attacks reached al Qaeda over the radio.

**After Abu al Khayr al Masri’s death was reported, jihadis disseminated another, more recent image of him. The photo was first published by CNN’s Paul Cruickshank.

Thomas Joscelyn is a Senior Fellow at the Foundation for Defense of Democracies and the Senior Editor for FDD's Long War Journal.
// Copyright (C) 2019 Orange // // This software is distributed under the terms and conditions of the 'Apache License 2.0' // license which can be found in the file 'License.txt' in this package distribution // or at 'http://www.apache.org/licenses/LICENSE-2.0'. package v1 import ( "context" "errors" "fmt" grpc_middleware "optisam-backend/common/optisam/middleware/grpc" "optisam-backend/common/optisam/token/claims" v1 "optisam-backend/license-service/pkg/api/v1" repo "optisam-backend/license-service/pkg/repository/v1" "optisam-backend/license-service/pkg/repository/v1/mock" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) func Test_licenseServiceServer_ListAcqRightsForProductAggregation(t *testing.T) { ctx := grpc_middleware.AddClaims(context.Background(), &claims.Claims{ UserID: "<EMAIL>", Role: "SuperAdmin", Socpes: []string{"Scope1"}, }) var mockCtrl *gomock.Controller var rep repo.License type args struct { ctx context.Context req *v1.ListAcqRightsForProductAggregationRequest } tests := []struct { name string s *licenseServiceServer args args setup func() want *v1.ListAcqRightsForProductAggregationResponse wantErr bool }{ {name: "SUCCESS - metric type OPS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricOPSComputed{ EqTypeTree: 
[]*repo.EquipmentType{start, base, agg, end}, BaseType: base, AggregateLevel: agg, NumCoresAttr: cores, NumCPUAttr: cpu, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricOPSComputedLicensesAgg(ctx, "pro1", "OPS", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricOPS(ctx, []string{"Scope1"}).Times(1).Return([]*repo.MetricOPS{ &repo.MetricOPS{ Name: "OPS", NumCoreAttrID: "cores", NumCPUAttrID: "cpus", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", AggerateLevelEqTypeID: "e3", StartEqTypeID: "e1", EndEqTypeID: "e4", }, &repo.MetricOPS{ Name: "WS", NumCoreAttrID: "cores", NumCPUAttrID: "cpus", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", AggerateLevelEqTypeID: "e3", StartEqTypeID: "e1", EndEqTypeID: "e4", }, &repo.MetricOPS{ Name: "IMB", }, }, nil) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "OPS", NumCptLicences: 10, NumAcqLicences: 1197, TotalCost: 4.0567515e+07, DeltaNumber: 1187, DeltaCost: 2.2558935e+06, }, }, }, wantErr: false, }, {name: "SUCCESS - metric type SPS - licensesProd > licensesNonProd", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "SPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "SPS", Type: repo.MetricSPSSagProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricSPSComputed{ BaseType: base, NumCoresAttr: 
cores, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricSPSComputedLicensesAgg(ctx, "pro1", "SPS", mat, []string{"Scope1"}).Return(uint64(12), uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricSPS(ctx, []string{"Scope1"}).Times(1).Return([]*repo.MetricSPS{ &repo.MetricSPS{ Name: "OPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricSPS{ Name: "SPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricSPS{ Name: "IMB", }, }, nil) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "SPS", NumCptLicences: 12, NumAcqLicences: 1197, TotalCost: 4.0567515e+07, DeltaNumber: 1185, DeltaCost: 2.2520925e+06, }, }, }, wantErr: false, }, {name: "SUCCESS - metric type SPS - licensesProd <= licensesNonProd", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "SPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "SPS", Type: repo.MetricSPSSagProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricSPSComputed{ BaseType: base, NumCoresAttr: cores, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricSPSComputedLicensesAgg(ctx, "pro1", "SPS", mat, []string{"Scope1"}).Return(uint64(8), uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricSPS(ctx, []string{"Scope1"}).Times(1).Return([]*repo.MetricSPS{ 
&repo.MetricSPS{ Name: "OPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricSPS{ Name: "SPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricSPS{ Name: "IMB", }, }, nil) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "SPS", NumCptLicences: 10, NumAcqLicences: 1197, TotalCost: 4.0567515e+07, DeltaNumber: 1187, DeltaCost: 2.2558935e+06, }, }, }, wantErr: false, }, {name: "SUCCESS - metric type ACS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "acs1", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "acs1", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "acs1", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "acs1", Type: repo.MetricAttrCounterStandard, }, }, nil).Times(1) cores := &repo.Attribute{ Name: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ Name: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ Name: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", Type: "Server", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricACSComputed{ Name: "acs1", BaseType: base, Attribute: corefactor, Value: "2", } mockLicense.EXPECT().MetricACSComputedLicensesAgg(ctx, "pro1", "acs1", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricACS(ctx, []string{"Scope1"}).Times(1).Return([]*repo.MetricACS{ &repo.MetricACS{ Name: "acs1", EqType: "Server", AttributeName: "corefactor", Value: "2", }, &repo.MetricACS{ Name: "acs2", EqType: "Server", AttributeName: "cores", Value: "2", }, }, nil) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: 
"ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "acs1", NumCptLicences: 10, NumAcqLicences: 1197, TotalCost: 4.0567515e+07, DeltaNumber: 1187, DeltaCost: 2.2558935e+06, }, }, }, wantErr: false, }, {name: "SUCCESS - metric type IPS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "IPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "ibm.pvu.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "ibm.pvu.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "IPS", Type: repo.MetricIPSIbmPvuStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricIPSComputed{ BaseType: base, NumCoresAttr: cores, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricIPSComputedLicensesAgg(ctx, "pro1", "IPS", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricIPS(ctx, []string{"Scope1"}).Times(1).Return([]*repo.MetricIPS{ &repo.MetricIPS{ Name: "OPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricIPS{ Name: "IPS", NumCoreAttrID: "cores", CoreFactorAttrID: "corefactor", BaseEqTypeID: "e2", }, &repo.MetricIPS{ Name: "IMB", }, }, nil) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "IPS", NumCptLicences: 10, NumAcqLicences: 1197, TotalCost: 4.0567515e+07, DeltaNumber: 1187, DeltaCost: 2.2558935e+06, }, }, }, wantErr: false, }, {name: "SUCCESS - metric name doesnt exist", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: 
"proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "SPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: []*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "SPS", NumAcqLicences: 1197, TotalCost: 4.0567515e+07, }, }, }, wantErr: false, }, {name: "SUCCESS - no equipments linked with product", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", Scope: "Scope1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 0, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) }, want: &v1.ListAcqRightsForProductAggregationResponse{ AcqRights: 
[]*v1.ProductAcquiredRights{ &v1.ProductAcquiredRights{ SKU: "ORAC001PROC,ORAC002PROC", SwidTag: "ORAC001,ORAC002", Metric: "OPS", NumAcqLicences: 1197, TotalCost: 4.0567515e+07, }, }, }, wantErr: false, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot find claims in context", args: args{ ctx: context.Background(), req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() {}, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - failed to get product aggregation", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(nil, errors.New(("Internal"))).Times(1) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot fetch metrics", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return(nil, errors.New("Internal")).Times(1) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot fetch equipment types", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ 
Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return(nil, errors.New("Internal")).Times(1) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot fetch metric OPS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricOPSComputed{ EqTypeTree: []*repo.EquipmentType{start, base, agg, end}, BaseType: base, AggregateLevel: agg, NumCoresAttr: cores, NumCPUAttr: cpu, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricOPSComputedLicensesAgg(ctx, "pro1", "OPS", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricOPS(ctx, []string{"Scope1"}).Times(1).Return(nil, errors.New("Internal")) }, wantErr: true, }, {name: 
"FAILURE - ListAcqRightsForProductAggregation - cannot fetch metric SPS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "SPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "sag.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "SPS", Type: repo.MetricSPSSagProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricSPSComputed{ BaseType: base, NumCoresAttr: cores, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricSPSComputedLicensesAgg(ctx, "pro1", "SPS", mat, []string{"Scope1"}).Return(uint64(12), uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricSPS(ctx, []string{"Scope1"}).Times(1).Return(nil, errors.New("Internal")) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot fetch metric IPS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "IPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "ibm.pvu.standard", AcquiredLicensesNumber: 1016, 
LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "ibm.pvu.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "IPS", Type: repo.MetricIPSIbmPvuStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricIPSComputed{ BaseType: base, NumCoresAttr: cores, CoreFactorAttr: corefactor, } mockLicense.EXPECT().MetricIPSComputedLicensesAgg(ctx, "pro1", "IPS", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricIPS(ctx, []string{"Scope1"}).Times(1).Return(nil, errors.New("Internal")) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot fetch metric ACS", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "acs1", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "acs1", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "acs1", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: repo.MetricOPSOracleProcessorStandard, }, &repo.Metric{ Name: "acs1", Type: repo.MetricAttrCounterStandard, }, }, nil).Times(1) cores := &repo.Attribute{ Name: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ Name: 
"cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ Name: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", Type: "Server", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) mat := &repo.MetricACSComputed{ Name: "acs1", BaseType: base, Attribute: corefactor, Value: "2", } mockLicense.EXPECT().MetricACSComputedLicensesAgg(ctx, "pro1", "acs1", mat, []string{"Scope1"}).Return(uint64(10), nil).Times(1) mockLicense.EXPECT().ListMetricACS(ctx, []string{"Scope1"}).Times(1).Return(nil, errors.New("Internal")) }, wantErr: true, }, {name: "FAILURE - ListAcqRightsForProductAggregation - cannot find metric for computation", args: args{ ctx: ctx, req: &v1.ListAcqRightsForProductAggregationRequest{ ID: "proAggID1", }, }, setup: func() { mockCtrl = gomock.NewController(t) mockLicense := mock.NewMockLicense(mockCtrl) rep = mockLicense mockLicense.EXPECT().ProductAggregationDetails(ctx, "proAggID1", &repo.QueryProductAggregations{}, []string{"Scope1"}).Return(&repo.ProductAggregation{ ID: "proAggID1", Name: "pro1", Editor: "e1", Product: "productName", Metric: "OPS", NumOfApplications: 1, NumOfEquipments: 1, TotalCost: 1000, Products: []string{"Scope1", "Scope2"}, AcqRightsFull: []*repo.AcquiredRights{ &repo.AcquiredRights{ Entity: "", SKU: "ORAC001PROC", SwidTag: "ORAC001", ProductName: "Oracle Client", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 1016, LicensesUnderMaintenanceNumber: 1008, AvgLicenesUnitPrice: 2042, AvgMaintenanceUnitPrice: 14294, TotalPurchaseCost: 2074672, TotalMaintenanceCost: 14408352, TotalCost: 35155072, }, &repo.AcquiredRights{ Entity: "", SKU: "ORAC002PROC", SwidTag: "ORAC002", ProductName: "Oracle XML Development Kit", Editor: "oracle", Metric: "oracle.processor.standard", AcquiredLicensesNumber: 181, LicensesUnderMaintenanceNumber: 181, AvgLicenesUnitPrice: 1759, AvgMaintenanceUnitPrice: 12313, TotalPurchaseCost: 318379, TotalMaintenanceCost: 2228653, TotalCost: 5412443, }, }, }, nil).Times(1) mockLicense.EXPECT().ListMetrices(ctx, []string{"Scope1"}).Return([]*repo.Metric{ &repo.Metric{ Name: "OPS", Type: "abc", }, &repo.Metric{ Name: "WS", Type: repo.MetricOPSOracleProcessorStandard, }, }, nil).Times(1) cores := &repo.Attribute{ ID: "cores", Type: repo.DataTypeInt, } cpu := &repo.Attribute{ ID: "cpus", Type: repo.DataTypeInt, } corefactor := &repo.Attribute{ ID: "corefactor", Type: repo.DataTypeInt, } base := &repo.EquipmentType{ ID: "e2", ParentID: "e3", Attributes: []*repo.Attribute{cores, cpu, corefactor}, } start := &repo.EquipmentType{ ID: "e1", ParentID: "e2", } agg := &repo.EquipmentType{ ID: "e3", ParentID: "e4", } end := &repo.EquipmentType{ ID: "e4", ParentID: "e5", } endP := &repo.EquipmentType{ ID: "e5", } mockLicense.EXPECT().EquipmentTypes(ctx, []string{"Scope1"}).Return([]*repo.EquipmentType{start, base, agg, end, endP}, nil).Times(1) }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.setup() s := NewLicenseServiceServer(rep) got, err := s.ListAcqRightsForProductAggregation(tt.args.ctx, tt.args.req) if (err != nil) != tt.wantErr { 
t.Errorf("licenseServiceServer.ListAcqRightsForProductAggregation() error = %v, wantErr %v", err, tt.wantErr) return } if !tt.wantErr { compareAcqRightforProAggResponse(t, "ListAcqRightsForProductAggregation", got, tt.want) } else { fmt.Println("test case passed : [", tt.name, "]") } }) } } func compareAcqRightforProAggResponse(t *testing.T, name string, exp *v1.ListAcqRightsForProductAggregationResponse, act *v1.ListAcqRightsForProductAggregationResponse) { if exp == nil && act == nil { return } if exp == nil { assert.Nil(t, act, "attribute is expected to be nil") } compareAcqRightforProAggAll(t, name+".AcqRights", exp.AcqRights, act.AcqRights) } func compareAcqRightforProAggAll(t *testing.T, name string, exp []*v1.ProductAcquiredRights, act []*v1.ProductAcquiredRights) { if !assert.Lenf(t, act, len(exp), "expected number of elemnts are: %d", len(exp)) { return } for i := range exp { compareAcqRightforProAgg(t, fmt.Sprintf("%s[%d]", name, i), exp[i], act[i]) } } func compareAcqRightforProAgg(t *testing.T, name string, exp *v1.ProductAcquiredRights, act *v1.ProductAcquiredRights) { if exp == nil && act == nil { return } if exp == nil { assert.Nil(t, act, "attribute is expected to be nil") } assert.Equalf(t, exp.SKU, act.SKU, "%s.SKU are not same", name) assert.Equalf(t, exp.Metric, act.Metric, "%s.Metric are not same", name) assert.Equalf(t, exp.SwidTag, act.SwidTag, "%s.SwidTag are not same", name) assert.Equalf(t, exp.NumCptLicences, act.NumCptLicences, "%s.NumCptLicences are not same", name) assert.Equalf(t, exp.NumAcqLicences, act.NumAcqLicences, "%s.NumAcqLicences are not same", name) assert.Equalf(t, exp.TotalCost, act.TotalCost, "%s.TotalCost are not same", name) assert.Equalf(t, exp.DeltaNumber, act.DeltaNumber, "%s.DeltaNumber are not same", name) assert.Equalf(t, exp.DeltaCost, act.DeltaCost, "%s.DeltaCost are not same", name) } func compareQueryFiltersWithoutOrder(t *testing.T, name string, expFilter []repo.Queryable, actFilter []repo.Queryable) bool { for i := range expFilter { idx := queryFilterindex(expFilter[i].Key(), actFilter) if idx == -1 { return false } if !compareQueryFilter(t, fmt.Sprintf("%s[%d]", name, i), expFilter[i], actFilter[idx]) { return false } } return true } func queryFilterindex(key string, filter []repo.Queryable) int { for i := range filter { if key == filter[i].Key() { return i } } return -1 }
/** * Thrown if a version code is provided which is not recognized or supported */ @SuppressWarnings("WeakerAccess") class UnsupportedBkdfVersionException extends IllegalStateException { private final int unsupportedByte; /** * Create new instance * * @param unsupportedByte the unsupported version code */ public UnsupportedBkdfVersionException(int unsupportedByte) { this.unsupportedByte = unsupportedByte; } @Override public String getMessage() { return String.format("Version 0x%s is not supported in this implementation of BKDF", Bytes.from((byte) unsupportedByte).encodeHex()); } }
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package sesion2;

import java.util.Scanner;

/**
 *
 * @author josef
 */
public class CalificacionMensajes {

    /**
     * @param args the command line arguments
     */
    public static void main(String[] args) {
        Scanner ponny = new Scanner(System.in);
        System.out.println("Enter the grade for the little creature of creation");
        double calificacion = ponny.nextDouble();
        if (calificacion < 6) {
            System.out.println("See you at the retake");
        }
        if (calificacion >= 6 && calificacion < 7) {
            System.out.println("Passed by the skin of your teeth");
        }
        if (calificacion >= 7 && calificacion < 8) {
            System.out.println("Put some more punch into it");
        }
        if (calificacion >= 8 && calificacion < 9) {
            System.out.println("Good, but you can do better");
        }
        if (calificacion >= 9 && calificacion < 10) {
            System.out.println("Very good, you were just a little short");
        }
        if (calificacion >= 10) {
            System.out.println("Excellent, with all the right attitude");
        }
    }
}
18th May 2008, 09:01 pm

The post Beautiful differentiation showed how easily and beautifully one can construct an infinite tower of derivative values in Haskell programs, while computing plain old values. The trick (from Jerzy Karczmarczuk) was to overload numeric operators to operate on the following (co)recursive type:

data Dif b = D b (Dif b)

This representation, however, works only when differentiating functions from a scalar (one-dimensional) domain, i.e., functions of type a -> b for a scalar type a. The reason for this limitation is that only in those cases can the type of derivative values be identified with the type of regular values.

Consider a function f :: (R,R) -> R, where R is, say, Double. The value of f at a domain value (x,y) has type R, but the derivative of f consists of two partial derivatives. Moreover, the second derivative consists of four partial second-order derivatives (or three, depending how you count). A function f :: (R,R) -> (R,R,R) also has two partial derivatives at each point (x,y), each of which is a triple. That pair of triples is commonly written as a two-by-three matrix.

Each of these situations has its own derivative shape and its own chain rule (for the derivative of function compositions), using plain-old multiplication, scalar-times-vector, vector-dot-vector, matrix-times-vector, or matrix-times-matrix. Second derivatives are more complex and varied.

How many forms of derivatives and chain rules are enough? Are we doomed to work with a plethora of increasingly complex types of derivatives, as well as the diverse chain rules needed to accommodate all compatible pairs of derivatives?

Fortunately, not. There is a single, simple, unifying generalization. By reconsidering what we mean by a derivative value, we can see that these various forms are all representations of a single notion, and all the chain rules mean the same thing on the meanings of the representations. This blog post is about that unifying view of derivatives.

Edits:

2008-05-20: There are several comments about this post on reddit.
2008-05-20: Renamed derivative operator from D to deriv to avoid confusion with the data constructor for derivative towers.
2008-05-20: Renamed linear map type from (:->) to (:-*) to make it visually closer to a standard notation.

What's a derivative?

To get an intuitive sense of what's going on with derivatives in general, let's look at some examples. If you already know about calculus on manifolds, you might want to skip ahead.

One dimension

Start with a simple function on real numbers:

f1 :: R -> R
f1 x = x^2 + 3*x + 1

Writing the derivative of a function f as deriv f, let's now consider the question: what is deriv f1? We might say that

deriv f1 x = 2*x + 3

so e.g., deriv f1 5 = 13. In other words, f1 is changing 13 times as fast as its argument, when its argument is passing 5. Rephrased yet again, if dx is a very tiny number, then f1 (5+dx) - f1 5 is very nearly 13 * dx. If f1 maps seconds to meters, then deriv f1 5 is 13 meters per second. So already, we can see that the range of f1 (meters) and the range of deriv f1 (meters/second) disagree.

Two dimensions in and one dimension out

As a second example, consider a two-dimensional domain:

f2 :: (R,R) -> R
f2 (x,y) = 2*x*y + 3*x + 5*y + 7

Again, let's consider some units, to get a guess of what kind of thing deriv f2 (x,y) really is. Suppose that f2 measures altitude of terrain above a plane, as a function of the position in the plane.
(So f2 is a "height field".) You can guess that deriv f2 (x,y) is going to have something to do with how fast the altitude is changing, i.e. the slope, at (x,y). But there isn't a single slope. Instead, there's a slope for every possible compass direction (a hiker's degrees of freedom).

Now consider the conventional math answer to what is deriv f2 (x,y). Since f2 has a two-dimensional domain, it has two partial derivatives, and its derivative is commonly written as a pair of the two partials:

deriv f2 (x,y) = (2*y+3, 2*x+5)

In our example, these two pieces of information correspond to two of the possible slopes. The first is the slope if heading directly east, and the second if directly north (increasing x and increasing y, respectively).

What good does it do our hiker to be told just two of the infinitude of possible slopes at a point? The answer is perhaps magical: for well-behaved terrains, these two pieces of information are enough to calculate all (infinitely many) slopes, with just a bit of math. Every direction can be described as partly east and partly north (perhaps negatively for westish and southish directions). Given a direction angle ang (where east is zero and north is 90 degrees), the east and north components are cos ang and sin ang, respectively. When heading in the direction ang, the slope will be a weighted sum of the east-going slope and the north-going slope, where the weights are the east and north components (cos ang and sin ang).

Instead of angles, our hiker may prefer thinking directly about the north and east components of a tiny step from the position (x,y). If the step is small enough and lands dx feet to the east and dy feet to the north, then the change in altitude, f2 (x+dx, y+dy) - f2 (x,y), is very nearly equal to (2*y+3)*dx + (2*x+5)*dy. If we use (<.>) to mean dot (inner) product, then this change in altitude is deriv f2 (x,y) <.> (dx,dy).

From this second example, we can see that the derivative value is not a range value, but also not a rate-of-change of range values. It's a pair of such rates with the know-how to use those rates to determine output changes.

Two dimensions in and three dimensions out

Next, imagine moving around on a surface in space, say a torus, and suppose that the surface has grid marks to define a two-dimensional parameter space. As our hiker travels around in the 2D parameter space, his position in 3D space changes accordingly, more flexibly than just an altitude. This situation corresponds to a function from 2D to 3D:

f3 :: (R,R) -> (R,R,R)

At any position (s,t) in the parameter space, and for every choice of direction through parameter space, each of the coordinates of the position in 3D space has a rate of change. Again, if the function is mathematically well-behaved (differentiable), then all of these rates of change can be summarized in two partial derivatives. This time, however, each partial derivative has components in X, Y, and Z, so it takes six numbers to describe the 3D velocities for all possible directions in parameter space. These numbers are usually written as a 3-by-2 matrix m (the Jacobian of f3). Given a small parameter step (dx,dy), the resulting change in 3D position is equal to the product of the derivative matrix and the difference vector, i.e., m `timesVec` (dx,dy).

A common perspective

The examples above use different representations for derivatives: scalar numbers, a vector (pair of numbers), and a matrix.
Common to all of these representations is the ability to turn a small step in the function's domain into a resulting step in the range.

- In f1, the (scalar) derivative c really means (c *), meaning multiply by c.
- In f2, the (vector) derivative v means (v <.>).
- In f3, the (matrix) derivative m means (m `timesVec`).

So, the common meaning of these derivative representations is a function, and not just any function, but a linear function, often called a "linear map" or "linear transformation". For a function lf to be linear in this context means that

- lf (u+v) == lf u + lf v, and
- lf (c*v) == c * lf v, for scalar values c.

Now what about the different chain rules, saying to combine derivative values via various kinds of products (scalar/scalar, scalar/vector, vector/vector dot, matrix/vector)? Each of these products implements the same abstract notion, which is composition of linear maps.

What about Dif?

Now let's return to the derivative towers we used before:

data Dif b = D b (Dif b)

As I mentioned above, this representation only works when derivative values can be represented just like range values. That punning of derivative values with range values works when the domain type is one dimensional. For functions over higher-dimensional domains, we'll have to use a different representation. Assume a type of linear functions from a to b:

type a :-* b = . . .

(In Haskell, type constructors beginning with a colon are used infix.) Since the derivative type depends on domain as well as range, our derivative tower will have two type parameters instead of one. To make definitions prettier, I'll change derivative towers to an infix operator as well.

data a :> b = D b (a :> (a :-* b))

An infinitely differentiable function is then one that produces a derivative tower:

type a :~> b = a -> (a:>b)

What's next?

Perhaps now you're wondering: Are these lovely ideas workable in practice? What happens to the code from Beautiful differentiation? What use are derivatives, anyway? These questions and more will be answered in upcoming installments.
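To make the unifying view concrete, here is a minimal, self-contained Haskell sketch of these types, naively modeling a linear map as a plain function. Note that Linear, applyLin, composeLin, and derivF2 are names invented for this sketch, not definitions from the post, which deliberately leaves the representation of (:-*) open:

{-# LANGUAGE TypeOperators #-}

-- Naive model of linear maps: just functions, assumed (not checked) linear.
newtype a :-* b = Linear { applyLin :: a -> b }

-- The one, general chain rule: composition of linear maps.
composeLin :: (b :-* c) -> (a :-* b) -> (a :-* c)
composeLin (Linear g) (Linear f) = Linear (g . f)

-- Derivative towers over a domain a and range b: a value, followed by a
-- tower of successively higher derivatives, each one a linear map.
data a :> b = D b (a :> (a :-* b))

-- Infinitely differentiable functions.
type a :~> b = a -> (a :> b)

-- Example from the post: the derivative of f2 (x,y) = 2*x*y + 3*x + 5*y + 7
-- at a point, as a linear map taking a small step (dx,dy) to the resulting
-- change in altitude.
derivF2 :: (Double, Double) -> ((Double, Double) :-* Double)
derivF2 (x, y) = Linear (\(dx, dy) -> (2*y + 3)*dx + (2*x + 5)*dy)

With this reading, the scalar, vector, and matrix chain rules from the examples above are all instances of composeLin; only the representation of the linear map changes between them.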
import re

from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize


def tokenize(text):
    """Normalize, tokenize, and lemmatize a text string, dropping English stopwords."""
    text = text.lower()
    text = re.sub('[^a-zA-Z0-9]', ' ', text)
    text = word_tokenize(text)
    stop_words = set(stopwords.words('english'))
    text = [word for word in text if word not in stop_words]
    lemmatizer = WordNetLemmatizer()
    text = [lemmatizer.lemmatize(word=word) for word in text]
    return text
/** * Write out any pending data, including padding if necessary. * * @throws IOException if an I/O error occurs */ public final void complete() throws IOException { if (!completed) { if (rest > 0) { encode(in, 0, rest); } flushBuffer(); completed = true; } }
/* Copyright 2014-2016 Freescale Semiconductor Inc. * Copyright 2016 NXP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __DPAA2_ETH_H #define __DPAA2_ETH_H #include <linux/netdevice.h> #include <linux/if_vlan.h> #include "../../fsl-mc/include/dpaa2-io.h" #include "../../fsl-mc/include/dpaa2-fd.h" #include "../../fsl-mc/include/dpbp.h" #include "../../fsl-mc/include/dpcon.h" #include "dpni.h" #include "dpni-cmd.h" #include "dpaa2-eth-trace.h" #define DPAA2_ETH_STORE_SIZE 16 /* Maximum number of scatter-gather entries in an ingress frame, * considering the maximum receive frame size is 64K */ #define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) /* Maximum acceptable MTU value. It is in direct relation with the hardware * enforced Max Frame Length (currently 10k). */ #define DPAA2_ETH_MFL (10 * 1024) #define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) /* Convert L3 MTU to L2 MFL */ #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) /* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo * frames in the Rx queues (length of the current frame is not * taken into account when making the taildrop decision) */ #define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) /* Buffer quota per queue. Must be large enough such that for minimum sized * frames taildrop kicks in before the bpool gets depleted, so we compute * how many 64B frames fit inside the taildrop threshold and add a margin * to accommodate the buffer refill delay. */ #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) #define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE /* Maximum number of buffers that can be acquired/released through a single * QBMan command */ #define DPAA2_ETH_BUFS_PER_CMD 7 /* Hardware requires alignment for ingress/egress buffer addresses * and ingress buffer lengths. 
*/ #define DPAA2_ETH_RX_BUF_SIZE 2048 #define DPAA2_ETH_TX_BUF_ALIGN 64 #define DPAA2_ETH_RX_BUF_ALIGN 256 #define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) /* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress * buffers large enough to allow building an skb around them and also account * for alignment restrictions */ #define DPAA2_ETH_BUF_RAW_SIZE \ (DPAA2_ETH_RX_BUF_SIZE + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ DPAA2_ETH_RX_BUF_ALIGN) /* We are accommodating a skb backpointer and some S/G info * in the frame's software annotation. The hardware * options are either 0 or 64, so we choose the latter. */ #define DPAA2_ETH_SWA_SIZE 64 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ struct dpaa2_eth_swa { struct sk_buff *skb; struct scatterlist *scl; int num_sg; int num_dma_bufs; }; /* Annotation valid bits in FD FRC */ #define DPAA2_FD_FRC_FASV 0x8000 #define DPAA2_FD_FRC_FAEADV 0x4000 #define DPAA2_FD_FRC_FAPRV 0x2000 #define DPAA2_FD_FRC_FAIADV 0x1000 #define DPAA2_FD_FRC_FASWOV 0x0800 #define DPAA2_FD_FRC_FAICFDV 0x0400 /* Error bits in FD CTRL */ #define DPAA2_FD_CTRL_UFD 0x00000004 #define DPAA2_FD_CTRL_SBE 0x00000008 #define DPAA2_FD_CTRL_FSE 0x00000010 #define DPAA2_FD_CTRL_FAERR 0x00000020 #define DPAA2_FD_RX_ERR_MASK (DPAA2_FD_CTRL_SBE | \ DPAA2_FD_CTRL_FAERR) #define DPAA2_FD_TX_ERR_MASK (DPAA2_FD_CTRL_UFD | \ DPAA2_FD_CTRL_SBE | \ DPAA2_FD_CTRL_FSE | \ DPAA2_FD_CTRL_FAERR) /* Annotation bits in FD CTRL */ #define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ #define DPAA2_FD_CTRL_PTA 0x00800000 #define DPAA2_FD_CTRL_PTV1 0x00400000 /* Frame annotation status */ struct dpaa2_fas { u8 reserved; u8 ppid; __le16 ifpid; __le32 status; } __packed; /* Frame annotation status word is located in the first 8 bytes * of the buffer's hardware annoatation area */ #define DPAA2_FAS_OFFSET 0 #define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) /* Accessors for the hardware annotation fields that we use */ #define dpaa2_get_hwa(buf_addr) \ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE) #define dpaa2_get_fas(buf_addr) \ (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET) /* Error and status bits in the frame annotation status word */ /* Debug frame, otherwise supposed to be discarded */ #define DPAA2_FAS_DISC 0x80000000 /* MACSEC frame */ #define DPAA2_FAS_MS 0x40000000 #define DPAA2_FAS_PTP 0x08000000 /* Ethernet multicast frame */ #define DPAA2_FAS_MC 0x04000000 /* Ethernet broadcast frame */ #define DPAA2_FAS_BC 0x02000000 #define DPAA2_FAS_KSE 0x00040000 #define DPAA2_FAS_EOFHE 0x00020000 #define DPAA2_FAS_MNLE 0x00010000 #define DPAA2_FAS_TIDE 0x00008000 #define DPAA2_FAS_PIEE 0x00004000 /* Frame length error */ #define DPAA2_FAS_FLE 0x00002000 /* Frame physical error */ #define DPAA2_FAS_FPE 0x00001000 #define DPAA2_FAS_PTE 0x00000080 #define DPAA2_FAS_ISP 0x00000040 #define DPAA2_FAS_PHE 0x00000020 #define DPAA2_FAS_BLE 0x00000010 /* L3 csum validation performed */ #define DPAA2_FAS_L3CV 0x00000008 /* L3 csum error */ #define DPAA2_FAS_L3CE 0x00000004 /* L4 csum validation performed */ #define DPAA2_FAS_L4CV 0x00000002 /* L4 csum error */ #define DPAA2_FAS_L4CE 0x00000001 /* Possible errors on the ingress path */ #define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \ DPAA2_FAS_EOFHE | \ DPAA2_FAS_MNLE | \ DPAA2_FAS_TIDE | \ DPAA2_FAS_PIEE | \ DPAA2_FAS_FLE | \ DPAA2_FAS_FPE | \ DPAA2_FAS_PTE | \ DPAA2_FAS_ISP | \ DPAA2_FAS_PHE | \ DPAA2_FAS_BLE | \ DPAA2_FAS_L3CE | \ DPAA2_FAS_L4CE) /* Tx errors */ 
#define DPAA2_FAS_TX_ERR_MASK (DPAA2_FAS_KSE | \ DPAA2_FAS_EOFHE | \ DPAA2_FAS_MNLE | \ DPAA2_FAS_TIDE) /* Time in milliseconds between link state updates */ #define DPAA2_ETH_LINK_STATE_REFRESH 1000 /* Number of times to retry a frame enqueue before giving up. * Value determined empirically, in order to minimize the number * of frames dropped on Tx */ #define DPAA2_ETH_ENQUEUE_RETRIES 10 /* Driver statistics, other than those in struct rtnl_link_stats64. * These are usually collected per-CPU and aggregated by ethtool. */ struct dpaa2_eth_drv_stats { __u64 tx_conf_frames; __u64 tx_conf_bytes; __u64 tx_sg_frames; __u64 tx_sg_bytes; __u64 rx_sg_frames; __u64 rx_sg_bytes; /* Enqueues retried due to portal busy */ __u64 tx_portal_busy; }; /* Per-FQ statistics */ struct dpaa2_eth_fq_stats { /* Number of frames received on this queue */ __u64 frames; }; /* Per-channel statistics */ struct dpaa2_eth_ch_stats { /* Volatile dequeues retried due to portal busy */ __u64 dequeue_portal_busy; /* Number of CDANs; useful to estimate avg NAPI len */ __u64 cdan; /* Number of frames received on queues from this channel */ __u64 frames; /* Pull errors */ __u64 pull_err; }; /* Maximum number of queues associated with a DPNI */ #define DPAA2_ETH_MAX_RX_QUEUES 16 #define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ DPAA2_ETH_MAX_TX_QUEUES) #define DPAA2_ETH_MAX_DPCONS NR_CPUS enum dpaa2_eth_fq_type { DPAA2_RX_FQ = 0, DPAA2_TX_CONF_FQ, }; struct dpaa2_eth_priv; struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; u16 flowid; int target_cpu; struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; void (*consume)(struct dpaa2_eth_priv *, struct dpaa2_eth_channel *, const struct dpaa2_fd *, struct napi_struct *); struct dpaa2_eth_fq_stats stats; }; struct dpaa2_eth_channel { struct dpaa2_io_notification_ctx nctx; struct fsl_mc_device *dpcon; int dpcon_id; int ch_id; int dpio_id; struct napi_struct napi; struct dpaa2_io_store *store; struct dpaa2_eth_priv *priv; int buf_count; struct dpaa2_eth_ch_stats stats; }; struct dpaa2_eth_hash_fields { u64 rxnfc_field; enum net_prot cls_prot; int cls_field; int size; }; /* Driver private data */ struct dpaa2_eth_priv { struct net_device *net_dev; u8 num_fqs; struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; struct dpni_attr dpni_attrs; u16 tx_data_offset; struct fsl_mc_device *dpbp_dev; u16 bpid; struct iommu_domain *iommu_domain; u16 tx_qdid; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
* This is the cpu set on which Rx and Tx conf frames are processed */ struct cpumask dpio_cpumask; /* Standard statistics */ struct rtnl_link_stats64 __percpu *percpu_stats; /* Extra stats, in addition to the ones known by the kernel */ struct dpaa2_eth_drv_stats __percpu *percpu_extras; u16 mc_token; struct dpni_link_state link_state; bool do_link_poll; struct task_struct *poll_thread; /* enabled ethtool hashing bits */ u64 rx_hash_fields; }; /* default Rx hash options, set during probing */ #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ | RXH_L4_B_2_3) #define dpaa2_eth_hash_enabled(priv) \ ((priv)->dpni_attrs.num_queues > 1) /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ #define DPAA2_CLASSIFIER_DMA_SIZE 256 extern const struct ethtool_ops dpaa2_ethtool_ops; extern const char dpaa2_eth_drv_version[]; static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) { return priv->dpni_attrs.num_queues; } #endif /* __DPAA2_H */
<gh_stars>1-10 # -*- coding: utf-8 -*- # @time: 2019-05-13 17:34 # @Author : zpy # @Email : <EMAIL> # @file: brower.py import requests from logging import getLogger from utils import BrowerInfo import time from twisted.internet.error import ( TimeoutError, TCPTimedOutError) log = getLogger('fakebrower') def get_proxy(proxy_type): return requests.get(f'127.0.0.1:5000/getproxy?proxytype={proxy_type}') class FakeBrower(object): def __init__(self, settings): self.use_proxy = settings.get("USE_PROXY", True) # 是否启用代理,默认开启 self.brower_info = BrowerInfo(url=settings.get('BROWERINFO_URL')) @classmethod def from_crawler(cls, crawler): return cls(crawler.settings) def process_request(self, request, spider): self.req_start = time.time() proxy_type = request.url.split(':')[0] if 'proxy' in request.meta: if 'exception' in request.meta and not request.meta["exception"]: return request.meta['proxy'] = self.brower_info.get_proxy(proxy_type, spider.name) # 替换 if self.use_proxy: request.meta['proxy'] = self.brower_info.get_proxy(proxy_type, spider.name) # 初始化获取 request.headers['User-Agent'] = self.brower_info.get_ua() def process_response(self, request, response, spider): self.brower_info.put_proxy(request.meta['proxy'], spider.name, 1) log.info(f"proxy={request.meta['proxy']} cost={time.time()-self.req_start}") return response def process_exception(self, request, exception, spider): if 'proxy' not in request.meta: return if exception: request.meta['exception'] = True if isinstance(exception, (TimeoutError, TCPTimedOutError)): self.brower_info.del_proxy(request.meta['proxy'], spider.name) else: self.brower_info.put_proxy(request.meta['proxy'], spider.name, -1)
/** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ /** * @file common/Serialization-inl.h * * Implementation details, only to be included from Serialization.h */ #include <stdexcept> #include <utility> #include "flashlight/flashlight/common/CppBackports.h" #pragma once namespace fl { namespace detail { template <typename T> using IsOutputArchive = std::is_base_of<cereal::detail::OutputArchiveBase, T>; template <typename T> using IsInputArchive = std::is_base_of<cereal::detail::InputArchiveBase, T>; /** * Wrapper indicating that an expression should be serialized only if the * version is in a certain range. */ template <typename T> struct Versioned { T&& ref; uint32_t minVersion; uint32_t maxVersion; }; template <typename S, typename T> struct SerializeAs { using T0 = cpp::decay_t<T>; T&& ref; std::function<S(const T0&)> saveConverter; std::function<T0(S)> loadConverter; }; // 0 arguments (no-op). template <typename Archive> void applyArchive(Archive& ar, const uint32_t version) {} // 1 argument, general case. template <typename Archive, typename Arg> void applyArchive(Archive& ar, const uint32_t version, Arg&& arg) { ar(std::forward<Arg>(arg)); } // 1 argument, version-restricted. template <typename Archive, typename T> void applyArchive(Archive& ar, const uint32_t version, Versioned<T> varg) { if (version >= varg.minVersion && version <= varg.maxVersion) { applyArchive(ar, version, std::forward<T>(varg.ref)); } } // 1 argument, with conversion, saving. template < typename Archive, typename S, typename T, cpp::enable_if_t<IsOutputArchive<Archive>::value, int> = 0> void applyArchive(Archive& ar, const uint32_t version, SerializeAs<S, T> arg) { if (arg.saveConverter) { applyArchive(ar, version, arg.saveConverter(arg.ref)); } else { applyArchive(ar, version, static_cast<const S&>(arg.ref)); } } // 1 argument, with conversion, loading. template < typename Archive, typename S, typename T, cpp::enable_if_t<IsInputArchive<Archive>::value, int> = 0> void applyArchive(Archive& ar, const uint32_t version, SerializeAs<S, T> arg) { using T0 = cpp::remove_reference_t<T>; S s; applyArchive(ar, version, s); if (arg.loadConverter) { arg.ref = arg.loadConverter(std::move(s)); } else { arg.ref = static_cast<T0>(std::move(s)); } } // 2+ arguments (recurse). template <typename Archive, typename Arg, typename... Args> void applyArchive( Archive& ar, const uint32_t version, Arg&& arg, Args&&... args) { applyArchive(ar, version, std::forward<Arg>(arg)); applyArchive(ar, version, std::forward<Args>(args)...); } } // namespace detail template <typename T> detail::Versioned<T> versioned(T&& t, uint32_t minVersion, uint32_t maxVersion) { return detail::Versioned<T>{std::forward<T>(t), minVersion, maxVersion}; } template <typename S, typename T> detail::SerializeAs<S, T> serializeAs(T&& t) { return detail::SerializeAs<S, T>{std::forward<T>(t), nullptr, nullptr}; } template <typename S, typename T, typename SaveConvFn, typename LoadConvFn> detail::SerializeAs<S, T> serializeAs(T&& t, SaveConvFn saveConverter, LoadConvFn loadConverter) { return detail::SerializeAs<S, T>{ std::forward<T>(t), std::move(saveConverter), std::move(loadConverter)}; } template <typename... Args> void save(const std::string& filepath, const Args&... args) { std::ofstream ofs(filepath, std::ios::binary); save(ofs, args...); } template <typename... 
Args> void save(std::ostream& ostr, const Args&... args) { cereal::BinaryOutputArchive ar(ostr); ar(args...); } template <typename... Args> void load(const std::string& filepath, Args&... args) { std::ifstream ifs(filepath, std::ios::binary); load(ifs, args...); } template <typename... Args> void load(std::istream& istr, Args&... args) { cereal::BinaryInputArchive ar(istr); ar(args...); } namespace detail { /** * This workaround lets us use explicit versioning for af::array; if we'd used * `save(Archive& ar, const af::array& arr, const uint32_t version)` directly, * cereal would complain there are 2 ways to serialize integer types, * because af::array has an implicit ctor from a single `long long`. * * The trick we use here is that C++'s implicit conversion sequence permits * at most one user-defined conversion. Therefore `af::array` may be implicitly * converted to `AfArraySerializeProxy`, but `int` may not. * * For more info, see https://github.com/USCiLab/cereal/issues/132 * and https://en.cppreference.com/w/cpp/language/implicit_conversion */ template <typename T> struct CerealSave { /* implicit */ CerealSave(const T& x) : val(x) {} const T& val; }; } // namespace detail } // namespace fl namespace cereal { // no versioning; simple and unlikely to ever change template <class Archive> void save(Archive& ar, const fl::detail::CerealSave<af::dim4>& dims_) { const auto& dims = dims_.val; int64_t x; for (int i = 0; i < 4; ++i) { x = dims[i]; ar(x); } } template <class Archive> void load(Archive& ar, af::dim4& dims) { int64_t x; for (int i = 0; i < 4; ++i) { ar(x); dims[i] = x; } } template <class Archive> void save( Archive& ar, const fl::detail::CerealSave<af::array>& arr_, const uint32_t /* version */) { const auto& arr = arr_.val; if (arr.issparse()) { throw cereal::Exception( "Serialzation of sparse af::array is not supported yet!"); } std::vector<uint8_t> vec(arr.bytes()); arr.host(vec.data()); ar(arr.dims(), arr.type(), vec); } template <class Archive> void load(Archive& ar, af::array& arr, const uint32_t /* version */) { af::dim4 dims; af::dtype ty; std::vector<uint8_t> vec; ar(dims, ty, vec); arr = af::array(dims, ty); arr.write(vec.data(), vec.size()); } } // namespace cereal
import "dotenv/config"; import mongoose from "mongoose"; // collections interface CollectionType { account: string; collections: [string]; } const collectionSchema = new mongoose.Schema<CollectionType>({ account: String, collections: [String], }); const Collection = mongoose.model<CollectionType>( "Collection", collectionSchema ); // minted type UrlString = string; type Traits = | { trait_types: "Step count"; value: number } | { trait_types: "Loop"; value: "Yes" | "No" }; interface TokenMeta { name: string; date: number; rows: number; description: string; initState: string; image: UrlString; externalUrl: UrlString; attributes: Array<Traits>; baseTokenUri?: UrlString; tokenId?: number; } const mintedSchema = new mongoose.Schema<TokenMeta>({ name: { type: String, required: true }, date: { type: Number, required: true }, rows: { type: Number, index: true, required: true }, description: { type: String, required: true }, initState: { type: String, index: true }, image: { type: String, required: true }, externalUrl: { type: String, required: true }, attributes: { type: [{}], required: true }, baseTokenUri: { type: String, required: true }, tokenId: { type: Number, required: true }, }); const Minted = mongoose.model<TokenMeta>("Minted", mintedSchema); // setup let dbUrl = process.env.NODE_ENV === "development" ? process.env.DB_URL_TEST : process.env.DB_URL; if (!dbUrl) { throw new Error("DB_URL not set in environment variables"); } mongoose.connect(dbUrl); export { Collection, Minted, TokenMeta };
import vertica_sdk


class add2ints(vertica_sdk.ScalarFunction):
    """Return the sum of two integer columns"""

    def __init__(self):
        pass

    def setup(self, server_interface, col_types):
        pass

    def processBlock(self, server_interface, arg_reader, res_writer):
        # Writes a string to the UDx log file.
        server_interface.log("Python UDx - Adding 2 ints!")
        while True:
            # Example of error checking best practices.
            product_id = arg_reader.getInt(2)
            if product_id < 100:
                raise ValueError("Invalid Product ID")
            if arg_reader.isNull(0) or arg_reader.isNull(1):
                raise ValueError("I found a NULL!")
            else:
                first_int = arg_reader.getInt(0)   # First input column.
                second_int = arg_reader.getInt(1)  # Second input column.
                res_writer.setInt(first_int + second_int)  # Sum of input columns.
            res_writer.next()  # Advance the writer to the next output row.
            if not arg_reader.next():
                # Stop processing when there are no more input rows.
                break

    def destroy(self, server_interface, col_types):
        pass


class add2ints_factory(vertica_sdk.ScalarFunctionFactory):
    def createScalarFunction(self, srv):
        return add2ints()

    def getPrototype(self, srv_interface, arg_types, return_type):
        arg_types.addInt()
        arg_types.addInt()
        arg_types.addInt()
        return_type.addInt()

    def getReturnType(self, srv_interface, arg_types, return_type):
        return_type.addInt()
If you can’t believe it’s been 20 years since the Mad Caddies’ inception, well, neither can they. “I look in the mirror some days, and I think, ‘Wow, I look like I’m 36 years old,’” laughed Caddies frontman Chuck Robertson. “But I still feel 19 sometimes.” Since meeting in the halls of Santa Ynez High School in the mid-’90s, Robertson and bandmates Sascha Lazor, Todd Rosenberg, Keith Douglas, and Eduardo Hernandez have toured the world countless times over with their hybridized mix of ska, punk, reggae, and pop and sold more than 400,000 records in the process. This week, the Solvang-born ska punkers release Dirty Rice, their sixth full-length studio album — and first in seven years — on Fat Wreck Chords. Like its predecessors, Dirty Rice thrives on fearless eclecticism. Lead track “Brand New Scar” is a laid-back, head-nod-worthy slice of Cali-bred reggae pop, and it’s followed immediately by mosh pit anthem “Love Myself.” “Making people dance has always been one of the mission statements of the band,” Robertson shared from a San Antonio tour stop this past April. “We’ve always wanted to put on a show where it’s diverse, where people from a lot of different genre backgrounds would enjoy it, and the most important part of that is the rhythmic aspect.” But finding a rhythm for Dirty Rice proved to be a difficult task, at least at the start. Following a two-year-long hiatus, the band regrouped at Fat Wreck Chords’ Motor Studios in 2011 with the intention of writing and recording a new album. “We spent three weeks up there and really hit a wall,” Robertson recalled. “We realized pretty quickly that we’re not a band that can just write a record in a studio in a month. It’s just not the way we operate.” Discouraged, the band returned home with no real plan for what happened next. Years passed, and mini tours popped up here and there, but it wasn’t until early 2013 that the guys finally decided to give it another go. Their meeting place of choice: Rosenberg’s family’s Santa Ynez ranch in the barn-turned-studio space that the Caddies had practiced in as high schoolers. “It was interesting to come back full circle to where the band had its origins,” said Robertson. “A lot of bands pay big money to go record in these destination studios, where you can sleep there and it’s out in the country. We were just really lucky to have that at our disposal. Over the course of those 14 months, we were able to demo 40 or 50 ideas out for the new record — but we cycled through close to 100 ideas.” After years of rotating-door lineup changes, the current Caddies are boasting all of the band’s original members, as well as a number of longtime players, who helped contribute to much of what would become Dirty Rice. Instead of resting solely on Lazor and Robertson’s writing, the band opened up the table to Rosenberg, who had spent his time off from the band penning commercial jingles. Keyboardist Dustin Lanker and bassist Graham Palmer also offered up songs to the band. “This was the most collaborative Mad Caddies record to date,” said Robertson. “There were a lot of cooks in the kitchen, and sometimes it was a little frustrating, but I think the end result kind of showed the maturity of us getting together and writing a little more collectively.” Mature, sure. But the Caddies were not interested in making their “dad record” and made a concerted effort to set the tone for their big return. “We really wanted to make it a cohesive body of work,” said Robertson. “It’s definitely more mid-tempo [than our other albums]. 
There are a few fast tunes, but it’s a lot harder to write four-on-the-floor slam punk songs when you’re in your thirties,” he laughed. “Your neck gets sore and you’re like, ‘Let’s just write something that we can kind of groove to.’” And groove it does. Over the course of Dirty Rice’s 13 tracks, the band offers up numerous takes on the chilled-out ska they’ve long championed. A prime example comes by way of “Shot in the Dark,” a Dixieland-imbued mid-album highlight cowritten by NOFX frontman (and Fat Wreck owner) Fat Mike, who visited the band in Santa Ynez last September to lend some guidance. “He came down right before we started tracking for the final cuts and just kind of helped us sort through songs. We produced the record ourselves, so to have an outside source come in was really crucial,” recalled Robertson. “He’s kind of always done that with our records, though — thrown in his two cents. He’s a great musician, lyricist, a cultural icon,” Robertson laughed. “But he’s also always been a friend of ours. All of us feel really comfortable around Mike, but we also respect his opinion very highly.” It’s that same friendly reverence that’s helped bond the Caddies for close to two decades, too. And now, with a new record in their hands and a world tour already in the works, it’s no surprise to hear that the band is reflecting and even getting a little nostalgic. “There’s been a lot of talk about the 20-year anniversary, which is next year,” said Robertson. “What do we do? Do we play a show and try to have every band member who’s ever been in the Caddies play a song? I don’t know. We’re going to do something fun, just because I don’t think any of us ever imagined that we’d still be doing it after 20 years.” As for what he thinks it is that’s kept the Caddies going, well, that all comes down to good old-fashioned fun — and a bit of a willful spirit. “I’m just a stubborn, stubborn guy. I don’t want to give it up,” laughed Robertson. “But we all have so much fun when we get together. And the crowds are still there. We have multigenerational fans now. We have teenagers coming out to shows with their parents, who have been watching us for 18 years. If the crowds aren’t going away and people are there and having a good time, we’re just kind of like, ‘Why give up? Why stop?’” Dirty Rice comes out Tuesday, May 13, on Fat Wreck Chords. For more on the band, visit madcaddies.com.
def _jitter_learn_timeout(self):
    # Jitter the learn timeout by up to learn_jitter, centered on the
    # configured value, but never go below the cache update guard time.
    return int(max(abs(
        self.learn_timeout - (self.learn_jitter / 2) +
        random.randint(0, self.learn_jitter)),
        self.cache_update_guard_time))
"""Helpers for the parameter fitting of Jazzy.""" # optimisation/helpers.py import _pickle as cpickle import bz2 import os import config import optuna from optuna.samplers import TPESampler def load_data_configuration(study_filename): """Abstraction for input and output loading and configuration setting. The method only accepts the name of the output study file, whilst the rest of the parameters must be defined in a config file. """ # load the input data_path = os.path.abspath(os.path.join(os.getcwd(), "..", config.DATA_PATH)) optuna_path = os.path.join(data_path, config.OPTUNA_DIRNAME) data_filepath = os.path.join(optuna_path, config.PRECALCULATED_DATA_FILENAME) data = bz2.BZ2File(data_filepath, "rb") input_data = cpickle.load(data) # configure output study_filepath = os.path.join(optuna_path, study_filename) return input_data, study_filepath def run_optimisation(objective, study_filepath, verbose=False): """Abstraction for Optuna fitting. Includes early stopping logic and fixed seed for reproducibility. The fitting can be either verbose or not but always dumps a pickle file with the full logs of the process. """ # early stopping logic (https://github.com/optuna/optuna/issues/1001) class EarlyStoppingExceeded(optuna.exceptions.OptunaError): early_stop = config.OPTUNA_EARLY_STOPPING early_stop_count = 0 best_score = None def early_stopping_opt(study, trial): if EarlyStoppingExceeded.best_score is None: EarlyStoppingExceeded.best_score = study.best_value if study.best_value < EarlyStoppingExceeded.best_score: EarlyStoppingExceeded.best_score = study.best_value EarlyStoppingExceeded.early_stop_count = 0 else: if ( EarlyStoppingExceeded.early_stop_count > EarlyStoppingExceeded.early_stop ): EarlyStoppingExceeded.early_stop_count = 0 raise EarlyStoppingExceeded() else: EarlyStoppingExceeded.early_stop_count = ( EarlyStoppingExceeded.early_stop_count + 1 ) return # run the optimisation with sampler for reproducibility if not verbose: optuna.logging.set_verbosity(optuna.logging.WARNING) sampler = TPESampler(seed=5) study = optuna.create_study(sampler=sampler) try: study.optimize(objective, timeout=None, callbacks=[early_stopping_opt]) except EarlyStoppingExceeded: print( f"Early stopping exceeded: No new best scores \ after {config.OPTUNA_EARLY_STOPPING} iterations" ) print(f"The best parameters were {study.best_params}") # write the results out with bz2.BZ2File(study_filepath, "w") as f: cpickle.dump(study, f)
def save(self):
    # Save every observation month, then the forecast report if present.
    for obsm in self.observationMonths:
        self.observationMonths[obsm].save()
    if self.forecastReport:
        self.forecastReport.save()
// The addition of OffsetStart and OffsetEnd changes the default // behaviour of Marquee. Passing start==width ann end==0 mimics the // old default. func TestMarqueeOldBehavior(t *testing.T) { m := Marquee{ Width: 6, OffsetStart: 6, OffsetEnd: 0, Child: Row{ Children: []Widget{ Box{Width: 3, Height: 3, Color: color.RGBA{0xff, 0, 0, 0xff}}, Box{Width: 3, Height: 2, Color: color.RGBA{0, 0xff, 0, 0xff}}, Box{Width: 3, Height: 1, Color: color.RGBA{0, 0, 0xff, 0xff}}, }, }, } assert.Equal(t, 22, m.FrameCount()) assert.Equal(t, nil, checkImage([]string{ "......", "......", "......", }, m.Paint(image.Rect(0, 0, 100, 100), 0))) assert.Equal(t, nil, checkImage([]string{ "....rr", "....rr", "....rr", }, m.Paint(image.Rect(0, 0, 100, 100), 2))) assert.Equal(t, nil, checkImage([]string{ "rrrggg", "rrrggg", "rrr...", }, m.Paint(image.Rect(0, 0, 100, 100), 6))) assert.Equal(t, nil, checkImage([]string{ "rgggbb", "rggg..", "r.....", }, m.Paint(image.Rect(0, 0, 100, 100), 8))) assert.Equal(t, nil, checkImage([]string{ "b.....", "......", "......", }, m.Paint(image.Rect(0, 0, 100, 100), 14))) assert.Equal(t, nil, checkImage([]string{ "......", "......", "......", }, m.Paint(image.Rect(0, 0, 100, 100), 15))) assert.Equal(t, nil, checkImage([]string{ "...rrr", "...rrr", "...rrr", }, m.Paint(image.Rect(0, 0, 100, 100), 18))) assert.Equal(t, nil, checkImage([]string{ "rrrggg", "rrrggg", "rrr...", }, m.Paint(image.Rect(0, 0, 100, 100), 21))) assert.Equal(t, nil, checkImage([]string{ "rrrggg", "rrrggg", "rrr...", }, m.Paint(image.Rect(0, 0, 100, 100), 22))) assert.Equal(t, nil, checkImage([]string{ "rrrggg", "rrrggg", "rrr...", }, m.Paint(image.Rect(0, 0, 100, 100), 26))) assert.Equal(t, nil, checkImage([]string{ "rrrggg", "rrrggg", "rrr...", }, m.Paint(image.Rect(0, 0, 100, 100), 100000))) }
// Test that Parse parses a simple valid mapping correctly. func TestParse(t *testing.T) { var wantedErr error = nil wantedMapping := Mapping{ Matcher: matcher.MatcherWithLogicalOperator{ matcher.MatcherWithoutLogicalOperator{matcher.Data1, matcher.EqualToOperator, 44}, matcher.LogicalAndOperator, matcher.MatcherWithoutLogicalOperator{matcher.Data2, matcher.EqualToOperator, 64}, }, Keycode: 1, } s := "data1 == 44 && data2 == 64 -> 1" mapping, err := Parse(s) if err != wantedErr { t.Errorf("Parse(%q) returns an incorrect error %q, want %v.", s, err, wantedErr) } if !mapping.Equal(wantedMapping) { t.Errorf("Parse(%q) returns an incorrect mapping %v, want %v.", s, mapping, wantedMapping) } }
There’s a saying that "you can’t fight city hall" but a North Kildonan couple has done exactly that, twice, and won each time.

Darrell and Cheryl Pakosh are waiting for formal word that, for a second time, they’ve successfully blocked a condominium development adjacent to their Henderson Highway river lot home.
But the couple’s fight with city hall has greater implications than simply what gets built next to their home – it’s forced the civic administration to launch a revision of its planning approval process and set off fears that other controversial but already-approved projects will have to go back to the drawing board.

"All we wanted was a fair process and it looks like we’re going to get it now," Darrell Pakosh said.

The concern reverberating within city hall’s administration building is the impact the decision will have on the approval process. Currently, developers make an initial application at a hearing of three area councillors, known as the community committee. Decisions of the community committee can be appealed to other civic committees. But there is no requirement for a re-opening of the public hearing.

For this project, the community committee approved the project but placed a caveat that no construction take place within 25 feet of the joint property line with the Pakoshes. However, at a meeting of the senior executive policy committee (EPC), the 25-feet requirement was removed and the rezoning was approved as the developer requested.

Concern among the administration was so high that the issue was brought to a closed-door session of Mayor Brian Bowman and EPC on July 12 and then later to all members of council during another private briefing.

"The administration was all in a panic saying how they were going to have to re-invent the process and change how hearings are conducted," said an individual familiar with the issue who requested anonymity as the briefing was classified as confidential.

The city administration was asked to comment on the proposed settlement and its impact on the approval process but they did not respond, as the matter is before the courts.

The condo developers, Betty and Werner Neufeld of East St. Paul, could not be reached for comment. Their lawyer, Gord Steeves, said neither he nor the Neufelds would comment on the project or the Pakoshes’ dispute with city hall.

Lawyer Charles Chappell, who represents the Pakoshes, said the North Kildonan couple has been adamant that the city’s approval process – which allows elected officials to overturn decisions made at a public hearing – is a violation of fundamental justice.

"You shouldn’t be overturning the findings of a public hearing without holding another public hearing," Chappell said. "That’s the issue. If (a standing policy committee) or EPC wants to overturn what was decided at the public hearing, then they’ve got to have a process for having a second hearing or just refer it back for a further hearing by the community committee."

Chappell said the terms of the settlement were agreed to back in March but he hasn’t heard anything since then. He said he wasn’t aware the administration had briefed councillors in private earlier this month but added he’s not surprised.

[Photo: Cheryl and Darrell Pakosh outside their home in 2015. John Woods / Winnipeg Free Press]

Chappell said city hall has its hands full. In exchange for his clients dropping the case, the city has to undo all the approvals for the project and decide how to rewrite the appeal process.
A second source said the administration recommended to EPC at the July 12 meeting that it endorse the settlement with the Pakosh family, but EPC decided to postpone a decision. Chappell said if elected officials won’t honour the deal the administration proposed, he’ll advise his clients to resume the court case.

Chappell said officials are likely concerned the settlement will also affect other developments where decisions of a community committee hearing were overturned, and how they’ll respond to them. The dispute launched by the Pakoshes could see the city recommend the creation of an independent planning board to replace the community committee hearings, Chappell said.

"The public hearing process is to hear what the people think about changes in land use. It’s not perfect but what would be a different system?" Chappell said.

Beating city hall – twice – is rewarding but also expensive

Darrell Pakosh said it’s cost his family more than $150,000 to challenge what he believes is a faulty plan approval process. "We spent $50,000 going to court the first time … and we probably spent another $100,000 this time," Pakosh said.

The Pakoshes have been fighting city hall since 2012, when several civic committees – backed by civic planning staff – approved the apartment block-style condominium project on the lot immediately south of theirs. The condo is planned for a 66-foot-wide lot that runs 482 feet back to the Red River. The Pakoshes said it would be like living next door to a three-storey wall a few feet from their property line.

City hall initially approved the project without requiring the developers, Betty and Werner Neufeld of East St. Paul, to produce drawings of the plans at a public hearing. The Pakoshes challenged the approval in 2013 in court, which sided with them – quashing the bylaw, the rezoning and an amendment to the area’s secondary plan.

But the Neufelds went back to city hall with a new plan, increasing the number of units to 14 from 10 and proposing to construct the building within feet of the northern property line. At a public hearing held by the community committee in July 2015, councillors approved the project but with the caveat that no building be constructed within 25 feet of the northern property line. When Mayor Brian Bowman and members of his executive policy committee met on Sept. 23, 2015, they removed the 25-foot restriction and approved the rezoning, allowing the condominium project to be built where a single-family home used to stand.

An appeal committee met in November 2015 and removed most of the other conditions imposed by the community committee. The Pakosh family went back to court in December 2015, charging the process was unfair and asking the court – again – to quash the approvals.

Lawyer Charles Chappell, who represents the Pakosh family, said it was the city that approached his clients in March with an offer to settle: the city would cancel all the approvals and require the Neufelds to resubmit their plans for consideration.

Darrell Pakosh said he suspects the Neufelds will submit another proposal but is confident the final design will be something his family can live with. "They’ll have to sit down with us … not all this behind-the-scenes crap," Pakosh said. "(The city) says one thing and then something totally different shows up. 
Eventually there’s something that’s going to be happening (next door) but right now, they have to sit down with us, consult with us and design with us, the way they should have five years ago." [email protected]
Introducing The D.Y.E, a three-man hip hop group from Melbourne’s north, hell-bent on getting their music to the people. Made up of DJ Marshall, MHz and Slam Master D, The D.Y.E already have two mix-tapes to their name. The debut EP, entitled Sorry For The Stickers, references the infamous D.Y.E stickers which, as any local could tell you, have been plastered all over Melbourne.

With their energetic party sound reminiscent of J5 or the Beastie Boys, The D.Y.E pride themselves on being masters of the live show. The EP’s general vibe is custom-made for a festival crowd, with plenty of call-and-response and catchy hooks.

Lighters In The Sky is a track about the ongoing rejection aspiring artists encounter when trying to secure shows and opportunities. In this song, the trio hold on to the idea of being able to play a huge show one day; seeing lighters in the sky is what keeps them driven. With a sample from Bedouin Soundclash, the track features vibrant vocals from Melbourne jazz/soul singer Mo.

“The D.Y.E are here to entertain. There are 10 year olds out there that have a tighter flow than me, but they ain’t got the presence or style to back it up.” – The D.Y.E
// hooks/useMeasurements/index.ts (from sheldarr/heating-boiler-controller-panel)
import axios from 'axios';
import useSWR from 'swr';

import { WebSocketEvents } from '../../events';
import useSocket from '../useSocket';
import { Measurement } from '../../database';

// Fetch the measurement list from the controller API and unwrap the axios response
const fetcher = (url: string) =>
  axios.get<Measurement[]>(url).then(({ data }) => data);

// SWR-backed hook that keeps measurements fresh: it revalidates whenever the
// server pushes a REFRESH_MEASUREMENTS event over the websocket
const useMeasurements = () => {
  const response = useSWR('/api/controller/measurements', fetcher);

  useSocket(WebSocketEvents.REFRESH_MEASUREMENTS, () => {
    response.mutate();
  });

  return response;
};

export default useMeasurements;
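For context, here is a minimal sketch of how such a hook might be consumed in a React component. The component name, the markup, and the import path are assumptions for illustration; only useMeasurements itself comes from the file above.

// MeasurementsPanel.tsx (hypothetical consumer of the hook above)
import React from 'react';

import useMeasurements from '../hooks/useMeasurements';

const MeasurementsPanel = () => {
  // data is undefined until the first fetch resolves; error is set on failure
  const { data, error } = useMeasurements();

  if (error) return <p>Failed to load measurements</p>;
  if (!data) return <p>Loading…</p>;

  // Dump the raw measurements; a real panel would chart or format them
  return <pre>{JSON.stringify(data, null, 2)}</pre>;
};

export default MeasurementsPanel;

The design choice worth noting: pairing SWR's mutate() with a websocket push event avoids timer-based polling, so the client refetches only when the controller actually reports new data.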
// automatically converted from C# to C++ by convert_cs_to_cpp.sh ver. 1.1
#ifndef _Configurator_h_
#define _Configurator_h_

#include <string> // std::string

#include "../Localisation/NumberToWords/INumberToWordsConverter.h" // INumberToWordsConverter
#include "../Configuration/NumberToWordsConverterRegistry.h" // NumberToWordsConverterRegistry
#include "../Configuration/LocaliserRegistry.h" // LocaliserRegistry

//using System;
//using System::Collections::Generic;
//using System::Globalization;
//using System::Reflection;
//using Humanizer::DateTimeHumanizeStrategy;
//using Humanizer::Localisation::Formatters;
//using Humanizer::Localisation::NumberToWords;
//using Humanizer::Localisation::Ordinalizers;
//using Humanizer::Localisation::CollectionFormatters;

namespace Humanizer {
namespace Configuration {

/// <summary>
/// Provides a configuration point for Humanizer
/// </summary>
class Configurator {
    typedef Humanizer::Localisation::NumberToWords::INumberToWordsConverter INumberToWordsConverter;

// Blocks not yet converted from C# are fenced off behind #ifdef XXX.
#ifdef XXX
private:
    static const LocaliserRegistry<ICollectionFormatter> _collectionFormatters = new CollectionFormatterRegistry();

    /// <summary>
    /// A registry of formatters used to format collections based on the current locale
    /// </summary>
public:
    static LocaliserRegistry<ICollectionFormatter> CollectionFormatters {
        get { return _collectionFormatters; }
    }

private:
    static const LocaliserRegistry<IFormatter> _formatters = new FormatterRegistry();

    /// <summary>
    /// A registry of formatters used to format strings based on the current locale
    /// </summary>
public:
    static LocaliserRegistry<IFormatter> Formatters {
        get { return _formatters; }
    }
#endif

private:
    // Holds the registry consulted by GetNumberToWordsConverter() below.
    class NumberToWordsConverters {
        static const LocaliserRegistry<INumberToWordsConverter> *_numberToWordsConverters;

        /// <summary>
        /// A registry of number to words converters used to localise ToWords and ToOrdinalWords methods
        /// </summary>
    public:
        static const LocaliserRegistry<INumberToWordsConverter>* get() {
            return _numberToWordsConverters;
        }
    };

#ifdef XXX
private:
    static const LocaliserRegistry<IOrdinalizer> _ordinalizers = new OrdinalizerRegistry();

    /// <summary>
    /// A registry of ordinalizers used to localise Ordinalize method
    /// </summary>
public:
    static LocaliserRegistry<IOrdinalizer> Ordinalizers {
        get { return _ordinalizers; }
    }

    static ICollectionFormatter CollectionFormatter {
        get { return CollectionFormatters.ResolveForUiCulture(); }
    }

    /// <summary>
    /// The formatter to be used
    /// </summary>
    /// <param name="culture">The culture to retrieve formatter for. Null means that current thread's UI culture should be used.</param>
    static IFormatter GetFormatter(std::string culture) {
        return Formatters.ResolveForCulture(culture);
    }
#endif

public:
    /// <summary>
    /// The converter to be used
    /// </summary>
    /// <param name="culture">The culture to retrieve number to words converter for. Null means that current thread's UI culture should be used.</param>
    static const INumberToWordsConverter* GetNumberToWordsConverter(const std::string &culture) {
        return NumberToWordsConverters::get()->ResolveForCulture(culture);
    }

#ifdef XXX
    /// <summary>
    /// The ordinalizer to be used
    /// </summary>
    static IOrdinalizer Ordinalizer {
        get { return Ordinalizers.ResolveForUiCulture(); }
    }

private:
    static IDateTimeHumanizeStrategy _dateTimeHumanizeStrategy = new DefaultDateTimeHumanizeStrategy();

    /// <summary>
    /// The strategy to be used for DateTime.Humanize
    /// </summary>
public:
    static IDateTimeHumanizeStrategy DateTimeHumanizeStrategy {
        get { return _dateTimeHumanizeStrategy; }
        set { _dateTimeHumanizeStrategy = value; }
    }

private:
    static IDateTimeOffsetHumanizeStrategy _dateTimeOffsetHumanizeStrategy = new DefaultDateTimeOffsetHumanizeStrategy();

    /// <summary>
    /// The strategy to be used for DateTimeOffset.Humanize
    /// </summary>
public:
    static IDateTimeOffsetHumanizeStrategy DateTimeOffsetHumanizeStrategy {
        get { return _dateTimeOffsetHumanizeStrategy; }
        set { _dateTimeOffsetHumanizeStrategy = value; }
    }

private:
    static const std::function<PropertyInfo, bool> DefaultEnumDescriptionPropertyLocator = p => p.Name == "Description";

    static std::function<PropertyInfo, bool> _enumDescriptionPropertyLocator = DefaultEnumDescriptionPropertyLocator;

    /// <summary>
    /// A predicate function for description property of attribute to use for Enum.Humanize
    /// </summary>
public:
    static std::function<PropertyInfo, bool> EnumDescriptionPropertyLocator {
        get { return _enumDescriptionPropertyLocator; }
        set { _enumDescriptionPropertyLocator = value ?? DefaultEnumDescriptionPropertyLocator; }
    }
#endif
};

} // namespace Configuration
} // namespace Humanizer

#endif // _Configurator_h_