def aggregate_values(self, parameter_names, arguments):
    # Pass-through aggregation: returns the parameter names and arguments unchanged.
    return parameter_names, arguments |
#ifndef MAJ_ARG_PARSE_H
#define MAJ_ARG_PARSE_H
void
usage( const char *s );
unsigned long
parse_qty( const char *s );
#endif
|
package br.com.luvva.webcam.test;
import br.com.jwheel.xml.model.PathPreferences;
import javax.inject.Singleton;
/**
* @author <NAME>, <NAME>. - <EMAIL>
*/
@Singleton
public class MyPathPreferences extends PathPreferences
{
@Override
public String getRootFolderName ()
{
return "test-webcam";
}
}
|
package greencity.mapping;
import greencity.dto.econewscomment.EcoNewsCommentDto;
import greencity.dto.econewscomment.EcoNewsCommentAuthorDto;
import greencity.entity.EcoNewsComment;
import greencity.entity.enums.CommentStatus;
import org.modelmapper.AbstractConverter;
import org.springframework.stereotype.Component;
@Component
public class EcoNewsCommentDtoMapper extends AbstractConverter<EcoNewsComment, EcoNewsCommentDto> {
@Override
protected EcoNewsCommentDto convert(EcoNewsComment ecoNewsComment) {
EcoNewsCommentDto dto = new EcoNewsCommentDto();
dto.setId(ecoNewsComment.getId());
dto.setModifiedDate(ecoNewsComment.getModifiedDate());
if (ecoNewsComment.isDeleted()) {
dto.setStatus(CommentStatus.DELETED);
return dto;
} else if (ecoNewsComment.getCreatedDate().isEqual(ecoNewsComment.getModifiedDate())) {
dto.setStatus(CommentStatus.ORIGINAL);
} else {
dto.setStatus(CommentStatus.EDITED);
}
dto.setText(ecoNewsComment.getText());
dto.setAuthor(EcoNewsCommentAuthorDto.builder()
.id(ecoNewsComment.getUser().getId())
.name(ecoNewsComment.getUser().getName())
.userProfilePicturePath(ecoNewsComment.getUser().getProfilePicturePath())
.build());
dto.setLikes(ecoNewsComment.getUsersLiked().size());
dto.setCurrentUserLiked(ecoNewsComment.isCurrentUserLiked());
return dto;
}
}
|
//
// QNSubscription.h
// Qonversion
//
// Created by <NAME> on 14.05.2021.
// Copyright © 2021 Qonversion Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
typedef NS_ENUM(NSInteger, QNSubscriptionRenewState){
QNSubscriptionRenewStateUnknown = -1,
QNSubscriptionRenewStateWillRenew = 1,
QNSubscriptionRenewStateCanceled = 2,
QNSubscriptionRenewStateBillingIssue = 3
} NS_SWIFT_NAME(Qonversion.SubscriptionRenewState);
typedef NS_ENUM(NSInteger, QNSubscriptionPeriodType){
QNSubscriptionPeriodTypeUnknown = -1,
QNSubscriptionPeriodTypeNormal = 1,
QNSubscriptionPeriodTypeTrial = 2,
QNSubscriptionPeriodTypeIntro = 3
} NS_SWIFT_NAME(Qonversion.SubscriptionPeriodType);
@interface QNSubscription : NSObject
@property (nonatomic, copy, readonly) NSString *periodDuration;
@property (nonatomic, strong, readonly) NSDate *startDate;
@property (nonatomic, strong, readonly) NSDate *currentPeriodStartDate;
@property (nonatomic, strong, readonly) NSDate *currentPeriodEndDate;
@property (nonatomic, copy, readonly) NSString *currentPeriodTypeRawValue;
@property (nonatomic, assign, readonly) QNSubscriptionPeriodType currentPeriodType;
@property (nonatomic, assign, readonly) QNSubscriptionRenewState renewState;
@property (nonatomic, copy, readonly) NSString *object;
@end
NS_ASSUME_NONNULL_END
|
// src/app/app.module.ts
import { RouterModule } from '@angular/router';
import { BrowserModule } from '@angular/platform-browser';
import { LocationStrategy, PathLocationStrategy, HashLocationStrategy } from '@angular/common';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { NgModule, NO_ERRORS_SCHEMA, APP_INITIALIZER } from '@angular/core';
import { TranslateHttpLoader } from '@ngx-translate/http-loader';
import { TranslateLoader } from '@ngx-translate/core';
import { TranslateModule } from '@ngx-translate/core';
import { TheHawkerConfig } from 'app/config/TheHawkerConfig';
import { TheHawkerRouter } from 'app/app.routes';
import { FormsModule } from '@angular/forms';
import { HttpModule, Http } from "@angular/http";
import { BreadcrumbModule } from 'angular2-crumbs';
import { MDBBootstrapModule } from './typescripts/angular-bootstrap-md/free';
import { MDBBootstrapModulePro } from './typescripts/angular-bootstrap-md/pro';
import { AgmCoreModule } from './typescripts/angular-bootstrap-md/free/angular2-google-maps/ts/core/';
import { AppComponent } from 'app/app.component';
import { PageNotFoundComponent } from 'app/page-not-found.component';
import { TheHawkerTemplate } from 'app/the-hawker-template.component';
import { CoreModule } from 'app/the-hawker/CORE/CORE.module';
import { InventoryManagementModule } from 'app/the-hawker/InventoryManagement/InventoryManagement.module';
import { CustomerManagementModule } from 'app/the-hawker/CustomerManagement/CustomerManagement.module';
import { ModuleManagementModule } from 'app/the-hawker/ModuleManagement/ModuleManagement.module';
import { UserManagementModule } from 'app/the-hawker/UserManagement/UserManagement.module';
// AoT requires an exported function for factories
export function HttpLoaderFactory(http: Http) {
return new TranslateHttpLoader(http, "assets/i18n/", ".json");
}
@NgModule({
declarations: [
AppComponent,
TheHawkerTemplate,
PageNotFoundComponent
],
imports: [
BrowserModule,
BreadcrumbModule.forRoot(),
BrowserAnimationsModule,
FormsModule,
HttpModule,
RouterModule.forRoot(TheHawkerRouter),
MDBBootstrapModule.forRoot(),
MDBBootstrapModulePro.forRoot(),
TranslateModule.forRoot({
loader: {
provide: TranslateLoader,
useFactory: HttpLoaderFactory,
deps: [Http]
}
}),
CoreModule,
InventoryManagementModule,
CustomerManagementModule,
ModuleManagementModule,
UserManagementModule,
// AgmCoreModule.forRoot({
// apiKey: 'google_maps_api_key'
// })
],
providers: [TheHawkerConfig,
{ provide: LocationStrategy, useClass: HashLocationStrategy },
{ provide: APP_INITIALIZER, useFactory: (config: TheHawkerConfig) => () => config.load(), deps: [TheHawkerConfig], multi: true }
],
bootstrap: [AppComponent],
schemas: [NO_ERRORS_SCHEMA]
})
export class AppModule { } |
/**
* \brief Check if receive FIFO is not empty
*
* \param [in] uart identifies UART to use
*
* \return true if FIFO is not empty
*
*/
static inline bool hw_uart_receive_fifo_not_empty(HW_UART_ID uart)
{
ASSERT_ERROR(uart == HW_UART2);
return HW_UART_REG_GETF(uart, USR, UART_RFNE) != 0;
} |
{-# LANGUAGE CPP #-}
#define DEBUG 0
#define TEST 0
module Bus (
Bus(..), newBus, Passenger(..), board, drive, ride, wait, tryride, unboard
) where
import Control.Concurrent
import Control.Monad
import Data.Either
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as M
import System.Mem.Weak
#if DEBUG || TEST
import System.IO.Unsafe (unsafePerformIO)
#endif
newtype Bus a = Bus (MVar (Int, Map Int (Weak (MVar a))))
newtype Passenger a = Passenger (MVar a)
instance Show (Passenger a) where
show _ = "Passenger{}"
instance Show (Bus a) where
show _ = "Bus{}"
-- | Create a new empty bus.
newBus :: IO (Bus a)
newBus = Bus <$> newMVar (0, M.empty)
-- | Create a new passenger boarded onto the given bus.
board :: Bus a -> IO (Passenger a)
board (Bus bus) = modifyMVar bus $ \(next,pid) -> do
pidvar <- newEmptyMVar
weak <- mkWeakMVar pidvar (passengerLeft bus next)
let pid' = M.insert next weak pid
return ((next + 1, pid'), Passenger pidvar)
passengerLeft bus id = modifyMVar_ bus $ \(next,pid) -> do
t <- myThreadId
debug (show t ++ ": passenger " ++ show id ++ " left the bus")
return (next, M.delete id pid)
-- | Explicitly remove a passenger from the bus, so that it doesn't receive
-- | future messages.
unboard :: Bus a -> Passenger a -> IO ()
unboard _bus _passenger = return ()
-- Actually we rely on the weak vars stuff to get rid of passenger corpses.
activePassengers :: Bus a -> IO [MVar a]
activePassengers (Bus bus) = do
(next,map) <- takeMVar bus
lost <- forM (M.toList map) $ \(k,w) -> do
mv <- deRefWeak w
case mv of
Just v -> return (Left v)
Nothing -> return (Right k)
let (lefts,rights) = partitionEithers lost
let map' = foldr M.delete map rights
putMVar bus (next,map')
debug ("drive: lost " ++ show (length rights) ++ " passengers")
return lefts
-- | Drive the bus with a value, sending it to each travelling passenger.
-- | This will block until every passenger has gotten the message.
drive :: Bus a -> a -> IO ()
drive bus value = do
active <- activePassengers bus
forM_ active $ \v -> putMVar v value
-- | Ride the bus: block until there's a value to receive, then make ourselves
-- | available to receive again.
ride :: Passenger a -> IO a
ride (Passenger v) = takeMVar v
-- | Wait for the bus to arrive, but don't ride it.
wait :: Passenger a -> IO ()
wait (Passenger v) = readMVar v >> return ()
-- | Check if the bus is here yet.
tryride :: Passenger a -> IO (Maybe a)
tryride (Passenger v) = tryTakeMVar v
#if DEBUG || TEST
-- NOINLINE keeps the global lock from being duplicated by inlining.
{-# NOINLINE putstrlock #-}
putstrlock = unsafePerformIO (newMVar ())
lockedPutStrLn s = withMVar putstrlock $ \() -> do
  putStrLn s
#endif
#if DEBUG
debug = lockedPutStrLn
#else
debug _ = return ()
#endif
#if TEST
testBus j n = do
bus <- newBus
passengers <- replicateM n (board bus)
let traveller p = do
t <- myThreadId
msg <- ride p
lockedPutStrLn (show t ++ ": travelled to " ++ show msg)
if msg == j then return () else traveller p
mapM_ (forkIO . traveller) passengers
forM_ [1..j] $ \j -> do
lockedPutStrLn ("MAIN: driving bus to " ++ show j)
drive bus j
lockedPutStrLn ("MAIN: threads should be dead, driving bus to " ++ show (j + 1))
drive bus (j + 1)
#endif
|
__author__ = 'Walnut'
import math
# Minimum taxis for groups of 1-4 friends when each car seats 4.
n = int(input())
line = input().split()
arr = [0, 0, 0, 0]  # arr[k-1] counts groups of size k
for i in line:
    arr[int(i) - 1] += 1
car = 0
car += arr[3]                      # each group of 4 fills a car
car += arr[2]                      # each group of 3 rides with at most one single
arr[0] -= arr[2]
car += math.ceil(arr[1] / 2)       # groups of 2 pair up, two per car
if arr[1] % 2 == 1: arr[0] -= 2    # an odd group of 2 takes up to two singles along
if arr[0] > 0: car += math.ceil(arr[0] / 4)  # leftover singles ride four per car
print(car)
|
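For reference, a minimal sketch wrapping the same greedy in a testable function (the sample group sizes are hypothetical):
import math

def min_cars(sizes):
    # Same greedy as above: count groups by size, then pack cars of 4.
    arr = [0, 0, 0, 0]
    for s in sizes:
        arr[s - 1] += 1
    cars = arr[3] + arr[2]         # fours ride alone; each three may absorb a single
    singles = arr[0] - arr[2]
    cars += math.ceil(arr[1] / 2)  # pair up the twos
    if arr[1] % 2 == 1:
        singles -= 2               # the odd two rides with up to two singles
    if singles > 0:
        cars += math.ceil(singles / 4)
    return cars

assert min_cars([1, 2, 4, 3, 3]) == 4 |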
/**
* Converts a block-position to a map-tile-coordinate
*/
public Vector2i posToTile(Vector3d pos){
pos = pos.sub(new Vector3d(gridOrigin.getX(), 0.0, gridOrigin.getY()));
return Vector2i.from(
(int) Math.floor(pos.getX() / getTileSize().getX()),
(int) Math.floor(pos.getZ() / getTileSize().getY())
);
} |
/**
* <code>required string barendtime = 7;</code>
*/
public Builder clearBarendtime() {
bitField0_ = (bitField0_ & ~0x00000040);
barendtime_ = getDefaultInstance().getBarendtime();
onChanged();
return this;
} |
// LoadSheet loads a file in some directory with sheets of (w,h) sized sprites.
// This will blow away any cached sheet with the same fileName.
func (c *Cache) LoadSheet(file string, cellSize intgeom.Point2) (*Sheet, error) {
	// Always reload from disk; any previously cached sheet is replaced below.
	rgba, err := c.loadSprite(file, 0)
	if err != nil {
		return nil, err
	}
sheet, err := MakeSheet(rgba, cellSize)
if err != nil {
return nil, err
}
c.sheetLock.Lock()
c.loadedSheets[file] = sheet
c.loadedSheets[filepath.Base(file)] = sheet
c.sheetLock.Unlock()
return sheet, nil
} |
// ret: -1 error, 0 more field, 1 no more field
static int socket_data_info_get(char *buf, uint32_t buflen, at_data_check_cb_t valuecheck)
{
uint32_t i = 0;
if (NULL == buf || 0 == buflen) {
return -1;
}
    do {
        /* Bounds check before touching buf[i] to avoid reading past the end. */
        if (i >= buflen) {
            LOGE(TAG, "Data too long, reader is %s \r\n", buf);
            return -1;
        }
        at.parse(&buf[i], 1);
        if (buf[i] == ',') {
            buf[i] = 0;
            break;
        } else if (buf[i] == '\r') {
            LOGD(TAG, "********delimiter found here********\n");
            buf[i] = 0;
            return 1;
        }
if (NULL != valuecheck) {
if (valuecheck(buf[i])) {
LOGE(TAG, "Invalid string!!!, reader is %s \r\n", buf);
return -1;
}
}
i++;
} while (1);
return 0;
} |
North Korean dictator Kim Jong-un looks at farm machinery in an undated photo released by North Korea’s Korean Central News Agency on Thursday. KCNA/Reuters
Well, North Korea is at it again. From CNN:
State news agency KCNA has announced that North Korea will set its clocks back by 30 minutes to “Pyongyang time” on August 15.
As always with North Korea, though, the humor is closely connected to something tragic—in this case, a belligerent grudge against Japan, which was responsible for changing the time zone of the entire Korean peninsula more than 100 years ago.
“The wicked Japanese imperialists committed such unpardonable crimes as depriving Korea of even its standard time while mercilessly trampling down its land with 5 000 year-long history and culture and pursuing the unheard-of policy of obliterating the Korean nation,” KCNA reported on Friday.
North Korea will now keep its clocks half an hour behind South Korea (and Japan). |
from typing import Optional, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor


def warp_affine(src: Tensor, M: Tensor, dsize: Tuple[int, int], mode: Optional[str] = 'bilinear',
                padding_mode: Optional[str] = 'zeros') -> Tensor:
    if not torch.is_tensor(src):
        raise TypeError("Input src type is not a Tensor. Got {}".format(type(src)))
    if not torch.is_tensor(M):
        raise TypeError("Input M type is not a Tensor. Got {}".format(type(M)))
    if not len(src.shape) == 4:
        raise ValueError("Input src must be a BxCxHxW tensor. Got {}".format(src.shape))
    if not (len(M.shape) == 3 and M.shape[-2:] == (2, 3)):
        raise ValueError("Input M must be a Bx2x3 tensor. Got {}".format(M.shape))
    try:
        # Expand each 2x3 affine matrix into a full 3x3 homogeneous transform.
        M_3x3_tensor = F.pad(M, [0, 0, 0, 1, 0, 0], mode="constant", value=0)
        M_3x3_tensor[:, 2, 2] += 1.0
        dst_norm_trans_dst_norm = dst_norm_to_dst_norm(M_3x3_tensor, (src.shape[-2:]), dsize)
        return F.grid_sample(src, warp_grid(torch.inverse(dst_norm_trans_dst_norm), dsize=dsize),
                             mode=mode, padding_mode=padding_mode)
    except Exception:
        PrintException()
        return None |
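A minimal usage sketch (hypothetical shapes; note that dst_norm_to_dst_norm, warp_grid, and PrintException are helpers assumed to be in scope from the surrounding module):
# Warp a batch of one 3-channel 32x32 image with an identity affine matrix.
src = torch.rand(1, 3, 32, 32)
M = torch.tensor([[[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]]])   # Bx2x3
out = warp_affine(src, M, dsize=(32, 32)) |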
/* S-function toolkit/print.h */
/*
* Toolkit for the Differential Cryptanalysis
* of ARX-based Cryptographic Constructions.
*
* (c) 2010 <NAME>, <NAME>,
* <NAME>`{e}re and <NAME>
*/
#ifndef PRINT_H
#define PRINT_H
#include <iostream>
template <int M, int N>
void print_matrix(const int a[M][N][N]) {
for (int m = 0; m < M; m++) {
std::cout << "A_" << m << "=[" << std::endl;
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
std::cout << a[m][j][i];
if (i != N-1 || j != N-1) std::cout << ",";
}
std::cout << std::endl;
}
std::cout << "];" << std::endl << std::endl;
}
}
#endif /* PRINT_H */
|
def stop_services(self, node):
for service in node.services:
self.stop_service(node, service) |
def create_user_with_ldap_info(ldap_connection, ldap_base_dn, login, password):
name = get_user_name_from_ldap(ldap_connection, ldap_base_dn, login)
email = '%s@%s' % (login, ".".join([DC.split('=')[1] for DC in ldap_base_dn.split(',')]))
from stalker import User, Group
from stalker.db.session import DBSession
new_user = User.query.filter(User.login==login).first()
if new_user:
new_user.password = password
else:
new_user = User(name=name, login=login, email=email, password=password)
DBSession.add(new_user)
DBSession.commit()
ldap_group_names = get_user_groups_from_ldap(ldap_connection, ldap_base_dn, login)
from anima import defaults
updated_group_info = False
    for ldap_group_name in ldap_group_names:
        stalker_group_name = defaults.ldap_user_group_map.get(ldap_group_name)
if stalker_group_name:
stalker_group = Group.query.filter(Group.name == stalker_group_name).first()
if stalker_group:
new_user.groups.append(stalker_group)
updated_group_info = True
if updated_group_info:
DBSession.commit()
return new_user |
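As an aside, the email above is built by joining the "dc=" components of the base DN into a mail domain; a quick illustration with hypothetical values:
login = 'jdoe'
ldap_base_dn = 'dc=example,dc=com'
# 'dc=example,dc=com' -> domain 'example.com'
email = '%s@%s' % (login, ".".join([DC.split('=')[1] for DC in ldap_base_dn.split(',')]))
assert email == 'jdoe@example.com' |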
// GetBootVolumeLifecycleStateEnumValues Enumerates the set of values for BootVolumeLifecycleState
func GetBootVolumeLifecycleStateEnumValues() []BootVolumeLifecycleStateEnum {
values := make([]BootVolumeLifecycleStateEnum, 0)
for _, v := range mappingBootVolumeLifecycleState {
values = append(values, v)
}
return values
} |
/**
* <code>ForkJoinTask</code> for calculating the values of the projection.
*
* @author saybur
*
*/
private final class TaskAction extends RecursiveAction
{
private static final long serialVersionUID = -3229497413493883470L;
private final Matrix data;
private final int low;
private final int high;
private TaskAction(Matrix data, int low, int high)
{
this.data = data;
this.low = low;
this.high = high;
}
@Override
protected void compute()
{
if (high - low < FORK_THRESHOLD)
{
computeDirectly();
}
else
{
int mid = (low + high) >>> 1;
invokeAll(new TaskAction(data, low, mid),
new TaskAction(data, mid, high));
}
}
private void computeDirectly()
{
for(int x = low; x < high; x++)
{
for(int y = 0; y < data.getHeight(); y++)
{
data.set(x, y, projection.calculate(x, y));
}
}
}
} |
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, September 27, 2020 at 11:41:50 AM Mountain Standard Time
* Operating System: Version 14.0 (Build 18A373)
* Image Source: /System/Library/Frameworks/AVKit.framework/AVKit
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by <NAME>.
*/
#import <AVKit/AVKit-Structs.h>
#import <UIKitCore/UIButton.h>
#import <AVKit/AVPlaybackControlsViewItem.h>
@class AVLayoutItemAttributes, NSString, AVMicaPackage, UIViewPropertyAnimator, NSNumber, NSTimer, AVUserInteractionObserverGestureRecognizer, UIVisualEffectView;
@interface AVButton : UIButton <AVPlaybackControlsViewItem> {
BOOL _collapsed;
BOOL _included;
BOOL _removed;
BOOL _hasFullScreenAppearance;
BOOL _hasAlternateAppearance;
BOOL _wasLongPressed;
BOOL _treatsForcePressAsLongPress;
BOOL _usesBackgroundEffectViewForTextOnlyButtons;
BOOL _multipleTouchesEndsTracking;
BOOL _disablesHighlightWhenLongPressed;
BOOL _clampsHitRectInsetsWhenContainedInScrollableView;
BOOL _wasForcePressTriggered;
AVLayoutItemAttributes* _layoutAttributes;
long long _tintEffectStyle;
double _forceThreshold;
double _force;
double _maximumForceSinceTrackingBegan;
NSString* _imageName;
NSString* _fullScreenImageName;
NSString* _inlineImageName;
NSString* _fullScreenAlternateImageName;
NSString* _inlineAlternateImageName;
AVMicaPackage* _micaPackage;
double _micaSnapshotAlpha;
UIViewPropertyAnimator* _highlightAnimator;
double _trackingStartTime;
double _horizontalTranslationOfLongPress;
NSNumber* _previousHorizontalPositionOfLongPress;
NSTimer* _longPressTimer;
AVUserInteractionObserverGestureRecognizer* _userInteractionGestureRecognizer;
UIVisualEffectView* _backgroundEffectView;
CGSize _extrinsicContentSize;
NSDirectionalEdgeInsets _hitRectInsets;
}
@property (assign,nonatomic,__weak) UIViewPropertyAnimator * highlightAnimator; //@synthesize highlightAnimator=_highlightAnimator - In the implementation block
@property (assign,nonatomic) double trackingStartTime; //@synthesize trackingStartTime=_trackingStartTime - In the implementation block
@property (assign,nonatomic) BOOL wasLongPressed; //@synthesize wasLongPressed=_wasLongPressed - In the implementation block
@property (assign,nonatomic) BOOL wasForcePressTriggered; //@synthesize wasForcePressTriggered=_wasForcePressTriggered - In the implementation block
@property (assign,nonatomic) double horizontalTranslationOfLongPress; //@synthesize horizontalTranslationOfLongPress=_horizontalTranslationOfLongPress - In the implementation block
@property (nonatomic,retain) NSNumber * previousHorizontalPositionOfLongPress; //@synthesize previousHorizontalPositionOfLongPress=_previousHorizontalPositionOfLongPress - In the implementation block
@property (assign,nonatomic) double force; //@synthesize force=_force - In the implementation block
@property (assign,nonatomic) double maximumForceSinceTrackingBegan; //@synthesize maximumForceSinceTrackingBegan=_maximumForceSinceTrackingBegan - In the implementation block
@property (assign,nonatomic,__weak) NSTimer * longPressTimer; //@synthesize longPressTimer=_longPressTimer - In the implementation block
@property (nonatomic,retain) AVUserInteractionObserverGestureRecognizer * userInteractionGestureRecognizer; //@synthesize userInteractionGestureRecognizer=_userInteractionGestureRecognizer - In the implementation block
@property (nonatomic,retain) UIVisualEffectView * backgroundEffectView; //@synthesize backgroundEffectView=_backgroundEffectView - In the implementation block
@property (assign,nonatomic) BOOL treatsForcePressAsLongPress; //@synthesize treatsForcePressAsLongPress=_treatsForcePressAsLongPress - In the implementation block
@property (assign,nonatomic) long long tintEffectStyle; //@synthesize tintEffectStyle=_tintEffectStyle - In the implementation block
@property (assign,nonatomic) BOOL usesBackgroundEffectViewForTextOnlyButtons; //@synthesize usesBackgroundEffectViewForTextOnlyButtons=_usesBackgroundEffectViewForTextOnlyButtons - In the implementation block
@property (assign,nonatomic) double forceThreshold; //@synthesize forceThreshold=_forceThreshold - In the implementation block
@property (assign,nonatomic) BOOL multipleTouchesEndsTracking; //@synthesize multipleTouchesEndsTracking=_multipleTouchesEndsTracking - In the implementation block
@property (nonatomic,copy) NSString * imageName; //@synthesize imageName=_imageName - In the implementation block
@property (nonatomic,copy) NSString * fullScreenImageName; //@synthesize fullScreenImageName=_fullScreenImageName - In the implementation block
@property (nonatomic,copy) NSString * inlineImageName; //@synthesize inlineImageName=_inlineImageName - In the implementation block
@property (nonatomic,copy) NSString * fullScreenAlternateImageName; //@synthesize fullScreenAlternateImageName=_fullScreenAlternateImageName - In the implementation block
@property (nonatomic,copy) NSString * inlineAlternateImageName; //@synthesize inlineAlternateImageName=_inlineAlternateImageName - In the implementation block
@property (nonatomic,retain) AVMicaPackage * micaPackage; //@synthesize micaPackage=_micaPackage - In the implementation block
@property (assign,nonatomic) double micaSnapshotAlpha; //@synthesize micaSnapshotAlpha=_micaSnapshotAlpha - In the implementation block
@property (assign,nonatomic) BOOL disablesHighlightWhenLongPressed; //@synthesize disablesHighlightWhenLongPressed=_disablesHighlightWhenLongPressed - In the implementation block
@property (assign,nonatomic) NSDirectionalEdgeInsets hitRectInsets; //@synthesize hitRectInsets=_hitRectInsets - In the implementation block
@property (assign,nonatomic) BOOL clampsHitRectInsetsWhenContainedInScrollableView; //@synthesize clampsHitRectInsetsWhenContainedInScrollableView=_clampsHitRectInsetsWhenContainedInScrollableView - In the implementation block
@property (assign,nonatomic) CGSize extrinsicContentSize; //@synthesize extrinsicContentSize=_extrinsicContentSize - In the implementation block
@property (assign,getter=isRemoved,nonatomic) BOOL removed; //@synthesize removed=_removed - In the implementation block
@property (assign,getter=isCollapsed,nonatomic) BOOL collapsed; //@synthesize collapsed=_collapsed - In the implementation block
@property (assign,getter=isIncluded,nonatomic) BOOL included; //@synthesize included=_included - In the implementation block
@property (getter=isCollapsedOrExcluded,nonatomic,readonly) BOOL collapsedOrExcluded;
@property (assign,nonatomic) BOOL hasAlternateAppearance; //@synthesize hasAlternateAppearance=_hasAlternateAppearance - In the implementation block
@property (assign,nonatomic) BOOL hasFullScreenAppearance; //@synthesize hasFullScreenAppearance=_hasFullScreenAppearance - In the implementation block
@property (nonatomic,readonly) AVLayoutItemAttributes * layoutAttributes; //@synthesize layoutAttributes=_layoutAttributes - In the implementation block
@property (readonly) unsigned long long hash;
@property (readonly) Class superclass;
@property (copy,readonly) NSString * description;
@property (copy,readonly) NSString * debugDescription;
+(id)buttonWithAccessibilityIdentifier:(id)arg1 ;
-(double)force;
-(BOOL)continueTrackingWithTouch:(id)arg1 withEvent:(id)arg2 ;
-(CGSize)minimumSize;
-(BOOL)isCollapsed;
-(void)setCollapsed:(BOOL)arg1 ;
-(void)setForce:(double)arg1 ;
-(void)cancelTrackingWithEvent:(id)arg1 ;
-(void)setExtrinsicContentSize:(CGSize)arg1 ;
-(BOOL)pointInside:(CGPoint)arg1 withEvent:(id)arg2 ;
-(void)setMicaPackage:(AVMicaPackage *)arg1 ;
-(BOOL)beginTrackingWithTouch:(id)arg1 withEvent:(id)arg2 ;
-(NSDirectionalEdgeInsets)hitRectInsets;
-(NSTimer *)longPressTimer;
-(UIViewPropertyAnimator *)highlightAnimator;
-(void)setHighlightAnimator:(UIViewPropertyAnimator *)arg1 ;
-(void)setLongPressTimer:(NSTimer *)arg1 ;
-(CGRect)hitRect;
-(void)endTrackingWithTouch:(id)arg1 withEvent:(id)arg2 ;
-(void)setHitRectInsets:(NSDirectionalEdgeInsets)arg1 ;
-(void)_updateImageIfNeeded;
-(CGSize)intrinsicContentSize;
-(void)setImageName:(NSString *)arg1 ;
-(void)setBounds:(CGRect)arg1 ;
-(NSString *)debugDescription;
-(void)layoutAttributesDidChange;
-(void)_updateLayoutItem;
-(void)layoutSubviews;
-(BOOL)isRemoved;
-(BOOL)isIncluded;
-(void)setImage:(id)arg1 forState:(unsigned long long)arg2 ;
-(void)setHasAlternateAppearance:(BOOL)arg1 ;
-(NSString *)imageName;
-(void)setHighlighted:(BOOL)arg1 ;
-(UIVisualEffectView *)backgroundEffectView;
-(BOOL)hasFullScreenAppearance;
-(void)setIncluded:(BOOL)arg1 ;
-(BOOL)isCollapsedOrExcluded;
-(void)setTitle:(id)arg1 forState:(unsigned long long)arg2 ;
-(void)willMoveToWindow:(id)arg1 ;
-(void)setHasFullScreenAppearance:(BOOL)arg1 ;
-(void)setRemoved:(BOOL)arg1 ;
-(CGSize)extrinsicContentSize;
-(void)setEnabled:(BOOL)arg1 ;
-(AVLayoutItemAttributes *)layoutAttributes;
-(AVMicaPackage *)micaPackage;
-(void)setBackgroundEffectView:(UIVisualEffectView *)arg1 ;
-(void)setWasLongPressed:(BOOL)arg1 ;
-(void)setMultipleTouchesEndsTracking:(BOOL)arg1 ;
-(void)setUsesBackgroundEffectViewForTextOnlyButtons:(BOOL)arg1 ;
-(void)setTintEffectStyle:(long long)arg1 ;
-(CGSize)_preferredLayoutSize;
-(void)_handleUserInteractionGestureRecognizer:(id)arg1 ;
-(void)_resetTrackedState;
-(void)_updateTintColorIfNeeded;
-(void)_updateEdgeInsets;
-(void)_updateBackgroundEffectViewIsHidden;
-(BOOL)wasLongPressed;
-(id)_preferredImageName;
-(double)_imageViewAlpha;
-(BOOL)treatsForcePressAsLongPress;
-(void)setTreatsForcePressAsLongPress:(BOOL)arg1 ;
-(long long)tintEffectStyle;
-(BOOL)usesBackgroundEffectViewForTextOnlyButtons;
-(double)forceThreshold;
-(void)setForceThreshold:(double)arg1 ;
-(double)maximumForceSinceTrackingBegan;
-(NSString *)fullScreenImageName;
-(void)setMaximumForceSinceTrackingBegan:(double)arg1 ;
-(BOOL)multipleTouchesEndsTracking;
-(void)setFullScreenImageName:(NSString *)arg1 ;
-(NSString *)inlineImageName;
-(void)setInlineImageName:(NSString *)arg1 ;
-(NSString *)fullScreenAlternateImageName;
-(void)setFullScreenAlternateImageName:(NSString *)arg1 ;
-(NSString *)inlineAlternateImageName;
-(void)setInlineAlternateImageName:(NSString *)arg1 ;
-(double)micaSnapshotAlpha;
-(void)setMicaSnapshotAlpha:(double)arg1 ;
-(BOOL)disablesHighlightWhenLongPressed;
-(void)setDisablesHighlightWhenLongPressed:(BOOL)arg1 ;
-(BOOL)clampsHitRectInsetsWhenContainedInScrollableView;
-(void)setClampsHitRectInsetsWhenContainedInScrollableView:(BOOL)arg1 ;
-(double)trackingStartTime;
-(void)setTrackingStartTime:(double)arg1 ;
-(BOOL)wasForcePressTriggered;
-(void)setWasForcePressTriggered:(BOOL)arg1 ;
-(double)horizontalTranslationOfLongPress;
-(void)setHorizontalTranslationOfLongPress:(double)arg1 ;
-(NSNumber *)previousHorizontalPositionOfLongPress;
-(void)setPreviousHorizontalPositionOfLongPress:(NSNumber *)arg1 ;
-(AVUserInteractionObserverGestureRecognizer *)userInteractionGestureRecognizer;
-(void)setUserInteractionGestureRecognizer:(AVUserInteractionObserverGestureRecognizer *)arg1 ;
-(BOOL)hasAlternateAppearance;
@end
|
def compute_nb_steps(self, move_direction, move_step):
if move_step[0] != 0:
return int(np.floor(move_direction[0] / move_step[0]))
elif move_step[1] != 0:
return int(np.floor(move_direction[1] / move_step[1]))
elif move_step[2] != 0:
return int(np.floor(move_direction[2] / move_step[2]))
    else:
        print("Vector {} is null in grasp".format(move_step))
/**
* Created by Arcturus Mengsk on 2/4/2018, 6:54 AM.
* foursquareapp
*/
@InjectViewState
public class WelcomeScreenPresenter extends MvpPresenter<WelcomeView> {
@Inject MainRouter mRouter;
@Inject FoursquareApplication mFoursquareApplication;
@Inject SharedPreferences mPreferences;
public WelcomeScreenPresenter() {
super();
FoursquareApplication.getAppComponent().inject(this);
}
@Override
public void attachView(WelcomeView view) {
super.attachView(view);
getViewState().clearState();
getViewState().setAnimationsEnabled(
mPreferences.getBoolean("key_animations", true));
if (PermissionManager.verifyPermissions(mFoursquareApplication.getApplicationContext())) {
getViewState().setReadyState();
} else {
getViewState().setPermissionsRequiredState();
}
}
public void requestPermissions() {
mRouter.requestPermissions();
}
public void lookAround() {
getViewState().clearState();
mRouter.placesListScreen();
}
} |
© Reuters. 1.5 BILLION EUROS TO TRAIN CIVIL SERVANTS
PARIS (Reuters) - The French state will allocate 1.5 billion euros over five years to training civil servants, in order to implement the measures contained in the bill dealing notably with the right to make mistakes, Public Accounts Minister Gérald Darmanin announced in an interview with the Journal du dimanche.
The text, due to be presented to the cabinet on Monday, aims to renew the relationship between the state and the people it administers - individuals and companies alike - on the principle of "a fault confessed is half forgiven", in Gérald Darmanin's words.
"We will mobilize 1.5 billion euros over five years to train public employees in this new advisory and support role, because the change must make its way into the legal texts but above all into people's heads," he told the JDD.
"The first principle that must now guide the administration is benevolence: if you have made a mistake, it will from now on be up to the administration to demonstrate that you did not act in good faith," he summarized.
This philosophy will not apply to cases of fraud, nor to cases "affecting public security or public health", the Public Accounts Minister specified.
The roughly 40 articles of the bill, formally titled "for a state at the service of a society of trust", also contain measures "against the over-transposition of European norms", according to Gérald Darmanin.
Overall, "the government's objective is zero paper throughout the administration by the end of the five-year term", thanks to simplification, digitization and dematerialization.
Eagerly awaited by business leaders, the bill was initially due to be unveiled in July, but its presentation was pushed back by several months - the time needed, according to Gérald Darmanin, to improve the first draft.
(Simon Carraud; edited by Jean-Stéphane Brosse) |
package apinet
import "github.com/younamebert/xfssdk/core/apis"
type NetLink interface {
GetNodeId() (*string, error)
}
type ApiNet struct{}
// GetNodeId get nodeid of current node chain
func (net *ApiNet) GetNodeId() (*string, error) {
var result *string
if err := apis.GVA_XFSCLICENT.CallMethod(1, "Net.GetPeers", nil, &result); err != nil {
return nil, err
}
return result, nil
}
|
#pragma once
#include"LinkedListArray.h"
#include"BagIterator.h"
#include"Pair.h"
typedef int TElem;
typedef bool(*Condition)(TElem);
struct pair {
TElem element;
int freq;
};
class Bag {
friend class BagIterator;
private:
/*representation of Bag*/
LLA<Pair> items;
public:
//constructor
Bag();
//adds an element to the bag
void add(TElem e);
//removes one occurrence of an element from a bag
//returns true if an element was removed, false otherwise (if e was not part of the bag)
bool remove(TElem e);
//checks if an element appears in the bag
bool search(TElem e) const;
//returns the number of occurrences for an element in the bag
int nrOccurrences(TElem e) const;
//returns the number of elements from the bag
int size() const;
//returns an iterator for this bag
BagIterator iterator() const;
//checks if the bag is empty
bool isEmpty() const;
//destructor
~Bag();
};
|
/**
* Populate all controls and combo boxes
* based on the current control.
*/
protected void populateControls()
{
if (currentField != null)
{
helpKeys = currentField.getHelpKeys();
choiceKeys = currentField.getChoiceKeys();
labelKeyButton.setText(currentField.getLabelKey());
labelKeyButton.setEnabled(true);
}
else if (currentItem != null)
{
helpKeys = currentItem.getHelpKeys();
choiceKeys = null;
labelKeyButton.setEnabled(false);
}
else
return;
int count = 0;
if (helpKeys != null)
count = helpKeys.length;
for (int i = 0; i < count; i++)
{
helpKeyCombo.addItem(helpKeys[i]);
}
if (count == 0)
helpKeyCombo.setEnabled(false);
else
helpKeyCombo.setEnabled(true);
count = 0;
if (choiceKeys != null)
count = choiceKeys.length;
for (int i = 0; i < count; i++)
{
choiceKeyCombo.addItem(choiceKeys[i]);
}
if (count == 0)
choiceKeyCombo.setEnabled(false);
else
choiceKeyCombo.setEnabled(true);
helpKeyCombo.setSelectedIndex(-1);
choiceKeyCombo.setSelectedIndex(-1);
} |
Oxidative stress and transferrin receptor recycling.
Perturbation of the oxidative balance in biological systems plays an important role in numerous pathological states as well as in many physiological processes such as receptor activity. In order to evaluate whether oxidative stress induced by menadione influences membrane receptor processes, a study was conducted on the transferrin receptor. Consequently, biochemical, biophysical and ultrastructural studies were carried out on different cell lines. The results obtained seem to indicate that oxidative stress is capable of inducing a rapid and specific down-modulation of the membrane transferrin receptor, due to a block of receptor recycling at the cell surface, without affecting binding affinity. |
The great promise of a car fuel made from cheap, clean-burning prairie grass or wood chips--and not from expensive corn that feeds the world--is more mirage than reality.
Despite years of research, testing, and some hype, the next-generation ethanol industry is far from the commercial success envisioned by President George W. Bush in 2006, when he pledged so-called cellulosic biofuels would be "practical and competitive" by 2012.
Instead the only real alternative to traditional gasoline is ethanol made from corn, a fuel environmentalists say is not green at all because of the energy-intensive nature of modern farming.
Critics say it is a failure of government policy, not science, that the U.S. is still so dependent on corn for its biofuels. Washington has backtracked on cellulosic-ethanol production targets and failed to provide assurances to investors that the sector would be subsidized over the long term.
While there are dozens of pilot and demonstration cellulosic-ethanol projects around the country, the groundwork for the first commercial plants is only now getting under way.
With the industry battered by recession, funding remains scarce for the $100-million-plus plants needed for commercial-scale production, where cellulosic could compete against cheaper corn-based ethanol.
"The earliest you're going to see efficient cellulosic ethanol is five years," said Richard Brock, president of Brock Associates, an advisory firm in Milwaukee.
For the industry to take off, investors need to be reassured that Congress will extend a cellulosic-production tax credit for several years and cellulosic-output targets will be big enough to encourage blenders to lock in future capacity.
"It would certainly increase volumes at a faster rate than what we've seen in the last couple of years," said Mac Statton, biofuels analyst with the Energy Department's forecasting arm.
Gasoline in the United States is blended with up to 15 percent ethanol, which helps reduce oil imports.
In the short term, however, the cellulosic industry's slow growth will make little difference to either America's addiction to foreign crude oil or the strains on corn supplies that critics claim have pushed up food prices.
Cellulosic-biofuels production was supposed to reach 500 million gallons next year under federal mandates that rise each year until it eventually passes corn-based ethanol output.
But no cellulosic production is expected this year and it may grow to only a few million gallons next year.
Because the cellulosic industry is not able to meet the production goals mandated by Congress, the Environmental Protection Agency has the authority to lower them.
That's what the agency did this month for the third straight year when it proposed lowering the original half-billion-gallon target for 2012 to between 3.6 million and 15.7 million gallons. EPA issues the final target in November.
The Energy Department doesn't expect cellulosic output to reach its first 1 billion gallons until 2018. Congress, under its mandates, wants 7 billion gallons that year.
The industry has made great progress in bringing down the production costs of cellulosic ethanol from $5 to $6 a gallon a decade ago to as low as $2.50. However, the first cellulosic plants are expensive to build and will add to that $2.50 cost, putting cellulosic slightly above corn ethanol's cost.
Government help for commercial-scale plants
Coskata was given a $250 million federal loan guarantee in January to build a 55-million-gallon-a-year plant in Alabama to process wood biomass into ethanol. POET, the world's biggest ethanol producer, was awarded a $105 million loan guarantee this month for a plant in Iowa to produce 25 million gallons of ethanol from corn cobs starting in 2013.
Other companies aiming to produce big volumes of cellulosic ethanol or provide enzymes that break down cellulose feedstocks are DuPont's Genencor, Abengoa Bioenergy, Qteros, and Novozymes A/S.
About $1.5 billion in venture capital poured into the cellulosic industry to help fund initial pilot projects over the last decade, according to the Advanced Ethanol Coalition that lobbies for the industry.
As cellulosic producers move to large-scale operations, venture capital investors are reluctant to bet on the expensive $150 million plants, said Brooke Coleman, who heads the coalition.
"The venture capital guys will spend $20 million or $30 million on you in the start-up phase," he said. "They don't build plants and like to get in and get out in five years."
It is the big banks, oil firms, and major energy companies that will help finance the new commercial-scale plants, but many are scared off by the uncertainty over the $1.01 tax credit and changes in production goals.
Analysts argue that with oil prices high, it should be easier for cellulosic biofuels to attract investors, but incentives from Congress are a big question mark.
A draft bill unveiled in the Senate would extend the $1.01 per gallon tax credit for three years and add ethanol made from algae to the list of cellulosic biofuels eligible to get it. Extending the credit for cellulosic ethanol is part of a compromise for Congress to end a 45-cent-a-gallon tax credit for corn ethanol, which is exceeding its production targets.
But with lawmakers looking to cut government spending, cellulosic producers may be lucky to get a one-year extension.
"How the hell do you extend a tax credit for a multi-year period, when there's no money in the Treasury," said Christine Tezak, energy analyst at Robert W. Baird.
All the uncertainties, however, hurt the industry. Refineries that blend the fuel don't have a reason to sign long-term contracts with biofuels producers, which would encourage investment in new plants and boost output.
"There's no incentive for anybody on the consuming side to ring up a cellulosic guy and say: 'Hey, I'd like to take care of my renewable fuel standard obligations for the next five years, so I need to secure not only present but future production capacity with you,'" said Tezak. |
/*******************************************************************************
**
** Function bta_dmexecutecallback
**
** Description This function will request BTA to execute a call back in the context of BTU task
** This API was named in lower case because it is only intended
** for the internal customers(like BTIF).
**
** Returns void
**
*******************************************************************************/
void bta_dmexecutecallback (tBTA_DM_EXEC_CBACK *p_callback, void *p_param)
{
tBTA_DM_API_EXECUTE_CBACK *p_msg;
if ((p_msg = (tBTA_DM_API_EXECUTE_CBACK *) osi_malloc(sizeof(tBTA_DM_API_EXECUTE_CBACK))) != NULL) {
p_msg->hdr.event = BTA_DM_API_EXECUTE_CBACK_EVT;
p_msg->p_param = p_param;
p_msg->p_exec_cback = p_callback;
bta_sys_sendmsg(p_msg);
}
} |
/* preconditions: receives a valid date.
 * postconditions: the file name is assembled.
 */
void armar_nombre_liberados(char fecha[MAX_FECHA],char nombre_liberados[MAX_NOMBRE]){
strcpy(nombre_liberados,"liberados_");
strcat(nombre_liberados,fecha);
strcat(nombre_liberados,".txt");
} |
class Tree:
"""Tree is the star of the show.
Attributes:
age (float): Age of tree in years (1 day is 1/365). Initial age of
tree is set using scipy.stats.truncnorm with settings from
arborlife.yml config file.
alive (bool): True if tree is alive else false. Initial state of
tree set with tree alive value in arborlife.yml
"""
def __init__(self, age=None):
tree_cfg = config.get_cfg("tree")
self.age = float(age) if age is not None else (
utils.calc_truncnorm(
mean=tree_cfg["age_init_mean"],
sd=tree_cfg["age_init_sd"],
clip_a=tree_cfg["age_init_min"],
clip_b=tree_cfg["age_init_max"],
)
)
    # Trees don't shrink, but mass can, so we need to track the max height
self._height_max = 0
# TODO: Need fxn to calculate canopy_mass steady state
# 10 y/o tree canopy mass = 60kg, +/- 50kg each year away, min 10kg
self.canopy_mass = max(10, 50 * self.age - 440)
self.alive = True
@property
def green_weight(self):
return self.canopy_mass + (MAX_LEAF_CANOPY_PCT * self.canopy_mass)
@property
def trunk_diameter(self):
d2h = (self.green_weight / DBH_H_COEFFICENT) ** (1 / DBH_H_EXPONENT)
return utils.calc_cubic(DBH_H_B1 / 12, DBH_H_B0 / 12, 0, -d2h)
@property
def height(self):
self._height_max = max(
self._height_max, (DBH_H_B0 + DBH_H_B1 * self.trunk_diameter) / 12)
return self._height_max
@property
def bark_ft2(self):
return self.height * 2 * np.pi * ((self.trunk_diameter / 12) / 2)
@property
def canopy_width(self):
return HEIGHT_WIDTH_RATIO * self.height
@property
def root_mass(self):
return ROOT_CANOPY_PCT * self.canopy_mass
@property
def root_radius(self):
return ((self.root_mass / self.canopy_mass) / ROOT_CANOPY_PCT_MEAN) * self.canopy_width
@property
def root_area(self):
return np.pi * self.root_radius ** 2
@property
def root_ft3(self):
return self.root_mass / WOOD_DENSITY_LB
@property
def glucose_store(self):
return self.canopy_mass * 1.85e25
# @property
# def canopy_mass(self):
# return utils.calc_truncnorm(
# CANOPY_MASS_MEAN, CANOPY_MASS_SD, CANOPY_MASS_MIN, CANOPY_MASS_MAX) |
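A minimal usage sketch of the class above (assumes the arborlife config file and the module-level allometric constants it references are available; the age is hypothetical):
tree = Tree(age=10.0)     # canopy_mass = max(10, 50 * 10 - 440) = 60 kg
print(tree.alive)         # True
print(tree.canopy_mass)   # 60
print(tree.green_weight)  # canopy mass plus the leaf fraction |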
import java.util.Arrays;
import java.util.Scanner;
import java.util.Stack;
public class A {
public static void main(String[] args) {
Scanner in = new Scanner(System.in);
int n = in.nextInt();
int[][] C = new int[n][n];
for (int i = 0; i < n; i++)
Arrays.fill(C[i], -1);
int sum = 0;
for (int i = 0; i < n; i++) {
int x = in.nextInt() - 1;
int y = in.nextInt() - 1;
int z = in.nextInt();
C[y][x] = z;
C[x][y] = 0;
sum += z;
}
int[] A = new int[n + 1];
int c = 0;
int start = 0;
Stack<Integer> S = new Stack<Integer>();
S.push(0);
boolean[] visited = new boolean[n];
while (!S.isEmpty()) {
int temp = S.pop();
if (visited[temp])
continue;
visited[temp] = true;
A[c++] = temp;
for (int i = 0; i < n; i++)
if (C[temp][i] != -1)
S.add(i);
}
A[c] = 0;
int ans = 0;
for (int i = 0; i < n; i++)
ans += C[A[i]][A[i + 1]];
System.out.println(Math.min(ans, sum - ans));
}
}
|
/**
 * An object in the game, represented as a rectangle, which holds details of
 * its shape plus its direction of travel. Would be better to use inheritance.
*
* @author Jacob Shirley & Mike Smith University of Brighton
*/
public class GameObj {
// All the variables below are vital to the state of the object
protected boolean canSee = true; // Can see
protected Vec2D pos = null;
protected float width = 0.0f; // Width of object
protected float height = 0.0f; // Height of object
protected Colour colour; // Colour of object
protected Vec2D vel = new Vec2D(0, 0);
protected Vec2D acceleration = new Vec2D(0, 0);
protected double frictionConst = 1.0;
/**
* Constructor for a game object (x,y width, height, colour)
*
* @param x
* co-ordinate of the game object
* @param y
* co-ordinate of the game object
* @param widthIs
* width of the game object
* @param heightIs
* height of the game object
* @param c
* Colour of the game object
*/
public GameObj(float x, float y, float widthIs, float heightIs, Colour c) {
pos = new Vec2D(x, y);
width = widthIs;
height = heightIs;
colour = c;
}
	public GameObj(float x, float y, Colour c) {
		this(x, y, 0, 0, c);
	}
public GameObj(float x, float y) {
this(x, y, null);
}
/**
* Set the game object visibility
*
* @param state
* is visible true or false
*/
public void setVisibility(boolean state) {
canSee = state;
}
/**
* Is the game object visible
*
* @return visibility true/false
*/
public boolean isVisible() {
return canSee;
}
/**
* The X co-ordinate of the top left hand corner of the Game Object
*
* @return x co-ordinate of the game Object
*/
public Vec2D getPos() {
return pos;
}
public void setPos(Vec2D pos) {
this.pos = pos;
}
public void setVel(Vec2D vel) {
this.vel = vel;
}
public Vec2D getCentre() {
return this.pos.copy().add(new Vec2D(this.width / 2, this.height / 2));
}
public Vec2D getVel() {
return vel;
}
/**
* The width of the game object
*
* @return The width of the game Object
*/
public float getWidth() {
return width;
}
/**
* The height of the game object
*
* @return The height of the game Object
*/
public float getHeight() {
return height;
}
public void setWidth(float width) {
this.width = width;
}
public void setHeight(float height) {
this.height = height;
}
/**
* The colour of the game object
*
* @return The colour of the game object
*/
public Colour getColour() {
return colour;
}
public void update() {
this.vel.add(acceleration);
this.vel.scale(frictionConst);
this.pos.add(vel);
}
public Vec2D getAcceleration() {
return acceleration;
}
public void setAcceleration(Vec2D acceleration) {
this.acceleration = acceleration;
}
public double getFrictionConst() {
return frictionConst;
}
public void setFrictionConst(double frictionConst) {
this.frictionConst = frictionConst;
}
/**
	 * Reverse the direction of future moves along the X axis
*/
public void changelocalX() {
this.vel.mul(new Vec2D(-1, 1));
}
/**
	 * Reverse the direction of future moves along the Y axis
*/
public void changelocalY() {
this.vel.mul(new Vec2D(1, -1));
}
/**
* Detect a collision between two GameObjects Would be good to know where
* the object is hit
*
* @param obj
* Game object to see if 'hit' by
* @return collision True/ False
*/
public Collision hitBy(GameObj obj) {
if (!(pos.x >= obj.pos.x + obj.width || pos.x + width <= obj.pos.x || pos.y >= obj.pos.y + obj.height
|| pos.y + height <= obj.pos.y)) {
return new Collision(this, obj);
} else
return null;
}
} |
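The hitBy test above is the standard axis-aligned bounding-box overlap check; the same predicate in a compact, self-contained form (hypothetical rectangles):
def aabb_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    # Rectangles overlap unless one lies entirely beside or above the other.
    return not (ax >= bx + bw or ax + aw <= bx or
                ay >= by + bh or ay + ah <= by)

assert aabb_overlap(0, 0, 10, 10, 5, 5, 10, 10)
assert not aabb_overlap(0, 0, 10, 10, 10, 0, 5, 5)  # merely touching edges do not collide |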
def part_of_env(pattern, env_name):
for part in pattern.split("-"):
if part.startswith("!"):
if part[1:] in env_name:
return False
else:
if part not in env_name:
return False
return True |
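A few illustrative checks of the matcher above (hypothetical tox-style environment names):
# Every dash-separated part of the pattern must appear in the env name,
# and any part prefixed with "!" must be absent.
assert part_of_env("py39", "py39-django") is True
assert part_of_env("py39-django", "py39-flask") is False
assert part_of_env("!windows", "py39-linux") is True
assert part_of_env("py39-!linux", "py39-linux") is False |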
/*********************************************************************
*
* SIM_OS_UpdateWindow()
*
* Function description
* Immediately updates the entire device on the display.
*/
void SIM_OS_UpdateWindow(void) {
if (_hWnd != NULL) {
InvalidateRect(_hWnd, NULL, FALSE);
UpdateWindow(_hWnd);
}
} |
Photo: YouTube screencapture
This morning Brandon Crouch reported the death of his grandfather, Trinity Broadcasting Network founder Paul Crouch, on Twitter and Instagram. According to Christian singer Vicki Yohe, "Dr. Paul Crouch passed away at 2:32am this morning .. Say a prayer for the family !"
TBN has posted an announcement to its Facebook page and a bio on its website.
Paul Crouch and Jim Bakker founded TBN in 1973. Earlier this year the network celebrated its 40th anniversary. TBN would become the world's largest religious network.
Paul left behind wife Janice Bethany Crouch and sons Paul Crouch Jr and Matthew Crouch (not biological). Paul's granddaughters Brittany Crouch Koper and Carra Crouch are involved in litigation with the network.
Before working in television, Paul worked at the Assemblies of God film library and production studio which produced and rented 16mm Christian films to churches. In 1990 TBN would get involved in feature film production starting with the movie China Cry.
Although Wikipedia credits Paul with writing five books, author Sylvia Fleener alleges the books were ghostwritten. Fleener sued TBN after her book Omega Syndrome was used as the inspiration for the movie The Omega Code.
The books I Had No Father But God: A Personal Letter to My Sons and Hello World!: A Personal Message to the Body of Christ are about the life of Paul Crouch. The book The Shadow of the Apocalypse is about end-times prophecy. |
A CNN correspondent has been suspended for two weeks after publicly criticizing the US House of Representatives’ decision to suspend a program allowing Syrian and Iraqi refugees to enter the United States.
Elise Labott, a CNN global affairs correspondent tweeted: “House passes bill that could limit Syrian refugees. Statue of Liberty bows head in anguish,” upon learning that the House of Representatives had voted 289-137, with 47 Democrats joining 242 Republicans in suspending a program that would allow Syrian and Iraqi refugees into the US, until American security agencies find that they would not pose a security risk.
House passes bill that could limit Syrian refugees. Statue of Liberty bows head in anguish @CNNPolitics https://t.co/5RvZwVftgD — Elise Labott (@eliselabottcnn) November 19, 2015
Hours after publishing the tweet, Labott issued an apology.
“Everyone, It was wrong of me to editorialize. My tweet was inappropriate and disrespectful. I sincerely apologize,” she wrote on Twitter.
Everyone, It was wrong of me to editorialize. My tweet was inappropriate and disrespectful. I sincerely apologize. — Elise Labott (@eliselabottcnn) November 20, 2015
CNN Worldwide President Jeff Zucker had previously mentioned that it was imperative that the channel should maintain a balanced viewpoint when reporting the news and that it should not appear to be biased.
“If you publicly declare your preference for issues or candidates or one side or the other of the public policy issues CNN reports on, then your ability to be viewed as objective is compromised,” he said.
President Barack Obama had defended the program on Syrian refugees and accused Republicans of “hysteria.”
"We are not well served when, in response to a terrorist attack, we descend into fear and panic," the US president said on a visit to the Philippines on Wednesday. "We don't make good decisions if it's based on hysteria or an exaggeration of risks."
@eliselabottcnn actually, that first tweet was spot on. — Noah Shachtman (@NoahShachtman) November 20, 2015
Labott has received support across Twitter and social media for the tweet which landed her in hot water. The message has been retweeted over 2,000 times.
Noah Shachtman, executive editor of the Daily Beast, tweeted: “Actually, that first tweet was spot on,” before adding in a follow-up, “The real shames are those people NOT speaking out against this insanity.”
CNN reporter @eliselabottcnn was suspended for this tweet. Expressing basic morality is impermissible and punishable https://t.co/H9srC5BCWP — Ben Norton (@BenjaminNorton) November 20, 2015
Meanwhile, Ben Norton, a journalist with Salon, wrote on Twitter: “CNN reporter @eliselabottcnn was suspended for this tweet. Expressing basic morality is impermissible and punishable.” In a second tweet, he added, “If you haven't completely abandoned your humanity, good luck getting a job at CNN.”
@eliselabottcnn @lrozen France accepts 30,000 while we cower in fear. Lady Liberty will look great in her new home pic.twitter.com/AyA9Cgl5ng — stu stein (@thestustein) November 19, 2015
Meanwhile, another Twitter user pointed out the fact that France, despite Paris being struck by the deadly Friday November 13 attacks, is willing to accept 30,000 Syrian refugees, while the US has suspended its program.
|
/**
* Exception builder instance creation with null type.
*/
@Test
(expected = IllegalArgumentException.class)
public void exceptionBuilderInstanceCreationWithNullType() {
final PropertiesPojoBuilder pojoBuilder = new PropertiesPojoBuilder();
pojoBuilder.setHost(DUMMY_HOST);
pojoBuilder.setIndex(DUMMY_INDEX);
pojoBuilder.setPort(DUMMY_PORT);
pojoBuilder.build();
} |
import * as fs from 'fs';
export module Types {
export interface Constraints {
minValue?: string;
maxValue?: string;
defaultValue?: any;
minArraySize?: number;
maxArraySize?: number;
minStringSize?: number;
maxStringSize?: number;
}
export interface Parameter {
name: string;
type: string;
constraints?: Constraints;
}
export interface Function {
constant: boolean;
inputs: Parameter[];
name: string;
outputs: Parameter[];
payable: boolean;
stateMutability: string;
type: string;
anonymous?: boolean;
functionInstance?: any;
}
export interface NameToFunctionMap {
[functionName: string]: Function;
}
/**
* @param {string} jsonPath
* @returns {NameToFunctionMap} dictionary functionName -> abi
*/
export function importABIFromFile(jsonPath: string) : NameToFunctionMap {
let inputJson = JSON.parse(fs.readFileSync(jsonPath, 'utf8'));
if (inputJson.abi != null){
inputJson = inputJson.abi;
}
let abi = inputJson as Function[];
let map: NameToFunctionMap = { };
abi.forEach(functionABI => {
map[functionABI.name] = functionABI;
});
return map;
}
} |
Characterization of wafer-level bonded hermetic packages using optical leak detection
For MEMS devices required to operate in a hermetic environment, one of the main reliability issues relates to the packaging methods applied. In this paper, an optical method for testing low-volume hermetic cavities formed by anodic bonding between glass and SOI (silicon on insulator) wafers is presented. Several different cavity-geometry structures have been designed, fabricated and applied to monitor the hermeticity of wafer-level anodic bonding. An SOI wafer was used as the cap wafer, on which the different-geometry structures were fabricated using standard MEMS technology. The test cavities were formed by bonding SOI wafers to glass wafers at 400 °C and 1000 mbar pressure inside a vacuum bonding chamber. The bonding voltage varies from 200 V to 600 V. The bonding strength between the glass and SOI wafers was mechanically tested using a shear tester. The deformation amplitudes of the cavity cap surface were monitored using an optical interferometer. The hermeticity of the glass-to-SOI wafer-level bonding was characterized by observing the surface deformation over a 6-month period in an atmospheric environment. We have observed a relatively stable micro vacuum cavity. |
import * as cms from '../cms'
import { CMS } from '../cms'
import { MultiContextCms } from '../cms/cms-multilocale'
import { Locale } from '../nlp'
import { ContentfulCredentials, ContentfulOptions } from '../plugin'
import { shallowClone } from '../util'
import { Contentful } from './cms-contentful'
/**
* Set it to ContentfulOptions.contentfulFactory to connect to
* different Contentful environments depending on the Context's Locale
* for each call to CMS
*/
export function multiEnvironmentFactory(environmentByLocale?: {
[locale: string]: ContentfulCredentials
}): (contOptions: ContentfulOptions) => CMS {
const multiFactory = new MultiEnvironmentFactory(environmentByLocale)
return contOptions =>
new MultiContextCms((ctx?: cms.Context) =>
multiFactory.get(contOptions, ctx)
)
}
/**
* Creates a different Contentful environments for each configured Locale.
* When the call to CMS does not specify a locale, it uses the credentials from
* ContentfulOptions and it informs through the logger
*/
export class MultiEnvironmentFactory {
private cache = new Map<Locale, CMS>()
private defaultCms: CMS | undefined
constructor(
private readonly environmentByLocale?: {
[locale: string]: ContentfulCredentials
},
private readonly contentfulFactory = (o: ContentfulOptions) =>
new Contentful(o) as CMS,
private readonly logger = console.error
) {}
public get(contOptions: ContentfulOptions, ctx?: cms.Context): CMS {
const credentials = this.getCredentials(ctx)
if (!credentials) {
if (!this.defaultCms) {
this.defaultCms = this.contentfulFactory(contOptions)
}
return this.defaultCms
}
const locale = ctx!.locale!
let cms = this.cache.get(locale)
if (!cms) {
const opts = shallowClone(contOptions)
opts.spaceId = credentials.spaceId
opts.environment = credentials.environment
opts.accessToken = credentials.accessToken
cms = this.contentfulFactory(opts)
this.cache.set(locale, cms)
}
return cms
}
private getCredentials(ctx?: cms.Context): ContentfulCredentials | undefined {
if (!ctx) {
this.logger(
'MultiLocaleCmsFactory called with no context. Using default credentials'
)
return undefined
}
if (!ctx.locale) {
this.logger(
'MultiLocaleCmsFactory called with no context locale. Using default credentials'
)
return undefined
}
const credentials = this.environmentByLocale![ctx.locale]
if (!credentials) {
this.logger(
`MultiLocaleCmsFactory has no credentials for locale '${ctx.locale}'. Trying with default credentials`
)
return undefined
}
return credentials
}
}
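// Usage sketch (the locale keys and credential values below are illustrative,
// not taken from the source):
//
//   const factory = multiEnvironmentFactory({
//     en: { spaceId: 'en-space', environment: 'master', accessToken: 'en-token' },
//     es: { spaceId: 'es-space', environment: 'master', accessToken: 'es-token' },
//   })
//
// Passed as ContentfulOptions.contentfulFactory, each CMS call whose Context
// carries locale 'en' or 'es' is routed to the matching space; calls without
// a locale fall back to the credentials in the base ContentfulOptions.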
|
// aggregateForwarding reads incoming forward messages and aggregates them.
// Every flush interval it forwards the collected stats.
func (s *Server) aggregateForwarding() {
ticker := s.aggregateForwardingFlushTicker()
for {
select {
case stat := <-s.forwardingIncoming:
if stat.Type == StatCounter {
s.forwardingStats.AddCount(stat.Name, stat.Value/stat.SampleRate)
}
case <-ticker:
n, msg, err := s.forwardingStats.CreateForwardMessage()
if err != nil {
log.Println("Error: Could not serialize forwarded message:", err)
}
if n > 0 {
log.Printf("Forwarding %d stat(s).", n)
s.forwardingOutgoing <- msg
} else {
log.Println("No stats to forward.")
}
s.forwardingStats.Clear(false)
case <-s.quit:
return
}
}
} |
export type ApiError = {
error: string;
message: any;
};
|
    def __copy_sfr_input(self, path_sfh_in):
        '''Read a star formation history (SFH) file and interpolate it
        linearly onto the simulation timesteps, normalizing it to the
        requested present-day stellar mass when one is provided.'''
        nb_dt_csi = self.nb_timesteps + 1
        i_dt_csi = 0         # index of the current simulation timestep
        t_csi = 0.0          # lower time limit of the current timestep
        m_stel_sfr_in = 0.0  # stellar mass formed by the input SFH
        with open(os.path.join(nupy_path, path_sfh_in), 'r') as sfr_file:
            line_1_str = sfr_file.readline()
            parts_1 = [float(x) for x in line_1_str.split()]
            for line_2_str in sfr_file:
                parts_2 = [float(x) for x in line_2_str.split()]
                # Linear interpolation (sfr = a*t + b) between the two points
                a_csi = (parts_2[1] - parts_1[1]) / (parts_2[0] - parts_1[0])
                b_csi = parts_1[1] - a_csi * parts_1[0]
                # Fill every simulation timestep covered by this segment
                while t_csi <= parts_2[0]:
                    if i_dt_csi < self.nb_timesteps:
                        # Evaluate the SFR at the midpoint of the timestep
                        self.sfr_input[i_dt_csi] = a_csi * (t_csi + \
                            self.history.timesteps[i_dt_csi] * 0.5) + b_csi
                    else:
                        self.sfr_input[i_dt_csi] = a_csi * t_csi + b_csi
                    # Cumulate the stellar mass formed by the input SFH
                    if i_dt_csi < nb_dt_csi - 1:
                        m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
                            self.history.timesteps[i_dt_csi]
                        t_csi += self.history.timesteps[i_dt_csi]
                    i_dt_csi += 1
                    if i_dt_csi >= nb_dt_csi:
                        break
                if i_dt_csi >= nb_dt_csi:
                    break
                parts_1 = copy.copy(parts_2)
        # Extend the last SFR value if the input file ends too early
        while i_dt_csi < nb_dt_csi:
            self.sfr_input[i_dt_csi] = self.sfr_input[i_dt_csi-1]
            if i_dt_csi < nb_dt_csi - 1:
                m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
                    self.history.timesteps[i_dt_csi]
            i_dt_csi += 1
        # Normalize the SFH to reproduce the requested current stellar mass
        if self.stellar_mass_0 > 0.0:
            norm_sfr_in = self.stellar_mass_0 / ((1-self.mass_frac_SSP) * m_stel_sfr_in)
            for i_csi in range(0, nb_dt_csi):
                self.sfr_input[i_csi] = self.sfr_input[i_csi] * norm_sfr_in
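    # The normalization step above enforces
    #
    #     stellar_mass_0 = (1 - mass_frac_SSP) * sum_i sfr_input[i] * timesteps[i]
    #
    # i.e. the input SFH is rescaled so that (1 - mass_frac_SSP) times the
    # time-integrated star formation rate equals the requested present-day
    # stellar mass (reading inferred from the code above).
|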
import PermsApi from '@/services/api/perms.service'
const namespaced = true
const state = {
isLoading: false,
permissions: [],
scopes: []
}
const mutations = {
SET_LOADING(state) {
state.isLoading = true
},
SET_PERMS(state, permissions) {
state.isLoading = false
state.permissions = permissions
},
SET_SCOPES(state, scopes) {
state.isLoading = false
state.scopes = scopes
},
RESET_LOADING(state) {
state.isLoading = false
}
}
const actions = {
getPerms({commit}) {
commit('SET_LOADING')
return PermsApi.getPerms({})
.then(({permissions}) => commit('SET_PERMS', permissions))
.catch(() => commit('RESET_LOADING'))
},
  createPerm({dispatch}, perm) {
    return PermsApi.createPerm(perm).then(() => {
      dispatch('getPerms')
    })
  },
  updatePerm({dispatch}, [permId, update]) {
    return PermsApi.updatePerm(permId, update).then(() => {
      dispatch('getPerms')
    })
  },
  deletePerm({dispatch}, permId) {
    return PermsApi.deletePerm(permId).then(() => {
      dispatch('getPerms')
    })
  },
getScopes({commit}) {
commit('SET_LOADING')
return PermsApi.getScopes().then(({scopes}) => commit('SET_SCOPES', scopes))
}
}
const getters = {
roles: state => {
return state.permissions.map(p => p.match)
}
}
export default {
namespaced,
state,
mutations,
actions,
getters
}
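// Usage sketch (hypothetical component code; assumes this module is
// registered in the store under the 'perms' namespace):
//
//   export default {
//     computed: {
//       roles() { return this.$store.getters['perms/roles'] }
//     },
//     created() {
//       this.$store.dispatch('perms/getPerms')
//       this.$store.dispatch('perms/getScopes')
//     }
//   }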
|
    def _update_iface_map(self):
        '''Rebuild the index -> InterfaceData map from `networkctl list`.'''
        out = subprocess.check_output(
            ['networkctl', 'list', '--no-pager', '--no-legend'])
        self._ifaces.clear()
        # Without the legend, each line is: IDX LINK TYPE OPERATIONAL SETUP
        for line in out.split(b'\n')[:-1]:
            fields = line.decode('ascii').split()
            idx = int(fields[0])
            name = fields[1]
            # Track only the interfaces this service manages
            if name not in self._managed_ifaces:
                continue
            state = fields[3]  # operational state
            data = InterfaceData(name, state)
            self._ifaces[idx] = data |
OBJECTIVES
To find out the opinions of primary care professionals on prevention activities in clinical practice, as well as the perceived obstacles to carrying them out and ways of overcoming them.
PARTICIPANTS
A total of 129 professionals participated, including primary care doctors, nurses, technicians, primary care service managers, and public health professionals.
DESIGN
Delphi questionnaire sent by e-mail.
SETTING
Primary care in Catalonia, Spain.
MAIN MEASUREMENTS AND RESULTS
Two rounds were carried out, with response rates of 48.9% and 67.4%, respectively. Convergence of over 40% was obtained on all questions after the second round. The main obstacles to prevention in clinical practice were lack of time, lack of training, and the attitudes of the professionals themselves towards prevention. To improve the implementation of prevention in practice, the professionals pointed to training in communication skills, advice methodology, and the use of clinical practice guidelines.
CONCLUSIONS
Health professionals identified specific training needs that could help improve the integration of prevention activities into clinical practice. On the other hand, it was noted that the attitudes of the professionals themselves towards prevention need to be improved. |
// TestFileCacheReadCacheAndExpiry tests whether the fileCache will properly read from cache, and that it resets the cache
func TestFileCacheReadCacheAndExpiry(t *testing.T) {
dir, cleanup := getTempDir(t)
defer cleanup()
	cpu := resource.NewQuantity(2, resource.DecimalSI)
	_ = cpu.String() // populate the quantity's cached string form so equality checks match after (de)serialization
	newCPU := resource.NewQuantity(4, resource.DecimalSI)
	_ = newCPU.String()
	memory := resource.NewQuantity(2033283072, resource.BinarySI)
	_ = memory.String()
myNode := &NodeInfo{
NodeName: "MyNode",
Labels: map[string]string{
"kubernetes.io/hostname": "MyNode",
"kubernetes.io/os": "linux",
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *cpu,
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *cpu,
},
}
myUpdatedNode := &NodeInfo{
NodeName: "MyNode",
Labels: map[string]string{
"kubernetes.io/hostname": "updatedHostname",
"kubernetes.io/os": "linux",
"kubernetes.io/updated": "true",
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *cpu,
v1.ResourceMemory: *memory,
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *newCPU,
v1.ResourceMemory: *memory,
},
}
client := TestAPIServer{Mem: map[string]*NodeInfo{"MyNode": myNode}}
timeProvider := &manualTimeProvider{time.Now()}
cacheWrapper := NewFileCacheClientWrapper(client, dir, time.Hour, WithTimeProvider(timeProvider))
_, err := cacheWrapper.GetNodeInfo("MyNode")
assert.NoError(t, err)
client.Mem["MyNode"] = myUpdatedNode
node, err := cacheWrapper.GetNodeInfo("MyNode")
assert.NoError(t, err)
assert.Equal(t, myNode, node)
node, err = client.GetNodeInfo("MyNode")
assert.NoError(t, err)
assert.Equal(t, myUpdatedNode, node)
timeProvider.time = time.Now().Add(time.Hour * 2)
node, err = cacheWrapper.GetNodeInfo("MyNode")
assert.NoError(t, err)
assert.Equal(t, node, myUpdatedNode)
} |
Mobile cell site (Provided by AT&T)
More than 30,000 concert-goers are expected to flock to a rural grove in Okeechobee County this weekend for the inaugural Okeechobee Music and Arts Festival, and AT&T says they shouldn’t have a problem using their smartphones when they get there.
The company plans to install a mobile cell phone tower on the 800-acre property to help boost cell and data coverage during the three-day event.
“Events like this are full of moments customers want to share through texts, pictures, status updates and phone calls,” said AT&T Florida President Joe York. “We’re working to give customers reliable coverage and fast speeds, so they can get the most out of their mobile devices at the event.”
AT&T often brings in mobile cell towers for events with large crowds that can lead to network congestion. Last year, the company placed a mobile cell site near downtown West Palm Beach in advance of SunFest.
More than 100 bands are expected to take part in the Okeechobee music festival, including headliners Mumford & Sons, Kendrick Lamar, Skrillex, Robert Plant & The Sensational Space Shifters, The Avett Brothers, Hall & Oates, Ween, Miguel and Fetty Wap.
Other attractions at the event include a white-sand lakeside swimming beach and bar, wooded VIP camping, spectacular immersive art installations, craft beer makers, great food trucks, and yoga and meditation sessions. |
    def predict(self, time):
        if self.mode == 'read':
            # Cumulative (random-walk) noise: uncertainty grows with horizon
            steps = np.random.normal(scale=self.noise_scale, size=self.horizon)
            noise = pd.Series(np.cumsum(steps))
            if not isinstance(time, int):
                assert time in self.data.index, f'Got timestamp {time} which is not in index of data {self.data}'
                time = self.data.index.get_loc(time)
            snippet = self.data.iloc[time+1:time+self.horizon+1]
            noise.index = self.data.index[time+1:time+self.horizon+1]
            return snippet + noise
        elif self.mode == 'predict':
            raise NotImplementedError('Implement prediction from model')
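    # Standalone illustration of the cumulative noise above (assumes numpy):
    #
    #   steps = np.random.normal(scale=0.1, size=5)
    #   walk = np.cumsum(steps)
    #
    # Each forecast step inherits every earlier perturbation, so the simulated
    # forecast error grows with the horizon, as a random walk does.
|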
/**
* Returns true if actions should be enabled for this configuration.
*/
public boolean enableActions() {
for (FragmentOptions fragment : fragmentOptionsMap.values()) {
if (!fragment.enableActions()) {
return false;
}
}
return true;
} |
// examples/src/asp_toplass/i_Asp.java
/* $Id$ */
package asp_toplass;
import java.rmi.Remote;
import java.rmi.RemoteException;
interface i_Asp extends Remote {
public void btree_transfer(int[] row, int k, int owner)
throws RemoteException;
}
|
# Repository: TvanMeer/bbot
"""
Main
"""
import time
import threading
import asyncio
from binance import AsyncClient, BinanceSocketManager
from options import Options
from pair import Pair
class Bot():
def __init__(self, options, api_key=' ', api_secret=' '):
self.symbols = set()
self.pairs = {}
self._options = options
self.__api_key = api_key
self.__api_secret = api_secret
self._shutdown = False
self._all_symbols = set()
        self._channels = []
if options.mode in ['TESTNET', 'TRADE']:
if api_key == ' ' or api_secret == ' ':
raise Exception(f'Binance API credentials required in {options.mode} mode')
# Start client in another thread
self._binance_client = threading.Thread(target = self._other_thread, daemon=True)
self._binance_client.start()
def _other_thread(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._start_async_client())
async def _start_async_client(self):
# Connect to Binance
client = await AsyncClient.create(api_key = self.__api_key,
api_secret = self.__api_secret)
# Pick pairs of interest
tickers = await client.get_all_tickers()
        for t in tickers:
            self._all_symbols.add(t['symbol'])
for qa in self._options.quote_assets:
for s in self._all_symbols:
if s.endswith(qa):
starts_with = s[:-len(qa)]
if starts_with in self._options.base_assets:
self.symbols.add(s)
                        self._channels.append(s.lower() + '@kline_1m')
# Initialize self.pairs
for s in self.symbols:
self.pairs[s] = Pair(s, self._options)
        # Concurrent execution of history download and streams
        __hist = asyncio.create_task(self._download_history(client))
        __cs = asyncio.create_task(self._start_candle_streams(client))
        await asyncio.gather(__cs, __hist)
async def _download_history(self, client):
for s in self.symbols:
for w in self._options.windows.items():
if w[0] == '2s':
continue
timestr = self._to_timestring(w[0], w[1])
candles = await client.get_historical_klines(s, w[0], timestr)
self.pairs[s]._add_historical_window(w[0], candles)
def _to_timestring(self, interval, windowsize):
# Helperfunction to download history with binance-python
amount = int(interval[:-1]) * windowsize
period = interval[-1]
if period == 'm':
return f'{amount} minutes ago UTC'
elif period == 'h':
return f'{amount} hours ago UTC'
elif period == 'd':
return f'{amount} days ago UTC'
elif period == 'w':
return f'{amount} weeks ago UTC'
elif period == 'M':
return f'{amount} months ago UTC'
else:
raise Exception(f'Error: invalid interval: {period}')
    async def _start_candle_streams(self, client):
        bm = BinanceSocketManager(client)
        ms = bm.multiplex_socket(self._channels)
        async with ms as stream:
            while not self._shutdown:
                msg = await stream.recv()
                symbol = msg['data']['s']
                self.pairs[symbol]._parse_candle(msg)
        await client.close_connection()
def stop(self):
self._shutdown = True
self._binance_client.join()
# test ------------------------------------------------------
if __name__ == '__main__':
options = Options(mode = 'PAPER',
base_assets = [ 'BTC', 'ETH' ],
quote_assets = [ 'USDT', ],
windows = { '1m':500, '15m':200 }
)
bot = Bot(options)
time.sleep(3)
print('Slept 3 seconds...')
time.sleep(6)
print('Slept 6 seconds...')
# Test stop function
bot.stop()
    print('Runs successfully after termination..............')
# TODO: model training func as param in options.
# This is being executed as asyncio.as_thread in hist and stream
# gather
# Trick with shutdown also works for other functions nested within
# candlestream while-loop
|
// cmd/cleanupAwsAccount_test.go
package cmd
import (
"context"
"testing"
"github.com/integr8ly/cluster-service/pkg/clusterservice"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/integr8ly/delorean/pkg/utils"
)
type clusterServiceMock struct {
clusterservice.Client
}
func (c *clusterServiceMock) DeleteResourcesForCluster(_ string, _ map[string]string, _ bool) (*clusterservice.Report, error) {
return &clusterservice.Report{Items: []*clusterservice.ReportItem{{ActionStatus: "complete"}}}, nil
}
func TestCleanupAwsCmd(t *testing.T) {
cases := []struct {
description string
dryRun bool
ec2 ec2iface.EC2API
s3 s3iface.S3API
expectedNumDeletedResources int
expectError bool
}{
{
description: "no resources should be deleted",
dryRun: false,
expectedNumDeletedResources: 0,
expectError: false,
ec2: &utils.MockEC2API{
DescribeInstancesOutput: &ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{
{
InstanceId: aws.String("Running OSD instance"),
Tags: []*ec2.Tag{
{Key: aws.String("kubernetes.io/cluster/dont-delete-me"), Value: aws.String("owned")},
},
State: &ec2.InstanceState{Name: aws.String("running")},
},
},
},
},
},
DescribeVpcsOutput: &ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("kubernetes.io/cluster/dont-delete-me"), Value: aws.String("owned")},
},
VpcId: aws.String("osd-vpc-attached-to-ec2"),
},
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("integreatly.org/clusterID"), Value: aws.String("dont-delete-me")},
},
VpcId: aws.String("rhmi-vpc-attached-to-running-osd-cluster"),
},
{
IsDefault: aws.Bool(true),
Tags: nil,
VpcId: aws.String("empty-vpc"),
},
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("some/random"), Value: aws.String("tag-name")},
},
VpcId: aws.String("osd-unrelated-nonempty-vpc"),
},
},
},
},
s3: &utils.MockS3API{
ListObjsFunc: func(input *s3.ListObjectsV2Input) (output *s3.ListObjectsV2Output, err error) {
return &s3.ListObjectsV2Output{
Contents: []*s3.Object{
{
Key: aws.String("testobj.zip"),
},
},
}, nil
},
ListBucketsOutput: &s3.ListBucketsOutput{
Buckets: []*s3.Bucket{
{
Name: aws.String("managed-velero-backups"),
},
{
Name: aws.String("rhmi-bucket"),
},
{
Name: aws.String("other-bucket"),
},
},
},
GetBucketTaggingFunc: func(input *s3.GetBucketTaggingInput) (output *s3.GetBucketTaggingOutput, err error) {
t.Log("GetBucketTaggingFunc: bucket name:", *input.Bucket)
if aws.StringValue(input.Bucket) == "managed-velero-backups" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("velero.io/infrastructureName"),
Value: aws.String("dont-delete-me"),
},
}}, nil
} else if aws.StringValue(input.Bucket) == "rhmi-bucket" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("integreatly.org/clusterID"),
Value: aws.String("dont-delete-me"),
},
}}, nil
} else if aws.StringValue(input.Bucket) == "other-bucket" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("some/random"),
Value: aws.String("tag"),
},
}}, nil
}
return nil, nil
},
},
},
{
description: "all resources should be deleted",
dryRun: false,
expectedNumDeletedResources: 5,
expectError: false,
ec2: &utils.MockEC2API{
DescribeInstancesOutput: &ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{
{
InstanceId: aws.String("Terminated OSD instance"),
Tags: []*ec2.Tag{
{Key: aws.String("kubernetes.io/cluster/delete-me"), Value: aws.String("owned")},
},
State: &ec2.InstanceState{Name: aws.String("terminated")},
},
},
},
},
},
DescribeVpcsOutput: &ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("kubernetes.io/cluster/delete-me"), Value: aws.String("owned")},
},
VpcId: aws.String("osd-vpc-unattached"),
},
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("integreatly.org/clusterID"), Value: aws.String("delete-me")},
},
VpcId: aws.String("rhmi-vpc-unattached"),
},
{
IsDefault: aws.Bool(false),
Tags: nil,
VpcId: aws.String("empty-vpc"),
},
},
},
},
s3: &utils.MockS3API{
ListObjsFunc: func(input *s3.ListObjectsV2Input) (output *s3.ListObjectsV2Output, err error) {
return &s3.ListObjectsV2Output{
Contents: []*s3.Object{
{
Key: aws.String("testobj.zip"),
},
},
}, nil
},
ListBucketsOutput: &s3.ListBucketsOutput{
Buckets: []*s3.Bucket{
{
Name: aws.String("managed-velero-backups"),
},
{
Name: aws.String("rhmi-bucket"),
},
},
},
GetBucketTaggingFunc: func(input *s3.GetBucketTaggingInput) (output *s3.GetBucketTaggingOutput, err error) {
if aws.StringValue(input.Bucket) == "managed-velero-backups" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("velero.io/infrastructureName"),
Value: aws.String("delete-me"),
},
}}, nil
} else if aws.StringValue(input.Bucket) == "rhmi-bucket" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("integreatly.org/clusterID"),
Value: aws.String("delete-me"),
},
}}, nil
}
return nil, nil
},
},
},
{
description: "nothing should get deleted with dry run set to true",
dryRun: true,
expectedNumDeletedResources: 0,
expectError: false,
ec2: &utils.MockEC2API{
DescribeInstancesOutput: &ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{},
},
},
},
DescribeVpcsOutput: &ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("kubernetes.io/cluster/delete-me"), Value: aws.String("owned")},
},
VpcId: aws.String("osd-vpc-unattached"),
},
{
IsDefault: aws.Bool(false),
Tags: []*ec2.Tag{
{Key: aws.String("integreatly.org/clusterID"), Value: aws.String("delete-me")},
},
VpcId: aws.String("rhmi-vpc-unattached"),
},
{
IsDefault: aws.Bool(false),
Tags: nil,
VpcId: aws.String("empty-vpc"),
},
},
},
},
s3: &utils.MockS3API{
ListObjsFunc: func(input *s3.ListObjectsV2Input) (output *s3.ListObjectsV2Output, err error) {
return &s3.ListObjectsV2Output{
Contents: []*s3.Object{
{
Key: aws.String("testobj.zip"),
},
},
}, nil
},
ListBucketsOutput: &s3.ListBucketsOutput{
Buckets: []*s3.Bucket{
{
Name: aws.String("managed-velero-backups"),
},
{
Name: aws.String("rhmi-bucket"),
},
},
},
GetBucketTaggingFunc: func(input *s3.GetBucketTaggingInput) (output *s3.GetBucketTaggingOutput, err error) {
if aws.StringValue(input.Bucket) == "managed-velero-backups" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("velero.io/infrastructureName"),
Value: aws.String("delete-me"),
},
}}, nil
} else if aws.StringValue(input.Bucket) == "rhmi-bucket" {
return &s3.GetBucketTaggingOutput{
TagSet: []*s3.Tag{
{
Key: aws.String("integreatly.org/clusterID"),
Value: aws.String("delete-me"),
},
}}, nil
}
return nil, nil
},
},
},
}
for _, c := range cases {
t.Run(c.description, func(t *testing.T) {
cmd := &cleanupAwsAccountCmd{
awsRegion: "us-east-1",
dryRun: c.dryRun,
clusterService: &clusterServiceMock{},
ec2: c.ec2,
s3: c.s3,
s3Deleter: &utils.MockS3BatchDeleter{BatchDeleteFunc: func(iterator s3manager.BatchDeleteIterator) error { return nil }},
s3Buckets: []awsResourceObject{},
vpcs: []awsResourceObject{},
osdResources: map[string][]awsResourceObject{},
rhmiResources: map[string][]awsResourceObject{},
deletedResources: []awsResourceObject{},
}
err := cmd.run(context.TODO())
if err != nil && !c.expectError {
t.Fatalf("unexpected error: %v", err)
}
if c.expectedNumDeletedResources != len(cmd.deletedResources) {
t.Fatalf("expect %d aws resources deleted but got %d", c.expectedNumDeletedResources, len(cmd.deletedResources))
}
})
}
}
|
// Repository: reneruhr/kipod
#pragma once
#include "../../render/render_shader.h"
namespace kipod {
class ComputeShader : public Shader {
public:
ComputeShader() = default;
ComputeShader(std::string comp) : Shader({}, {}, {}, comp)
{
}
};
}  // namespace kipod
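// Usage sketch (hypothetical; assumes the base kipod::Shader treats its
// fourth constructor argument as the compute-shader source, as the
// delegation above suggests):
//
//   kipod::ComputeShader cs("shaders/particles.comp");
//   // ... bind buffers/uniforms, then dispatch with the usual GL call:
//   // glDispatchCompute(groups_x, groups_y, groups_z);
|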
package omniv20
import (
"errors"
"io"
"testing"
"github.com/jf-tech/omniparser/idr"
"github.com/jf-tech/omniparser/schemahandler"
"github.com/jf-tech/omniparser/transformctx"
"github.com/stretchr/testify/assert"
"github.com/jf-tech/omniparser/errs"
"github.com/jf-tech/omniparser/header"
"github.com/jf-tech/omniparserlegacy/omniv20/fileformat"
"github.com/jf-tech/omniparserlegacy/omniv20/transform"
)
func TestCreateHandler_VersionNotSupported(t *testing.T) {
p, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: "12345",
},
},
})
assert.Error(t, err)
assert.Equal(t, errs.ErrSchemaNotSupported, err)
assert.Nil(t, p)
}
func TestCreateHandler_FormatNotSupported(t *testing.T) {
p, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: version,
FileFormatType: "unknown",
},
},
Content: []byte(`{"transform_declarations": { "FINAL_OUTPUT": {} }}`),
})
assert.Error(t, err)
assert.Equal(t, errs.ErrSchemaNotSupported, err)
assert.Nil(t, p)
}
func TestCreateHandler_TransformDeclarationsJSONValidationFailed(t *testing.T) {
p, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Name: "test-schema",
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: version,
FileFormatType: "xml",
},
},
Content: []byte(`{"transform_declarations": {}}`),
})
assert.Error(t, err)
assert.Equal(t,
`schema 'test-schema' validation failed: transform_declarations: FINAL_OUTPUT is required`,
err.Error())
assert.Nil(t, p)
}
func TestCreateHandler_TransformDeclarationsInCodeValidationFailed(t *testing.T) {
p, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Name: "test-schema",
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: version,
FileFormatType: "xml",
},
},
Content: []byte(
`{
"transform_declarations": {
"FINAL_OUTPUT": { "template": "non-existing" }
}
}`),
})
assert.Error(t, err)
assert.Equal(t,
`schema 'test-schema' 'transform_declarations' validation failed: 'FINAL_OUTPUT' contains non-existing template reference 'non-existing'`,
err.Error())
assert.Nil(t, p)
}
func TestCreateHandler_FileFormatValidationFailed(t *testing.T) {
p, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Name: "test-schema",
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: version,
FileFormatType: "delimited",
},
},
Content: []byte(
`{
"file_declaration": {
"delimiter": ",",
"data_row_index": -1,
"columns": [ { "name": "col1" } ]
},
"transform_declarations": {
"FINAL_OUTPUT": { "xpath": "." }
}
}`),
})
assert.Error(t, err)
assert.Equal(t,
`schema 'test-schema' validation failed: file_declaration.data_row_index: Must be greater than or equal to 1`,
err.Error())
assert.Nil(t, p)
}
func TestCreateHandler_Success(t *testing.T) {
h, err := CreateSchemaHandler(
&schemahandler.CreateCtx{
Name: "test-schema",
Header: header.Header{
ParserSettings: header.ParserSettings{
Version: version,
FileFormatType: "delimited",
},
},
Content: []byte(
`{
"file_declaration": {
"delimiter": ",",
"data_row_index": 1,
"columns": [ { "name": "col1" } ]
},
"transform_declarations": {
"FINAL_OUTPUT": { "xpath": "." }
}
}`),
})
assert.NoError(t, err)
ingester, err := h.NewIngester(&transformctx.Ctx{}, nil)
assert.NoError(t, err)
assert.NotNil(t, ingester)
}
type testFileFormat struct {
createFormatReaderErr error
}
func (f testFileFormat) ValidateSchema(_ string, _ []byte, _ *transform.Decl) (interface{}, error) {
return nil, nil
}
func (f testFileFormat) CreateFormatReader(
inputName string, input io.Reader, runtime interface{}) (fileformat.FormatReader, error) {
if f.createFormatReaderErr != nil {
return nil, f.createFormatReaderErr
}
return testFormatReader{
inputName: inputName,
input: input,
runtime: runtime,
}, nil
}
type testFormatReader struct {
inputName string
input io.Reader
runtime interface{}
}
func (r testFormatReader) Read() (*idr.Node, error) { panic("implement me") }
func (r testFormatReader) Release(*idr.Node) { panic("implement me") }
func (r testFormatReader) IsContinuableError(error) bool { panic("implement me") }
func (r testFormatReader) FmtErr(string, ...interface{}) error { panic("implement me") }
func TestNewIngester_Failure(t *testing.T) {
h := &schemaHandler{
ctx: &schemahandler.CreateCtx{},
fileFormat: &testFileFormat{createFormatReaderErr: errors.New("create reader failure")},
}
ingester, err := h.NewIngester(&transformctx.Ctx{}, nil)
assert.Error(t, err)
assert.Equal(t, "create reader failure", err.Error())
assert.Nil(t, ingester)
}
|
Proximal Hypospadias Repair Outcomes in Patients with a Specific Disorder of Sexual Development Diagnosis
Boys with undermasculinized external genital development and/or 46,XY disorders of sex development (DSD) often receive masculinizing genitoplasty. Such procedures are done to correct ventral curvature of the phallus, reposition a proximally located urethral meatus, and cosmetically correct the appearance of the labioscrotal folds. No studies to date have assessed whether patients with a specific DSD diagnosis have worse outcomes after severe proximal hypospadias procedures or whether these patients require more extensive surgical maneuvers than severe proximal hypospadias patients without a specific DSD diagnosis. We retrospectively reviewed consecutive proximal hypospadias repairs performed at our institution from 1998 to 2010 and compared the anatomy, surgical technique required for repair, and outcomes in patients with and without a definitive DSD diagnosis. Boys with a specific DSD diagnosis do have significantly more atypical anatomy when undergoing proximal hypospadias masculinizing genitoplasties. They are more likely to require associated gonad procedures but do not have an increased risk of complications or number of surgeries when compared to other proximal hypospadias patients without a specific DSD diagnosis. The risk of complications is consistent with reports in the literature, and the mean number of procedures in this contemporary study is fewer than in historic reports.
Introduction
Boys with undermasculinized external genital development and/or a 46,XY disorder of sex development (DSD), including proximal hypospadias, often receive masculinizing genitoplasty. This procedure is done to correct ventral curvature of the phallus, reposition a proximally located urethral meatus, and cosmetically correct the appearance of the external genitalia and labioscrotal folds. The need for genitoplasty can be associated with the need for gonad operations such as a biopsy when it will assist in making an accurate DSD diagnosis, gonadectomy in cases at risk for gonadoblastoma, and/or orchiopexy in patients with associated cryptorchidism.
Although many recent reports have highlighted the dilemmas in accurately defining severe hypospadias, this anatomy would commonly be accepted as representing the most severe end of the hypospadias spectrum. Despite a recent 20-year review of the severe hypospadias literature, no studies to date have assessed whether patients with a definitive DSD diagnosis have worse outcomes after masculinizing genitoplasty procedures or whether these patients require more extensive surgical maneuvers than patients with severe proximal hypospadias not associated with a specific DSD diagnosis.
Materials and Methods
We retrospectively reviewed consecutive proximal hypospadias repairs performed at our institution from 1998 to 2010. Patients were included if their initial meatal location was on the proximal portion of the shaft of the phallus or in a penoscrotal or perineal location. Patients were excluded if they had a distal or midshaft location of their hypospadias. Epidemiologic data were collected, including age at first surgery. Karyotype and specific DSD diagnosis were noted for all patients for whom this information was available. Anatomical data consisted of the meatal location at the time of surgery, before intervention, and gonad location at 9 months of age. Surgical data were collected regarding whether the procedure was planned to be done in 2 stages or not; if a urethral cutback was necessary because of deficient or dysplastic ventral skin and spongiosum; whether or not and how many Nesbit plication sutures were necessary to correct ventral angulation; whether or not the urethral plate was divided to correct ventral curvature; if corporal body grafting was done to correct ventral curvature; type of urethroplasty or scrotoplasty necessary; number and type of gonad procedures performed; number and type of postoperative complications; and number and type of hypospadias procedures performed. Clinical data were assessed for length of follow-up and long-term complications.
The decision to stage the procedure and technique for orthoplasty and urethroplasty was at the discretion of the surgeon depending upon the child's anatomy as it presented itself during the initial surgical procedure. Decisions to stage a hypospadias procedure were related to the quality of the urethral plate and extent of curvature after skin degloving and ventral dissection of the corpora cavernosa that required division of the urethral plate to complete the orthoplasty portion of the procedure. In cases when the urethral plate was divided or required substitution, the procedure was staged.
Chi-square and unpaired Student's t-tests were used to compare differences between groups with and without a specific DSD diagnosis. Significance was set at P < 0.05.
Results
In this cohort, 102 patients were identified that met our study criteria with a urethral meatus in the proximal shaft, penoscrotal, or perineal location. A specific DSD diagnosis was identified in 17 patients (group 1), with mixed gonadal dysgenesis (MGD) in 35.3%, partial androgen insensitivity syndrome (PAIS) in 29.4%, other chromosomal abnormalities (3q12 addition and partial deletion of chromosome 1) in 11.8%, and Leydig cell aplasia, Klinefelter's variant, Opitz-Frias Syndrome, and VATER variant in 5.9% each ( Table 1). The rest of the cohort without a specific DSD diagnosis was the comparison (group 2). Table 2 compares the groups with a specific DSD diagnosis (group 1) and those without a specific DSD diagnosis (group 2). There were no significant differences between groups regarding age at initial surgery or percentage of either population with a proximal shaft or penoscrotal meatal location. Group 1 did have a significantly higher incidence of a perineal meatal location (70.6 versus 36.5%) and need for staging of the initial hypospadias procedure (58.8 versus 24.7%).
From an orthoplasty standpoint, group 1 was significantly more likely to require a division of the urethral plate to ultimately achieve a straight phallus (58.8 versus 23.5%). There was no difference seen in the incidence of Nesbit dorsal plications, number of these sutures required to correct the ventral curvature, or in the incidence of corporal body grafting for ventral lengthening when comparing the two groups.
Group 2 was significantly more likely to have a tubularized incised plate (TIP) urethroplasty (35.3 versus 73.8%), whereas those in group 1 were more likely to have a scrotal skin tube inlay or onlay procedure (35.3 versus 9.5%). There was no difference in the incidence of island inlay/onlay, flap inlay, or combination distal TIP/island inlay procedures.
There was no difference seen in the incidence of simple or complex scrotoplasties to achieve a masculinized external appearance of the labial scrotal folds and correct scrotal transposition when comparing groups.
There were no significant differences between groups in mean total number of procedures required, mean total number of procedures minus planned staged procedures, number of patients with a postoperative complication, or length of clinical follow-up between the groups.
Discussion
Although most boys diagnosed with 46,XY DSD with an undermasculinized phallus and scrotum require proximal hypospadias repairs to masculinize their external genitalia, to our knowledge this is the first paper to show that having a specific DSD diagnosis does not result in worse surgical outcomes. The two groups were similar in age at surgery. The trend toward an older age at initial surgery in the group with a specific diagnosis (group 1) could be related to delaying definitive care until a full DSD evaluation is completed and the parents have decided upon a sex of rearing. One patient in group 1 was an outlier for age at initial surgery for this reason and was not operated on until after puberty.
It is unclear which patients with a proximal hypospadias presentation should have a DSD evaluation, how extensive this evaluation should be, and whether or not all hypospadias patients should be considered to have a DSD diagnosis. Although the 2006 consensus proposes classifying all patients with a congenital abnormality as DSD, at our institution any patient with a proximal urethral meatus location and/or severe ventral curvature with an associated undescended testicle would have a DSD evaluation by the pediatric endocrinology team, beginning with a karyotype and a testosterone level (if age 15-90 days, within the minipuberty period, or after HCG stimulation if older). If all results are normal, the patient would be considered to have an idiopathic proximal hypospadias without a specific DSD diagnosis. If there were abnormalities in the chromosome or hormonal profile, a specific diagnosis would be investigated further by our multidisciplinary DSD team. This would be done to give the providers, family, and, ultimately, the patient the best information available as to the long-term expectations for (1) the need for hormone supplementation at puberty, (2) surgical options to make the external genitalia consistent with the sex of rearing, (3) potential for fertility, (4) potential cancer risk, and (5) stability of sex of rearing with aging. Historically, only about 50% or less of 46,XY DSD patients receive a specific diagnosis, but with this approach we have achieved a current rate of 69% at our institution. Identifying and classifying these patients accurately will help us to better understand the associated pathophysiology and its effects on surgical outcomes. The results of this paper will assist us in more accurately preparing our surgery team for these complex procedures and in providing more accurate expectations for families and patients.
Limitations of this paper are the small sample size from a single institution with a somewhat heterogeneous population. Other confounding factors include significant differences in the type of urethroplasty performed to correct these severe defects, although there was not a difference in the number of patients who had a postoperative complication. The incidence of complications in both groups was consistent with that reported in the literature, as evidenced by a comprehensive review of 2,203 proximal hypospadias patients over the past 20 years that revealed a complication rate of 14.9-45.7%. Our complication rate is superior to long-term outcomes showing a 51% fistula rate in masculinizing genitoplasty procedures for patients with DSD, as reported after 2-stage Dennis Browne techniques. This is despite the fact that group 1 received more complicated surgical procedures to correct the urethral position when compared to the patients in group 2.
The mean number of procedures for patients in group 1 was not significantly different from that of group 2, nor was the mean number of procedures after controlling for the increased incidence of planned 2-stage procedures. This mean of 2.06 procedures is fewer than the number of masculinizing genitoplasty procedures reported in a historic cohort of males with 46,XY DSD who received genitoplasty between 1950 and 1990.
Despite the limitations of this study, we believe that a consistent, systematic DSD evaluation should be done for all boys with proximal hypospadias. This will help surgeons identify those patients who are more likely to require staged reconstructions and more complicated procedures during their masculinizing genitoplasty. This knowledge is also potentially helpful to families, both in long-term counseling and in pursuing further genetic evaluation when appropriate. It also adds to the literature highlighting the importance of optimizing the workup for these patients, so that surgical outcomes can be studied and stratified by specific diagnosis once the workup can reliably distinguish one patient from another.
Conclusions
Boys with a specific DSD diagnosis do have significantly more atypical genital anatomy prior to receiving surgery for their proximal hypospadias than do 46,XY boys with proximal hypospadias without a specific DSD diagnosis, and they are also more likely to require associated gonad procedures. However, these boys do not have an increased risk of surgical complications or a greater number of surgeries. The risk of complications associated with genitoplasty in our cohort is consistent with contemporary reports in the literature and superior to historical reports, indicating improvement in surgical outcomes over time. |
When the NHL lockout finally ends, the next phase of Vancouver Canucks goalie Roberto Luongo's career begins. It's expected he'll be traded as Cory Schneider inherits the team's starting goaltender gig. The most widely speculated destination? The Toronto Maple Leafs.
That said, isn't selling a Luongo Leafs jersey in an Ontario sporting goods store just a little presumptuous?
That photo was captured by Andrew (@manbearpiglpu) at Pro Image at the Upper Canada Mall in Newmarket, and quickly spread around social media networks — including on Luongo's unofficial (but frequently confirmed) Twitter feed:
Well-played.
The presumptuous jersey has been seen here and there in the NHL's history. Vincent Lecavalier Montreal Canadiens jerseys were seen for sale around the NHL All-Star Game. The Philadelphia Flyers most recently had jerseys for Shane Doan created before he re-signed with the Phoenix Coyotes, although they weren't for sale.
Why did Pro Image decide to start selling Roberto Luongo Leafs jerseys before the team can even trade for him? We decided to find out.
First off, it's a Luongo jersey, rather than jersey(s). And you can thank the lockout for it.
It went up about two weeks ago, as Leafs jerseys (or really anything hockey) haven't been moving during the work stoppage. One of the store's owners decided to create something to garner a little buzz and attract some eyes to the hockey merch: a sweater for Roberto Luongo, No. 1, Leafs goalie.
"It's just like a conversation thing we got going on. Trying to get people to talk about something other than the lockout," said Luigi Taddeo of Pro Image.
So they created one jersey and stuck it on a rack of non-Luongo jerseys to make it look like a full shipment had arrived. Soon, they had customers coming in uttering "Did Luongo just get traded to the Leafs?!"
"It's pretty easy to change if things don't work out," said Taddeo.
It is actually for sale, although they haven't sold one yet. Leafs player sweaters run $179.99 on the Pro Image website — and they're in-store for 30-percent off the retail price, like the other NHL jerseys available.
The discounts are a necessity to move product, as the lockout's been particularly frustrating for Toronto-based sports retailers. The 2013 Winter Classic was to feature the Leafs in a new sweater for the game — which would have been a windfall for places like Pro Image.
"Last year, we had record sales in October. The Leafs had just released a retro third jersey," said Taddeo. "People like to make fun of Leafs fans, but there are no other fans that support their team like Leafs fans do."
Hence, when the lockout ends, Taddeo's hoping that Roberto Luongo's tenure with the Leafs begins.
"If Luongo were to ever be traded to the Leafs, it would be one of the better selling jerseys that we've had," he said.
And just imagine how many they'll move when he leads them to the Cup! (Just carrying the theme of presumption to its illogical conclusion here ...)
s/t to Andrew (@manbearpiglpu) |
    def is_stopped(self):
        '''Return True once the stop token has been set.'''
        return self.__stop_token.is_set()
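    # Context sketch (hypothetical; is_set() suggests __stop_token is a
    # threading.Event):
    #
    #   import threading
    #
    #   class Worker:
    #       def __init__(self):
    #           self.__stop_token = threading.Event()
    #
    #       def stop(self):
    #           self.__stop_token.set()  # request shutdown
    #
    #       def is_stopped(self):
    #           return self.__stop_token.is_set()
|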
/* app/src/intro_c/person.c */
#include <stdio.h>
#include <stdlib.h>
#include <log4c.h>
#include "intro_c/person.h"
static log4c_category_t *prac1;
struct person {
    char *name;
    int age;
};
PERSON construct_person(const char *name, const int age) {
PERSON person1 = malloc(sizeof(struct person));
prac1 = log4c_category_get("prac");
log4c_category_log(prac1, LOG4C_PRIORITY_DEBUG, "construct_person");
if (NULL == person1) {
perror("malloc person");
return NULL;
}
person1->name = (char*)name;
person1->age = age;
return person1;
}
void destruct_person(PERSON person1) {
    log4c_category_log(prac1, LOG4C_PRIORITY_INFO, "destruct_person");
    free(person1);
    person1 = NULL; /* clears only the local copy; the caller's pointer is unchanged */
}
const char* person_getName(PERSON person1) {
return person1->name;
}
int person_getAge(PERSON person1) {
return person1->age;
}
void person_setName(PERSON person1, const char *new_name) {
person1->name = (char*)new_name;
}
void person_setAge(PERSON person1, const int new_age) {
person1->age = new_age;
}
void person_toString(PERSON person1, const int len_buf, char *buf) {
snprintf(buf, len_buf, "Person0{name = %s, age = %i}", person1->name,
person1->age);
}
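/*
 * Usage sketch (hypothetical driver, not part of this file):
 *
 *   PERSON p = construct_person("Ada", 36);
 *   if (p != NULL) {
 *       char buf[64];
 *       person_toString(p, sizeof buf, buf);
 *       printf("%s\n", buf);    // prints: Person0{name = Ada, age = 36}
 *       destruct_person(p);
 *   }
 */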
|
// BeforeSave hook that gets called before persisting the instance
func (a *Alarm) BeforeSave() (err error) {
a.ID = util.NewUuid()
a.LowerName = strings.ToLower(a.Name)
a.CreatedAt = time.Now()
a.UpdatedAt = time.Now()
return
} |
def dtype_of_gdt_upcast(gdt):
    '''Return the dtype equivalent of `gdt`, falling back to the smallest
    dtype that can safely hold it (upcast) when no exact equivalent exists.'''
param = gdt
gdt = _to_gdt_opt(gdt)
if gdt is None:
raise ValueError('`%s` is not a gdt' % param)
dtype = _DTYPE_OF_GDT_EQUIV.get(gdt)
if dtype is None:
dtype = _DTYPE_OF_GDT_UPCAST.get(gdt)
if dtype is None:
raise ValueError('`%s` has no equivalent or upcast dtype' % _str_of_gdt(gdt))
return dtype |
“I can” is empowering, while “I do” is life changing. There is a subtle yet powerful difference between those verbs.
“I can” will change your internal reality, will make you believe you are truly able to do it. But it won’t do it for you. It will always remain at the internal level, it won’t reach out.
On the other hand, “I do” will modify your surroundings and make things happen. “I do” is the reality itself, not just an internal representation of it.
This is one of the most important, yet widely ignored confusions in the personal development field.
“I Can” traps
I can lose weight.
I can be a millionaire.
I can have a fulfilling relationship.
I can create a fantastic career.
I can change the world.
All those sentences are empowering, but they are not modifying anything. They are just potential. In fact, they are even less than potential: they are a trap. The trap of “it’s ok just to say it”. The trap of “ok, I said it, now can somebody please stand up and do it?”. The trap of “I had a revelation and that’s enough”.
Having powerful thoughts and using powerful verbs – and “I do” is a powerful verb – is certainly important, but it’s not enough. It can give you a kickstart, it can motivate you, but it won’t do it. It won’t make it happen, unless you switch to the “I do” level.
“I Do” thrills
I am losing weight.
I’m becoming a millionaire.
I’m creating a fulfilling relationship.
I’m building a fantastic career.
I am changing the world, starting with myself.
Notice the difference? It’s not about the fact that you can do all those things, but about actually doing them. Notice the change in your emotions while reading this? The “I can” sentences are giving you self-confidence, clarity and perhaps some motivation boost. But the “I do” sentences are giving you the thrills.
And this is where all the fun is, at the thrill level. All the connection and joy of life takes place at the “I do” level. All the rest – including the “I can” preparation – is just a scaffold to reach this thrill level. Once you get there, the rest is not important anymore.
From “I Can” to “I Do”
How many times you’ve been stuck at the “I can” level? How many times you wrote powerful and motivating sentences but never actually did something? How many times you visualized your goals, set up milestones, allocated resources only to see the dust covering everything because you didn’t do anything to move things forward?
Switching from “I can” to “I do” is difficult. Here’s why:
“I Can” is comfortable, “I Do” is riskier
I can keeps you in the comfort zone, it won’t move you in any direction. I can in itself, without a follow up in the real world, will bury you.
I do gets you out of the comfort zone. It pushes you to break the limits and actually do. There is always a risk of failing if you do something. But if you don’t, you won’t change anything either.
“I Can” is nice, “I Do” is grumpy
At the “I can” level things are rosy and perfect. You see your goals, you imagine a self without extra fat, a perfect career, a nurturing relationship. Everything is nice.
At the “I do” level things are sometimes ugly. You have to fight, to resist, to pull, to strive. Getting there almost always means overcoming obstacles, which is not always nice.
“I Can” makes no promises, “I Do” respects all the promises
At the “I can” level you don’t make promises, you’re just telling “ok, I’m able to do it”. You won’t commit to anything. You’re just acknowledging some facts.
At the “I do” level you have to respect your commitments. Doing things means keeping your promises, making things happen, standing up for your words.
“I Can” is easy, “I Do” is hard
Because you make no real commitments, “I can” gives you room to dream big. I can be whatever I want. It’s spectacular and easy. You’re just saying it.
Once you start keeping your promises, the big dreams must become reality. And that’s hard. It’s not always spectacular and it requires constant, difficult work.
“I Can” is a thought, “I Do” is an action
Think for a moment about this situation: you met the love of your life, you fell in love, and now you want to move forward. “I can” marry you is a thought, while “I do” marry you is an action. You can substitute whatever situation you feel drawn to: “I can” have money versus “I do” have money, “I can” be happy versus “I am” happy.
***
Now, how can you really move from “I can” to “I do”? If you read the differences above carefully, I think you already know. And, surprisingly enough, it’s not complicated. You knew it all the time.
If you really, really want to switch from “I can” to “I do” you have to get out of the comfort zone. You have to be prepared to fail. You have to make and keep promises. You have to work it out. Thinking that you can do stuff is important, but making it happen is a completely different process. And in my opinion, this is where all the fun is, at making things happening. Thinking big is good, doing big is even better.
And, yes, the most important step to actually doing something is to move away from the computer right now and start making things happen. Reading blogs, including this one, won’t help for long. It might help in the beginning, it will give you some direction, but it won’t make things happen for you. The real master of your life is you, not a blog.
Step out, take risks and do something with your life.
Of course you can. Now do it! |
package cn.gson.oasys.voandfactory.userVO2;
import lombok.Data;
import lombok.ToString;
import java.util.Date;
@Data
@ToString
public class UserLoginRecordVO {
    private Long id; // primary key of the user login record table
    private String ipAddr; // IP address
    private Date loginTime; // login time
    private String sessionId; // session id
    private String browser; // browser used
private UserVO userVO;
}
|
def _calculate_mean_return_episodic(hp_returns, type_, after=0):
    if type_ == "eval":
        # Per-evaluation means over the episode axis, dropping the first
        # `after` evaluations of each run
        hp_returns = [np.mean(hp_returns[i][after:], axis=-1) for i in
                      range(len(hp_returns))]
        # Mean return of each run, then the mean and standard error over runs
        run_returns = [np.mean(hp_returns[i]) for i in
                       range(len(hp_returns))]
        mean = np.mean(run_returns)
        stderr = np.std(run_returns) / np.sqrt(len(hp_returns))
        return mean, stderr |
    /// Constructs a `BitReader` over the given byte slice.
    pub fn new(data: &[u8]) -> BitReader {
        BitReader {
            data,
            bit_buf: 0,          // bits pulled from `data` but not yet consumed
            bits_in_buf: 0,      // number of valid bits currently in `bit_buf`
            total_bits_read: 0,  // running count of bits handed out to callers
        }
    }
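    // Sketch of how these fields are typically used (hypothetical: this
    // method is not in the original excerpt; it assumes LSB-first bit
    // packing and suitable field types such as `bit_buf: u64`):
    //
    //   pub fn read_bits(&mut self, n: u32) -> u64 {
    //       while self.bits_in_buf < n {
    //           let (byte, rest) = self.data.split_first().expect("out of data");
    //           self.bit_buf |= (*byte as u64) << self.bits_in_buf;
    //           self.bits_in_buf += 8;
    //           self.data = rest;
    //       }
    //       let out = self.bit_buf & ((1u64 << n) - 1); // n < 64 assumed
    //       self.bit_buf >>= n;
    //       self.bits_in_buf -= n;
    //       self.total_bits_read += n as u64;
    //       out
    //   }
|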
Justine (Garance Marillier) is a meek freshman in college, just leaving home for the first time when we meet her in the film. She is planning on studying to be a veterinarian at a prestigious veterinary school, following in her sister’s footsteps. The students enrolled in the veterinary program alongside her have developed an almost fraternity-like mentality. The older students in the program constantly haze the first year students as a sort of initiation ritual, one particularly nasty tradition involves each of the students consuming a raw rabbit kidney. This is a huge problem, as Justine is a vegetarian. After refusing to eat it initially, Justine eventually complies once her sister Alexia (Ella Rumpf) begins to berate her. This is the first time Justine has ever consumed meat and soon enough she begins craving more, lusting for it. Not much time passes until Justine learns that animal meat won’t satisfy her cravings and that she needs to feast on something a little more taboo.
(Hint: it’s human flesh.)
RAW is director Julia Ducournau’s debut feature film, and you’d be hard pressed to realize that. With RAW she has already proven herself to be a fucking powerhouse. This film is absolutely fantastic, it’s gripping, it’s horrifying, it’s even funny at times. The film is so many different things at once, and Ducournau has a lot to say with here, especially concerning the issues of sexuality, body image, and addiction.
What is so impressive about RAW is just how unabashedly brutal it is, and this isn’t even concerning the obvious moments of cannibalism. Justine comes to the school a virgin, and almost immediately she is deemed an outsider for it. Alongside that, the hazing she must endure reaches borderline torture levels at some points, and when Justine isn’t busy trying to complete these herculean tasks, she is shunned by both her peers and her teachers. Outside of her one friendship in the form of her roommate Adrien (Rabah Nait Oufella), Justine is totally alone. Not even her sister Alexia is a source of comfort for Justine, as their already rocky relationship is made all the more tumultuous once her nasty case of cannibalism kicks in.
This isolation that Justine experiences serve to only push her farther and farther towards the edge, motivating her to her indulge in her innermost primal and carnal desires. For most college students, those carnal desires would manifest themselves in the form of sex, and the kids at Justine’s college sure have a lot of it. But not Justine, no, her desire is human flesh. Ducournau makes this biting (no pun intended) parallel fairly obvious throughout the course of the film, but that only makes it all the more effective.
Marillier’s performance as Justine is simply breathtaking. Her journey from acquiescence to firm independence is beautiful and terrifying to watch, and Marillier knocks it out of the park. At a moment’s notice Marillier is able to transform Justine from a character who’s sympathetic to one that is genuinely disturbing, which helps give her character a sense of unpredictability. Marillier’s chemistry with Alexia actress Ella Rumpf is truly entertaining to watch, as the two sisters constantly find themselves in conflict with one another. Oufella’s performance as Adrien is solid as well, as he acts as Justine’s rock, her one friend she can rely on. The trio often finds themselves onscreen together, and the dynamic is portrayed wonderfully.
Despite what the Toronto International Film Festival would have you think, RAW is not a hyper-violent, disgustingly gruesome film that will make you pass out in horror. But it’s not exactly a walk in the park either. Body horror goes hand-in-hand with cannibalism, and Ducournau executes it marvelously here. The violence in RAW is not over-the-top; Ducournau smartly keeps the violence grounded, intentionally avoiding gallons of blood in favor of making it more tangible and effective. The fact that Ducournau is able to make something like scratching a rash feel horrifying and disgusting is impressive, never mind when she indulges in the gross concept of consuming human flesh. There are master levels of body horror on display here, and Ducournau’s reluctance to go overboard with the violence serves only to emphasize the moments when she does indulge in the gore.
The film’s score, composed by Ben Wheatley regular Jim Williams, is fantastic, as it quietly disappears when it needs to, only to rear its ugly head with a jarring loudness once the characters begin to descend into madness. The score is chaotic and erratic and suits the tone and subject matter of the film perfectly.
RAW is a masterfully executed genre film, giving horror fans some delightfully stylish scares, as well as having a deeper meaning behind it all. With its great cast and stellar directing, RAW is a triumph, and will leave your mind with something to chew on long after the film is over.
Okay, that pun was intended.
9/10
Advertisements
Share this: Twitter
Reddit
Tumblr
Facebook
Google
Like this: Like Loading... |
Investigating the Impact of Air-Service Supply on Local Demand—A Causal Analysis
Establishing the extent to which the level of transport supply affects demand generation remains a daunting problem for the transport analyst because of reciprocal causation and dependency between supply and demand. After the US deregulation of domestic air transport, unprecedented increases in air-service supply were observed at many US airports, increases that can be considered clearly independent of local service requirements. The supply measures reflect improvements in service both to nearby major airports and to the United States as a whole. In this paper, a sample of three airports is examined in an attempt to establish causal relationships between local demand and several measures of air-transport supply. Causality is established by using cross-correlation-function analysis, and the impacts of supply on demand are determined by using time-series transfer-function modelling. Causality is found to be location dependent, but supply tends to affect demand with a 3–6 month lag.
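The transfer-function models referred to are presumably of the standard Box–Jenkins form (an assumption; the abstract does not give the specification):

$$y_t = \frac{\omega(B)}{\delta(B)}\, x_{t-b} + N_t,$$

where $y_t$ is local demand, $x_t$ an air-service supply measure, $B$ the backshift operator, $b$ the dead time (the 3–6 month lag found here), and $N_t$ an ARMA noise term capturing demand dynamics not explained by supply. With both series in logarithms, the steady-state gain $\omega(1)/\delta(1)$ is the long-run supply elasticity of demand.
|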
package com.brucebat.message.common.util;
import org.apache.commons.lang3.StringUtils;
/**
 * Markdown utility class
 *
 * @version 1.0
 * @author : <NAME>
 * @since Created in 2020/7/21
 */
public class MarkdownUtils {
    /**
     * Highest title level
     */
    private static final int MAX_TITLE_LEVEL = 6;
    /**
     * Lowest title level
     */
    private static final int MIN_TITLE_LEVEL = 1;
    /**
     * Build a markdown-formatted title of the given level; the level is
     * clamped to a minimum of 1 and a maximum of 6.
     *
     * @param level title level
     * @param title title text
     * @return markdown-formatted title
     */
public static String getTitle(int level, String title) {
if (level < MIN_TITLE_LEVEL) {
level = MIN_TITLE_LEVEL;
}
if (level > MAX_TITLE_LEVEL) {
level = MAX_TITLE_LEVEL;
}
StringBuilder stringBuilder = new StringBuilder();
for (int i = 0; i < level; i++) {
stringBuilder.append("#");
}
return stringBuilder.toString() + " " + title + " " + stringBuilder.toString() + " \n";
}
    /**
     * Build a quote
     *
     * @param quote text to quote
     * @return formatted result
     */
public static String getQuote(String quote){
return "> " + quote + " \n";
}
    /**
     * Bold
     *
     * @param text text to process
     * @return the text in bold
     */
public static String getBold(String text) {
return "**" + text + "**";
}
    /**
     * Italic
     *
     * @param text text to process
     * @return the text in italics
     */
public static String getItalic(String text) {
return "*" + text + "*";
}
    /**
     * Build a web link
     *
     * @param title page title
     * @param link page URL
     * @return formatted result
     */
public static String getLink(String title, String link) {
return "[" + title + "]" + "(" + link + ")";
}
    /**
     * Build an image link
     *
     * @param title title
     * @param imageLink image URL
     * @return formatted result
     */
public static String getImageLink(String title, String imageLink) {
return "![" + title + "]" + "(" + imageLink + ")";
}
/**
     * Builds a markdown unordered list
     * Note: only a single, flat level of list structure is produced
     *
     * @param texts list items
     * @return the markdown-formatted list, or null if texts is null
*/
public static String getUnsortedList(String... texts) {
if (null == texts){
return null;
}
StringBuilder result = new StringBuilder();
for (String text : texts){
if (StringUtils.isBlank(text)) {
continue;
}
result.append("- ").append(text).append(" \n");
}
return result.toString();
}
}
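// A hypothetical usage sketch (not part of the original class): composing a
// short markdown message from the helpers above.
class MarkdownUtilsDemo {
    public static void main(String[] args) {
        String message = MarkdownUtils.getTitle(2, "Release Notes")
                + MarkdownUtils.getQuote("Deployed at 10:00")
                + MarkdownUtils.getUnsortedList(
                        MarkdownUtils.getBold("fix: NPE on login"),
                        MarkdownUtils.getLink("details", "https://example.com"));
        System.out.println(message);
    }
}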
|
<filename>tools/onnx2daq/OnnxConverterImpl.cpp
#include <common/helper.h>
#include <tools/onnx2daq/OnnxConverter.h>
#include "NodeAttrHelper.h"
using std::string;
using std::vector;
using Shape = Shaper::Shape;
namespace dnn {
// void OnnxConverter::AddConv(const string &input_name,
// const string &ori_weight_name,
// const dnn::optional<std::string> &bias_name,
// const int32_t padding_left,
// const int32_t padding_right,
// const int32_t padding_top,
// const int32_t padding_bottom,
// const int32_t stride_width,
// const int32_t stride_height,
// const int32_t dilation_width,
// const int32_t dilation_height,
// int group,
// const string &output_name) {
// flatbuffers::Offset<DNN::Layer> layer;
// if (dilation_width != 1 || dilation_height != 1) {
// if (stride_width != 1 || stride_height != 1) {
// throw std::invalid_argument(
// "Both dilations and strides > 1 is not supported for now");
// }
// if (!(padding_left == padding_right && padding_right == padding_top
// && padding_top == padding_bottom)) {
// throw std::invalid_argument(
// "Both dilations and asymmetric pads is not supported for
// now");
// }
// VLOG(5) << "Dilations of conv: " << dilation_width << ", " <<
// dilation_height << ", converting.."; const auto s2b_name = input_name
// + "_s2b"; const auto im_name = input_name + "_conv_imm"; const auto
// b2s_name = input_name + "_b2s"; std::vector<int> new_pads = pads;
// const auto input_shape = shaper_[input_name];
// new_pads[1] = (input_shape[2] + pads[1] + (dilations[0] - 1)) /
// dilations[0] * dilations[0] -
// input_shape[2];
// new_pads[3] = (input_shape[1] + pads[3] + (dilations[1] - 1)) /
// dilations[1] * dilations[1] -
// input_shape[1];
// VLOG(5) << input_shape << ", " << pads << ", " << dilations << ", "
// << new_pads;
// // Why "AllowShortBlocksOnASingleLine: false" doesn't work on it?
// // clang-format off
// {
// AddLayerSPACE_TO_BATCH_ND(input_name, {dilation_height,
// dilation_width}, new_pads, s2b_name);
// }
// // clang-format on
// {
// // paddings are applied in spacetobatch
// AddConv(s2b_name, strides, vector<int>{0, 0, 0, 0},
// vector<int>{1, 1}, group, ori_weight_name, bias_name,
// im_name);
// }
// // clang-format off
// {
// AddLayerBATCH_TO_SPACE_ND(im_name, dilations, b2s_name);
// }
// // clang-format on
// {
// const auto b2s_shape = shaper_[b2s_name];
// const std::vector<int32_t> starts{0, 0, 0, 0};
// const std::vector<int32_t> ends{
// static_cast<int32_t>(b2s_shape[0]),
// static_cast<int32_t>(b2s_shape[1]) - (new_pads[1] - pads[1]),
// static_cast<int32_t>(b2s_shape[2]) - (new_pads[3] - pads[3]),
// static_cast<int32_t>(b2s_shape[3])};
// const std::vector<int32_t> strides_in_ss{1, 1, 1, 1};
// const int32_t begin_mask = 0;
// const int32_t end_mask = 0;
// const int32_t shrink_axis_mask = 0;
// AddLayerSTRIDED_SLICE(b2s_name, starts, ends, strides_in_ss,
// begin_mask, end_mask, shrink_axis_mask,
// output_name);
// }
// return;
// }
//
// if (!onnx_tensors_.has(ori_weight_name)) {
// throw std::invalid_argument("The weight of convolution must be
// known");
// }
// const auto &onnx_weight = onnx_tensors_.at(ori_weight_name);
// if (group == 1) {
// VLOG(5) << "Vanilla conv";
// AddLayerConvImpl(input_name, ori_weight_name, bias_name, pads,
// strides,
// output_name);
// } else if (onnx_weight.shape[1] == 1) { // depthwise
// VLOG(5) << "Depthwise conv";
// AddLayerDepthwiseConvImpl(input_name, ori_weight_name, bias_name,
// pads,
// strides, onnx_weight.shape[0] / group,
// output_name);
// } else {
// // TODO: Support it
// throw std::invalid_argument("group != 1 is not supported");
// }
// }
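// Summary of the dilation trick documented above: pad the input so each
// spatial dimension becomes a multiple of the dilation, SPACE_TO_BATCH_ND
// with the dilations as block sizes, run an equivalent stride-1 convolution,
// BATCH_TO_SPACE_ND, then STRIDED_SLICE away the extra padding.
//
// Every generated WriteDaqLayer_* method below follows the same pattern:
// (1) convert constant ONNX inputs to NNAPI layout (axes 0231 for
// activations and conv weights, axes 1230 for depthwise weights, identity
// for biases), (2) run shape inference through shaper_, and (3) serialize
// the layer and its parameters into the flatbuffer via builder_.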
// OnnxConverter auto generated methods start
void OnnxConverter::WriteDaqLayer_CONV_2D(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, FuseCode fuse_code, bool nchw,
int32_t dilation_x, int32_t dilation_y, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = weight;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
if (bias.has_value()) {
const auto name = bias.value();
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiIdentity(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Conv(m(input), m(weight), padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, nchw, dilation_x,
dilation_y, output);
const auto input_param = DNN::CreateCONV_2D_InputDirect(
builder_, m(input).c_str(), m(weight).c_str(),
bias.has_value() ? bias.value().c_str() : nullptr, padding_left,
padding_right, padding_top, padding_bottom, stride_x, stride_y,
ConvertFuseCodeType(fuse_code), nchw, dilation_x, dilation_y);
const auto output_param =
DNN::CreateCONV_2D_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateCONV_2D(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::CONV_2D, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_AVERAGE_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
FuseCode fuse_code, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Pool(m(input), padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
const auto input_param = DNN::CreateAVERAGE_POOL_2D_InputDirect(
builder_, m(input).c_str(), padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width, kernel_height,
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateAVERAGE_POOL_2D_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateAVERAGE_POOL_2D(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::AVERAGE_POOL_2D, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_MAX_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
FuseCode fuse_code, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Pool(m(input), padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
const auto input_param = DNN::CreateMAX_POOL_2D_InputDirect(
builder_, m(input).c_str(), padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width, kernel_height,
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateMAX_POOL_2D_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateMAX_POOL_2D(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::MAX_POOL_2D, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_RELU(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateRELU_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateRELU_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateRELU(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::RELU, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_SOFTMAX(const std::string &input, float beta,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateSOFTMAX_InputDirect(builder_, m(input).c_str(), beta);
const auto output_param =
DNN::CreateSOFTMAX_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateSOFTMAX(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::SOFTMAX, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_FULLY_CONNECTED(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, FuseCode fuse_code,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = weight;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiIdentity(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
if (bias.has_value()) {
const auto name = bias.value();
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiIdentity(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.FC(m(input), m(weight), output);
const auto input_param = DNN::CreateFULLY_CONNECTED_InputDirect(
builder_, m(input).c_str(), m(weight).c_str(),
bias.has_value() ? bias.value().c_str() : nullptr,
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateFULLY_CONNECTED_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateFULLY_CONNECTED(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(
builder_, DNN::LayerType::FULLY_CONNECTED, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_ADD(const std::string &input1,
const std::string &input2,
FuseCode fuse_code,
const std::string &output) {
{
const auto name = input1;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = input2;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Eltwise(m(input1), m(input2), output);
const auto input_param = DNN::CreateADD_InputDirect(
builder_, m(input1).c_str(), m(input2).c_str(),
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateADD_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateADD(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::ADD, 0, 0, 0,
0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_CONCATENATION(
const std::vector<std::string> &inputs, int32_t axis,
const std::string &output) {
for (const auto &name : inputs) {
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
const auto inputs_fb = FbStrVector(inputs);
shaper_.Concat(inputs, axis, output);
const auto input_param =
DNN::CreateCONCATENATION_InputDirect(builder_, &inputs_fb, axis);
const auto output_param =
DNN::CreateCONCATENATION_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateCONCATENATION(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::CONCATENATION,
0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_DEPTHWISE_CONV_2D(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, int32_t depth_multiplier,
FuseCode fuse_code, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = weight;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes1230(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
if (bias.has_value()) {
const auto name = bias.value();
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiIdentity(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.DepthwiseConv(m(input), m(weight), padding_left, padding_right,
padding_top, padding_bottom, stride_x, stride_y,
output);
const auto input_param = DNN::CreateDEPTHWISE_CONV_2D_InputDirect(
builder_, m(input).c_str(), m(weight).c_str(),
bias.has_value() ? bias.value().c_str() : nullptr, padding_left,
padding_right, padding_top, padding_bottom, stride_x, stride_y,
depth_multiplier, ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateDEPTHWISE_CONV_2D_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateDEPTHWISE_CONV_2D(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::DEPTHWISE_CONV_2D, 0, 0, 0,
0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_BATCH_TO_SPACE_ND(
const std::string &input, const std::vector<int32_t> &block_sizes,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.BatchToSpace(m(input), block_sizes, output);
const auto input_param = DNN::CreateBATCH_TO_SPACE_ND_InputDirect(
builder_, m(input).c_str(), &block_sizes);
const auto output_param =
DNN::CreateBATCH_TO_SPACE_ND_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateBATCH_TO_SPACE_ND(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::BATCH_TO_SPACE_ND, 0, 0, 0,
0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_SPACE_TO_BATCH_ND(
const std::string &input, const std::vector<int32_t> &block_sizes,
const std::vector<int32_t> &pads, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.SpaceToBatch(m(input), block_sizes, pads, output);
const auto input_param = DNN::CreateSPACE_TO_BATCH_ND_InputDirect(
builder_, m(input).c_str(), &block_sizes, &pads);
const auto output_param =
DNN::CreateSPACE_TO_BATCH_ND_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateSPACE_TO_BATCH_ND(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::SPACE_TO_BATCH_ND, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_STRIDED_SLICE(
const std::string &input, const std::vector<int32_t> &starts,
const std::vector<int32_t> &ends, const std::vector<int32_t> &strides,
int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.StridedSlice(m(input), starts, ends, strides, begin_mask, end_mask,
shrink_axis_mask, output);
const auto input_param = DNN::CreateSTRIDED_SLICE_InputDirect(
builder_, m(input).c_str(), &starts, &ends, &strides, begin_mask,
end_mask, shrink_axis_mask);
const auto output_param =
DNN::CreateSTRIDED_SLICE_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateSTRIDED_SLICE(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::STRIDED_SLICE,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_MUL(const std::string &input1,
const std::string &input2,
FuseCode fuse_code,
const std::string &output) {
{
const auto name = input1;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = input2;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Eltwise(m(input1), m(input2), output);
const auto input_param = DNN::CreateMUL_InputDirect(
builder_, m(input1).c_str(), m(input2).c_str(),
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateMUL_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateMUL(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::MUL, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_DEQUANTIZE(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateDEQUANTIZE_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateDEQUANTIZE_OutputDirect(builder_, output.c_str());
const auto param =
DNN::CreateDEQUANTIZE(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::DEQUANTIZE, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_LOCAL_RESPONSE_NORMALIZATION(
const std::string &input, int32_t radius, float bias, float alpha,
float beta, const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateLOCAL_RESPONSE_NORMALIZATION_InputDirect(
builder_, m(input).c_str(), radius, bias, alpha, beta);
const auto output_param =
DNN::CreateLOCAL_RESPONSE_NORMALIZATION_OutputDirect(builder_,
output.c_str());
const auto param = DNN::CreateLOCAL_RESPONSE_NORMALIZATION(
builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::LOCAL_RESPONSE_NORMALIZATION,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_TANH(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateTANH_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateTANH_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateTANH(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::TANH, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_FLOOR(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateFLOOR_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateFLOOR_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateFLOOR(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::FLOOR, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_LOGISTIC(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateLOGISTIC_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateLOGISTIC_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateLOGISTIC(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::LOGISTIC, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_PRELU(const std::string &input,
const std::string &alpha,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param = DNN::CreatePRELU_InputDirect(
builder_, m(input).c_str(), m(alpha).c_str());
const auto output_param =
DNN::CreatePRELU_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreatePRELU(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::PRELU, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_POW(const std::string &input,
const std::string &exp,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreatePOW_InputDirect(builder_, m(input).c_str(), m(exp).c_str());
const auto output_param =
DNN::CreatePOW_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreatePOW(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::POW, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_NEG(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateNEG_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateNEG_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateNEG(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::NEG, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_MINIMUM(const std::string &input1,
const std::string &input2,
const std::string &output) {
{
const auto name = input1;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = input2;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Eltwise(m(input1), m(input2), output);
const auto input_param = DNN::CreateMINIMUM_InputDirect(
builder_, m(input1).c_str(), m(input2).c_str());
const auto output_param =
DNN::CreateMINIMUM_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateMINIMUM(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::MINIMUM, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_MAXIMUM(const std::string &input1,
const std::string &input2,
const std::string &output) {
{
const auto name = input1;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = input2;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Eltwise(m(input1), m(input2), output);
const auto input_param = DNN::CreateMAXIMUM_InputDirect(
builder_, m(input1).c_str(), m(input2).c_str());
const auto output_param =
DNN::CreateMAXIMUM_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateMAXIMUM(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::MAXIMUM, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_LOG(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateLOG_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateLOG_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateLOG(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::LOG, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_ABS(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateABS_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateABS_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateABS(builder_, input_param, output_param);
const auto layer =
DNN::CreateLayer(builder_, DNN::LayerType::ABS, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_EXP(const std::string &input,
const std::string &output) {
{
const auto name = input;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Identity(m(input), output);
const auto input_param =
DNN::CreateEXP_InputDirect(builder_, m(input).c_str());
const auto output_param =
DNN::CreateEXP_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateEXP(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::EXP, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
void OnnxConverter::WriteDaqLayer_SUB(const std::string &input1,
const std::string &input2,
FuseCode fuse_code,
const std::string &output) {
{
const auto name = input1;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
{
const auto name = input2;
if (onnx_tensors_.has(name)) {
const auto &onnx_tensor = onnx_tensors_.at(name);
const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor);
shaper_.AddShape(name, new_tensor.shape);
nnapi_tensors_[name] = new_tensor;
CreateTensorFb(name, new_tensor);
}
}
shaper_.Eltwise(m(input1), m(input2), output);
const auto input_param = DNN::CreateSUB_InputDirect(
builder_, m(input1).c_str(), m(input2).c_str(),
ConvertFuseCodeType(fuse_code));
const auto output_param =
DNN::CreateSUB_OutputDirect(builder_, output.c_str());
const auto param = DNN::CreateSUB(builder_, input_param, output_param);
const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::SUB, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
}
// OnnxConverter auto generated methods end
} // namespace dnn
|
<filename>src-util/brad/util/data/BeanManager.java
package brad.util.data;
import brad.util.sys.BRADException;
import java.util.List;
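/**
 * Generic CRUD contract for {@link Bean} types; implementations decide how
 * the beans are persisted.
 */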
public interface BeanManager<T extends Bean> {
long create(T t) throws BRADException;
T fetch(long id) throws BRADException;
List<T> fetchAll(boolean condition) throws BRADException;
void update(T t) throws BRADException;
void delete(T t) throws BRADException;
}
|
# -*- coding: utf-8 -*-
import math
# Input
l = list(map(int, input().split()))
N = l[0]
K = l[1]
win_rate = 0
def calcNeedCount(d, k):
point = d
count = 0
while point < k:
point = point * 2
count += 1
return count
n = N  # ints are immutable, so no copy is needed
while n > 0:
dice = n
# Calc need count
need = calcNeedCount(dice, K)
#print("need", need)
win_rate += (1/N) * math.pow(1/2, need)
n += -1
# Print
print(win_rate)
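# Worked example: N = 3, K = 10.
#   die shows 1 -> 4 doublings (1->2->4->8->16), contributes (1/3)*(1/2)^4
#   die shows 2 -> 3 doublings (2->4->8->16),    contributes (1/3)*(1/2)^3
#   die shows 3 -> 2 doublings (3->6->12),       contributes (1/3)*(1/2)^2
#   total = (1/3)*(1/16 + 1/8 + 1/4) = 7/48 ≈ 0.145833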
|
import android.content.Context;
import android.graphics.Typeface;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.HashMap;

/**
 * Created by Christophe on 20/06/2014.
 */
public class TypeFaceUtils {
private static TypeFaceUtils mInstance;
private Context mAppContext;
private HashMap<String, Typeface> mTypefaceCache;
public static TypeFaceUtils getInstance(@NotNull Context context) {
if (mInstance == null) {
mInstance = new TypeFaceUtils(context);
}
return mInstance;
}
private TypeFaceUtils(@NotNull Context context) {
this.mAppContext = context.getApplicationContext();
mTypefaceCache = new HashMap<String, Typeface>();
}
@Nullable
public Typeface getTypeFaceByNameFromAssets(@NotNull String typeFaceFileName) {
if (mTypefaceCache.containsKey(typeFaceFileName)) {
return mTypefaceCache.get(typeFaceFileName);
}
else
{
            Typeface output = Typeface.createFromAsset(mAppContext.getAssets(), typeFaceFileName);
            mTypefaceCache.put(typeFaceFileName, output);
return output;
}
}
} |
<filename>src/decorators/models/provider.interface.ts
import { QRequest } from './qrequest.model';
import { QResponse } from './qresponse.model';
export interface Provider {
Request: QRequest;
Response: QResponse;
}
|
<filename>cmd/sdrain/sdrain.go
package sdrain
import (
"fmt"
"kubectl-sdrain/pkg/sdrain"
"k8s.io/kubectl/pkg/cmd/drain"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/kubectl/pkg/util/i18n"
"github.com/spf13/cobra"
"k8s.io/kubectl/pkg/scheme"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/printers"
)
type SafeDrainCmdOptions struct {
PrintFlags *genericclioptions.PrintFlags
ToPrinter func(string) (printers.ResourcePrinterFunc, error)
safeDrainer *sdrain.Helper
nodeInfos []*resource.Info
genericclioptions.IOStreams
}
var (
safeDrainLong = templates.LongDesc(i18n.T(`
Safe drain node in preparation for maintenance.
The given node will be marked unschedulable to prevent new pods from arriving.`))
safeDrainExample = templates.Examples(i18n.T(`
# Safe drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job or StatefulSet on it.
$ kubectl safe-drain foo`))
)
func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *SafeDrainCmdOptions {
o := &SafeDrainCmdOptions{
PrintFlags: genericclioptions.NewPrintFlags("safe drained").WithTypeSetter(scheme.Scheme),
IOStreams: ioStreams,
safeDrainer: &sdrain.Helper{
Out: ioStreams.Out,
ErrOut: ioStreams.ErrOut,
},
}
return o
}
func NewCmdSafeDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
o := NewDrainCmdOptions(f, ioStreams)
cmd := &cobra.Command{
Use: "safe-drain NODE",
DisableFlagsInUseLine: true,
Short: i18n.T("Safe drain node in preparation for maintenance"),
Long: safeDrainLong,
Example: safeDrainExample,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, cmd, args))
cmdutil.CheckErr(o.RunCordon(f))
cmdutil.CheckErr(o.RunSafeDrain())
},
}
cmd.Flags().BoolVar(&o.safeDrainer.Force, "force", o.safeDrainer.Force, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.")
cmd.Flags().BoolVar(&o.safeDrainer.DeleteLocalData, "delete-local-data", o.safeDrainer.DeleteLocalData, "Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).")
cmd.Flags().DurationVar(&o.safeDrainer.Timeout, "timeout", o.safeDrainer.Timeout, "The length of time to wait before giving up, zero means infinite")
cmd.Flags().StringVarP(&o.safeDrainer.Selector, "selector", "l", o.safeDrainer.Selector, "Selector (label query) to filter on")
cmdutil.AddDryRunFlag(cmd)
return cmd
}
func (o *SafeDrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
var err error
if len(args) == 0 && !cmd.Flags().Changed("selector") {
		return cmdutil.UsageErrorf(cmd, "USAGE: %s [flags]", cmd.Use)
}
if len(args) > 0 && len(o.safeDrainer.Selector) > 0 {
return cmdutil.UsageErrorf(cmd, "error: cannot specify both a node name and a --selector option")
}
o.safeDrainer.DryRun = cmdutil.GetDryRunFlag(cmd)
if o.safeDrainer.Client, err = f.KubernetesClientSet(); err != nil {
return err
}
o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) {
o.PrintFlags.NamePrintFlags.Operation = operation
if o.safeDrainer.DryRun {
err := o.PrintFlags.Complete("%s (dry run)")
if err != nil {
return nil, err
}
}
printer, err := o.PrintFlags.ToPrinter()
if err != nil {
return nil, err
}
return printer.PrintObj, nil
}
builder := f.NewBuilder().
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
ResourceNames("nodes", args...).
SingleResourceType().
Flatten()
if len(o.safeDrainer.Selector) > 0 {
builder = builder.LabelSelectorParam(o.safeDrainer.Selector).
ResourceTypes("nodes")
}
r := builder.Do()
if err = r.Err(); err != nil {
return err
}
return r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
if info.Mapping.Resource.GroupResource() != (schema.GroupResource{Group: "", Resource: "nodes"}) {
return fmt.Errorf("error: expected resource of type node, got %q", info.Mapping.Resource)
}
o.nodeInfos = append(o.nodeInfos, info)
return nil
})
}
func (o *SafeDrainCmdOptions) RunCordon(f cmdutil.Factory) error {
var args []string
for _, info := range o.nodeInfos {
args = append(args, info.Name)
}
if o.safeDrainer.DryRun {
args = append(args, "--dry-run")
}
cordonCmd := drain.NewCmdCordon(f, o.IOStreams)
cordonCmd.SetArgs(args)
return cordonCmd.Execute()
}
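// RunSafeDrain migrates pods off each cordoned node in turn. If a node fails
// to drain, the remaining un-drained nodes are listed on stderr and the first
// error is returned.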
func (o *SafeDrainCmdOptions) RunSafeDrain() error {
printObj, err := o.ToPrinter("safe drained")
if err != nil {
return err
}
drainedNodes := sets.NewString()
var fatal error
for _, info := range o.nodeInfos {
var err error
if !o.safeDrainer.DryRun {
err = o.safeDeleteOrEvictPodsSimple(info)
}
if err == nil || o.safeDrainer.DryRun {
drainedNodes.Insert(info.Name)
printObj(info.Object, o.Out)
continue
} else {
fmt.Fprintf(o.ErrOut, "error: unable to safe-drain node %q, aborting command...\n\n", info.Name)
var remainingNodes []string
fatal = err
for _, remainingInfo := range o.nodeInfos {
if drainedNodes.Has(remainingInfo.Name) {
continue
}
remainingNodes = append(remainingNodes, remainingInfo.Name)
}
if len(remainingNodes) > 0 {
fmt.Fprintf(o.ErrOut, "There are pending nodes to be drained:\n")
for _, nodeName := range remainingNodes {
fmt.Fprintf(o.ErrOut, " %s\n", nodeName)
}
}
break
}
}
return fatal
}
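// safeDeleteOrEvictPodsSimple lists the pods scheduled on the node and asks
// the drain helper to migrate them; on failure it re-lists the node's pods so
// the operator can see which ones are still pending.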
func (o *SafeDrainCmdOptions) safeDeleteOrEvictPodsSimple(nodeInfo *resource.Info) error {
list, errs := o.safeDrainer.GetPodsForDeletion(nodeInfo.Name)
if errs != nil {
return utilerrors.NewAggregate(errs)
}
if err := o.safeDrainer.MigratePods(list); err != nil {
pendingList, newErrs := o.safeDrainer.GetPodsForDeletion(nodeInfo.Name)
if pendingList != nil {
pods := pendingList.Pods()
if len(pods) != 0 {
fmt.Fprintf(o.ErrOut, "There are pending pods in node %q when an error occurred: %v\n", nodeInfo.Name, err)
for _, pendingPod := range pods {
fmt.Fprintf(o.ErrOut, "%s/%s\n", "pod", pendingPod.Name)
}
}
}
if newErrs != nil {
fmt.Fprintf(o.ErrOut, "Following errors occurred while getting the list of pods to delete:\n%s", utilerrors.NewAggregate(newErrs))
}
return err
}
return nil
}
|
/**
* Return true if ice is under player
*
* @return
*/
private boolean isIceUnderPlayer() {
Entity curEnt = Globals.obStore.
getEntityUnder(Globals.player, false);
if (curEnt == null) {
return false;
}
boolean corrType = curEnt instanceof Obstacle;
        boolean corrName = curEnt.getName().equalsIgnoreCase("Ice");
        return corrType && corrName;
} |
/**
* Writes a new, closed, XML element to PrintWriter. If another XML element has been written and not closed, writes
* this element as a child. Adds passed attributes and character data.
*/
public void addClosedTextElement(String elementName, String text, String... attributes) {
addElement(elementName);
addAttribute(attributes);
addText(text, false);
closeElement(false);
} |
It turns out the oft-quoted $200 million taxpayer-backed bond that brought the Raiders back to the East Bay in 1995 is going to end up costing $350 million.
It’s a debt that Oakland and Alameda County taxpayers will be paying off until 2025 — well after Raiders owner Mark Davis is enjoying his new digs in Las Vegas.
“It was a bad deal,” said Oakland City Council President Larry Reid, who was chief of staff for then-Mayor Elihu Harris in the 1990s when the city and county agreed to an expansion of the Coliseum to bring the Raiders back from Los Angeles. “The projections were off, but everyone was just caught up in the emotions of having the Raiders return.”
The original idea was to pay off the bond through the sale of personal seat licenses. However, the seat license sales never came close to hitting the mark, leaving taxpayers liable for the difference.
And although most accounts set the debt at $200 million, interest brought the bill to $350.4 million, said Scott McKibben, executive director of the Oakland Alameda County Coliseum Authority.
Even after repeated refinancings and interest payments, Oakland and Alameda County taxpayers are still splitting a $13 million tab every year through 2025.
The high annual payout is one of the key reasons that Oakland Mayor Libby Schaaf and the county Board of Supervisors balked at using public money to finance a new Raiders stadium.
And it’s not just the stadium redo that isn’t completely paid for. The Coliseum authority still owes more than $68 million on 1990s renovations for Oracle Arena, about $7 million a year, which the Warriors now pay. But who pays off that debt when the Warriors leave for San Francisco hasn’t been decided. If it’s the city and county, their total tab on the Coliseum complex will be about $20 million annually.
“We’ll cross that bridge when we come to it,” Reid said.
There’s one last insult. As The Chronicle’s Kimberly Veklerov reported, the Coliseum authority actually loses money on game days when the Raiders are in town. If the team sticks around until its Las Vegas palace is ready in 2020, attendance in Oakland could well plunge — right along with the authority’s take.
McKibben figures total game-day losses could exceed $2 million a year if attendance drops even 10 to 15 percent.
“We are better off if they don’t play here, and that’s the bottom line,” McKibben said.
Strikeout: The head baseball coach at San Francisco’s Galileo high school has been put on administrative leave after he allegedly shouted out Chinese-sounding gibberish to mock an Asian American player.
“Mr. (Don) Papa is not coaching the Galileo baseball team,” Gentle Blythe, spokeswoman for the San Francisco Unified School District, said in an email.
Blythe said the district placed Papa on paid administrative leave March 15 “as a result of information that emerged in the course of the investigation” into the racially charged incident.
The alleged incident happened during a varsity game between Galileo and Lick-Wilmerding High School at Skyline College on Feb. 24.
Papa yelled out his remarks at a player who was standing on first base and appeared to be puzzled by the coach’s instructions. Nakia Kashima, father of another Galileo player, described it as “Ching, chong, something, something.” He reported it to school officials.
Papa, 66, has coached sports for 25 years at Galileo, where he is also a social studies teacher.
He has not responded to requests for comment. On his Facebook page, where he goes by the name Don Steven, Papa told a supporter on Friday, “Can’t comment now — but I have some fight.”
Albany woes: The Albany Unified School District has just notified parents that a second incident with hate-speech overtones has been uncovered this month at Albany High.
According to district Superintendent Val Williams, “administrators were notified that a group of seven ninth-graders had been engaging in Nazi salutes to each other when passing in the halls.”
Williams said the saluting had been going on for “several months.”
She said the vice principal immediately “brought these students into the office, determined what occurred, contacted their parents and took appropriate action.”
The revelation comes just a week after officials disclosed that police were investigating a group of students who had allegedly created an Instagram account targeting African American students and an African American staff member. One photo reportedly showed a black doll alongside images of a Ku Klux Klan member, a torch and a noose.
Over the weekend, some 300 parents, students and others rallied outside Albany High in a public stand against bullying and racism.
Emergency call: As if addicts shooting up in the stacks hasn’t been enough of a problem, the San Francisco Main Library has been dealing with a plumbing crisis since Friday that knocked out most of its public bathrooms.
Michelle Jeffers, a spokeswoman for the library, says crews working through the weekend pinpointed the trouble to a couple of faulty pumps connected to the sewage system that were replaced just two years ago. A pair of emergency replacements, costing $20,000, weren’t due to arrive until Wednesday.
In the meantime, the library has brought in portable toilets for the public on Fulton Street, to go along with a few toilets in the building that have been deemed safe.
It was a bit of an inconvenience when the library staff hosted 60 members of the California Library Association at a meeting Friday morning on the Main’s lower level. What happened when they got the call?
“We escorted them to the staff restrooms on the sixth floor,” Jeffers said.
Incidentally, Jeffers says senior staffers dealing with the mess have dubbed themselves the “Stuff Happens Incident Team,” also known as ... well, you know.
San Francisco Chronicle columnists Phillip Matier and Andrew Ross appear Sundays, Mondays and Wednesdays. Matier can be seen on the KPIX TV morning and evening news. He can also be heard on KCBS radio Monday through Friday at 7:50 a.m. and 5:50 p.m. Got a tip? Call (415) 777-8815, or email [email protected]. Twitter: @matierandross |
/**
* Information about snapshot commands.
* <p/>
* This object holds all the information a regular ApiCommand object provides,
* and adds specific information about the results of a snapshot command.
* <p/>
* Depending on the type of the service where the snapshot command was run, a
* different result property will be populated.
*/
@XmlRootElement(name = "snapshotCommand")
public class ApiSnapshotCommand extends ApiCommand {
private ApiHBaseSnapshotResult hbaseResult;
private ApiHdfsSnapshotResult hdfsResult;
/** Results for snapshot commands on HBase services. */
@XmlElement
public ApiHBaseSnapshotResult getHBaseResult() {
return hbaseResult;
}
public void setHBaseResult(ApiHBaseSnapshotResult hbaseResult) {
this.hbaseResult = hbaseResult;
}
/** Results for snapshot commands on Hdfs services. */
@XmlElement
public ApiHdfsSnapshotResult getHdfsResult() {
return hdfsResult;
}
public void setHdfsResult(ApiHdfsSnapshotResult hdfsResult) {
this.hdfsResult = hdfsResult;
}
@Override
public boolean equals(Object o) {
if (!super.equals(o)) {
return false;
}
ApiSnapshotCommand that = (ApiSnapshotCommand) o;
return Objects.equal(hbaseResult, that.getHBaseResult()) &&
Objects.equal(hdfsResult, that.getHdfsResult());
}
@Override
public int hashCode() {
return Objects.hashCode(super.hashCode(), hbaseResult, hdfsResult);
}
@Override
public String toString() {
return super.toStringHelper()
.add("hbaseResult", hbaseResult)
.add("hdfsResult", hdfsResult)
.toString();
}
} |
/**
* Consumes the data in the internal string buffer. Whitespace is trimmed and the
* buffer is emptied.
*
* @return The string representing the trimmed string value of the buffer.
*/
protected String consumeCharData()
{
String data = charData.toString().trim();
charData.setLength( 0 );
shallProcessCharData = false;
return data;
} |
def custom_upload_to(instance, filename):
    """Delete the user's previous avatar before storing the new image."""
    # Imported here to avoid a circular import; the module path is assumed.
    from .models import Profile

    if instance.pk:  # only an existing profile has an old avatar to remove
        old_instance = Profile.objects.get(pk=instance.pk)
        old_instance.avatar.delete()
    return 'profiles_images/' + filename
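# A hypothetical sketch of how the callable is wired up on the model
# (Django's upload_to accepts a callable taking (instance, filename)):
#
#     class Profile(models.Model):
#         avatar = models.ImageField(upload_to=custom_upload_to) |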
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
 * Example test class. The imports assume JUnit 5; adjust to org.junit.Test
 * and org.junit.Assert if the project is on JUnit 4.
 */
public class GoGuiIntegratorTest {
@Test
public void boardSizeTest() {
GoGuiIntegrator goGuiIntegrator = new GoGuiIntegrator(10, (x, y) -> {}, () -> {}, TileColour.BLACK, "Fredje");
assertEquals(10, goGuiIntegrator.getBoardSize());
}
} |
package main
import (
"reflect"
"testing"
)
// Declare a struct with a single field
type data struct {
Hp int
}
func BenchmarkNativeAssign(b *testing.B) {
	// Instantiate the struct
	v := data{Hp: 2}
	// Stop the benchmark timer
	b.StopTimer()
	// Reset the benchmark timer data
	b.ResetTimer()
	// Restart the benchmark timer
	b.StartTimer()
	// Loop b.N times as directed by the benchmark framework
	for i := 0; i < b.N; i++ {
		// Benchmark plain struct-field assignment
v.Hp = 3
}
}
func BenchmarkReflectAssign(b *testing.B) {
v := data{Hp: 2}
	// Take the reflect.Value of the struct pointer and dereference it
	vv := reflect.ValueOf(&v).Elem()
	// Look up the struct field by name
	f := vv.FieldByName("Hp")
	b.StopTimer()
	b.ResetTimer()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		// Benchmark setting a field value through reflection
f.SetInt(3)
}
}
func BenchmarkReflectFindFieldAndAssign(b *testing.B) {
v := data{Hp: 2}
vv := reflect.ValueOf(&v).Elem()
b.StopTimer()
b.ResetTimer()
b.StartTimer()
for i := 0; i < b.N; i++ {
		// Benchmark field lookup plus assignment through reflection
vv.FieldByName("Hp").SetInt(3)
}
}
func foo(v int) {
}
func BenchmarkNativeCall(b *testing.B) {
for i := 0; i < b.N; i++ {
		// Plain (native) function call
foo(0)
}
}
func BenchmarkReflectCall(b *testing.B) {
	// Take the reflect.Value of the function
v := reflect.ValueOf(foo)
b.StopTimer()
b.ResetTimer()
b.StartTimer()
for i := 0; i < b.N; i++ {
		// Call the function through reflection
v.Call([]reflect.Value{reflect.ValueOf(2)})
}
}
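// To reproduce: run `go test -bench=.`. Expect the reflection variants to be
// slower than the native ones, with the FieldByName lookup inside the loop
// slowest of all.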
|
/*
* Used to register extra ultimately trusted keys - this has to be done
* before initializing the validation module.
* FIXME: Should be replaced by a function to add those keys to the trustdb.
*/
void
register_trusted_keyid(u32 *keyid)
{
struct key_item *k;
k = new_key_item ();
k->kid[0] = keyid[0];
k->kid[1] = keyid[1];
k->next = user_utk_list;
user_utk_list = k;
} |
// GetJSON returns the raw JSON from a path. This is useful for debugging.
func (u *Unifi) GetJSON(apiPath string, params ...string) ([]byte, error) {
req, err := u.UniReq(apiPath, strings.Join(params, " "))
if err != nil {
return []byte{}, err
}
return u.do(req)
} |
/// Build the test client with the given native executor.
pub fn build_with_executor<RuntimeApi>(
self,
executor: Executor,
) -> (
client::Client<Backend, Executor, Block, RuntimeApi>,
sc_consensus::LongestChain<Backend, Block>,
)
where
Executor: sc_client_api::CallExecutor<Block> + 'static,
Backend: sc_client_api::backend::Backend<Block>,
{
let storage = {
let mut storage = self.genesis_init.genesis_storage();
// Add some child storage keys.
for (key, child_content) in self.child_storage_extension {
storage.children_default.insert(
key,
StorageChild {
data: child_content.data.into_iter().collect(),
child_info: child_content.child_info,
},
);
}
storage
};
let client = client::Client::new(
self.backend.clone(),
executor,
&storage,
self.fork_blocks,
self.bad_blocks,
ExecutionExtensions::new(self.execution_strategies, self.keystore),
None,
ClientConfig {
offchain_indexing_api: self.enable_offchain_indexing_api,
..Default::default()
},
)
.expect("Creates new client");
let longest_chain = sc_consensus::LongestChain::new(self.backend);
(client, longest_chain)
} |
// TODO: Include angular info.
// Look into Frechet distance later <-- Not convinced this would be better.
// For now, a simple walk along both trajectories (i.e., time dependent) <-- imo this is better
float Trajectory::distance(Trajectory& other)
{
dReal x_adjust = trajectory.front().com.x - other.trajectory.front().com.x;
auto it = trajectory.begin();
auto it2 = other.trajectory.begin();
float retcom = 0, retuTor = 0, retangs = 0;
int this_revs = 0;
float this_ang_prev = 0;
int other_revs = 0;
float other_ang_prev = 0;
while (true)
{
VECTOR adjusted = VECTOR(((it2->com.x - other.trajectory.front().com.x)*1./2. + other.trajectory.front().com.x + x_adjust), it2->com.y, it2->com.z);
retcom += 100*it->com.distSq(adjusted);
float this_ang = it->uTorsoGlobAng;
float other_ang = it2->uTorsoGlobAng;
if (this_ang_prev < PI && this_ang_prev > 2.57 && this_ang > -PI && this_ang < -2.57)
{
this_revs++;
}
else if (this_ang < PI && this_ang > 2.57 && this_ang_prev > -PI && this_ang_prev < -2.57)
{
this_revs--;
}
if (other_ang_prev < PI && other_ang_prev > 2.57 && other_ang > -PI && other_ang < -2.57)
{
std::cout << "up" << std::endl;
other_revs++;
}
else if (other_ang < PI && other_ang > 2.57 && other_ang_prev > -PI && other_ang_prev < -2.57)
{
std::cout << "down" << std::endl;
other_revs--;
}
this_ang_prev = this_ang;
other_ang_prev = other_ang;
float this_ang_with_revs = it->uTorsoGlobAng + 2*PI*this_revs;
float other_ang_with_revs = it2->uTorsoGlobAng + 2*PI*other_revs;
float ang = (this_ang_with_revs) - (other_ang_with_revs);
retuTor += 1*pow((ang),2);
for (int i = 0; i < JOINT_COUNT; i++)
{
retangs += 0*pow((it->jointAngles[i] - it2->jointAngles[i]), 2);
}
it++;
it2++;
if(it == trajectory.end() || it2 == other.trajectory.end())
break;
}
float ret = retcom + retuTor + retangs;
std::cout << "1: " << retcom << "\n2: " << retuTor << "\n3: " << retangs << std::endl;
return ret;
} |
from PyMajsoul import msjrpc
import asyncio
import PyMajsoul.majsoul_pb2 as pb
import hmac
import hashlib
import getpass
import uuid
class testService(msjrpc.MSJRpcService):
def get_package_name(self):
return "lq"
def get_service_name(self):
return "Lobby"
def get_req_class(self, method):
if method == 'login':
return pb.ReqLogin
return None
def get_res_class(self, method):
if method == 'login':
return pb.ResLogin
return None
async def main():
channel = msjrpc.MSJRpcChannel("wss://gateway-v2.maj-soul.com:6443/gateway")
lobby = testService(channel)
await channel.connect()
req = pb.ReqLogin()
req.account = input("Username:").encode()
    pwd = getpass.getpass("Password:")
    # The HMAC secret was redacted in the source; substitute the real client key.
    req.password = hmac.new(b'<PASSWORD>', pwd.encode(), hashlib.sha256).hexdigest()
req.device.device_type = 'pc'
req.device.browser = 'safari'
req.random_key = str(uuid.uuid1())
req.client_version = 'v0.8.64.w'
req.gen_access_token = True
req.currency_platforms.append(2)
res = await lobby.call_method('login', req)
print(res)
await channel.close()
asyncio.run(main())
|
Sparrow for iPhone debuted earlier this year as one of the first third-party email clients to ever land in the App Store. While it’s received a lot of praise for its UI, it continues to be criticized for its lack of features.
The jailbreak community has managed to help out with that a bit. But thanks to today’s update, stock iPhone users can now cross a major feature off their wish lists too. Sparrow 1.3 now supports POP email accounts…
This means that users can now set up Hotmail, Yahoo and other popular POP accounts (for instance, Comcast Cable accounts are POP) within the app, bringing the client up to snuff with its desktop counterpart.
Still noticeably missing from Sparrow’s list of features is support for push notifications, but the developers have already said it’s coming in a future update. The team is also working on an iPad version of the app.
But perhaps the addition of POP support this time around is enough to convert you to a believer. And if that’s the case, you can find Sparrow in the App Store for $2.99.
What do you think of Sparrow so far? |
package com.rescam.xhb.framework.pojo;
import com.rescam.xhb.framework.entity.ManageUser;
public class Session {
private ManageUser manageUser;
private Integer customerId;
private String weixinOpenId;
public ManageUser getManageUser() {
return manageUser;
}
public void setManageUser(ManageUser manageUser) {
this.manageUser = manageUser;
}
public Integer getCustomerId() {
return customerId;
}
public void setCustomerId(Integer customerId) {
this.customerId = customerId;
}
public String getWeixinOpenId() {
return weixinOpenId;
}
public void setWeixinOpenId(String weixinOpenId) {
this.weixinOpenId = weixinOpenId;
}
}
|
<gh_stars>1-10
package dsf.checkWord.xml;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import javax.xml.transform.Transformer;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
public class ModifyXML {
private String file;
public ModifyXML(String file) {
if(!new File(file).exists()) {
CreateXML.createXML(file);
}
this.file = file;
}
public void addProperty(Map<String, String> map) throws ParserConfigurationException, SAXException, IOException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
Document document = builder.parse(new File(file));
try {
			// Collect every element node named "text" into a node list.
			NodeList text = document.getElementsByTagName("text");
for (int i = 0; i < text.getLength(); i++) {
				// On each pass, refresh the child-node list of the current "text" element.
				NodeList properties = text.item(i).getChildNodes();
				// System.out.println(properties.item(0).getTextContent());
				// Child 0 holds the heading level ("标题级数"); children 3-5 hold the
				// font size ("字号"), font family ("字体") and color ("颜色").
				if(properties.item(0).getTextContent().equals(map.get("标题级数"))) {
					properties.item(3).setTextContent(map.get("字号"));
					properties.item(4).setTextContent(map.get("字体"));
					properties.item(5).setTextContent(map.get("颜色"));
TransformerFactory tFactory = TransformerFactory.newInstance();
Transformer t = tFactory.newTransformer();
DOMSource source = new DOMSource(document);
File file = new File(this.file);
StreamResult result = new StreamResult(file);
t.transform(source, result);
break;
}
}
}catch (Exception e) {
//System.out.println("1");
e.printStackTrace();
}
}
public void deleteProperty(Map<String, String> map) throws ParserConfigurationException, SAXException, IOException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
Document document = builder.parse(new File(this.file));
try {
NodeList text = document.getElementsByTagName("text");
for (int i = 0; i < text.getLength(); i++) {
NodeList properties = text.item(i).getChildNodes();
System.out.println(properties.item(0).getTextContent());
if(properties.item(0).getTextContent().equals(map.get("标题级数"))) {
properties.item(3).setTextContent("");
properties.item(4).setTextContent(map.get(""));
properties.item(5).setTextContent(map.get(""));
TransformerFactory tFactory = TransformerFactory.newInstance();
Transformer t = tFactory.newTransformer();
DOMSource source = new DOMSource(document);
File file = new File(this.file);
StreamResult result = new StreamResult(file);
t.transform(source, result);
break;
}
}
}catch (Exception e) {
System.out.println("1");
e.printStackTrace();
}
}
public static void main(String[] args) throws ParserConfigurationException, SAXException, IOException {
ModifyXML modify = new ModifyXML("F:\\idea\\checkWord\\测试文件\\xml\\createXML.xml");
Map<String, String> map = new HashMap<String, String>();
map.put("标题级数", "0级");
map.put("字号", "四号");
map.put("字体", "微软雅黑");
map.put("颜色", "黑色");
modify.addProperty(map);
//modify.deleteProperty(map);
}
}
|
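ModifyXML's constructor calls CreateXML.createXML, which is not part of this excerpt. Below is a guess at its shape, reverse-engineered from how addProperty indexes child nodes (item(0) = heading level, items 3-5 = size/font/color); the element names and the two intermediate children at indexes 1-2 are pure assumptions.

import java.io.File;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

// Sketch of the CreateXML helper referenced above - not the original class.
public class CreateXML {
    public static void createXML(String file) {
        try {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().newDocument();
            Element root = doc.createElement("root");
            doc.appendChild(root);
            // One <text> block per heading level. Children are appended in a fixed
            // order so that item(0) is the level and items 3-5 are size/font/color,
            // matching the indexes ModifyXML.addProperty relies on. No indentation
            // is written, so getChildNodes() sees only these element children.
            for (String level : new String[]{"0级", "1级", "2级", "3级"}) {
                Element text = doc.createElement("text");
                append(doc, text, "level", level);   // item(0)
                append(doc, text, "bold", "");       // item(1) - assumed
                append(doc, text, "italic", "");     // item(2) - assumed
                append(doc, text, "size", "");       // item(3)
                append(doc, text, "font", "");       // item(4)
                append(doc, text, "color", "");      // item(5)
                root.appendChild(text);
            }
            Transformer t = TransformerFactory.newInstance().newTransformer();
            t.transform(new DOMSource(doc), new StreamResult(new File(file)));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private static void append(Document doc, Element parent, String name, String value) {
        Element child = doc.createElement(name);
        child.setTextContent(value);
        parent.appendChild(child);
    }
}
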
// CompensatedBalance returns balance decreased by surplus balance
func (a *Accounting) CompensatedBalance(peer swarm.Address) (compensated int64, err error) {
surplus, err := a.SurplusBalance(peer)
if err != nil {
return 0, err
}
if surplus < 0 {
return 0, ErrInvalidValue
}
balance, err := a.Balance(peer)
if err != nil {
if !errors.Is(err, ErrPeerNoBalance) {
return 0, err
}
}
if surplus == 0 && errors.Is(err, ErrPeerNoBalance) {
return 0, err
}
compensated, err = subtractI64mU64(balance, uint64(surplus))
if err != nil {
return 0, err
}
return compensated, nil
} |
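The subtractI64mU64 helper used above is defined elsewhere in the package. This is a minimal sketch of an overflow-checked subtraction with the same signature; ErrOverflow stands in for whatever sentinel error the real package returns, and the standard "math" package is assumed to be imported for math.MaxInt64.

// Sketch of subtractI64mU64 - the real implementation is not in this excerpt.
// It subtracts an unsigned amount from a signed balance, failing instead of
// wrapping around on int64 underflow.
func subtractI64mU64(base int64, subtracted uint64) (int64, error) {
	if subtracted > math.MaxInt64 {
		return 0, ErrOverflow
	}
	result := base - int64(subtracted)
	if result > base {
		return 0, ErrOverflow // subtraction wrapped around
	}
	return result, nil
}
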
from collections import defaultdict
class Line(object):
"""
Represent a line in the color grid.
"""
def __init__(self, line_sequence=(), length=0):
"""
:param line_sequence: number's sequence representing the black places
"""
self.sequence = line_sequence
self.length = length
def set_sequence(self, sequence):
self.sequence = sequence
def check_line(self, sequence):
"""
Return True if the sequence of numbers is not violated
:param sequence: black places' sequence
:return:
"""
ind, n = 0, 0
while ind < len(sequence):
while n < len(self.sequence) and sequence[ind] > self.sequence[n]:
n += 1
if n == len(self.sequence) and ind < len(sequence):
return False
ind += 1
return True
class ColorGrid(object):
"""
Represent the color grid game:
Each column and row has a sequence of numbers which describe the number of black places
"""
WHITE = 0
BLACK = 1
BLOCKED = -1
def __init__(self, length, width):
"""
:param length: length of the grid
:param width: width of the grid
"""
        self.grid = defaultdict(int)  # (i, j) -> cell state; unset cells default to WHITE (0)
self.length = length
self.width = width
self.rows = defaultdict(Line) # key= row's number; value= number's sequence
self.columns = defaultdict(Line) # key= column's number; value= number's sequence
def set_row_sequence(self, row, sequence):
self.rows[row].set_sequence(sequence)
def set_column_sequence(self, col, sequence):
self.columns[col].set_sequence(sequence)
def is_white(self, i, j):
return self.grid[i, j] == self.WHITE
def is_black(self, i, j):
return self.grid[i, j] == self.BLACK
def is_blocked(self, i, j):
return self.grid[i, j] == self.BLOCKED
def set_to_white(self, i, j):
if not 0 <= i < self.width or not 0 <= j < self.length:
raise Exception("row %s or column %s not supported" % (i, j))
self.grid[i, j] = self.WHITE
def set_to_black(self, i, j):
if not 0 <= i < self.width or not 0 <= j < self.length:
raise Exception("row %s or column %s not supported" % (i, j))
self.grid[i, j] = self.BLACK
def set_to_blocked(self, i, j):
if not 0 <= i < self.width or not 0 <= j < self.length:
raise Exception("row %s or column %s not supported" % (i, j))
self.grid[i, j] = self.BLOCKED
def get_current_row_sequence(self, row):
"""
Return the sequence of black lines on this row
:param row: row's number
:return:
"""
        sequence, subseq = (), 0
        for col in range(self.length):
            if self.is_black(row, col):
                subseq += 1
            else:
                if subseq > 0:
                    sequence += (subseq,)
                subseq = 0
        if subseq > 0:  # a run reaching the end of the row was previously dropped
            sequence += (subseq,)
        return sequence
def get_current_column_sequence(self, col):
"""
Return the sequence of black lines on this column
:param col: column's number
:return:
"""
        sequence, subseq = (), 0
        for row in range(self.width):
            if self.is_black(row, col):
                subseq += 1
            else:
                if subseq > 0:
                    sequence += (subseq,)
                subseq = 0
        if subseq > 0:  # a run reaching the end of the column was previously dropped
            sequence += (subseq,)
        return sequence
def check_row(self, row):
"""
Return True if the sequence of numbers is not violated
:param row: row's number
:return:
"""
return self.rows[row].check_line(self.get_current_row_sequence(row))
def check_column(self, col):
"""
Return True if the sequence of numbers is not violated
:param col: row's number
:return:
"""
        return self.columns[col].check_line(self.get_current_column_sequence(col))
|
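A short driver showing how the class is meant to be used. This is illustrative only, not from the original module; the 2x2 puzzle and its sequences are made up.

# Illustrative usage - not part of the original module.
if __name__ == '__main__':
    grid = ColorGrid(length=2, width=2)
    grid.set_row_sequence(0, (2,))       # row 0: one run of two black cells
    grid.set_row_sequence(1, (1,))
    grid.set_column_sequence(0, (2,))
    grid.set_column_sequence(1, (1,))

    grid.set_to_black(0, 0)
    grid.set_to_black(0, 1)
    grid.set_to_black(1, 0)
    grid.set_to_white(1, 1)

    print(grid.get_current_row_sequence(0))   # (2,)
    print(grid.check_row(0))                  # True: row 0 satisfies its sequence
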
<filename>IDEAS/Resources/src/fluid/heatpumps/calibration/PythonModel/calibrate.py
from __future__ import division, print_function, absolute_import
from builtins import str
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.optimize import minimize
import time as tm
def calibrate_model(heaPum, calData, data, plot=True):
""" Manages the calibration of heat pump models.
:param heaPum: Heat pump model (object).
:param calData: Subsampled data used for calibration (object).
:param data: Full data used for final comparison (object).
:param plot: Boolean, set to True to draw and save results.
:return: List of calibrated parameters,
Results from the calibrated model,
Results using the initial guess parameters.
"""
# Select appropriate guess values for model parameters and bounds for
# calibration
params, bounds = heaPum.initialGuessParameters(data)
heaPum.reinitializeParameters(params)
    # Normalized parameter values: each parameter divided by itself, i.e. an array of ones
    scale = params / params
# Normalized bounds for calibration
scale_bounds = [(bounds[i][0]/params[i], bounds[i][1]/params[i])
if bounds[i][1] is not None
else (bounds[i][0]/params[i], None)
for i in range(len(bounds))]
tic = tm.time()
# Compare and plot comparison with manufacturer data for guess parameters
gueRes = simulate(heaPum, data)
if data.CoolingMode:
fname = data.name + '_Cooling'
else:
fname = data.name + '_Heating'
compare_data_sets(gueRes, data, plot, fname=fname + '_opt_start')
# Calibrate the model parameters
opt = minimize(cost_function, scale, args=(params, heaPum, calData),
method='SLSQP', bounds=scale_bounds,
options={'maxiter': 2000, 'ftol': 1e-8, 'eps': 0.00001})
# Compare and plot comparison with manufacturer data for calibrated
# parameters
optPar = opt.x*params
heaPum.reinitializeParameters(optPar)
optRes = simulate(heaPum, data)
compare_data_sets(optRes, data, plot, fname=fname + '_opt_end')
toc = tm.time() - tic
print('Total elapsed time for calibration : %f seconds' % toc)
return optPar, optRes, gueRes
def compare_data_sets(data, refData, plot=False, fname='ComparedDataSets'):
""" Compare two sets of data.
:param data: Performance data (object).
:param refData: Reference performance data (object).
:param plot: Boolean, set to True to draw and save results.
:param fname: Name of the output figure file (no extension).
:return: Sum of normalized square errors between data sets.
"""
fname += '.pdf'
SSE = 0.0
invalidPoints = 0.
totalPoints = float(len(data.EWT_Source))
# Evaluate the power and capacity for each data point in provided
# manufacturer data set
for i in range(len(refData.EWT_Load)):
if not (data.Power[i] > 0. and data.Capacity[i] > 0.):
invalidPoints += 1.
print ('Invalid : EWT_Source =', data.EWT_Source[i],
'EWT_Load = ', data.EWT_Load[i],
'flowSource = ', data.flowSource[i],
'flowLoad = ', data.flowLoad[i])
# Calculate the sum of square errors
SE = ((refData.Power[i]-data.Power[i])/refData.Power[i])**2 \
+ ((refData.Capacity[i]-data.Capacity[i])/refData.Capacity[i])**2
SSE += SE
print('Number of invalid points :', invalidPoints)
# Plot the results (optional)
if plot:
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(np.array([0, max(refData.Power)]),
np.array([0, 0.9*max(refData.Power)]), 'b--')
ax.plot(np.array([0, max(refData.Power)]),
np.array([0, 1.1*max(refData.Power)]), 'b--')
ax.plot(np.array([0, max(refData.Power)]),
np.array([0, 1*max(refData.Power)]), 'k-')
ax.text(0.75*max(refData.Power),
0.9*0.75*max(refData.Power),
'-10%',
verticalalignment='top',
horizontalalignment='left',
weight='bold')
ax.text(0.75*max(refData.Power),
1.1*0.75*max(refData.Power),
'+10%',
verticalalignment='bottom',
horizontalalignment='right',
weight='bold')
ax.plot(refData.Power, data.Power, 'k.')
ax.set_xlabel('Power (' + refData.name + ') [kW]')
ax.set_ylabel('Power (' + data.name + ') [kW]')
ax.grid(True)
ax = fig.add_subplot(122)
ax.plot(np.array([0, max(refData.Capacity)]),
np.array([0, 0.9*max(refData.Capacity)]), 'b--')
ax.plot(np.array([0, max(refData.Capacity)]),
np.array([0, 1.1*max(refData.Capacity)]), 'b--')
ax.plot(np.array([0, max(refData.Capacity)]),
np.array([0, 1*max(refData.Capacity)]), 'k-')
ax.text(0.75*max(refData.Capacity),
0.9*0.75*max(refData.Capacity),
'-10%',
verticalalignment='top',
horizontalalignment='left',
weight='bold')
ax.text(0.75*max(refData.Capacity),
1.1*0.75*max(refData.Capacity),
'+10%',
verticalalignment='bottom',
horizontalalignment='right',
weight='bold')
ax.plot(refData.Capacity, data.Capacity, 'k.')
ax.set_xlabel('Capacity (' + refData.name + ') [kW]')
ax.set_ylabel('Capacity (' + data.name + ') [kW]')
ax.grid(True)
plt.savefig(fname)
return SSE
def cost_function(scale, guess, heaPum, data):
""" Evaluate the cost function for optimization.
:param scale: Array of normalized parameters.
:param guess: Array of guess parameters.
:param heaPum: Heat pump model (object).
:param data: Reference performance data (object).
:return: Sum of normalized square errors between model and reference data.
.. note:: Parameters are evaluated by multiplying the corresponding
normalized parameter with its guess value.
"""
# Scale the normalized parameters back to dimensional values
params = guess*scale
print('----------------------------------------------------------------\n')
heaPum.reinitializeParameters(params)
heaPum.printParameters()
print('----------------------------------------------------------------\n')
res = simulate(heaPum, data)
SSE = compare_data_sets(res, data)
print('Sum of square errors : ' + str(SSE) + ' \n')
print('----------------------------------------------------------------\n')
return SSE
def simulate(heaPum, data):
""" Evaluate the heat pump performance from the model.
:param heaPum: Heat pump model (object).
:param data: Reference performance data (object).
:return: Performance data of the modeled heat pump (object).
.. note:: Performance data from the model is evaluated at the same
operating conditions (inlet water temperatures and mass flow
rates at the source and load sides) as in the reference data.
"""
Capacity = np.zeros(len(data.EWT_Load))
HR = np.zeros(len(data.EWT_Load))
P = np.zeros(len(data.EWT_Load))
# Evaluate the power and capacity for each data point in provided
# manufacturer data set
for i in range(len(data.EWT_Load)):
Capacity[i] = heaPum.get_Capacity(data.EWT_Source[i],
data.EWT_Load[i],
data.flowSource[i],
data.flowLoad[i])
HR[i] = heaPum.get_SourceSideTransferRate(data.EWT_Source[i],
data.EWT_Load[i],
data.flowSource[i],
data.flowLoad[i])
P[i] = heaPum.get_Power(data.EWT_Source[i], data.EWT_Load[i],
data.flowSource[i], data.flowLoad[i])
res = SimulationResults(data.EWT_Source, data.EWT_Load, data.flowSource,
data.flowLoad, Capacity, HR, P, 'Python model')
return res
def simulate_in_dymola(heaPum, data, tableName, tableFileName):
""" Evaluate the heat pump performance from the model in Dymola.
:param heaPum: Heat pump model (object).
:param data: Reference performance data (object).
:param tableName: Name of the combiTimeTable.
:param tableFileName: Name of the text file containing the combiTimeTable.
:return: Performance data of the modeled heat pump (object).
.. note:: Performance data from the model is evaluated at the same
operating conditions (inlet water temperatures and mass flow
rates at the source and load sides) as in the reference data.
"""
import buildingspy.simulate.Simulator as si
from buildingspy.io.outputfile import Reader
from scipy.interpolate import interp1d
from builtins import str
import getpass
import os
import tempfile
# Find absolute path to buildings library
packagePath = os.path.normpath(
os.path.join(os.path.normpath(os.path.dirname(__file__)),
'..', '..', '..', '..', '..', '..'))
# Create temporary directory for simulation files
dirPrefix = tempfile.gettempprefix()
tmpDir = tempfile.mkdtemp(prefix=dirPrefix + '-'
+ 'HeatPumpCalibration' + '-'
+ getpass.getuser() + '-')
# Set parameters for simulation in Dymola
calModelPath = heaPum.modelicaCalibrationModelPath()
s = si.Simulator(calModelPath,
'dymola',
outputDirectory=tmpDir,
packagePath=packagePath)
s = heaPum.set_ModelicaParameters(s)
m1_flow_nominal = min(data.flowSource)
m2_flow_nominal = min(data.flowLoad)
tableFilePath = \
str(os.path.join(tmpDir, tableFileName).replace(os.sep, '/'))
s.addParameters({'m1_flow_nominal': m1_flow_nominal,
'm2_flow_nominal': m2_flow_nominal,
'calDat.fileName': tableFilePath})
# Write CombiTimeTable for dymola
data.write_modelica_combiTimeTable(tableName, tmpDir,
tableFileName, heaPum.CoolingMode)
# Simulation parameters
s.setStopTime(len(data.EWT_Source))
s.setSolver('dassl')
# Kill the process if it does not finish in 2 minutes
s.setTimeOut(120)
s.showProgressBar(False)
s.printModelAndTime()
# s.showGUI(show=True)
# s.exitSimulator(exitAfterSimulation=False)
s.simulate()
# Read results
modelName = heaPum.modelicaModelName()
ofr = Reader(os.path.join(tmpDir, modelName), 'dymola')
(time1, QCon) = ofr.values('heaPum.QCon_flow')
(time1, QEva) = ofr.values('heaPum.QEva_flow')
(time1, P) = ofr.values('heaPum.P')
t = [float(i) + 0.5 for i in range(len(data.EWT_Source))]
f_P = interp1d(time1, P)
P = f_P(t)
f_QCon = interp1d(time1, QCon)
QCon = f_QCon(t)
f_QEva = interp1d(time1, QEva)
QEva = f_QEva(t)
# # Clean up
# shutil.rmtree('calibrationModel')
if heaPum.CoolingMode:
Capacity = -QEva
HR = QCon
else:
Capacity = QCon
HR = -QEva
dymRes = SimulationResults(data.EWT_Source,
data.EWT_Load,
data.flowSource,
data.flowLoad,
Capacity,
HR,
P,
'Modelica')
return dymRes
class ManufacturerData(object):
""" Heat pump performance data.
:param manufacturer: Name of the manufacturer.
:param model: Name of the heat pump model.
:param CoolingMode: Boolean, set to True if heat pump is used in cooling
mode.
.. note:: An empty heat pump performance object is created.
"""
def __init__(self, manufacturer, model, CoolingMode=False):
self.EWT_Source = []
self.EWT_Load = []
self.flowSource = []
self.flowLoad = []
self.Capacity = []
self.HR = []
self.Power = []
self.name = manufacturer + '_' + model
self.CoolingMode = CoolingMode
def add_data_point(self, EWT_Source, EWT_Load, flowSource,
flowLoad, Capacity, HR, Power):
""" Add a data point to the heat pump performance data.
:param EWT_Source: Entering water temperature on the source side (K).
:param EWT_Load: Entering water temperature on the load side (K).
:param flowSource: Fluid mass flow rate on the source side (kg/s).
:param flowLoad: Fluid mass flow rate on the load side (kg/s).
:param Capacity: Heat pump capacity (kW).
:param HR: Heat transfer rate on the source side (kW).
:param Power: Power input to the heat pump (kW).
"""
self.EWT_Source.append(EWT_Source)
self.EWT_Load.append(EWT_Load)
self.flowSource.append(flowSource)
self.flowLoad.append(flowLoad)
self.Capacity.append(Capacity)
self.HR.append(HR)
self.Power.append(Power)
return
def calibration_data_16_points(self):
""" Find the 16 min/max data points.
:return: Performance data for calibration (object).
.. note:: This method returns a subsample of the performance data
corresponding to the combination of minimums and maximums of
the inlet water temperatures and mass flow rates on the
source and load sides.
"""
# Only the 16 extreme data points for temperature and flow rates
# are used
indexes = range(len(self.EWT_Source))
variables = [self.flowLoad, self.EWT_Source,
self.EWT_Load, self.flowSource]
li = [indexes]
# Go through the data for each variable and keep indexes corresponding
# to min/max values
for i in range(len(variables)):
jmin = len(li)-(2**(i))
jmax = len(li)
for j in range(jmin, jmax):
minValue = min([variables[i][k] for k in li[j]])
maxValue = max([variables[i][k] for k in li[j]])
li.append([k for k in li[j] if variables[i][k] == minValue])
li.append([k for k in li[j] if variables[i][k] == maxValue])
indexes = []
jmin = len(li)-(2**4)
jmax = len(li)
for j in range(jmin, jmax):
indexes += li[j]
EWT_Source = np.array([self.EWT_Source[i] for i in indexes])
EWT_Load = np.array([self.EWT_Load[i] for i in indexes])
flowSource = np.array([self.flowSource[i] for i in indexes])
flowLoad = np.array([self.flowLoad[i] for i in indexes])
Capacity = np.array([self.Capacity[i] for i in indexes])
HR = np.array([self.HR[i] for i in indexes])
Power = np.array([self.Power[i] for i in indexes])
calData = SimulationResults(EWT_Source, EWT_Load, flowSource,
flowLoad, Capacity*1e3, HR*1e3,
Power*1e3, self.name)
return calData
def calibration_data_min_max_temp(self):
""" Find the data points that correspond to the minimum and maximum
inlet water temperature on the source and load sides.
:return: Performance data for calibration (object).
.. note:: This method returns a subsample of the performance data
corresponding to the combination of minimums and maximums of
the inlet water temperatures on the source and load sides.
"""
# All the data corresponding to min/max source and load temperatures
# are used
indexes = range(len(self.EWT_Source))
variables = [self.EWT_Source, self.EWT_Load]
li = [indexes]
# Go through the data and keep indexes corresponding
# to min/max values of source and load temperatures
for i in range(len(variables)):
jmin = len(li)-(2**(i))
jmax = len(li)
for j in range(jmin, jmax):
minValue = min([variables[i][k] for k in li[j]])
maxValue = max([variables[i][k] for k in li[j]])
li.append([k for k in li[j] if variables[i][k] == minValue])
li.append([k for k in li[j] if variables[i][k] == maxValue])
indexes = []
jmin = len(li)-(2**2)
jmax = len(li)
for j in range(jmin, jmax):
indexes += li[j]
EWT_Source = np.array([self.EWT_Source[i] for i in indexes])
EWT_Load = np.array([self.EWT_Load[i] for i in indexes])
flowSource = np.array([self.flowSource[i] for i in indexes])
flowLoad = np.array([self.flowLoad[i] for i in indexes])
Capacity = np.array([self.Capacity[i] for i in indexes])
HR = np.array([self.HR[i] for i in indexes])
Power = np.array([self.Power[i] for i in indexes])
calData = SimulationResults(EWT_Source, EWT_Load, flowSource,
flowLoad, Capacity*1e3, HR*1e3,
Power*1e3, self.name)
return calData
def write_modelica_combiTimeTable(self, tableName, tableTempDir,
tableFileName, CoolingMode):
""" Write the combiTimeTable for use by the calibration model in
Modelica.
:param tableName: Name of the combiTimeTable.
:param tableTempDir: Temporary directory to write the table file.
:param tableFileName: Name of the text file containing the
combiTimeTable.
:param CoolingMode: Boolean, set to True if heat pump is used in
cooling mode.
"""
if not os.path.exists(tableTempDir):
os.makedirs(tableTempDir)
f = open(os.path.join(tableTempDir, tableFileName), 'w')
f.write('#1\n')
f.write('double '
+ tableName
+ '(' + str(2*len(self.EWT_Source)) + ',5)\n')
if CoolingMode:
for i in range(len(self.EWT_Source)):
for j in [i, i + 1]:
f.write('\t'
+ str(j)
+ '\t' + str(self.EWT_Load[i])
+ '\t' + str(self.EWT_Source[i])
+ '\t' + str(self.flowLoad[i])
+ '\t' + str(self.flowSource[i]) + '\n')
else:
for i in range(len(self.EWT_Source)):
for j in [i, i + 1]:
f.write('\t'
+ str(j)
+ '\t' + str(self.EWT_Source[i])
+ '\t' + str(self.EWT_Load[i])
+ '\t' + str(self.flowSource[i])
+ '\t' + str(self.flowLoad[i]) + '\n')
f.close()
return
class SimulationResults(object):
""" Results from the simulation model.
:param EWT_Source: Array of entering water temperature on the source
side (K).
:param EWT_Load: Array of entering water temperature on the load
side (K).
:param flowSource: Array of fluid mass flow rate on the source
side (kg/s).
:param flowLoad: Array of fluid mass flow rate on the load
side (kg/s).
:param Capacity: Array of heat pump capacity (W).
:param HR: Array of heat transfer rate on the source side (W).
:param Power: Array of power input to the heat pump (W).
:param name: Name of the heat pump.
"""
def __init__(self, EWT_Source, EWT_Load, flowSource, flowLoad,
Capacity, HR, Power, name):
self.EWT_Source = EWT_Source
self.EWT_Load = EWT_Load
self.flowSource = flowSource
self.flowLoad = flowLoad
self.Capacity = Capacity*1e-3
self.HR = HR*1e-3
self.Power = Power*1e-3
self.name = name
def _convert_FtoK(T):
""" convert Fahrenheit to Kelvin.
:param T: Temperature (F).
:return: Temperature (K).
"""
return (T - 32.0)*5.0/9.0 + 273.15
def _convert_GPMtoLPS(V):
""" convert gal/min to L/s.
:param V: Volume flow rate (gal/min).
:return: Volume flow rate (L/s).
"""
return V / 15.850372483753
def _convert_MBTUHtoKW(Q):
""" convert MBtu/h to kW.
:param Q: Power (MBtu/h).
:return: Power (kW).
"""
return Q * 0.29307107
def _convert_MBTUHtoW(Q):
""" convert MBtu/h to W.
:param Q: Power (MBtu/h).
:return: Power (W).
"""
return Q * 0.29307107e3
|
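A sketch of how these pieces are driven end to end. This is not from the original source: ScrollCompressor is a hypothetical stand-in for one of the package's heat pump model classes, and the single data point is fabricated for illustration.

# Illustrative driver - ScrollCompressor and the data values are hypothetical.
if __name__ == '__main__':
    data = ManufacturerData('SomeManufacturer', 'ModelX', CoolingMode=False)
    # Manufacturer tables are typically in F, gal/min and MBtu/h; the module's
    # conversion helpers bring them to SI on entry.
    data.add_data_point(EWT_Source=_convert_FtoK(50.0),
                        EWT_Load=_convert_FtoK(104.0),
                        flowSource=_convert_GPMtoLPS(9.0),
                        flowLoad=_convert_GPMtoLPS(9.0),
                        Capacity=_convert_MBTUHtoKW(36.0),
                        HR=_convert_MBTUHtoKW(28.0),
                        Power=8.0)
    # ... more add_data_point calls ...

    # Subsample the extreme operating points used by the optimizer.
    calData = data.calibration_data_16_points()

    # The model object must implement initialGuessParameters,
    # reinitializeParameters, get_Capacity, get_SourceSideTransferRate
    # and get_Power; ScrollCompressor here is only a placeholder name.
    heaPum = ScrollCompressor()
    optPar, optRes, gueRes = calibrate_model(heaPum, calData, data, plot=True)
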
/// Extracts the parameter names from a template.
fn extract_parameters(template: &Template) -> Vec<String> {
template
.elements
.iter()
.filter_map(|element| match element {
TemplateElement::Expression(he) => {
                if he.params.is_empty() {
                    // `{{name}}` - the expression itself names the parameter.
                    he.name.as_name().map(String::from)
                } else {
                    // `{{helper name ...}}` - take the first param; the emptiness
                    // check above makes this `unwrap` safe.
                    he.params.first().unwrap().as_name().map(String::from)
}
}
_ => None,
})
.collect()
} |
// ^ and $ are sentinels appended to each end to avoid bounds checking; '#'
// separators are interleaved so even- and odd-length palindromes are handled
// uniformly, e.g. "aba" -> "^#a#b#a#$".
public String preProcess(final String s) {
int n = s.length();
if (n == 0) return "^$";
StringBuilder ret = new StringBuilder("^");
    for (int i = 0; i < n; i++) ret.append('#').append(s.charAt(i));
ret.append("#$");
return ret.toString();
} |
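preProcess is only the setup step of Manacher's algorithm; the loop that consumes its output is not in this excerpt. Below is a sketch of the standard companion method, assuming it sits in the same class.

// Sketch of the Manacher loop that consumes preProcess's output - not part of
// the original excerpt. p[i] holds the palindrome radius around position i of
// the transformed string; the '^' and '$' sentinels guarantee the expansion
// below stops without explicit bounds checks.
public String longestPalindrome(final String s) {
    String t = preProcess(s);
    int n = t.length();
    int[] p = new int[n];
    int center = 0, right = 0; // center and right edge of the rightmost palindrome
    for (int i = 1; i < n - 1; i++) {
        int mirror = 2 * center - i;
        if (i < right) p[i] = Math.min(right - i, p[mirror]);
        // Expand around i; the sentinels keep charAt in range.
        while (t.charAt(i + 1 + p[i]) == t.charAt(i - 1 - p[i])) p[i]++;
        if (i + p[i] > right) { center = i; right = i + p[i]; }
    }
    int maxLen = 0, centerIndex = 0;
    for (int i = 1; i < n - 1; i++) {
        if (p[i] > maxLen) { maxLen = p[i]; centerIndex = i; }
    }
    int start = (centerIndex - 1 - maxLen) / 2; // map back to the original string
    return s.substring(start, start + maxLen);
}
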
<gh_stars>0
#ifndef _LOGGER_
#define _LOGGER_
#include <iostream>
#include <string>
#include <ctime>
#include <mutex>
#include <fstream>
class Logger
{
public:
~Logger();
static Logger* Instance();
private:
Logger();
Logger( const Logger& ){}
Logger& operator=( const Logger& );
public:
void openLogFile( const std::string& logFile );
void closeLogFile();
void writeLog( const std::string& msg );
void writeDebug( const std::string& msg );
void writeError( const std::string& msg );
private:
static Logger* m_pInstance;
static std::mutex m_sMutex;
std::ofstream m_fileStream;
};
#endif
|
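The header above only declares the singleton; the matching Logger.cpp is not in this excerpt. Below is a minimal sketch of what it might look like - the timestamp format and the DEBUG/ERROR prefixes are assumptions, not the original implementation.

// Sketch of a matching Logger.cpp - implementation details are assumed.
#include "Logger.h"

Logger* Logger::m_pInstance = nullptr;
std::mutex Logger::m_sMutex;

Logger::Logger() {}
Logger::~Logger() { closeLogFile(); }

Logger* Logger::Instance()
{
    // Serialize first-time construction; later calls just return the pointer.
    std::lock_guard<std::mutex> lock( m_sMutex );
    if ( m_pInstance == nullptr )
        m_pInstance = new Logger();
    return m_pInstance;
}

void Logger::openLogFile( const std::string& logFile )
{
    std::lock_guard<std::mutex> lock( m_sMutex );
    m_fileStream.open( logFile, std::ios::app );
}

void Logger::closeLogFile()
{
    std::lock_guard<std::mutex> lock( m_sMutex );
    if ( m_fileStream.is_open() )
        m_fileStream.close();
}

void Logger::writeLog( const std::string& msg )
{
    // One lock per line keeps concurrent writers from interleaving output.
    std::lock_guard<std::mutex> lock( m_sMutex );
    std::time_t now = std::time( nullptr );
    char buf[32];
    std::strftime( buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime( &now ) );
    m_fileStream << buf << " " << msg << std::endl;
}

void Logger::writeDebug( const std::string& msg ) { writeLog( "DEBUG: " + msg ); }
void Logger::writeError( const std::string& msg ) { writeLog( "ERROR: " + msg ); }
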
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#ifndef _FWTNotifiable_
#define _FWTNotifiable_
#import <FWTNotifiable/FWTNotifiableManager.h>
#import <FWTNotifiable/FWTNotifiableLogger.h>
#endif
|
#ifndef __DIFFUCLASS_H_INCLUDED
#define __DIFFUCLASS_H_INCLUDED
#include <petscdm.h>
#include <petscdmda.h>
#include <petscksp.h>
#include <vector>
#include <string>
//JJW mod:
//#include "eQ.h"
#include "./src/eQ.h"
class simulation;
//Data structure for passing to various PETSc functions
typedef struct {
//Unifying the interface - how to set non PETSc datatypes?
//Also, how to completely unify interface so as not to have
//PETSc-specific values in data?
PetscInt xLengthMicrons, yLengthMicrons, localMinimum, localMaximum, totalNodes;
PetscReal diffusionConstant, h, dt, fourierNumber,
topDirichletCoefficient, bottomDirichletCoefficient,
leftDirichletCoefficient, rightDirichletCoefficient,
topNeumannCoefficient, bottomNeumannCoefficient,
leftNeumannCoefficient, rightNeumannCoefficient,
topBoundaryValue, bottomBoundaryValue,
leftBoundaryValue, rightBoundaryValue;
PetscBool homogeneousDirichlet;
    std::string directoryName, objectName;
MPI_Comm subCommunicator;
Vec localVector;
}DiffusionData;
//Class for simulating 2d diffusion
class diffusionPETSc : public eQ::diffusionSolver
{
// diffusionPETSc() {}
// friend class simulation;
private:
PetscErrorCode ierr;
MPI_Comm DIFFU_COMM;
PetscInt gridNodesX, gridNodesY, step;
int commSize;
Vec globalVector;
DMBoundaryType boundary;
DMDAStencilType stencilType;
DM distributedArray;
AO appOrder;
KSP krylovSolver;
PC preconditioner;
// DiffusionData initData, *gridData;
PetscViewer printViewer;
PetscErrorCode ApplyBoundaryConditions();
PetscErrorCode InitializeDiffusion(DiffusionData*, int, char **);
PetscErrorCode TimeStep();
int KSPIterationCountLastTimeStep();
    PetscErrorCode WriteGridValues(std::vector<double>, std::vector<double>, std::vector<double>);
    PetscErrorCode ReadGridValues(std::vector<double>, std::vector<double>, std::vector<double> *);
PetscErrorCode RecordData();
PetscErrorCode DiffusionFinalize(PetscBool);
std::vector<double> allXCoordinates, allYCoordinates;
public:
diffusionPETSc() {}
//Vector for storing vector information
std::vector<double> solution_vector;
// void initDiffusion(MPI_Comm, std::vector<std::string>, int, char**);
void initDiffusion(eQ::diffusionSolver::params &initParams);
void stepDiffusion();
void setBoundaryValues(const eQ::data::parametersType &bvals);
eQ::data::parametersType getBoundaryFlux(void);
double getDiffusionConstant(void);
void writeDiffusionFiles(double timestamp);
void finalize(void);
std::vector<double>topBoundaryValue;
std::vector<double>bottomBoundaryValue;
std::vector<double>leftBoundaryValue;
std::vector<double>rightBoundaryValue;
std::vector<double>topBoundaryNeumann;
std::vector<double>topBoundaryDirichlet;
std::vector<double>bottomBoundaryNeumann;
std::vector<double>bottomBoundaryDirichlet;
std::vector<double>leftBoundaryNeumann;
std::vector<double>leftBoundaryDirichlet;
std::vector<double>rightBoundaryNeumann;
std::vector<double>rightBoundaryDirichlet;
DiffusionData initData, *gridData;
};
#endif
|