content
stringlengths 10
4.9M
|
---|
<reponame>an888ha/AdaptiveCards<gh_stars>0
#include "pch.h"
#include "AdaptiveOpenUrlAction.h"
#include "Util.h"
using namespace Microsoft::WRL;
using namespace Microsoft::WRL::Wrappers;
using namespace ABI::AdaptiveNamespace;
using namespace ABI::Windows::Foundation;
AdaptiveNamespaceStart
// Default-initializes this wrapper by creating an empty shared-model
// OpenUrlAction and delegating to the shared-pointer overload below.
HRESULT AdaptiveOpenUrlAction::RuntimeClassInitialize() noexcept try
{
    std::shared_ptr<AdaptiveSharedNamespace::OpenUrlAction> openUrlAction = std::make_shared<AdaptiveSharedNamespace::OpenUrlAction>();
    return RuntimeClassInitialize(openUrlAction);
} CATCH_RETURN;
_Use_decl_annotations_
// Initializes this wrapper from a shared-model OpenUrlAction: converts the
// action's URL string into a Windows.Foundation.Uri (stored in m_url) and
// copies the shared base-action properties. Returns E_INVALIDARG for a null
// shared model; otherwise propagates any factory/creation failure.
HRESULT AdaptiveOpenUrlAction::RuntimeClassInitialize(const std::shared_ptr<AdaptiveSharedNamespace::OpenUrlAction>& sharedOpenUrlAction) try
{
    if (sharedOpenUrlAction == nullptr)
    {
        return E_INVALIDARG;
    }

    ComPtr<IUriRuntimeClassFactory> uriActivationFactory;
    RETURN_IF_FAILED(GetActivationFactory(
        HStringReference(RuntimeClass_Windows_Foundation_Uri).Get(),
        &uriActivationFactory));

    // The shared model stores the URL as a UTF-8 string; the WinRT Uri factory
    // needs a wide string. (Renamed from the misleading local name "imageUri".)
    std::wstring url = StringToWstring(sharedOpenUrlAction->GetUrl());
    RETURN_IF_FAILED(uriActivationFactory->CreateUri(HStringReference(url.c_str()).Get(), m_url.GetAddressOf()));

    InitializeBaseElement(std::static_pointer_cast<AdaptiveSharedNamespace::BaseActionElement>(sharedOpenUrlAction));
    return S_OK;
} CATCH_RETURN;
_Use_decl_annotations_
// Returns (a new reference to) the action's URL.
HRESULT AdaptiveOpenUrlAction::get_Url(IUriRuntimeClass** url)
{
    return m_url.CopyTo(url);
}
_Use_decl_annotations_
// Replaces the action's URL with the supplied runtime class instance.
HRESULT AdaptiveOpenUrlAction::put_Url(IUriRuntimeClass* url) try
{
    m_url = url;
    return S_OK;
} CATCH_RETURN;
_Use_decl_annotations_
// Identifies this wrapper as an OpenUrl action.
// NOTE(review): actionType is dereferenced without a null check -- confirm the
// projection layer guarantees a non-null out parameter.
HRESULT AdaptiveOpenUrlAction::get_ActionType(ABI::AdaptiveNamespace::ActionType* actionType)
{
    *actionType = ABI::AdaptiveNamespace::ActionType::OpenUrl;
    return S_OK;
}
// Projects this WinRT wrapper back into a shared-model OpenUrlAction,
// copying the base-action properties and the URL (as an absolute UTF-8
// string) into the returned shared pointer.
HRESULT AdaptiveOpenUrlAction::GetSharedModel(std::shared_ptr<AdaptiveSharedNamespace::BaseActionElement>& sharedModel) try
{
    std::shared_ptr<AdaptiveSharedNamespace::OpenUrlAction> openUrlAction = std::make_shared<AdaptiveSharedNamespace::OpenUrlAction>();
    RETURN_IF_FAILED(SetSharedElementProperties(std::static_pointer_cast<AdaptiveSharedNamespace::BaseActionElement>(openUrlAction)));

    if (m_url != nullptr)
    {
        HString urlTemp;
        // Propagate failures instead of silently ignoring the HRESULT as the
        // original did.
        RETURN_IF_FAILED(m_url->get_AbsoluteUri(urlTemp.GetAddressOf()));

        std::string urlString;
        RETURN_IF_FAILED(HStringToUTF8(urlTemp.Get(), urlString));
        openUrlAction->SetUrl(urlString);
    }

    sharedModel = openUrlAction;
    return S_OK;
} CATCH_RETURN;
AdaptiveNamespaceEnd
|
import java.util.*;
// Competitive-programming solution: for each test case, reads a token of
// '-' and non-'-' characters and prints a score derived from its running
// prefix sums.
public class prac{
    public static void main(String args[]){
        Scanner sc=new Scanner(System.in);
        int t;
        t=0;
        // Number of test cases.
        t=sc.nextInt();
        while(t>0){
            t--;
            String st="";
            st=sc.next();
            int arr[]=new int[st.length()];
            int i,pc,nc;
            pc=0;
            nc=0;
            long s=0;
            // arr[i] = (count of non-'-' chars) - (count of '-' chars) over
            // the prefix st[0..i].
            for(i=0;i<st.length();i++){
                if(st.charAt(i)=='-'){
                    nc++;
                }
                else{
                    pc++;
                }
                arr[i]=pc-nc;
            }
            // For each successively deeper negative prefix value (-1, -2, ...)
            // add the 1-based position where that value first appears; the
            // prefix changes by +/-1 per step, so the first occurrence of each
            // depth is found in order.
            nc=-1;
            for(i=0;i<st.length();i++){
                if(arr[i]==nc){
                    s+=(i+1);
                    nc--;
                }
                // System.out.println(s);
            }
            // Plus one final full pass over the string.
            s+=st.length();
            System.out.println(s);
        }
    }
}
// newPullRequest returns a new pullRequest object, resolving the pull
// request's info, commits and (optionally) the latest trigger comment
// through the control-version repository (cvr) in config.
func newPullRequest(pr int, config pullRequestConfig) (*pullRequest, error) {
	logger := config.logger
	cvr := config.cvr

	logger.Debugf("creating pull request: %d", pr)

	i, err := cvr.getPullRequestInfo(pr)
	if err != nil {
		return nil, fmt.Errorf("failed to get pull request info %s", err)
	}
	logger.Debugf("pull request %d info: %+v", pr, i)

	commits, err := cvr.getPullRequestCommits(pr)
	if err != nil {
		return nil, fmt.Errorf("failed to get pull request commits %s", err)
	}

	// Concatenate all commit shas into a single string.
	// NOTE(review): this leaves a trailing comma -- confirm consumers of
	// repoBranchInfo.sha expect that format.
	var shas string
	for _, c := range commits {
		shas += c.sha + ","
	}

	// The comment trigger is optional: it is only attached when a matching
	// comment is found; lookup errors deliberately leave it nil.
	var commentTrigger *RepoComment
	if len(config.commentTrigger.Comment) != 0 {
		comment, err := cvr.getLatestPullRequestComment(pr, config.commentTrigger.User,
			config.commentTrigger.Comment)
		if err == nil {
			commentTrigger = &comment
		}
	}
	logger.Debugf("pull request %d comment trigger: %#v", pr, commentTrigger)

	return &pullRequest{
		repoBranch: repoBranch{
			info:        repoBranchInfo{sha: shas},
			name:        i.branch,
			cvr:         cvr,
			beingTested: false,
			logger:      logger,
		},
		info:           i,
		number:         pr,
		commits:        commits,
		whitelist:      config.whitelist,
		commentTrigger: commentTrigger,
		postOnFailure:  config.postOnFailure,
		postOnSuccess:  config.postOnSuccess,
	}, nil
}
package org.clever.quartz.entity;
import com.baomidou.mybatisplus.annotation.TableId;
import lombok.Data;
import java.io.Serializable;
/**
 * Author: lzw<br/>
 * Created: 2018-03-09 18:30 <br/>
 *
 * Entity mirroring the Quartz scheduler-state table, which records the
 * periodic "check-in" (heartbeat) of each scheduler instance.
 */
@Data
public class QrtzSchedulerState implements Serializable {
    /**
     * Scheduler name
     */
    @TableId
    private String schedName;
    /**
     * Unique identifier of the scheduler instance (the Instance Id from the configuration file)
     */
    @TableId
    private String instanceName;
    /**
     * Time of the last check-in
     */
    private Long lastCheckinTime;
    /**
     * Frequency, in milliseconds, at which the scheduler instance checks in to the database
     */
    private Long checkinInterval;
}
|
The potential of Ginkgo biloba in the treatment of human diseases and the relationship to Nrf2-mediated antioxidant protection.
OBJECTIVES
This review summarises the current findings regarding the therapeutic effects of GBE and its active ingredients in relation to the Nrf2 antioxidant cascade, to provide scientific insights into the clinical applications of GBE in treating oxidative stress-induced diseases.
KEY FINDINGS
We found that GBE or its active ingredients activate several signalling mechanisms in cells, including the Nrf2 pathway, which is the master controller of the antioxidant defence that detoxifies reactive oxygen species (ROS). ROS-mediated cell and tissue damage contributes to ageing and pathological conditions that underlie several important human diseases, such as diabetic nephropathy (DN), ischemic stroke and age-related macular degeneration (AMD).
SUMMARY
GBE or its component antioxidants could be applied for the treatment and/or prevention of DN, ischemic stroke and AMD due to their capacity to activate Nrf2 signalling. These strategies may also be applicable to the treatment of other similar conditions that are induced by oxidative stress. Thus, the therapeutic applications of GBE could be expanded. |
<reponame>janatjak/ui
import { css } from 'styled-components';
import { Breakpoints } from '../scales/breakpoints';
/**
 * Builds one styled-components css fragment per breakpoint (mobile-first):
 * the first breakpoint's styles apply unconditionally, while each later
 * fragment is wrapped in a `min-width` media query whose lower bound is the
 * PREVIOUS breakpoint value.
 */
export const mediaQueries = (breakpoints: Breakpoints, predicate: (i: number, br: string) => any) => breakpoints
.map((br, i) =>
i === 0
? css`
${predicate(i, br)}
`
: css`
@media screen and (min-width: ${breakpoints[i - 1]}) {
${predicate(i, br)}
}
`
);
|
/**
* A PortalOutlet that lets multiple components live for the lifetime of the outlet,
* allowing faster switching and persistent data.
*/
import {ComponentPortal} from '@angular/cdk/portal';
import {
ApplicationRef,
ComponentFactoryResolver,
ComponentRef,
EmbeddedViewRef,
Injector
} from '@angular/core';
/** Static description of a tab that the outlet can instantiate. */
export interface TabInterface {
  name:string;
  title:string;
  /** When set, the tab is disabled and this string explains why. */
  disableBecause?:string;
  /** Constructor of the component rendered inside the tab. */
  componentClass:{ new(...args:any[]):TabComponent };
}

/** Contract every tab component must fulfil. */
export interface TabComponent {
  onSave:() => void;
}

/** A tab that has been instantiated and can be attached to the outlet. */
export interface ActiveTabInterface {
  name:string;
  portal:ComponentPortal<TabComponent>;
  componentRef:ComponentRef<TabComponent>;
  /** Detaches the component's view and destroys the component. */
  dispose:() => void;
}
export class TabPortalOutlet {

  // Active tabs that have been instantiated
  public activeTabs:{ [name:string]:ActiveTabInterface } = {};

  // The current tab
  public currentTab:ActiveTabInterface|null = null;

  constructor(
    public availableTabs:TabInterface[],
    public outletElement:HTMLElement,
    private componentFactoryResolver:ComponentFactoryResolver,
    private appRef:ApplicationRef,
    private injector:Injector) {
  }

  /** Component instances of every tab created so far. */
  public get activeComponents():TabComponent[] {
    const tabs = _.values(this.activeTabs);
    return tabs.map((tab:ActiveTabInterface) => tab.componentRef.instance);
  }

  /**
   * Switches the outlet to the named tab, instantiating the component on
   * first use and re-attaching the cached instance afterwards.
   * Throws for an unknown tab name; no-ops for a disabled tab.
   * NOTE(review): returns false on every path, including success -- confirm
   * callers ignore the return value before relying on it.
   */
  public switchTo(name:string) {
    const tab = _.find(this.availableTabs, tab => tab.name === name);

    if (!tab) {
      throw(`Trying to switch to unknown tab ${name}.`);
    }
    if (tab.disableBecause != null) {
      return false;
    }

    // Detach any current instance
    this.detach();

    // Get existing or new component instance
    const instance = this.activateInstance(tab);

    // At this point the component has been instantiated, so we move it to the location in the DOM
    // where we want it to be rendered.
    this.outletElement.innerHTML = '';
    this.outletElement.appendChild(this._getComponentRootNode(instance.componentRef));
    this.outletElement.dataset.tabName = tab.title;
    this.currentTab = instance;
    return false;
  }

  /** Detaches the current tab from its portal host without destroying it. */
  public detach():void {
    const current = this.currentTab;

    if (current !== null) {
      current.portal.setAttachedHost(null);
      this.currentTab = null;
    }
  }

  /**
   * Clears out a portal from the DOM.
   */
  dispose():void {
    // Dispose all active tabs
    _.each(this.activeTabs, active => active.dispose());

    // Remove outlet element
    if (this.outletElement.parentNode != null) {
      this.outletElement.parentNode.removeChild(this.outletElement);
    }
  }

  /** Returns the cached instance for the tab, creating it on first access. */
  private activateInstance(tab:TabInterface):ActiveTabInterface {
    if (!this.activeTabs[tab.name]) {
      this.activeTabs[tab.name] = this.createComponent(tab);
    }
    return this.activeTabs[tab.name] || null;
  }

  /** Instantiates the tab's component and registers its view with Angular. */
  private createComponent(tab:TabInterface):ActiveTabInterface {
    const componentFactory = this.componentFactoryResolver.resolveComponentFactory(tab.componentClass);
    const componentRef = componentFactory.create(this.injector);
    const portal = new ComponentPortal(tab.componentClass, null, this.injector);

    // Attach component view
    this.appRef.attachView(componentRef.hostView);

    return {
      name: tab.name,
      portal: portal,
      componentRef: componentRef,
      dispose: () => {
        this.appRef.detachView(componentRef.hostView);
        componentRef.destroy();
      }
    };
  }

  /** Gets the root HTMLElement for an instantiated component. */
  private _getComponentRootNode(componentRef:ComponentRef<any>):HTMLElement {
    return (componentRef.hostView as EmbeddedViewRef<any>).rootNodes[0] as HTMLElement;
  }
}
|
<reponame>brutzl/pymbs<filename>pymbs/input/input_element.py
from pymbs.common.mbselement import MbsElement
class MbsInputElement(MbsElement):
    """
    This class provides basic properties for all mbs-elements:
    name
    predecessor (parent)
    successor (child)
    """

    def __init__(self, name, parent):
        # Graph data is passed as None here; only name/parent are tracked.
        MbsElement.__init__(self, name, None)
        self.parent = parent
        self.children = []
        # Reference to the corresponding processing object
        self.referenceToNew = None

    def _getChild(self, idx=0):
        # Accessor backing the `child` property; defaults to the first child.
        return self.children[idx]

    def _setChild(self, val):
        # Setter backing the `child` property: appends when there are no
        # children yet, otherwise replaces the first child.
        if len(self.children) == 0:
            self.children.append(val)
        else:
            self.children[0] = val

    # make "child" a shortcut to "self.children[0]"
    child = property(_getChild, _setChild)
|
The Chinese government is reported to have sent thousands of soldiers and police to quell unrest in the central province of Hunan.
Up to 10,000 people took to the streets in Jishou to demand money back from an allegedly fraudulent fundraising firm, a Hong Kong-based rights group said.
In another protest in the eastern port of Ningbo, 10,000 workers clashed with police, the group added.
Social unrest is common in China, but rarely on this scale.
Confronting police
The Hong Kong-based Information Centre for Human Rights and Democracy said that, in both protests, violent clashes erupted between angry crowds and local authorities.
In Jishou 50 people were injured in rioting, and police arrested 20 people, the group said.
According to Xinhua news agency, the protesters blocked roads and trains to demand that the government take action after a fundraising company "failed to pay them back as promised".
The Jishou government admitted in a statement that armed police were drafted in to disperse the crowds, but did not mention if anyone was hurt.
In the second incident, thousands of migrant workers confronted police in Ningbo to protest about the injury of a man in a local factory.
The protests are the latest in a series of confrontations over social issues in China - many of which stem from grievances over alleged corruption and local authorities' abuse of power.
In June, thousands of people rioted in Guizhou province over claims that police had covered up the rape and murder of a girl. |
// CronStack wires up a standard router with a cron specific decorator chain.
// This can be passed into BaseApp.
// Cron stack configuration is a subset of the main stack. It is using the same
// components but not all functionalities are needed or expected (ie no message
// fee).
func CronStack() weave.Handler {
	rt := app.NewRouter()

	// Cron requests use the cron authenticator instead of signature checks.
	authFn := cron.Authenticator{}

	blog.RegisterCronRoutes(rt, authFn)

	// Logging/recovery/tagging only -- deliberately no fee decorator here.
	decorators := app.ChainDecorators(
		utils.NewLogging(),
		utils.NewRecovery(),
		utils.NewKeyTagger(),
		utils.NewActionTagger(),
	)
	return decorators.WithHandler(rt)
}
Design of Dynamic Diffusion Simulation System for Atmospheric Pollutants in Coastal Cities under Persistent Inverse Temperature
ABSTRACT Gong, Y. and Luo, X., 2020. Design of dynamic diffusion simulation system for atmospheric pollutants in coastal cities under persistent inverse temperature. In: Yang, Y.; Mi, C.; Zhao, L., and Lam, S. (eds.), Global Topics and New Trends in Coastal Research: Port, Coastal and Ocean Engineering. Journal of Coastal Research, Special Issue No. 103, pp. 526–529. Coconut Creek (Florida), ISSN 0749-0208. The prediction of the dynamic diffusion of atmospheric pollutants in coastal cities under continuous inverse temperature is the key to establish the dynamic treatment of atmospheric pollutants. In view of the low accuracy of the traditional nonlinear feature estimation simulation method, a continuous inverse temperature simulation model for the dynamic diffusion of atmospheric pollutants in coastal cities under continuous inverse temperature is proposed. A self-similar regression model is constructed for the statistical information of atmospheric pollutant dynamic diffusion in coastal cities under continuous inverse temperature, and a multi-scale decomposition and factor analysis are carried out for the time series of atmospheric pollutant dynamic diffusion in coastal cities under continuous inverse temperature. The simulation results show that the model has high accuracy and good convergence in the simulation and prediction of atmospheric pollutant dynamic diffusion in coastal cities. |
import * as React from 'react';
import * as moment from 'moment';
import { IPnPControlsProps, IPnpControlsState } from './IPnPControlsProps';
import { Spinner, SpinnerSize } from 'office-ui-fabric-react/lib/components/Spinner';
import {
DocumentCard,
DocumentCardPreview,
DocumentCardTitle,
DocumentCardActivity,
IDocumentCardProps
} from 'office-ui-fabric-react/lib/components/DocumentCard';
import { Placeholder } from '@pnp/spfx-controls-react/lib/Placeholder';
import { WebPartTitle } from "@pnp/spfx-controls-react/lib/WebPartTitle";
import { IDocument } from '../../../models/IDocument';
import { List } from 'office-ui-fabric-react/lib/List';
export default class PnPControls extends React.Component<IPnPControlsProps, IPnpControlsState> {
  /**
   * Constructor
   * @param props
   */
  constructor(props: IPnPControlsProps) {
    super(props);

    this.state = {
      items: [],
      loading: false,
      // Show the configuration placeholder until a list has been selected.
      showPlaceholder: (this.props.list === null || this.props.list === "")
    };
  }

  /**
   * componentDidMount lifecycle hook: load items once a list is configured.
   */
  public componentDidMount() {
    if (this.props.list !== null && this.props.list !== "") {
      this._getListItems();
    }
  }

  /**
   * componentDidUpdate lifecycle hook
   * @param prevProps
   * @param prevState
   */
  public componentDidUpdate(prevProps: IPnPControlsProps, prevState: IPnpControlsState) {
    // Only react when the selected list actually changed.
    if (this.props.list !== prevProps.list) {
      if (this.props.list !== null && this.props.list !== "") {
        this._getListItems();
      } else {
        this.setState({
          showPlaceholder: true
        });
      }
    }
  }

  /**
   * Retrieves items for the currently configured list (this.props.list).
   */
  private async _getListItems() {
    this.setState({
      loading: true
    });

    let listItems = await this.props.spService.getDocuments(this.props.list.toString(), this.props.numberOfItems);

    this.setState({
      items: listItems ? listItems : [],
      loading: false,
      showPlaceholder: false
    });
  }

  /*
   * Opens the web part property pane
   */
  private _configureWebPart() {
    this.props.context.propertyPane.open();
  }

  /**
   * React render method
   */
  public render(): React.ReactElement<IPnPControlsProps> {
    // Check if placeholder needs to be shown
    if (this.state.showPlaceholder) {
      return (
        <Placeholder
          iconName="Edit"
          iconText="List view web part configuration"
          description="Please configure the web part before you can show the list view."
          buttonLabel="Configure"
          onConfigure={this._configureWebPart.bind(this)} />
      );
    }

    // Either a loading spinner, an empty-state placeholder, or the item list.
    return (
      <div>
        <WebPartTitle displayMode={this.props.displayMode}
          title={this.props.title}
          updateProperty={this.props.updateProperty} />
        {
          this.state.loading ?
            (
              <Spinner size={SpinnerSize.large} label="Retrieving results ..." />
            ) : (
              this.state.items.length === 0 ?
                (
                  <Placeholder
                    iconName="InfoSolid"
                    iconText="No items found"
                    description="The list or library you selected does not contain items." />
                ) : (
                  <List
                    items={this.state.items}
                    onRenderCell={this._onRenderCell} />
                )
            )
        }
      </div>
    );
  }

  /** Renders a single document as a DocumentCard list cell. */
  private _onRenderCell(item: IDocument, index: number): JSX.Element {
    return (
      <DocumentCard onClickHref={item.url} key={item.id}>
        <DocumentCardPreview
          previewImages={[
            {
              previewImageSrc: item.previewImageUrl,
              iconSrc: item.extension,
              width: 318,
              height: 196,
              accentColor: '#ce4b1f'
            }
          ]}
        />
        <DocumentCardTitle title={item.title} />
      </DocumentCard>
    );
  }
}
|
export * from "./achievements";
export * from "./bosses";
export * from "./graces";
export * from "./quests";
export * from "./types";
|
/**
 * Simple helper class.
 *
 * Builds populated value objects (articles, persons, categories) for tests.
 *
 * @author <a href="mailto:[email protected]">Armin Waibel</a>
 */
public class VOHelper
{
    /** Returns a list of {@code number} freshly created articles. */
    public static List createNewArticleList(int number)
    {
        ArrayList list = new ArrayList();
        for (int i = 0; i < number; i++)
        {
            list.add(createNewArticle(i));
        }
        return list;
    }

    /** Returns a list of {@code number} freshly created persons. */
    public static List createNewPersonList(int number)
    {
        ArrayList list = new ArrayList();
        for (int i = 0; i < number; i++)
        {
            list.add(createNewPerson(i));
        }
        return list;
    }

    /** Creates an article with a default name and counter-derived fields. */
    public static ArticleVO createNewArticle(int counter)
    {
        return createNewArticle("A simple test article ", counter);
    }

    /**
     * Creates an article whose price and description derive from counter.
     * NOTE(review): new BigDecimal(double) captures binary floating-point
     * noise; BigDecimal.valueOf would be cleaner but changes the exact
     * stored value, so it is intentionally left as-is here.
     */
    public static ArticleVO createNewArticle(String name, int counter)
    {
        ArticleVO a = new ArticleVO();
        a.setName(name);
        a.setPrice(new BigDecimal(0.45d * counter));
        a.setDescription("test article description " + counter);
        return a;
    }

    /** Creates a category with the given name and a fixed description. */
    public static CategoryVO createNewCategory(String name)
    {
        return new CategoryVO(null, name, "this is a test category");
    }

    /** Creates a person whose name/grade fields embed the counter. */
    public static PersonVO createNewPerson(int counter)
    {
        PersonVO p = new PersonVO();
        p.setFirstName("firstname " + counter);
        p.setLastName("lastname " + counter);
        p.setGrade("grade" + counter);
        return p;
    }
}
<reponame>scala-native/immix
#ifndef IMMIX_BLOCLIST_H
#define IMMIX_BLOCLIST_H

#include "../headers/BlockHeader.h"

/* Sentinel value marking the end of the block list. */
#define LAST_BLOCK -1

/*
 * Linked list of heap blocks with head and tail pointers.
 * NOTE(review): the include guard spells "BLOCLIST" (missing K) -- harmless
 * since the #endif matches, but worth renaming in a follow-up.
 */
typedef struct {
    word_t *heapStart;
    BlockHeader *first;
    BlockHeader *last;
} BlockList;

/* Initializes an empty list; offset presumably anchors block addressing. */
void BlockList_Init(BlockList *blockList, word_t *offset);
/* Empties the list. */
void BlockList_Clear(BlockList *blockList);
/* Returns true when the list holds no blocks. */
bool BlockList_IsEmpty(BlockList *blockList);
/* Pops and returns the head block. */
BlockHeader *BlockList_RemoveFirstBlock(BlockList *blockList);
/* Appends a single block at the tail. */
void BlockList_AddLast(BlockList *blockList, BlockHeader *block);
/* Appends a pre-linked chain of blocks [first, last] at the tail. */
void BlockList_AddBlocksLast(BlockList *blockList, BlockHeader *first,
                             BlockHeader *last);
/* Debug helper: prints the list contents. */
void BlockList_Print(BlockList *blockList);

#endif // IMMIX_BLOCLIST_H
|
def context(store, starts, content, prefix = prefix, analyse = analyse):
    # Split the prefix off the raw content, then analyse the remainder.
    # NOTE(review): `prefix` and `analyse` default to same-named module-level
    # callables captured at definition time -- confirm they exist at import.
    (start, content) = prefix(starts, content)
    (names, argument, invoke) = analyse(store, content)
    return (start, names, argument, invoke)
<reponame>nimakaviani/kapp
package clusterapply
// UI abstracts the progress output used by the cluster-apply code.
type UI interface {
	// NotifySection emits a section-heading style message.
	NotifySection(msg string, args ...interface{})
	// Notify emits a regular progress message.
	Notify(msg string, args ...interface{})
}
|
// BinarySearch2 is an implementation with recursion. It searches the sorted
// slice data within the inclusive window [low, high] and returns the index
// of key, or -1 when key is absent.
func BinarySearch2(data []int, key, low, high int) int {
	if high < low {
		return -1
	}
	// Midpoint of the current window. The original used (high-low)/2, which
	// only centers correctly when low == 0; low+(high-low)/2 is also the
	// overflow-safe form.
	mid := low + (high-low)/2
	if key == data[mid] {
		return mid
	}
	if key > data[mid] {
		// Key is larger than the midpoint, so it can only be in the upper
		// half. (The original recursed into the wrong half here.)
		return BinarySearch2(data, key, mid+1, high)
	}
	return BinarySearch2(data, key, low, mid-1)
}
<reponame>welialmeida/cmu<gh_stars>0
package pt.ulisboa.tecnico.cmu.command;
import pt.ulisboa.tecnico.cmu.response.Response;
/**
* Created by daniel on 01-04-2018.
*/
/**
 * Serializable command carrying quiz answers for a monument; dispatched to a
 * CommandHandler via double dispatch.
 */
public class PostQuizAnswersForMonumentCommand extends Command {

    private static final long serialVersionUID = -8807331723807741905L;

    // Payload with the quiz answers.
    private String message;
    // Wire identifier used to recognize this command type.
    private String Id = "PostQuizAnswersForMonumentCommand";

    @Override
    public String getId() {
        return Id;
    }

    /**
     * @param message the quiz-answers payload to post
     */
    public PostQuizAnswersForMonumentCommand(String message) {
        this.message = message;
    }

    /**
     * Double-dispatch entry point: forwards this command to the handler.
     */
    @Override
    public Response handle(CommandHandler chi) {
        return chi.handle(this);
    }

    public String getMessage() {
        return this.message;
    }
}
// Copyright 2019 by <NAME>, All Rights Reserved
#include <inttypes.h>
#include <regex.h>
#include <stdio.h>
#include "debug.h"
#include "domain.h"
/* Maximum number of regex capture groups usable in $(N) path expansion. */
#define MAX_MATCH 8

/*
 * Singly linked list node for a virtual-host mapping. Exact-match entries
 * store the host string; regex entries leave host NULL and use rhost
 * (see agoo_domain_add vs agoo_domain_add_regex below).
 */
typedef struct _domain {
    struct _domain *next;
    char *host;
    char *path;
    regex_t rhost;
} *Domain;

/* Head of the registered domain list (NULL when none are configured). */
static Domain domains = NULL;
/* Returns true when at least one domain mapping has been registered. */
bool
agoo_domain_use() {
    return NULL != domains;
}
/*
 * Registers an exact-match host->path mapping at the tail of the domain
 * list (tail insertion preserves registration order). Returns AGOO_ERR_OK
 * or an error code. On allocation failure the partially built entry is now
 * released instead of leaked as in the original.
 */
int
agoo_domain_add(agooErr err, const char *host, const char *path) {
    Domain d = (Domain)AGOO_CALLOC(1, sizeof(struct _domain));

    if (NULL == d) {
        return AGOO_ERR_MEM(err, "Domain");
    }
    if (NULL == (d->host = AGOO_STRDUP(host))) {
        AGOO_FREE(d);
        return AGOO_ERR_MEM(err, "Domain host");
    }
    if (NULL == (d->path = AGOO_STRDUP(path))) {
        AGOO_FREE(d->host);
        AGOO_FREE(d);
        return AGOO_ERR_MEM(err, "Domain path");
    }
    if (NULL == domains) {
        domains = d;
    } else {
        Domain last = domains;

        for (; NULL != last->next; last = last->next) {
        }
        last->next = d;
    }
    return AGOO_ERR_OK;
}
/*
 * Registers a regex host->path mapping at the tail of the domain list.
 * host is compiled as an extended regex; the entry's host field stays NULL,
 * which is how the resolve/cleanup code distinguishes regex entries.
 * On failure the partially built entry is now released (the original leaked
 * `d` on regcomp failure and `d` plus the compiled regex on strdup failure).
 */
int
agoo_domain_add_regex(agooErr err, const char *host, const char *path) {
    Domain d = (Domain)AGOO_CALLOC(1, sizeof(struct _domain));

    if (NULL == d) {
        return AGOO_ERR_MEM(err, "Domain");
    }
    if (0 != regcomp(&d->rhost, host, REG_EXTENDED | REG_NEWLINE)) {
        AGOO_FREE(d);
        return agoo_err_set(err, AGOO_ERR_ARG, "invalid regex");
    }
    if (NULL == (d->path = AGOO_STRDUP(path))) {
        regfree(&d->rhost);
        AGOO_FREE(d);
        return AGOO_ERR_MEM(err, "Domain path");
    }
    if (NULL == domains) {
        domains = d;
    } else {
        Domain last = domains;

        for (; NULL != last->next; last = last->next) {
        }
        last->next = d;
    }
    return AGOO_ERR_OK;
}
/*
 * Maps a request host to its registered path. Exact-match entries are
 * compared with strcmp and return their stored path directly. Regex entries
 * expand $(N) references in the stored path from the regex capture groups
 * into buf (at most blen-1 characters) and return buf. Returns NULL when no
 * registered domain matches.
 */
const char*
agoo_domain_resolve(const char *host, char *buf, size_t blen) {
    Domain d;

    for (d = domains; NULL != d; d = d->next) {
        if (NULL != d->host) { // simple string compare
            if (0 == strcmp(host, d->host)) {
                return d->path;
            }
        } else {
            regmatch_t matches[MAX_MATCH];
            char *bend = buf + blen - 1;

            if (0 == regexec(&d->rhost, host, MAX_MATCH, matches, 0)) {
                char *b = buf;
                char *p = d->path;

                for (; '\0' != *p; p++) {
                    if ('$' == *p && '(' == *(p + 1)) {
                        const char *m;
                        char *end;
                        long i;
                        int start;
                        int len;

                        p += 2;
                        i = strtol(p, &end, 10);
                        /*
                         * Skip malformed or out-of-range group references.
                         * The 0 > i check is new: a negative index (e.g.
                         * "$(-1)") previously indexed matches[] out of
                         * bounds.
                         */
                        if (')' != *end || 0 > i || MAX_MATCH <= i) {
                            continue;
                        }
                        p = end;
                        /* rm_so < 0 means the group did not participate. */
                        if (0 > (start = (int)matches[i].rm_so)) {
                            continue;
                        }
                        len = (int)matches[i].rm_eo - start;
                        /* Skip groups that would overflow the buffer. */
                        if (bend - b <= len) {
                            continue;
                        }
                        for (m = host + start; 0 < len; len--) {
                            *b++ = *m++;
                            *b = '\0';
                        }
                    } else {
                        *b++ = *p;
                    }
                }
                *b = '\0';
                return buf;
            }
        }
    }
    return NULL;
}
/*
 * Frees the whole domain list. Entries with a NULL host were registered via
 * agoo_domain_add_regex, so their compiled regex is released instead of a
 * host string.
 */
void
agoo_domain_cleanup() {
    Domain d;

    while (NULL != (d = domains)) {
        domains = d->next;
        if (NULL == d->host) {
            regfree(&d->rhost);
        } else {
            AGOO_FREE(d->host);
        }
        AGOO_FREE(d->path);
        AGOO_FREE(d);
    }
}
|
<reponame>MostafaTwfiq/C-DataStructure-And-Algorithms
#ifndef C_DATASTRUCTURES_DEQUETEST_H
#define C_DATASTRUCTURES_DEQUETEST_H

#include "../../../../ErrorsTestStruct.h"

/* Runs the deque unit-test suite. */
void dequeUnitTest();

#endif //C_DATASTRUCTURES_DEQUETEST_H
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.kgottis.mserv.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
/**
 * Configuration that enables ${...} placeholder resolution in @Value fields.
 *
 * @author kostas
 */
@Configuration
@ComponentScan
public class PropertiesConfig {

    /**
     * To resolve ${} in @Value.
     * <p>
     * Kept static -- NOTE(review): Spring recommends static @Bean methods
     * for BeanFactoryPostProcessor beans such as this one; confirm against
     * the Spring reference documentation.
     *
     * @return PropertySourcesPlaceholderConfigurer bean
     */
    @Bean
    public static PropertySourcesPlaceholderConfigurer propertyConfigInDev() {
        return new PropertySourcesPlaceholderConfigurer();
    }
}
|
/*
* Copyright (c) 2005-2021 Xceptance Software Technologies GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.xceptance.xlt.api.engine.scripting;
import org.junit.Assert;
import org.junit.Test;
import com.xceptance.xlt.api.actions.AbstractHtmlPageAction;
import com.xceptance.xlt.engine.scripting.TestContext;
/**
 * Unit tests for the constructors of AbstractHtmlUnitScriptAction.
 *
 * @author <NAME>
 */
public class AbstractHtmlUnitScriptActionTest
{
    /**
     * Verifies both constructors: the timer-name-only variant creates a fresh
     * web client taken from the current test context, while the
     * (previousAction, timerName) variant reuses the previous action's web
     * client and records the previous action.
     */
    @Test
    public void testConstructors()
    {
        final String timerName = "myTimerName";

        AbstractHtmlUnitScriptAction ahusa = new ConcreteHtmlUnitScriptActionTest(timerName);
        Assert.assertEquals("Wrong timerName! it should be \"" + timerName + "\" but is \"" + ahusa.getTimerName() + "\"!", timerName,
                            ahusa.getTimerName());
        Assert.assertNotNull("There should be a freshly created web client", ahusa.getWebClient());
        Assert.assertSame("Web client mismatch of action and current test context", ahusa.getWebClient(), TestContext.getCurrent()
                          .getWebClient());
        ahusa.closeWebClient(); // just closing to avoid a memory leak

        final AbstractHtmlUnitScriptAction dummy = new ConcreteHtmlUnitScriptActionTest(timerName);
        ahusa = new ConcreteHtmlUnitScriptActionTest(dummy, timerName);
        Assert.assertEquals("Wrong timerName! it should be \"" + timerName + "\" but is \"" + ahusa.getTimerName() + "\"!", timerName,
                            ahusa.getTimerName());
        Assert.assertEquals("Web client mismatch. Forwarding of web client failed in constructor.", dummy.getWebClient(),
                            ahusa.getWebClient());
        Assert.assertEquals("Previous action mismatch. Assignment of previous action failed in constructor.", dummy,
                            ahusa.getPreviousAction());
    }

    /** Minimal concrete subclass; all lifecycle hooks intentionally empty. */
    private class ConcreteHtmlUnitScriptActionTest extends AbstractHtmlUnitScriptAction
    {
        private ConcreteHtmlUnitScriptActionTest(final AbstractHtmlPageAction previousAction, final String timerName)
        {
            super(previousAction, timerName);
        }

        private ConcreteHtmlUnitScriptActionTest(final String timerName)
        {
            super(timerName);
        }

        @Override
        public void preValidate() throws Exception
        {
        }

        @Override
        protected void execute() throws Exception
        {
        }

        @Override
        protected void postValidate() throws Exception
        {
        }
    }
}
|
/*
 * __inmem_col_int --
 *	Build in-memory index for column-store internal pages.
 */
static void
__inmem_col_int(WT_SESSION_IMPL *session, WT_PAGE *page)
{
	WT_BTREE *btree;
	WT_CELL *cell;
	WT_CELL_UNPACK *unpack, _unpack;
	const WT_PAGE_HEADER *dsk;
	WT_PAGE_INDEX *pindex;
	WT_REF **refp, *ref;
	uint32_t i;

	btree = S2BT(session);
	dsk = page->dsk;
	unpack = &_unpack;

	/*
	 * Walk the page's on-disk cells in order, pairing each with the
	 * corresponding pre-allocated WT_REF slot from the page index.
	 */
	pindex = WT_INTL_INDEX_GET_SAFE(page);
	refp = pindex->index;
	WT_CELL_FOREACH(btree, dsk, cell, unpack, i) {
		ref = *refp++;
		ref->home = page;

		__wt_cell_unpack(cell, unpack);
		/* The on-disk cell itself serves as the child's address. */
		ref->addr = cell;
		/*
		 * NOTE(review): unpack->v is taken as the child's starting
		 * record number -- confirm against the cell-unpack contract.
		 */
		ref->key.recno = unpack->v;
	}
}
// Simulates Linux's pwritev API on OS X: writes each iovec buffer in turn
// with pwrite, advancing the file offset manually. Returns the total bytes
// written, or a negative value on error.
// NOTE(review): on error after some buffers succeeded, the partial byte
// count is discarded in favor of the error code -- confirm callers expect
// that.
ssize_t pwritev(int fd, const struct iovec* iovec, int count, off_t offset) {
  ssize_t total_written_bytes = 0;
  for (int i = 0; i < count; i++) {
    ssize_t r;
    // RETRY_ON_EINTR presumably re-issues pwrite on EINTR (project macro).
    RETRY_ON_EINTR(r, pwrite(fd, iovec[i].iov_base, iovec[i].iov_len, offset));
    if (r < 0) {
      return r;
    }
    total_written_bytes += r;
    // A short write ends the emulation early, like the kernel would.
    if (static_cast<size_t>(r) < iovec[i].iov_len) {
      break;
    }
    offset += iovec[i].iov_len;
  }
  return total_written_bytes;
}
// doesLinkExist checks if a link exists in a given slice
func (s *Scrapper) doesLinkExist(newLink commons.Links, existingLinks []commons.Links) (exists bool) {
for _, val := range existingLinks {
if strings.Compare(newLink.Href, val.Href) == 0 {
exists = true
}
}
return
} |
def fuzzy_match_threshold(category):
    """Return the fuzzy-match score threshold (0-100) for a game category.

    Unknown categories yield None, exactly like the original if-chain's
    implicit fall-through.
    """
    thresholds = {
        'word_jumble': 60,
        'spelling_backwords': 90,
        'simple_math': 100,
        'memory_game': 60,
        'simple_trivia': 60,
        'word_association': 60,
        'repeat': 60,
        'out_of_place': 60,
        'low_high_number': 100,
    }
    return thresholds.get(category)
def add_post_pipe_connect_cb(self, callback):
    """Register a callback invoked after a pipe connection is added.

    Callbacks accumulate in registration order; nothing removes or
    de-duplicates them here.
    """
    self._on_post_pipe_add.append(callback)
CBS aired its final Thursday night NFL match-up of the season, a shutout, dropping to a 2017 ratings low in the process.
The game, simulcast on CBS, the NFL Network and Amazon, averaged a 8.4 overnight rating among households, a low for this season's Thursday Night Football showings. It marked a 15 percent decline from the previous week — which is not much of a surprise, given the incredibly lopsided score. The Baltimore Ravens embarrassed the Miami Dolphins 40-0. There's no year-to-year comparison, as the 2016 week eight game aired just on the NFL Network. (Those games typically see the most modest ratings returns of each season, a fact we'll be reminded of after the Nov. 2 game.)
Next week's game airs on the NFL Network exclusively, before the rest of the year's Thursday games see NBC enter as the broadcast network partner. But this Thursday was still a win for CBS, which will emerge as TV's top performer of the night, though a soft way to end its string of NFL games this season.
No. 2 status of the night goes to ABC, which narrowly outperformed NBC in the key demo — thanks to the return of Grey's Anatomy. After a one-week break, the drama returned to the schedule with a 1.8 rating among adults 18-49. That was down from its last outing, but Scandal (1.1 adults) and How to Get Away With Murder (0.9 adults) were unmoved just after.
Will & Grace led NBC's lineup, sharing Grey's Anatomy's 1.8 rating among adults 18-49. Superstore (1.2 adults), The Good Place (1.1 adults) and Chicago Fire (1.2 adults) were all steady, while Great News saw significant improvement, rising to a 1.0 rating in the key demo.
Speaking of growth, The Orville was up to a 1.2 rating among adults 18-49 on Fox. The network just released live-plus-35 day ratings for The Orville premiere, by the way. And the episode has already fetched more than 14 million viewers and a 3.9 rating among adults 18-49. Thursday's Orville was an improvement from an even Gotham (0.9 adults) at 8 o'clock. On the CW, Supernatural (0.6 adults) and Arrow (0.5 adults) were both steady. |
# Reads an N-vertex, M-edge undirected graph, then Q paint queries
# (vertex v, radius d, color c) and prints each vertex's final color.
N, M = map(int, input().split())
Adj = [[] for i in range(N+1)]
for i in range(M):
    a, b = map(int, input().split())
    Adj[a].append(b)
    Adj[b].append(a)
Q = int(input())
v, d, c = [0]*Q, [0]*Q, [0]*Q
for i in range(Q):
    v[i], d[i], c[i] = map(int, input().split())
# memo[v][d] records which (1-based) operation first painted vertex v with
# remaining radius d; 0 means untouched.
memo = [[0] * (10+1) for i in range(N+1)]
def paint(v, d, i):
    # Already painted at this radius: stop exploring further.
    if memo[v][d]:
        return
    # Not painted yet: remember which operation painted it.
    memo[v][d] = i
    # Distance -1 does not exist, so stop here.
    if d == 0:
        return
    # Propagate the operation number down to distance 0.
    paint(v, d-1, i)
    # Do the same for vertices adjacent to the current one; the reach
    # shrinks by 1 with every step away from the query vertex.
    for u in Adj[v]:
        paint(u, d-1, i)
# Process the queries in reverse order, so later paints win.
for q in range(Q-1, -1, -1):
    paint(v[q], d[q], q+1)
for i in range(1, N+1):
    # If any operation reached this vertex, look up which one it was and
    # print that operation's color; otherwise print 0 (unpainted).
    if memo[i][0] != 0:
        print(c[memo[i][0]-1])
    else:
        print(0)
|
from pyroute2 import IPDB
from pyroute2.common import uifname
# NOTE(review): repeatedly looks up an interface attribute on one IPDB
# instance, presumably as a leak/stress test -- confirm intent.
ip = IPDB()

def test1():
    # The try/finally with a bare pass changes nothing functionally; a
    # missing 'eth0' (KeyError) still propagates to the caller.
    try:
        device = ip.interfaces['eth0']['ipaddr']
    finally:
        pass

for i in range(1000):
    test1()
ip.release()
|
// -----------------------------------------------------------------------------
/**
 * @brief Parses and verifies client socket data from json array
 *
 * The json data is expected (required) to be formatted like the following
 *
 *
 "clientSockets": [
 {
 "name": "NAME1"
 }
 ]
 *
 * @return the socket names found; empty when the field is absent, not an
 * array, or contains no valid entries.
 */
std::vector<std::string> MulticastSocketPlugin::parseClientSocketsArray(const Json::Value &jsonData) const
{
    std::vector<std::string> socketsVec;

    const Json::Value &sockets = jsonData["clientSockets"];
    if (!sockets.isArray() || sockets.empty())
    {
        // Missing/empty config is informational only, not an error.
        AI_LOG_INFO("'clientSockets' field is not an array or it's empty");
        return std::vector<std::string>();
    }

    for (Json::Value::const_iterator it = sockets.begin(); it != sockets.end(); ++it)
    {
        const Json::Value &socket = *it;
        if (!socket.isObject())
        {
            // Malformed entries are skipped rather than aborting the parse.
            AI_LOG_ERROR("invalid 'socket' entry at index %u in 'clientSockets' array", it.index());
            continue;
        }

        const Json::Value name = socket["name"];
        if (!name.isString())
        {
            AI_LOG_ERROR("invalid name entry at index %u in 'clientSockets' array", it.index());
            continue;
        }

        socketsVec.push_back(name.asString());
    }

    return socketsVec;
}
/* ===========================================================
* JFreeChart : a free chart library for the Java(tm) platform
* ===========================================================
*
* (C) Copyright 2000-2009, by Object Refinery Limited and Contributors.
*
* Project Info: http://www.jfree.org/jfreechart/index.html
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*
* [Java is a trademark or registered trademark of Sun Microsystems, Inc.
* in the United States and other countries.]
*
* ---------------------
* DateTickUnitType.java
* ---------------------
* (C) Copyright 2009, by Object Refinery Limited.
*
* Original Author: <NAME> (for Object Refinery Limited);
* Contributor(s): -;
*
* Changes
* -------
* 09-Jan-2009 : Version 1 (DG);
*
*/
package org.jfree.chart.axis;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.util.Calendar;
/**
* An enumeration of the unit types for a {@link DateTickUnit} instance.
*
* @since 1.0.13
*/
public class DateTickUnitType implements Serializable {
/** Year. */
public static final DateTickUnitType YEAR
= new DateTickUnitType("DateTickUnitType.YEAR", Calendar.YEAR);
/** Month. */
public static final DateTickUnitType MONTH
= new DateTickUnitType("DateTickUnitType.MONTH", Calendar.MONTH);
/** Day. */
public static final DateTickUnitType DAY
= new DateTickUnitType("DateTickUnitType.DAY", Calendar.DATE);
/** Hour. */
public static final DateTickUnitType HOUR
= new DateTickUnitType("DateTickUnitType.HOUR",
Calendar.HOUR_OF_DAY);
/** Minute. */
public static final DateTickUnitType MINUTE
= new DateTickUnitType("DateTickUnitType.MINUTE", Calendar.MINUTE);
/** Second. */
public static final DateTickUnitType SECOND
= new DateTickUnitType("DateTickUnitType.SECOND", Calendar.SECOND);
/** Millisecond. */
public static final DateTickUnitType MILLISECOND
= new DateTickUnitType("DateTickUnitType.MILLISECOND",
Calendar.MILLISECOND);
/** The name. */
private String name;
/** The corresponding field value in Java's Calendar class. */
private int calendarField;
/**
* Private constructor.
*
* @param name the name.
* @param calendarField the calendar field.
*/
private DateTickUnitType(String name, int calendarField) {
this.name = name;
this.calendarField = calendarField;
}
/**
* Returns the calendar field.
*
* @return The calendar field.
*/
public int getCalendarField() {
return this.calendarField;
}
/**
* Returns a string representing the object.
*
* @return The string.
*/
public String toString() {
return this.name;
}
/**
* Returns <code>true</code> if this object is equal to the specified
* object, and <code>false</code> otherwise.
*
* @param obj the other object.
*
* @return A boolean.
*/
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof DateTickUnitType)) {
return false;
}
DateTickUnitType t = (DateTickUnitType) obj;
if (!this.name.equals(t.toString())) {
return false;
}
return true;
}
/**
* Ensures that serialization returns the unique instances.
*
* @return The object.
*
* @throws ObjectStreamException if there is a problem.
*/
private Object readResolve() throws ObjectStreamException {
if (this.equals(DateTickUnitType.YEAR)) {
return DateTickUnitType.YEAR;
}
else if (this.equals(DateTickUnitType.MONTH)) {
return DateTickUnitType.MONTH;
}
else if (this.equals(DateTickUnitType.DAY)) {
return DateTickUnitType.DAY;
}
else if (this.equals(DateTickUnitType.HOUR)) {
return DateTickUnitType.HOUR;
}
else if (this.equals(DateTickUnitType.MINUTE)) {
return DateTickUnitType.MINUTE;
}
else if (this.equals(DateTickUnitType.SECOND)) {
return DateTickUnitType.SECOND;
}
else if (this.equals(DateTickUnitType.MILLISECOND)) {
return DateTickUnitType.MILLISECOND;
}
return null;
}
}
|
/**
* Utility function that returns a JSONArray of all the names of the keys (attributes) of this JSONObject
* @return All the keys in the JSONObject as a JSONArray.
*/
public JSONArray names() {
Enumeration itr = this.keys();
if (itr != null) {
JSONArray array = new JSONArray();
while (itr.hasMoreElements()) {
array.add(itr.nextElement());
}
return array;
}
return null;
} |
/**
* Checks that the combat (attacks) between the characters is correct
*/
@Test
void Combat() {
assertTrue(testKnight.isAlive());
assertTrue(testTankyEnemy.isAlive());
assertTrue(testNormalEnemy.isAlive());
assertTrue(testWeakEnemy.isAlive());
assertFalse(testDeadBlackMage.isAlive());
assertFalse(testDeadEnemy.isAlive());
testKnight.attack(testTankyEnemy);
assertEquals(300, testTankyEnemy.getHealthPoints());
assertTrue(testTankyEnemy.isAlive());
testKnight.attack(testNormalEnemy);
assertEquals(90, testNormalEnemy.getHealthPoints());
assertTrue(testNormalEnemy.isAlive());
testKnight.attack(testWeakEnemy);
assertEquals(-5, testWeakEnemy.getHealthPoints());
assertFalse(testWeakEnemy.isAlive());
testKnight.attack(testDeadEnemy);
assertEquals(0, testDeadEnemy.getHealthPoints());
assertFalse(testWeakEnemy.isAlive());
testDeadBlackMage.attack(testTankyEnemy);
assertEquals(300, testTankyEnemy.getHealthPoints());
assertTrue(testTankyEnemy.isAlive());
testDeadBlackMage.attack(testDeadEnemy);
assertEquals(0, testDeadEnemy.getHealthPoints());
assertFalse(testDeadEnemy.isAlive());
} |
// Initialize configures the agent provider
func (p *Provider) Initialize(fw *choria.Framework, log *logrus.Entry) {
p.fw = fw
p.cfg = fw.Config
p.log = log.WithFields(logrus.Fields{"provider": "ruby"})
p.loadAgents(fw.Config.Choria.RubyLibdir)
} |
<gh_stars>1-10
-- | The simple strategy keeps a pool of possible secret codes around. After
-- each guess the inconsistent codes, i.e. the codes that can't possibly be the
-- secret code anymore, are removed from the pool so that the pool shrinks on
-- and on after each guess until it is so small that the secret code is
-- cracked.
--
-- Additionally, the to-be-entered code which is chosen from the pool is the
-- one that results in the minimal travel distance for the robot. This way time
-- is saved and more codes can be entered in the given time limit as opposed to
-- a strategy that would for example always pick the first code from the pool.
module Strategies.Simple (simple) where
import Strategies.Util
-- | The simple strategy record: wires the pool-based state handling below
-- into the generic 'Strategy' interface from "Strategies.Util".
simple = Strategy
  { initialize = initializeStrategyState
  , extractGuess = extractGuess'
  , updateState = updateState'
  }
-- | The code to enter next is simply the guess currently stored in the
-- strategy state.
extractGuess' :: StrategyState -> Code
extractGuess' st = guess st
-- | Update the strategy state after receiving an answer: on a correct guess
-- start over with a fresh state, otherwise drop the codes that have become
-- inconsistent with the answer and pick, as the next guess, the code that
-- minimises the robot's travel distance from its current position.
updateState' :: Answer -> StrategyState -> StrategyState
updateState' answer st
  | isCorrect answer (codeLength st) = reset st
  | otherwise = st { guess = nextGuess, consistent = remaining }
  where
    remaining = getConsistentCodes answer (guess st) (consistent st)
    nextGuess = getShortestCode (getPos (last (guess st))) remaining
|
import sys
sys.setrecursionlimit(10 ** 7)

N = int(input())
ALPHABET = list("abcdefghij")

def build(prefix, length, distinct):
    """Print, in order, every length-N string in normal form.

    A character may be appended only if its alphabet index is at most
    `distinct`, i.e. it is either one of the letters already used or the
    next unused letter.
    """
    if length == N:
        print(prefix)
        return
    for idx in range(distinct + 1):
        # Choosing the first unused letter grows the usable alphabet by one.
        widened = distinct + 1 if idx == distinct else distinct
        build(prefix + ALPHABET[idx], length + 1, widened)

build("", 0, 0)
|
Reliable Prediction of Channel Assignment Performance in Wireless Mesh Networks
The advancements in wireless mesh networks (WMN), and the surge in multi-radio multi-channel (MRMC) WMN deployments have spawned a multitude of network performance issues. These issues are intricately linked to the adverse impact of endemic interference. Thus, interference mitigation is a primary design objective in WMNs. Interference alleviation is often effected through efficient channel allocation (CA) schemes which fully utilize the potential of MRMC environment and also restrain the detrimental impact of interference. However, numerous CA schemes have been proposed in research literature and there is a lack of CA performance prediction techniques which could assist in choosing a suitable CA for a given WMN. In this work, we propose a reliable interference estimation and CA performance prediction approach. We demonstrate its efficacy by substantiating the CA performance predictions for a given WMN with experimental data obtained through rigorous simulations on an ns-3 802.11g environment.
I. INTRODUCTION
Multi-radio multi-channel wireless mesh networks (MRMC WMNs) are expected to significantly reduce the dependence on wired network infrastructure owing to the availability of low-cost commodity IEEE 802.11 hardware, ease of scalability, and flexibility in deployment. MRMC WMNs offer reliable connectivity by leveraging the inherent redundancy in the underlying mesh topology framework. This is facilitated by multiple-hop transmissions which relay the data traffic seamlessly between source-destination pairs where a direct communication can not be established . However, the broadcast nature of wireless transmissions is synonymous with link conflicts spawned by WMN radios which are located within each other's interference range and are concurrently active on an identical channel. The lucrative features of MRMC WMNs viz., enhanced capacity, seamless connectivity, and reduced latency are diminished by the adverse impact of interference. Conflict graphs (CGs) are invariably used to represent these interference complexities in a WMN. A CG models the wireless links in a WMN as vertices and edges between these vertices represent potential link conflicts . Interference alleviation in WMNs is primarily accomplished through an efficient channel assignment (CA) to the radios. Thus, the intensity of interference affecting a WMN is the characteristic of the implemented CA scheme, as it is responsible for reining in the endemic interference. However, the CA problem is an NP-Hard problem and several CA schemes have been proposed in literature which employ numerous concepts and heuristic approaches to mitigate the impact of interference in a WMN.
II. MOTIVATION AND RELATED RESEARCH WORK
Estimation of interference, its alignment and cancellation are well known NP-Hard problems . Numerous research endeavors have tried to address the interference alignment and cancellation at the physical layer e.g., in , authors employ the soft interference cancellation technique. Impact of interference on multi-hop wireless networks has also been rigorously studied, maximum achievable network capacity being the primary focus of these studies. In the landmark work , authors demonstrated that in a wireless network consisting of n randomly placed identical nodes, where each node is communicating with another, the maximum achievable throughput per node is Θ(1/√(n log n)). In , authors estimate the network capacity of an arbitrary wireless network by employing a realistic signal to interference plus noise ratio (SINR) model to account for the interference. Authors in assess the impact of interference in multi-hop mesh networks by proposing an upper bound on the achievable network capacity, under the constraints of specific physical location of wireless nodes and a particular traffic load. The concept of interference degree (ID) is often used in solutions to the resource allocation , scheduling , and CA problems , with the intertwined objectives of minimizing the prevalent interference and optimizing the WMN performance. ID of a wireless link in a WMN denotes the number of links in its close proximity which can potentially interfere with it i.e., disrupt a transmission on the given link. Total interference degree (TID) of a WMN is obtained by halving the sum of ID of all links in the WMN. In our previous work , we highlight that TID is only an approximate measure of the intensity of interference but not a dependable CA performance prediction metric. Further in , we propose a fresh characterization of interference, attributing three dimensions, namely, statistical, spatial and temporal, to the interference prevalent in wireless networks.
Based on this characterization, a statistical Channel Distribution Across Links (CDAL) algorithm is suggested which identifies the linkcount for each channel i.e., the number of links in the wireless network that have been allocated that particular channel. It then computes a statistical metric CDAL cost , which is a measure of equitable distribution of channels across wireless links. Further, CDAL cost is demonstrated to be a more reliable estimation metric than TID, at a lesser computational cost.
Thus, apart from TID estimate and CDAL cost there is an absence of alternate metrics in research literature, which can be employed as well founded theoretical benchmarks for comparison and prediction of CA performance. In this study, we further bridge that gap by using the interference characterization model of to engineer a spatio-statistical interference estimation and CA predication scheme.
III. PROBLEM DEFINITION
Let G = (V, E) represent an arbitrary MRMC WMN comprising of n nodes, where V denotes the set of all nodes and E denotes the set of wireless links in the WMN. Each node i is equipped with a random number of identical radios R i , and is assigned a list of channels Ch i from the set of available channels Ch. A reliable theoretical interference estimate needs to be devised to predict with high confidence, the efficient CA schemes that ought to be selected for G from the available set of CA schemes.
IV. INTERFERENCE ESTIMATION & CA PERFORMANCE PREDICTION
The proposed algorithm adopts a comprehensive two dimensional spatio-statistical view of prevalent interference. The spatial dimension concentrates on the link conflicts which are spawned due to spatial proximity of radios, while the statistical dimension is concerned with a proportional distribution of channels across wireless links in a WMN.
A. Inadequacy of Statistical Interference Estimation
Leveraging the statistical aspects of endemic interference offers a sound estimation metric in CDAL cost . However, there is a lacuna in the CDAL algorithm that it accounts for only a single dimension of interference. We now highlight this limitation of CDAL estimation. and (1, 1, 2, 2), respectively. For a smooth discourse, we assume a Transmission : Interference range of 1:1 i.e., only the transmissions over adjacent links interfere. It can be inferred that the two channel allocations are statistically alike i.e., link-counts of both the channels are identical under both CA schemes. But the CA schemes differ in terms of spatial distribution of links in the network. The spatial features of CA X guarantee a minimal interference scenario, as adjacent links transmit over non-overlapping channels. In sharp contrast, CA Y leads to a high interference scenario where adjacent links (AB & BC) and (CD & DE) operate over identical channels and cause link conflicts. The CDAL algorithm is oblivious to these spatial characteristics and assigns the two CA schemes the same CDAL cost . This causes an erroneous prediction and comparison of CA performance, which renders CDAL estimate less accurate. However, it forms the theoretical foundation for a more efficient estimation technique which we propose next.
Algorithm 1 Cumulative X-Link-Set Weight
Determine Chi and Adji {Chi : Set of channels allocated to the radios at node i in G. Adji : Set of nodes adjacent to node i in G} 3: end for 4: for i ∈ V do 5: for j ∈ Adji do 6: LnSet ← InsertLn(i, j). {LnSet : Set of all possible wireless links in of G.}
B. Spatio-Statistical Interference Estimation
Any theoretical interference estimation scheme can only account for the spatio-statistical aspects of the three dimensional interference estimation problem. An intelligent spatiostatistical scheme will not just factor in the spatial proximity of links, but will also consider the distribution of available channels among the radios, thereby offering an efficient CA performance estimation metric. The algorithm considers the wireless links in a WMN and assigns a set of links a certain weight, which reflects its resilience to the adverse impact of interference. We call it the Cumulative X-Link-Set Weight or CXLS wt algorithm and present it in Algorithm 1.
We begin the discourse by explaining the term X-Link-Set. In a wireless network, the interference range of a radio i.e., the distance over which the signal strength is potent enough to interfere with another signal but unable to successfully deliver data, far exceeds its transmission range i.e., the distance upto which the signal strength of a transmission guarantees a successful data delivery at a receiving radio. The ratio of Transmission Range : Interference Range (T:I) in most wireless networks usually lies between 1:2 to 1:4. In the proposed algorithm a T:I of 1:X is considered, where X is a positive integer. The factor X has a great significance in determining the detrimental effect of interference on a link. For example, in Figure 1 the channel allocation of CA X is optimal for a T:I of 1:1, but for 1:2 both CA X and CA Y experience the same number of link conflicts. Thus X determines the impact radius (IR) of link conflicts, and it ought to be taken into consideration while designing an interference estimation algorithm. The CXLS wt algorithm accounts for the impact radius X by considering a set of X consecutive links named the X-Link-Set or XLS as the fundamental entity for interference estimation.
The CXLS wt algorithm begins by determining the set of channels assigned to the radios of each node and the adjacency list of each node. Next, all wireless links in the WMN are determined on the basis of transmission range i.e., adjacency of nodes in the graphical representation of the WMN. Further, for each link, the algorithm finds the set of common channels that are assigned to radios of the adjacent nodes which share that particular link. The links are stored in a data structure called LnSet while the channel set associated to the link is mapped to it in LnChM ap. Further, a set of X-Link-Sets or SXLS, is determined by the function GetAllLinkSets. SXLS serves as a sample space of fundamental blocks i.e., XLS, and the final step entails processing them to generate an interference estimation metric. To each element of this sample space i.e., to every XLS, we assign a weight which is a measure of its quality. A higher weight signifies a diminished impact of interference in the XLS, whereas a low weight implies that the XLS is severely degraded by interference.
The technique of weight assignment appeals to the spatial characteristics of interference and is described in Algorithm 2 which is implemented in function P robCompW eight. From the SXLS, individual XLSs are selected and processed iteratively. An XLS is further split up into its X consecutive constituent links, Ln i where i ∈ (1 . . . X). For each Ln i , the set of channels Ch i associated to it are retrieved. Next, for each XLS all possible combinations of channel assignments to Ln i from their respective Ch i are generated. The motivating principle for considering all possible channel allocation variations for an XLS is the same as probabilistic selection of channels in the CDAL algorithm. The channel selection for a link being a temporal characteristic, we account for this dynamism and randomness in the system by considering all the variations as equally probable. Thus, for every channel allocation pattern, the algorithm assigns a weight based on the spatial proximity of links. The final weight for an XLS is the average of all of its variations. Within an XLS channel allocation instance, if all of the X links are assigned an identical channel, Let Ch1, Ch2, . . . ChX be the set of channels mapped to the corresponding links Ln1, Ln2, . . . , LnX . 6: Assign channels to all Lni from the channel-set Chi 7: for all Equally Probable combinations of channel assignments in XLS do 10: if all X links are assigned identical channels then 11: T empXLSwt ← 0 12: else if X − 1 links are assigned identical channels, 1 link is assigned an orthogonal channel then 13: T empXLSwt ← 1 . . . 14: else if all X links are assigned non-overlapping channels then 15: T empXLSwt ← X 16: end if 17: XLSwt ← XLSwt + T empXLSwt 18: count ← count + 1 19: end for 20: XLSwt ← XLSwt/count
21:
CXLSwt ← CXLSwt + XLSwt 22: end for 23: Return CXLSwt the weight assigned to the XLS is 0 which is the minimum weight. This scenario defines a maximal interference scenario i.e., every link within the XLS interferes with every other link as the impact radius of X spans the entire XLS. Further, if (X-1) links are assigned conflicting channels and 1 link operates on a non-conflicting channel, the weight assigned to the XLS is 1. For, (X-2) links operating on overlapping channels and 2 links on non-overlapping channels, the weight assigned is 2. Finally, if all the X links are assigned orthogonal channels, which is the minimal interference scenario, a maximum weight of X is assigned to the XLS instance. After all of the XLS weights (XLS wt ) are computed, the algorithm sums them together to generate the final metric for the CA which is the CXLS wt . It is noteworthy that a link may be a part of multiple XLSs, and will contribute to the weight assignment in each one of them. Hence, the algorithm takes into account all interference scenarios that may arise within a WMN. Further, generating a sample space consisting of XLS, assigning each sample a weight, and deriving the metric by a sum of the weights of entire sample space are the statistical features of the CXLS wt algorithm.
C. Time Complexity of CXLS wt Algorithm
For an arbitrary MRMC WMN graph G = (V, E), comprising of n nodes and m identical radios installed on every node, the upper-bound on algorithmic complexity of the CXLS wt algorithm can be determined to be O(n 3 m 2 ). The SXLS computation incurs an algorithmic cost of O(n 3 ) and the cardinality of the set i.e, the number of XLSs in the set has an upper bound of O(n 2 ). Further, for each XLS the weight is computed by the function P robCompW eight() by processing each individual link in the XLS. This step is the most computationally intensive in the algorithm and has a worst-case complexity of O(n 3 m 2 ).
In comparison, TID and CDAL cost estimations have a worst-case algorithmic complexity of O(n 2 m 3 ) and O(n 2 m 2 ), respectively . Since in any WMN deployment, number of nodes far exceeds the number of radios installed on a node i.e., n >> m, CXLS wt estimation requires more computational resources than the other two estimation schemes. However, the results will demonstrate that this slight increase in complexity is a small cost to pay for significantly improved accuracy levels.
V. SIMULATIONS, RESULTS AND ANALYSIS
We now subject the proposed interference estimation algorithm to prove its efficacy in prediction of CA performance in WMNs.
A. Simulation Parameters
We perform exhaustive simulations in ns-3 to record the performance of CAs in a 5×5 grid WMN. A WMN of grid layout is ideal for evaluating CA efficiency as it outperforms random WMN deployments in terms of metrics such as access-tier coverage area, back-haul connectivity, fairness in channel allocation, and mesh capacity . The simulation parameters are presented in Table I. Each multi-hop traffic flow transmits a datafile from the source to the destination. TCP and UDP are the underlying transport layer protocols which are implemented through the inbuilt ns-3 models of BulkSendApplication and UdpClientServer. TCP simulations offer the aggregate network throughput while UDP simulations determine the packet loss ratio and the mean delay. We equip each node in the grid WMN with 2 identical radios and CA schemes have 3 orthogonal channels at their disposal.
B. Test Scenarios
Multi-hop data flows are an intrinsic feature of WMNs. To gauge the detrimental impact of the endemic interference we design four high traffic test-cases by activating multiple concurrent multi-hop flows. Test scenarios in the grid WMN include a combination of 4-hop flows from the first node of a row or column to the last node of that particular row or column, and 8-hop flows which are established between the diagonally opposite nodes placed at the corners of the grid. From various combinations of these two categories of multi-hop flows, four test scenarios are designed which are subjected to both TCP and UDP simulations. They comprise of the following number of concurrent flows which are activated simultaneously in the 25 node grid : (i) 5 (ii) 8 (iii) 10 (iv) 12.
C. Selection of CA Schemes
We implement a heterogeneous mix of well-known CA schemes viz., a centralized breadth first traversal approach (BF-SCA) , a static maximum clique based algorithm (CLICA) , a maximum independent set based scheme (MISCA) and a centralized static CA scheme (CCA) . We also implement two radio co-location aware CA algorithms viz., an optimized independent set based CA scheme (OISCA) and a spatio-statistically designed, elevated interference zone mitigation approach (EIZMCA) . Each of these CA schemes is implemented using two broad based multi-radio multichannel conflict graph models (MMCGs) viz., the conventional MMCG (C-MMCG) and the enhanced MMCG (E-MMCG) . C-MMCG is the traditional way of representing link conflicts, and does not account for radio co-location interference (RCI) prevalent in a wireless network. E-MMCG is a marked improvement over its conventional counterpart and adequately represents RCI interference scenarios in its link conflict representation of the WMN. The use of E-MMCG leads to reduced interference levels and improved WMN performance , which is also reflected in the results we present in this study. Thus, for all of the above mentioned 6 CA schemes we have two versions, one for each MMCG model, resulting in 12 CAs. In addition, we also implement a grid specific CA scheme (GSCA) for the grid WMN through a crude bruteforce approach which permutes through all possible channel allocations in the grid to determine a CA with the minimal TID estimate. It serves as a reference for performance evaluation of the CAs. Finally, we have a total of 13 implementable channel allocations from the 7 CA algorithms. In , the evaluations of CDAL cost estimates were done on a CA sample set of 9 CAs and in this work the sample set is enlarged to ensure
D. Results and Analysis
An exhaustive set of simulations were run for the testcases described above, and the values of the three performance metrics were recorded. For each CA, we compute the average of the recorded network metric values for all the test-cases to derive, the average aggregate network throughput (in Mbps), average packet loss ratio (as a %), and average mean delay (in µseconds). For ease of reference, we henceforth denote them as Throughput, PLR and MD, respectively. Figures 3, 4, & 5, through which we demonstrate the observed correlation between theoretical estimates and actual performance metrics. It is necessary to understand the expected correlation of network performance metrics with the prevalent interference.
As depicted in Figure 2, the aggregate capacity of a wireless network will deteriorate with rise in the intensity of interference. Further, with increase in the adverse impact of interference, loss of data packets and end to end latency in packet transmission will increase as well. Thus, a reliable theoretical interference estimate must exhibit a similar pattern when plotted against the observed network performance metrics. From Figures 3, 4, & 5, it can be discerned that TID does not conform to expected correlation and has a haphazard gradient when plotted against network metrics. CDAl cost displays a higher adherence to the expected pattern than TID. CXLS wt estimates exhibit a great similarity to the expected correlation plot gradients. Since all three metrics do not account for the temporal characteristics of wireless communication, a deviation from observed patterns is inevitable. Thus, CXLS wt offers the most reliable interference estimates among the three metrics which is visible from the gradients of its plots against the three network performance metrics. We now process and analyze the results to derive the accuracy of each of the three estimation metrics. For every recorded performance metric, we first order the CAs in a sequence of increasing performance. In a similar fashion we order the CAs in the increasing order of expected performance, as predicted by the three interference estimation metrics. For both TID and CDAL cost , a high estimate implies high interference in the WMN and thereby, a dismal CA performance. In contrast, higher the value of CXLS wt , better is the expected performance of the CA i.e., (Expected CA P erf ormance ∝ CXLS wt ). Thus, the CA sequence in the increasing order of expected performance will be arrived at by orienting CAs in decreasing order of estimation metric values for both, TID and CDAL cost , and in increasing order of estimation metric values for CXLS wt . 
Next, we compare CA sequences based on experimental data with CA sequences derived from theoretical estimates, to determine the error in sequence (EIS) of each prediction metric. Let us consider n CAs which are ordered in a sequence based on the values of a prediction metric. A total of n C 2 comparisons exist between individual CAs in the sequence. These pairwise comparisons of expected CA performances have to be verified against experimental data, by considering the sequence of CAs based on the recorded network metric values as the reference. We determine the total number of comparisons that are in error in the CA sequences of theoretical metrics. A comparison in error implies that the expected performance relationship between two CAs as predicted by the estimation metric, is contrary to that observed in actual implementation results. EIS for a particular CA performance prediction metric is the sum of all erroneous comparisons in its CA sequence. Thus, EIS is a measure of fallacy in the predictions of an estimation metric. Next, we determine the degree of confidence (DoC ) which represents the level of accuracy that an interference estimation scheme exhibits in its prediction of the performance of a CA. The DoC value for a theoretical estimate is computed through the expression DoC = (1−(EIS/ n C 2 ))×100, where n is the number of CAs in the sequence. We elucidate the above procedure through an example. Let us determine the CA sequence in terms of increasing Throughput, which is : This is the reference ordering of CAs in which CCA is the least efficient and GSCA the best performer in the CA sample set, in terms of observed Throughput. Against this benchmark we compare the CA sequence spawned by CXLS wt , which is : We compare the actual pairwise CA relationships with those predicted by CXLS wt to compute an EIS of 4 with respect to Throughput. Likewise, the EIS for TID and CDAL cost in terms of Throughput are 19 and 8, respectively. 
EIS for all the theoretical estimates with respect to the three observed network metrics are depicted in the Figure 6. Finally, we compute the DoC which is the number of affirmative predictions as a percentage of total number of pairwise comparisons that are possible in the CA sequence ( 13 C 2 ). DoC for the three estimation schemes are presented in Table II. It can be inferred that CXLS wt registers lower EIS than both, TID and CDAL cost , in terms of Throughput, PLR and MD. The EIS is halved in CXLS wt estimation in comparison to CDAL cost , while it is reduced to almost one fourth when compared to TID estimates. A similar trend can be observed in the DoC values as well. TID estimates fare worse than both, CDAL cost and CXLS wt , as a prediction metric with accuracy levels always below 80%. CDAL cost exhibits an average performance with DoC values between 80% and 90%. CXLS wt is unarguably the most dependable CA performance prediction metric of the three, as its measure of reliability is always greater than 90%.
Further, let us qualitatively assess the prediction patterns of the three estimates. It is discernible that CXLS wt explicitly distinguishes between CAs that will perform well in a WMN and those that will not eg., CXLS wt estimates project CAs OISCA E , EIZMCA E & GSCA as high-performance CAs, the CAs BFSCA E MISCA C & OISCA C as average performance CAs, and the CAs CCA C & CLICA C as low-performance CAs. These performance predictions are validated by the experimental results. In contrast, TID estimates place the CAs BFSCA E & CLICA C at the bottom of the performance spectrum, and CCA C as an average-performing CA. Both of the predictions are not in adherence to the actual experimental data. CDAL cost estimates are more accurate than TID, but they fail to compete with CXLS wt as they overlook the spatial aspects of interference alleviation and do not consider the proximity of links that might interfere. Further, all the three estimates rightly predict GSCA to be the most efficient CA in the sample set, however only CXLS wt and CDAL cost predict CCA C to exhibit the poorest performance.
VI. CONCLUSIONS
Since the problem of interference estimation is NP-hard, the role of a theoretical prediction estimate is limited to exhibit a maximal conformance to the actual recorded behavior of a CA when implemented in a WMN. In this context, CXLS wt proves to be a reliable CA prediction metric with an adherence of over 91% to actual results, in a fairly extensive sample set of 13 CAs. It does incur a slightly higher computational cost than both, TID estimate and CDAL cost , but the overhead of increased algorithmic complexity is adequately compensated by the increase in accuracy levels. Thus, CXLS wt outperforms both TID estimate and CDAL cost as a reliable CA performance prediction metric, which it owes to its spatio-statistical design that ensures a reduced EIS and thus, an enhanced DoC.
VII. FUTURE WORK
Both CDAL cost and CXLS wt are metrics that offer predictions for the whole CA and do not offer an estimate of individual link quality. For a quantitative assessment, such as theoretical upper-bounds of network performance metrics eg., Throughput, a link quality estimate is necessary. Thus we intend to take up this problem and devise a prediction estimate based on the individual link quality. |
<gh_stars>1-10
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
// @ts-ignore
import { uiModules } from 'ui/modules';
import { npSetup, npStart } from 'ui/new_platform';
import routes from 'ui/routes';
import { isSystemApiRequest } from '../../../../../../src/plugins/kibana_legacy/public';
import { SecurityPluginSetup } from '../../../../../plugins/security/public';
// Legacy (Angular) session-management shim for the Kibana security plugin.
const securityPluginSetup = (npSetup.plugins as any).security as SecurityPluginSetup;

// Only wire anything up when the new-platform security plugin is present.
if (securityPluginSetup) {
  // The /account page now lives in the new platform; redirect to it.
  routes.when('/account', {
    template: '<div />',
    controller: () => npStart.core.application.navigateToApp('security_account'),
  });

  // Builds the `next` query parameter so the user returns to the current
  // location after re-authenticating.
  const getNextParameter = () => {
    const { location } = window;
    const next = encodeURIComponent(`${location.pathname}${location.search}${location.hash}`);
    return `&next=${next}`;
  };

  // Reads the auth provider previously stored for this tenant (if any) so
  // the login flow can pre-select it.
  const getProviderParameter = (tenant: string) => {
    const key = `${tenant}/session_provider`;
    const providerName = sessionStorage.getItem(key);
    return providerName ? `&provider=${encodeURIComponent(providerName)}` : '';
  };

  const module = uiModules.get('security', []);
  module.config(($httpProvider: ng.IHttpProvider) => {
    $httpProvider.interceptors.push(($q, $window, Promise) => {
      // Anonymous paths (e.g. the login page itself) must not extend the
      // session or trigger the expiry redirect.
      const isAnonymous = npSetup.core.http.anonymousPaths.isAnonymous(window.location.pathname);

      // Wraps both the success and error handlers with the same
      // session-extension / session-expiry logic.
      function interceptorFactory(responseHandler: (response: ng.IHttpResponse<unknown>) => any) {
        return function interceptor(response: ng.IHttpResponse<unknown>) {
          // User-initiated (non-system-API) requests keep the session alive.
          if (!isAnonymous && !isSystemApiRequest(response.config)) {
            securityPluginSetup.sessionTimeout.extend(response.config.url);
          }

          if (response.status !== 401 || isAnonymous) {
            return responseHandler(response);
          }

          // 401 on a non-anonymous path: the session expired. Redirect to the
          // logout URL, preserving where the user was and which provider they
          // used, then halt the promise chain so nothing runs after the
          // redirect.
          const { logoutUrl, tenant } = securityPluginSetup.__legacyCompat;
          const next = getNextParameter();
          const provider = getProviderParameter(tenant);
          $window.location.href = `${logoutUrl}?msg=SESSION_EXPIRED${next}${provider}`;
          return Promise.halt();
        };
      }

      return {
        response: interceptorFactory((response) => response),
        responseError: interceptorFactory($q.reject),
      };
    });
  });
}
|
Tumbling, an Interactive Way to Move Forward
The migration of Drosophila border cells has become a powerful model with which to genetically identify guidance cues that control the directed migration of a group of interconnected cells. During oogenesis, border cells delaminate from an epithelial layer and move collectively toward the oocyte. In vivo observation has been added to the impressive experimental toolkit available to study border cell migration. These studies reveal two previously unknown migratory behaviors: one in which cells within the border cell cluster constantly change their position, and another called "tumbling," by which the entire border cell cluster rotates forward. Unexpectedly, the same receptor tyrosine kinases control these different modes of migration through separate downstream pathways. An early mode is mediated by the actin regulatory proteins ELMO and Mbc and resembles cellular polarization during individual cell migration; whereas during a later phase, communication between cells, facilitated by mitogen-activated protein kinase and phospholipase C–γ, organizes the polarity of the entire cluster. |
Corporations are roaring. Wall Street is rolling in cash. C.E.O. bonuses are going gangbusters. It’s a really good time to be rich!
If you’re poor, not so much. The pall of the recession is suffocating. The unemployment rate is still unbearably high. The Census Bureau reported in September that the poverty rate for 2009 was 14.3 percent, higher than it has been since 1994, and the number of uninsured reached a record high. And the Department of Agriculture has reported record “prevalence of food insecurity.”
So in a civil society, which of these groups should be expected to sacrifice a bit for the benefit of the other and the overall health and prosperity of the nation at a time of great uncertainty? The poor, of course. At least that seems to be the Republican answer.
Under the guise of deficit reduction, the Republicans are proposing to not only make the Bush tax cuts for the wealthy permanent, but to reduce their taxes even more — cutting the top individual rate from 35 percent to 25 percent to “promote growth and job creation.” And they plan to pay for this by taking a buzz saw to programs that benefit the poor, elderly and otherwise vulnerable.
Photo
But the spurious argument that cutting taxes for the wealthy will somehow stimulate economic growth is not borne out by the data. A look at the year-over-year change in G.D.P. and changes in the historical top marginal tax rates show no such correlation. This isn’t about balancing budgets or fiscal discipline or prosperity-for-posterity stewardship. This is open piracy for plutocrats. This is about reshaping the government and economy to benefit the wealthy and powerful at the expense of the poor and powerless.
Advertisement Continue reading the main story |
<gh_stars>0
# Generated by Django 3.2.8 on 2021-10-22 18:32
import uuid
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Replaces the old ScanNote/Annotation models with a single ScanDecision
    # model that records a reviewer's verdict (Good/Bad/Other) on a scan.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0015_scan_image_relationship'),
    ]

    operations = [
        migrations.CreateModel(
            name='ScanDecision',
            fields=[
                (
                    'id',
                    models.UUIDField(
                        default=uuid.uuid4, editable=False, primary_key=True, serialize=False
                    ),
                ),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                (
                    'decision',
                    models.CharField(
                        choices=[('Good', 'G'), ('Bad', 'B'), ('Other', 'O')], max_length=20
                    ),
                ),
                ('note', models.TextField(blank=True, max_length=3000)),
                (
                    # PROTECT: a user with recorded decisions cannot be deleted.
                    'creator',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    # Deleting a scan cascades to its decisions.
                    'scan',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='decisions',
                        to='core.scan',
                    ),
                ),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        # Drop the superseded models (fields first, then the models).
        migrations.RemoveField(
            model_name='scannote',
            name='creator',
        ),
        migrations.RemoveField(
            model_name='scannote',
            name='scan',
        ),
        migrations.DeleteModel(
            name='Annotation',
        ),
        migrations.DeleteModel(
            name='ScanNote',
        ),
        # Index supporting "latest decisions for a scan" queries.
        migrations.AddIndex(
            model_name='scandecision',
            index=models.Index(fields=['scan', '-created'], name='core_scande_scan_id_23388a_idx'),
        ),
    ]
|
<reponame>solomon-qwe/agalia<filename>src/agalia/ImageDrawD2D.cpp
#include "pch.h"
#include "ImageDrawD2D.h"
#include <io.h>
#ifndef INITGUID
#define INITGUID
#include <guiddef.h>
#undef INITGUID
#else
#include <guiddef.h>
#endif
#include <wrl.h>
#include <d2d1_1.h>
#include <d3d11_1.h>
#pragma comment(lib, "d2d1")
#pragma comment(lib, "d3d11")
using namespace Microsoft::WRL;
using namespace D2D1;
// Creates the device-independent factories: the Direct2D factory and the WIC
// imaging factory. Both outlive any particular rendering device.
HRESULT CreateDeviceIndependentResources_DXFactory(ID2D1Factory1** d2dFactory, IWICImagingFactory2** wicFactory)
{
	// ID2D1Factory1
	D2D1_FACTORY_OPTIONS options = {};
#if defined(_DEBUG)
	// Enable Direct2D debug-layer messages in debug builds.
	options.debugLevel = D2D1_DEBUG_LEVEL_INFORMATION;
#endif
	auto hr = ::D2D1CreateFactory<ID2D1Factory1>(
		D2D1_FACTORY_TYPE_SINGLE_THREADED,
		options,
		d2dFactory);
	if (FAILED(hr)) return hr;

	// IWICImagingFactory2
	hr = ::CoCreateInstance(
		CLSID_WICImagingFactory,
		nullptr,
		CLSCTX_INPROC_SERVER,
		IID_PPV_ARGS(wicFactory));
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Decodes the first frame of the image in `stream` and prepares a WIC format
// converter (32bpp premultiplied BGRA, the format Direct2D expects) plus the
// image's embedded color context. Falls back to sRGB (Exif color space 1)
// when the image carries no color profile.
HRESULT CreateDeviceIndependentResources_Image(IWICImagingFactory2* wicFactory, IWICFormatConverter** wicFormatConverter, IWICColorContext** wicColorContext, IStream* stream)
{
	ComPtr<IWICBitmapDecoder> decoder;
	auto hr = wicFactory->CreateDecoderFromStream(
		stream,
		nullptr,
		WICDecodeMetadataCacheOnDemand,
		&decoder);
	if (FAILED(hr)) return hr;

	// Only the first frame is rendered; other frames are ignored.
	ComPtr<IWICBitmapFrameDecode> frame;
	hr = decoder->GetFrame(0, &frame);
	if (FAILED(hr)) return hr;

	hr = wicFactory->CreateColorContext(wicColorContext);
	if (FAILED(hr)) return hr;

	// Ask for at most one embedded color context; actualCount tells us
	// whether the image actually had one.
	unsigned int actualCount = 0;
	hr = frame->GetColorContexts(1, wicColorContext, &actualCount);
	if (FAILED(hr)) return hr;
	if (actualCount == 0)
	{
		// use default color space if color profile is empty
		const unsigned int ExifColorSpaceSRGB = 1;
		hr = (*wicColorContext)->InitializeFromExifColorSpace(ExifColorSpaceSRGB);
		if (FAILED(hr)) return hr;
	}

	hr = wicFactory->CreateFormatConverter(wicFormatConverter);
	if (FAILED(hr)) return hr;
	hr = (*wicFormatConverter)->Initialize(
		frame.Get(),
		GUID_WICPixelFormat32bppPBGRA,
		WICBitmapDitherTypeNone,
		nullptr,
		0.0f,
		WICBitmapPaletteTypeCustom
	);
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Probes whether the D3D11 SDK debug layers are installed by attempting to
// create a NULL-driver device with the debug flag. No device or context is
// actually returned; only the creation result matters.
inline bool SdkLayersAvailable()
{
	return SUCCEEDED(::D3D11CreateDevice(
		nullptr,                   // default adapter
		D3D_DRIVER_TYPE_NULL,      // no rendering needed for this probe
		0,
		D3D11_CREATE_DEVICE_DEBUG, // succeeds only when SDK layers exist
		nullptr, 0,
		D3D11_SDK_VERSION,
		nullptr, nullptr, nullptr));
}
// Creates a D3D11 device/context of the given driver type with BGRA support
// (required for Direct2D interop). In debug builds the debug layer is added
// when the SDK layers are installed.
HRESULT CreateD3DDevice(D3D_DRIVER_TYPE type, ID3D11Device** device, ID3D11DeviceContext** context)
{
	UINT creationFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#if defined(_DEBUG)
	if (SdkLayersAvailable())
		creationFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
	auto hr = ::D3D11CreateDevice(
		nullptr,
		type,
		0,
		creationFlags,
		nullptr,
		0,
		D3D11_SDK_VERSION,
		device,
		nullptr,
		context);
	return hr;
}
// Creates the device-dependent D3D/D2D objects: tries a hardware device
// first, falls back to WARP (software rasterizer), then builds the D2D
// device and device context on top of the underlying DXGI device.
HRESULT CreateDeviceResources_DX(ID2D1Factory1* d2dFactory, ComPtr<ID2D1Device>& d2dDevice, ComPtr<ID2D1DeviceContext>& d2dContext, ComPtr<ID3D11Device1>& d3dDevice, ComPtr<ID3D11DeviceContext>& d3dContext)
{
	ComPtr<ID3D11Device> device;
	ComPtr<ID3D11DeviceContext> context;
	auto hr = CreateD3DDevice(D3D_DRIVER_TYPE_HARDWARE, &device, &context);
	if (FAILED(hr))
		hr = CreateD3DDevice(D3D_DRIVER_TYPE_WARP, &device, &context); // software fallback
	if (FAILED(hr)) return hr;

	// Upgrade to the *1 interfaces required below.
	hr = device.As(&d3dDevice);
	if (FAILED(hr)) return hr;
	hr = context.As(&d3dContext);
	if (FAILED(hr)) return hr;

	ComPtr<IDXGIDevice> dxgiDevice;
	hr = d3dDevice.As(&dxgiDevice);
	if (FAILED(hr)) return hr;

	hr = d2dFactory->CreateDevice(dxgiDevice.Get(), &d2dDevice);
	if (FAILED(hr)) return hr;
	hr = d2dDevice->CreateDeviceContext(D2D1_DEVICE_CONTEXT_OPTIONS_NONE, &d2dContext);
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Builds the effect graph used for drawing: a bitmap-source effect wrapping
// the WIC converter, feeding a color-management effect. When
// `colorManagementMode` is non-zero the image's own color context is used as
// the conversion source; the destination context is set separately by
// UpdateDisplayColorContext.
HRESULT CreateDeviceResources_Image(ID2D1Effect** displayEffect, ID2D1Effect** sourceEffect, ID2D1DeviceContext* d2dContext, IWICFormatConverter* wicFormatConverter, IWICColorContext* wicColorContext, int colorManagementMode)
{
	if (d2dContext == nullptr) return E_POINTER;
	if (wicColorContext == nullptr) return E_POINTER;

	ComPtr<ID2D1ColorContext> imageColorContext;
	auto hr = d2dContext->CreateColorContextFromWicColorContext(wicColorContext, &imageColorContext);
	if (FAILED(hr)) return hr;

	// source effect
	hr = d2dContext->CreateEffect(CLSID_D2D1BitmapSource, sourceEffect);
	if (FAILED(hr)) return hr;
	hr = (*sourceEffect)->SetValue(D2D1_BITMAPSOURCE_PROP_WIC_BITMAP_SOURCE, wicFormatConverter);
	if (FAILED(hr)) return hr;
	hr = (*sourceEffect)->SetValue(D2D1_BITMAPSOURCE_PROP_INTERPOLATION_MODE, D2D1_BITMAPSOURCE_INTERPOLATION_MODE_LINEAR);
	if (FAILED(hr)) return hr;

	// destination effect
	hr = d2dContext->CreateEffect(CLSID_D2D1ColorManagement, displayEffect);
	if (FAILED(hr)) return hr;
	if (colorManagementMode) {
		// Mode 0 skips the source context, leaving conversion disabled.
		hr = (*displayEffect)->SetValue(D2D1_COLORMANAGEMENT_PROP_SOURCE_COLOR_CONTEXT, imageColorContext.Get());
		if (FAILED(hr)) return hr;
	}
	hr = (*displayEffect)->SetValue(D2D1_COLORMANAGEMENT_PROP_ALPHA_MODE, D2D1_COLORMANAGEMENT_ALPHA_MODE_STRAIGHT);
	if (FAILED(hr)) return hr;

	// chain: bitmap source -> color management
	(*displayEffect)->SetInputEffect(0, *sourceEffect);

	return S_OK;
}
// Creates a two-buffer flip-sequential swap chain for `hwnd` using the DXGI
// factory that owns `d3dDevice`. `scaling` is a parameter because
// DXGI_SCALING_NONE is only valid on Windows 8 or later (see the caller's
// fallback to DXGI_SCALING_STRETCH).
HRESULT CreateSwapChain(const ComPtr<ID3D11Device1>& d3dDevice, IDXGISwapChain1** ppSwapChain, HWND hwnd, DXGI_SCALING scaling)
{
	// Walk up from the device to the DXGI factory that created it.
	ComPtr<IDXGIDevice1> dxgiDevice;
	auto hr = d3dDevice.As(&dxgiDevice);
	if (FAILED(hr)) return hr;
	ComPtr<IDXGIAdapter> dxgiAdapter;
	hr = dxgiDevice->GetAdapter(&dxgiAdapter);
	if (FAILED(hr)) return hr;
	ComPtr<IDXGIFactory2> dxgiFactory;
	hr = dxgiAdapter->GetParent(IID_PPV_ARGS(&dxgiFactory));
	if (FAILED(hr)) return hr;

	DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
	swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
	swapChainDesc.SampleDesc.Count = 1;   // no multisampling
	swapChainDesc.SampleDesc.Quality = 0;
	swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
	swapChainDesc.BufferCount = 2;        // double buffering
	swapChainDesc.Scaling = scaling;
	swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;

	hr = dxgiFactory->CreateSwapChainForHwnd(
		d3dDevice.Get(),
		hwnd,
		&swapChainDesc,
		nullptr,
		nullptr,
		ppSwapChain);
	return hr;
}
// (Re)creates the window-size-dependent resources: resizes the existing swap
// chain or creates a new one, then wraps its back buffer in a D2D bitmap and
// binds it as the render target of the D2D context.
HRESULT CreateWindowSizeDependentResources(IDXGISwapChain1** swapChain, ID2D1Bitmap1** d2dTarget, ID3D11Device1* d3dDevice, ID2D1DeviceContext* d2dContext, HWND hwnd)
{
	if (*swapChain != nullptr)
	{
		// Width/height 0 means "use the current window client size".
		auto hr = (*swapChain)->ResizeBuffers(2, 0, 0, DXGI_FORMAT_B8G8R8A8_UNORM, 0);
		if (FAILED(hr)) return hr;
	}
	else
	{
		auto hr = CreateSwapChain(d3dDevice, swapChain, hwnd, DXGI_SCALING_NONE); // for Win8 or Later
		if (hr == DXGI_ERROR_INVALID_CALL) {
			hr = CreateSwapChain(d3dDevice, swapChain, hwnd, DXGI_SCALING_STRETCH); // for Win7
		}
		if (FAILED(hr)) return hr;
	}

	D2D1_BITMAP_PROPERTIES1 bitmapProperties = BitmapProperties1(
		D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW,
		PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED));
	ComPtr<IDXGISurface> dxgiBackBuffer;
	auto hr = (*swapChain)->GetBuffer(0, IID_PPV_ARGS(&dxgiBackBuffer));
	if (FAILED(hr)) return hr;
	hr = d2dContext->CreateBitmapFromDxgiSurface(dxgiBackBuffer.Get(), &bitmapProperties, d2dTarget);
	if (FAILED(hr)) return hr;
	d2dContext->SetTarget(*d2dTarget);

	return S_OK;
}
// Unbinds and releases the old render target (the back-buffer reference must
// be dropped before the swap chain buffers can be resized), then rebuilds the
// size-dependent resources.
HRESULT UpdateForWindowSizeChange(IDXGISwapChain1** swapChain, ID2D1Bitmap1** d2dTarget, ID3D11Device1* d3dDevice, ID2D1DeviceContext* d2dContext, HWND hwnd)
{
	d2dContext->SetTarget(nullptr);
	if (*d2dTarget) {
		(*d2dTarget)->Release();
		*d2dTarget = nullptr;
	}
	return CreateWindowSizeDependentResources(swapChain, d2dTarget, d3dDevice, d2dContext, hwnd);
}
// Draws one frame: clears to `bkcolor`, draws the image (when one is loaded)
// centered in the target, then presents with vsync.
HRESULT Render(IDXGISwapChain1* swapChain, ID2D1DeviceContext* d2dContext, ID2D1Effect* displayEffect, IWICFormatConverter* wicFormatConverter, DWORD bkcolor)
{
	d2dContext->BeginDraw();

	// fill with background color
	D2D1_COLOR_F oBKColor = D2D1::ColorF(bkcolor);
	d2dContext->Clear(oBKColor);

	if (displayEffect)
	{
		// get source image size
		UINT w = 0, h = 0;
		wicFormatConverter->GetSize(&w, &h);
		D2D1_SIZE_F image_size = D2D1::SizeF((FLOAT)w, (FLOAT)h);
		D2D1_RECT_F image_rect = D2D1::RectF(0, 0, image_size.width, image_size.height);

		// get destination screen size
		D2D1_SIZE_F target_size = d2dContext->GetSize();
		// Center the image; clamp the offset at 0 when the image is larger
		// than the target.
		D2D1_POINT_2F target_offset = D2D1::Point2F(
			max(0, (target_size.width - image_size.width) / 2),
			max(0, (target_size.height - image_size.height) / 2));

		// draw image to center of screen
		d2dContext->DrawImage(displayEffect, target_offset, image_rect);
	}

	auto hr = d2dContext->EndDraw();
	if (FAILED(hr))
		// D2DERR_RECREATE_TARGET is treated as non-fatal here and still falls
		// through to Present1 — presumably the caller rebuilds the device
		// resources on the next frame; confirm against the message loop.
		if (hr != D2DERR_RECREATE_TARGET)
			return hr;

	DXGI_PRESENT_PARAMETERS parameters = {};
	hr = swapChain->Present1(1, 0, &parameters);
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Retrieves the path of the ICM color profile assigned to the monitor that
// hosts `hwnd`. On input *pBufSize is the capacity of pszFilename (in
// characters); on output it receives the size reported by GetICMProfile.
HRESULT GetMonitorColorProfilePath(_In_ HWND hwnd, _Inout_ LPDWORD pBufSize, LPWSTR pszFilename)
{
	HMONITOR hMonitor = ::MonitorFromWindow(hwnd, MONITOR_DEFAULTTONEAREST);
	if (!hMonitor) return HRESULT_FROM_WIN32(::GetLastError());

	MONITORINFOEX mi = {};
	mi.cbSize = sizeof(mi);
	BOOL ret = ::GetMonitorInfo(hMonitor, &mi);
	if (!ret) return HRESULT_FROM_WIN32(::GetLastError());

	// A DC for the monitor's display device is needed to query its profile.
	HDC hdc = ::CreateDC(mi.szDevice, mi.szDevice, nullptr, nullptr);
	if (!hdc) return HRESULT_FROM_WIN32(::GetLastError());

	ret = ::GetICMProfile(hdc, pBufSize, pszFilename);
	HRESULT result = ret ? S_OK : HRESULT_FROM_WIN32(::GetLastError());

	::DeleteDC(hdc);	// always release the DC, success or failure
	return result;
}
// Reads the whole file at `path` into `buf` and reports its size in
// *file_size. `buf` must be empty on entry; on success it owns the contents.
// Returns E_POINTER on null arguments, E_FAIL when the file cannot be
// opened, its size cannot be determined, it is empty, or it cannot be read
// in full.
HRESULT LoadFile(CHeapPtr<BYTE>& buf, long* file_size, const wchar_t* path)
{
	// precondition check
	if (path == nullptr) return E_POINTER;
	if (file_size == nullptr) return E_POINTER;
	if (buf.m_pData) return E_FAIL;	// caller must pass an empty buffer

	// open
	FILE* fp = nullptr;
	errno_t err = _wfopen_s(&fp, path, L"rb");
	if (err != 0) return E_FAIL;
	if (fp == nullptr) return E_FAIL;

	// read
	HRESULT ret = E_FAIL;
	*file_size = _filelength(_fileno(fp));
	// _filelength returns -1L on failure; without this guard that value
	// would be converted to a huge size_t and passed to AllocateBytes.
	if (*file_size > 0)
		if (buf.AllocateBytes(*file_size))
			if (fread(buf, *file_size, 1, fp) == 1)
				ret = S_OK;

	// close
	fclose(fp);
	return ret;
}
// Sets the destination color context on the color-management effect.
//   mode 2 ("system"): use the monitor's ICM profile, falling back to sRGB
//                      when the profile cannot be located or loaded.
//   mode 1: force sRGB.
//   any other mode: SetValue receives a null context, which clears the
//     destination — presumably intentional for "no color management";
//     confirm against the callers' mode values.
// Returns S_FALSE (not an error) when no image/effect is loaded yet.
HRESULT UpdateDisplayColorContext(ID2D1Effect* displayEffect, ID2D1DeviceContext* d2dContext, HWND hwnd, int mode)
{
	if (!displayEffect)
		return S_FALSE;

	HRESULT hr;
	ComPtr<ID2D1ColorContext> displayColorContext;
	if (mode == 2) // mode: system
	{
		DWORD size = MAX_PATH;
		TCHAR path[MAX_PATH] = {};
		hr = GetMonitorColorProfilePath(hwnd, &size, path);
		if (SUCCEEDED(hr))
		{
			CHeapPtr<BYTE> buf;
			long file_size = 0;
			hr = LoadFile(buf, &file_size, path);
			if (SUCCEEDED(hr))
			{
				// specified color space
				hr = d2dContext->CreateColorContext(D2D1_COLOR_SPACE_CUSTOM, buf, file_size, &displayColorContext);
			}
		}
		if (!displayColorContext)
		{
			// default color space
			hr = d2dContext->CreateColorContext(D2D1_COLOR_SPACE_SRGB, nullptr, 0, &displayColorContext);
			if (FAILED(hr)) return hr;
		}
	}
	else if (mode == 1) // sRGB
	{
		hr = d2dContext->CreateColorContext(D2D1_COLOR_SPACE_SRGB, nullptr, 0, &displayColorContext);
		if (FAILED(hr)) return hr;
	}

	// set destination color space
	hr = displayEffect->SetValue(D2D1_COLORMANAGEMENT_PROP_DESTINATION_COLOR_CONTEXT, displayColorContext.Get());
	if (FAILED(hr)) return hr;

	return S_OK;
}
// ImageDrawD2D class implementation
// Private resource bundle for ImageDrawD2D (pimpl). Groups the per-image
// objects, the device-independent factories, and the device-dependent DX
// objects so each category can be torn down independently.
struct InternalImageDrawD2DParam
{
	// Image (per-content resources, rebuilt by reset_content)
	ComPtr<IWICColorContext> wicColorContext;
	ComPtr<IWICFormatConverter> wicFormatConverter;
	ComPtr<ID2D1Effect> sourceEffect;
	ComPtr<ID2D1Effect> displayEffect;

	// DX (factories are device-independent; the rest are device-dependent)
	ComPtr<ID2D1Factory1> d2dFactory;
	ComPtr<IWICImagingFactory2> wicFactory;
	ComPtr<ID2D1Device> d2dDevice;
	ComPtr<ID2D1DeviceContext> d2dContext;
	ComPtr<ID3D11Device1> d3dDevice;
	ComPtr<ID3D11DeviceContext> d3dContext;
	ComPtr<ID2D1Bitmap1> d2dTarget;
	ComPtr<IDXGISwapChain1> swapChain;

	// Releases only the factories.
	void DeleteFactory(void)
	{
		d2dFactory = nullptr;
		wicFactory = nullptr;
	}

	// Releases everything except the factories (used when reloading content).
	void DeleteContents(void)
	{
		wicColorContext = nullptr;
		wicFormatConverter = nullptr;
		sourceEffect = nullptr;
		displayEffect = nullptr;
		d2dDevice = nullptr;
		d2dContext = nullptr;
		d2dTarget = nullptr;
		d3dDevice = nullptr;
		d3dContext = nullptr;
		swapChain = nullptr;
	}

	// Releases all resources.
	void DeleteAll(void)
	{
		DeleteContents();
		DeleteFactory();
	}
};
// Binds the drawer to a window. Initializes COM and allocates the internal
// state on first use; re-attaching to the same window is a no-op, while
// attaching to a different window releases all previous resources first.
void ImageDrawD2D::attach(HWND hwndOuter)
{
	if (!_p)
	{
		// First attach: COM must be initialized before any WIC/D2D calls.
		// Balanced by CoUninitialize() in detach().
		auto hr = ::CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
		if (FAILED(hr)) return;
		_p = new InternalImageDrawD2DParam;
	}

	if (this->hwnd)
	{
		if (this->hwnd == hwndOuter) return;	// already attached to this window
		_p->DeleteAll();
	}
	this->hwnd = hwndOuter;

	auto hr = ::CreateDeviceIndependentResources_DXFactory(_p->d2dFactory.GetAddressOf(), _p->wicFactory.GetAddressOf());
	if (FAILED(hr)) return;
}
// Releases all resources, frees the internal state, and closes COM.
// Safe to call when not attached.
void ImageDrawD2D::detach(void)
{
	if (!_p) return;
	_p->DeleteAll();
	delete _p;
	_p = nullptr;
	hwnd = NULL;
	::CoUninitialize();	// balances CoInitializeEx() in attach()
}
#include "../inc/agaliarept.h"
// Rebuilds all device and image resources for a new image, or clears the
// view when `image` is null. The image's associated stream is rewound under
// the image's stream lock before decoding.
HRESULT ImageDrawD2D::reset_content(agaliaContainer* image, int colorManagementMode)
{
	if (!_p) return E_FAIL;

	_p->DeleteContents();

	auto hr = ::CreateDeviceResources_DX(_p->d2dFactory.Get(), _p->d2dDevice, _p->d2dContext, _p->d3dDevice, _p->d3dContext);
	if (FAILED(hr)) return hr;

	hr = update_for_window_size_change();
	if (FAILED(hr)) return hr;

	CComPtr<IStream> stream;
	if (image) {
		image->getAsocStream(&stream);
		// Rewind to the beginning; the seek is guarded by the stream lock.
		LARGE_INTEGER li = {};
		hr = image->LockStream();
		if (FAILED(hr)) return hr;
		stream->Seek(li, STREAM_SEEK_SET, nullptr);
		image->UnlockStream();
	}
	// No image: leave just the cleared device resources (blank view).
	if (stream == nullptr) return S_OK;

	hr = ::CreateDeviceIndependentResources_Image(_p->wicFactory.Get(), _p->wicFormatConverter.GetAddressOf(), _p->wicColorContext.GetAddressOf(), stream);
	if (FAILED(hr)) return hr;

	hr = ::CreateDeviceResources_Image(_p->displayEffect.GetAddressOf(), _p->sourceEffect.GetAddressOf(), _p->d2dContext.Get(), _p->wicFormatConverter.Get(), _p->wicColorContext.Get(), colorManagementMode);
	if (FAILED(hr)) return hr;

	hr = ::UpdateDisplayColorContext(_p->displayEffect.Get(), _p->d2dContext.Get(), hwnd, colorManagementMode);
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Rebuilds only the effect chain with a new color-management mode, keeping
// the decoded image (WIC converter / color context) intact.
HRESULT ImageDrawD2D::reset_color_profile(int colorManagementMode)
{
	if (!_p) return E_FAIL;

	_p->displayEffect = nullptr;
	_p->sourceEffect = nullptr;

	auto hr = ::CreateDeviceResources_Image(_p->displayEffect.GetAddressOf(), _p->sourceEffect.GetAddressOf(), _p->d2dContext.Get(), _p->wicFormatConverter.Get(), _p->wicColorContext.Get(), colorManagementMode);
	if (FAILED(hr)) return hr;

	hr = ::UpdateDisplayColorContext(_p->displayEffect.Get(), _p->d2dContext.Get(), hwnd, colorManagementMode);
	if (FAILED(hr)) return hr;

	return S_OK;
}
// Recreates the size-dependent resources (swap chain buffers / render
// target) after the window has been resized.
HRESULT ImageDrawD2D::update_for_window_size_change(void)
{
	if (_p == nullptr)
		return E_FAIL;
	return ::UpdateForWindowSizeChange(
		_p->swapChain.GetAddressOf(),
		_p->d2dTarget.GetAddressOf(),
		_p->d3dDevice.Get(),
		_p->d2dContext.Get(),
		hwnd);
}
// Renders one frame with the given background color.
HRESULT ImageDrawD2D::render(DWORD bkcolor)
{
	if (_p == nullptr)
		return E_FAIL;
	return ::Render(
		_p->swapChain.Get(),
		_p->d2dContext.Get(),
		_p->displayEffect.Get(),
		_p->wicFormatConverter.Get(),
		bkcolor);
}
|
Comparison of capacity in downlink WCDMA systems using soft handover techniques with SIR-based power control and site selection diversity transmission
In this paper, downlink performance of WCDMA system with site selection diversity transmission power control (SSDT) during soft handover mode is analysed. Signal to interference ratio (SIR) power control techniques are modelled and used in simulations. The study is focused on finding the optimum soft handover margin in terms of maximum system capacity under E/sub b//N/sub 0/ quality requirements. The capacity and resources allocated (number of codes required) using SSDT is compared to usual soft handover technique. |
/**
* OpenPerf API
* REST API interface for OpenPerf
*
* OpenAPI spec version: 1
* Contact: <EMAIL>
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
/*
* PacketGeneratorResult.h
*
* Results produced by a packet generator
*/
#ifndef PacketGeneratorResult_H_
#define PacketGeneratorResult_H_
#include "ModelBase.h"
#include "TrafficDurationRemainder.h"
#include "PacketGeneratorProtocolCounters.h"
#include <string>
#include "PacketGeneratorFlowCounters.h"
#include <vector>
namespace swagger {
namespace v1 {
namespace model {
/// <summary>
/// Results produced by a packet generator
/// </summary>
class PacketGeneratorResult
    : public ModelBase
{
public:
    PacketGeneratorResult();
    virtual ~PacketGeneratorResult();

    /////////////////////////////////////////////
    /// ModelBase overrides

    void validate() override;

    nlohmann::json toJson() const override;
    void fromJson(nlohmann::json& json) override;

    /////////////////////////////////////////////
    /// PacketGeneratorResult members

    /// <summary>
    /// Unique generator result identifier
    /// </summary>
    std::string getId() const;
    void setId(std::string value);

    /// <summary>
    /// Unique generator identifier that produced this result
    /// </summary>
    std::string getGeneratorId() const;
    void setGeneratorId(std::string value);
    bool generatorIdIsSet() const;
    void unsetGenerator_id();

    /// <summary>
    /// Indicates whether this result is currently being updated
    /// </summary>
    bool isActive() const;
    void setActive(bool value);

    /// <summary>
    /// Aggregated flow counters for this generator result
    /// </summary>
    std::shared_ptr<PacketGeneratorFlowCounters> getFlowCounters() const;
    void setFlowCounters(std::shared_ptr<PacketGeneratorFlowCounters> value);

    /// <summary>
    /// List of unique flow ids included in stats. Individual flow statistics may be queried via the `tx-flows` endpoint.
    /// </summary>
    std::vector<std::string>& getFlows();

    /// <summary>
    /// Per-protocol transmit counters for this generator result
    /// </summary>
    std::shared_ptr<PacketGeneratorProtocolCounters> getProtocolCounters() const;
    void setProtocolCounters(std::shared_ptr<PacketGeneratorProtocolCounters> value);

    /// <summary>
    /// Remaining traffic duration (optional; see remainingIsSet)
    /// </summary>
    std::shared_ptr<TrafficDurationRemainder> getRemaining() const;
    void setRemaining(std::shared_ptr<TrafficDurationRemainder> value);
    bool remainingIsSet() const;
    void unsetRemaining();

protected:
    // Backing fields; the *IsSet flags track optional members for
    // serialization (swagger-codegen convention).
    std::string m_Id;
    std::string m_Generator_id;
    bool m_Generator_idIsSet;
    bool m_Active;
    std::shared_ptr<PacketGeneratorFlowCounters> m_Flow_counters;
    std::vector<std::string> m_Flows;
    std::shared_ptr<PacketGeneratorProtocolCounters> m_Protocol_counters;
    std::shared_ptr<TrafficDurationRemainder> m_Remaining;
    bool m_RemainingIsSet;
};
}
}
}
#endif /* PacketGeneratorResult_H_ */
|
import { test } from 'ava';
import { ModelBuilder, StaticCreate, StaticCreateEnum } from './model-builder';
import { getModelDef } from '../utils/meta-reader';
test(`Model#name returns the ModelDefinition name`, t => {
const expectedName = 'TestModelIdentifier';
let m = newModel(expectedName);
t.is(m.name, expectedName);
});
test(`Model#name updates the ModelDefinition name`, t => {
const expectedName = 'TestModelIdentifier';
const wrongName = 'WrongPropertyName';
let m = newModel(wrongName);
t.is(m.name, wrongName);
m.name = expectedName;
t.is(m.name, expectedName);
});
test(`Model#prop creates a new Property`, t => {
const expectedPropName = 'propName';
let expectedPropType = Date;
const m = newModel('TestModel').prop(expectedPropName, x => x.type(expectedPropType)).build();
const modelDef = getModelDef(m);
t.is(modelDef.props[expectedPropName].type, expectedPropType);
});
test(`Model#prop mergers existing Properties`, t => {
const expectedPropName = 'propName';
const expectedPropType = Date;
const expectedOptRate = 0.89;
const m = newModel('TestModel')
.prop(expectedPropName, x => x.type(expectedPropType))
.prop(expectedPropName, x => x.optional(expectedOptRate))
.build();
const modelDef = getModelDef(m);
t.is(modelDef.props[expectedPropName].type, expectedPropType);
t.is(modelDef.props[expectedPropName].optional, expectedOptRate);
});
test(`Model#key creates a Property marked as a key`, t => {
const expectedPropName = 'propName';
const m = newModel('TestModel').key(expectedPropName, x => x).build();
const modelDef = getModelDef(m);
t.true(modelDef.props[expectedPropName].key);
});
test(`Model#key updates the primary key`, t => {
const expectedPropName = 'propName';
const m = newModel('TestModel').key(expectedPropName, x => x).build();
const modelDef = getModelDef(m);
t.is(modelDef.primaryKey, expectedPropName);
});
test(`Model#key can only be called once`, t => {
const modelBuilder = newModel('TestModel').key('propName', x => x).key('otherPropName', x => x);
t.throws(() => modelBuilder.build());
});
test(`Model#ref creates a Property marked as a reference`, t => {
const expectedPropName = 'propName';
const expectedForeignKey = 'specialId';
const expectedRefType = newModel('ForeignModel').key(expectedForeignKey, id => id.guid()).build();
const m = newModel('TestModel').ref(expectedPropName, expectedRefType).build();
const modelDef = getModelDef(m);
t.is(modelDef.props[expectedPropName].ref, expectedRefType);
t.is(modelDef.props[expectedPropName].foreignKey, expectedForeignKey);
});
test(`Model#inherit links models`, t => {
const Animal = newModel('Animal').build();
const Eagle = newModel('Eagle').inherits(Animal).build();
const eagleDef = getModelDef(Eagle);
t.is(eagleDef.inherits, Animal);
});
test(`Model#getter adds a getter function`, t => {
const expectedGetterName = 'name';
const expcetedGetterResult = 'Baldy';
const expectedGetterFunc = function getName() { return expcetedGetterResult; };
const Eagle = newModel('Eagle').getter(expectedGetterName, expectedGetterFunc).build();
const eagleDef = getModelDef(Eagle);
t.is(eagleDef.nativeDefinitions[expectedGetterName].get, expectedGetterFunc);
const eagle = new Eagle();
t.is(eagle.name, expcetedGetterResult);
});
test(`Model#setter adds a setter function`, t => {
const expectedSetterName = 'name';
const expectedPrivateProp = '_name';
const expectedSetterResult = 'Baldy';
const expectedSetterFunc = function getName(value:string) { return this[expectedPrivateProp] = value; };
const Eagle = newModel('Eagle').setter(expectedSetterName, expectedSetterFunc).build();
const eagleDef = getModelDef(Eagle);
t.is(eagleDef.nativeDefinitions[expectedSetterName].set, expectedSetterFunc);
const eagle = new Eagle();
t.is(typeof eagle[expectedPrivateProp], 'undefined');
eagle.name = expectedSetterResult;
t.is(eagle[expectedPrivateProp], expectedSetterResult);
});
test(`Model#func adds a custom method`, t => {
const expectedId = 'TestModel';
const expectedMethodName = 'testMethod';
const expectedMethodResult = 'test 1 2 3';
const expectedMethodFn = function () { return this.value };
const testModelBuilder = newModel(expectedId).func(expectedMethodName, expectedMethodFn);
const TestModel = testModelBuilder.build();
const testModelDef = getModelDef(TestModel);
const testModelInstance = new TestModel();
testModelInstance.value = expectedMethodResult;
t.is(testModelDef.nativeDefinitions[expectedMethodName].value, expectedMethodFn);
t.is(testModelInstance[expectedMethodName](), expectedMethodResult);
});
test(`Model#toString adds custom toString method`, t => {
const expectedId = 'TestModel';
const expectedToStringResult = 'test 1 2 3';
const expectedToStringFn = function () { return expectedToStringResult };
const testModelBuilder = newModel(expectedId).toString(expectedToStringFn);
const expectedModelBuilderToString = `ModelBuilder<${expectedId}>`;
t.is(testModelBuilder.toString(), expectedModelBuilderToString);
const TestModel = testModelBuilder.build();
const testModelInstance = new TestModel();
t.is(testModelInstance.toString(), expectedToStringResult);
});
test(`Model#build properly inherits properties from parent`, t => {
const dietKey = 'Diet';
const dietChoices = ['herbivore', 'carnivore', 'omnivore'];
const lifespanKey = 'Lifespan';
const lifespanMin = 20;
const lifespanMax = 30;
const Animal = newModel('Animal')
.prop(dietKey, x => x.pick(dietChoices))
.prop(lifespanKey, x => x.int(0, 200).optional())
.build();
const Eagle = newModel('Eagle').inherits(Animal)
.prop(lifespanKey, x => x.int(lifespanMin, lifespanMax))
.build();
const eagleDef = getModelDef(Eagle);
t.is(eagleDef.inherits, Animal);
t.deepEqual(eagleDef.props[dietKey].pick, dietChoices);
t.is(eagleDef.props[lifespanKey].type, Number);
t.is(eagleDef.props[lifespanKey].min, lifespanMin);
t.is(eagleDef.props[lifespanKey].max, lifespanMax);
t.is(typeof eagleDef.props[lifespanKey].optional, 'undefined');
});
test(`Model#build returns a working class with inheritence`, t => {
const EAGLE_NAME = 'Eagle';
const BALDEAGLE_NAME = 'BaldEagle';
const Animal = newModel('Animal')
.prop('id', id => id.guid())
.build();
const Eagle = newModel(EAGLE_NAME).inherits(Animal).build();
const BaldEagle = newModel(BALDEAGLE_NAME).inherits(Eagle).build();
let eagle = new Eagle();
let baldEagle = new BaldEagle();
t.is(eagle.constructor.name, EAGLE_NAME);
t.is(baldEagle.constructor.name, BALDEAGLE_NAME);
t.true(eagle instanceof Eagle);
t.true(eagle instanceof Animal);
t.false(eagle instanceof BaldEagle);
t.true(baldEagle instanceof Eagle);
t.true(baldEagle instanceof Animal);
t.true(baldEagle instanceof BaldEagle);
});
test(`Model#build updates the primaryKey if a property is marked as key`, t => {
const expectedPrimaryKey = 'latinName';
const Animal = newModel('Animal')
.prop(expectedPrimaryKey, id => {
id['_definition'].key = true;
return id;
})
.build();
const modelDef = getModelDef(Animal);
t.is(modelDef.primaryKey, expectedPrimaryKey);
});
test(`StaticCreate creates a new instance with id`, t => {
const expectedId = 'StaticTest';
const StaticTest = StaticCreate(expectedId).build();
const staticTestDef = getModelDef(StaticTest);
t.is(staticTestDef.name, expectedId);
});
test(`StaticCreateEnum creates enums from objects`, t => {
const offLabel = 'Off';
const onLabel = 'On';
const expectedValues = {
[offLabel]: 0,
[onLabel]: 1
};
const LightSwitch = StaticCreateEnum(expectedValues);
t.is(expectedValues.Off, LightSwitch.Off);
t.is(expectedValues.On, LightSwitch.On);
t.is(offLabel, LightSwitch[LightSwitch.Off]);
t.is(onLabel, LightSwitch[LightSwitch.On]);
});
test(`StaticCreateEnum creates enums from array`, t => {
const offLabel = 'Off';
const onLabel = 'On';
const expectedValues = [offLabel, onLabel];
const LightSwitch = StaticCreateEnum(expectedValues);
t.is(expectedValues.indexOf(offLabel), LightSwitch.Off);
t.is(expectedValues.indexOf(onLabel), LightSwitch.On);
t.is(offLabel, LightSwitch[LightSwitch.Off]);
t.is(onLabel, LightSwitch[LightSwitch.On]);
});
test(`StaticCreateEnum creates enums from string params`, t => {
	const labels = ['Off', 'On'];
	// Spread the labels so they arrive as individual string arguments.
	const LightSwitch = StaticCreateEnum(...labels);
	t.is(labels.indexOf('Off'), LightSwitch.Off);
	t.is(labels.indexOf('On'), LightSwitch.On);
	t.is('Off', LightSwitch[LightSwitch.Off]);
	t.is('On', LightSwitch[LightSwitch.On]);
});
/** Convenience wrapper: start a ModelBuilder for the given model name. */
function newModel(name: string) {
	return new ModelBuilder({ name: name });
}
|
<filename>src/algoritmia/problems/geometry/convexhull/graham.py
from algoritmia.problems.geometry.utils import Point2D, left
from algoritmia.utils import argmax
from algoritmia.datastructures.queues.lifo import Lifo
from math import atan2
#< graham
class GrahamConvexHullFinder:
    # Convex hull of a 2D point set using Graham's scan.
    def find(self, S: "IList<Point2D>") -> "IList<int>":
        """Return the indices (into the input list) of the convex hull vertices.

        Pivot is the lowest point (ties broken by largest x); the remaining
        points are sorted by polar angle around it, then scanned with a stack.
        """
        S1 = S = [Point2D(*p) for p in S]  # normalized copy; S1 keeps caller order for index lookup
        min_y = min(pt.y for pt in S)
        # Pivot: among the lowest points, take the right-most one.
        p = argmax((pt for pt in S if pt.y == min_y), lambda pt: pt.x)
        # Sort remaining points by angle around the pivot; q.x breaks angular ties.
        S = [p] + sorted((q for q in S if q!=p), key=lambda q: (atan2(p.y-q.y, p.x-q.x), q.x))
        Q = Lifo()
        # Seed the stack with the pivot and the first two sorted points.
        Q.push(0); Q.push(1); Q.push(2)
        for pi in range(3, len(S)):
            pj, pk = Q[-1], Q[-2]
            # Pop while the two stack tops and S[pi] fail to make a left turn.
            while not left(S[pk], S[pj], S[pi]):
                Q.pop()
                pj, pk = Q[-1], Q[-2]
            Q.push(pi)
        # Map stack entries back to positions in the caller's original ordering.
        # NOTE: len(Q) is evaluated once, so exactly the current stack is drained.
        return [S1.index(S[Q.pop()]) for i in range(len(Q))]
#> graham |
package client
import (
"context"
"github.com/Asutorufa/fabricsdk/client/grpcclient"
ordererProtos "github.com/hyperledger/fabric-protos-go/orderer"
)
//OrdererClient orderer client use grpc
//
// It embeds the generic Client, reusing its address, server-name override
// and gRPC connection fields.
type OrdererClient struct {
	Client
}
// NewOrdererClient creates a new orderer client, applying the given
// functional options to the gRPC client config before dialing.
func NewOrdererClient(address, override string, Opt ...func(config *grpcclient.ClientConfig)) (o *OrdererClient, err error) {
	cfg := &grpcclient.ClientConfig{}
	// Apply every option in order; later options may override earlier ones.
	for _, apply := range Opt {
		apply(cfg)
	}
	o = &OrdererClient{}
	o.address = address
	o.sn = override
	grpcClient, err := grpcclient.NewGRPCClient(cfg)
	if err != nil {
		return nil, err
	}
	o.grpcConn, err = grpcClient.NewConnection(o.address, grpcclient.ServerNameOverride(o.sn))
	return
}
// NewOrdererClientSelf creates a new orderer client by delegating the
// connection setup to the generic NewClient constructor.
func NewOrdererClientSelf(address, override string, Opt ...func(config *grpcclient.ClientConfig)) (*OrdererClient, error) {
	base, err := NewClient(address, override, Opt...)
	if err != nil {
		return nil, err
	}
	orderer := &OrdererClient{Client: *base}
	return orderer, nil
}
//Broadcast orderer broadcast client
//
// Opens an AtomicBroadcast Broadcast stream on the underlying connection.
// NOTE(review): context.TODO() means the stream has no deadline or
// cancellation — confirm callers manage the stream's lifetime.
func (o *OrdererClient) Broadcast() (ordererProtos.AtomicBroadcast_BroadcastClient, error) {
	return ordererProtos.NewAtomicBroadcastClient(o.grpcConn).Broadcast(context.TODO())
}

//Deliver orderer deliver client
//
// Opens an AtomicBroadcast Deliver (block delivery) stream on the underlying
// connection; same context.TODO() caveat as Broadcast.
func (o *OrdererClient) Deliver() (ordererProtos.AtomicBroadcast_DeliverClient, error) {
	return ordererProtos.NewAtomicBroadcastClient(o.grpcConn).Deliver(context.TODO())
}
|
#!/usr/bin/env python
"""Implements part of CCL, the Common Command Language, ISO 8777. I'm
working from the description in the YAZ toolkit
(http://www.indexdata.dk/yaz/doc/tools.php), rather than the ISO
spec. Two extensions:
- qualifiers can be literal "(attrtyp, attrval)" pairs, so, e.g., the
following is a legitimate for ISBN: "(1,7)=0312033095"
- the optional ATTRSET (attrset/query) which must appear at the beginning
of the string.
Allowed values are:
BIB1 (default)
XD1
UTIL
ZTHES1
EXP1
or an oid expressed as a dotted string. (A leading dot implies a
prefix of 1.2.840.1003.3, so, e.g., .1 is the same as BIB1.)
Eventually I will support v3-style mixing attribute sets within
a single query, but for now I don't.
"""
from __future__ import nested_scopes
import string
in_setup = 0  # flipped to 1 when PyZ3950 isn't importable (i.e. while running setup.py)
try:
    from PyZ3950 import z3950
    from PyZ3950 import oids
    from PyZ3950 import asn1

    # Attribute-set name (lower case) -> attribute-set OID value.
    _attrdict = {
        'bib1' : oids.Z3950_ATTRS_BIB1_ov,
        'zthes1': oids.Z3950_ATTRS_ZTHES_ov,
        'xd1': oids.Z3950_ATTRS_XD1_ov,
        'utility': oids.Z3950_ATTRS_UTIL_ov,
        'exp1': oids.Z3950_ATTRS_EXP1_ov
    }
except ImportError, err:
    print "Error importing (OK during setup)", err
    in_setup = 1
class QuerySyntaxError(Exception): pass   # base class for all CCL query errors
class ParseError(QuerySyntaxError): pass  # grammar-level failure
class LexError(QuerySyntaxError): pass    # tokenizer-level failure
class UnimplError(QuerySyntaxError): pass # valid CCL that this module can't translate yet
# Token names used by the PLY lexer below.
tokens = ('LPAREN', 'RPAREN', 'COMMA',
          'SET', 'ATTRSET', 'QUAL', 'QUOTEDVALUE', 'RELOP', 'WORD',
          'LOGOP', 'SLASH')

# Single-character punctuation tokens (the string IS the PLY regex).
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_SLASH = r'/'
def t_ATTRSET(t):
    r'(?i)ATTRSET'
    # Docstring above is the PLY regex: case-insensitive ATTRSET keyword.
    return t

def t_SET(t):  # need to def as function to override parsing as WORD, gr XXX
    r'(SET)'
    return t
# Relational operator -> BIB-1 relation attribute value (attribute type 2).
relop_to_attrib = {
    '<': 1,
    '<=': 2,
    '=': 3,
    '>=': 4,
    '>': 5,
    '<>': 6}

# RELOP token regex: alternation over every operator above.
t_RELOP = "|".join(["(%s)" % r for r in relop_to_attrib.keys()])
# XXX Index Data docs say 'doesn't follow ... ISO8777'?
# XXX expand to rd. addt'l defns from file?
# Qualifier name -> (attribute type, attribute value) pair.
qual_dict = { # These are bib-1 attribute values, see
    # http://www.loc.gov/z3950/agency/defns/bib1.html and ftp://ftp.loc.gov/pub/z3950/defs/bib1.txt
    'TI': (1,4),
    'AU': (1,1003), # use 1003 to work w/ both NLC-BNC and LC
    'ISBN': (1,7),
    'LCCN': (1,9),
    'ANY': (1,1016),
    'FIF': (3, 1), # first-in-field
    'AIF': (3,3), # any-in-field (default)
    'RTRUNC': (5,1),
    'NOTRUNC': (5,100) # (default)
}

default_quals = ['ANY'] # XXX should be per-attr-set
default_relop = '='
def t_QUAL(t):
    # No docstring on purpose: the PLY regex is installed dynamically by
    # mk_quals() from the current contents of qual_dict.
    return t

def mk_quals():
    """Rebuild t_QUAL's docstring/regex from qual_dict plus literal "(n,m)" pairs."""
    quals = ("|".join(map(lambda x: '(' + x + ')', qual_dict.keys())))
    t_QUAL.__doc__ = "(?i)" + quals + r"|(\([0-9]+,[0-9]+\))"
def t_QUOTEDVALUE(t):
    r"(\".*?\")"
    # Strip the surrounding double quotes from the token value.
    if t.value[0] == '"':
        t.value = t.value[1:-1]
    return t

# Characters allowed to start / continue a bare WORD token.
word_init = "[a-z]|[A-Z]|[0-9]|&|:"
word_non_init = ",|\.|\'"

t_WORD = "(%s)(%s|%s)*" % (word_init, word_init, word_non_init)
def t_LOGOP(t):
    r'(?i)(AND)|(OR)|(NOT)'
    # Boolean operators, matched case-insensitively.
    return t

t_ignore = " \t"  # whitespace silently skipped by the lexer

def t_error(t):
    # Any character no rule matches aborts lexing with a LexError.
    raise LexError ('t_error: ' + str (t))
from ply import lex
def relex():
    """Regenerate the module-global lexer (needed after qual_dict changes)."""
    global lexer
    mk_quals()
    lexer = lex.lex()

relex()  # build the initial lexer at import time

def add_qual(qual_name, val):
    """Add a qualifier definition, and regenerate the lexer."""
    qual_dict[qual_name] = val
    relex()
from ply import yacc
#if in_setup:
# import yacc
#else:
# from PyZ3950 import yacc
class Node:
    """AST node for the CCL parser: a type tag, child nodes and a leaf value.

    Children may be Node instances or plain values (strings, tuples, or a
    QuallistVal), so the pretty-printer handles both.
    """

    def __init__(self, type, children=None, leaf=None):
        self.type = type
        # Any falsy children argument (None, empty list) becomes a fresh list.
        self.children = children if children else []
        self.leaf = leaf

    def str_child(self, child, depth):
        # Recurse into real nodes; stringify plain values with indentation.
        if isinstance(child, Node):
            return child.str_depth(depth)
        return " " * (4 * depth) + str(child) + "\n"

    def str_depth(self, depth):
        pad = " " * (4 * depth)
        parts = ["%s%s %s" % (pad, self.type, self.leaf)]
        parts.append("".join(self.str_child(c, depth + 1) for c in self.children))
        return "\n".join(parts)

    def __str__(self):
        return "\n" + self.str_depth(0)
# NOTE: the docstrings below are PLY grammar productions — do not edit them.

def p_top(t):
    'top : cclfind_or_attrset'
    t[0] = t[1]

def p_cclfind_or_attrset_1(t):
    'cclfind_or_attrset : cclfind'
    t[0] = t[1]

def p_cclfind_or_attrset_2(t):
    'cclfind_or_attrset : ATTRSET LPAREN WORD SLASH cclfind RPAREN'
    # Wrap the query in an 'attrset' node carrying the attribute-set name.
    t[0] = Node ('attrset', [t[5]], t[3])

def p_ccl_find_1(t):
    'cclfind : cclfind LOGOP elements'
    # Boolean operator node; associativity comes from the precedence table below.
    t[0] = Node ('op', [t[1],t[3]], t[2])

def p_ccl_find_2(t):
    'cclfind : elements'
    t[0] = t[1]

def p_elements_1(t):
    'elements : LPAREN cclfind RPAREN'
    t[0] = t[2]
class QuallistVal:
    """Pairs a list of (attr-type, attr-value) qualifiers with a search value.

    Indexable like a 2-tuple so grammar actions can treat it positionally:
    index 0 is the qualifier list, index 1 the value.
    """

    def __init__(self, quallist, val):
        self.quallist = quallist
        self.val = val

    def __str__(self):
        return "QV: %s %s" % (str(self.quallist), str(self.val))

    def __getitem__(self, i):
        if i == 0:
            return self.quallist
        elif i == 1:
            return self.val
        # Anything else is out of range for this two-slot container.
        raise IndexError('QuallistVal err %s' % str(i))
def xlate_qualifier(x):
    """Translate a qualifier token into an (attribute-type, attribute-value) pair.

    Accepts either a literal "(type,value)" pair such as "(1,7)", or a
    symbolic qualifier name looked up case-insensitively in qual_dict.
    Raises KeyError for unknown symbolic names.
    """
    if x[0] == '(' and x[-1] == ')':
        t = x[1:-1].split (',') # t must be of len 2 b/c of lexer
        # int() instead of the long-deprecated string.atoi (same behavior).
        return (int(t[0]), int(t[1]))
    return qual_dict[(x.upper ())]
def p_elements_2(t):
    'elements : SET RELOP WORD'
    # Only '=' makes sense when referencing a saved result set.
    if t[2] <> '=':
        # NOTE(review): str() is called here with three arguments, which would
        # itself raise a TypeError before the intended message is built;
        # probably meant str((t[1], t[2], t[3])) — confirm before relying on it.
        raise QuerySyntaxError (str (t[1], str (t[2]), str (t[3])))
    t[0] = Node ('set', leaf = t[3])

def p_elements_3(t):
    'elements : val'
    # Bare value: search using the default qualifiers and default relation.
    t[0] = Node ('relop', QuallistVal (map (xlate_qualifier, default_quals), t[1]), default_relop)

def p_elements_4(t):
    'elements : quallist RELOP val'
    t[0] = Node ('relop', QuallistVal(map (xlate_qualifier, t[1]),t[3]), t[2])

# XXX p_elements_5 would be quals followed by recursive def'n, not yet implemented
# XXX p_elements_6 would be quals followed by range, not yet implemented.

def p_quallist_1(t):
    'quallist : QUAL'
    t[0] = [t[1]]

def p_quallist_2(t):
    'quallist : quallist COMMA QUAL'
    t[0] = t[1] + [t[3]]

def p_val_1(t):
    'val : QUOTEDVALUE'
    t[0] = t[1]

def p_val_2(t):
    'val : val WORD'
    # Adjacent bare words are joined into a single space-separated phrase.
    t[0] = t[1] + " " + t[2]

def p_val_3(t):
    'val : WORD'
    t[0] = t[1]
# XXX also don't yet handle proximity operator

def p_error(t):
    # PLY calls this on any syntax error; surface it as a ParseError.
    raise ParseError ('Parse p_error ' + str (t))

# LOGOP is left-associative, resolving the and/or/not chaining ambiguity.
precedence = (
    ('left', 'LOGOP'),
)

# Build the parser once at import time; tables are cached as PyZ3950_parsetab.
yacc.yacc (debug=0, tabmodule = 'PyZ3950_parsetab')
#yacc.yacc (debug=0, tabpackage = 'PyZ3950', tabmodule='PyZ3950_parsetab')
def attrset_to_oid(attrset):
    """Map an attribute-set name or dotted OID string to an asn1.OidVal.

    Known symbolic names (bib1, xd1, ...) come from _attrdict.  A leading
    dot means the OID is relative to the Z39.50 attribute-set prefix.
    Raises ParseError when an OID component is not an integer.
    """
    l = attrset.lower ()
    # 'in' instead of the Python-2-only dict.has_key (same semantics).
    if l in _attrdict:
        return _attrdict [l]
    split_l = l.split ('.')
    if split_l[0] == '':
        # Leading dot: prepend the standard Z39.50 attribute-set prefix.
        split_l = oids.Z3950_ATTRS + split_l[1:]
    try:
        # Eager list comprehension (replaces map + deprecated string.atoi) so
        # the ValueError is raised inside this try block.
        intlist = [int(component) for component in split_l]
    except ValueError:
        raise ParseError ('Bad OID: ' + l)
    return asn1.OidVal (intlist)
def tree_to_q(ast):
    """Convert a parsed CCL AST node into a Z39.50 RPN query fragment.

    Returns the (choice-tag, value) pairs the z3950 module expects.
    Raises UnimplError for AST node types that can't be translated.
    """
    if ast.type == 'op':
        # Boolean operator: translate both operands recursively.
        myrpnRpnOp = z3950.RpnRpnOp ()
        myrpnRpnOp.rpn1 = tree_to_q(ast.children[0])
        myrpnRpnOp.rpn2 = tree_to_q(ast.children[1])
        op = ast.leaf.lower ()
        if op == 'not': op = 'and-not' # CCL spec of 'not' vs. Z39.50 spec of 'and-not'
        myrpnRpnOp.op = (op, None)
        return ('rpnRpnOp', myrpnRpnOp)
    elif ast.type == 'relop':
        # XXX but e.g. LC (http://lcweb.loc.gov/z3950/lcserver.html)
        # doesn't support other relation attributes, either.
        try:
            relattr = relop_to_attrib [ast.leaf]
        except KeyError: # should never happen, how could we have lexed it?
            raise UnimplError (ast.leaf)
        def make_aelt (qual):
            # qual is an (attribute-type, attribute-value) pair.
            val = ('numeric', qual [1])
            return z3950.AttributeElement (attributeType = qual[0],
                                           attributeValue = val)
        apt = z3950.AttributesPlusTerm ()
        quallist = ast.children.quallist
        # Non-'=' relations add an explicit relation attribute.
        if ast.leaf <> '=':
            quallist.append ((2,relattr)) # 2 is relation attribute
            # see http://www.loc.gov/z3950/agency/markup/13.html ATR.1.1
        apt.attributes = map (make_aelt, quallist)
        apt.term = ('general', ast.children.val) # XXX update for V3?
        return ('op', ('attrTerm', apt))
    elif ast.type == 'set':
        # Reference to a previously created result set.
        return ('op', ('resultSet', ast.leaf))
    raise UnimplError("Bad ast type " + str(ast.type))
def mk_rpn_query(query):
    """Transform a CCL query into an RPN query."""
    # need to copy or create a new lexer because it contains globals
    # PLY 1.0 lacks __copy__
    # PLY 1.3.1-1.5 have __copy__, but it's broken and returns None
    # I sent <NAME> a patch, so future PLY releases will
    # presumably work correctly.
    # Recreating the lexer each time is noticeably slower, so this solution
    # is suboptimal for PLY <= 1.5, but better than being thread-unsafe.
    # Perhaps I should have per-thread lexer instead XXX
    # with example/twisted/test.py set to parse_only, I get 277 parses/sec
    # with fixed PLY, vs. 63 parses/sec with broken PLY, on my 500 MHz PIII
    # laptop.
    copiedlexer = None
    if hasattr (lexer, '__copy__'):
        copiedlexer = lexer.__copy__ ()
    if copiedlexer == None:
        # Broken/missing __copy__: fall back to building a fresh lexer.
        copiedlexer = lex.lex ()
    ast = yacc.parse (query, copiedlexer)
    return ast_to_rpn (ast)
def ast_to_rpn(ast):
    """Wrap a parsed AST in a z3950.RPNQuery, honoring any ATTRSET prefix."""
    if ast.type == 'attrset':
        attrset = attrset_to_oid (ast.leaf)
        ast = ast.children [0]
    else:
        # No explicit ATTRSET in the query: default to BIB-1.
        attrset = oids.Z3950_ATTRS_BIB1_ov
    rpnq = z3950.RPNQuery (attributeSet = attrset)
    rpnq.rpn = tree_to_q (ast)
    return ('type_1', rpnq)
def testlex(s):
    # Debug helper: dump the token stream for a query string.
    lexer.input (s)
    while 1:
        token = lexer.token ()
        if not token:
            break
        print token

def testyacc(s):
    # Debug helper: parse a query and print both the AST and the RPN form.
    copylex = lexer.__copy__ ()
    ast = yacc.parse (s, lexer = copylex)
    print "AST:", ast
    print "RPN Query:", ast_to_rpn (ast)
if __name__ == '__main__':
    # Interactive driver: run one sample query, then parse stdin queries
    # until a blank line is entered.
    testfn = testyacc
    # testfn = testlex
    testfn ('attrset (BIB1/ au="<NAME>" or ti=Sandman)')
    while 1:
        s = raw_input ('Query: ')
        if len (s) == 0:
            break
        testfn (s)
    # testyacc ()
    # testlex ()
|
<reponame>perezdouglas935/godot
/*************************************************************************/
/* fbx_material.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 <NAME>, <NAME>. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "fbx_material.h"
#include "scene/resources/material.h"
#include "scene/resources/texture.h"
#include "tools/validation_tools.h"
// Returns the material name parsed from the FBX material node.
String FBXMaterial::get_material_name() const {
	return material_name;
}

// Stores the parser-owned material pointer. NOTE(review): raw pointer is
// kept as-is; lifetime appears to be managed by the FBX document parser —
// confirm it outlives this FBXMaterial.
void FBXMaterial::set_imported_material(FBXDocParser::Material *p_material) {
	material = p_material;
}
// Appends candidate texture locations to texture_search_paths: the file next
// to the current directory when no sub-directory is given, otherwise the
// sub-directory both inside and one level above the current directory.
void FBXMaterial::add_search_string(String p_filename, String p_current_directory, String search_directory, Vector<String> &texture_search_paths) {
	const String base_dir = p_current_directory.get_base_dir();
	if (search_directory.is_empty()) {
		texture_search_paths.push_back(base_dir.plus_file(p_filename));
	} else {
		texture_search_paths.push_back(base_dir.plus_file(search_directory + "/" + p_filename));
		texture_search_paths.push_back(base_dir.plus_file("../" + search_directory + "/" + p_filename));
	}
}
// Recursively searches p_base for a file named p_file_to_find and returns its
// full path, or an empty String when not found.
// Fix: the original returned early from inside the loop, skipping
// dir.list_dir_end() and leaving the directory listing open.
String find_file(const String &p_base, const String &p_file_to_find) {
	_Directory dir;
	dir.open(p_base);

	dir.list_dir_begin();
	String found;
	String n = dir.get_next();
	while (n != String()) {
		if (n == "." || n == "..") {
			n = dir.get_next();
			continue;
		}
		if (dir.current_is_dir()) {
			// Don't use `path_to` or the returned path will be wrong.
			const String f = find_file(p_base + "/" + n, p_file_to_find);
			if (f != "") {
				found = f;
				break;
			}
		} else if (n == p_file_to_find) {
			found = p_base + "/" + n;
			break;
		}
		n = dir.get_next();
	}
	// Always close the listing, even when a match was found.
	dir.list_dir_end();

	return found;
}
// fbx will not give us good path information and let's not regex them to fix them
// no relative paths are in fbx generally they have a rel field but it's populated incorrectly by the SDK.
// Fix: the original returned early from inside the res:// scan, skipping
// dir.list_dir_end() and leaving the directory listing open.
String FBXMaterial::find_texture_path_by_filename(const String p_filename, const String p_current_directory) {
	_Directory dir;
	Vector<String> paths;
	// Probe the common texture folder layouts next to the FBX file first.
	add_search_string(p_filename, p_current_directory, "", paths);
	add_search_string(p_filename, p_current_directory, "texture", paths);
	add_search_string(p_filename, p_current_directory, "textures", paths);
	add_search_string(p_filename, p_current_directory, "Textures", paths);
	add_search_string(p_filename, p_current_directory, "materials", paths);
	add_search_string(p_filename, p_current_directory, "mats", paths);
	add_search_string(p_filename, p_current_directory, "pictures", paths);
	add_search_string(p_filename, p_current_directory, "images", paths);

	for (int i = 0; i < paths.size(); i++) {
		if (dir.file_exists(paths[i])) {
			return paths[i];
		}
	}

	// We were not able to find the texture in the common locations,
	// try to find it into the project globally.
	// The common textures can be stored into one of those folders:
	// res://asset
	// res://texture
	// res://material
	// res://mat
	// res://image
	// res://picture
	//
	// Note the folders can also be called with custom names, like:
	// res://my_assets
	// since the keyword `asset` is into the directory name the textures will be
	// searched there too.

	dir.open("res://");
	dir.list_dir_begin();
	String found;
	String n = dir.get_next();
	while (n != String()) {
		if (n == "." || n == "..") {
			n = dir.get_next();
			continue;
		}
		if (dir.current_is_dir()) {
			const String lower_n = n.to_lower();
			if (
					// Don't need to use plural.
					lower_n.find("asset") >= 0 ||
					lower_n.find("texture") >= 0 ||
					lower_n.find("material") >= 0 ||
					lower_n.find("mat") >= 0 ||
					lower_n.find("image") >= 0 ||
					lower_n.find("picture") >= 0) {
				// Don't use `path_to` or the returned path will be wrong.
				const String f = find_file(String("res://") + n, p_filename);
				if (f != "") {
					found = f;
					break;
				}
			}
		}
		n = dir.get_next();
	}
	// Always close the listing, even when a match was found.
	dir.list_dir_end();

	return found;
}
// Casts an FBX property to the requested type T.  Returns p_default (with an
// error printed) when the property is missing or has a different dynamic type;
// p_name/p_type are only used to build the error message.
template <class T>
T extract_from_prop(FBXDocParser::PropertyPtr prop, const T &p_default, const std::string &p_name, const String &p_type) {
	ERR_FAIL_COND_V_MSG(prop == nullptr, p_default, "invalid property passed to extractor");
	const FBXDocParser::TypedProperty<T> *val = dynamic_cast<const FBXDocParser::TypedProperty<T> *>(prop);

	ERR_FAIL_COND_V_MSG(val == nullptr, p_default, "The FBX is corrupted, the property `" + String(p_name.c_str()) + "` is a `" + String(typeid(*prop).name()) + "` but should be a " + p_type);

	// Successfully verified the dynamic type; hand back the stored value.
	return val->Value();
}
// Builds a Godot StandardMaterial3D from the parsed FBX material: first binds
// every referenced texture (from disk or embedded in the FBX), then maps the
// recognized FBX material properties onto the Godot material parameters.
// Returns nullptr when no material was set via set_imported_material().
Ref<StandardMaterial3D> FBXMaterial::import_material(ImportState &state) {
	ERR_FAIL_COND_V(material == nullptr, nullptr);

	const String p_fbx_current_directory = state.path;

	Ref<StandardMaterial3D> spatial_material;
	spatial_material.instance();

	// read the material file
	// is material two sided
	// read material name
	print_verbose("[material] material name: " + ImportUtils::FBXNodeToName(material->Name()));

	material_name = ImportUtils::FBXNodeToName(material->Name());

	// --- Texture binding pass -------------------------------------------
	for (const std::pair<std::string, const FBXDocParser::Texture *> iter : material->Textures()) {
		const uint64_t texture_id = iter.second->ID();
		const std::string &fbx_mapping_name = iter.first;
		const FBXDocParser::Texture *fbx_texture_data = iter.second;
		const String absolute_texture_path = iter.second->FileName().c_str();
		const String texture_name = absolute_texture_path.get_file();
		const String file_extension = absolute_texture_path.get_extension().to_upper();

		const String debug_string = "texture id: " + itos(texture_id) + " texture name: " + String(iter.second->Name().c_str()) + " mapping name: " + String(fbx_mapping_name.c_str());
		// remember errors STILL need this string at the end for when you aren't in verbose debug mode :) they need context for when you're not verbose-ing.
		print_verbose(debug_string);

		// NOTE(review): file_extension was already upper-cased above, so this
		// second to_upper() is redundant (harmless).
		const String file_extension_uppercase = file_extension.to_upper();

		if (fbx_transparency_flags.count(fbx_mapping_name) > 0) {
			// just enable it later let's make this fine-tuned.
			spatial_material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA);
		}

		ERR_CONTINUE_MSG(file_extension.is_empty(), "your texture has no file extension so we had to ignore it, let us know if you think this is wrong file an issue on github! " + debug_string);
		ERR_CONTINUE_MSG(fbx_texture_map.count(fbx_mapping_name) <= 0, "This material has a texture with mapping name: " + String(fbx_mapping_name.c_str()) + " which is not yet supported by this importer. Consider opening an issue so we can support it.");
		ERR_CONTINUE_MSG(
				file_extension_uppercase != "PNG" &&
						file_extension_uppercase != "JPEG" &&
						file_extension_uppercase != "JPG" &&
						file_extension_uppercase != "TGA" &&
						file_extension_uppercase != "WEBP" &&
						file_extension_uppercase != "DDS",
				"The FBX file contains a texture with an unrecognized extension: " + file_extension_uppercase);

		print_verbose("Getting FBX mapping mode for " + String(fbx_mapping_name.c_str()));
		// get the texture map type
		const StandardMaterial3D::TextureParam mapping_mode = fbx_texture_map.at(fbx_mapping_name);
		print_verbose("Set FBX mapping mode to " + get_texture_param_name(mapping_mode));

		Ref<Texture> texture;
		print_verbose("texture mapping name: " + texture_name);

		if (state.cached_image_searches.has(texture_name)) {
			// Already resolved this texture for a previous material/slot.
			texture = state.cached_image_searches[texture_name];
		} else {
			// Look for the texture on disk first.
			String path = find_texture_path_by_filename(texture_name, p_fbx_current_directory);
			if (!path.is_empty()) {
				Ref<Texture2D> image_texture = ResourceLoader::load(path);

				ERR_CONTINUE(image_texture.is_null());

				texture = image_texture;
				state.cached_image_searches.insert(texture_name, texture);
				print_verbose("Created texture from loaded image file.");

			} else if (fbx_texture_data != nullptr && fbx_texture_data->Media() != nullptr && fbx_texture_data->Media()->IsEmbedded()) {
				// This is an embedded texture. Extract it.
				Ref<Image> image;
				//image.instance(); // oooo double instance bug? why make Image::_png_blah call

				// Decode by extension; each loader works from the in-memory buffer.
				const String extension = texture_name.get_extension().to_upper();
				if (extension == "PNG") {
					// The stored file is a PNG.
					image = Image::_png_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength());
					ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded PNG image load fail.");

				} else if (
						extension == "JPEG" ||
						extension == "JPG") {
					// The stored file is a JPEG.
					image = Image::_jpg_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength());
					ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded JPEG image load fail.");

				} else if (extension == "TGA") {
					// The stored file is a TGA.
					image = Image::_tga_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength());
					ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded TGA image load fail.");

				} else if (extension == "WEBP") {
					// The stored file is a WEBP.
					image = Image::_webp_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength());
					ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded WEBP image load fail.");

					// } else if (extension == "DDS") {
					// 	// In this moment is not possible to extract a DDS from a buffer, TODO consider add it to godot. See `textureloader_dds.cpp::load().
					// 	// The stored file is a DDS.
				} else {
					ERR_CONTINUE_MSG(true, "The embedded image with extension: " + extension + " is not yet supported. Open an issue please.");
				}

				Ref<ImageTexture> image_texture;
				image_texture.instance();
				image_texture->create_from_image(image);

				texture = image_texture;

				// TODO: this is potentially making something with the same name have a match incorrectly USE FBX ID as Hash. #fuck it later.
				state.cached_image_searches[texture_name] = texture;
				print_verbose("Created texture from embedded image.");
			} else {
				ERR_CONTINUE_MSG(true, "The FBX texture, with name: `" + texture_name + "`, is not found into the project nor is stored as embedded file. Make sure to insert the texture as embedded file or into the project, then reimport.");
			}
		}

		spatial_material->set_texture(mapping_mode, texture);
	}

	if (spatial_material.is_valid()) {
		spatial_material->set_name(material_name);
	}

	/// ALL below is related to properties
	// --- Property mapping pass ------------------------------------------
	for (FBXDocParser::LazyPropertyMap::value_type iter : material->Props()->GetLazyProperties()) {
		const std::string name = iter.first;

		if (name.empty()) {
			continue;
		}

		PropertyDesc desc = PROPERTY_DESC_NOT_FOUND;
		if (fbx_properties_desc.count(name) > 0) {
			desc = fbx_properties_desc.at(name);
		}

		// check if we can ignore this it will be done at the next phase
		if (desc == PROPERTY_DESC_NOT_FOUND || desc == PROPERTY_DESC_IGNORE) {
			// count the texture mapping references. Skip this one if it's found and we can't look up a property value.
			if (fbx_texture_map.count(name) > 0) {
				continue; // safe to ignore it's a texture mapping.
			}
		}

		if (desc == PROPERTY_DESC_IGNORE) {
			//WARN_PRINT("[Ignored] The FBX material parameter: `" + String(name.c_str()) + "` is ignored.");
			continue;
		} else {
			print_verbose("FBX Material parameter: " + String(name.c_str()));

			// Check for Diffuse material system / lambert materials / legacy basically
			if (name == "Diffuse" && !warning_non_pbr_material) {
				ValidationTracker::get_singleton()->add_validation_error(state.path, "Invalid material settings change to Ai Standard Surface shader, mat name: " + material_name.c_escape());
				warning_non_pbr_material = true;
			}
		}

		// DISABLE when adding support for all weird and wonderful material formats
		if (desc == PROPERTY_DESC_NOT_FOUND) {
			continue;
		}

		ERR_CONTINUE_MSG(desc == PROPERTY_DESC_NOT_FOUND, "The FBX material parameter: `" + String(name.c_str()) + "` was not recognized. Please open an issue so we can add the support to it.");

		const FBXDocParser::PropertyTable *tbl = material->Props();
		FBXDocParser::PropertyPtr prop = tbl->Get(name);

		ERR_CONTINUE_MSG(prop == nullptr, "This file may be corrupted because is not possible to extract the material parameter: " + String(name.c_str()));

		if (spatial_material.is_null()) {
			// Done here so if no data no material is created.
			spatial_material.instance();
		}

		// Properties arrive either as a scalar (real_t) or a Vector3 color.
		const FBXDocParser::TypedProperty<real_t> *real_value = dynamic_cast<const FBXDocParser::TypedProperty<real_t> *>(prop);
		const FBXDocParser::TypedProperty<Vector3> *vector_value = dynamic_cast<const FBXDocParser::TypedProperty<Vector3> *>(prop);

		if (!real_value && !vector_value) {
			//WARN_PRINT("unsupported datatype in property: " + String(name.c_str()));
			continue;
		}

		// A zero vector is treated as "unset" and skipped.
		if (vector_value && !real_value) {
			if (vector_value->Value() == Vector3(0, 0, 0) && !real_value) {
				continue;
			}
		}

		switch (desc) {
			case PROPERTY_DESC_ALBEDO_COLOR: {
				if (vector_value) {
					const Vector3 &color = vector_value->Value();
					// Make sure to not lost any eventual opacity.
					if (color != Vector3(0, 0, 0)) {
						Color c = Color();
						c[0] = color[0];
						c[1] = color[1];
						c[2] = color[2];
						spatial_material->set_albedo(c);
					}

				} else if (real_value) {
					print_error("albedo is unsupported format?");
				}
			} break;
			case PROPERTY_DESC_TRANSPARENT: {
				if (real_value) {
					const real_t opacity = real_value->Value();
					if (opacity < (1.0 - CMP_EPSILON)) {
						// Fold the scalar opacity into the albedo alpha channel.
						Color c = spatial_material->get_albedo();
						c.a = opacity;
						spatial_material->set_albedo(c);

						spatial_material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA);
						spatial_material->set_depth_draw_mode(BaseMaterial3D::DEPTH_DRAW_OPAQUE_ONLY);
					}
				} else if (vector_value) {
					print_error("unsupported transparent desc type vector!");
				}
			} break;
			case PROPERTY_DESC_SPECULAR: {
				if (real_value) {
					print_verbose("specular real value: " + rtos(real_value->Value()));
					spatial_material->set_specular(MIN(1.0, real_value->Value()));
				}

				if (vector_value) {
					print_error("unsupported specular vector value: " + vector_value->Value());
				}
			} break;

			case PROPERTY_DESC_SPECULAR_COLOR: {
				if (vector_value) {
					print_error("unsupported specular color: " + vector_value->Value());
				}
			} break;
			case PROPERTY_DESC_SHINYNESS: {
				if (real_value) {
					print_error("unsupported shinyness:" + rtos(real_value->Value()));
				}
			} break;
			case PROPERTY_DESC_METALLIC: {
				if (real_value) {
					print_verbose("metallic real value: " + rtos(real_value->Value()));
					spatial_material->set_metallic(MIN(1.0f, real_value->Value()));
				} else {
					print_error("unsupported value type for metallic");
				}
			} break;
			case PROPERTY_DESC_ROUGHNESS: {
				if (real_value) {
					print_verbose("roughness real value: " + rtos(real_value->Value()));
					spatial_material->set_roughness(MIN(1.0f, real_value->Value()));
				} else {
					print_error("unsupported value type for roughness");
				}
			} break;
			case PROPERTY_DESC_COAT: {
				if (real_value) {
					print_verbose("clearcoat real value: " + rtos(real_value->Value()));
					spatial_material->set_clearcoat(MIN(1.0f, real_value->Value()));
				} else {
					print_error("unsupported value type for clearcoat");
				}
			} break;
			case PROPERTY_DESC_COAT_ROUGHNESS: {
				// meaning is that approx equal to zero is disabled not actually zero. ;)
				// NOTE(review): this condition applies the gloss ONLY when the
				// value is approximately zero, which contradicts the comment
				// above (zero means disabled) — looks inverted; confirm intent.
				if (real_value && Math::is_equal_approx(real_value->Value(), 0.0f)) {
					print_verbose("clearcoat real value: " + rtos(real_value->Value()));
					spatial_material->set_clearcoat_gloss(1.0 - real_value->Value());
				} else {
					print_error("unsupported value type for clearcoat gloss");
				}
			} break;
			case PROPERTY_DESC_EMISSIVE: {
				// NOTE(review): the scalar branch sets the emission energy
				// only when the value is approximately zero — looks inverted
				// (probably meant !is_equal_approx); confirm before changing.
				if (real_value && Math::is_equal_approx(real_value->Value(), 0.0f)) {
					print_verbose("Emissive real value: " + rtos(real_value->Value()));
					spatial_material->set_emission_energy(real_value->Value());
				} else if (vector_value && !vector_value->Value().is_equal_approx(Vector3(0, 0, 0))) {
					const Vector3 &color = vector_value->Value();
					Color c;
					c[0] = color[0];
					c[1] = color[1];
					c[2] = color[2];
					spatial_material->set_emission(c);
				}
			} break;
			case PROPERTY_DESC_EMISSIVE_COLOR: {
				if (vector_value && !vector_value->Value().is_equal_approx(Vector3(0, 0, 0))) {
					const Vector3 &color = vector_value->Value();
					Color c;
					c[0] = color[0];
					c[1] = color[1];
					c[2] = color[2];
					spatial_material->set_emission(c);
				} else {
					print_error("unsupported value type for emissive color");
				}
			} break;
			case PROPERTY_DESC_NOT_FOUND:
			case PROPERTY_DESC_IGNORE:
				break;
			default:
				break;
		}
	}

	return spatial_material;
}
|
<reponame>kk-kien/blockfrost-haskell
-- | Network queries
module Blockfrost.Client.Cardano.Network
( getNetworkInfo
) where
import Blockfrost.API
import Blockfrost.Client.Types
import Blockfrost.Types
-- | Accessor for the network sub-API of the Cardano client.
networkClient :: MonadBlockfrost m => Project -> NetworkAPI (AsClientT m)
networkClient = fromServant . _network . cardanoClient

-- | Project-explicit variant of 'getNetworkInfo'.
getNetworkInfo_ :: MonadBlockfrost m => Project -> m Network
getNetworkInfo_ = _networkInfo . networkClient

-- | Get detailed network information.
getNetworkInfo :: MonadBlockfrost m => m Network
getNetworkInfo = go getNetworkInfo_
|
def upper_diagonal_constraint(board: Board, row: int, col: int) -> bool:
    """Check that no queen occupies the upper-left diagonal of (row, col).

    Walks from (row - 1, col - 1) toward the top-left corner; returns False
    on the first queen found, True when the whole diagonal is clear.
    """
    for offset in range(1, min(row, col) + 1):
        if board.is_queen(row=row - offset, col=col - offset):
            return False
    return True
<filename>acq2bva/runners/acq2bva_args.py
import argparse
from pathlib import Path
from acq2bva.__version__ import __version__
from acq2bva.runners.acq2bva_text import ACQ2BVA_USAGE
def create_parser():
    """Build the argparse parser for the acq2bva command line.

    Help is handled manually (add_help=False) so that repeated -h flags can
    be counted; positional arguments are collected verbatim into `rest`.
    """
    parser = argparse.ArgumentParser(
        prog="acq2bva", usage=ACQ2BVA_USAGE, add_help=False
    )
    # Count -h occurrences instead of auto-printing help.
    parser.add_argument("-h", "--help", action="count", default=0)
    parser.add_argument(
        "-v", "--version", action="version", version="%(prog)s version " + __version__
    )
    parser.add_argument("-p", "--pc", "--print-channels", action="store_true", dest="print_channels")
    # Remaining (positional) arguments, e.g. input/output paths.
    parser.add_argument("rest", nargs=argparse.REMAINDER)

    # Channels
    parser.add_argument(
        "-c",
        "--ci",
        "--channel-indexes",
        nargs="+",
        type=int,
        dest="channel_indexes",
    )
    parser.add_argument(
        "-n",
        "--names",
        "--channel-names",
        nargs="+",
        type=str,
        dest="channel_names",
    )
    parser.add_argument(
        "--sc",
        "--scales",
        "--channel-scales",
        nargs="+",
        type=str,
        dest="channel_scales",
    )
    parser.add_argument(
        "-u",
        "--units",
        "--channel-units",
        nargs="+",
        type=str,
        dest="channel_units",
    )

    # Markers
    parser.add_argument(
        "-m",
        "--markers",
        type=bool,
        dest="write_markers",
    )
    parser.add_argument(
        "--mc",
        "--marker-channel",
        action="store",
        type=int,
        dest="marker_channel_index",
    )
    parser.add_argument(
        "--mf",
        "--marker-map-file",
        action="store",
        type=Path,
        dest="marker_map",
    )
    parser.add_argument(
        "--em",
        "--expected-nr-markers",
        action="store",
        type=int,
        dest="expected_nr_markers"
    )

    # Header settings file
    parser.add_argument(
        "--hs",
        "--header-settings",
        action="store",
        type=Path,
        dest="header_settings",
    )

    # General settings file
    parser.add_argument(
        "-s",
        "--settings",
        action="store",
        type=Path,
        dest="settings",
    )

    return parser
/**
* Adds SMC guards to the code
* @param obj
*/
public static StringBuffer addGuardFromTransition(Object obj)
{
StringBuffer localBuffer = new StringBuffer();
if(obj instanceof org.jts.jsidl.binding.Transition)
{
org.jts.jsidl.binding.Transition tr = (org.jts.jsidl.binding.Transition)obj;
Guard.fixGuardContext(tr.getGuard());
localBuffer.append("[" + tr.getGuard().getCondition() + "]");
}
else if(obj instanceof org.jts.jsidl.binding.DefaultTransition)
{
org.jts.jsidl.binding.DefaultTransition dtr = (org.jts.jsidl.binding.DefaultTransition)obj;
Guard.fixGuardContext(dtr.getGuard());
localBuffer.append("[" + dtr.getGuard().getCondition() + "]");
}
return localBuffer;
} |
<reponame>wjaspers/ag-grid
// Type definitions for @ag-grid-community/core v24.0.0
// Project: http://www.ag-grid.com/
// Definitions by: <NAME> <https://github.com/ag-grid/>
import { IAgLabel } from './agAbstractLabel';
import { AgAbstractField, FieldElement } from './agAbstractField';
/** Configuration shared by all input-style fields; extends the label options. */
export interface IInputField extends IAgLabel {
    /** Initial value for the underlying input element. */
    value?: any;
    /** Fixed width of the field, in pixels. */
    width?: number;
}
/**
 * Base class for widgets that wrap a single HTML input element
 * (text inputs, checkboxes, etc.), combining a label, a wrapper
 * element and the input itself.
 */
export declare abstract class AgAbstractInputField<TElement extends FieldElement, TValue, TConfig extends IInputField = IInputField> extends AgAbstractField<TValue, TConfig> {
    private readonly inputType;
    protected readonly eLabel: HTMLElement;
    protected readonly eWrapper: HTMLElement;
    protected readonly eInput: TElement;
    constructor(config?: TConfig, className?: string, inputType?: string, displayFieldTag?: string);
    protected postConstruct(): void;
    protected refreshLabel(): void;
    protected addInputListeners(): void;
    private setInputType;
    /** Returns the wrapped input element. */
    getInputElement(): TElement;
    /** Sets a pixel width, or 'flex' to let the input grow. */
    setInputWidth(width: number | 'flex'): this;
    setInputName(name: string): this;
    getFocusableElement(): HTMLElement;
    setMaxLength(length: number): this;
    setInputPlaceholder(placeholder: string): this;
    setInputAriaLabel(label: string): this;
    setDisabled(disabled: boolean): this;
}
|
Carrier transport in flexible organic bistable devices of ZnO nanoparticles embedded in an insulating poly(methyl methacrylate) polymer layer
The bistable effects of ZnO nanoparticles embedded in an insulating poly(methyl methacrylate) (PMMA) polymer single layer by using flexible polyethylene terephthalate (PET) substrates were investigated. Transmission electron microscopy (TEM) images revealed that ZnO nanoparticles were formed inside the PMMA polymer layer. Current–voltage (I–V) measurement on the Al/ZnO nanoparticles embedded in an insulating PMMA polymer layer/ITO/PET structures at 300 K showed a nonvolatile electrical bistability behavior with a flat-band voltage shift due to the existence of the ZnO nanoparticles, indicative of trapping, storing, and emission of charges in the electronic states of the ZnO nanoparticles. The carrier transport mechanism of the bistable behavior for the fabricated organic bistable device (OBD) structures is described on the basis of the I–V results by analyzing the effect of space charge. |
<reponame>misershaad/nineanime-grabber
from lxml import html
import requests
# -----------------------------------------------------------------------
def gen_client(**kwargs):
    """Create a requests Session preloaded with a Firefox User-Agent.

    Any keyword arguments are merged into (and may override) the
    default headers.
    """
    session = requests.Session()
    default_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:97.0) Gecko/20100101 Firefox/97.0',
    }
    default_headers.update(kwargs)
    session.headers.update(default_headers)
    return session
# -----------------------------------------------------------------------
def process_xpath(xpath_str: str, data, raw: bool = True):
    """Evaluate an XPath expression against HTML content.

    :param xpath_str: the XPath expression to evaluate
    :param data: raw HTML text when ``raw`` is True; an already-parsed
        lxml element/document when ``raw`` is False
    :param raw: whether ``data`` still needs to be parsed with lxml
    :return: whatever the expression evaluates to (commonly a list of
        matched nodes or strings)
    """
    if not raw: return data.xpath(xpath_str)
    doc = html.fromstring(data)
    return doc.xpath(xpath_str)
|
// Compile-fail fixture: a struct containing itself by value has no
// finite size, so rustc must reject it. The `//~ ERROR` marker is
// matched by the test harness against the expected diagnostic on the
// same line -- do not "fix" this type.
struct ListNode { //~ ERROR has infinite size
    head: u8,
    tail: Option<ListNode>,
}

fn main() {
}
|
/**
* <p>
* Outputs a JavaDoc tag as information. Can be used e.g. with the stylesheets
* that sort the report by author name.
* To define the format for a tag, set property tagFormat to a
* regular expression.
* This check uses two different severity levels. The normal one is used for
* reporting when the tag is missing. The additional one (tagSeverity) is used
* for the level of reporting when the tag exists. The default value for
* tagSeverity is info.
* </p>
* <p> An example of how to configure the check for printing author name is:
*</p>
* <pre>
* <module name="WriteTag">
* <property name="tag" value="@author"/>
* <property name="tagFormat" value="\S"/>
* </module>
* </pre>
* <p> An example of how to configure the check to print warnings if an
* "@incomplete" tag is found, and not print anything if it is not found:
*</p>
* <pre>
* <module name="WriteTag">
* <property name="tag" value="@incomplete"/>
* <property name="tagFormat" value="\S"/>
* <property name="severity" value="ignore"/>
* <property name="tagSeverity" value="warning"/>
* </module>
* </pre>
*
*/
@StatelessCheck
public class WriteTagCheck
    extends AbstractCheck {

    /**
     * A key is pointing to the warning message text in "messages.properties"
     * file.
     */
    public static final String MSG_MISSING_TAG = "type.missingTag";

    /**
     * A key is pointing to the warning message text in "messages.properties"
     * file.
     */
    public static final String MSG_WRITE_TAG = "javadoc.writeTag";

    /**
     * A key is pointing to the warning message text in "messages.properties"
     * file.
     */
    public static final String MSG_TAG_FORMAT = "type.tagFormat";

    /** Compiled regexp to match tag. **/
    private Pattern tagRegExp;
    /** Compiled regexp to match tag content. **/
    private Pattern tagFormat;

    /** Regexp to match tag. */
    private String tag;
    /** The severity level of found tag reports. */
    private SeverityLevel tagSeverity = SeverityLevel.INFO;

    /**
     * Sets the tag to check.
     * @param tag tag to check
     */
    public void setTag(String tag) {
        this.tag = tag;
        // Capture everything after the tag name (group 1) so the tag's
        // content can be validated against tagFormat and echoed in reports.
        tagRegExp = CommonUtil.createPattern(tag + "\\s*(.*$)");
    }

    /**
     * Set the tag format.
     * @param pattern a {@code String} value
     */
    public void setTagFormat(Pattern pattern) {
        tagFormat = pattern;
    }

    /**
     * Sets the tag severity level.
     *
     * @param severity The new severity level
     * @see SeverityLevel
     */
    public final void setTagSeverity(SeverityLevel severity) {
        tagSeverity = severity;
    }

    @Override
    public int[] getDefaultTokens() {
        return new int[] {TokenTypes.INTERFACE_DEF,
                          TokenTypes.CLASS_DEF,
                          TokenTypes.ENUM_DEF,
                          TokenTypes.ANNOTATION_DEF,
        };
    }

    @Override
    public int[] getAcceptableTokens() {
        return new int[] {TokenTypes.INTERFACE_DEF,
                          TokenTypes.CLASS_DEF,
                          TokenTypes.ENUM_DEF,
                          TokenTypes.ANNOTATION_DEF,
                          TokenTypes.METHOD_DEF,
                          TokenTypes.CTOR_DEF,
                          TokenTypes.ENUM_CONSTANT_DEF,
                          TokenTypes.ANNOTATION_FIELD_DEF,
        };
    }

    @Override
    public int[] getRequiredTokens() {
        return CommonUtil.EMPTY_INT_ARRAY;
    }

    @Override
    public void visitToken(DetailAST ast) {
        // Look for the javadoc block immediately preceding the definition;
        // its absence is reported as a missing tag.
        final FileContents contents = getFileContents();
        final int lineNo = ast.getLineNo();
        final TextBlock cmt =
            contents.getJavadocBefore(lineNo);
        if (cmt == null) {
            log(lineNo, MSG_MISSING_TAG, tag);
        }
        else {
            checkTag(lineNo, cmt.getText());
        }
    }

    /**
     * Verifies that a type definition has a required tag.
     * @param lineNo the line number for the type definition.
     * @param comment the Javadoc comment for the type definition.
     */
    private void checkTag(int lineNo, String... comment) {
        if (tagRegExp != null) {
            int tagCount = 0;
            for (int i = 0; i < comment.length; i++) {
                final String commentValue = comment[i];
                final Matcher matcher = tagRegExp.matcher(commentValue);
                if (matcher.find()) {
                    tagCount += 1;
                    final int contentStart = matcher.start(1);
                    final String content = commentValue.substring(contentStart);
                    // lineNo + i - comment.length maps the i-th comment line
                    // back to its physical line (the javadoc ends just above
                    // the definition at lineNo).
                    if (tagFormat == null || tagFormat.matcher(content).find()) {
                        logTag(lineNo + i - comment.length, tag, content);
                    }
                    else {
                        log(lineNo + i - comment.length, MSG_TAG_FORMAT, tag, tagFormat.pattern());
                    }
                }
            }
            if (tagCount == 0) {
                log(lineNo, MSG_MISSING_TAG, tag);
            }
        }
    }

    /**
     * Log a message.
     *
     * @param line the line number where the error was found
     * @param tagName the javadoc tag to be logged
     * @param tagValue the contents of the tag
     *
     * @see java.text.MessageFormat
     */
    private void logTag(int line, String tagName, String tagValue) {
        // Temporarily switch to the "found tag" severity, then restore the
        // check's normal severity.
        final String originalSeverity = getSeverity();
        setSeverity(tagSeverity.getName());
        log(line, MSG_WRITE_TAG, tagName, tagValue);
        setSeverity(originalSeverity);
    }
}
#include <testthat.h>

typedef std::vector<int> sequence;

sequence last_n (const sequence&, int);

context("last_n") {
  test_that("example 1") {
    sequence input;
    for (int value = 1; value <= 3; ++value) {
      input.push_back(value);
    }

    // Requesting zero elements yields an empty result.
    sequence output = last_n(input, 0);
    expect_true(output.empty());

    // Requesting one element yields only the final value.
    output = last_n(input, 1);
    expect_true(output.size() == 1);
    expect_true(output[0] == 3);

    // Requesting two elements preserves their original order.
    output = last_n(input, 2);
    expect_true(output.size() == 2);
    expect_true(output[0] == 2);
    expect_true(output[1] == 3);
  }

  test_that("example 2") {
    // The empty sequence is handled gracefully.
    sequence empty_input;
    sequence empty_output = last_n(empty_input, 0);
    expect_true(empty_output.empty());
  }
}
|
<reponame>MathewMacDougall/Two-Faced-Type
import unittest
from unittest.mock import MagicMock
from stl import read_stl, write_stl
import pathlib
from face_factory import FaceFactory
from util import *
from geom_removal import remove_redundant_geometry
from combiner import combine_words
from OCC.Display.SimpleGui import init_display
from OCCUtils.Common import GpropsFromShape
# display, start_display, _, _ = init_display()
# Headless test run: replace the OCC display handles with mocks so the
# suite runs without opening a GUI window; swap in the init_display()
# line above for visual debugging.
display, start_display, _, _ = MagicMock(), MagicMock(), None, None
class TestSolidFaceValidator(unittest.TestCase):
    """Regression tests comparing generated letter solids to golden STL files."""

    def setUp(self):
        face_images_dir = pathlib.Path(__file__).parent / "face_images/aldrich"
        self.face_factory = FaceFactory(face_images_dir)
        self.height_mm = 50
        self.test_data_dir = pathlib.Path(__file__).parent / "test_data"
        # Use a unittest assertion (rather than a bare assert) so the check
        # still runs under python -O.
        self.assertTrue(self.test_data_dir.is_dir())

    def assert_mass_eq(self, letter, golden):
        """Assert that two shapes enclose (almost) the same volume."""
        letter_props = GpropsFromShape(letter)
        letter_mass = letter_props.volume().Mass()
        golden_props = GpropsFromShape(golden)
        golden_mass = golden_props.volume().Mass()
        self.assertAlmostEqual(letter_mass, golden_mass, delta=0.1)

    def assert_center_of_mass_eq(self, letter, golden):
        """Assert that two shapes have (almost) the same centre of mass."""
        letter_props = GpropsFromShape(letter)
        letter_com = letter_props.volume().CentreOfMass()
        golden_props = GpropsFromShape(golden)
        golden_com = golden_props.volume().CentreOfMass()
        self.assertLess(distance(letter_com, golden_com), 0.01)

    # Previously disabled with a bare `return` at the top of the test, which
    # silently reported a pass; @unittest.skip reports the skip explicitly.
    @unittest.skip("getting negative mass; need a better way to load and compare shapes")
    def test_HE(self):
        letters, faces1, faces2 = combine_words("H", "E", self.face_factory, self.height_mm)
        # NOTE(review): remove_redundant_geometry is applied twice (here and
        # on the next line) -- confirm this is intended.
        letters = remove_redundant_geometry(letters)
        letter = remove_redundant_geometry(letters)[0]
        golden_file = self.test_data_dir / "HE.stl"
        golden_stl = read_stl(golden_file)
        # TODO: Why is this needed to write files?
        # display.DisplayShape(letter)
        # start_display()
        # write_stl(letter, golden_file)
        # self.assert_mass_eq(letter, golden_stl)
        # self.assert_center_of_mass_eq(letter, golden_stl)

    @unittest.skip("getting negative mass; need a better way to load and compare shapes")
    def test_VT(self):
        letters, faces1, faces2 = combine_words("V", "T", self.face_factory, self.height_mm)
        # NOTE(review): remove_redundant_geometry is applied twice (here and
        # on the next line) -- confirm this is intended.
        letters = remove_redundant_geometry(letters)
        letter = remove_redundant_geometry(letters)[0]
        golden_file = self.test_data_dir / "VT.stl"
        golden_stl = read_stl(golden_file)
        # TODO: Why is this needed to write files?
        # display.DisplayShape(letter)
        # start_display()
        # write_stl(letter, golden_file)
        self.assert_mass_eq(letter, golden_stl)
        self.assert_center_of_mass_eq(letter, golden_stl)
|
// Get node value as float64
func (this *Node) F64(namespace, name string) float64 {
value := this.S(namespace, name)
if value != "" {
n, _ := strconv.ParseFloat(value, 64)
return n
}
return 0
} |
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	/* mtd may refer to a partition; walk up to the master device,
	 * which covers the whole chip. The cast drops const because
	 * mtd_get_master() takes a non-const pointer -- presumably it
	 * does not modify the device (confirm against its definition). */
	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
	return master->size;
}
/**
* Program is an ordered set of string literals and expressions
* string literal: Any continuous string that is not an expression
* expression: object.function(param1,param2,..).function(param1,param2..)
* param1 could be another expression
*
* evaluating an expression
* 1. evaluate each param
* 2. identify object
* 3. perform function on the object
*
* usage
*********
* Program p = new Program(args);
* String resString = p.execute(new WordEvaluator(line));
*
* Interpretation
* Spit out every argument from args, and replace the $ signs with words from line
*
*
*/
public class Program
{
static int ST_LITERAL = 0;
static int ST_OBJECT = 1;
Vector m_exprVector = null;
public Program(String programString)
{
m_exprVector = getProgramVector(programString);
}
public Program(Vector strings)
{
m_exprVector = new Vector();
for(Enumeration e=strings.elements();e.hasMoreElements();)
{
String curString = (String)e.nextElement();
if (curString.charAt(0) == '$')
{
// expression
m_exprVector.addElement(new Expression(curString.substring(1)));
}
else
{
if (m_exprVector.size() == 0)
{ // first element
m_exprVector.addElement(new Literal(curString + " "));
}
else
{ // non-first element
m_exprVector.addElement(new Literal( " " + curString + " "));
}
}
}
}
public Program(String strings[])
{
m_exprVector = new Vector();
for(int i=0;i<strings.length;i++)
{
String curString = strings[i];
if (curString.charAt(0) == '$')
{
// expression
m_exprVector.addElement(new Expression(curString.substring(1)));
}
else
{
if (m_exprVector.size() == 0)
{ // first element
m_exprVector.addElement(new Literal(curString + " "));
}
else
{
m_exprVector.addElement(new Literal(" " + curString + " "));
}
}
}
}
public String execute(IEvaluator eval)
{
StringBuffer buf = new StringBuffer();
for(Enumeration e=m_exprVector.elements();e.hasMoreElements();)
{
BasePart bp = (BasePart)e.nextElement();
buf.append(bp.evaluate(eval));
}
return buf.toString();
}
private Vector getProgramVector(String inStr)
{
Vector exprVector = new Vector();
StringBuffer curLiteral = new StringBuffer();
StringBuffer curExpression = new StringBuffer();
int curState = ST_LITERAL;
for(int i=0;i<inStr.length();i++)
{
int curChar = inStr.charAt(i);
if (curState == ST_LITERAL)
{
if (curChar != '$')
{
curLiteral.append(curChar);
}
else
{
// state change
if (curLiteral.length() > 0)
{
exprVector.addElement(new Literal(curLiteral.toString()));
curLiteral = new StringBuffer();
}
curState = ST_OBJECT;
}
continue;
}
if (curState == ST_OBJECT)
{
if(isNumber(curChar))
{
curExpression.append(curChar);
}
else if(curChar == '*')
{
curExpression.append(curChar);
}
else
{
exprVector.addElement(new Expression(curExpression.toString()));
curExpression = new StringBuffer();
curLiteral.append(curChar);
curState = ST_LITERAL;
}
continue;
}
}// for each character
return exprVector;
}// end of function
private boolean isNumber(int inChar)
{
return (inChar >= '0' && inChar <= '9') ? true:false;
}
} |
/**
* Remove a publication from the researcher folder
*
* @param rp
* @return true if the publication is removed.
*/
public boolean removeResearcherPublication(ResearcherPublication rp)
{
boolean removed = false;
if( publications.contains(rp))
{
removed = publications.remove(rp);
rp.setParentFolder(null);
}
return removed;
} |
Imaging features of hepatic angiomyolipomas on real-time contrast-enhanced ultrasound.
The aim of this study was to evaluate the imaging features of hepatic angiomyolipoma (AML) on contrast-enhanced ultrasound (CEUS). The imaging features of 12 pathologically proven hepatic AML lesions in 10 patients who had undergone baseline ultrasound (BUS) and CEUS examinations were evaluated retrospectively. The enhancement extent, pattern and dynamic change, along with the enhancement process, on CEUS were analysed. The diagnostic results of BUS and CEUS before pathological examination were also recorded. The results showed that 75% (9/12) of the AML lesions exhibited mixed echogenicity on BUS and most showed remarkable hyperechogenicity in combination with a hypoechoic or anechoic portion. Arterial flow signals were detected in 75% (9/12) of the lesions on colour Doppler imaging. On CEUS, 66.7% (n = 8) of the 12 lesions exhibited hyperenhancement in the arterial phase, slight hyperenhancement (n = 2) or isoenhancement (n = 6) in the portal phase, and slight hyperenhancement (n = 1) or isoenhancement (n = 7) in the late phase. Three (25%) lesions exhibited hyperenhancement in the arterial phase and hypoenhancement in both portal and late phases. One (8.3%) lesion exhibited hypoenhancement throughout the CEUS process. Before pathological examination with BUS, only 3 (25%) lesions were correctly diagnosed as hepatic AML. Conversely, on CEUS, correct diagnoses were made for 66.8% (8/12) of hepatic AMLs. Therefore, arterial hyperenhancement and subsequent sustained enhancement on CEUS were found in the majority of hepatic AMLs. The combination of BUS and CEUS leads to the correct diagnosis in the majority of hepatic AMLs, and is higher than the success rate achieved by BUS alone. |
/**
* Instances of this class set the ModelView and Projection matrices
* so that the scene can be rendered from a given camera.
*
* Differently from the LookThroughNormalized state change, a normal player camera bobs up and down
* when the player moves and bobbing is enabled.
*
* The default instance of this class resets both matrices to identity matrices, opengl's default.
*/
public class LookThrough implements StateChange {
private static LookThrough defaultInstance = new LookThrough();
private Camera camera;
private RenderPipelineTask task;
/**
* Constructs an instance of this class initialised with the given camera.
*
* @param camera An instance implementing the Camera interface.
*/
public LookThrough(Camera camera) {
this.camera = checkNotNull(camera);
}
// this constructor is used to generate the default instance
private LookThrough() { }
/**
* Returns a task configured to set the modelview and projection matrixes so that the scene
* is seen through the camera passed to the constructor.
*
* If the LookThrough instance is the default one, the task returned resets the matrices
* to opengl's default (identity matrices).
*
* @return an instance implementing the RenderPipelineTask interface
*/
@Override
public RenderPipelineTask generateTask() {
if (task == null) {
if (camera != null) {
task = new LookThroughTask(camera);
} else {
task = new LookThroughDefaultCameraTask();
}
}
return task;
}
@Override
public int hashCode() {
return Objects.hashCode(camera);
}
@Override
public boolean equals(Object obj) {
return (obj instanceof LookThrough) && camera == ((LookThrough) obj).camera;
}
/**
* Returns an instance of this class configured to generate a task resetting the ModelView and
* Projection matrices back to opengl's default (identity matrices).
*
* @return the default instance of the LookThrough class
*/
@Override
public StateChange getDefaultInstance() {
return defaultInstance;
}
@Override
public boolean isTheDefaultInstance() {
return this.equals(defaultInstance);
}
@Override
public String toString() {
if (this.isTheDefaultInstance()) {
return String.format("%30s: %s", this.getClass().getSimpleName(), "default opengl camera");
} else {
return String.format("%30s: %s", this.getClass().getSimpleName(), camera.toString());
}
}
private class LookThroughTask implements RenderPipelineTask {
private Camera camera;
/**
* Constructs an instance of this class initialized with the given camera.
*
* @param camera an instance implementing the Camera interface
*/
private LookThroughTask(Camera camera) {
this.camera = camera;
}
@Override
public void execute() {
camera.lookThrough();
}
@Override
public String toString() {
return String.format("%30s: %s", this.getClass().getSimpleName(), camera.toString());
}
}
} |
A Security-Enhanced UHF RFID Tag Chip
The integration of strong security functionality to radio-frequency identification (RFID) tags operating in the ultra-high frequency (UHF) range is challenging. Main limiting factors are chip size and power consumption. In this work we present the design of the digital part of a security-enhanced UHF RFID tag that uses the Electronic Product Code (EPC) Generation-2 (Gen-2) standard for communication. The tag provides mutual-authentication functionality based on a challenge-response protocol and the Advanced Encryption Standard (AES). The stream cipher Grain is used for generating cryptographically secure random numbers during the authentication procedure. Moreover, the AES module on the tag has countermeasures integrated (shuffling of bytes and insertion of dummy rounds) to make so-called power analysis attacks less efficient. The digital part of the security-enhanced tag including AES and Grain modules can be implemented with 12000 GE (without non-volatile memory). The average power consumption during a full authentication round is 5 uW for a 130 nm low-leakage technology. The results clearly point out that both values chip size and power consumption fulfill the requirements of low-cost UHF RFID tags. |
/**
 * Database connection initializer.
 * Creates the persistence database connection.
 * */
public static void DBInit(){
    // Build the EntityManagerFactory for the persistence unit declared in
    // persistence.xml, then open the shared EntityManager used by this class.
    emf = Persistence.createEntityManagerFactory("jpa-persistence-unit-1");
    em = emf.createEntityManager();
    Logger.info("Database connection initialised and connected.");
}
package translatableerror

import (
	"strings"
)

// InterpolationError wraps an error raised during variable interpolation
// so its message can be flattened to one line and translated.
type InterpolationError struct {
	Err error
}

// Error returns the wrapped error's message with newlines collapsed to
// ", " so multi-line failures read as a single line.
func (e InterpolationError) Error() string {
	// strings.Replace already returns the final string; the previous
	// fmt.Sprint wrapper was redundant, so fmt is no longer imported.
	return strings.Replace(e.Err.Error(), "\n", ", ", -1)
}

// Translate renders the flattened message through the supplied
// translation function.
func (e InterpolationError) Translate(translate func(string, ...interface{}) string) string {
	return translate(e.Error())
}
From trying to look younger to convincing people you are more experienced, it seems few people are happy with the age they are. But beyond vanity there may be compelling reasons to cover-up your age on your resume.
Lisa Johnson Mandell says she faced age discrimination when she turned 49. After over 20 years in entertainment broadcasting, she was being overlooked for jobs she felt she should have been offered. Johnson Mandell felt her years of experience were an asset until her husband, Jim Mandell, president of a Hollywood voiceover agency, told her the truth; she was being rejected because she was considered to be too old.
Author of Career Comeback — Repackage Yourself to Get the Job You Want, Johnson Mandell says “In today’s economic environment it’s an employers’ market, with more job seekers than jobs. If a 20-something-year-old assistant is screening applicants and sees you graduated over 20 years ago, he may automatically think of his parents. Why give him ammunition to eliminate you?”
If you’re between the ages of 27 and 37, you needn’t worry; you’re in what Johnson Mandell calls the “sweet spot”–young enough to be hip and relevant but not old enough where you might appear to be out of it. If you’re not, you’ve got work to do. While she doesn’t recommend saying you’re 35 when you’re 45, Johnson Mandell does suggest that you do everything you can to cover up how old you are. She used to recommend hiding their age for people over 40, but she says it can also help those who are fresh out of school who want to appear older and more experienced.
No one cares about your GPA if you have no experience.
“When my niece graduated from Berkley, she felt her GPA was her biggest strength, so she put it at the top of her resume,” she says. “No one cares about your GPA if you have no experience. Always lead with your strength; discrimination can also happen if you appear too young.”
Johnson Mandell says there are three things you can do to make your resume timeless:
Whatever you were doing in 2000 is probably no longer relevant in today’s workplace. “The way we teach, work, manage, produce, create–everything has changed,” says Johnson Mandell.
def _get_service_file(service_url):
    """Return the path of the encrypted wallet file for a service URL.

    The URL is base64-encoded so it is safe to embed in a file name.
    """
    encoded_url = _base64.b64encode(service_url.encode("utf-8")).decode("utf-8")
    return "%s/service_%s_encrypted" % (ServiceWallet._wallet_dir(), encoded_url)
<reponame>forec-org/os-sumulator
//
// Created by 王耀 on 2017/9/23.
//
#include "gtest/gtest.h"
#include "printer.h"
#include "fs.h"
TEST(TESTPrinter, TEST_PRINT) {
    // Use a scratch filesystem rooted at ./fs for the duration of the test.
    FS::init("./fs");
    std::vector<std::string> ans = Printer::PRINT("-");
    // Join the printed lines with newlines so a single comparison suffices.
    // NOTE(review): `a` is copied on every iteration; `const auto&` would
    // avoid the copies.
    std::string res = "";
    for (std::string a : ans) {
        res += a + "\n";
    }
    // Expected banner output; whitespace inside the literals is significant.
    std::string standard = " \n"
            " \n"
            " \n"
            " \n"
            " \"\"\" \n"
            " \n"
            " \n"
            " \n";
    EXPECT_EQ(res, standard);
    FS::destroy();
}
def testGeometryInputs(self):
    """Geometry constructors should accept other Geometry objects as inputs."""
    p1 = ee.Geometry.Point([1, 2])
    p2 = ee.Geometry.Point([3, 4])
    line = ee.Geometry.LineString([p1, p2])
    # Because the inputs are computed geometries, the result must be backed
    # by the server-side LineString constructor rather than a client-side
    # coordinate list.
    self.assertIsInstance(line, ee.Geometry)
    self.assertEqual(
        ee.ApiFunction.lookup('GeometryConstructors.LineString'), line.func)
    self.assertEqual({'coordinates': ee.List([p1, p2])}, line.args)
class arm_disassemble:
    """disassemble incoming data into ARM instructions"""

    def __init__(self, ui, adr):
        self.ui = ui
        self.pc = adr
        # start out decoding 16-bit thumb opcodes
        self.state = 'thumb'

    def emit_thumb(self, opcode):
        """16 bit thumb instructions"""
        decoded = darm.disasm_thumb(opcode)
        text = str(decoded) if decoded else '?'
        self.ui.put('%08x: %04x %s\n' % (self.pc, opcode, text))
        self.pc += 2

    def emit_thumb2(self, opcode):
        """32 bit thumb instructions"""
        decoded = darm.disasm_thumb2(opcode)
        text = str(decoded) if decoded else '?'
        self.ui.put('%08x: %04x %04x %s\n' % (self.pc, opcode >> 16, opcode & 0xffff, text))
        self.pc += 4

    def emit16(self, x):
        """Feed one 16-bit word into the decoder state machine."""
        if self.state == 'thumb2':
            # second half of a 32-bit thumb2 opcode
            self.emit_thumb2((self.save_x << 16) | x)
            # back to 16 bit mode
            self.state = 'thumb'
            return
        assert self.state == 'thumb'
        if ((x & 0xe000) == 0xe000) and (x & 0x1800):
            # thumb2 prefix: stash it and wait for the low half
            self.save_x = x
            self.state = 'thumb2'
        else:
            # plain 16-bit thumb opcode
            self.emit_thumb(x)

    def wr32(self, data):
        """Feed a 32-bit word, low half first."""
        self.emit16(data & 0xffff)
        self.emit16(data >> 16)

    def has_rd(self, n):
        """no read supported"""
        return False

    def has_wr(self, n):
        """wr32 supported"""
        return n == 32
Comparative study of local search in SWAP and agglomerative neighbourhoods for the continuous p-median problem
One of the classical NP-hard problems of location theory is the p-median problem. In this paper, we investigate the comparative efficiency of searching for a solution to this problem in SWAP-neighborhoods, as well as in new AGGL-neighborhoods, the concept of which is introduced in this paper. A higher comparative efficiency in AGGL-neighborhoods is shown, as well as the dependence of the search result on the neighborhood parameter, the selection of the optimal values of which requires additional research. |
module Main where

import Test.Hspec
import Test.QuickCheck
import Control.Exception (evaluate)

import qualified ArrayTests
import qualified GraphTests
import qualified HeapTests
import qualified StackTests
import qualified TableTests
import qualified TreeTests

-- | Test-suite entry point: runs every data-structure spec under a
-- single hspec tree so one executable reports all results.
main :: IO ()
main = hspec $ do
  ArrayTests.arrayTests
  GraphTests.graphTests
  HeapTests.heapTests
  StackTests.stackTests
  TableTests.tableTests
  TreeTests.treeTests
def ANY_IN_MULTISELECT(input, reference):
    """Return True if any value in ``reference`` appears among the
    space-separated tokens of ``input``.

    ``input`` may be None or empty; both are treated as the empty string.
    Note that ''.split(' ') yields [''], so an empty-string entry in
    ``reference`` matches an empty ``input`` -- this mirrors the original
    behaviour.
    """
    # Tokenize once and use a generator so the scan short-circuits on the
    # first match (the previous version built a full list before any()).
    tokens = (input or '').split(' ')
    return any(subval in tokens for subval in reference)
ATR today urged support of the Payroll tax cut compromise bill. COGC is particularly pleased with the pay-fors inserted by House Republicans; the bill will offer the single largest repeal of Obamacare to date, rescinding $5 billion from a Centers for Disease Control and Prevention (CDC) fund created by the health care law.
While extending tax relief for American workers and employers, the bill will slash $12 billion in spending from the health care overhaul. The most significant savings come from the $5 billion cut from the CDC’s Prevention and Public Health Fund. The Obamacare slush fund is used to distribute grants to localities, which in turn use the money to lobby for prohibitive excise taxes and restrictions on consumer goods. Already, these grants have amounted to millions to fund campaigns (link) against beverages, tobacco and other favorite. New York City alone has been allocated $31.1 million, which it has used to promote disingenuous surveys on public support for a soda tax.
The bill also makes important reforms to unemployment benefits – a good step towards ending the practice of borrowing money to pay Americans not to work. The bill requires unemployment benefits to drop from 99 weeks to either 73 or 63, depending on the state’s unemployment rate. The legislation also adjusts eligibility requirements to increase accountability in welfare spending.
These cuts are sure to have the nanny-statists in an uproar, but allowing Americans to keep their own money while cutting wasteful, illegal spending is an important victory for taxpayers. COGC applauds the House Republicans in eliminating wasteful Obamacare spending and making essential unemployment reforms.
<gh_stars>0
//
// Copyright (C) 2018 Pharap (@Pharap)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma once
#include <Arduboy2.h>
// Layout constants for the dialogue text box.
struct DefaultTextBoxSettings
{
	// Text grid dimensions.
	static constexpr uint8_t RowCount = 2;
	static constexpr uint8_t ColumnCount = 21;

	// Cursor position relative to the box.
	static constexpr uint8_t CursorXOffset = 3;
	static constexpr uint8_t CursorYOffset = 2;

	static constexpr uint8_t LineHeight = 8;

	// Box placement: anchored to the bottom of the 128x64 screen.
	static constexpr uint8_t X = 1;
	static constexpr uint8_t Width = 128 - (X * 2);
	static constexpr uint8_t Height = 20;
	static constexpr uint8_t Y = 64 - Height;

	using Renderer = Arduboy2;
	using Printer = Arduboy2;
};
// Layout constants for the choice-selection box.
struct DefaultChoiceBoxSettings
{
	static constexpr uint8_t ChoiceCapacity = 4;
	static constexpr uint8_t LineHeight = 8;

	// Box placement and outer size.
	static constexpr uint8_t X = 15;
	static constexpr uint8_t Y = 15;
	static constexpr uint8_t Width = 128 - (X * 2);

	// Padding around the box content and around each choice entry.
	static constexpr uint8_t MarginX = 2;
	static constexpr uint8_t MarginY = 2;
	static constexpr uint8_t ChoiceMarginX = 2;
	static constexpr uint8_t ChoiceMarginY = 2;

	// Derived layout values.
	static constexpr uint8_t CursorXOffset = MarginX + ChoiceMarginX;
	static constexpr uint8_t CursorYOffset = MarginY + ChoiceMarginY;
	static constexpr uint8_t ChoiceWidth = Width - (MarginX * 2);
	static constexpr uint8_t ChoiceHeight = LineHeight + (ChoiceMarginY * 2);

	using Renderer = Arduboy2;
	using Printer = Arduboy2;
};
// Capacity limits and timing for the script processor.
struct DefaultProcessorSettings
{
	// Virtual-machine limits.
	static constexpr uint8_t StackSize = 64;
	static constexpr uint8_t MaxGlobals = 32;
	static constexpr uint8_t MaxLocals = 32;
	static constexpr uint8_t MaxImages = 16;

	// Per-character print delays.
	static constexpr uint16_t FastPrintDelay = 0;
	static constexpr uint16_t SlowPrintDelay = 100;

	using TextBoxSettings = DefaultTextBoxSettings;
	using ChoiceBoxSettings = DefaultChoiceBoxSettings;
};
#include "Processor.h"
using DefaultProcessor = Processor<DefaultProcessorSettings>; |
As it becomes increasingly obvious, I’m starting to think White-Wolf has more interest in roping their lapsed Masquerade fans back into the fold than in making Requiem a complete gameline. The writing is certainly on the wall. Their upcoming MMO (assuming it doesn’t remain in a permanent state of vapor-ware) is based on the older Masquerade license; they’ve recently released the 20th anniversary edition book of Vampire: the Masquerade, and they are starting (restarting?) a game line called ‘Onyx Path’, which is previously unreleased Old World of Darkness material. No, no, that doesn’t prove anything. Yes, I’m doomsaying, which everyone has been doing since White-Wolf slipped over to an exclusive PoD/PDF sales model. But the resulting internet buzz got me to thinking.
See, all I keep hearing is all this weird nostalgic stuff about Masquerade, because you know, people are thinking back on the line and what they liked about it. What comes along with that are criticisms – direct and indirect – of Requiem. And I wanted to address that. So in no particular order, here are some of my defenses of what I consider to be the best horror game out there:
1.) “I’ve always felt like Requiem is too dry. Masquerade had a much more engaging backstory.”
This is the most polite way I have heard to comment on WW’s decision to include no metaplot in Requiem. I have always felt – still feel – that Masquerade was as much a comic book as it was an RPG. Each book progressed the story. There were people, many people, who were more interested in that story than in the game, ferreting around in the books in an attempt to put together some grand riddle, like it’s an ARG or something. I think that Requiem, being a toolbox game, was always supposed to approach Masquerade in terms of its rich plot – but only with the help of individual Storytellers, not from the gameline. Here’s what I mean. In Masquerade, you knew each sect’s territory, had at least two dozen NPCs perfectly capable of handling every threat the PCs would ever care to tackle themselves. You had the vampire origin question more or less settled, an end of the world scenario, vast world-wide conspiracies, and millennia-old mysteries.
Why can’t Requiem do that?
Simple answer: It can. That’s the thing. Most STs don’t do the work required to build a universe around their game, but all the tools are there to decide on your own vampire origin story, what world-wide conspiracies are happening, and so forth. Requiem was always intended to be more flexible, and allow each ST to custom-build their own setting, with very little in the way of assumed constants in that setting. One ST builds a global game. One ST builds a local game. One ST decides Judas was the first vampire, and another decides it was Gilgamesh. If the book is dry, it’s because it’s an instruction book, not a serial novel that also happens to be a game, if you’re into that kind of thing.
2.) “I just don’t feel very strongly about the clans as they are presented in the core book.”
This one has some validity to it, particularly when you consider that most of this camp is coming from Masquerade. In that game, you almost automatically know what you’re getting into just flipping through the book. Gangrel – animalistic types, gut instinct. All Gangrel deal with the animal side of vampirism, full stop, or else they have good reason for being contrary, but either way they interact with the stereotype. If you play a Brujah, you rebel against something. If you play a Malkavian, you’re crazy and use crazy to solve mysteries, and so forth. The character archetype is baked right into the Clan, such that flipping through the book is almost like looking at a catalog of personalities. That’s not to say everyone played a Toreador the same way (although, there are plenty of you out there), but rather that it was easy for a new player to pick a character off a page, make that character, and immediately know how to play her and what her goals are.
In contrast, Clans in Requiem have never represented that. They don’t really represent how to play or what kind of personality you might have or what your goals are. Clans in Requiem are a unique, mystical expression of the Curse, and much more closely model what tools a character has available. Mekhet are sneaky, but not because they are the “sneaky clan” (see: Assamites), but rather, because their tools (Obfuscate, Auspex) are good at gathering information and maintaining concealment. There is so much wiggle room within that context that there is almost no guideline whatsoever as to what to do. And moreover, there is no Mekhet Clan structure (at least, by default) to tell you what to do or what your personality is like. Covenants go some of the way towards resolving this, but Covenants only tell you what your ideology looks like, not what your personality is like. If you’re a Mekhet Sanctified, it means you’re a believer (probably) and you have access to information gathering powers. It doesn’t immediately tell you as much as say, playing a Malkavian might.
This means the whole game was just less accessible to new players; less accessible means less impressive formative experiences, and that means poor comparisons for people who played both. I found the freedom of Requiem incredibly refreshing; I liked being able to make a personality first and pick a Clan second, which was sort of rare in Masquerade. Anecdotal stories about Tremere or Ventrue chastised, OOC and IC, for “playing their Clan wrong” is about all I need to hear to know that distinction exists. A number of times, I would see new players flip through the book and stare at the Clans for an hour or more and still not feel immediately drawn to any of them, largely because Requiem asks you to do a lot of work with regards to coming up with your own goals and personality first. Character concepts that, in Masquerade, would immediately fit one Clan very naturally (“I want to play a Crazy Seer”, for instance, or “I want to play a Rich Businessman”) could fit 2, 3, or even all 5 in Requiem, which further muddied the waters.
3.) “Vampires are weaker in Requiem/Elders are weaker in Requiem.”
This one I can’t argue with, because mechanically there’s no question about it. Vampires are closer to mortals in Requiem than they are in Masquerade. It’s an objective fact. The trick is, I don’t especially care. Vampire to me has never been a game about self-empowerment. Vampires are not magicians, or superheroes, by default. They are mortals, changed, given a shot at eternal life and faced with the difficulty of survival and moral despair. Their perks – Disciplines, inherent benefits, etc – should operate to help reinforce those ideas, and give them the ability to enact some degree of change in the world. I do not consider it a failing that a modestly well statted mortal can kill a starting vampire with a baseball bat under the correct circumstances.
See, Vampires are supposed to be secretive and manipulative, and that’s not something I ever felt Masquerade really encouraged. If some 7th generation Prince has all his Disciplines at 5 and such, there’s really no reason for him to rule from the shadows, particularly if there aren’t any other 7th generation vampires in town. Sure, if the whole city decides to kill him, they might be able to, but let’s not kid ourselves – this guy can probably kill a couple of starting characters without a second thought. Why on Earth would he ever need such characters? It’s immediately deprotagonizing. Vampires in Requiem, particularly Elders, survive by making friends, because while they are personally more powerful than most given individual neonates, they aren’t stronger than all of them. Further, there are probably a dozen things a Requiem Elder can’t do himself, unlike the Masquerade Elder, who can do just about anything. Making an Elder vampire “unbeatable” in Requiem takes thousands of experience. Considering the chart for giving XP to advanced characters stops at around 400, that seems untenable.
Vulnerable Elders – indeed, vulnerable vampires – help enforce the theme of political distrust. Vampires don’t trust each other, but they have to make friends to survive. No one of them is personally powerful enough, physically, to survive in a vacuum. I like that, I find it to be incredibly interesting. 9 levels up of untouchable NPCs who are both more interesting and more powerful than the PCs? Not so much.
To conclude: I know full well White Wolf probably isn’t closing the door on Requiem, and I hope that they do something great with the license. And I mean, I like Masquerade! I think it has some great perks, I played the game for years. But I love Requiem; I really feel it is a superior game for writing compelling stories about vampires. If I want to read a great story about vampires, or if I want to play in a game where the power scale trends towards the apocalyptic, or if I want to ease players into a game where they might not immediately know what they want to play, maybe Masquerade is a better choice. To me, Requiem is just perfect, and I guess even if the line were to end tomorrow, I would feel like the collection I have is pretty comprehensive. I only wish more die-hards had given it an honest shot, instead of reading through it in PDF or at the gamestore and thinking, “There’s no plot in this! I’m uninterested!” That’s never been the point. We make our own worlds, our own stories, our own grand mysteries, vampire origins, and our own end of the world, sometimes.
Tell me what you think in the comments!
Advertisements
Like this: Like Loading... Related
Posted in Uncategorized |
A Framework for Vibration Attenuation in Traffic Mast-Arm Structures under Wind Loads
Traffic signals and information signs play a vital role in guiding drivers on highways and urban roads, to maintain safe travel. For this reason, it is crucial to ascertain the functionality of signal support structures. This paper lays the foundation for a fully computational framework to model and attenuate wind-induced vibrations in traffic lighting structures by using computational fluid dynamics (CFD) simulations and dynamic analysis. Dependence of flow pattern and aerodynamic loads on Reynolds number reveals the importance of full-scale CFD with Large Eddy Simulation for mast arm structures. By employing available weights of lighting boxes, distributed tuned mass dampers were created. The results obtained show that distributed tuned lighting boxes are effective devices for vibration suppression. In addition, damping enhancement can significantly reduce vibration-induced stresses, and hence extend the fatigue life, with promises to reduce the cost of building new structures and improve the safety of the traveling public. The procedure followed for creating time histories of wind loads integrated with finite element modeling is useful for the investigation of other vibration-mitigation techniques.
package main
// moveZeroes compacts all non-zero elements of nums to the front, in their
// original order, pushing every zero to the tail. The slice is modified in
// place via pairwise swaps.
func moveZeroes(nums []int) {
	insertPos := 0 // index where the next non-zero value belongs
	for idx, value := range nums {
		if value != 0 {
			// Swap the non-zero value into position; the tuple assignment
			// evaluates the right-hand side before either store.
			nums[idx], nums[insertPos] = nums[insertPos], value
			insertPos++
		}
	}
}
|
import { Flex } from '@chakra-ui/react';
import React, { useState } from 'react';
import { ScriptTask } from '../../models/scriptTask';
import storageService from '../../services/storageService';
import TaskConsole from './TaskConsole';
import TaskList from '../TaskList';
// Two-pane task runner: a selectable task list on the left and a console for
// the chosen task on the right.
const TaskRunner: React.FC = () => {
  // The task currently highlighted in the list; undefined until one is chosen.
  const [selectedTask, setSelectedTask] = useState<ScriptTask>();

  // Tasks are read from persistent storage on every render.
  const storedTasks = storageService.getTasks();
  const activeTaskId = selectedTask?.id;

  return (
    <Flex w="100%">
      <TaskList
        executable
        tasks={storedTasks}
        selectedTaskId={activeTaskId}
        onTaskSelect={setSelectedTask}
      />
      <TaskConsole currentTaskId={activeTaskId} />
    </Flex>
  );
};

export default TaskRunner;
|
A spokesperson for the Attorney General of New Jersey has said it would have been illegal to release surveillance video to the NFL of Ray Rice punching his then-fiancée in an Atlantic City hotel elevator.
Rice was cut by the Baltimore Ravens and suspended indefinitely by the NFL following the release of disturbing footage by TMZ Sports yesterday, which the NFL has claimed is the first time they had seen Rice knock his now-wife unconscious with a brutal sucker punch.
Judge Andrew Napolitano joined Gretchen Carlson on The Real Story today and said that it would have been illegal for the government to release the video.
However, it would not have been illegal for the Revel Hotel & Casino, TMZ or Rice's attorney to do so.
Judge Nap noted that Rice's lawyer likely would not have surrendered the video to the NFL, but there is an argument that the hotel had an obligation to release the footage if requested by the NFL.
Judge Nap added that the NFL does not face any further dust-up from the government.
"The NFL's decision to discipline Ray Rice or not to discipline Ray Rice is an internal matter between the NFL, the Ravens, the players' association, Ray Rice and their fan base," he concluded.
Watch the clip from The Real Story above.
Ravens Release Ray Rice After Video Shows Vicious Punch of Wife
'He Needs To Be in Jail': 'Outnumbered' Takes on New Ray Rice Assault Video |
/// Samples an incident direction and the corresponding microfacet normal
/// `wh` for the given outgoing direction, if possible.
///
/// # Returns
/// `Some((incident, wh, cos_o))`, where `cos_o` is the dot product of
/// `outgoing` and `wh`; `None` when sampling fails (degenerate outgoing
/// direction, back-facing microfacet, or reflection into the opposite
/// hemisphere).
fn sample_incident_wh_cos_o(
    &self,
    outgoing: Vector3,
    sample: Vector2,
) -> Option<(Vector3, Vector3, Float)> {
    // Sample microfacet orientation $\wh$ and reflected direction $\wi$.
    // A degenerate (grazing) outgoing direction cannot be reflected.
    if bxdf_is_parallel(outgoing) {
        return None;
    }
    let wh = self.distribution.sample_wh(outgoing, sample);
    let cos_o = outgoing.dot(wh);
    // Reject microfacets facing away from the outgoing direction. Should be rare.
    if cos_o < 0.0 {
        return None;
    }
    let incident = outgoing.reflected(wh);
    // The reflected direction must lie in the same hemisphere as the outgoing one.
    if !same_hemisphere(incident, outgoing) {
        return None;
    }
    Some((incident, wh, cos_o))
}
// TODO: these "Bind*" functions are almost identical, except they use different params. Can we combine?
//-----------------------------------------------------------------------------
void CPUTMaterialDX11::BindTextures(CPUTShaderParameters ¶ms, const CPUTModel *pModel, int meshIndex)
{
CPUTAssetLibraryDX11 *pAssetLibrary = (CPUTAssetLibraryDX11*)CPUTAssetLibrary::GetAssetLibrary();
for (int texture = 0; texture < params.mTextureCount; texture++)
{
std::string textureName;
std::string tagName = params.mpTextureName[texture];
CPUTConfigEntry *pValue = mpConfigBlock->GetValueByName(tagName);
if (!pValue->IsValid())
{
std::string output = "CPUT WARNING: " + tagName + " not found in material " + mMaterialName + "\n";
DEBUG_PRINT(output.c_str());
continue;
}
textureName = pValue->ValueAsString();
if (0 == textureName.length()) { textureName = "default.dds"; }
int bindPoint = params.mpTextureBindPoint[texture];
ASSERT(bindPoint < CPUT_MATERIAL_MAX_TEXTURE_SLOTS, "Texture bind point out of range.");
params.mBindViewMin = std::min(params.mBindViewMin, bindPoint);
params.mBindViewMax = std::max(params.mBindViewMax, bindPoint);
std::string SRGBName = tagName + "sRGB";
CPUTConfigEntry *pSRGBValue = mpConfigBlock->GetValueByName(SRGBName);
bool loadAsSRGB = pSRGBValue->IsValid() ? loadAsSRGB = pSRGBValue->ValueAsBool() : false;
if (!params.mpTexture[texture])
{
params.mpTexture[texture] = pAssetLibrary->GetTexture(textureName, false, loadAsSRGB);
ASSERT(params.mpTexture[texture], "Failed getting texture " + textureName);
}
OUTPUT_BINDING_DEBUG_INFO((itoc(bindPoint) + " : " + params.mpTexture[textureCount]->GetName() + "\n").c_str());
}
} |
/* For license and copyright information please see LEGAL file in repository */
package ipv6
// HopByHop is the IPv6 Hop-by-Hop Options extension header, selected when
// the preceding header's NextHeader field is 0 (see RFC 8200, section 4.3).
type HopByHop struct {
	// NextHeader identifies the type of the header immediately following this one.
	NextHeader uint8
	// HdrExtLen is the header's length per RFC 8200: in 8-octet units,
	// not including the first 8 octets.
	HdrExtLen uint8
	// Options holds the raw options data carried by the header.
	Options []byte
}
|
The surprise signing of Roman Torres will complicate the playing life of Brad Evans, who was moved to a position he didn’t like Sunday. Now what?
The signing of Róman Torres will bring size, physicality, and an aerial threat to the Seattle Sounders. It may also bring instability to Seattle captain Brad Evans’ role on the pitch.
Evans was moved to center back at the beginning of the season, a position he had not played before. With the signing of Torres, however, it seems that Evans may be pushed aside (literally, perhaps, to left back) in favor of the taller, heavier Panamanian.
Evans played a defensive midfield role alongside Erik Friberg in the 4-2-3-1 formation that was used in a 3-1 loss to LA Sunday. After the match, Evans expressed discomfort in the position.
“(I was) out of sorts on most occasions, not comfortable, but I was asked to play there and said yes,” said Evans.
Sounders GM Garth Lagerway did not characterize Torres’ addition as a replacement for Evans.
“I think Brad Evans has done a nice job playing center back for us,” said Lagerway. “This was a question of having a good player that became available and a deal that we couldn’t pass up on. By doing it, I think this gives us a little more tactical flexibility.”
Lagerway claimed that the final decision on such matters lay with head coach Sigi Schmid.
“My job is to give my coach options. What he does with those options is up to him,” said Lagerway.
Schmid, for his part, also had praise for his captain’s performance in the back line.
“I think Brad has played well at center back, he really adjusted to the position,” said Schmid Wednesday. “It’s just another option. Right now, with Ozzie (Alonso) being injured, we used Brad in the midfield in the last game, so it gives us that option to push Brad up to midfielder.”
In the short term, Evans’ presence in the midfield helps fill a crucial gap, but it seems that a serious rearrangement of the starting 11 may be on the horizon in the wake of the Sounders’ slew of signings this summer.
The Brazilian Thomás and the Swedish Erik Friberg were joined last week by Paraguayan Nelson Valdez. Thomás and Friberg are already contributing in regular starting roles, and Schmid claimed upon Valdez’ arrival that he would be ready to play against Orlando City Sunday (though he would not comment how many minutes Valdez might play).
While there is no set timeline for Torres to take the pitch, he and 2014 MLS defender of the year Chad Marshall are the forerunners for the starting spots at center back. It is clear that the Sounders will want to use the talent and leadership of Evans, rather than keeping him in reserve. But where that might be remains a mystery. Once more, Evans may be asked to step into a new role for his team.
For all the players, however, the battle for playing time just became more competitive.
Noteworthy
Midfielder Andreas Ivanschitz, whom the Sounders acquired last week on a free transfer, participated in his first team training Wednesday. Lagerway said he was impressed with Ivanchitz’s conditioning.
“He might be, of all the guys we’ve signed, the guy closest to being ready to come in and contribute,” said Lagerway.
Ivanschitz, 31, expressed excitement at joining his new club.
“I’m feeling great being out with the team, being back on the pitch,” said Ivanschitz Wednesday. “I’m really enjoying it.”
The former Levante man said that while he was eager to play, improving his fitness was his current objective after a long off-season.
“It’s very important for me to get in good shape, to feel match-ready, so that’s the main target at the moment.”
Ivanschitz joins a Seattle team that has lost eight of its last nine matches and will need a late-season push to stay relevant in the playoff race. Still, Ivanschitz was confident that his new team would be able to move forward.
“The last games didn’t go well, but this happens. I think there is some great quality in this team and I’m really sure that we will win some games in the next weeks.” |
/**
 * Swing panel for loading experiment metadata from CellMia. Shows the
 * available projects and their experiments, a read-only project description,
 * and the experiment's imaging metadata (time frames, interval, duration) —
 * the metadata fields are parsed automatically from a microscope file.
 *
 * @author Paola Masuzzo
 */
public class LoadFromCellMiaMetadataPanel extends javax.swing.JPanel {

    /** Returns the (read-only display) field for the experiment duration. */
    public JTextField getDurationTextField() {
        return durationTextField;
    }

    /** Returns the label with usage instructions under the project lists. */
    public JLabel getInfoLabel() {
        return infoLabel;
    }

    /** Returns the label noting that metadata is parsed from a microscope file. */
    public JLabel getInfoLabel1() {
        return infoLabel1;
    }

    /** Returns the field for the interval between time frames. */
    public JTextField getIntervalTextField() {
        return intervalTextField;
    }

    /** Returns the combo box selecting the interval's time unit. */
    public JComboBox getIntervalUnitComboBox() {
        return intervalUnitComboBox;
    }

    /** Returns the field for the number of time frames. */
    public JTextField getTimeFramesTextField() {
        return timeFramesTextField;
    }

    /** Returns the read-only text area showing the selected project's description. */
    public JTextArea getProjectDescriptionTextArea() {
        return projectDescriptionTextArea;
    }

    /** Returns the single-selection list of experiments for the selected project. */
    public JList getExperimentsList() {
        return experimentsList;
    }

    /** Returns the single-selection list of available projects. */
    public JList getProjectsList() {
        return projectsList;
    }

    /**
     * Creates new form LoadFromCellMiaMetadataPanel
     */
    public LoadFromCellMiaMetadataPanel() {
        initComponents();
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {

        overviewPanel = new javax.swing.JPanel();
        infoLabel = new javax.swing.JLabel();
        jScrollPane4 = new javax.swing.JScrollPane();
        projectDescriptionTextArea = new javax.swing.JTextArea();
        jScrollPane2 = new javax.swing.JScrollPane();
        experimentsList = new javax.swing.JList();
        jScrollPane1 = new javax.swing.JScrollPane();
        projectsList = new javax.swing.JList();
        metadataPanel = new javax.swing.JPanel();
        durationLabel = new javax.swing.JLabel();
        durationTextField = new javax.swing.JTextField();
        intervalLabel = new javax.swing.JLabel();
        timeFramesLabel = new javax.swing.JLabel();
        intervalTextField = new javax.swing.JTextField();
        timeFramesTextField = new javax.swing.JTextField();
        jLabel1 = new javax.swing.JLabel();
        intervalUnitComboBox = new javax.swing.JComboBox();
        infoLabel1 = new javax.swing.JLabel();

        setBackground(new java.awt.Color(255, 255, 255));
        setPreferredSize(new java.awt.Dimension(480, 610));

        overviewPanel.setBorder(javax.swing.BorderFactory.createTitledBorder("Overview"));
        overviewPanel.setMinimumSize(new java.awt.Dimension(20, 20));
        overviewPanel.setOpaque(false);

        infoLabel.setText("Click on a project to see the relative experiments.");

        jScrollPane4.setBorder(null);

        projectDescriptionTextArea.setEditable(false);
        projectDescriptionTextArea.setColumns(20);
        projectDescriptionTextArea.setRows(5);
        projectDescriptionTextArea.setBorder(javax.swing.BorderFactory.createTitledBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1), "Project Description", javax.swing.border.TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION, new java.awt.Font("Tahoma", 1, 11))); // NOI18N
        projectDescriptionTextArea.setFocusable(false);
        jScrollPane4.setViewportView(projectDescriptionTextArea);

        jScrollPane2.setBorder(null);

        experimentsList.setBorder(javax.swing.BorderFactory.createTitledBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1), "Experiments", javax.swing.border.TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION, new java.awt.Font("Tahoma", 1, 11))); // NOI18N
        experimentsList.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
        jScrollPane2.setViewportView(experimentsList);

        jScrollPane1.setBorder(null);

        projectsList.setBorder(javax.swing.BorderFactory.createTitledBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1), "Projects", javax.swing.border.TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION, new java.awt.Font("Tahoma", 1, 11))); // NOI18N
        projectsList.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
        jScrollPane1.setViewportView(projectsList);

        javax.swing.GroupLayout overviewPanelLayout = new javax.swing.GroupLayout(overviewPanel);
        overviewPanel.setLayout(overviewPanelLayout);
        overviewPanelLayout.setHorizontalGroup(
            overviewPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(overviewPanelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(overviewPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addComponent(jScrollPane4, javax.swing.GroupLayout.PREFERRED_SIZE, 430, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(infoLabel, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                    .addGroup(overviewPanelLayout.createSequentialGroup()
                        .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 212, javax.swing.GroupLayout.PREFERRED_SIZE)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(jScrollPane2, javax.swing.GroupLayout.PREFERRED_SIZE, 206, javax.swing.GroupLayout.PREFERRED_SIZE)))
                .addContainerGap())
        );

        overviewPanelLayout.linkSize(javax.swing.SwingConstants.HORIZONTAL, jScrollPane1, jScrollPane2);

        overviewPanelLayout.setVerticalGroup(
            overviewPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, overviewPanelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(overviewPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 217, Short.MAX_VALUE)
                    .addComponent(jScrollPane2))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addComponent(jScrollPane4, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addComponent(infoLabel)
                .addContainerGap())
        );

        metadataPanel.setBackground(new java.awt.Color(255, 255, 255));
        metadataPanel.setBorder(javax.swing.BorderFactory.createTitledBorder("Experiment Metadata"));
        metadataPanel.setMinimumSize(new java.awt.Dimension(20, 20));

        durationLabel.setText("Duration*");

        durationTextField.setOpaque(false);

        intervalLabel.setText("Interval*");

        timeFramesLabel.setText("Time Frames*");

        intervalTextField.setOpaque(false);

        timeFramesTextField.setOpaque(false);

        jLabel1.setText("hours");

        intervalUnitComboBox.setOpaque(false);

        infoLabel1.setText("These data are automatically parsed from a microscope file.");

        javax.swing.GroupLayout metadataPanelLayout = new javax.swing.GroupLayout(metadataPanel);
        metadataPanel.setLayout(metadataPanelLayout);
        metadataPanelLayout.setHorizontalGroup(
            metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(metadataPanelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(metadataPanelLayout.createSequentialGroup()
                        .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(intervalLabel)
                            .addComponent(durationLabel)
                            .addComponent(timeFramesLabel))
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                        .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(intervalTextField)
                            .addComponent(timeFramesTextField)
                            .addComponent(durationTextField))
                        .addGap(18, 18, 18)
                        .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(intervalUnitComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, 122, javax.swing.GroupLayout.PREFERRED_SIZE)
                            .addComponent(jLabel1))
                        .addGap(41, 41, 41))
                    .addGroup(metadataPanelLayout.createSequentialGroup()
                        .addComponent(infoLabel1, javax.swing.GroupLayout.DEFAULT_SIZE, 428, Short.MAX_VALUE)
                        .addContainerGap())))
        );
        metadataPanelLayout.setVerticalGroup(
            metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(metadataPanelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(timeFramesLabel)
                    .addComponent(timeFramesTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addGap(18, 18, 18)
                .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(intervalLabel)
                    .addComponent(intervalTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(intervalUnitComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addGap(18, 18, 18)
                .addGroup(metadataPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(durationLabel)
                    .addComponent(durationTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jLabel1))
                .addGap(18, 18, 18)
                .addComponent(infoLabel1)
                .addContainerGap())
        );

        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
        this.setLayout(layout);
        layout.setHorizontalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(layout.createSequentialGroup()
                .addContainerGap()
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addComponent(overviewPanel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(metadataPanel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addContainerGap())
        );
        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(layout.createSequentialGroup()
                .addContainerGap()
                .addComponent(overviewPanel, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                .addComponent(metadataPanel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                .addContainerGap())
        );
    }// </editor-fold>//GEN-END:initComponents

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JLabel durationLabel;
    private javax.swing.JTextField durationTextField;
    private javax.swing.JList experimentsList;
    private javax.swing.JLabel infoLabel;
    private javax.swing.JLabel infoLabel1;
    private javax.swing.JLabel intervalLabel;
    private javax.swing.JTextField intervalTextField;
    private javax.swing.JComboBox intervalUnitComboBox;
    private javax.swing.JLabel jLabel1;
    private javax.swing.JScrollPane jScrollPane1;
    private javax.swing.JScrollPane jScrollPane2;
    private javax.swing.JScrollPane jScrollPane4;
    private javax.swing.JPanel metadataPanel;
    private javax.swing.JPanel overviewPanel;
    private javax.swing.JTextArea projectDescriptionTextArea;
    private javax.swing.JList projectsList;
    private javax.swing.JLabel timeFramesLabel;
    private javax.swing.JTextField timeFramesTextField;
    // End of variables declaration//GEN-END:variables
}
<reponame>tigefa4u/gitlabhq
package git
import (
"os"
"testing"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/gitaly"
)
// TestMain initializes the Gitaly sidechannel registry once for the whole
// package before delegating to the standard test runner.
func TestMain(m *testing.M) {
	gitaly.InitializeSidechannelRegistry(logrus.StandardLogger())
	os.Exit(m.Run())
}
|
<reponame>Praqma/Helmsman
// go:build exclude
package main
import (
"encoding/json"
"os"
"github.com/Praqma/helmsman/internal/app"
"github.com/invopop/jsonschema"
)
// main reflects Helmsman's desired-state type (app.State) into a JSON Schema
// and writes it to schema.json in the current directory.
func main() {
	r := new(jsonschema.Reflector)
	// Desired-state files may carry properties the schema does not model explicitly.
	r.AllowAdditionalProperties = true
	// Pull Go doc comments from the app package into the schema descriptions.
	if err := r.AddGoComments("github.com/Praqma/helmsman", "./internal/app"); err != nil {
		panic(err)
	}
	s := r.Reflect(&app.State{})
	// BUG FIX: the MarshalIndent and WriteFile errors were silently discarded,
	// so a failed run could exit successfully with a missing/empty schema.json.
	data, err := json.MarshalIndent(s, "", " ")
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("schema.json", data, 0o644); err != nil {
		panic(err)
	}
}
|
/**
 * Splits directed edge {@code d} of this polygon at its intersection with
 * the cut plane, inserting the intersection point as a new vertex.
 *
 * @param cut the cutting plane
 * @param d   the directed edge to split; its endpoints must lie strictly on
 *            opposite sides of {@code cut}
 * @throws IllegalArgumentException if the edge does not strictly cross the
 *         plane (both endpoints on the same side, or an endpoint exactly on it)
 */
public void split(Plane cut, DEdge d )
{
    // Signed distances of the two endpoints must have opposite signs for a
    // strict crossing; a non-negative product means no valid split exists.
    if (cut.sDistance(d.srcPoint()) * cut.sDistance(d.dstPoint()) >= 0.0) {
        throw new IllegalArgumentException();
    }
    // Point where the edge meets the plane.
    Vec3d onP = cut.onPoint( d.srcPoint(), d.dstPoint() );
    d.split( onP );
    ++nDEdges;  // the split introduces one additional directed edge
}
def retrieve_top_video(videos):
    """Return the best-priority video from ``videos``.

    Each video is a mapping carrying a ``'priority_flag'`` key; priority 1
    beats 2, which beats 3. Within a priority level, the first video in
    input order wins.

    Args:
        videos: sequence of dicts, each with a ``'priority_flag'`` key.

    Returns:
        The first matching video dict, or the string ``"No results found"``
        when no video has a priority_flag of 1, 2 or 3 (kept for backward
        compatibility with existing callers that expect this sentinel).
    """
    # Direct scan per priority level replaces the original boolean-list +
    # .index(True) + ValueError-as-control-flow pattern, and stops at the
    # first match instead of probing every video.
    for priority in range(1, 4):
        for video in videos:
            if video['priority_flag'] == priority:
                return video
    return "No results found"
Enter your search terms Submit search form Web www.debunkingskeptics.com
Debunking PseudoSkeptical Arguments of Paranormal Debunkers
By Winston Wu
Revised 2011
Praises and Reviews
“Winston Wu has now written an essay that brilliantly and decisively rebuts the general skeptical arguments against the paranormal. This is among the best essays ever written on this subject, and deserves extensive study. The essay carefully moves through the entire skeptical lexicon of arguments, rebutting each in turn."
- Whitley Strieber, Author and Radio Host of UnknownCountry.com
“Your treatise "debunking pseudoskeptical arguments of paranormal debunkers" is the best refutation of pseudoskeptics' arguments, tactics and fallacies to reject the evidence for paranormal phenomena. I've recommended your article to every person that I know.”
– Deya, Spain
“I'm Jürgen and I write you from Germany. Let me tell that your article about debunkers/pseudoskeptics of PSI is the best handbook to debate with them. I used to debate pseudoskeptics as a pastime, using the arguments and tactics of your article.”
– Jürgen, Germany
“Your article "debunking pseudoskeptical arguments of paranormal debunkers" is a must read to any person who seeks to understand the nature of pseudoskepticism… Your article was the first on-line resource to expose the fallacies, motivation, agenda and dishonesty of pseudoskeptics.”
– Jessica Parker, USA
Help support this site! Get a PDF ebook of this Treatise for only $7. Use the PayPal button below to donate. (Note: Upon completion of payment, click "Return to Merchant" on the PayPal screen to access the download page) Amazon Kindle version also available.
Table of Contents
Foreword by Victor Zammit
Introduction
- How this book came to be written
PseudoSkeptics vs. True Skeptics: Behaviors and Tactics
- True Skeptics / Open-Minded Skeptics
- PseudoSkeptics / Closed-Minded Skeptics
Scientism - The Religion of Pseudoskeptics?
Section I: General Arguments Against Paranormal Claims
Argument # 1: It is irrational to believe anything that hasn’t been proven.
Argument # 2: Extraordinary claims require extraordinary evidence.
- Extraordinary evidence for 4 phenomena
Argument # 3: The Occam’s Razor rule.
Argument # 4: The Invisible Pink Unicorn / Santa Claus gambit.
Argument # 5: The “anecdotal evidence is invalid” argument.
- Factors measuring degree of reliability in anecdotal evidence
- The Ebay feedback challenge that a pseudoskeptic failed and was caught lying red-handed
Argument # 6: The memory malleability argument to dismiss anecdotal evidence.
Argument # 7: Automatic dismissal of paranormal claims as due to mistake, lying or hallucination.
Argument # 8: There is no evidence for any paranormal or psychic phenomena.
Argument # 9: Science is the only reliable method.
Argument # 10: Paranormal phenomena aren’t possible because they contradict known laws of science.
Argument # 11: Unexplainable does not mean inexplicable.
Argument # 12: Skeptics don’t have beliefs. They make assessments based on evidence.
Argument # 13: Skepticism is not cynicism. It is a method of rational inquiry.
Argument # 14: Believers in the paranormal are thinking in primitive, irrational and childish ways.
Argument # 15: Skeptics are defending science and reason from a rising tide of irrationality.
Section II: Arguments Against Specific Paranormal Claims
Argument # 16: Psychics and mediums use cold reading tricks and general guesses, not psychic powers.
Argument # 17: Experiments that show positive results for psi must be replicable to count as evidence.
Argument # 18: No psychic phenomena have been demonstrated under controlled conditions.
Argument # 19: Alternative medicine and remedies have no scientific basis. All claims of their effectiveness are due to placebo effect or coincidence.
Argument # 20: Miracles are impossible and defy everything we know about science.
Argument # 21: The Skeptical explanation for answered prayers - Selective memory and coincidence.
- My own theory on how and why prayer works
Argument # 22: The Skeptical explanation for precognitive dreams - Selective memory and coincidence.
Argument # 23: The Dying Brain Hypothesis for Near Death Experiences.
Argument # 24: Consciousness is neurologically based, dependent on brain and does not survive physical death.
Argument # 25: Spiritual experiences only exist in your mind, not in external reality.
Argument # 26: Paranormal beliefs are childish fantasies for dealing with a cold uncaring world.
Argument # 27: There is no evidence to support the existence of UFO’s or Aliens.
Argument # 28: Evolution is sufficient to explain the origin of life, so God is not needed.
Argument # 29: Atheists don’t claim that God doesn’t exist. They lack belief in God. The burden of proof for God is on the Theist, not the Atheist.
Argument # 30: The James Randi Million Dollar Challenge argument.
Conclusion
New Developments and Research
Links, Blogs and Books on PseudoSkepticism and Paranormal Research
Appendix A: My Presentation Outline – Skepticism vs. Psi
Reader Responses
My Other Debunking Articles
Help support this site! Get a PDF ebook of this Treatise for only $7. Use the PayPal button below to donate. (Note: Upon completion of payment, click "Return to Merchant" on the PayPal screen to access the download page) Amazon Kindle version also available.
Sign my Guestbook or Comment in my Forum
Go to Introduction
Back to Index Page |
The Army has informed Congress that it is terminating a robotic vehicle and an unmanned aircraft program that were once part of the Army’s largest modernization effort.
The move, which comes as the Pentagon prepares its 2011 budget request, highlights the Army’s need to pour money into technologies that military planners see as much more necessary to support soldiers deployed to Afghanistan and Iraq.
ADVERTISEMENT
The cancellation of the unmanned ground and air vehicle programs eviscerates what once was known as the Army’s Future Combat Systems. The once-ambitious $160 billion FCS was envisioned as a series of combat vehicles and unmanned systems linked through a software network, but has been whittled down to a few sensors and small unmanned systems. The software network is still in development, but congressional sources question its future in light of all the recent cancellations. The termination decision comes after Pentagon officials tasked the Army with devising a modernization strategy composed of separate programs. The remaining technology development efforts from the FCS era are now referred to as Army Brigade Combat Team Modernization. The unmanned ground vehicle that was partially terminated is known as MULE, for Multifunction Utility/Logistics and Equipment Countermine and Transport, while the terminated unmanned aerial vehicle is called Fire Scout or Class IV. The MULE program includes three different kinds of vehicles with different capabilities: the countermine, transport and light-assault. The Army only terminated the countermine and transport variants. Lockheed Martin has been developing the MULE, while Northrop Grumman is the contractor for Fire Scout. Northrop Grumman is also building the vertical takeoff and landing unmanned aerial vehicle for the Navy. Boeing and SAIC were the lead contractors for the now-defunct FCS but have transitioned to the role of prime contractors for the Army Brigade Combat Team Modernization program. Boeing and SAIC have a number of subcontractors for the various technologies being developed under the program. The MULE and Class IV terminations are at subcontract level. Defense Secretary Robert Gates announced last April his decision to kill the manned ground vehicle portion of FCS. Gates expressed concerns that the Army had not adequately incorporated in the vehicles’ design lessons learned from the wars in Iraq and Afghanistan.
Following Gates’s announcement, Pentagon acquisitions chief Ashton Carter formally ended the FCS program in June and directed the Army to modernize its technology as part of separate programs. In its termination letters to lawmakers, the Army said that the two MULE variants eliminated "did not meet rapidly changing threats, nor meet the Army’s future mission needs." Instead, the Army will continue the development of the Armed Robotic Vehicle Assault (Light), which will also incorporate the technologies derived from the two other MULE systems. Regarding the Class IV unmanned aerial system, the Army told lawmakers that an existing program, the Shadow UAV, can meet the Army needs with some technology improvements, instead of investing in the Fire Scout. "All of these restricting steps are being taken to ensure that the Army does not lose time in providing the best possible advantages to its soldiers while remaining ever fiscally responsible to the American citizen," the Army said in the termination letters to Congress. Boeing confirmed that it received the terminations of the contracts as well. "These platforms are currently in System Development and Demonstration as part of the Brigade Combat Team Modernization (BCTM) program. We will now begin notifying those partners and suppliers impacted by the order and initiate the termination proposal process with the Army," Matthew Billingsley, a Boeing spokesman, said in a statement.
This story was updated at 10:48 a.m. on Jan. 13. and 5:28 p.m. on Jan. 14. |
/**
 * Triggers the startup procedure for a {@code MicroserviceInstance}.
 * <p>
 * The event carries a high scheduling priority — presumably so that an
 * instance's startup is processed ahead of other events scheduled for the
 * same simulation instant (NOTE(review): confirm against the scheduler's
 * priority semantics).
 *
 * @author Lion Wagner
 */
public class InstanceStartupEvent extends Event<MicroserviceInstance> {
    /**
     * @param model       owning simulation model
     * @param name        display name of this event
     * @param showInTrace whether this event is recorded in the simulation trace
     */
    public InstanceStartupEvent(Model model, String name, boolean showInTrace) {
        super(model, name, showInTrace);
        this.setSchedulingPriority(Priority.HIGH);
    }
    /**
     * Delegates to the instance itself, which owns its startup procedure.
     *
     * @param microserviceInstance the instance to start
     */
    @Override
    public void eventRoutine(MicroserviceInstance microserviceInstance) throws SuspendExecution {
        microserviceInstance.start();
    }
}
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jdbi.v3.core.qualifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.List;
import org.jdbi.v3.core.argument.Arguments;
import org.jdbi.v3.core.generic.GenericType;
import org.jdbi.v3.core.mapper.ColumnMappers;
import org.jdbi.v3.core.mapper.Mappers;
import org.jdbi.v3.core.rule.DatabaseRule;
import org.jdbi.v3.core.rule.H2DatabaseRule;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
/**
 * Verifies that {@code String} values qualified with {@link NVarchar} are
 * bound via {@code PreparedStatement.setNString} and mapped via
 * {@code ResultSet.getNString}.
 */
public class TestNVarchar {
    // Parameterized qualified type: a String carrying the @NVarchar
    // qualifier. Using QualifiedType<String> instead of the raw type keeps
    // mapTo(...) results typed as List<String> (the raw type previously
    // forced a raw List below and disabled generic type checking).
    private static final QualifiedType<String> NVARCHAR_STRING = QualifiedType.of(String.class).with(NVarchar.class);

    @Rule
    public DatabaseRule dbRule = new H2DatabaseRule();

    /** Creates the nvarchars table every test operates on. */
    @Before
    public void setUp() {
        dbRule.getJdbi().useHandle(handle ->
            handle.execute("create table nvarchars (id int primary key, name nvarchar not null)"));
    }

    /** Drops the table so each test starts from a clean schema. */
    @After
    public void tearDown() {
        dbRule.getJdbi().useHandle(handle ->
            handle.execute("drop table nvarchars"));
    }

    /**
     * Round-trips values through each NVARCHAR binding flavor
     * (bindNVarchar by position and by name, bindByType by position and by
     * name) and reads them back with each NVARCHAR mapping flavor.
     */
    @Test
    public void sqlStatementBindNVarchar() {
        dbRule.getJdbi().useHandle(handle -> {
            handle.createUpdate("INSERT INTO nvarchars (id, name) VALUES (?, ?)")
                .bind(0, 1)
                .bindNVarchar(1, "foo")
                .execute();
            handle.createUpdate("INSERT INTO nvarchars (id, name) VALUES (:id, :name)")
                .bind("id", 2)
                .bindNVarchar("name", "bar")
                .execute();
            handle.createUpdate("INSERT INTO nvarchars (id, name) VALUES (?, ?)")
                .bind(0, 3)
                .bindByType(1, "baz", NVARCHAR_STRING)
                .execute();
            handle.createUpdate("INSERT INTO nvarchars (id, name) VALUES (:id, :name)")
                .bind("id", 4)
                .bindByType("name", "qux", NVARCHAR_STRING)
                .execute();

            assertThat(
                handle.select("SELECT name FROM nvarchars ORDER BY id")
                    .mapTo(String.class, NVarchar.class)
                    .list())
                .containsExactly("foo", "bar", "baz", "qux");
            assertThat(
                handle.select("SELECT name FROM nvarchars ORDER BY id")
                    .mapTo(new GenericType<String>() {}, NVarchar.class)
                    .list())
                .containsExactly("foo", "bar", "baz", "qux");

            // With NVARCHAR_STRING parameterized, the mapped result is a
            // properly typed List<String> rather than a raw List.
            List<String> mapped = handle.select("SELECT name FROM nvarchars ORDER BY id")
                .mapTo(NVARCHAR_STRING)
                .list();
            assertThat(mapped)
                .containsExactly("foo", "bar", "baz", "qux");
        });
    }

    /*
     * The databases we test with don't care whether you call setString() and setNString(), which
     * makes it difficult to test that the NVarchar qualifier is being honored on a live database.
     * The following tests are best effort isolation tests to confirm that the @NVarchar String
     * qualified type is being bound and mapped using PreparedStatement.setNString() and
     * ResultSet.getNString(), respectively.
     */
    @Test
    public void findNVarcharArgument() throws Exception {
        dbRule.getJdbi().useHandle(handle -> {
            String value = "foo";
            PreparedStatement stmt = mock(PreparedStatement.class);

            // Argument located through the Arguments config registry.
            handle.getConfig(Arguments.class)
                .findFor(NVARCHAR_STRING, value)
                .orElseThrow(IllegalStateException::new)
                .apply(1, stmt, null);
            verify(stmt).setNString(1, value);

            // Argument located through the statement context.
            handle.createQuery("no execute")
                .getContext()
                .findArgumentFor(NVARCHAR_STRING, value)
                .orElseThrow(IllegalStateException::new)
                .apply(2, stmt, null);
            verify(stmt).setNString(2, value);
        });
    }

    @Test
    public void findNVarcharMapper() throws Exception {
        dbRule.getJdbi().useHandle(handle -> {
            ResultSet rs = mock(ResultSet.class);
            when(rs.getNString(anyInt())).thenReturn("value");

            // Row mapper resolved from the Mappers registry.
            assertThat(
                handle.getConfig(Mappers.class)
                    .findFor(NVARCHAR_STRING)
                    .orElseThrow(IllegalStateException::new)
                    .map(rs, null))
                .isEqualTo("value");

            // Column mapper resolved from the ColumnMappers registry.
            assertThat(
                handle.getConfig(ColumnMappers.class)
                    .findFor(NVARCHAR_STRING)
                    .orElseThrow(IllegalStateException::new)
                    .map(rs, 1, null))
                .isEqualTo("value");
        });
    }
}
|
// GenerateNewToken generates a new token for protecting against CSRF. The token is attached to the
// response writer as a cookie.
func (c *Handler) GenerateNewToken(w http.ResponseWriter, r *http.Request) error {
ses, err := c.CreateSession("csrf", false)
if err != nil {
log.Println("Error creating a new CSRF token")
return err
}
return c.AttachCookie(w, ses)
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.