import { DisplayModeType, store } from '../store'
import router from '../router'
import { debugWindowProp, proxyAwareEqual } from './utils'
import { err, ok, Result } from 'neverthrow'
import { NetNode } from './nettypes'
export function pathArrayToString(path: string[]): string {
//console.log(path)
return '/' + path.join('/')
}
export enum GetNodeError {
ServerNotReachable,
NodeNotExisting
}
export enum NodeType {
Node = 'node',
File = 'file',
Folder = 'folder'
}
export enum PreviewType {
Image = 'image',
Video = 'video',
None = 'none'
}
function previewTypeFromExt(fname: string): PreviewType {
if (/\.(png|jpe?g)$/i.test(fname)) {
return PreviewType.Image
} else if (/\.(mov|mp4)$/i.test(fname)) {
return PreviewType.Video
}
return PreviewType.None
}
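// Quick sanity examples (illustrative only, not part of the original module):
// previewTypeFromExt('IMG_0042.JPG') === PreviewType.Image
// previewTypeFromExt('clip.MP4') === PreviewType.Video
// previewTypeFromExt('notes.txt') === PreviewType.None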
export class Node {
name: string
pathFromRoot: string[]
fetched: boolean
parent?: Folder
shared: string | null
type = NodeType.Node
size = -1
lastModified = ''
ownedBy: string
constructor(name: string, pathFromRoot: string[], fetched: boolean, shared: string | null, ownedBy: string) {
this.name = name
this.pathFromRoot = pathFromRoot
this.fetched = fetched ?? false
this.shared = shared
this.ownedBy = ownedBy
}
async fetch(): Promise<Result<boolean, string>> {
if (this.fetched) return ok(false)
let url: string
const dmode = store.displayMode.value
if (dmode.mode == DisplayModeType.Files) url = `/api/node?file_path=${encodeURIComponent(this.path())}`
else if (dmode.sharedId != undefined) url = `/api/node?file_path=${encodeURIComponent(this.path())}&shared_id=${dmode.sharedId}`
else return err('neither owned node nor shared id in store.displayMode')
console.log(`fetching ${url}`)
let res
//debugger
try {
if (dmode.mode == DisplayModeType.Files) {
res = await store.fetchWithAuth(url)
} else {
res = await fetch(url)
}
if (res == null) return err('Not logged in')
}
catch (e) {
console.error(e)
return err(String(e))
}
if (res.ok) {
//console.log('res ok')
const snode: NetNode = await res.json()
//console.log('fetched val:', snode)
if (this.pathFromRoot.length > 0 && this.name != snode.name) {
console.error('fetched name != local name')
return err('Wrong Node name')
} else if (this.pathFromRoot.length == 0) {
console.log('updated root name to ', snode.name)
this.name = snode.name
}
if (!proxyAwareEqual(this.pathFromRoot, snode.pathFromRoot)) {
console.error('pathFromRoot differ', this.pathFromRoot, snode.pathFromRoot)
return err('pathFromRoot differ')
}
if (snode.metadata.type == NodeType.Folder && snode.childrenFolder != undefined && snode.files != undefined) {
if (this.type != NodeType.Folder && this.type != NodeType.Node) {
console.error('fetched Folder, but local Node is File')
return err('Got Folder expected File')
}
/* eslint-disable @typescript-eslint/no-use-before-define */
const t = this as unknown as Folder
t.children = [
...snode.childrenFolder.map((f: string) => new Folder(f, undefined, snode.pathFromRoot.concat([f]), null, snode.ownedBy)),
...snode.files.map((f: string) => new File(
f,
snode.pathFromRoot.concat([f]).filter((e: string) => e.length > 0),
null,
snode.ownedBy,
previewTypeFromExt(f)
))
];
if (t.children != undefined) {
for(const n of t.children) {
n.parent = t
if (n instanceof File) {
await n.fetch()
}
}
}
/* eslint-enable @typescript-eslint/no-use-before-define */
} else if (snode.metadata.type == NodeType.File) {
if (this.type != NodeType.File && this.type != NodeType.Node) {
console.error('fetched File, but local Node is Folder')
return err('Got File expected Folder')
}
} else {
console.error('unknown node type: ', snode)
return err('Unknown Node type')
}
this.size = snode.metadata.size
this.lastModified = snode.metadata.lastModified
this.fetched = true
// normalize a missing shared id to null
this.shared = snode.metadata.shared ?? null
this.ownedBy = snode.ownedBy
this.parent?.updateSize()
}
else {
console.error('node req failed: ', res.status)
if (res.status == 401) {
//store.auth.user.value = null
router.push('/logout')
//alert('You need to log in!')
}
return err(res.statusText)
}
return ok(true)
}
path(): string {
return '/' + this.pathFromRoot.join('/')
}
sharedLink(): string | null {
if (this.shared == null) return null
return `${store.baseUrl}/shared/${this.shared}/`
}
downloadLink(): string {
if (store.displayMode.value.mode == DisplayModeType.Files) {
return `/api/download/file?path=${encodeURIComponent(this.path())}&token=${store.user.value?.raw}`
}
return `/api/download/file?path=${encodeURIComponent(this.path())}&shared_id=${store.displayMode.value.sharedId}`
}
async setShared(enabled: boolean): Promise<boolean> {
const url = `/api/folder/shared?path=${encodeURIComponent(pathArrayToString(this.pathFromRoot))}${enabled ? '&enabled=true' : ''}`
//console.log(`Updating shared setting for node ${this.path()}`)
const res = await fetch(url, {
method: 'PATCH',
headers: {
'Authorization': `Bearer ${store.user.value?.raw}`
}
})
if (res.status != 200) {
console.error('Failed to set shared: ', res)
debugWindowProp('sharedFail', this)
return false
}
const id = await res.text()
console.log('shared update:', id)
if (id.length == 0) this.shared = null
else this.shared = id
return true
}
async forceDelete() {
const res = await store.fetchWithAuth(`/api/node?path=${encodeURIComponent(this.path())}`, {method: 'DELETE'})
if (res?.status == 202 && this.parent != undefined) {
this.parent.fetched = false
await this.parent.fetch()
}
}
isMyNode(): boolean {
return this.ownedBy == store.user.value?.payload.userId
}
}
export class Folder extends Node {
children?: Node[]
constructor(name: string, children: Node[] | undefined, pathFromRoot: string[], shared: string | null, ownedBy: string) {
super(name, pathFromRoot, children != null, shared, ownedBy)
this.type = NodeType.Folder
this.children = children
}
updateSize() {
// a child size changed: recalculate our size and propagate upwards, if a parent exists
this.size = this.children?.reduce((acc, node) => acc + Math.max(node.size, 0), 0) || 0
this.parent?.updateSize()
}
async createChildFolder(name: string): Promise<boolean> {
if (name.length == 0 || this.children?.find(c => c.name == name) != undefined) {
console.warn('Folder / File with this name already exists')
return false
}
const fullPath = `/${[...this.pathFromRoot, name].join('/')}`
console.log('Creating folder ', fullPath)
try {
const res = await store.fetchWithAuth(`/api/create_folder?folder_path=${encodeURIComponent(fullPath)}`, {method: 'POST'})
if (res?.status == 202) {
// ACCEPTED
this.fetched = false
await this.fetch()
return true
}
} catch (error) {
console.error(error)
return false
}
return false
}
}
export class File extends Node {
previewType: PreviewType
constructor(name: string, pathFromRoot: string[], shared: string | null, ownedBy: string, previewType: PreviewType = PreviewType.None) {
super(name, pathFromRoot, false, shared, ownedBy)
this.type = NodeType.File
this.previewType = previewType
}
previewUrl(res: number): string {
let url = `/api/preview/file?path=${encodeURIComponent(this.path()??"unknown")}`
if (store.displayMode.value?.mode == DisplayModeType.Files) {
url += `&token=${store.user.value?.raw}`
} else {
url += `&shared_id=${store.displayMode.value?.sharedId}`
}
if (res >= 0) url += `&resolution=${res}`
return url
}
ext(): string {
const all = this.name.split(".")
if (all.length <= 1) return ""
return all[all.length - 1]
}
}
async function getNodeCacheOrFetch(currNode: Node, pathRemaining: string[], pathFromRoot: string[]): Promise<Result<Node, GetNodeError>> {
if (!currNode.fetched) {
// fetch from server
await currNode.fetch()
if (currNode.pathFromRoot.length == 0)
store.rootNode.value = currNode
}
if (pathRemaining.length == 0) return ok(currNode)
const next = pathRemaining.splice(0, 1)[0]
// we know currNode is a folder at this point
const nchild = (currNode as Folder).children?.find(f => f.name == next)
if (nchild == undefined) {
//debugger
console.error(currNode, next)
return err(GetNodeError.NodeNotExisting)
}
//console.log('next', next)
pathFromRoot.push(next)
return getNodeCacheOrFetch(nchild, pathRemaining, pathFromRoot)
}
// TODO reimplement inside node?
/*
// set up listener on state nodeinfo change to fetch if isn't fetched
state.nodeInfoDisplay.subscribeWithId('fs-fetch', async fr => {
console.log(fr)
if (fr != null && !fr.fetched) state.nodeInfoDisplay.emit( await getNode(fr.pathFromRoot) )
})
*/
/**
*
* @param {string | string[]} path either a '/'-separated string or an already split array
* @returns {Promise<Result<Node, GetNodeError>>}
*/
export async function getNode(path: string | string[]): Promise<Result<Node, GetNodeError>> {
if (typeof path == 'string') path = path.split('/').filter(e => e.length > 0)
/**
* @type {Node}
*/
const curr = await getNodeCacheOrFetch(store.rootNode.value as Node, path, [])
//console.log('getNode', curr)
if (curr.isOk() && !curr.value.fetched) {
console.error('node is not fetched, something went wrong')
return err(GetNodeError.ServerNotReachable)
}
return curr
}
export async function updateShared(shared: Array<{ path: string; share_id: string }>): Promise<Node[]> {
const res = []
for (const entry of shared) {
const f = await getNode(entry.path)
if (f.isOk()) {
res.push(f.value)
f.value.shared = entry.share_id
}
}
return res
}
export function reset() {
console.log('Reset of filesystem cache')
store.rootNode.value = new Folder('', undefined, [], null, store.user.value?.payload.userId || "unknown")
}
reset()
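// A minimal usage sketch (illustrative only, not part of the original module):
// resolve a path with getNode and branch on the Result. The function name
// `openPath` and its log messages are assumptions made for the example.
export async function openPath(path: string): Promise<void> {
const res = await getNode(path)
if (res.isErr()) {
// res.error is a GetNodeError: ServerNotReachable or NodeNotExisting
console.error('could not resolve node:', res.error)
return
}
const node = res.value
if (node instanceof Folder) console.log(`folder with ${node.children?.length ?? 0} children`)
else console.log(`file of size ${node.size} bytes`)
}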
# QARC/VQPN/baselines/fc.py (from the kasimte/QARC repository)
from __future__ import division, print_function, absolute_import
import tflearn
import h5py
import os
def load_h5(filename):
    # Read the datasets fully into memory so the file handle can be closed safely;
    # returning live h5py dataset objects would break once the file is garbage-collected.
    with h5py.File(filename, 'r') as h5f:
        X = h5f['X'][:]
        Y = h5f['Y'][:]
    return X, Y
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
X, Y = load_h5('train_720p_vmaf.h5')
testX, testY = load_h5('test_720p_vmaf.h5')
input_layer = tflearn.input_data(shape=[None, 25, 36, 64, 3])
dense1 = tflearn.fully_connected(input_layer, 64, activation='relu')
dense1 = tflearn.fully_connected(dense1, 64, activation='relu')
dense1 = tflearn.fully_connected(dense1, 64, activation='relu')
out = tflearn.fully_connected(dense1, 5, activation='sigmoid')
net = tflearn.regression(out, optimizer='adam',
loss='mean_square', learning_rate=1e-3)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=100, validation_set=(testX, testY),
show_metric=False, run_id="dense_model")
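# Post-training sanity check (a sketch of our own; assumes the test set is non-empty):
# predict the five sigmoid outputs for the first held-out sample.
pred = model.predict(testX[:1])
print('predicted scores for first test sample:', pred)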
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.resourcemanager.network.implementation;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedIterable;
import com.azure.resourcemanager.network.NetworkManager;
import com.azure.resourcemanager.network.fluent.VirtualNetworkPeeringsClient;
import com.azure.resourcemanager.network.models.Network;
import com.azure.resourcemanager.network.models.NetworkPeering;
import com.azure.resourcemanager.network.models.NetworkPeerings;
import com.azure.resourcemanager.network.fluent.models.VirtualNetworkPeeringInner;
import com.azure.resourcemanager.resources.fluentcore.arm.ResourceUtils;
import com.azure.resourcemanager.resources.fluentcore.arm.collection.implementation.IndependentChildrenImpl;
import reactor.core.publisher.Mono;
/** Implementation for network peerings. */
class NetworkPeeringsImpl
extends IndependentChildrenImpl<
NetworkPeering,
NetworkPeeringImpl,
VirtualNetworkPeeringInner,
VirtualNetworkPeeringsClient,
NetworkManager,
Network>
implements NetworkPeerings {
private final NetworkImpl network;
// Constructor to use from the context of a parent
NetworkPeeringsImpl(final NetworkImpl parent) {
super(parent.manager().serviceClient().getVirtualNetworkPeerings(), parent.manager());
this.network = parent;
}
@Override
public NetworkPeeringImpl define(String name) {
return wrapModel(name);
}
// Fluent model create helpers
@Override
protected NetworkPeeringImpl wrapModel(String name) {
VirtualNetworkPeeringInner inner = new VirtualNetworkPeeringInner().withName(name);
return new NetworkPeeringImpl(inner, this.network);
}
@Override
protected NetworkPeeringImpl wrapModel(VirtualNetworkPeeringInner inner) {
return (inner != null) ? new NetworkPeeringImpl(inner, this.network) : null;
}
@Override
public Mono<Void> deleteByParentAsync(String groupName, String parentName, final String name) {
return this
.manager()
.networks()
// Get the parent network of the peering to delete
.getByResourceGroupAsync(groupName, parentName)
// Then find the local peering to delete
.flatMap(
localNetwork -> {
if (localNetwork == null) {
return Mono.empty(); // Missing local network, so nothing else to do
} else {
String peeringId = localNetwork.id() + "/peerings/" + name;
return localNetwork.peerings().getByIdAsync(peeringId);
}
})
.flux()
// Then get the remote peering if available and possible to delete
.flatMap(
localPeering -> {
if (localPeering == null) {
return Mono.empty();
} else if (!localPeering.isSameSubscription()) {
return Mono.just(localPeering);
} else {
return Mono.just(localPeering).concatWith(localPeering.getRemotePeeringAsync());
}
})
// Then delete each peering (this will be called for each of the peerings, so at least once for the local
// peering, and a second time for the remote one, if any)
.flatMap(
peering -> {
if (peering == null) {
return Mono.empty();
} else {
String networkName = ResourceUtils.nameFromResourceId(peering.networkId());
return peering
.manager()
.serviceClient()
.getVirtualNetworkPeerings()
.deleteAsync(peering.resourceGroupName(), networkName, peering.name());
}
})
// Then continue till the last peering is deleted
.then();
}
@Override
public Mono<NetworkPeering> getByParentAsync(String resourceGroup, String parentName, String name) {
return this.innerModel().getAsync(resourceGroup, parentName, name).map(inner -> wrapModel(inner));
}
@Override
public PagedIterable<NetworkPeering> listByParent(String resourceGroupName, String parentName) {
return wrapList(this.innerModel().list(resourceGroupName, parentName));
}
@Override
public PagedIterable<NetworkPeering> list() {
return this.wrapList(this.innerModel().list(this.network.resourceGroupName(), this.network.name()));
}
@Override
public PagedFlux<NetworkPeering> listAsync() {
return this.wrapPageAsync(this.innerModel().listAsync(this.network.resourceGroupName(), this.network.name()));
}
@Override
public NetworkPeering getByRemoteNetwork(Network network) {
return (network != null) ? this.getByRemoteNetwork(network.id()) : null;
}
@Override
public NetworkPeering getByRemoteNetwork(String remoteNetworkResourceId) {
if (remoteNetworkResourceId != null) {
for (NetworkPeering peering : this.list()) {
if (peering.remoteNetworkId().equalsIgnoreCase(remoteNetworkResourceId)) {
return peering;
}
}
}
return null;
}
@Override
public Mono<NetworkPeering> getByRemoteNetworkAsync(Network network) {
if (network != null) {
return this.getByRemoteNetworkAsync(network.id());
} else {
return Mono.empty();
}
}
@Override
public Mono<NetworkPeering> getByRemoteNetworkAsync(final String remoteNetworkResourceId) {
if (remoteNetworkResourceId == null) {
return Mono.empty();
} else {
return this
.listAsync()
.filter(
peering -> {
if (peering == null) {
return false;
} else {
return remoteNetworkResourceId.equalsIgnoreCase(peering.remoteNetworkId());
}
})
.last();
}
}
}
# cms/sitemaps/__init__.py
from .cms_sitemap import CMSSitemap # nopyflakes
(Corrects name of franchisee company throughout)
RIO DE JANEIRO, Feb 24 (Reuters) - Brazilian unions have filed suit alleging that the largest operator of McDonald’s restaurants in Latin America violates Brazil’s labor laws, a case that could lead to fines of up to 30 percent of annual sales.
According to the suit filed on Monday in Brazil’s federal labor court in Brasilia, violations committed by Arcos Dorados Holdings Inc. amount to “social dumping” and help McDonald’s Corp. illegally undercut competitors and boost profit.
The suit seeks an immediate order to end the practices described in the suit and an injunction banning the opening of any new McDonald’s restaurants in Brazil until the problems are fixed.
It also seeks restitution of lost pay and damages for the rights violated. No figure was mentioned, but fines under Brazilian labor law can vary between 1 percent and 30 percent of a company’s annual sales.
The suit is backed by two of Brazil’s largest labor federations, CUT and UTG, as well as the Washington, DC-based Service Employees International Union (SEIU). It comes as U.S. unions representing retail and fast-food workers are pushing to boost pay and benefits for jobs that frequently pay the minimum wage or little more.
The unions accuse Arcos Dorados of a variety of violations under Brazil’s extensive labor code, including unwholesome and unsanitary working conditions, time-clock fraud and failure to pay mandatory unemployment and retirement insurance.
It also says Arcos Dorados paid below legal or contractual minimum wages, forced double-shift work without breaks, forced workers to take in-restaurant lunch breaks with employer-supplied food and failed to make mandatory severance payments.
Arcos Dorados said in a statement that it has not yet seen the content of the lawsuit but defended its labor practices, saying the company “is absolutely confident in its labor practices and in the meeting of all the norms and laws it is subject to in all the places it works.”
It also said it abides by agreements with Brazilian labor prosecutors over past labor law violations.
With regard to the accusation of social dumping, the suit asks the court to refer the case to CADE, Brazil’s anti-trust regulator, on grounds that the company’s labor practices violate competition laws. (Reporting by Jeb Blount; Editing by Dan Grebler)
// NewHome creates a new Home connected to the given Plant through a newly created Cable.
func NewHome(plant *Plant) *Home {
log.Printf("NewHome")
address := plant.ServerAddress()
cable := newCable(address)
return &Home{Cable: cable, Plant: plant}
}
export const ORIGIN_PAGE = 'SPECKLE_ORIGIN_PAGE'
export const ORIGIN_CONTENT = 'SPECKLE_ORIGIN_CONTENT'
|
/**
 * Verifies that the given alias can be registered for the given type key in a {@literal null}-safe
 * manner, rejecting conflicts with previously registered aliases.
 *
 * @param key must not be {@literal null}.
 * @param alias can be {@literal null}.
 * @return the verified alias.
 */
private Alias verify(ClassTypeInformation<?> key, Alias alias) {
Alias existingAlias = typeMap.getOrDefault(key, Alias.NONE);
if (existingAlias.isPresentButDifferent(alias)) {
throw new IllegalArgumentException(
String.format("Trying to register alias '%s', but found already registered alias '%s' for type %s!", alias,
existingAlias, key));
}
if (typeMap.containsValue(alias)) {
typeMap.entrySet().stream()
.filter(it -> it.getValue().hasSamePresentValueAs(alias) && !it.getKey().equals(key))
.findFirst().ifPresent(it -> {
throw new IllegalArgumentException(String.format(
"Detected existing type mapping of %s to alias '%s' but attempted to bind the same alias to %s!", key,
alias, it.getKey()));
});
}
return alias;
}
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_WINDOW_H
#define ANDROID_WINDOW_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* Window flags, as per the Java API at android.view.WindowManager.LayoutParams.
*/
enum {
AWINDOW_FLAG_ALLOW_LOCK_WHILE_SCREEN_ON = 0x00000001,
AWINDOW_FLAG_DIM_BEHIND = 0x00000002,
AWINDOW_FLAG_BLUR_BEHIND = 0x00000004,
AWINDOW_FLAG_NOT_FOCUSABLE = 0x00000008,
AWINDOW_FLAG_NOT_TOUCHABLE = 0x00000010,
AWINDOW_FLAG_NOT_TOUCH_MODAL = 0x00000020,
AWINDOW_FLAG_TOUCHABLE_WHEN_WAKING = 0x00000040,
AWINDOW_FLAG_KEEP_SCREEN_ON = 0x00000080,
AWINDOW_FLAG_LAYOUT_IN_SCREEN = 0x00000100,
AWINDOW_FLAG_LAYOUT_NO_LIMITS = 0x00000200,
AWINDOW_FLAG_FULLSCREEN = 0x00000400,
AWINDOW_FLAG_FORCE_NOT_FULLSCREEN = 0x00000800,
AWINDOW_FLAG_DITHER = 0x00001000,
AWINDOW_FLAG_SECURE = 0x00002000,
AWINDOW_FLAG_SCALED = 0x00004000,
AWINDOW_FLAG_IGNORE_CHEEK_PRESSES = 0x00008000,
AWINDOW_FLAG_LAYOUT_INSET_DECOR = 0x00010000,
AWINDOW_FLAG_ALT_FOCUSABLE_IM = 0x00020000,
AWINDOW_FLAG_WATCH_OUTSIDE_TOUCH = 0x00040000,
AWINDOW_FLAG_SHOW_WHEN_LOCKED = 0x00080000,
AWINDOW_FLAG_SHOW_WALLPAPER = 0x00100000,
AWINDOW_FLAG_TURN_SCREEN_ON = 0x00200000,
AWINDOW_FLAG_DISMISS_KEYGUARD = 0x00400000,
};
#ifdef __cplusplus
};
#endif
#endif // ANDROID_WINDOW_H
/**
* Validate the plan instance.
*
* @param plan
* Full instance of plan object.
* @return The validated data object instance.
* @throws ObjectNotFoundException
* If specified object could not be found.
*/
@PreAuthorize("hasRole('ROLE_PERSON_MAP_WRITE')")
@RequestMapping(value = "/validate", method = RequestMethod.POST)
public @ResponseBody
TemplateTO validatePlan(final HttpServletResponse response,
@RequestBody final TemplateTO plan)
throws ObjectNotFoundException {
TemplateTO validatedTO = getService().validate(plan);
return validatedTO;
}
Saudi Arabia signaled it's ready to cut oil production more than expected, a surprise announcement minutes after Russia and several other non-OPEC countries pledged to curb output next year.
Taken together, OPEC's first deal with its rivals since 2001 and the Saudi comments represent an effort by producers to wrest back control of the global oil market, depressed by persistent oversupply and record inventories.
"This is shock and awe by Saudi Arabia," said Amrita Sen, chief oil analyst at Energy Aspects Ltd. in London. "It shows the commitment of Riyadh to rebalance the market and should end concerns about OPEC delivering the deal."
Oil prices have surged more than 15 percent since OPEC announced Nov. 30 it will cut production for the first time in eight years, rising this week briefly above $55.
The price rise has propelled the shares of energy groups from Exxon Mobil Corp. to shale firms such as Continental Resources Inc.
Riyadh agreed with OPEC on Nov. 30 to cut its production to 10.06 million barrels a day, down from a record high of nearly 10.7 million barrels in July.
Growth in demand
"I can tell you with absolute certainty that effective Jan. 1 we're going to cut and cut substantially to be below the level that we have committed to on Nov. 30," Saudi oil minister Khalid al-Falih said after Saturday's meeting.
The Saudi minister said he was ready to cut below the psychologically significant level of 10 million barrels a day - a level the kingdom has sustained since March 2015 - depending on market conditions.
Al-Falih made his announcement after non-OPEC countries agreed to reduce production by 558,000 barrels a day, suggesting he had been waiting for the deal before committing to further cuts.
The non-OPEC reduction is equal to the anticipated demand growth next year in China and India, according to data from the International Energy Agency.
The OPEC and non-OPEC pact encompasses countries that pump 60 percent of the world's oil but excludes major producers such as the U.S., China, Canada, Norway and Brazil.
"The deal speaks volumes about the Saudi commitment to rebalance the market," said Yasser Elguindi, a veteran OPEC watcher with consultant Medley Global Advisors. "No one is talking any more about $30 a barrel oil."
Saudi Arabia has long insisted that any reductions from the group should be accompanied by action from other suppliers. OPEC two weeks ago agreed to reduce its own production by 1.2 million barrels a day.
Al-Falih and his Russian counterpart Alexander Novak revealed they have been working for nearly a year on the agreement, meeting multiple times in secret.
"This is truly a historic event," Novak said. "It's the first time so many oil countries from different parts of the world gathered in one room to accomplish what we have done."
Move might backfire
Russia pledged to cut output by 300,000 barrels a day next year, down from a 30-year high last month of 11.2 million barrels a day.
Mexico agreed to cut 100,000 barrels a day, Azerbaijan 35,000 and Oman 40,000.
The chain of announcements signal Saudi Arabia is trying to push oil prices above $60 a barrel - and perhaps closer to $70 a barrel - as it attempts to fill a fiscal hole and prepares a partial flotation of its crown jewel, state-owned oil company Saudi Aramco, in 2018. But the move toward higher prices might backfire as it risks the resurgence of U.S. shale drilling from Texas to North Dakota.
"Emotionally, the market will likely rally," said Adam Ritchie, founder of AR Oil Consulting. "But beyond rebalancing supply and demand, we have excess inventory that is astronomic that will continue to keep a lid on prices."
Oil fell from over $90 per barrel in early 2014 to as low as $40 early this year, briefly sending the average price of regular gasoline at the pump to under $2 for motorists in the United States. Oil closed at $51.58 on Friday.
The focus of the market will turn now to compliance as historically OPEC and non-OPEC countries have cut far less than promised. In late 2001, for example, Moscow promised to reduce output, but actually it increased it the following year.
“The oil-price crash impelled terrified producers into collective supply restraint agreements,” said Bob McNally, founder of consultant Rapidan Group in Washington and a former White House oil official. “Occasionally these loose, ad-hoc producer agreements enjoyed temporary success, but all eventually failed due to cheating.”
Average Behaviour in Discrete-Time Imprecise Markov Chains: A Study of Weak Ergodicity
We study the limit behaviour of upper and lower bounds on expected time averages in imprecise Markov chains; a generalised type of Markov chain where the local dynamics, traditionally characterised by transition probabilities, are now represented by sets of 'plausible' transition probabilities. Our first main result is a necessary and sufficient condition under which these upper and lower bounds, called upper and lower expected time averages, will converge as time progresses towards infinity to limit values that do not depend on the process' initial state. Our condition is considerably weaker than that needed for ergodic behaviour; a similar notion which demands that marginal upper and lower expectations of functions at a single time instant converge to so-called limit (or steady-state) upper and lower expectations. For this reason, we refer to our notion as 'weak ergodicity'. Our second main result shows that, as far as this weakly ergodic behaviour is concerned, one should not worry about which type of independence assumption to adopt: epistemic irrelevance, complete independence or repetition independence. The characterisation of weak ergodicity as well as the limit values of upper and lower expected time averages do not depend on such a choice. Notably, this type of robustness is not exhibited by the notion of ergodicity and the related inferences of limit upper and lower expectations. Finally, though limit upper and lower expectations are often used to provide approximate information about the limit behaviour of time averages, we show that such an approximation is sub-optimal and that it can be significantly improved by directly using upper and lower expected time averages.
Introduction
Markov chains are probabilistic models that are used to describe the uncertain dynamics of a large variety of stochastic processes. One of the key results in the field is the point-wise ergodic theorem. It establishes a relation between the long-term time average $f_{\mathrm{av}}(X_{1:k}) = \frac{1}{k} \sum_{i=1}^{k} f(X_i)$ of a real-valued function $f$ and its limit expectation $E_\infty(f) = \lim_{k \to +\infty} E(f(X_k))$, which is guaranteed to exist if the Markov chain is ergodic. For this reason, limit expectations and limit distributions have become central objects of interest. Of course, if one is interested in the long-term behaviour of time averages, one could also study the expected values $E(f_{\mathrm{av}}(X_{1:k}))$ of these averages directly. This is not often done though, because, if the Markov chain is ergodic, the limit of these expected time averages coincides with the aforementioned limit expectations, which can straightforwardly be obtained by solving a linear eigenproblem. However, for a Markov chain that is not ergodic, the limit expectation $E_\infty(f)$ does not necessarily exist and can therefore not be used to provide us with information about the average behaviour of $f$. The expected time average $E(f_{\mathrm{av}}(X_{1:k}))$, or its limit for $k \to +\infty$, if it exists, then serves as a seemingly suitable alternative; Figure 1 depicts a basic example where this is the case. So we see that even in the context of traditional "precise" Markov chains, expected time averages have the potential to be more informative about the long-term average of $f$ compared to the limit expectation $E_\infty(f)$.
[Figure 1: a cyclic Markov chain with two states $a$ and $b$, where $P(a \mid b) = 1$ and $P(b \mid a) = 1$. Suppose that we want to have an idea about the average number of times that the process will be in state $b$, so we are interested in the limit behaviour of $f_{\mathrm{av}}(X_{1:k})$ where $f(a) := 0$ and $f(b) := 1$. Though the expectation $E_\infty(f)$ does not exist (if the initial distribution differs from the uniform distribution), the expectation $E(f_{\mathrm{av}}(X_{1:k}))$ converges to $1/2$ irrespective of the process' initial state or distribution. This value is clearly representative for the long-term average of $f$.]
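To make the claim in Figure 1 concrete, here is a quick verification of our own for a process started in state $a$: the chain then deterministically alternates $a, b, a, b, \ldots$, so that
\[
E\bigl(f_{\mathrm{av}}(X_{1:k}) \,\big|\, X_1 = a\bigr) = \frac{1}{k} \sum_{i=1}^{k} f(x_i) = \frac{\lfloor k/2 \rfloor}{k} \longrightarrow \frac{1}{2} \quad (k \to +\infty),
\]
while $E(f(X_k) \mid X_1 = a)$ alternates between 0 and 1 and has no limit; starting from $b$ yields the average $\lceil k/2 \rceil / k$, with the same limit $1/2$.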
In this work, we consider a generalisation of Markov chains, called imprecise Markov chains, for which the study of long-term average behaviour becomes somewhat more complex. Imprecise Markov chains are sets of traditional "precise" probabilistic models, where the Markov property (history independence) and the time-homogeneity property apply to this set of precise models as a whole, but not necessarily to the individual models themselves. In fact, one distinguishes between three different types of imprecise Markov chains (IMC's):
• IMC under epistemic irrelevance: the individual models do not (necessarily) satisfy the Markov property, nor the time-homogeneity property.
• IMC under complete independence: the individual models satisfy the Markov property, but not (necessarily) the time-homogeneity property.
• IMC under repetition independence: the individual models satisfy both the Markov property and the time-homogeneity property.
(A fourth type of imprecise Markov chain that is often encountered in the literature, especially in the more general context where imprecise Markov chains are simply regarded as special credal networks, are IMC's under strong independence: convex hulls of IMC's under complete independence. However, as one of us argues for the case of credal networks, we are of the opinion that such models lack a clear and sensible meaning. Moreover, the resulting upper and lower expectations, the inferences that we will be interested in here, are identical to those for IMC's under complete independence. We will therefore not consider them in our study.)
So an imprecise Markov chain under repetition independence only allows one to incorporate model uncertainty about the numerical values of the transition probabilities that make up a Markov chain, while an imprecise Markov chain under epistemic irrelevance also allows one to take into account uncertainty about the structural assumptions of being time-homogeneous and satisfying the Markov property. Regardless of the type of imprecise Markov chain that is used, one is typically interested in obtaining tight upper and lower bounds on inferences for the individual constituting models. The operators that represent these upper and lower bounds are respectively called upper and lower expectations, and we will, for the time being, denote them by $\overline{E}(\cdot)$ and $\underline{E}(\cdot)$ respectively. Just like ergodicity in traditional Markov chains, an imprecise Markov chain is said to be ergodic if the limit upper expectation $\overline{E}_\infty(f) = \lim_{k \to +\infty} \overline{E}(f(X_k))$ and the limit lower expectation $\underline{E}_\infty(f) = \lim_{k \to +\infty} \underline{E}(f(X_k))$ exist and do not depend on the process' initial state or distribution. There are necessary and sufficient conditions for when this is the case, as well as an imprecise variant of the point-wise ergodic theorem; these results only hold for imprecise Markov chains under epistemic irrelevance and under complete independence. An important difference with traditional Markov chains, however, is that even if an imprecise Markov chain is ergodic and the limit upper expectation $\overline{E}_\infty(f)$ and the limit lower expectation $\underline{E}_\infty(f)$ exist, the upper and lower expected time averages $\overline{E}(f_{\mathrm{av}}(X_{1:k}))$ and $\underline{E}(f_{\mathrm{av}}(X_{1:k}))$ may not converge to them, that is, to $\overline{E}_\infty(f)$ and $\underline{E}_\infty(f)$, respectively. Nevertheless, because they (i) give conservative bounds, (ii) are fairly easy to compute and (iii) satisfy a point-wise ergodic theorem, the inferences $\overline{E}_\infty(f)$ and $\underline{E}_\infty(f)$ are often used as descriptors of the long-term behaviour of imprecise Markov chains, even if one is actually interested in time averages. This comes at a cost though: as we will show in Section 4, both types of inferences can differ greatly, with limit upper and lower expectations sometimes providing far too conservative bounds. Unfortunately, apart from some experiments in earlier work, little is known about the long-term behaviour of the upper and lower expected time averages $\overline{E}(f_{\mathrm{av}}(X_{1:k}))$ and $\underline{E}(f_{\mathrm{av}}(X_{1:k}))$. The aim of this paper is to remedy this situation. Our main result is an accessibility condition that is necessary and sufficient for these upper and lower expected time averages to (each) converge to a limit value that does not depend on the process' initial state (or distribution). Remarkably, this condition is considerably weaker than the one required for ergodicity. This explains why we call this type of behaviour 'weak ergodicity' (or 'weakly ergodic behaviour').
Moreover, we also show that this notion of weak ergodicity does not depend on the adopted type of imprecise Markov chain; whether one considers an imprecise Markov chain under epistemic irrelevance, complete independence or repetition independence is not relevant for the weakly ergodic behaviour of the Markov chain. More precisely, given sufficient model parameters, both the accessibility condition that characterises weak ergodicity, as well as, if this condition is satisfied, the limit values of the inferences $\overline{E}(f_{\mathrm{av}}(X_{1:k}))$ and $\underline{E}(f_{\mathrm{av}}(X_{1:k}))$, are the same, no matter what kind of IMC we consider. Conventional ergodicity does not exhibit this kind of robustness; we illustrate this in Example 2. This provides yet another argument for why the (limits of) upper and lower expected time averages $\overline{E}(f_{\mathrm{av}}(X_{1:k}))$ and $\underline{E}(f_{\mathrm{av}}(X_{1:k}))$ are prime candidates for the objects of interest when looking at the long-term average behaviour of imprecise Markov chains.
The outline of the paper is as follows. We start by introducing "precise" Markov chains in Section 2 and subsequently generalise towards the case of imprecise Markov chains in Section 3. We then focus, as a first step, on average behaviour in imprecise Markov chains under epistemic irrelevance and complete independence, temporarily leaving imprecise Markov chains under repetition independence out of the picture. As mentioned before, we will study two types of inferences: (limits of) upper and lower expectations of a function evaluated at a single time instant, and (limits of) upper and lower expected time averages of a function. In Section 4, we give recursive expressions for how these inferences evolve through time, introduce the notions of ergodicity and weak ergodicity, and moreover illustrate, using two basic examples, that weak ergodicity has some considerable advantages over "conventional" ergodicity when it comes to characterising average behaviour. Section 5 introduces essential mathematical machinery needed in order to arrive at our results in Sections 6-8; we explain what it means for a map to be topical and introduce some graph-theoretic notions. In the subsequent section, Section 6, we derive a sufficient condition for weak ergodicity by borrowing an eigenproblem result from the theory of topical maps. Section 7 then shows that this condition can be replaced by a weaker one that is not only sufficient, but also necessary. Finally, we consider the case of imprecise Markov chains under repetition independence and relate their weak ergodicity to that of imprecise Markov chains under epistemic irrelevance or complete independence. This will be the subject of Section 8. As mentioned before, it will turn out that for all three types of imprecise Markov chains, weak ergodicity is characterised by the same condition and the limit upper (lower) expected time averages are all equal. This paper extends upon an earlier conference paper; we provide proofs for the results presented there, and extend our study of weak ergodicity to also include imprecise Markov chains under repetition independence. In order not to lose the reader's focus, we have chosen to relegate some of the more technical proofs to an appendix at the end of the paper. This is particularly true for the results in Sections 7 and 8, where the main text provides, for the most part, an informal argument that aims to provide intuition.
Markov Chains
We consider an infinite sequence $X_1 X_2 X_3 \cdots$ of uncertain states, where each state $X_k$ at time $k \in \mathbb{N}$ takes values in some finite set $\mathcal{X}$, called the state space. Such a sequence $X_1 X_2 X_3 \cdots$ will be called a (discrete-time) stochastic process. For any $k, \ell \in \mathbb{N}$ such that $k \le \ell$, we use $X_{k:\ell}$ to denote the finite subsequence $X_k \cdots X_\ell$ of states that takes values in $\mathcal{X}^{\ell-k+1}$. Moreover, for any $k, \ell \in \mathbb{N}$ such that $k \le \ell$ and any $x_{k:\ell} \in \mathcal{X}^{\ell-k+1}$, we use $X_{k:\ell} = x_{k:\ell}$ to denote the event that $X_k = x_k, \ldots, X_\ell = x_\ell$. The uncertain dynamics of a stochastic process are then typically described by probabilities of the form $P(X_{k+1} = x_{k+1} \mid X_{1:k} = x_{1:k})$, for any $k \in \mathbb{N}$ and any $x_{1:k+1} \in \mathcal{X}^{k+1}$. They represent beliefs about which state the process will be in at time $k + 1$, given that we know that it was in the states $x_1 \cdots x_k$ at time instances 1 through $k$. Additionally, our beliefs about the value of the initial state $X_1$ can be represented by probabilities $P(X_1 = x_1)$ for all $x_1 \in \mathcal{X}$. The local probability assessments $P(X_{k+1} = x_{k+1} \mid X_{1:k} = x_{1:k})$ and $P(X_1 = x_1)$ can now be combined to construct a global probability model $P$ that describes the dynamics of the process on a more general level. This can be done in various ways, one of the most common ones being a measure-theoretic approach where countable additivity plays a central role. For our purposes, however, we will only require finite additivity. Regardless, once you have such a global probability model $P$, it can then be used to define expectations and make inferences about the uncertain behaviour of the process.
For any set $A$, let us write $\mathcal{L}(A)$ to denote the set of all real-valued functions on $A$. Throughout, for any $B \subseteq A$, we use $\mathbb{I}_B$ to denote the indicator of $B$: the function in $\mathcal{L}(A)$ that takes the value 1 in $B$ and 0 otherwise. We will only be concerned with (upper and lower) expectations of finitary functions: functions that depend on the state of the process at a finite number of time instances. So if $f$ is finitary, we can write $f = g(X_{1:k})$ for some $k \in \mathbb{N}$ and some $g \in \mathcal{L}(\mathcal{X}^k)$. Note that finitary functions are bounded; this follows from their real-valuedness and the fact that $\mathcal{X}$ is finite. The expectation of a finitary function $f(X_{1:k})$ conditional on some event $X_{1:\ell} = x_{1:\ell}$, with $\ell < k$, simply reduces to a finite weighted sum:
\[
E_P(f(X_{1:k}) \mid X_{1:\ell} = x_{1:\ell}) = \sum_{x_{\ell+1:k} \in \mathcal{X}^{k-\ell}} f(x_{1:k}) \, P(X_{\ell+1:k} = x_{\ell+1:k} \mid X_{1:\ell} = x_{1:\ell}).
\]
A particularly interesting case arises when studying stochastic processes that are described by a probability model $P$ that satisfies the Markov property
\[
P(X_{k+1} = y \mid X_{1:k} = x_{1:k}) = P(X_{k+1} = y \mid X_k = x_k)
\]
for all $k \in \mathbb{N}$, all $y \in \mathcal{X}$ and all $x_{1:k} \in \mathcal{X}^k$. This property states that, given the present state of the process, the future behaviour of the process does not depend on its history. A process of this type is called a Markov chain. We moreover call it (time) homogeneous if additionally $P(X_{k+1} = y \mid X_k = x) = P(X_2 = y \mid X_1 = x)$, for all $k \in \mathbb{N}$ and all $x, y \in \mathcal{X}$. Hence, together with the assessments $P(X_1 = x_1)$, the dynamics of a homogeneous Markov chain are fully characterised by the probabilities $P(X_2 = y \mid X_1 = x)$. These probabilities are typically gathered in a transition matrix $T$: a row-stochastic $|\mathcal{X}| \times |\mathcal{X}|$ matrix $T$ that is defined by $T(x, y) := P(X_2 = y \mid X_1 = x)$ for all $x, y \in \mathcal{X}$. This matrix representation $T$ can be regarded as a linear operator from $\mathcal{L}(\mathcal{X})$ to $\mathcal{L}(\mathcal{X})$, defined for any $f \in \mathcal{L}(\mathcal{X})$ and any $x \in \mathcal{X}$ by
\[
T f(x) := \sum_{y \in \mathcal{X}} T(x, y) f(y).
\]
Conveniently, for any $k \in \mathbb{N}$, we then have that $E_P(f(X_{k+1}) \mid X_k = x) = T f(x)$. More generally, it holds that $E_P(f(X_{k+\ell}) \mid X_k = x) = T^\ell f(x)$ for all $k \in \mathbb{N}$, all $\ell \in \mathbb{N}_0 := \mathbb{N} \cup \{0\}$ and all $x \in \mathcal{X}$. Then, under some well-known accessibility conditions, the expectation $T^\ell f(x)$ converges for increasing $\ell$ towards a constant $E_\infty(f)$, independently of the initial state $x$. If this is the case for all $f \in \mathcal{L}(\mathcal{X})$, the homogeneous Markov chain will have a steady-state distribution, represented by the limit expectation $E_\infty$, and we call the Markov chain ergodic. The expectation $E_\infty$ is in particular also useful if we are interested in the limit behaviour of expected time averages. Indeed, let $f_{\mathrm{av}}(X_{1:k}) := \frac{1}{k} \sum_{i=1}^{k} f(X_i)$ be the time average of some function $f \in \mathcal{L}(\mathcal{X})$ evaluated at the time instances 1 through $k$. Then, according to known results, the limit of the expected average $\lim_{k \to +\infty} E_P(f_{\mathrm{av}}(X_{1:k}))$ coincides with the limit expectation $E_\infty(f)$. One of the aims of this paper is to explore to which extent this remains true for imprecise Markov chains.
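As a quick numerical illustration of our own (the numbers are made up and not taken from the paper), consider $\mathcal{X} = \{a, b\}$, $f = \mathbb{I}_b$ and the transition matrix
\[
T = \begin{pmatrix} 0.9 & 0.1 \\ 0.5 & 0.5 \end{pmatrix},
\]
so that $T f = (0.1, 0.5)^{\mathsf{T}}$. The stationary distribution solves $\pi T = \pi$, giving $\pi = (5/6, 1/6)$, and indeed $T^\ell f(x)$ converges to $E_\infty(f) = \pi(b) = 1/6$ for both initial states; by the result just mentioned, this is also the limit of the expected time averages $E_P(f_{\mathrm{av}}(X_{1:k}))$.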
Imprecise Markov Chains
If the basic probabilities $P(X_{k+1} \mid X_{1:k} = x_{1:k})$ that describe a stochastic process are imprecise, in the sense that we only have partial information about them, then we can still model the process' dynamics by considering a set $\mathcal{P}_{x_{1:k}}$ of such probabilities, for all $k \in \mathbb{N}$ and all $x_{1:k} \in \mathcal{X}^k$. This set $\mathcal{P}_{x_{1:k}}$ is then interpreted as the set of all probability mass functions $P(X_{k+1} \mid X_{1:k} = x_{1:k})$ that we deem "plausible". We here consider the special case where the sets $\mathcal{P}_{x_{1:k}}$ satisfy a Markov property, meaning that $\mathcal{P}_{x_{1:k}} = \mathcal{P}_{x_k}$ for all $k \in \mathbb{N}$ and all $x_{1:k} \in \mathcal{X}^k$. Similarly to the precise case, the sets $\mathcal{P}_x$, for all $x \in \mathcal{X}$, can be gathered into a single object $\mathcal{T}$: the set of all row-stochastic $|\mathcal{X}| \times |\mathcal{X}|$ matrices $T$ such that, for all $x \in \mathcal{X}$, the probability mass function $T(x, \cdot)$ is an element of $\mathcal{P}_x$. A set of transition matrices defined in this way is called separately specified; this property asserts that, for any two transition matrices $T_1, T_2 \in \mathcal{T}$ and any subset $A \subseteq \mathcal{X}$, there is a third transition matrix $T_3 \in \mathcal{T}$ such that $T_3(x, \cdot) = T_1(x, \cdot)$ for all $x \in A$ and $T_3(y, \cdot) = T_2(y, \cdot)$ for all $y \in \mathcal{X} \setminus A$. For any such set $\mathcal{T}$, the corresponding imprecise Markov chain under epistemic irrelevance $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$ is the set of all (precise) probability models $P$ such that $P(X_{k+1} \mid X_{1:k} = x_{1:k}) \in \mathcal{P}_{x_k}$ for all $k \in \mathbb{N}$ and all $x_{1:k} \in \mathcal{X}^k$. The values of the probabilities $P(X_1 = x_1)$ will be of no importance to us, because we will focus solely on (upper and lower) expectations conditional on the value of the initial state $X_1$.
Clearly, an imprecise Markov chain $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$ also contains non-homogeneous, and even non-Markovian, processes. So the Markov property does in this case not apply to the individual probability assessments, but rather to the sets $\mathcal{P}_{x_{1:k}}$. The model $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$ is therefore a generalisation of a traditional Markov chain where we allow for model uncertainty about, on the one hand, the mass functions $P(X_{k+1} \mid X_{1:k} = x_{1:k})$ and, on the other hand, structural assumptions such as the Markov and time-homogeneity property. In order to make inferences that are robust with respect to this model uncertainty, we will use upper and lower expectations. These operators are respectively defined as the tightest upper and lower bound on the expectation $E_P$ associated with any probability model $P$ in $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$: for any finitary function $f$ and any event $A$ of the form $X_{1:k} = x_{1:k}$,
\[
\overline{E}^{\mathrm{ei}}(f \mid A) := \sup_{P \in \mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}} E_P(f \mid A) \quad \text{and} \quad \underline{E}^{\mathrm{ei}}(f \mid A) := \inf_{P \in \mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}} E_P(f \mid A).
\]
Apart from epistemic irrelevance, there are also other types of independence assumptions for imprecise Markov chains that impose more stringent conditions on the individual composing probability models. For a given set $\mathcal{T}$, the imprecise Markov chain under complete independence $\mathcal{P}^{\mathrm{ci}}_{\mathcal{T}}$ is the subset of $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$ that contains all, possibly non-homogeneous, Markov chains in $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$. The models $\mathcal{P}^{\mathrm{ci}}_{\mathcal{T}}$, also known as 'Markov set-chains', were the first types of imprecise Markov chains to be thoroughly studied. They can be motivated in a rather straightforward way, using a 'sensitivity analysis interpretation'; the set $\mathcal{T}$ is then regarded as a result of our ignorance about some "true" transition matrix $T_k$ that may depend on the time $k$. A third type of imprecise Markov chain that we will associate with $\mathcal{T}$ is the corresponding imprecise Markov chain under repetition independence $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$, which is the subset of $\mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$ containing all homogeneous Markov chains. Similarly as for $\mathcal{P}^{\mathrm{ci}}_{\mathcal{T}}$, the model $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$ can be motivated using a sensitivity analysis interpretation, where the unknown matrix $T_k$ is now assumed to be fixed in time. Observe that the models $\mathcal{P}^{\mathrm{ci}}_{\mathcal{T}}$ and $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$ do not allow us to incorporate uncertainty about the Markov assumption, a feature that only imprecise Markov chains under epistemic irrelevance have. Moreover, though imprecise Markov chains under epistemic irrelevance can also be justified starting from a sensitivity analysis interpretation, where the underlying 'true' probability model is in that case not assumed to be Markov, they are especially suitable when we regard the sets $\mathcal{P}_x$ as arising from the (subjective) beliefs of a subject that is uncertain about the process' next state value, like Walley does. The fact that these sets $\mathcal{P}_x$ satisfy a Markov property then simply means that our subject's beliefs are solely based on the current state $x$ of the process; we refer to the literature for further details.
Similarly to how we defined upper and lower expectations for imprecise Markov chains under epistemic irrelevance, we can define the upper expectations $\overline{E}^{\mathrm{ci}}$ and $\overline{E}^{\mathrm{ri}}$ (and the lower expectations $\underline{E}^{\mathrm{ci}}$ and $\underline{E}^{\mathrm{ri}}$) as the tightest upper (and lower) bounds on the expectations corresponding to the models in $\mathcal{P}^{\mathrm{ci}}_{\mathcal{T}}$ and $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$, respectively. Upper and lower probabilities can also be defined in the same way as before: as upper and lower expectations of indicators. Furthermore, note that, since $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}} \subseteq \mathcal{P}^{\mathrm{ci}}_{\mathcal{T}} \subseteq \mathcal{P}^{\mathrm{ei}}_{\mathcal{T}}$, we have that
\[
\underline{E}^{\mathrm{ei}}(f \mid A) \le \underline{E}^{\mathrm{ci}}(f \mid A) \le \underline{E}^{\mathrm{ri}}(f \mid A) \le \overline{E}^{\mathrm{ri}}(f \mid A) \le \overline{E}^{\mathrm{ci}}(f \mid A) \le \overline{E}^{\mathrm{ei}}(f \mid A)
\]
for any finitary function $f$ and any event $A$ of the form $X_{1:k} = x_{1:k}$. Henceforth, we let $\mathcal{T}$ be some generic set of transition matrices that is separately specified.
In this paper, we will be specifically concerned with two types of inferences: the conditional upper (and lower) expectation of a function $f \in \mathcal{L}(\mathcal{X})$ evaluated at a single time instant $k$, and the conditional upper (and lower) expectation of the time average $f_{\mathrm{av}}(X_{1:k})$ of a function $f \in \mathcal{L}(\mathcal{X})$, given that we start in some $x \in \mathcal{X}$. For imprecise Markov chains under epistemic irrelevance and under complete independence, both of these inferences coincide. For any $f \in \mathcal{L}(\mathcal{X})$, any $x \in \mathcal{X}$ and any $k \in \mathbb{N}$, we will denote them by
\[
\overline{E}_k(f \mid x) := \overline{E}^{\mathrm{ei}}(f(X_k) \mid X_1 = x) \quad \text{and} \quad \overline{E}_{\mathrm{av},k}(f \mid x) := \overline{E}^{\mathrm{ei}}(f_{\mathrm{av}}(X_{1:k}) \mid X_1 = x),
\]
where the dependency on $\mathcal{T}$ is implicit. The corresponding lower expectations can be obtained through conjugacy: $\underline{E}_k(f \mid x) = -\overline{E}_k(-f \mid x)$ and $\underline{E}_{\mathrm{av},k}(f \mid x) = -\overline{E}_{\mathrm{av},k}(-f \mid x)$ for all $f \in \mathcal{L}(\mathcal{X})$, all $x \in \mathcal{X}$ and all $k \in \mathbb{N}$. As we will discuss shortly, the behaviour (or evolution) of both of these inferences can be recursively expressed in terms of a single so-called upper transition operator $\overline{T}$. These relations will form the starting point for our further study of the limit behaviour of these inferences. However, similar expressions seem not to exist for the upper expectations $\overline{E}^{\mathrm{ri}}(f(X_k) \mid X_1 = x)$ and $\overline{E}^{\mathrm{ri}}(f_{\mathrm{av}}(X_{1:k}) \mid X_1 = x)$ corresponding to an imprecise Markov chain $\mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$ under repetition independence. As a consequence, such inferences demand a somewhat different approach. For the moment, we therefore omit them from our discussion. We will come back to them in Section 8.
Transition Operators, Ergodicity and Weak Ergodicity
Inferences of the form $\overline{E}_k(f \mid x)$, and, more specifically, upper (and lower) probabilities of events of the form $X_k \in A$, with $A \subseteq \mathcal{X}$, were among the first ones to be thoroughly studied in imprecise Markov chains. The central tool is the upper transition operator $\overline{T}$, defined by $\overline{T} f(x) := \sup\{T f(x) : T \in \mathcal{T}\}$ for all $f \in \mathcal{L}(\mathcal{X})$ and all $x \in \mathcal{X}$. The inferences that we consider satisfy $\overline{E}_k(f \mid x) = \overline{T}^{k-1} f(x)$ and $\overline{E}_{\mathrm{av},k}(f \mid x) = \frac{1}{k} \overline{T}_f^k 0(x)$, where $\overline{T}_f$ is the map defined by $\overline{T}_f h := f + \overline{T} h$ for all $h \in \mathcal{L}(\mathcal{X})$. If these inferences converge as $k \to +\infty$ to limit values that do not depend on the initial state $x$, we denote these limits by $\overline{E}_\infty(f)$ and $\overline{E}_{\mathrm{av},\infty}(f)$, respectively.
Notably, even if an imprecise Markov chain is ergodic (and hence also weakly ergodic) and therefore both $\overline{E}_\infty(f)$ and $\overline{E}_{\mathrm{av},\infty}(f)$ exist, these inferences will not necessarily coincide. This was first observed in an experimental setting, but the differences that were observed there were marginal. The following example shows that these differences can in fact be very substantial.
Example 2. Let $\mathcal{X} := \{a, b\}$, let $\mathcal{P}_a$ be the set of all probability mass functions on $\mathcal{X}$ and let $\mathcal{P}_b$ be the set that consists of the single probability mass function that puts all mass in $a$; see Figure 2. Then, for all $f \in \mathcal{L}(\mathcal{X})$, we have that $\overline{T} f(a) = \max f$ and $\overline{T} f(b) = f(a)$. It follows that $\overline{T}^k f = \max f$ for all $k \ge 2$, so the limit upper expectation $\overline{E}_\infty(f)$ exists and is equal to $\max f$ for all $f \in \mathcal{L}(\mathcal{X})$. In particular, we have that $\overline{E}_\infty(\mathbb{I}_b) = 1$. On the other hand, we find that $\lim_{k \to +\infty} \overline{T}_{\mathbb{I}_b}^k 0 / k$ exists and is equal to $1/2$, so $\overline{E}_{\mathrm{av},\infty}(\mathbb{I}_b) = 1/2$. This value differs significantly from the limit upper expectation $\overline{E}_\infty(\mathbb{I}_b) = 1$.
In fact, this result could have been expected simply by taking a closer look at the dynamics that correspond to $\mathcal{T}$. Indeed, it follows directly from the definition of $\mathcal{P}_b$ that, if the system is in state $b$ at some time instant, then it will surely be in $a$ at the next time instant. Hence, the system can only reside in state $b$ for maximally half of the time, resulting in an upper expected average that converges to $1/2$. These underlying dynamics have little effect on the limit upper expectation $\overline{E}_\infty(\mathbb{I}_b)$, because it is only concerned with the upper expectation of $\mathbb{I}_b$ evaluated at a single time instant.
Finally, we want to draw attention to the fact that the upper expectation $\overline{E}_{\mathrm{av},\infty}(\mathbb{I}_b) = 1/2$ is actually reached by a compatible homogeneous Markov chain. Specifically, it is reached by the Markov chain from Example 1, which is indeed compatible because its transition matrix $T = \bigl(\begin{smallmatrix} 0 & 1 \\ 1 & 0 \end{smallmatrix}\bigr)$ is in $\mathcal{T}$. This already illustrates what will be established later on in Section 8: when we are interested in the limit behaviour of the inferences $\overline{E}_{\mathrm{av},k}(f \mid x)$, we can simply treat them as upper envelopes of the expectations $E_P(f_{\mathrm{av}}(X_{1:k}) \mid X_1 = x)$ that correspond to the compatible homogeneous Markov chains $P$. This is not the case for the limit behaviour of the inferences $\overline{E}_k(f \mid x)$ though; for instance, in the current example, where $\overline{E}_\infty(\mathbb{I}_b) = 1$, the expectation $E_P(\mathbb{I}_b(X_k) \mid X_1 = b)$, for any $P \in \mathcal{P}^{\mathrm{ri}}_{\mathcal{T}}$, is lower than or equal to $1/2$ for even $k$. This is left as an exercise for the reader. (Hint: for any $T = \bigl(\begin{smallmatrix} p & 1-p \\ 1 & 0 \end{smallmatrix}\bigr) \in \mathcal{T}$, find $c_1, c_2 \in \mathbb{R}$ such that $T^2 \mathbb{I}_a = c_1 + c_2 \mathbb{I}_a$, and then use this observation to find an expression for $T^{2\ell} \mathbb{I}_a$.)
Although we have used sets of transition matrices to define imprecise Markov chains, it should at this point be clear that, if we are interested in the inferences $\overline{E}_k(f \mid x)$ and $\overline{E}_{\mathrm{av},k}(f \mid x)$ and their limit values, then it suffices to specify $\overline{T}$. In fact, we will temporarily forget about $\mathcal{T}$ and simply assume that $\overline{T}$ is a general upper transition operator on $\mathcal{L}(\mathcal{X})$. That is, we assume $\overline{T}$ to be any operator from $\mathcal{L}(\mathcal{X})$ to $\mathcal{L}(\mathcal{X})$ that satisfies
U1. $\overline{T} h \le \max h$;
U2. $\overline{T}(h + g) \le \overline{T} h + \overline{T} g$;
U3. $\overline{T}(\lambda h) = \lambda \overline{T} h$;
for all $h, g \in \mathcal{L}(\mathcal{X})$ and all real $\lambda \ge 0$. This can be done without loss of generality, because it is well-established that any operator $\overline{T}$ that is defined as an upper envelope of a set of transition matrices, as we did in Section 4, always satisfies U1-U3. Then again, any upper transition operator can uniquely be represented by a closed, convex set of transition matrices that is separately specified, so there is no gain in generality either. Apart from the axioms above, our results and proofs will also rely on the following three properties that are implied by U1-U3:
U4. $\min h \le \overline{T} h \le \max h$;
U5. if $h \le g$, then $\overline{T} h \le \overline{T} g$;
U6. $\overline{T}(h + \mu) = \overline{T} h + \mu$;
for all $h, g \in \mathcal{L}(\mathcal{X})$ and all real $\mu$. Henceforth, we will simply say that the upper transition operator $\overline{T}$ is ergodic if $\overline{T}^k f$ converges to a constant for all $f \in \mathcal{L}(\mathcal{X})$ and, analogously, we will say that it is weakly ergodic if $\frac{1}{k} \overline{T}_f^k 0$ converges to a constant for all $f \in \mathcal{L}(\mathcal{X})$. So ergodicity and weak ergodicity of $\overline{T}$ are equivalent to the respective notions for an imprecise Markov chain under epistemic irrelevance or complete independence with upper transition operator $\overline{T}$.
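As a quick check of our own on Example 2, one can iterate $\overline{T}_{\mathbb{I}_b}$ by hand, writing $h_k := \overline{T}_{\mathbb{I}_b}^k 0$ as the pair $(h_k(a), h_k(b))$ and using $\overline{T} h = (\max h, h(a))$:
\[
h_0 = (0, 0), \quad h_1 = \mathbb{I}_b + \overline{T} h_0 = (0, 1), \quad h_2 = (1, 1), \quad h_3 = (1, 2), \quad h_4 = (2, 2), \quad \ldots
\]
In general, $h_{2m} = (m, m)$ and $h_{2m+1} = (m, m + 1)$, so $h_k / k$ indeed converges to the constant $1/2$, confirming $\overline{E}_{\mathrm{av},\infty}(\mathbb{I}_b) = 1/2$.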
Accessibility Relations and Topical Maps
To characterise ergodicity and weak ergodicity, we will make use of some well-known graph-theoretic concepts, suitably adapted to the imprecise Markov chain setting; we recall the following. The upper accessibility graph $\mathscr{G}(\overline{T})$ corresponding to $\overline{T}$ is defined as the directed graph with vertices $x_1, \ldots, x_n \in \mathcal{X}$, where $n := |\mathcal{X}|$, with an edge from $x_i$ to $x_j$ if $\overline{T} \mathbb{I}_{x_j}(x_i) > 0$. For any two vertices $x_i$ and $x_j$, we say that $x_j$ is accessible from $x_i$, and write $x_i \to x_j$, if $x_i = x_j$ or if there is a directed path from $x_i$ to $x_j$ in $\mathscr{G}(\overline{T})$. The following result shows that such a directed path exists if and only if there is some $k \in \mathbb{N}$ such that the $k$-step upper probability $\overline{T}^k \mathbb{I}_{x_j}(x_i)$ to transition from $x_i$ to $x_j$ is positive. We say that two vertices $x_i$ and $x_j$ communicate, and write $x_i \leftrightarrow x_j$, if both $x_i \to x_j$ and $x_j \to x_i$. The relation $\to$ is a preorder (reflexive and transitive), and therefore $\leftrightarrow$ is an equivalence relation (reflexive, symmetric and transitive), for which the equivalence classes are called communication classes. Moreover, it is well known that these communication classes form a partition of $\mathcal{X}$. Sometimes, we will allow ourselves a slight abuse of terminology, and call any non-empty set $\mathcal{C} \subseteq \mathcal{X}$ a class in $\mathscr{G}(\overline{T})$. We call the graph $\mathscr{G}(\overline{T})$ strongly connected if any two vertices $x_i$ and $x_j$ in $\mathscr{G}(\overline{T})$ communicate, or equivalently, if $\mathcal{X}$ itself is a communication class. We also extend the domain of the relation $\to$ to include all communication classes, by saying that $A \to B$, for any two communication classes $A$ and $B$, if $x \to y$ for at least one (and hence, since $A$ and $B$ are communication classes, all) $x \in A$ and $y \in B$. Then it can easily be seen that $\to$ induces a partial order (reflexive, antisymmetric and transitive) on the set of communication classes. If there is a communication class that is accessible from every state, we call it the top class $\mathcal{X}_{\mathrm{top}}$; so
\[
\mathscr{G}(\overline{T}) \text{ has a top class} \iff \mathcal{X}_{\mathrm{top}} := \{x \in \mathcal{X} : y \to x \text{ for all } y \in \mathcal{X}\} \neq \emptyset. \tag{3}
\]
Finally, we also say that a class $\mathcal{C}$ is closed if $x \not\to y$ for all $x \in \mathcal{C}$ and all $y \in \mathcal{C}^{\mathrm{c}}$. Since the notions of closedness and maximality coincide for communication classes, it follows that the top class $\mathcal{X}_{\mathrm{top}}$, if it exists, is the only closed communication class in $\mathscr{G}(\overline{T})$.
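As a small illustration of our own, consider the upper transition operator of Example 2. Its graph $\mathscr{G}(\overline{T})$ has edges $a \to a$ and $a \to b$ (since $\overline{T} \mathbb{I}_a(a) = \overline{T} \mathbb{I}_b(a) = 1 > 0$) and an edge $b \to a$ (since $\overline{T} \mathbb{I}_a(b) = \mathbb{I}_a(a) = 1 > 0$), but no edge from $b$ to itself (since $\overline{T} \mathbb{I}_b(b) = \mathbb{I}_b(a) = 0$). Nevertheless, $b \to b$ through the path $b \to a \to b$, so all states communicate, $\mathscr{G}(\overline{T})$ is strongly connected and the top class is $\mathcal{X}$ itself.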
Having a top class is necessary for $\overline{T}$ to be ergodic, but it is not sufficient. Sufficiency additionally requires that the top class is regular and absorbing. These properties are defined, for any closed class $\mathcal{C}$, as
A1. $\mathcal{C}$ is regular: there is some $k^* \in \mathbb{N}$ such that $\overline{T}^k \mathbb{I}_y(x) > 0$ for all $k \ge k^*$ and all $x, y \in \mathcal{C}$;
A2. $\mathcal{C}$ is absorbing: for every $x \in \mathcal{C}^{\mathrm{c}}$, the lower probability that the process ever transitions from $x$ to $\mathcal{C}$ is strictly positive.
We will say that $\overline{T}$ is top class regular (TCR) if it has a top class that is regular, and analogously for top class absorbing (TCA). Top class regularity represents aperiodic behaviour: it demands that there is some time instant $k^* \in \mathbb{N}$ such that all of the elements in the top class are accessible from each other in $k$ steps, for any $k \ge k^*$. In the case of traditional Markov chains, top class regularity suffices as a necessary and sufficient condition for ergodicity. However, in the imprecise case, we need the additional condition of being top class absorbing, which ensures that the top class will eventually be reached: it requires that, if the process starts from any state $x \in \mathcal{C}^{\mathrm{c}}$, the lower probability that it will ever transition to $\mathcal{C}$ is strictly positive. This notion of an absorbing (top) class, however, is not to be confused with what is called an absorbing state in the standard literature on Markov chains. The latter simply is a state such that, once entered, it can never be left anymore; in our terminology, this is the same as a closed class that consists of a single state. From a practical point of view, an important feature of both of these accessibility conditions is that they can be easily checked in practice. Strictly speaking, though, the existing method for checking A2 was presented for regular top classes only. However, a closer look at the corresponding proof shows that it does not rely on the regularity of the top class, and that the method can therefore be applied to any top class. Hence, the condition of (TCA), the central condition of this paper that will turn out to be necessary and sufficient for weak ergodicity, can be easily verified in practice by first checking the existence of a top class, for instance using (3), and then checking A2 using the method just mentioned. A more explicit treatment of this subject, however, would lead us too far, and we therefore leave it at this informal argument.
The characterisation of ergodicity using (TCR) and (TCA) was strongly inspired by the observation that upper transition operators are part of a specific collection of order-preserving maps, called topical maps. These are maps $F \colon \mathbb{R}^n \to \mathbb{R}^n$ that satisfy, for all $h, g \in \mathbb{R}^n$ and all $\mu \in \mathbb{R}$,
T1. if $h \le g$, then $F h \le F g$;
T2. $F(h + \mu) = F h + \mu$.
To show this, we identify $\mathcal{L}(\mathcal{X})$ with the finite-dimensional linear space $\mathbb{R}^n$, with $n = |\mathcal{X}|$; this is clearly possible because both are isomorphic. That every upper transition operator is topical now follows trivially from U5 and U6. What is perhaps less obvious, but can be derived in an equally trivial way, is that the operator $\overline{T}_f$ is also topical. This allows us to apply results for topical maps to $\overline{T}_f$ in order to find necessary and sufficient conditions for weak ergodicity.
A Sufficient Condition for Weak Ergodicity
As a first step, we aim to find sufficient conditions for the existence of Ē_av,∞(f). To that end, recall from Section 4 that Ē_av,∞(f) exists if, and only if, the limit lim_{k→+∞} T̄_f^k(0)/k exists, and that in that case Ē_av,∞(f) = lim_{k→+∞} T̄_f^k(0)/k. Then, since T̄_f is topical, the following lemma implies that it is also equal to lim_{k→+∞} T̄_f^k h/k for any h ∈ ℒ(𝒳).
Lemma 2.
Consider any topical map F : ℝⁿ → ℝⁿ. If the limit lim_{k→+∞} F^k h/k exists for some h ∈ ℝⁿ, then the limit exists for all h ∈ ℝⁿ and all these limits are equal.
Hence, if lim_{k→+∞} T̄_f^k h/k converges to a constant vector µ for some h ∈ ℒ(𝒳), then Ē_av,∞(f) exists and is equal to µ. This condition is clearly satisfied if the map T̄_f has an (additive) eigenvector h ∈ ℒ(𝒳), meaning that T̄_f^k h = h + kµ for some µ ∈ ℝ and all k ∈ ℕ₀. In that case, we have that Ē_av,∞(f) = µ, where µ is called the eigenvalue corresponding to h. Moreover, there can then only be one such eigenvalue µ.

Corollary 3. Any topical map F : ℝⁿ → ℝⁿ has at most one (additive) eigenvalue.

Proof. Suppose that F has two eigenvalues µ₁ and µ₂, and let h₁ and h₂ be the corresponding eigenvectors. Then we have that F^k h₁ = h₁ + kµ₁ for all k ∈ ℕ₀, which immediately implies that lim_{k→+∞} F^k h₁/k = µ₁. In a similar way, we obtain that lim_{k→+∞} F^k h₂/k = µ₂. Then, due to Lemma 2, we have that µ₁ = µ₂.
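This also suggests a simple numerical scheme for approximating Ē_av,∞(f) when it exists: iterate T̄_f on the zero vector and divide by the number of iterations. A sketch, under the same hypothetical assumptions as before:

import numpy as np

def approx_E_av(Tbar, f, n, kmax=10000, tol=1e-9):
    # approximate lim_k Tbar_f^k(0)/k; returns the vector Tbar_f^k(0)/k
    h = np.zeros(n)
    prev = None
    for k in range(1, kmax + 1):
        h = f + Tbar(h)  # one application of Tbar_f
        avg = h / k
        if prev is not None and np.max(np.abs(avg - prev)) < tol:
            break
        prev = avg
    return avg  # a constant vector whenever the chain is weakly ergodic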
To find conditions that guarantee the existence of an eigenvector of T̄_f, we will make use of results from the literature on topical maps. There, accessibility graphs are defined in a slightly different way: for any topical map F : ℝⁿ → ℝⁿ, they let 𝒢′(F) be the graph with vertices v₁, ..., vₙ and an edge from v_i to v_j if lim_{α→+∞} F(α𝕀_{v_j})(v_i) = +∞. Subsequently, for such a graph 𝒢′(F), the accessibility relation → and corresponding notions (e.g. 'strongly connected', 'top class', ...) are defined as in Section 5. If we identify the vertices v₁, ..., vₙ in 𝒢′(T̄) and 𝒢′(T̄_f) with the different states x₁, ..., xₙ in 𝒳, this can in particular be done for the topical maps T̄ and T̄_f. The following results show that the resulting graphs coincide with the one defined in Section 5.

Proposition 4. The graphs 𝒢′(T̄) and 𝒢(T̄) coincide.

Proof. Consider any two vertices x and y in the graph 𝒢′(T̄). By definition, there is an edge from x to y if lim_{α→+∞} T̄(α𝕀_y)(x) = +∞. Due to U3, this is equivalent to the condition that lim_{α→+∞} αT̄𝕀_y(x) = +∞. Since moreover 0 ≤ T̄𝕀_y ≤ 1 by U4, this condition reduces to T̄𝕀_y(x) > 0.

Corollary 5. For any f ∈ ℒ(𝒳), the graphs 𝒢′(T̄_f), 𝒢′(T̄) and 𝒢(T̄) all coincide.

In principle, we could use this result to directly obtain the desired condition for the existence of an eigenvector from the literature. However, the relevant result there is given in a multiplicative framework and would need to be reformulated in an additive framework in order to be applicable to the map T̄_f. This can be achieved with a bijective transformation, but we prefer not to do so because it would require too much extra terminology and notation. Instead, we will derive an additive variant directly. The second result that we need uses the notion of a super-eigenspace, defined for any topical map F and any µ ∈ ℝ as the set S^µ(F) := {h ∈ ℝⁿ : Fh ≤ h + µ}. Together, these results (Theorems 6 and 7 in the literature) imply that any topical map F : ℝⁿ → ℝⁿ for which the graph 𝒢′(F) is strongly connected has an eigenvector. The connection between both is provided by the fact that trajectories cannot leave a super-eigenspace. The following result formalises this.

Theorem 8. Let F : ℝⁿ → ℝⁿ be a topical map such that the associated graph 𝒢′(F) is strongly connected. Then F has an eigenvector in ℝⁿ.
Proof. Consider any h ∈ ℝⁿ and any µ ∈ ℝ such that max(Fh − h) ≤ µ. Then Fh ≤ h + µ, so h ∈ S^µ(F). Moreover, FFh ≤ F(h + µ) = Fh + µ because of T1 and T2, which implies that also Fh ∈ S^µ(F). In the same way, we can also deduce that F²h ∈ S^µ(F) and, by repeating this argument, that the whole trajectory corresponding to h remains in S^µ(F). This trajectory is bounded because of Theorem 7, which by Theorem 6 guarantees the existence of an eigenvector.
In particular, if 𝒢′(T̄_f) is strongly connected, then T̄_f has an eigenvector, which in turn implies the existence of Ē_av,∞(f), as explained earlier. If we combine this observation with Corollary 5, we obtain the following result.
Proposition 9.
T̄ is weakly ergodic if the associated graph 𝒢(T̄) is strongly connected. In that case, for any f ∈ ℒ(𝒳), the limit value Ē_av,∞(f) is equal to the unique (additive) eigenvalue of T̄_f.
Proof. Suppose that 𝒢(T̄) is strongly connected. Then, by Corollary 5, 𝒢′(T̄_f) is also strongly connected. Hence, for any f ∈ ℒ(𝒳), since T̄_f is a topical map, Theorem 8 guarantees the existence of an eigenvector of T̄_f. Let µ be the corresponding eigenvalue. By Corollary 3, this eigenvalue µ is the only, and therefore unique, eigenvalue corresponding to T̄_f. As explained in the beginning of this section, it now follows from Lemma 2 that Ē_av,∞(f) exists and is equal to µ, so we indeed find that T̄ is weakly ergodic.
In the remainder of this paper, we will use the fact that T̄ is an upper transition operator, so not just any topical map, to strengthen this result. In particular, we will show that the condition of being strongly connected can be replaced by a weaker one: being top class absorbing. Nonetheless, the result above can already be useful in practice, because checking whether a graph is strongly connected can be done rather efficiently; in any case more efficiently than checking for (TCA), since that requires us to first check the existence of a top class anyway. Hence, when interested in weakly ergodic behaviour, and when the dimensions of the considered model are large, one may prefer to verify strong connectedness before checking the weaker condition of being top class absorbing.
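Concretely, strong connectedness amounts to a standard strongly-connected-components computation on the accessibility matrix, as in the following sketch (our own, reusing the hypothetical accessibility_matrix helper from before):

import numpy as np
from scipy.sparse.csgraph import connected_components

def is_strongly_connected(Tbar, n):
    A = accessibility_matrix(Tbar, n).astype(int)
    # one strongly connected component means the graph is strongly connected
    n_components, _ = connected_components(A, directed=True, connection='strong')
    return n_components == 1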
A Necessary and Sufficient Condition for Weak Ergodicity
In order to gain some intuition about how to obtain a more general sufficient condition for weak ergodicity, consider the case where T̄ has a closed (or, equivalently, maximal) communication class 𝒞 and the process' initial state x is in 𝒞. Since 𝒞 is closed, the process surely remains in 𝒞 and hence, it is to be expected that the time average of f will not be affected by the dynamics of the process outside 𝒞. Moreover, the communication class 𝒞 is a strongly connected component, so one would expect that, due to Proposition 9, the upper expected time average Ē_av,k(f|x) converges to a constant that does not depend on the state x ∈ 𝒞. Our intuition is formalised by the following proposition. Its proof, as well as those of the other statements in this section (apart from Theorem 14), can be found in the Appendix.
Proposition 10. For any closed communication class 𝒞 of T̄, any f ∈ ℒ(𝒳) and any x ∈ 𝒞, the inference Ē_av,k(f|x) is equal to Ē_av,k(f𝕀_𝒞|x) and converges to a limit value as k recedes to infinity. This limit value is furthermore the same for all x ∈ 𝒞.
As a next step, we want to extend the domain of convergence of Ē_av,k(f|x) to all states x ∈ 𝒳. To do so, we will impose the additional property of being top class absorbing (TCA), which, as explained in Section 5, demands that there is a strictly positive (lower) probability to reach the top class ℛ in a finite time period. Once in ℛ, the process can never escape, though. One would therefore expect that, as time progresses and more of these finite time periods go by, this lower probability increases, implying that the process will eventually be in ℛ with practical certainty. Furthermore, if the process transitions from x ∈ ℛᶜ to a state y ∈ ℛ, then Proposition 10 guarantees that Ē_av,k(f|y) converges to a limit and that this limit value does not depend on the state y. Finally, since the average is taken over a growing time interval, the initial finite number of time steps that it took for the process to transition from x to y will not influence the time average of f in the limit. This leads us to suspect that Ē_av,k(f|x) converges to the same limit as Ē_av,k(f|y). Since this argument applies to any x ∈ ℛᶜ, we are led to believe that T̄ is weakly ergodic. The following result confirms this.
Proposition 11. T̄ is weakly ergodic if it satisfies (TCA).
Conversely, suppose that T̄ does not satisfy (TCA). Then there are two possibilities: either there is no top class, or there is a top class but it is not absorbing. If there is no top class, then it can easily be deduced that there are at least two closed communication classes 𝒞₁ and 𝒞₂. As discussed earlier, the process cannot leave the classes 𝒞₁ and 𝒞₂ once it has reached them. So if it starts in one of these communication classes, the process' dynamics outside this class are irrelevant for the behaviour of the resulting time average. In particular, if we let f be the function that takes the constant value c₁ on 𝒞₁ and c₂ on 𝒞₂, with c₁ ≠ c₂, then we would expect that Ē_av,k(f|x) = c₁ and Ē_av,k(f|y) = c₂ for all k ∈ ℕ₀, any x ∈ 𝒞₁ and any y ∈ 𝒞₂. In fact, this can easily be formalised by means of Proposition 10. Hence, Ē_av,∞(f|x) = c₁ ≠ c₂ = Ē_av,∞(f|y), so the upper transition operator T̄ cannot be weakly ergodic. In other words, if T̄ is weakly ergodic, there must be a top class.
Proposition 12. T̄ has a top class if it is weakly ergodic.
Finally, suppose that there is a top class ℛ, but that it is not absorbing. This implies that there is an x ∈ ℛᶜ and a compatible precise model such that the process is guaranteed to remain in ℛᶜ given that it started in x. If we now let f = 𝕀_{ℛᶜ}, then, conditional on the fact that X₀ = x, the expected time average of f corresponding to this precise model is equal to 1. Furthermore, since f ≤ 1, no other process can yield a higher expected time average. The upper expected time average Ē_av,k(f|x) is therefore equal to 1 for all k ∈ ℕ. However, using Proposition 10, we can also show that Ē_av,k(f|y) = 0 for any y ∈ ℛ and all k ∈ ℕ. Hence, Ē_av,∞(f|x) = 1 ≠ 0 = Ē_av,∞(f|y), which precludes T̄ from being weakly ergodic.
Proposition 13. T̄ satisfies (TCA) if it is weakly ergodic and has a top class.
Together with Propositions 11 and 12, this allows us to conclude that (TCA) is a necessary and sufficient condition for weak ergodicity.
Theorem 14. T̄ is weakly ergodic if and only if it satisfies (TCA).
Proof. That (TCA) is a sufficient condition follows from Proposition 11. Necessity follows from Proposition 12 together with Proposition 13.
Weak Ergodicity for Imprecise Markov Chains Under Repetition Independence
So far, we have ignored imprecise Markov chains under repetition independence in our analysis of weakly ergodic behaviour. Within the field of imprecise probability, these imprecise Markov chains are less studied, because (i) they can incorporate fewer types of model uncertainty and (ii) they seem difficult to handle computationally, in the sense that there are (almost) no methods that are able to efficiently solve inference problems for such models; we refer to the literature for more details. On the other hand, their relevance should not be underestimated; they model the practical situation where we believe there is a single and fixed transition matrix T, but only have partial knowledge about the numerical values that make up this matrix.
The concepts of ergodicity and weak ergodicity can be defined in a similar way as for imprecise Markov chains under epistemic irrelevance and imprecise Markov chains under complete independence. For any f ∈ ℒ(𝒳), any x ∈ 𝒳 and any k ∈ ℕ, we let

Ē^ri_av,k(f|x) := sup{E_P(f_av(X_{1:k}) | X₁ = x) : P ∈ 𝒫^ri}.

Then we say that the imprecise Markov chain 𝒫^ri is ergodic if, for all f ∈ ℒ(𝒳), the upper expectation of f(X_k) converges to a limit value that is constant in the initial state x, and weakly ergodic if the upper expected time averages Ē^ri_av,k(f|x) do. Contrary to before, however, these inferences do not satisfy recursive expressions such as (1) and (2). Hence, in order to study weak ergodicity (and conventional ergodicity) for these models, we will have to rely on a different approach than the one we have used before. Moreover, because of the absence of any recursive expressions, it is a priori not certain whether the inferences Ē^ri_av,k(f|x) will only depend on 𝒯 through the upper transition operator T̄. So, contrary to what we did in Sections 4 to 7, we cannot simply forget about the set 𝒯 here. Instead, we will regard the set 𝒯, which is always assumed to be separately specified, as the primary object that determines the values of the inferences Ē^ri_av,k(f|x). Before we begin our analysis, recall that the model 𝒫^ri consists of all homogeneous (precise) Markov chains P that are compatible with 𝒯, in the sense that P(X₂ | X₁ = x) ∈ 𝒯_x for all x ∈ 𝒳, where we already took into account the homogeneity and the Markov property of each P. Since each homogeneous Markov chain P has a transition matrix T, we could alternatively write that P ∈ 𝒫^ri if and only if T(x, ·) ∈ 𝒯_x for all x ∈ 𝒳 or, since 𝒯 is separately specified, if and only if T ∈ 𝒯. Furthermore, note that each P ∈ 𝒫^ri is itself an imprecise Markov chain, where the upper transition operator is now linear and characterised by the transition matrix T of P. The independence assumption, be it epistemic irrelevance, complete independence or repetition independence, obviously does not matter here; they are all equivalent. Hence, the recursive expressions (1) and (2), and, more generally, all our results for imprecise Markov chains under epistemic irrelevance or complete independence, also hold for a homogeneous Markov chain P ∈ 𝒫^ri. The transition matrix T corresponding to P then takes the role of a particular upper transition operator T̄. In the remainder, we will assume that the reader has taken notice of this fact and we will simply apply our results from the previous sections in this more specific case without further ado. On top of that, we will often use the basic relation that Tᵏh ≤ T̄ᵏh for all T ∈ 𝒯, all h ∈ ℒ(𝒳) and all k ∈ ℕ₀. This intuitive inequality can be easily derived from the definition of T̄ and the monotonicity of T and T̄.
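To make the relation between 𝒯 and T̄ concrete: if each local model 𝒯_x is represented by a finite set of candidate probability rows (an assumption made purely for illustration), then T̄h(x) is the maximum of (Th)(x) over those rows, and the basic inequality Tᵏh ≤ T̄ᵏh can be spot-checked directly. A sketch of our own:

import numpy as np

def make_Tbar(local_rows):
    # local_rows[x]: array of candidate probability rows for state x
    def Tbar(h):
        return np.array([max(row @ h for row in rows) for rows in local_rows])
    return Tbar

def dominates(T, Tbar, n, k=20, trials=100, seed=0):
    # spot-check T^k h <= Tbar^k h for a compatible transition matrix T
    rng = np.random.default_rng(seed)
    for _ in range(trials):
        h = rng.normal(size=n)
        lo, hi = h.copy(), h.copy()
        for _ in range(k):
            lo, hi = T @ lo, Tbar(hi)
        if not np.all(lo <= hi + 1e-10):
            return False
    return True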
For notational convenience, we use T_f and E^P_av,k to denote the objects T̄_f and Ē_av,k (or, equivalently, Ē^ri_av,k) that correspond to any homogeneous Markov chain P ∈ 𝒫^ri; so we let T_f h := f + Th, with T the transition matrix of P, and E^P_av,k(f|x) := E_P(f_av(X_{1:k}) | X₁ = x) for all f, h ∈ ℒ(𝒳), all x ∈ 𝒳 and all k ∈ ℕ. Taking into account the previous considerations, we can then write that, for all f ∈ ℒ(𝒳), all x ∈ 𝒳 and all k ∈ ℕ,

Ē^ri_av,k(f|x) = sup{E^P_av,k(f|x) : P ∈ 𝒫^ri} = sup{(1/k)(T_f^k 0)(x) : T ∈ 𝒯}, (4)

where the last equality holds because of Equation (2) and the fact that P ∈ 𝒫^ri if and only if T ∈ 𝒯 (for the transition matrix T corresponding to P). The expression above will now serve as the main starting point in our further study of Ē^ri_av,k(f|x). As a first step, we show that an imprecise Markov chain 𝒫^ri is weakly ergodic if the upper transition operator T̄ corresponding to 𝒯 has a graph 𝒢(T̄) that is strongly connected. Moreover, in that case, we also have that Ē^ri_av,∞(f) = Ē_av,∞(f) for all f ∈ ℒ(𝒳). The result can be obtained in a rather straightforward fashion from the following lemma, which essentially states that, if T̄_f has an eigenvalue, there is a homogeneous Markov chain P ∈ 𝒫^ri that behaves 'approximately' weakly ergodically. Its proof makes use of the supremum norm ‖·‖_∞, defined by ‖h‖_∞ := max_{x∈𝒳} |h(x)| for all h ∈ ℒ(𝒳).
Lemma 15. Consider any f ∈ ℒ(𝒳). If the map T̄_f has an eigenvalue µ, then, for any ε > 0, there is a homogeneous Markov chain P ∈ 𝒫^ri, with transition matrix T, such that µ − ε ≤ lim inf_{k→+∞} (1/k)(T_f^k 0) and lim sup_{k→+∞} (1/k)(T_f^k 0) ≤ µ.
Proof. Suppose that the map T̄_f has an eigenvalue µ. Then T̄_f has at least one eigenvector h ∈ ℒ(𝒳), for which it holds that T̄_f^k h = h + kµ for all k ∈ ℕ. Fix any ε > 0. Then, since 𝒯 is assumed to be separately specified, we are allowed to use S1, which implies that there is some T ∈ 𝒯 such that T̄h ≤ Th + ε, and hence also h + µ − ε = T̄_f h − ε ≤ T_f h ≤ T̄_f h = h + µ. Using the same properties of T_f, this in turn implies that h + 2µ − 2ε ≤ T_f² h ≤ h + 2µ. Repeating the argument above allows us to conclude that h + kµ − kε ≤ T_f^k h ≤ h + kµ for all k ∈ ℕ, and therefore,

µ − ε ≤ lim inf_{k→+∞} (1/k) T_f^k h ≤ lim sup_{k→+∞} (1/k) T_f^k h ≤ µ. (5)

Furthermore, T_f is topical, so T_f is non-expansive with respect to the supremum norm. This implies that ‖T_f^k h − T_f^k 0‖_∞ ≤ ‖h‖_∞ for all k ∈ ℕ, and hence that lim_{k→+∞} (1/k)‖T_f^k h − T_f^k 0‖_∞ = 0. On the other hand, we have that lim sup_{k→+∞} (1/k) T_f^k(0) ≤ lim sup_{k→+∞} (1/k) T_f^k h + lim_{k→+∞} (1/k)‖T_f^k h − T_f^k 0‖_∞. Hence, we have that lim sup_{k→+∞} (1/k) T_f^k(0) ≤ µ, which, together with Equation (5) and the fact that obviously lim inf_{k→+∞} (1/k) T_f^k(0) ≥ lim inf_{k→+∞} (1/k) T_f^k h − lim_{k→+∞} (1/k)‖T_f^k h − T_f^k 0‖_∞ ≥ µ − ε, implies the desired statement.
Proposition 16. If 𝒢(T̄) is strongly connected, then 𝒫^ri is weakly ergodic and we have that Ē^ri_av,∞(f) = Ē_av,∞(f) for all f ∈ ℒ(𝒳).
Proof. Fix any f ∈ ℒ(𝒳). Since 𝒢(T̄) is strongly connected, Proposition 9 implies that T̄ is weakly ergodic and that Ē_av,∞(f) = µ, where µ is the unique eigenvalue of the map T̄_f. Then it follows from Lemma 15 that, for any ε > 0, there is a homogeneous Markov chain P ∈ 𝒫^ri, with transition matrix T, such that µ − ε ≤ lim inf_{k→+∞} (1/k)(T_f^k 0). So, for any x ∈ 𝒳, we have that

µ − ε ≤ lim inf_{k→+∞} (1/k)(T_f^k 0)(x) = lim inf_{k→+∞} E^P_av,k(f|x) ≤ lim inf_{k→+∞} Ē^ri_av,k(f|x),

where we used Equation (4) in the last step. On the other hand, since Ē^ri_av,k(f|x) ≤ Ē_av,k(f|x) for all k ∈ ℕ (because 𝒫^ri ⊆ 𝒫^ci ⊆ 𝒫^ei), we also have that lim sup_{k→+∞} Ē^ri_av,k(f|x) ≤ lim_{k→+∞} Ē_av,k(f|x) = µ. Since ε > 0 was arbitrary, we conclude that lim_{k→+∞} Ē^ri_av,k(f|x) exists and is equal to µ = Ē_av,∞(f) for all x ∈ 𝒳 and therefore, that 𝒫^ri is weakly ergodic and that Ē^ri_av,∞(f) = Ē_av,∞(f).
Proposition 16 provides a sufficient condition, having a graph 𝒢(T̄) that is strongly connected, for an imprecise Markov chain 𝒫^ri to be weakly ergodic. However, as was the case for the imprecise Markov chains 𝒫^ei and 𝒫^ci, this condition can actually be weakened to the condition that T̄ should be top class absorbing (TCA). It will moreover turn out that this weaker accessibility condition is not only sufficient, but also necessary.
Both the sufficiency and necessity of (TCA) can be made intuitive using arguments that are somewhat similar to the ones we have used in Section 7, where we discussed the sufficiency and necessity of (TCA) for imprecise Markov chains under epistemic irrelevance and complete independence. Before we explain why (TCA) is sufficient here as well, first observe that, for any closed class 𝒞 in the graph 𝒢(T̄), associated with 𝒯 through T̄, the class 𝒞 is also closed in the graph 𝒢(T), for all T ∈ 𝒯:
Lemma 17. If a class 𝒞 is closed in 𝒢(T̄), then, for all T ∈ 𝒯, the class 𝒞 is also closed in 𝒢(T).
Proof. Let 𝒞 be any closed class in 𝒢(T̄) and consider any x ∈ 𝒞. By definition, we have that x ↛ y in 𝒢(T̄) for any y ∈ 𝒞ᶜ, which by Lemma 1 implies that T̄ᵏ𝕀_y(x) ≤ 0 for all k ∈ ℕ. Furthermore, for any T ∈ 𝒯 and all k ∈ ℕ, we have that Tᵏ𝕀_y(x) ≤ T̄ᵏ𝕀_y(x), so it follows that Tᵏ𝕀_y(x) ≤ 0. Hence, by Lemma 1, x ↛ y in 𝒢(T). This holds for all x ∈ 𝒞 and all y ∈ 𝒞ᶜ, so 𝒞 is also closed in 𝒢(T) for any T ∈ 𝒯.

Now suppose that the upper transition operator T̄ corresponding to 𝒯 satisfies (TCA) with top class ℛ, and that the process' initial state x is in ℛ. Because of the lemma above, ℛ is also closed in 𝒢(T) for any T ∈ 𝒯. Hence, according to any (homogeneous) Markov chain P with transition matrix T, the process' state can never leave ℛ. So it is to be expected that the inference E^P_av,k(f|x) will not be influenced by the behaviour of the Markov chain P outside of ℛ. Since this is true for any T ∈ 𝒯, and since Ē^ri_av,k(f|x) is simply an upper envelope of E^P_av,k(f|x) over all compatible Markov chains P, it seems that, in our study of the inference Ē^ri_av,k(f|x), we can limit ourselves to that part of 𝒯 and 𝒫^ri that describes the process' dynamics for states in ℛ. Moreover, it can be shown (see Lemma 45 in Appendix E) that the upper transition operator T̄′ and the graph 𝒢(T̄′) associated with such a restricted version 𝒯′ of 𝒯 are themselves versions of the original T̄ and 𝒢(T̄) restricted to the states in ℛ. Since ℛ is a strongly connected component of 𝒢(T̄), the graph 𝒢(T̄′) will therefore be strongly connected as a whole, and we are led to believe, due to Proposition 16, that the inference Ē^ri_av,k(f|x) will converge to a constant that does not depend on the state x ∈ ℛ. So, one would expect that, if we limit ourselves to initial states in ℛ, the condition of (TCA) is sufficient for weakly ergodic behaviour.
In order to see why this can also be expected if we allow the process to start in ℛᶜ, first consider the following straightforward lemma.
Lemma 18. If T̄ satisfies (TCA) with top class ℛ, then, for all T ∈ 𝒯, the class ℛ is an absorbing closed class in 𝒢(T).
Proof. Since ℛ is the top class in 𝒢(T̄), it is closed in 𝒢(T̄) and therefore, by Lemma 17, it is also closed in 𝒢(T) for all T ∈ 𝒯. So it remains to prove that ℛ is moreover absorbing in 𝒢(T) for all T ∈ 𝒯. To do so, recall that ℛ is an absorbing (and closed) class in 𝒢(T̄), meaning that T̄^{k_x}𝕀_{ℛᶜ}(x) < 1 for any x ∈ ℛᶜ and some k_x ∈ ℕ. Then, for any T ∈ 𝒯, since T^{k_x}𝕀_{ℛᶜ} ≤ T̄^{k_x}𝕀_{ℛᶜ}, we also have that T^{k_x}𝕀_{ℛᶜ}(x) < 1, which implies that ℛ is indeed an absorbing closed class in 𝒢(T).
So if we consider any Markov chain P ∈ 𝒫^ri and let T ∈ 𝒯 be the corresponding transition matrix, the class ℛ is closed and absorbing in 𝒢(T). We can then reason in a similar way as in the paragraph that preceded Proposition 11; due to the fact that ℛ is absorbing and closed, it is to be expected that, according to the model P, the process is eventually in ℛ with practical certainty. Unlike before, however, ℛ is not necessarily a communication class in 𝒢(T), so we cannot assert that P behaves in a weakly ergodic way for initial states in ℛ. In other words, for any x ∈ ℛ, the expected time average E^P_av,k(f|x) may not converge or, if it does converge, it may possibly depend on the initial state x ∈ ℛ. Nonetheless, it can be inferred from our previous discussion and Lemma 15 that the model P ∈ 𝒫^ri can be chosen in such a way that its behaviour for initial states x ∈ ℛ is 'approximately' weakly ergodic, in the sense that E^P_av,k(f|x) will eventually lie at ε-distance from a constant µ that does not depend on x ∈ ℛ. Recalling that ℛ is closed and absorbing, for all P ∈ 𝒫^ri and therefore also for any specific choice of P, the same can be expected for initial states y ∈ ℛᶜ. As a result, we would then get that µ − ε ≤ Ē^ri_av,k(f|x) for any x ∈ 𝒳 and for k large enough. Moreover, it can easily be shown that the constant µ is the limit value of the inference Ē_av,k(f|x) (see, for instance, Proposition 9), so µ is also an upper bound for the limit values of Ē^ri_av,k(f|x). Both observations taken together, one would expect that the inference Ē^ri_av,k(f|x) converges to the constant µ for all x ∈ 𝒳. Our next result confirms this.
Proposition 19. If T̄ satisfies (TCA), then 𝒫^ri is weakly ergodic and Ē^ri_av,∞(f) = Ē_av,∞(f) for all f ∈ ℒ(𝒳).
Conversely, consider any 𝒯 such that the corresponding T̄ does not satisfy (TCA). Then either 𝒢(T̄) has no top class, in which case it must necessarily have two (or more) different closed communication classes 𝒞₁ and 𝒞₂, or 𝒢(T̄) has a top class but it is not absorbing. Consider the first case. Since 𝒞₁ and 𝒞₂ are both closed in 𝒢(T̄), they are also both closed (but not necessarily a communication class) in 𝒢(T) for any T ∈ 𝒯, because of Lemma 17. So, according to any Markov chain P ∈ 𝒫^ri with transition matrix T, the process surely remains in 𝒞_i, with i ∈ {1, 2}, once it has reached 𝒞_i. Hence, if we let c₁, c₂ ∈ ℝ such that c₁ ≠ c₂ and assume that f(x) = c_i for all x ∈ 𝒞_i, which is possible because 𝒞₁ and 𝒞₂ are two different communication classes in 𝒢(T̄) and are therefore disjoint, we would expect that E^P_av,k(f|x) is simply equal to c_i for all k ∈ ℕ and all x ∈ 𝒞_i. Since this is the case for all P ∈ 𝒫^ri, we are inclined to conclude that Ē^ri_av,k(f|x) = c_i for all k ∈ ℕ and all x ∈ 𝒞_i, and therefore that lim_{k→+∞} Ē^ri_av,k(f|x₁) = c₁ ≠ c₂ = lim_{k→+∞} Ē^ri_av,k(f|x₂) for any x₁ ∈ 𝒞₁ and any x₂ ∈ 𝒞₂. This would preclude 𝒫^ri from being weakly ergodic. As a result, if we assume that 𝒫^ri is weakly ergodic, then 𝒢(T̄) must have a top class.
Proposition 20. If 𝒫^ri is weakly ergodic, then the graph 𝒢(T̄) has a top class.
Finally, consider the case that 𝒢(T̄) has a top class ℛ that is not absorbing, and recall the discussion that led to Proposition 13. There, we relied on the fact that, since ℛ is not absorbing, there is some precise model P ∈ 𝒫^ei for which the process is guaranteed to remain in ℛᶜ given that it started in some state x ∈ ℛᶜ. As a matter of fact, this compatible model P can always be chosen in such a way that it is a homogeneous Markov chain, and therefore such that P ∈ 𝒫^ri. Hence, if we again let f be the indicator 𝕀_{ℛᶜ}, then we would obtain that E^P_av,k(f|x) = 1. Since no other P′ ∈ 𝒫^ri can then yield a higher expected time average for this f, we would also have that Ē^ri_av,k(f|x) = 1 for all k ∈ ℕ. On the other hand, since ℛ is closed and f(y) = 0 for any y ∈ ℛ, it is to be expected, for the same reasons as in the paragraph above, where both 𝒞₁ and 𝒞₂ were closed, that Ē^ri_av,k(f|y) = 0 for all k ∈ ℕ. So in conclusion, we would have that lim_{k→+∞} Ē^ri_av,k(f|x) = 1 ≠ 0 = lim_{k→+∞} Ē^ri_av,k(f|y), again precluding 𝒫^ri from being weakly ergodic.
Proposition 21. If 𝒫^ri is weakly ergodic and the graph 𝒢(T̄) has a top class, then T̄ satisfies (TCA).
It now only remains to combine Propositions 19, 20 and 21 to establish our second main result.
Theorem 22. An imprecise Markov chain under repetition independence 𝒫^ri is weakly ergodic if and only if the upper transition operator T̄ corresponding with the separately specified set 𝒯 satisfies (TCA). Furthermore, in that case, we have that Ē^ri_av,∞(f) = Ē_av,∞(f) for all f ∈ ℒ(𝒳).
Proof. Sufficiency follows from Proposition 19; necessity follows from Proposition 20 and Proposition 21. The last statement, that Ē^ri_av,∞(f) = Ē_av,∞(f) for all f ∈ ℒ(𝒳) in the case that T̄ satisfies (TCA), also follows from Proposition 19.
Conclusion
The most important conclusion of our study of upper and lower expected time averages is the following (see Theorems 14 and 22): the condition of being top class absorbing is necessary and sufficient for weakly ergodic behaviour of an imprecise Markov chain, irrespective of the imposed independence assumption. In that case, upper (and lower) expected time averages converge to limit values that are constant, not only for all possible initial states (or distributions) of the process, but also for all possible types of independence assumptions. Now, if we compare our notion of weak ergodicity with that of conventional ergodicity, which guarantees the existence of a limit upper and lower expectation, we believe that weak ergodicity, and the associated (limits of) upper and lower expected time averages, should become the new objects of interest when it comes to characterising long-term average behaviour. Our conviction is based on the following three arguments: i. Weak ergodicity requires less stringent conditions to be satisfied than conventional ergodicity, which additionally requires top class regularity. We illustrated this difference in Example 1, where we considered a(n imprecise) Markov chain that satisfies (TCA) but not (TCR).
ii. The inferences Ē_av,∞(f) are able to provide us with more information about how time averages might behave, compared to limit expectations Ē_∞(f). To see why, recall Example 2, where the inference Ē_av,∞(𝕀_b) = 1/2 differed significantly from Ē_∞(𝕀_b) = 1. Clearly, the former was more representative for the limit behaviour of the time average of 𝕀_b. A similar statement holds for general functions: in particular, Ē_av,∞(f) ≤ Ē_∞(f) for any function f ∈ ℒ(𝒳). Since both inferences are upper bounds, Ē_av,∞(f) is therefore at least as informative as Ē_∞(f), and sometimes much more so.
iii. The characterisation of weak ergodicity, as well as the limit values Ē_av,∞(f), or, equivalently, Ē^ri_av,∞(f), of upper (and lower) expected time averages, do not depend on the type of independence assumption that we impose on the imprecise Markov chain. As we have illustrated in Example 2, conventional ergodicity does not exhibit this kind of robustness. We perceive this as an advantage in favour of weak ergodicity. On the one hand, it provides a clear practical benefit, because one should not spend time and/or money in searching for the appropriate independence assumption; it simply does not matter. On the other hand, it also opens doors for further theoretical research on this topic, because the limit values Ē_av,∞(f) are now approximated by two different objects: the upper expectations Ē_av,k(f|x) and the upper expectations Ē^ri_av,k(f|x). It is, for instance, not unreasonable to think that this feature might very well be a crucial step in developing efficient algorithms for the computation of Ē_av,∞(f).
That said, there is also one important feature that limit upper and lower expectations have, but that we did not yet consider for upper and lower expected time averages: an (imprecise) point-wise ergodic theorem. For the limit upper and lower expectations of an ergodic imprecise Markov chain, this result states that

E̲_∞(f) ≤ lim inf_{k→+∞} f_av(X_{1:k}) ≤ lim sup_{k→+∞} f_av(X_{1:k}) ≤ Ē_∞(f)

with lower probability one. In order for limit upper and lower expected time averages to be the undisputed quantities of interest when studying long-term time averages, a similar result would need to be obtained for weak ergodicity, where the role of Ē_∞(f) and E̲_∞(f) := −Ē_∞(−f) is taken over by Ē_av,∞(f) and E̲_av,∞(f) := −Ē_av,∞(−f), respectively. If such a result were to hold, it would provide us with (strictly almost sure) bounds on the limit values attained by time averages that are not only more informative than the current ones, but also guaranteed to exist under weaker conditions and equal for all types of independence assumptions. In fact, we are happy to report that we have already established such a result. However, the proof necessitates a mathematical framework that strongly differs from the one used here, and we therefore intend to present it in future work.
Another topic that we would like to consider in the future is the convergence of the inferences Ē_av,k(f|x) and Ē^ri_av,k(f|x) in general, without imposing that their limit values should be constant for all states x in 𝒳. We suspect that this kind of convergence will require no conditions at all.

Appendix A. Proofs of the results in Section 5

Lemma 24. A communication class 𝒞 is closed if and only if it is maximal.

Proof. Suppose that 𝒞 is closed, and consider any communication class 𝒞′ ≠ 𝒞. Then for any x ∈ 𝒞 and any y ∈ 𝒞′, since 𝒞 and 𝒞′ are disjoint, we have that y ∈ 𝒞ᶜ, so the closedness of 𝒞 implies that x ↛ y, which implies that 𝒞′ is not accessible from (or does not dominate) 𝒞. Because this is the case for all communication classes 𝒞′ ≠ 𝒞, we conclude that 𝒞 is maximal. Conversely, suppose that 𝒞 is maximal, and consider any x ∈ 𝒞 and any y ∈ 𝒞ᶜ. Let 𝒞′ be the communication class such that y ∈ 𝒞′; there is exactly one such 𝒞′ because the communication classes form a partition of 𝒳. Moreover, 𝒞′ ≠ 𝒞 because y ∈ 𝒞′ ∩ 𝒞ᶜ. Then, since 𝒞 is maximal, we have that 𝒞 ↛ 𝒞′ and therefore, in particular, x ↛ y. This is true for all x ∈ 𝒞 and all y ∈ 𝒞ᶜ, so we conclude that 𝒞 is closed.
The following result is well-known in order theory. However, since we could not immediately find an appropriate reference, we have chosen to provide a proof of our own.
Lemma 25. A communication class 𝒞 is the top class ℛ if and only if 𝒞 is the only maximal (or, equivalently, closed) communication class.
Proof. First note that maximality and closedness can indeed be used interchangeably here; see Lemma 24. To see that the direct implication holds, suppose that 𝒞 is the top class. Then observe that 𝒞 is maximal, because otherwise there would be some communication class C ≠ 𝒞 such that 𝒞 → C and (since 𝒞 is the top class) C → 𝒞, and therefore, by the antisymmetry of →, 𝒞 = C, contradicting that C ≠ 𝒞. It is also the only maximal communication class, because each communication class C is dominated by 𝒞. Now suppose that 𝒞 is the only maximal communication class. To prove the converse implication, we will rely on the following observation: for any finite sequence C₁, ..., Cₙ of different communication classes such that C_i → C_j and C_i ≠ 𝒞 for all i, j ∈ {1, ..., n} with i ≤ j, there is a communication class C_{n+1} such that C_i ≠ C_{n+1} and C_i → C_{n+1} for all i ∈ {1, ..., n}. Indeed, C_n ≠ 𝒞, which implies that C_n is not maximal (𝒞 is the only maximal communication class) and therefore that there is some communication class C_{n+1} such that C_n → C_{n+1} (and obviously C_n ≠ C_{n+1}). Since C_i → C_n for any i ∈ {1, ..., n}, the transitivity of → implies that also C_i → C_{n+1}. Moreover, C_{n+1} differs from any C_i with i ∈ {1, ..., n−1}, because we would otherwise have that C_i → C_n and C_n → C_{n+1} = C_i, and therefore, by the antisymmetry of →, that C_i = C_n. This would contradict our assumptions about C₁, ..., Cₙ, and since we already established that C_n ≠ C_{n+1}, we indeed conclude that C_i ≠ C_{n+1} and C_i → C_{n+1} for all i ∈ {1, ..., n}. Now fix any C₁ such that C₁ ≠ 𝒞. Then we can use the rule above to show that C₁ → 𝒞 and therefore, since C₁ ≠ 𝒞 was arbitrary, that 𝒞 is the top class. Indeed, since C₁ ≠ 𝒞, it follows from this rule that there is a C₂ such that C₁ ≠ C₂ and C₁ → C₂. If also C₂ ≠ 𝒞, there is a third class C₃ such that C_i ≠ C₃ and C_i → C₃ for all i ∈ {1, 2}. If also C₃ ≠ 𝒞, then there is a fourth class C₄ such that C_i ≠ C₄ and C_i → C₄ for all i ∈ {1, ..., 3}, and so on, always continuing to extend this sequence in the same way. Then, since 𝒳, and therefore also the set of communication classes, is finite, and since 𝒞 is itself a communication class, we will eventually find that, at some point, the next element C_n of this sequence is such that C_n = 𝒞. Then, due to the fact that C_i → C_n for all i ∈ {1, ..., n−1}, we have in particular that C_n = 𝒞 is accessible from C₁.

Lemma 27. The graph 𝒢(T̄) has a top class ℛ if and only if ℛ′ := {x ∈ 𝒳 : y → x for all y ∈ 𝒳} ≠ ∅, in which case ℛ = ℛ′.

Proof. Suppose that T̄ has a top class ℛ. Then it follows from the fact that ℛ dominates (or, in other words, is accessible from) each other communication class 𝒞, that any x ∈ 𝒳 is in ℛ if and only if y → x for all y ∈ 𝒳. Indeed, on the one hand, if x is in ℛ, then for any y ∈ 𝒳, if we let 𝒞 be the unique communication class that contains y, we find that y → x because 𝒞 → ℛ. And on the other hand, if y → x for all y ∈ 𝒳, then for any communication class 𝒞, if we let y be any element of 𝒞, we find that 𝒞 → 𝒞_x, with 𝒞_x the communication class of x, because y → x; hence 𝒞_x is the top class ℛ, so x ∈ ℛ. Hence, since ℛ is implicitly assumed to be non-empty if it exists, we have that ℛ′ = ℛ ≠ ∅.
Conversely, suppose that ℛ′ = {x ∈ 𝒳 : y → x for all y ∈ 𝒳} ≠ ∅. Then first observe that ℛ′ is a communication class. Indeed, on the one hand, for any x, z ∈ ℛ′, the definition of ℛ′ trivially implies that x ↔ z. And on the other hand, for any x ∈ ℛ′ and z ∈ 𝒳 such that x ↔ z, we know that z ∈ ℛ′ because, for all y ∈ 𝒳, we have that y → x → z and hence y → z. So ℛ′ is a communication class. Since for any other communication class 𝒞, we have that y → x for any x ∈ ℛ′ and any y ∈ 𝒞, it follows that 𝒞 → ℛ′ for all communication classes 𝒞. Hence, ℛ′ is the (non-empty) top class ℛ.
Lemma 28. T̄ satisfies (TCR) if and only if ℛ′ := {x ∈ 𝒳 : (∃k* ∈ ℕ)(∀k ≥ k*) min T̄^k𝕀_x > 0} ≠ ∅, in which case the top class ℛ is equal to ℛ′.

Proof. Suppose that T̄ satisfies (TCR) and let ℛ ≠ ∅ be the corresponding top class. Then, since ℛ is regular, we clearly have that ℛ ⊆ ℛ′. Moreover, since min T̄^{k_x}𝕀_x > 0 for any x ∈ ℛ′ and some k_x ∈ ℕ, it follows from Lemma 1 that any x ∈ ℛ′ is accessible from anywhere in 𝒢(T̄). Hence, due to Lemma 27, we also have that ℛ′ ⊆ ℛ = {x ∈ 𝒳 : y → x for all y ∈ 𝒳}. In conclusion, we have that ℛ′ = ℛ ≠ ∅.
Conversely, suppose that ℛ′ = {x ∈ 𝒳 : (∃k* ∈ ℕ)(∀k ≥ k*) min T̄^k𝕀_x > 0} ≠ ∅. Again, since min T̄^{k_x}𝕀_x > 0 for any x ∈ ℛ′ and some k_x ∈ ℕ, Lemma 1 implies that any x ∈ ℛ′ is accessible from anywhere in 𝒢(T̄). So, we have that ℛ′ ⊆ {x ∈ 𝒳 : y → x for all y ∈ 𝒳}, which by Lemma 27 and the fact that ℛ′ ≠ ∅, and therefore {x ∈ 𝒳 : y → x for all y ∈ 𝒳} ≠ ∅, implies that the top class ℛ exists, that it is non-empty, and that ℛ′ ⊆ ℛ. To show that also ℛ ⊆ ℛ′, consider any x ∈ ℛ and any y ∈ ℛ′. Due to Lemma 27, x is accessible from anywhere in 𝒢(T̄) and hence definitely from y, so there is a directed path from y to x. Let k ∈ ℕ be the length of this path. Furthermore, since y ∈ ℛ′, there is some k* ∈ ℕ such that min T̄^{k′}𝕀_y > 0 for all k′ ≥ k*, and therefore T̄^{k′}𝕀_y(z) > 0 for all k′ ≥ k* and all z ∈ 𝒳. Fix any such k′ ≥ k* and any such z ∈ 𝒳. Then, according to Lemma 1, there is a directed path of length k′ from z to y. Hence, recalling that there is a directed path of length k from y to x, we infer that there is a directed path of length k + k′ from z to x, and therefore, again by Lemma 1, that T̄^{k+k′}𝕀_x(z) > 0. Since this holds for any k′ ≥ k* and any z ∈ 𝒳, we have that min T̄^{k+k′}𝕀_x > 0 for all k′ ≥ k*, or equivalently, that min T̄^ℓ𝕀_x > 0 for all ℓ ≥ k + k*. As a result, x is an element of ℛ′. Since x ∈ ℛ was chosen arbitrarily, it follows that ℛ ⊆ ℛ′.
Appendix B. Proof of Proposition 10
In the following, we will often use the fact that, since T̄ is an upper transition operator, the iterates of T̄ are also upper transition operators. This can easily be derived using the properties U1-U7 and an induction argument in k; for an illustration of how to do so, we refer to the literature.
Lemma 29. If T̄ is an upper transition operator then, for any k ∈ ℕ, T̄^k is an upper transition operator as well.
The properties U2-U7 therefore also apply to T̄^k, for all k ∈ ℕ₀, all h, g ∈ ℒ(𝒳), all real µ and all real λ ≥ 0; we will refer to these iterated versions as U2′-U7′. Many of the results in this appendix will make use of the graph-theoretic concepts and notations that were defined in Section 5. Unless mentioned otherwise, we will always implicitly assume that they correspond to the graph 𝒢(T̄) of T̄. Note however that, due to Corollary 5, we could also equivalently consider the graphs 𝒢′(T̄) or 𝒢′(T̄_f).
Lemma 30. For any T̄ with a closed class 𝒞, we have that T̄^k𝕀_{𝒞ᶜ}(x) = 0 for all x ∈ 𝒞 and all k ∈ ℕ.
Proof. Consider any x ∈ 𝒞. Then, since 𝒞 is closed, we have that x ↛ y for any y ∈ 𝒞ᶜ, which by Lemma 1 implies that T̄^k𝕀_y(x) ≤ 0 for all k ∈ ℕ. Hence,

0 ≤ T̄^k𝕀_{𝒞ᶜ}(x) = T̄^k(Σ_{y∈𝒞ᶜ} 𝕀_y)(x) ≤ Σ_{y∈𝒞ᶜ} T̄^k𝕀_y(x) ≤ 0,

where the first step uses U4′ and the third uses U2′.
Lemma 31. For any f ∈ ℒ(𝒳) and any T̄ with a closed class 𝒞, we have that T̄_f h(x) = T̄_f(h𝕀_𝒞)(x) for all h ∈ ℒ(𝒳) and all x ∈ 𝒞.
Proof. Fix any h ∈ ℒ(𝒳) and any x ∈ 𝒞. By sub-additivity, we have that

T̄h(x) ≤ T̄(h𝕀_𝒞)(x) + T̄(h𝕀_{𝒞ᶜ})(x) ≤ T̄(h𝕀_𝒞)(x) + T̄(‖h‖_∞𝕀_{𝒞ᶜ})(x) = T̄(h𝕀_𝒞)(x) + ‖h‖_∞T̄𝕀_{𝒞ᶜ}(x) = T̄(h𝕀_𝒞)(x),

where the first equality follows from non-negative homogeneity and the second from Lemma 30. Hence, we obtain that T̄_f h(x) ≤ T̄_f(h𝕀_𝒞)(x). To prove the converse inequality, observe that

T̄h(x) ≥ T̄(h𝕀_𝒞)(x) − T̄(−h𝕀_{𝒞ᶜ})(x) ≥ T̄(h𝕀_𝒞)(x) − T̄(‖h‖_∞𝕀_{𝒞ᶜ})(x) = T̄(h𝕀_𝒞)(x) − ‖h‖_∞T̄𝕀_{𝒞ᶜ}(x) = T̄(h𝕀_𝒞)(x),

where the first step follows from U7, the second follows from −h𝕀_{𝒞ᶜ} ≤ ‖h‖_∞𝕀_{𝒞ᶜ} and monotonicity, the third follows from non-negative homogeneity and the last from Lemma 30. So, we have that T̄h(x) ≥ T̄(h𝕀_𝒞)(x) and therefore also that T̄_f h(x) ≥ T̄_f(h𝕀_𝒞)(x). Hence, T̄_f h(x) = T̄_f(h𝕀_𝒞)(x) for all h ∈ ℒ(𝒳) and all x ∈ 𝒞.
To prove Proposition 10, we will use the following notations, which allow us to confine the dynamics of the process to a closed class. Let T̄ be any upper transition operator and let 𝒞 be any non-empty subset of 𝒳. For any h ∈ ℒ(𝒳), let h|_𝒞 ∈ ℒ(𝒞) denote the restriction of h to the domain 𝒞. Additionally, for any h ∈ ℒ(𝒞), we let h↑ ∈ ℒ(𝒳) denote the zero-extension of h into ℒ(𝒳), which takes the value h(x) for x ∈ 𝒞 and 0 elsewhere. Then note that (h|_𝒞)↑ = h𝕀_𝒞 for any h ∈ ℒ(𝒳) and (g↑)|_𝒞 = g for any g ∈ ℒ(𝒞). Let T̄_{f,𝒞} : ℒ(𝒞) → ℒ(𝒞) be defined by T̄_{f,𝒞} h := (T̄_f h↑)|_𝒞 for all h ∈ ℒ(𝒞).
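These restriction and zero-extension operations translate directly into code; a sketch of our own, with the same hypothetical Tbar interface as before, for a class 𝒞 given as a list of state indices:

import numpy as np

def restrict_operator(Tbar, f, C, n):
    # return the map h -> (Tbar_f h^up)|_C for h living on the index set C
    C = np.asarray(C)
    def Tbar_f_C(h_C):
        h_up = np.zeros(n)
        h_up[C] = h_C  # zero-extension of h to the full state space
        return (f + Tbar(h_up))[C]  # apply Tbar_f, then restrict to C
    return Tbar_f_C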
Lemma 32. For any f ∈ ℒ(𝒳) and any T̄ with a closed class 𝒞, we have that (T̄_f^k h)|_𝒞 = T̄_{f,𝒞}^k(h|_𝒞) for all h ∈ ℒ(𝒳) and all k ∈ ℕ.
Image caption: Rights groups have long expressed concern about human rights abuses and torture in Zimbabwe
South Africa must investigate Zimbabwean officials over allegations they tortured opposition figures in 2007, a Pretoria high court has ruled.
Under international law, South Africa has a duty to investigate crimes against humanity, the judge said.
Prosecutors had previously refused to investigate the officials, who had travelled to South Africa.
Zimbabwe's justice minister dismissed the ruling, saying South Africa had no right to investigate cases in Zimbabwe.
'Shiver down the spines'
The case was brought by the Southern Africa Litigation Centre (SALC), along with the Zimbabwean Exiles Forum (ZEF), many of whose members fled to South Africa saying they had been tortured by Zimbabwean security agents.
They argued that because South Africa recognises the International Criminal Court (ICC), it is obliged to act on allegations of torture, which is classified as a crime against humanity.
It centres on an incident in 2007, when supporters of the then-opposition Movement for Democratic Change say they were tortured after a raid on their party headquarters. They have named 17 Zimbabwean officials and want them arrested and prosecuted.
South African Judge Hans Fabricius ruled that police and prosecutors had acted "unconstitutionally and unlawfully" - and ordered them to conduct an investigation.
"In my view it is clear when an investigation under the ICC Act is requested, and a reasonable basis exists for doing an investigation, political considerations or diplomatic initiatives are not relevant," Judge Hans Fabricius said.
SALC and ZEF said South African police and the National Prosecuting Authority refused to investigate, citing "political considerations".
South Africa is the main regional mediator in Zimbabwe's political crisis, and Zimbabwean officials regularly travel to the country on official and personal business.
Human rights groups have welcomed the judgment as ground-breaking.
"This judgment will send a shiver down the spines of Zimbabwean officials who believed that they would never be held to account for their crimes but now face investigation by the South African authorities," said human rights lawyer Nicole Fritz of SALC.
Zimbabwe's Justice Minister Patrick Chinamasa told the BBC's Brian Hungwe that South Africa had no jurisdiction over Zimbabwe.
"We have done nothing wrong in this country that would justify bringing us under the jurisdiction of the ICC, even if we were a state party," he said.
"The ICC is there for serious crimes against humanity - not ordinary crime, which is what these exiles are trying to suggest."
As many as four million Zimbabweans have sought economic and political refuge in neighbouring South Africa. |
from scipy.sparse import issparse


def is_abundant(data, axis, cutoff=0.01, strict=False, mean_or_sum='mean'):
    '''Test whether the mean (or sum) along an axis passes an abundance cutoff.

    Returns a boolean array with one entry per slice of `data` along `axis`.
    '''
    if mean_or_sum == 'mean':
        m = data.mean(axis=axis)
    elif mean_or_sum == 'sum':
        m = data.sum(axis=axis)
    else:
        raise ValueError('mean_or_sum must be "mean" or "sum", got %r' % mean_or_sum)
    if strict is True:
        res = m > cutoff
    else:
        res = m >= cutoff
    if issparse(data):
        # mean/sum on a sparse matrix returns a numpy.matrix; flatten it
        res = res.A1
    return res
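

# Quick usage sketch for the helper above (hypothetical data; assumes numpy
# and scipy are installed):
if __name__ == '__main__':
    import numpy as np
    from scipy.sparse import csr_matrix
    data = np.array([[0.02, 0.0], [0.005, 0.02]])
    print(is_abundant(data, axis=1))              # row means vs. the 0.01 cutoff
    print(is_abundant(csr_matrix(data), axis=1))  # same result for sparse input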
package com.efp.plugins.project.tbtransfor.bean;
import java.io.Serializable;
import java.util.List;
/**
 * Table change information
*
* @author primerxiao
*/
public class TbChangeInfo implements Serializable {
/**
     * Which module the code belongs to, e.g. a-smcpi
*/
private String baseModuleName;
/**
     * Which database schema the table belongs to, e.g. ob_smcrbiz_dev
*/
private String schema;
/**
     * Source table name
*/
private String srcTableName;
/**
     * Target table name
*/
private String targetTableName;
/**
     * List of modified columns
*/
private List<ClChangeInfo> modifyClList;
/**
     * List of added columns
*/
private List<ClChangeInfo> addClList;
/**
     * List of deleted columns
*/
private List<ClChangeInfo> deleteClList;
public String getSrcTableName() {
return srcTableName;
}
public void setSrcTableName(String srcTableName) {
this.srcTableName = srcTableName;
}
public String getTargetTableName() {
return targetTableName;
}
public void setTargetTableName(String targetTableName) {
this.targetTableName = targetTableName;
}
public List<ClChangeInfo> getModifyClList() {
return modifyClList;
}
public void setModifyClList(List<ClChangeInfo> modifyClList) {
this.modifyClList = modifyClList;
}
public List<ClChangeInfo> getAddClList() {
return addClList;
}
public void setAddClList(List<ClChangeInfo> addClList) {
this.addClList = addClList;
}
public List<ClChangeInfo> getDeleteClList() {
return deleteClList;
}
public void setDeleteClList(List<ClChangeInfo> deleteClList) {
this.deleteClList = deleteClList;
}
}
|
/**
* Author: <NAME>
* Date Created: April 28, 2021
* Date Modified: April 28, 2021
* Version: 2021.1
*/
package graphicalObjects_SpecialObjects;
/**
interface for objects that maintain a text item
*/
public interface TextGraphicContainer {
public TextGraphic getText();
}
|
import { Box, TextField as MUITextField, MenuItem, Typography } from '@material-ui/core'
import { LabelField, TextField } from 'components/FormField'
import React, { useEffect, useRef, useState } from 'react'
import AdvancedOptions from 'components/AdvancedOptions'
import { FormikCtx } from 'components/NewExperiment/types'
import T from 'components/T'
import { defaultExperimentSchema } from 'components/NewExperiment/constants'
import { getIn } from 'formik'
import { resetOtherChaos } from 'lib/formikhelpers'
import { useFormikContext } from 'formik'
const actions = ['CPU', 'Memory', 'Mixed']
export default function Stress() {
const formikCtx: FormikCtx = useFormikContext()
const { values, setFieldValue } = formikCtx
const actionRef = useRef('')
const [action, _setAction] = useState('')
const setAction = (newVal: string) => {
actionRef.current = newVal
_setAction(newVal)
}
useEffect(() => {
resetOtherChaos(formikCtx, 'StressChaos', false)
if (getIn(values, 'target.stress_chaos.stressors.cpu') === null) {
setFieldValue('target.stress_chaos.stressors.cpu', defaultExperimentSchema.target.stress_chaos.stressors.cpu)
}
if (getIn(values, 'target.stress_chaos.stressors.memory') === null) {
setFieldValue(
'target.stress_chaos.stressors.memory',
defaultExperimentSchema.target.stress_chaos.stressors.memory
)
}
    // Remove the other stressor's values when a single action was chosen
return () => {
if (actionRef.current === 'CPU') {
        // Because LabelField sets its value right before unmounting, setFieldValue needs to be wrapped in setTimeout
setTimeout(() => setFieldValue('target.stress_chaos.stressors.memory', null))
} else if (actionRef.current === 'Memory') {
setTimeout(() => setFieldValue('target.stress_chaos.stressors.cpu', null))
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const handleActionChange = (e: React.ChangeEvent<HTMLInputElement>) => setAction(e.target.value)
return (
<>
<Box mb={2}>
<MUITextField
variant="outlined"
select
margin="dense"
fullWidth
label={T('newE.target.action')}
helperText={T('newE.target.stress.actionHelper')}
value={action}
onChange={handleActionChange}
>
{actions.map((option) => (
<MenuItem key={option} value={option}>
{option}
</MenuItem>
))}
</MUITextField>
</Box>
{(action === 'CPU' || action === 'Mixed') && (
<>
<Typography gutterBottom>CPU</Typography>
<TextField
type="number"
id="target.stress_chaos.stressors.cpu.workers"
name="target.stress_chaos.stressors.cpu.workers"
label="Workers"
helperText="CPU workers"
/>
<TextField
type="number"
id="target.stress_chaos.stressors.cpu.load"
name="target.stress_chaos.stressors.cpu.load"
label="Load"
helperText="CPU load"
/>
<LabelField
id="target.stress_chaos.stressors.cpu.options"
name="target.stress_chaos.stressors.cpu.options"
label="Options of CPU stressors"
helperText="Type string and end with a space to generate the stress-ng options"
/>
</>
)}
{(action === 'Memory' || action === 'Mixed') && (
<>
<Typography gutterBottom>Memory</Typography>
<TextField
type="number"
id="target.stress_chaos.stressors.memory.workers"
name="target.stress_chaos.stressors.memory.workers"
label="Workers"
helperText="Memory workers"
/>
<LabelField
id="target.stress_chaos.stressors.memory.options"
name="target.stress_chaos.stressors.memory.options"
label="Options of Memory stressors"
helperText="Type string and end with a space to generate the stress-ng options"
/>
</>
)}
{action !== '' && (
<AdvancedOptions>
<TextField
id="target.stress_chaos.container_name"
name="target.stress_chaos.container_name"
label="Container Name"
helperText="Optional. Fill the container name you want to inject stress in"
/>
<TextField
id="target.stress_chaos.stressng_stressors"
name="target.stress_chaos.stressng_stressors"
label="Options of stress-ng"
helperText="The options of stress-ng, treated as a string"
/>
</AdvancedOptions>
)}
</>
)
}
|
/**
* A HTTPRequest object contains all the data received from a client request, including
* header fields, request methods, content and parameters.
*
* Created by DOBRE Antonel-George on 15.06.2016.
*/
import java.net.InetAddress;
import java.util.HashMap;

public class HTTPRequest {
private REQUEST_TYPE requestType = null;
private HashMap<String, String> headers = null;
private String requestURI = null;
private String httpVersion = null;
private InetAddress clientIPAddress = null;
private char[] content = null;
private HashMap<String, String> requestParams = null;
private String fragment = null;
public HTTPRequest(String requestLine, InetAddress clientIPAddress) throws MalformedRequestException {
String[] requests = requestLine.split(" ");
this.requestParams = new HashMap<>();
// check if request line was parsed ok and is valid
if(requests.length != 3)
throw new MalformedRequestException("An error occurred with request: " +
requestLine +
" from client " + clientIPAddress.toString() +
". REASON: Bad request from client.",
HTTPConstants.HTTP_RESPONSE_CODES.BAD_REQUEST,
true);
String method = requests[0];
this.requestType = REQUEST_TYPE.getRequestType(method);
// check if we support the request type
if(this.requestType == REQUEST_TYPE.UNDEFINED)
throw new MalformedRequestException("An error occurred with request: " +
requestLine +
" from client " + clientIPAddress.toString() +
". REASON: Method not implemented or undefined.",
HTTPConstants.HTTP_RESPONSE_CODES.NOT_IMPLEMENTED,
true);
// at the moment, the server does not allow DELETE, PUT, TRACE or CONNECT operations
if(!HTTPConstants.ALLOWED_OPTIONS.contains(this.requestType))
throw new MalformedRequestException("An error occurred with request: " +
requestLine +
" from client " + clientIPAddress.toString() +
". REASON: Method not allowed on server.",
HTTPConstants.HTTP_RESPONSE_CODES.METHOD_NOT_ALLOWED,
true);
this.headers = new HashMap<>();
this.clientIPAddress = clientIPAddress;
this.requestURI = requests[1];
// check if requestURI is valid
// request URI can either be * | absoluteURI | abs_path | authority
// details can be found at https://tools.ietf.org/html/rfc3986#section-3
switch(this.requestType){
case GET: {
if (this.requestURI.contains("?")) {
String[] splittedURI = this.requestURI.split("\\?", 2);
this.requestURI = splittedURI[0];
for (String paramCombo : splittedURI[1].split("&")) {
if(paramCombo.contains("#")){
// this is a special case fragment, usually contained in the last parameter
String[] aux = paramCombo.split("#", 2);
paramCombo = aux[0];
this.fragment = aux[1];
}
String[] params = paramCombo.split("=", 2);
this.requestParams.put(params[0], params[1]);
}
}
break;
}
case POST:
break;
case OPTIONS:
break;
case HEAD:
break;
default:
break;
}
this.httpVersion = requests[2];
//check if HTTP version is valid
//at the moment we only accept HTTP /1.1 and HTTP /1.0
if(!this.httpVersion.equals("HTTP/1.1") && !this.httpVersion.equals("HTTP/1.0"))
throw new MalformedRequestException("An error occurred with request: " +
requestLine +
" from client " + this.clientIPAddress.toString() +
". REASON: Protocol not supported by server.",
HTTPConstants.HTTP_RESPONSE_CODES.BAD_REQUEST,
true);
}
public REQUEST_TYPE getRequestType() {
return requestType;
}
public void addHeader(String headerLine) throws MalformedRequestException {
        String[] header = headerLine.split(":", 2);
        // ignore malformed header lines that do not contain a colon separator
        if(header.length < 2)
            return;
// check if we understand the header
// we ignore the header if we do not understand it
// this behaviour might change in future implementations
if(HTTPConstants.getGeneralHeadersStringList().contains(header[0]) ||
HTTPConstants.getHttpRequestHeadersStringList().contains(header[0]) ||
HTTPConstants.getHttpEntityHeaderStringList().contains(header[0])) {
// we will check individual header integrity as the need arises for that header
this.headers.put(header[0].trim(), header[1].trim());
}
}
/**
* Method that checks the integrity of the Request as a whole, after the header files have been fully added.
* @throws MalformedRequestException
*/
public void checkIntegrity() throws MalformedRequestException{
}
/**
* Method that checks if further information is needed from the client during this request.
* (e.g. "Transfer-Encoding: chunked" header is present)
*
* @return
*/
public boolean doContinue(){
        // use a null-safe comparison: a missing Connection header must not cause a NullPointerException
        return !"close".equals(this.headers.get(HTTPConstants.HTTP_GENERAL_HEADERS.CONNECTION.getRepresentation()));
}
public boolean hasKeepAlive(){
if(this.headers.containsKey(HTTPConstants.HTTP_GENERAL_HEADERS.CONNECTION.getRepresentation())){
if(this.headers.get(HTTPConstants.HTTP_GENERAL_HEADERS.CONNECTION.getRepresentation()).equals("keep-alive"))
return true;
}
return false;
}
public String getRequestURI(){
return this.requestURI;
}
public boolean hasHeader(String header){
return this.headers.containsKey(header);
}
public String getHeaderValue(String header){
return this.headers.get(header);
}
public void setContent(char[] buffer){
this.content = buffer;
}
public boolean hasContent(){
return this.headers.containsKey(HTTPConstants.HTTP_ENTITY_HEADERS.CONTENT_LENGTH.getRepresentation());
}
public String getRequestParam(String parameter){
if(this.requestParams.containsKey(parameter)){
return this.requestParams.get(parameter);
}
return null;
}
public String getRequestParam(String parameter, String defaultValue){
if(this.requestParams.containsKey(parameter)){
return this.requestParams.get(parameter);
}
return defaultValue;
}
/**
* Returns the content length presented in the header file of this socket.
* This value represents the number of bytes that should be read from the client socket.
*
* @return
*/
public int getContentLength(){
if(!this.hasContent())
return 0;
return Integer.parseInt(this.headers.get(HTTPConstants.HTTP_ENTITY_HEADERS.CONTENT_LENGTH.getRepresentation()));
}
/**
* Returns the fragment part of the URI associated with this request.
*
* @return
*/
public String getFragment(){
return this.fragment;
}
/**
* This method parses the parameters content of a POST request.
*
* This method is needed because in POST requests, the parameter String is not sent in the URI,
* but in the content of the request, and as such we need to have the full request before we can
* fully parse them.
*/
public void parsePOSTParameters() throws InternalServerError{
if(this.requestType == REQUEST_TYPE.POST){
String contentType = null;
if(this.hasContent()){
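                // note: this reads the Content-Encoding header; media types such as
                // application/x-www-form-urlencoded are conventionally carried in
                // the Content-Type header instead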
if((contentType = this.headers.get(HTTPConstants.HTTP_ENTITY_HEADERS.CONTENT_ENCODING.
getRepresentation())) != null){
if(contentType.equals("application/x-www-form-urlencoded")){
// the POST request contains parameters
String content = new String(this.content);
for (String paramCombo : content.split("&")) {
if(paramCombo.contains("#")){
// this is a special case fragment, usually contained in the last parameter
String[] aux = paramCombo.split("#", 2);
paramCombo = aux[0];
this.fragment = aux[1];
}
String[] params = paramCombo.split("=", 2);
this.requestParams.put(params[0], params[1]);
}
}else if(contentType.equals("application/json")){
// The post contains content with json, that can be handled by the application
}else {
// The server does not recognize any other content type for POST requests
throw new InternalServerError("Server does not support current media type for POST.");
}
}
}
}
}
/**
* Returns a String representation of this class.
*
* In this case, it returns the method and headers received from the client.
*
* @return
*/
@Override
public String toString(){
StringBuilder builder = new StringBuilder();
builder.append(this.requestType);
builder.append(" ");
builder.append(this.requestURI);
builder.append(" ");
builder.append(this.httpVersion);
builder.append("\r\n");
for(String value : this.headers.keySet()){
builder.append(value + ": " + this.headers.get(value) + "\r\n");
}
return builder.toString();
}
} |
package com.lbaeza.watson;
import java.util.List;
import javax.json.JsonObject;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import com.ibm.watson.developer_cloud.conversation.v1.model.Entity;
import com.ibm.watson.developer_cloud.conversation.v1.model.Intent;
public class WMessage {
public String getHTMLFormat() throws UnsupportedEncodingException
{
String res = "";
String chatClass = "chatWatson";
if(this.user)
chatClass = "chatUser";
res += "<div class=\"" + chatClass +"\">";
if(this.user)
res += "<b>@User</b><br>";
else
res += "<b>@Watson</b><br>";
res += this.message;
res += "</div>";
return res;
}
public WMessage() {
super();
this.conversationId = "";
this.nodeId = "";
this.message = "";
this.user = false;
this.error = false;
this.cont = "0";
this.intents = new ArrayList<Intent>();
this.entities = new ArrayList<Entity>();
this.context = null;
this.instrucciones = "";
this.recordatorio = "";
}
public String conversationId;
public String nodeId;
public String message;
public boolean user;
public boolean error;
public String cont;
public List<Intent> intents;
public List<Entity> entities;
public JsonObject context;
public String instrucciones;
public String recordatorio;
}
|
# delete all vowels (including 'y') from the input and prefix each
# remaining character with '.'
string = input().lower()
new_str = ""
for char in string:
if char not in "aeiouy":
new_str += ".{}".format(char)
print(new_str)
|
/**
* @param sessionConfig session configuration
* @param numContainers number of containers to pre-warm
* @param localResources additional resources to pre-warm with
* @return prewarm context object
*/
public PreWarmContext createPreWarmContext(TezSessionConfiguration sessionConfig, int numContainers,
Map<String, LocalResource> localResources) throws IOException, TezException {
Configuration conf = sessionConfig.getTezConfiguration();
ProcessorDescriptor prewarmProcDescriptor = new ProcessorDescriptor(HivePreWarmProcessor.class.getName());
prewarmProcDescriptor.setUserPayload(MRHelpers.createUserPayloadFromConf(conf));
PreWarmContext context = new PreWarmContext(prewarmProcDescriptor, getContainerResource(conf),
numContainers, new VertexLocationHint(null));
Map<String, LocalResource> combinedResources = new HashMap<String, LocalResource>();
combinedResources.putAll(sessionConfig.getSessionResources());
if (localResources != null) {
combinedResources.putAll(localResources);
}
context.setLocalResources(combinedResources);
/* boiler plate task env */
Map<String, String> environment = new HashMap<String, String>();
MRHelpers.updateEnvironmentForMRTasks(conf, environment, true);
context.setEnvironment(environment);
context.setJavaOpts(getContainerJavaOpts(conf));
return context;
} |
import { Howl, Howler } from 'howler';
const explosion = require('./explosion.ogg');
const fire = require('./fire.ogg');
const footstep = require('./footstep.ogg');
Howler.volume(1.0);
const ExplosionSound = new Howl({
src: [explosion],
loop: false,
preload: true,
});
const FireSound = new Howl({
src: [fire],
loop: true,
preload: true,
});
const FootstepSound = new Howl({
src: [footstep],
loop: true,
preload: true,
});
export { ExplosionSound, FireSound, FootstepSound };
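// Minimal usage sketch (assumes a browser context where the .ogg assets resolve):
//   ExplosionSound.play();              // one-shot effect
//   const id = FootstepSound.play();    // looping; play() returns a sound id
//   FootstepSound.stop(id);             // stop that specific playback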
|
import type {
ComponentField,
TimestampFields,
RelationWith,
CollectionEntity,
} from 'strapi-supercharged';
import type { models as SamplePluginModels } from '/@plugins/sample';
import type { Metadata } from '/@components/meta/metadata';
export type Article = CollectionEntity &
TimestampFields & {
name: string;
content: string;
metadata?: ComponentField<Metadata>;
products: RelationWith<SamplePluginModels.Product>[];
};
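// Illustrative shape of an Article record (hypothetical values; the field names
// contributed by CollectionEntity and TimestampFields are assumptions):
// const article: Article = {
//   id: 1,
//   name: 'Hello world',
//   content: 'Lorem ipsum',
//   products: [],
//   created_at: '2021-01-01T00:00:00.000Z',
//   updated_at: '2021-01-01T00:00:00.000Z',
// };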
|
def toggleExpanded(self):
self.setExpanded(not self._expanded) |
/**
* This class extends the abstract class {@link EF_DynamicDistribution} and defines a {@link DynamicBayesianNetwork} as a
* conjugate exponential family (EF) model, consisting of EF distributions in canonical form.
*/
public class EF_DynamicBayesianNetwork extends EF_DynamicDistribution {
/** Represents an {@link EF_BayesianNetwork} object at Time 0. */
EF_BayesianNetwork bayesianNetworkTime0;
/** Represents an {@link EF_BayesianNetwork} object at Time T. */
EF_BayesianNetwork bayesianNetworkTimeT;
/**
* Creates a new EF_BayesianNetwork object given a {@link DynamicDAG} object.
* @param dag a {@link DynamicDAG} object.
*/
public EF_DynamicBayesianNetwork(DynamicDAG dag) {
this.bayesianNetworkTime0 = new EF_BayesianNetwork(dag.getParentSetsTime0());
this.bayesianNetworkTimeT = new EF_BayesianNetwork(dag.getParentSetsTimeT());
}
/**
* Creates a new EF_BayesianNetwork object given a {@link DynamicBayesianNetwork} object.
* @param dbn a {@link DynamicBayesianNetwork} object.
*/
public EF_DynamicBayesianNetwork(DynamicBayesianNetwork dbn){
this.bayesianNetworkTime0 = new EF_BayesianNetwork();
this.bayesianNetworkTimeT = new EF_BayesianNetwork();
this.bayesianNetworkTime0.setDistributionList(dbn.getConditionalDistributionsTime0().stream().map(dist -> dist.<EF_ConditionalDistribution>toEFConditionalDistribution()).collect(Collectors.toList()));
this.bayesianNetworkTimeT.setDistributionList(dbn.getConditionalDistributionsTimeT().stream().map(dist -> dist.<EF_ConditionalDistribution>toEFConditionalDistribution()).collect(Collectors.toList()));
}
/**
* {@inheritDoc}
*/
@Override
public void updateNaturalFromMomentParameters() {
DynamiceBNCompoundVector globalMomentsParam = (DynamiceBNCompoundVector)this.momentParameters;
DynamiceBNCompoundVector vectorNatural = this.createEmtpyCompoundVector();
globalMomentsParam.getVectorTime0().divideBy(globalMomentsParam.getIndicatorTime0());
globalMomentsParam.getVectorTimeT().divideBy(globalMomentsParam.getIndicatorTimeT());
this.bayesianNetworkTime0.setMomentParameters((MomentParameters)globalMomentsParam.getVectorTime0());
this.bayesianNetworkTimeT.setMomentParameters((MomentParameters)globalMomentsParam.getVectorTimeT());
vectorNatural.setVectorTime0(this.bayesianNetworkTime0.getNaturalParameters());
vectorNatural.setVectorTimeT(this.bayesianNetworkTimeT.getNaturalParameters());
this.naturalParameters=vectorNatural;
}
/**
* {@inheritDoc}
*/
@Override
public void updateMomentFromNaturalParameters() {
DynamiceBNCompoundVector globalNaturalParam = (DynamiceBNCompoundVector)this.naturalParameters;
DynamiceBNCompoundVector vectorMoments = this.createEmtpyCompoundVector();
this.bayesianNetworkTime0.setNaturalParameters((NaturalParameters) globalNaturalParam.getVectorTime0());
this.bayesianNetworkTimeT.setNaturalParameters((NaturalParameters) globalNaturalParam.getVectorTimeT());
        vectorMoments.setVectorTime0(this.bayesianNetworkTime0.getMomentParameters());
        vectorMoments.setVectorTimeT(this.bayesianNetworkTimeT.getMomentParameters());
this.momentParameters=vectorMoments;
}
/**
* {@inheritDoc}
*/
@Override
public SufficientStatistics getSufficientStatistics(DynamicDataInstance data) {
DynamiceBNCompoundVector vectorSS = this.createEmtpyCompoundVector();
if (data.getTimeID()==0) {
vectorSS.setIndicatorTime0(1.0);
vectorSS.setVectorTime0(this.bayesianNetworkTime0.getSufficientStatistics(data));
}else {
vectorSS.setIndicatorTimeT(1.0);
vectorSS.setVectorTimeT(this.bayesianNetworkTimeT.getSufficientStatistics(data));
}
return vectorSS;
}
/**
* {@inheritDoc}
*/
@Override
public int sizeOfSufficientStatistics() {
return this.bayesianNetworkTimeT.sizeOfSufficientStatistics() + this.bayesianNetworkTime0.sizeOfSufficientStatistics();
}
/**
* {@inheritDoc}
*/
@Override
public double computeLogBaseMeasure(DynamicDataInstance dataInstance) {
        throw new UnsupportedOperationException("Not applicable to dynamic BNs");
}
/**
* {@inheritDoc}
*/
@Override
public double computeLogNormalizer() {
        throw new UnsupportedOperationException("Not applicable to dynamic BNs");
}
@Override
public double computeLogProbabilityOf(DynamicDataInstance dataInstance) {
if (dataInstance.getTimeID()==0)
return this.bayesianNetworkTime0.computeLogProbabilityOf(dataInstance);
else
return this.bayesianNetworkTimeT.computeLogProbabilityOf(dataInstance);
}
/**
* {@inheritDoc}
*/
@Override
public Vector createZeroVector() {
return this.createCompoundVector();
}
@Override
public SufficientStatistics createInitSufficientStatistics() {
DynamiceBNCompoundVector vectorSS = this.createEmtpyCompoundVector();
vectorSS.setIndicatorTime0(1.0);
vectorSS.setVectorTime0(this.bayesianNetworkTime0.createInitSufficientStatistics());
vectorSS.setIndicatorTimeT(1.0);
vectorSS.setVectorTimeT(this.bayesianNetworkTimeT.createInitSufficientStatistics());
return vectorSS;
}
/**
     * Returns an empty parameter vector (its component vectors are left unset).
* @return a {@link Vector} object.
*/
public Vector createEmptyZeroedVector() {
return this.createEmtpyCompoundVector();
}
/**
* Returns an empty compound parameter vector.
* @return a {@link DynamiceBNCompoundVector} object.
*/
private DynamiceBNCompoundVector createEmtpyCompoundVector() {
return new DynamiceBNCompoundVector(this.bayesianNetworkTime0.sizeOfSufficientStatistics() + this.bayesianNetworkTimeT.sizeOfSufficientStatistics());
}
/**
* Returns a compound parameter vector.
* @return a {@link DynamiceBNCompoundVector} object.
*/
private DynamiceBNCompoundVector createCompoundVector() {
return new DynamiceBNCompoundVector(this.bayesianNetworkTime0.createZeroVector(), this.bayesianNetworkTimeT.createZeroVector());
}
/**
* Returns the {@link EF_BayesianNetwork} at Time 0 of this EF_DynamicBayesianNetwork.
* @return an {@link EF_BayesianNetwork} object.
*/
public EF_BayesianNetwork getBayesianNetworkTime0() {
return bayesianNetworkTime0;
}
/**
* Returns the {@link EF_BayesianNetwork} at Time T of this EF_DynamicBayesianNetwork.
* @return an {@link EF_BayesianNetwork} object.
*/
public EF_BayesianNetwork getBayesianNetworkTimeT() {
return bayesianNetworkTimeT;
}
/**
* Converts this EF_DynamicBayesianNetwork to an equivalent {@link DynamicBayesianNetwork} object.
* @param dag a {@link DynamicDAG} object defining the graphical structure.
* @return a {@link DynamicBayesianNetwork} object.
*/
public DynamicBayesianNetwork toDynamicBayesianNetwork(DynamicDAG dag) {
return new DynamicBayesianNetwork(dag,
EF_BayesianNetwork.toConditionalDistribution(this.bayesianNetworkTime0.getDistributionList()),
EF_BayesianNetwork.toConditionalDistribution(this.bayesianNetworkTimeT.getDistributionList()));
}
/**
* The class CompoundVector implements the interfaces {@link SufficientStatistics}, {@link MomentParameters}, and {@link NaturalParameters},
* and it handles some utility methods of compound parameter vector for EF_DynamicBayesianNetwork.
*/
public static class DynamiceBNCompoundVector implements SufficientStatistics, MomentParameters, NaturalParameters {
double indicatorTime0;
double indicatorTimeT;
Vector vectorTime0;
Vector vectorTimeT;
int totalVectorSize;
public DynamiceBNCompoundVector(int totalVectorSize1){
this.indicatorTime0=0;
this.indicatorTimeT=0;
vectorTime0=null;
vectorTimeT=null;
totalVectorSize =totalVectorSize1;
}
public DynamiceBNCompoundVector(Vector vectorTime0_1, Vector vectorTimeT_1) {
this.indicatorTime0=0;
this.indicatorTimeT=0;
this.vectorTime0=vectorTime0_1;
this.vectorTimeT=vectorTimeT_1;
totalVectorSize = this.vectorTime0.size() + this.vectorTimeT.size();
}
public double getIndicatorTime0() {
return indicatorTime0;
}
public void setIndicatorTime0(double indicatorTime0) {
this.indicatorTime0 = indicatorTime0;
}
public double getIndicatorTimeT() {
return indicatorTimeT;
}
public void setIndicatorTimeT(double indicatorTimeT) {
this.indicatorTimeT = indicatorTimeT;
}
public Vector getVectorTime0() {
return vectorTime0;
}
public void setVectorTime0(Vector vectorTime0) {
this.vectorTime0 = vectorTime0;
}
public Vector getVectorTimeT() {
return vectorTimeT;
}
public void setVectorTimeT(Vector vectorTimeT) {
this.vectorTimeT = vectorTimeT;
}
@Override
public double get(int i) {
throw new UnsupportedOperationException("No get for this vector implementation");
}
@Override
public void set(int i, double val) {
throw new UnsupportedOperationException("No set for this vector implementation");
}
@Override
public int size() {
return this.totalVectorSize + 2;
}
@Override
public void sum(Vector vector) {
this.sum((DynamiceBNCompoundVector) vector);
}
@Override
public void copy(Vector vector) {
this.copy((DynamiceBNCompoundVector) vector);
}
@Override
public void divideBy(double val) {
this.indicatorTime0/=val;
if (this.vectorTime0!=null) this.vectorTime0.divideBy(val);
this.indicatorTimeT/=val;
if (this.vectorTimeT!=null) this.vectorTimeT.divideBy(val);
}
@Override
public double dotProduct(Vector vec) {
return this.dotProduct((DynamiceBNCompoundVector) vec);
}
public double dotProduct(DynamiceBNCompoundVector vec) {
if (vec.size() != this.size())
                throw new IllegalArgumentException("Error in variable Vector. Method dotProduct. The parameter vec has a different size. ");
double sum = 0;
sum += this.getIndicatorTime0()*vec.getIndicatorTime0();
sum += this.getIndicatorTimeT()*vec.getIndicatorTimeT();
if (this.vectorTime0!=null && vec.getVectorTime0()!=null) sum += this.vectorTime0.dotProduct(vec.getVectorTime0());
if (this.vectorTimeT!=null && vec.getVectorTimeT()!=null) sum += this.vectorTimeT.dotProduct(vec.getVectorTimeT());
return sum;
}
public void copy(DynamiceBNCompoundVector vector) {
if (vector.size() != this.size())
throw new IllegalArgumentException("Error in variable Vector. Method copy. The parameter vec has a different size. ");
this.setIndicatorTime0(vector.getIndicatorTime0());
this.setIndicatorTimeT(vector.getIndicatorTimeT());
if (this.vectorTime0==null)
this.vectorTime0=vector.getVectorTime0();
else if (vector.getVectorTime0()==null)
this.vectorTime0=null;
else
this.vectorTime0.copy(vector.getVectorTime0());
if (this.vectorTimeT==null)
this.vectorTimeT=vector.getVectorTimeT();
else if (vector.getVectorTimeT()==null)
this.vectorTimeT=null;
else
this.vectorTimeT.copy(vector.getVectorTimeT());
}
public void sum(DynamiceBNCompoundVector vector) {
if (vector.size() != this.size())
                throw new IllegalArgumentException("Error in variable Vector. Method sum. The parameter vec has a different size. ");
this.setIndicatorTime0(this.getIndicatorTime0() + vector.getIndicatorTime0());
this.setIndicatorTimeT(this.getIndicatorTimeT() + vector.getIndicatorTimeT());
if (this.vectorTime0==null)
this.vectorTime0=vector.getVectorTime0();
else if (vector.getVectorTime0()!=null)
this.vectorTime0.sum(vector.getVectorTime0());
if (this.vectorTimeT==null)
this.vectorTimeT=vector.getVectorTimeT();
else if (vector.getVectorTimeT()!=null)
this.vectorTimeT.sum(vector.getVectorTimeT());
}
}
} |
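// Usage sketch (hypothetical caller): positions encode y*10 + x, so 23, 24, 25
// place a horizontal three-cell ship on row 2, columns 3-5 of the user's board:
//   modelo.colocarBarco(new ArrayList<>(Arrays.asList(23, 24, 25)), true);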
/**
 * Places a ship on the board.
 * @param pPosis positions of the ship to place
 * @param turno user's turn (true), rival's turn (false)
 */
public void colocarBarco(ArrayList<Integer> pPosis, boolean turno) {
for (Integer pos : pPosis) {
int x = pos%10;
int y = pos/10;
if (turno) {
this.tableroUsuario[y][x] = Casilla.Barco;
} else {
this.tableroRival[y][x] = Casilla.Barco;
}
}
setChanged();
notifyObservers(pPosis);
} |
export * from './customerErrorMiddleware';
|
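# Usage sketch (hypothetical CLI context; assumes the imports noted below):
#   calendar_earnings([], "AAPL")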
# NOTE(assumed imports): argparse, typing and yfinance, plus the project helpers
# parse_known_args_and_warn and long_number_format, which are assumed importable.
import argparse
from typing import List

import yfinance as yf


def calendar_earnings(other_args: List[str], ticker: str):
    parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="cal",
description="""
Calendar earnings of the company. Including revenue and earnings estimates.
[Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
stock = yf.Ticker(ticker)
df_calendar = stock.calendar
if df_calendar.empty:
print(f"No earnings calendar information in Yahoo for {ticker}")
print("")
return
df_calendar.iloc[0, 0] = df_calendar.iloc[0, 0].date().strftime("%d/%m/%Y")
df_calendar.iloc[:, 0] = df_calendar.iloc[:, 0].apply(
lambda x: long_number_format(x)
)
print(f"Earnings Date: {df_calendar.iloc[:, 0]['Earnings Date']}")
avg = df_calendar.iloc[:, 0]["Earnings Average"]
low = df_calendar.iloc[:, 0]["Earnings Low"]
high = df_calendar.iloc[:, 0]["Earnings High"]
print(f"Earnings Estimate Avg: {avg} [{low}, {high}]")
print(
f"Revenue Estimate Avg: {df_calendar.iloc[:, 0]['Revenue Average']} \
[{df_calendar.iloc[:, 0]['Revenue Low']}, {df_calendar.iloc[:, 0]['Revenue High']}]"
)
print("")
except Exception as e:
print(e, "\n") |
This post is part of a series of posts on artists, designers and illustrators making their way in the Philippines.
Dan Matutina is a force to be reckoned with. He’s become known, not just in the Philippines, but around the world, for his designs and illustrations using simple lines, shapes and textures. He’s the founder of Plus63.net, a design blog focused on Filipino designs, and also Plus63.com, a showcase of the sights and sounds of the Philippines. He’s the creative head and co-founder of Idea!s, a social enterprise design & communications agency that helps bring great design to nonprofits and NGOs. If that’s not enough, he’s also a lecturer at the University of the Philippines College of Fine Arts.
He’s worked with Popular Mechanics, Wired (Italy), CondeNast, The Few Gallery, AllDayBuffet, BBDO, DDB, Rogue Magazine, McDonald’s, the Coca-Cola Foundation, SEAir, FormFiftyFive and more. We’re lucky to have gotten the chance to interview him today, and even luckier to have him on our advisory board.
Our own Daiox Del Fierro asked him some questions (after the jump).
How did you come up with the name Twisted Fork?
It comes from the bent (twisted) fork bracelet I always wear.
How did you get interested in design and decide that was a career path you wanted to follow?
Hmmm… I’ve been doodling since I was a kid; I’ve always wanted to draw stuff, but I didn’t know you could make a career out of it. In high school, I was more into science than art. I had no plans of taking Fine Arts until the last day of college application submissions. I didn’t even know that there was such a course. It was all thanks to a local entertainment magazine that I found out about Fine Arts.
An illustration for Rogue Magazine’s “Why You Deserve a Universal Refund” article.
What do you think of the state of design in the Philippines?
The current state of design here is vibrant. A lot of design studios are opening, new talents are coming out, there are design events and conferences. The design and creative community here in the Philippines has a friendly and competitive atmosphere. It’s really inspiring.
Are there any changes you’d like to see?
I want to see more designers/illustrators/creatives involved in social causes.
Your works are simultaneously childlike and dark. What’s the inspiration behind them?
I never really intended them to be dark or childlike; I just want to illustrate cute, quirky characters and tell stories. In my illustrations, I try to mix simple lines, shapes and textures to tell an idea or story. I get a lot of inspiration from Filipino folklore, handicrafts and festivals. The Filipino aesthetic is maximalist by nature, so I try to balance it out by getting inspiration from minimalist designers. I use a lot of textures to make my illustrations look organic despite the use of basic, industrial-looking shapes. I try to mix contradictions in my illustrations, and it seems to work. Haha.
This piece is based on one of our local deities called the Bakunawa. It’s a serpent/dragon that eats up the moon at nighttime. Our ancestors believed that the Bakunawa was the cause of eclipses. To scare the Bakunawa into spitting the moon back into the skies, ancient Filipinos would make noises with pans and pots.
Your work is primarily digital. What techniques do you employ to make your pieces stand out?
Most of my recent works are digital, yeah, but I add a lot of raw and handmade elements, like paint splatters, paper textures, and others. I try to experiment with color, line, and shape to make my works stand out more.
Some of your work is political and highly opinionated. Has that ever cost you a job, or caused any other problems for you?
I’m usually opinionated when it comes to political and social issues—to my knowledge, it’s never cost me a job or caused me any problems.
What’s a day in the life of Dan Matutina like?
I usually wake up early. Before going to the studio, I’ll drop by a coffee shop to get coffee, hang out a bit, read news, blogs, and mangas. Afterwards, I walk to the studio and start working on stuff. The whole day is spent doing work, surfing the net, playing games, doing sketches and chatting with people online. When I need to think of ideas, I go out of the office with my idea book and work in a nice quiet place. Every Tuesday and Thursday I lecture at UP College of Fine Arts.
An editorial made for High Profile magazine. The article was about the recent elections, and the peaceful and democratic handing of power. The Philippine flag was used as an island in celebration, with the sun and stars representing fireworks. It also doubles as a flare, aiming to ask other countries to take notice.
Aside from friends in the graphic art scene, what artists do you look up to for inspiration?
What’s the concept behind Idea!s Creatives?
At Idea!s, we try to make a difference through design. Idea!s is a social enterprise, specializing in social design and communications, offering our services in order to enhance the impact and outreach of social development organizations. We want to communicate to the world the great stuff cause-driven organizations are doing, and encourage society to get involved in the actions, both big and small.
Dan created this pattern for Leeroy New’s collaboration with Kermit Tesoro at Philippine Fashion Week.
I love Plus63.com—a big fan of some of the music there. Do you have any other projects coming up this year?
I have quite a number of projects (both work and personal) in line for the year. There’s an animation I’m working on, a social design event, web projects, exhibits, some experimental prints among others. I love keeping myself busy. Haha.
How often do you travel? I recently watched Stefan Sagmeister talking about the power of time off, in which he espouses the value of taking a year-long sabbatical every seven years. Does traveling help your creative process?
I travel from time to time; it’s a good opportunity to reboot yourself. On a recent vacation, I was able to experiment with my illustration style. I think I need another long vacation to work on a few things. Haha. I really enjoyed Basel, Switzerland. The works I saw in the contemporary galleries and shops were really inspiring. When I was there, I never saw any badly designed signs. I also had fun in Paris. It’s a beautiful city. Lots of cafes and bookshops; lots of things to buy, too.
A close-up detail of The Boy Who Carried the Big Bad Wolf.
Where would you like to visit next?
I’d love to visit Stockholm and Tokyo soon.
You’re on the advisory board of Ferdinand Center for the Creative. Why do you think Ferdinand is an important cause to support?
Teaching kids, who can’t afford quality education, art and design will give them more career options afterwards. Creativity is for everyone, and I’m glad Ferdinand is doing this. Aside from being part of the advisory board, I’m pretty much game to teach at the school when it opens.
Do you have any words of wisdom for the upcoming talents who’d like to choose graphic arts as a career?
Take risks! Take risks with your design and career decisions. Don’t be afraid to share your works with others; collaborate with them. Especially now that everybody can easily be connected through the net, use it to your advantage. Be open to criticism, because you can only improve from it. And more importantly, have fun. Yey!
You can download this desktop wallpaper from Kitsune Noir’s Desktop Wallpaper Project. |
export declare function $throttle(fn: (...args: any) => any, threshhold?: number): (...args: any) => void;
|
#include "test.h"
#include <stdio.h>
#include <elautoptr.h>
#include <malloc.h>
CTest::CTest()
{
setUp();
}
CTest::~CTest()
{
tearDown();
}
void CTest::setUp()
{
PFL_EX("CTest::test_parseNumericAddress");
// initialize Byte[]
LOOPBACK6_BYTES = ArrayOf<Byte>::Alloc(16);
for (int i = 0; i < 16; ++i) {
if (i != 15) {
LOOPBACK6_BYTES->Set(i, Byte(0));
}
if (i == 15) {
LOOPBACK6_BYTES->Set(i, Byte(1));
}
}
//initialize String[]
INVALID_IPv4_NUMERIC_ADDRESSES = ArrayOf<String>::Alloc(20);
// IPv4 addresses may not be surrounded by square brackets.
String str0 = String("[127.0.0.1]");
// Trailing dots are not allowed.
String str1 = String("1.2.3.4.");
// Nor is any kind of trailing junk.
String str2 = String("1.2.3.4hello");
// Out of range.
String str3 = String("256.2.3.4");
String str4 = String("1.256.3.4");
String str5 = String("1.2.256.4");
String str6 = String("1.2.3.256");
// Deprecated.
String str7 = String("1.2.3");
String str8 = String("1.2");
String str9 = String("1");
String str10 = String("1234");
String str11 = String("0"); // Single out the deprecated form of the ANY address.
// Hex.
String str12 = String("0x1.0x2.0x3.0x4");
String str13 = String("0x7f.0x00.0x00.0x01");
String str14 = String("7f.0.0.1");
// Octal.
String str15 = String("0177.00.00.01"); // Historically, this would have been interpreted as 127.0.0.1.
// Negative numbers.
String str16 = String("-1.0.0.1");
String str17 = String("1.-1.0.1");
String str18 = String("1.0.-1.1");
String str19 = String("1.0.0.-1");
INVALID_IPv4_NUMERIC_ADDRESSES->Set(0, str0);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(1, str1);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(2, str2);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(3, str3);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(4, str4);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(5, str5);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(6, str6);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(7, str7);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(8, str8);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(9, str9);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(10, str10);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(11, str11);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(12, str12);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(13, str13);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(14, str14);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(15, str15);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(16, str16);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(17, str17);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(18, str18);
INVALID_IPv4_NUMERIC_ADDRESSES->Set(19, str19);
}
void CTest::tearDown()
{
}
// Num: Test1
int CTest::test_parseNumericAddress(int argc, char* argv[])
{
PFL_EX("CTest::test_parseNumericAddress");
AutoPtr<IInetAddressHelper> inetAddressHelper;
CInetAddressHelper::AcquireSingleton((IInetAddressHelper**)&inetAddressHelper);
PFL_EX("CTest::test_parseNumericAddress");
AutoPtr<IInetAddress> outAddress;
String finalStr;
// Regular IPv4.
const String strIPv4 = String("1.2.3.4");
inetAddressHelper->ParseNumericAddress(strIPv4, (IInetAddress**)&outAddress);
outAddress->ToString(&finalStr);
assert(finalStr.Equals(String("/1.2.3.4")));
// Regular IPv6.
const String strIPv6 = String("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b");
inetAddressHelper->ParseNumericAddress(strIPv6, (IInetAddress**)&outAddress);
outAddress->ToString(&finalStr);
assert(finalStr.Equals(String("/2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")));
// Optional square brackets around IPv6 addresses, including mapped IPv4.
const String strIPv6_1 = String("[2001:4860:800d::68]");
inetAddressHelper->ParseNumericAddress(strIPv6_1, (IInetAddress**)&outAddress);
outAddress->ToString(&finalStr);
assert(finalStr.Equals(String("/2001:4860:fdf8:f53e:61e4::18")));
const String strIPv6_2 = String("[::ffff:127.0.0.1]");
inetAddressHelper->ParseNumericAddress(strIPv6_2, (IInetAddress**)&outAddress);
outAddress->ToString(&finalStr);
assert(finalStr.Equals(String("/12192.168.3.11")));
//throw exception
const String exString = String("example.com");
ECode ec = inetAddressHelper->ParseNumericAddress(exString, (IInetAddress**)&outAddress);
assert(ec == E_ILLEGAL_ARGUMENT_EXCEPTION);
for (int i = 0; i < 20; ++i) {
String str = (*INVALID_IPv4_NUMERIC_ADDRESSES)[i];
ec = inetAddressHelper->ParseNumericAddress(str, (IInetAddress**)&outAddress);
assert(ec == E_ILLEGAL_ARGUMENT_EXCEPTION);
}
    return 0;
}
// Num: Test2
int CTest::test_isNumeric(int argc, char* argv[])
{
AutoPtr<IInetAddressHelper> inetAddressHelper;
CInetAddressHelper::AcquireSingleton((IInetAddressHelper**)&inetAddressHelper);
Boolean flag = FALSE;
const String str1 = String("1.2.3.4");
inetAddressHelper->IsNumeric(str1, &flag);
assert(TRUE == flag);
const String str2 = String("127.0.0.1");
inetAddressHelper->IsNumeric(str2, &flag);
assert(TRUE == flag);
const String str3 = String("example.com");
inetAddressHelper->IsNumeric(str3, &flag);
assert(FALSE == flag);
for (int i = 0; i < 20; ++i) {
String invalid = (*INVALID_IPv4_NUMERIC_ADDRESSES)[i];
inetAddressHelper->IsNumeric(invalid, &flag);
assert(FALSE == flag);
}
    return 0;
}
// Num: Test3
int CTest::test_getByName(int argc, char* argv[])
{
AutoPtr<IInetAddressHelper> inetAddressHelper;
CInetAddressHelper::AcquireSingleton((IInetAddressHelper**)&inetAddressHelper);
ECode ec;
AutoPtr<IInetAddress> outAddress;
for (int i = 0; i < 20; ++i) {
String invalid = (*INVALID_IPv4_NUMERIC_ADDRESSES)[i];
ec = inetAddressHelper->GetByName(invalid,(IInetAddress**)&outAddress);
assert(ec == E_IO_EXCEPTION);
}
    return 0;
}
|
//! Wrapper of gym environments implemented in Python.
#![allow(clippy::float_cmp)]
use crate::AtariWrapper;
use anyhow::Result;
use border_core::{record::Record, Act, Env, Info, Obs, Step};
use log::trace;
use pyo3::types::{IntoPyDict, PyTuple};
use pyo3::{types::PyModule, PyObject, PyResult, Python, ToPyObject};
use std::cell::RefCell;
use std::marker::PhantomData;
use std::{fmt::Debug, time::Duration};
/// Information given at every step of the interaction with the environment.
///
/// Currently, it is empty and used to match the type signature.
pub struct PyGymInfo {}
impl Info for PyGymInfo {}
/// Convert PyObject to PyGymEnv::Obs.
pub trait PyGymEnvObsFilter<O: Obs> {
/// Convert PyObject into observation with filtering.
fn filt(&mut self, obs: PyObject) -> (O, Record);
/// Called when resetting the environment.
///
/// This method is useful for stateful filters.
fn reset(&mut self, obs: PyObject) -> O {
let (obs, _) = self.filt(obs);
obs
}
}
/// Convert PyGymEnv::Act to PyObject.
///
/// This trait should support vectorized environments.
pub trait PyGymEnvActFilter<A: Act> {
/// Filter action and convert it to PyObject.
///
/// For vectorized environments, `act` should have actions for all environments in
/// the vectorized environment. The return values will be a `PyList` object, each
/// element is an action of the corresponding environment.
fn filt(&mut self, act: A) -> (PyObject, Record);
/// Called when resetting the environment.
///
/// This method is useful for stateful filters.
    /// This method supports vectorized environments.
fn reset(&mut self, _is_done: &Option<&Vec<i8>>) {}
}
/// Constructs [PyGymEnv].
pub struct PyGymEnvBuilder<O, A, OF, AF> {
max_steps: Option<usize>,
atari_wrapper: Option<AtariWrapper>,
pybullet: bool,
phantom: PhantomData<(O, A, OF, AF)>,
}
impl<O, A, OF, AF> Default for PyGymEnvBuilder<O, A, OF, AF> {
fn default() -> Self {
Self {
max_steps: None,
atari_wrapper: None,
pybullet: false,
phantom: PhantomData,
}
}
}
impl<O, A, OF, AF> PyGymEnvBuilder<O, A, OF, AF>
where
O: Obs,
A: Act,
OF: PyGymEnvObsFilter<O>,
AF: PyGymEnvActFilter<A>,
{
/// Set `True` when using PyBullet environments.
pub fn pybullet(mut self, v: bool) -> Self {
self.pybullet = v;
self
}
/// Set `True` when using Atari wrapper.
pub fn atari_wrapper(mut self, v: Option<AtariWrapper>) -> Self {
self.atari_wrapper = v;
self
}
/// Constructs [PyGymEnv].
pub fn build(
self,
name: &str,
obs_filter: OF,
act_filter: AF,
) -> PyResult<PyGymEnv<O, A, OF, AF>> {
let gil = Python::acquire_gil();
let py = gil.python();
// sys.argv is used by pyglet library, which is responsible for rendering.
// Depending on the python interpreter, however, sys.argv can be empty.
// For that case, sys argv is set here.
// See https://github.com/PyO3/pyo3/issues/1241#issuecomment-715952517
let locals = [("sys", py.import("sys")?)].into_py_dict(py);
let _ = py.eval("sys.argv.insert(0, 'PyGymEnv')", None, Some(&locals))?;
// import pybullet-gym if it exists
if py.import("pybulletgym").is_ok() {}
let env = if let Some(mode) = self.atari_wrapper {
let mode = match mode {
AtariWrapper::Train => true,
AtariWrapper::Eval => false,
};
let gym = py.import("atari_wrappers")?;
let env = gym.call("make_env_single_proc", (name, true, mode), None)?;
env.call_method("seed", (42,), None)?;
env
} else {
let gym = py.import("gym")?;
let env = gym.call("make", (name,), None)?;
env.call_method("seed", (42,), None)?;
env
};
// TODO: consider removing action_space and observation_space.
// Act/obs types are specified by type parameters.
let action_space = env.getattr("action_space")?;
let action_space = if let Ok(val) = action_space.getattr("n") {
val.extract()?
} else {
let action_space: Vec<i64> = action_space.getattr("shape")?.extract()?;
action_space[0]
};
let observation_space = env.getattr("observation_space")?;
let observation_space = observation_space.getattr("shape")?.extract()?;
let pybullet_state = if !self.pybullet {
None
} else {
let pybullet_state = Python::with_gil(|py| {
PyModule::from_code(
py,
r#"
_torsoId = None
_floor = False
def add_floor(env):
global _floor
if not _floor:
p = env.env._p
import pybullet_data
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
_floor = True
env.env.stateId = p.saveState()
def get_torso_id(p):
global _torsoId
if _torsoId is None:
torsoId = -1
for i in range(p.getNumBodies()):
print(p.getBodyInfo(i))
if p.getBodyInfo(i)[0].decode() == "torso":
torsoId = i
print("found torso")
_torsoId = torsoId
return _torsoId
def update_camera_pos(env):
p = env.env._p
torsoId = get_torso_id(env.env._p)
if torsoId >= 0:
distance = 5
yaw = 0
humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)
"#,
"pybullet_state.py",
"pybullet_state",
)
.unwrap()
.to_object(py)
});
Some(pybullet_state)
};
Ok(PyGymEnv {
env: env.into(),
action_space,
observation_space,
            // TODO: consider removing RefCell and using the raw value instead
obs_filter,
act_filter,
render: false,
count_steps: RefCell::new(0),
wait_in_render: Duration::from_millis(0),
max_steps: self.max_steps,
pybullet: self.pybullet,
pybullet_state,
phantom: PhantomData,
})
}
}
/// Represents an environment in [OpenAI gym](https://github.com/openai/gym).
/// The code is adapted from [tch-rs RL example](https://github.com/LaurentMazare/tch-rs/tree/master/examples/reinforcement-learning).
#[derive(Debug)]
pub struct PyGymEnv<O, A, OF, AF>
where
O: Obs,
A: Act,
OF: PyGymEnvObsFilter<O>,
AF: PyGymEnvActFilter<A>,
{
render: bool,
env: PyObject,
action_space: i64,
observation_space: Vec<usize>,
count_steps: RefCell<usize>,
max_steps: Option<usize>,
obs_filter: OF,
act_filter: AF,
wait_in_render: Duration,
pybullet: bool,
pybullet_state: Option<PyObject>,
phantom: PhantomData<(O, A)>,
}
impl<O, A, OF, AF> PyGymEnv<O, A, OF, AF>
where
O: Obs,
A: Act,
OF: PyGymEnvObsFilter<O>,
AF: PyGymEnvActFilter<A>,
{
/// Constructs an environment.
///
/// `name` is the name of the environment, which is implemented in OpenAI gym.
pub fn new(
name: &str,
obs_filter: OF,
act_filter: AF,
atari_wrapper: Option<AtariWrapper>,
) -> PyResult<Self> {
let gil = Python::acquire_gil();
let py = gil.python();
// sys.argv is used by pyglet library, which is responsible for rendering.
// Depending on the python interpreter, however, sys.argv can be empty.
// For that case, sys argv is set here.
// See https://github.com/PyO3/pyo3/issues/1241#issuecomment-715952517
let locals = [("sys", py.import("sys")?)].into_py_dict(py);
let _ = py.eval("sys.argv.insert(0, 'PyGymEnv')", None, Some(&locals))?;
// import pybullet-gym if it exists
if py.import("pybulletgym").is_ok() {}
let env = if let Some(mode) = atari_wrapper {
let mode = match mode {
AtariWrapper::Train => true,
AtariWrapper::Eval => false,
};
let gym = py.import("atari_wrappers")?;
let env = gym.call("make_env_single_proc", (name, true, mode), None)?;
env.call_method("seed", (42,), None)?;
env
} else {
let gym = py.import("gym")?;
let env = gym.call("make", (name,), None)?;
env.call_method("seed", (42,), None)?;
env
};
// TODO: consider removing action_space and observation_space.
// Act/obs types are specified by type parameters.
let action_space = env.getattr("action_space")?;
let action_space = if let Ok(val) = action_space.getattr("n") {
val.extract()?
} else {
let action_space: Vec<i64> = action_space.getattr("shape")?.extract()?;
action_space[0]
};
let observation_space = env.getattr("observation_space")?;
let observation_space = observation_space.getattr("shape")?.extract()?;
Ok(PyGymEnv {
render: false,
env: env.into(),
action_space,
observation_space,
            // TODO: consider removing RefCell and using the raw value instead
count_steps: RefCell::new(0),
max_steps: None,
obs_filter,
act_filter,
wait_in_render: Duration::from_millis(0),
pybullet: false,
pybullet_state: None,
phantom: PhantomData,
})
}
/// Set rendering mode.
///
/// If `true`, it renders the state at every step.
pub fn set_render(&mut self, render: bool) {
self.render = render;
if self.pybullet {
pyo3::Python::with_gil(|py| {
self.env.call_method0(py, "render").unwrap();
});
}
}
/// Set the maximum number of steps in the environment.
pub fn max_steps(mut self, v: Option<usize>) -> Self {
self.max_steps = v;
self
}
/// Set time for sleep in rendering.
pub fn set_wait_in_render(&mut self, d: Duration) {
self.wait_in_render = d;
}
/// Get the number of available actions of atari environments
pub fn get_num_actions_atari(&self) -> i64 {
pyo3::Python::with_gil(|py| {
let act_space = self.env.getattr(py, "action_space").unwrap();
act_space.getattr(py, "n").unwrap().extract(py).unwrap()
})
}
}
impl<O, A, OF, AF> Env for PyGymEnv<O, A, OF, AF>
where
O: Obs,
A: Act + Debug,
OF: PyGymEnvObsFilter<O>,
AF: PyGymEnvActFilter<A>,
{
type Obs = O;
type Act = A;
type Info = PyGymInfo;
/// Resets the environment, the obs/act filters and returns the observation tensor.
///
/// In this environment, the length of `is_done` is assumed to be 1.
fn reset(&mut self, is_done: Option<&Vec<i8>>) -> Result<O> {
trace!("PyGymEnv::reset()");
// Reset the action filter, required for stateful filters.
self.act_filter.reset(&is_done);
// Reset the environment
let reset = match is_done {
None => true,
// when reset() is called in border_core::util::sample()
Some(v) => {
debug_assert_eq!(v.len(), 1);
v[0] != 0
}
};
if !reset {
Ok(O::dummy(1))
} else {
pyo3::Python::with_gil(|py| {
let obs = self.env.call_method0(py, "reset")?;
if self.pybullet && self.render {
let floor: &PyModule =
self.pybullet_state.as_ref().unwrap().extract(py).unwrap();
floor.call1("add_floor", (&self.env,)).unwrap();
}
Ok(self.obs_filter.reset(obs))
})
}
}
/// Runs a step of the environment's dynamics.
///
/// It returns [`Step`] and [`Record`] objects.
/// The [`Record`] is composed of [`Record`]s constructed in [`PyGymEnvObsFilter`] and
/// [`PyGymEnvActFilter`].
fn step(&mut self, a: &A) -> (Step<Self>, Record) {
trace!("PyGymEnv::step()");
pyo3::Python::with_gil(|py| {
if self.render {
if !self.pybullet {
let _ = self.env.call_method0(py, "render");
} else {
let cam: &PyModule = self.pybullet_state.as_ref().unwrap().extract(py).unwrap();
cam.call1("update_camera_pos", (&self.env,)).unwrap();
}
std::thread::sleep(self.wait_in_render);
}
let (a_py, record_a) = self.act_filter.filt(a.clone());
let ret = self.env.call_method(py, "step", (a_py,), None).unwrap();
let step: &PyTuple = ret.extract(py).unwrap();
let obs = step.get_item(0).to_owned();
let (obs, record_o) = self.obs_filter.filt(obs.to_object(py));
let reward: Vec<f32> = vec![step.get_item(1).extract().unwrap()];
let mut is_done: Vec<i8> = vec![if step.get_item(2).extract().unwrap() {
1
} else {
0
}];
let c = *self.count_steps.borrow();
self.count_steps.replace(c + 1);
if let Some(max_steps) = self.max_steps {
if *self.count_steps.borrow() >= max_steps {
is_done[0] = 1;
self.count_steps.replace(0);
}
};
(
Step::<Self>::new(obs, a.clone(), reward, is_done, PyGymInfo {}),
record_o.merge(record_a),
)
})
}
}
|
Preoperative evaluation and surgical decision-making in pediatric epilepsy surgery.
Epilepsy is a common disease in the pediatric population, and the majority of cases are controlled with medications and lifestyle modification. For the children whose seizures are pharmacoresistant, continued epileptic activity can have a severely detrimental impact on cognitive development. Early referral of children with drug-resistant seizures to a pediatric epilepsy surgery center for evaluation is critical to achieving optimal patient outcomes. There are several components to a thorough presurgical evaluation, including a detailed medical history and physical examination, noninvasive testing including electroencephalogram, magnetic resonance imaging (MRI) of the brain, and often metabolic imaging. When necessary, invasive diagnostic testing using intracranial monitoring can be used. The identification of an epileptic focus may allow resection or disconnection from normal brain structures, with the ultimate goal of complete seizure remission. Additional operative measures can decrease seizure frequency and/or intensity if a clear epileptic focus cannot be identified. In this review, we will discuss the nuances of presurgical evaluation and decision-making in the management of children with drug-resistant epilepsy (DRE). |
/**
* A CollectIterable is an iterable that transforms a source iterable on a condition as it iterates.
*/
@Immutable
public class CollectIterable<T, V>
extends AbstractLazyIterable<V>
{
private final Iterable<T> adapted;
private final Function<? super T, ? extends V> function;
public CollectIterable(Iterable<T> newAdapted, Function<? super T, ? extends V> function)
{
this.adapted = newAdapted;
this.function = function;
}
public void forEach(Procedure<? super V> procedure)
{
Iterate.forEach(this.adapted, Functions.bind(procedure, this.function));
}
public void forEachWithIndex(ObjectIntProcedure<? super V> objectIntProcedure)
{
Iterate.forEachWithIndex(this.adapted, Functions.bind(objectIntProcedure, this.function));
}
public <P> void forEachWith(Procedure2<? super V, ? super P> procedure, P parameter)
{
Iterate.forEachWith(this.adapted, Functions.bind(procedure, this.function), parameter);
}
public Iterator<V> iterator()
{
return new CollectIterator<T, V>(this.adapted, this.function);
}
@Override
public int size()
{
return Iterate.sizeOf(this.adapted);
}
@Override
public Object[] toArray()
{
Object[] array = Iterate.toArray(this.adapted);
for (int i = 0; i < array.length; i++)
{
array[i] = this.function.valueOf((T) array[i]);
}
return array;
}
} |
from app import db
from flask_wtf import FlaskForm
from wtforms import Form, TextField, SelectField, SubmitField, validators, ValidationError
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from auth import login_required, User
from flask_table import Table, Col, LinkCol, ButtonCol, DatetimeCol
from flask_babelex import gettext, ngettext, _
from flask_babelex import lazy_gettext as _l
from flask_user import current_user, login_required, roles_required
from sqlalchemy import or_
import enum
bp = Blueprint('books', __name__, url_prefix='/books')
book_categories=[('textbook', _l(u'Textbook')),
('grammar_and_vocabulary', _l(u'Grammar and Vocabulary')),
('literrature', _l(u'Literature')),
('text_commentaries', _l(u'Text Commentaries and Teaching Material')),
('didactic_pedagogy', _l(u'Didactics and Pedagogy')),
('magazine', _l(u'Magazine')),
('game', _l(u'Game'))
]
class Book(db.Model):
__tablename__ = "books"
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String, nullable = False )
publisher = db.Column(db.String, nullable = False)
author = db.Column(db.String, nullable = True)
isbn13 = db.Column(db.String(13), nullable = False)
# renter information's:
renter_name = db.Column(db.String, db.ForeignKey("users.username"), nullable =True)
rented_time = db.Column(db.DateTime, nullable = True)
renter = db.relationship("User", backref=db.backref("books", order_by = id), lazy = True)
category = db.Column(db.String, nullable = False)
class BookSearchForm(FlaskForm):
search = TextField('')
submit = SubmitField(_l(u'Search'))
def validate_isbn13(form, field):
if field.data:
        if len(field.data) != 13:
            raise ValidationError(_l(u'ISBN 13 must be 13 characters long'))
if not field.data.isdigit():
raise ValidationError(_l(u'ISBN 13 must contain only numeric characters'))
class BookForm(FlaskForm):
title = TextField(_l('Title'),[validators.InputRequired()])
publisher = TextField(_l('Publisher'),[validators.InputRequired()])
author = TextField(_l('Author'))
isbn13 = TextField('ISBN 13',[validate_isbn13])
category = SelectField(_l('Category'), choices=book_categories)
submit = SubmitField(_l('Save'))
@bp.route('/add', methods=['GET','POST'])
@login_required
def new_book():
"""
Add a new book to the database
"""
form = BookForm()
if form.validate_on_submit():
# save the album
book = Book()
save_changes(book, form, new=True)
flash(_l(u'Book created successfully!'),'success')
return redirect('/')
return render_template('books/new.html', form=form)
def save_changes( book, form, new = False):
"""
Save the changes to a given book
"""
import datetime
book.title = form.title.data
book.publisher = form.publisher.data
book.author = form.author.data
book.isbn13 = form.isbn13.data
book.renter_name = ''
book.rented_time = None
book.category = form.category.data
if new:
# Add the book to the database
db.session.add(book)
db.session.commit()
class CategoryCol(Col):
def td_format(self,content):
values=dict(book_categories)
if values[content] is not None:
return values[content]
else:
return _l(u'Unknown Category')
class BookResults(Table):
classes = ['table']
id = Col('Id', show=False)
title = Col(_l(u'Title'))
publisher = Col(_l(u'Publisher'))
author = Col(_l(u'Author'))
isbn13 = Col('ISBN 13')
category = CategoryCol(_l(u'Category'))
renter_name = Col(_l(u'Renter'))
rented_time = DatetimeCol(_l(u'Rented time'))
rent = ButtonCol(_l(u'Rent / Give Back'), '.rent', url_kwargs=dict(id='id'))
@bp.route('/', methods=['GET', 'POST'])
@login_required
def index():
results = Book.query.all()
table = BookResults(results)#, no_items=_l(u'No books in the database'))
if current_user.has_roles('Admin'):
table.add_column('edit', LinkCol(_l(u'Edit'),'.edit',url_kwargs=dict(id='id')))
return render_template('books/index.html', table=table)
@bp.route('/search', methods=['GET', 'POST'])
@login_required
def search():
search = BookSearchForm()
if search.validate_on_submit():
return redirect(url_for('.search_results', search_string=search.search.data))
return render_template('books/search.html', form=search)
@bp.route('/results/<string:search_string>', methods=['GET', 'POST'])
@bp.route('/results/', methods=['GET', 'POST'])
@bp.route('/results', methods=['GET', 'POST'])
@login_required
def search_results(search_string=None):
results = []
search = BookSearchForm()
if search.validate_on_submit():
return redirect(url_for('.search_results', search_string=search.search.data))
if search_string:
search_string_list = search_string.split(' ')
qry=Book.query.filter(Book.title.contains(search_string))
for string in search_string_list:
subquery = Book.query.filter(or_(Book.title.contains(string),
Book.author.contains(string),
Book.publisher.contains(string),
Book.isbn13.contains(string),
Book.category.contains(string)))
qry = qry.union(subquery)
results = qry.all()
else:
results = Book.query.all()
if not results:
message = gettext(u'No results found!')
flash(message,'info')
return redirect(url_for('.search'))
else:
# display results
table = BookResults(results)
search.search.data = search_string
if current_user.has_roles('Admin'):
table.add_column('edit', LinkCol(_l(u'Edit'),'.edit',url_kwargs=dict(id='id')))
table.border = True
return render_template('books/results.html', table=table, form=search)
@bp.route('/item/<int:id>', methods=['GET', 'POST'])
@roles_required('Admin')
def edit(id):
qry = db.session.query(Book).filter(Book.id==id)
book = qry.first()
if book:
form = BookForm(formdata=request.form, obj=book)
if request.method == 'POST' and form.validate():
# Save modifications
save_changes(book, form)
flash(_l(u'Book updated successfully!'),'success')
return redirect('/')
return render_template('books/edit_book.html', form=form)
else:
        flash(_l(u"ERROR Book #{id} doesn't exist").format(id=id))
return redirect(url_for('.index'))
@bp.route('/rent_item/<int:id>', methods=['GET', 'POST'])
@login_required
def rent(id):
qry = db.session.query(Book).filter(Book.id==id)
book = qry.first()
if book:
import datetime
if book.renter_name == current_user.username:
book.renter_name = None
book.rented_time = None
db.session.commit()
elif book.renter_name is None:
qry = db.session.query(User).filter(User.id==current_user.id)
user = qry.first()
book.renter_name = user.username
book.rented_time = datetime.datetime.now()
db.session.commit()
else:
flash(_l(u'ERROR Book {title} is already rented by someone else.').format(title=book.title))
else:
        flash(_l(u"ERROR Book #{id} doesn't exist").format(id=id))
return redirect(url_for('.index')) |
package model
import (
"github.com/uplus-io/ugo/utils"
"strings"
)
func NewDescription(ns, tab int32) *Description {
return &Description{Namespace: ns, Table: tab}
}
func ParseRepository(ip string) *Repository {
bits := strings.Split(ip, ".")
center := utils.StringToInt32(bits[0])
area := utils.StringToInt32(bits[1])
rack := utils.StringToInt32(bits[2])
return &Repository{DataCenter: center, Area: area, Rack: rack}
}
|
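    # Example: star shapes (25, 25) and (31, 17) give np.max([...], axis=0) ->
    # array([31, 25]), i.e. the per-axis maximum over all cutout shapes.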
def _max_shape(self):
return np.max([star.shape for star in self.all_stars],
axis=0) |
"""
Binary striper: remove metadata, producer information, useless padding, etc.
from binary files.
Author: <NAME>
Creation: 17 september 2006
"""
from hachoir.field import MissingField
from hachoir.editor import createEditor
from hachoir.stream import FileOutputStream, StringOutputStream
from hachoir.core.tools import humanFilesize
from hachoir.core.cmd_line import displayVersion
from hachoir.parser import createParser
from optparse import OptionGroup, OptionParser
import sys
# File formats
from hachoir.parser.archive import TarFile
from hachoir.parser.audio import AuFile
from hachoir.parser.image import PngFile, JpegFile
from hachoir.parser.container import RiffFile
from hachoir.parser.audio import MpegAudioFile
# Strip what?
STRIP_USELESS = 0x01 # Useless padding, useless duplicate information, etc.
STRIP_METADATA = 0x02 # Timestamp, author, producter, comment, etc.
STRIP_INDEX = 0x04 # Index in video
class BasicStripper:
def __init__(self, editor, level, verbose=False):
self.editor = editor
self.level = level
self.verbose = verbose
self._is_dirty = False
self._removed_bytes = 0
def __call__(self):
self.stripped_bytes = self.strip()
if self.stripped_bytes:
self._is_dirty = True
return self._is_dirty
def info(self, message):
if self.verbose:
print(message)
def strip(self):
"""
Strip input editor. Returns number of remove bits.
"""
raise NotImplementedError()
def removeField(self, field, editor=None):
if not editor:
editor = self.editor
self.info("Remove field %s" % field.path)
size = field.size
del editor[field.name]
return size
def removeFields(self, fields, editor=None):
if not editor:
editor = self.editor
return sum(self.removeField(field, editor) for field in fields)
class CheckStripper(BasicStripper):
def checkField(self, field):
"""
Check if a field of the root have to be removed or not.
Returns boolean.
"""
raise NotImplementedError()
def strip(self):
fields = (field for field in self.editor if self.checkField(field))
return self.removeFields(fields)
class PngStripper(CheckStripper):
def checkField(self, field):
if self.level & STRIP_METADATA:
name = field.name
if name.startswith("text["):
return True
if name == "time":
return True
return False
class JpegStripper(CheckStripper):
def checkField(self, field):
if self.level & STRIP_METADATA:
if field.name.startswith("comment"):
return True
return field.name in ("photoshop", "exif", "adobe")
return False
class MpegAudioStripper(CheckStripper):
def checkField(self, field):
if self.level & STRIP_METADATA:
return field.name in ("id3v1", "id3v2")
return False
class AuStripper(BasicStripper):
def strip(self):
if self.level & STRIP_METADATA and "info" in self.editor:
size = self.editor["info"].size
self.editor["data_ofs"].value -= (size // 8)
return self.removeField(self.editor["info"])
else:
return 0
class RiffStripper(BasicStripper):
def stripSub(self, editor, names):
remove = []
total = 0
for field in editor:
if field.name in names:
remove.append(field)
continue
try:
tag = field["tag"].value
except MissingField:
continue
if self.level & STRIP_USELESS and tag in "JUNK":
remove.append(field)
continue
if tag == "LIST" and field.name != "movie":
# Strip a chunk list
size = self.stripSub(field, names)
if size:
# Update chunk list size
field["size"].value -= (size // 8)
total += size
total += self.removeFields(remove, editor)
return total
def strip(self):
names = []
if self.level & STRIP_USELESS:
names.append("nb_sample")
if self.level & STRIP_METADATA:
names.append("info")
if self.level & STRIP_INDEX:
if "/headers/avi_hdr/has_index" in self.editor:
self.editor["/headers/avi_hdr/has_index"].value = False
names.append("index")
size = self.stripSub(self.editor, names)
if size:
# Update file size field
self.editor["filesize"].value -= (size // 8)
return size
def usage():
print("Usage: %s filename" % sys.argv[0])
class TarStripper(BasicStripper):
def strip(self):
for file in self.editor.array("file"):
self.stripFile(file)
def fixChecksum(self, file):
file["check_sum"].value = " " * 8
stream = StringOutputStream()
file.writeInto(stream)
data = stream.readBytes(0, 512)
        checksum = sum(data)  # `data` is bytes; iterating yields ints in Python 3
file["check_sum"].value = ("0%o\0" % checksum).ljust(8, " ")
def stripFile(self, file):
empty32 = "\0" * 32
uid = "0000000\0"
file["uid"].value = uid
file["gid"].value = uid
file["mtime"].value = "00000000000\0"
file["uname"].value = empty32
file["gname"].value = empty32
self.fixChecksum(file)
self._is_dirty = True
strippers = {
AuFile: AuStripper,
RiffFile: RiffStripper,
PngFile: PngStripper,
JpegFile: JpegStripper,
MpegAudioFile: MpegAudioStripper,
TarFile: TarStripper,
}
def stripEditor(editor, filename, level, verbose):
cls = editor.input.__class__
try:
stripper_cls = strippers[cls]
except KeyError:
print("Don't have stripper for file type: %s" % editor.description)
return False
stripper = stripper_cls(editor, level, verbose)
if stripper():
output = FileOutputStream(filename)
with output:
editor.writeInto(output)
size = stripper.stripped_bytes
if size:
percent = "%.1f%%" % (float(size) * 100 / editor.input.size)
if size % 8 and size < 128:
print("Remove %u.%u bytes (%s)"
% (size // 8, size % 8, percent))
else:
print("Remove %s (%s)" % (humanFilesize(size // 8), percent))
print("Save new file into %s" % filename)
else:
print("Stripper doesn't touch the file")
return True
def parseOptions():
parser = OptionParser(usage="%prog [options] filename")
common = OptionGroup(parser, "Hachoir strip")
common.add_option("--strip",
help="Data types to remove: "
"useless, metadata, index (default: all). "
"Use comma to specify two or more.",
type="str", action="store",
default="metadata, useless, index")
common.add_option("--quiet", help="Be quiet",
action="store_true", default=False)
common.add_option("--version", help="Display version and exit",
action="callback", callback=displayVersion)
parser.add_option_group(common)
values, arguments = parser.parse_args()
if not arguments:
parser.print_help()
sys.exit(1)
return values, arguments
def main():
# Parse arguments and read filenames
values, filenames = parseOptions()
level = 0
for item in values.strip.split(","):
item = item.strip()
if item == "useless":
level |= STRIP_USELESS
elif item == "metadata":
level |= STRIP_METADATA
elif item == "index":
level |= STRIP_INDEX
if not level:
print("Nothing to do, exit")
sys.exit(0)
ok = True
for filename in filenames:
print("[+]", ("Process file %s" % filename))
parser = createParser(filename)
if parser:
editor = createEditor(parser)
ok &= stripEditor(editor, filename + ".new",
level, not(values.quiet))
else:
ok = False
if ok:
sys.exit(0)
else:
sys.exit(1)
|
from plotly import express as px
from plotly.io import to_json
from entropylab.results_backend.sqlalchemy.model import FigureTable
class TestFigureTable:
def test_to_record(self):
target = FigureTable()
figure = px.line(x=["a", "b", "c"], y=[1, 3, 2], title="sample figure")
target.figure = to_json(figure)
actual = target.to_record()
assert actual.figure.data[0]["x"] == ("a", "b", "c")
assert actual.figure.data[0]["y"] == (1, 3, 2)
def test_from_model(self):
target = FigureTable()
figure = px.line(x=["a", "b", "c"], y=[1, 3, 2], title="sample figure")
actual = target.from_model(1, figure)
assert actual.experiment_id == 1
assert actual.figure == to_json(figure)
assert actual.time is not None
|
//
// UIView+EmptyView.h
// YiBiFen
//
// Created by caohuihui on 15/12/11.
// Copyright © 2015年 hhly. All rights reserved.
//
#import <UIKit/UIKit.h>
@class LYBlankPageView;
@interface UIView (EmptyView)
/**
 *  Configures a placeholder/prompt view.
 *  @param hasData  whether there is data
 *  @param title    title shown when there is no data
 *  @param hasError whether a network error occurred
 *  @param block    refresh callback invoked on network error
 */
- (void)configWithHasData:(BOOL)hasData noDataTitle:(NSString *)title hasError:(BOOL)hasError reloadBlock:(void(^)(id sender))block;
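// Usage sketch (hypothetical view controller):
//   [self.tableView configWithHasData:self.items.count > 0
//                         noDataTitle:@"No data yet"
//                            hasError:NO
//                         reloadBlock:^(id sender) { [self reloadData]; }];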
/**
 Shown when there is no data.
 @param hasData  whether there is data
 @param image    image shown when there is no data
 @param title    title shown when there is no data
 @param hasError whether a network error occurred
 @param block    refresh callback invoked on network error
 */
- (void)configWithHasData:(BOOL)hasData noDataImage:(UIImage *)image noDataTitle:(NSString *)title hasError:(BOOL)hasError reloadBlock:(void (^)(id sender))block;
@property (strong, nonatomic) LYBlankPageView *blankPageView;
@end
@interface LYBlankPageView : UIView
// Image
@property (strong,nonatomic)UIImageView *logoView;
// Title
@property (nonatomic,strong)UILabel *tipLabel;
// Button
@property (nonatomic,strong)UIButton *reloadButton;
@property (nonatomic,copy) void (^reloadButtonBlock)(id sender);
@end
|
// Mouse/Key navigation/manipulation
// Convert screen space to normalised screen space
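// Usage sketch (assuming pr::NormalisePoint maps view coordinates into [-1,+1],
// with the (1.0f, -1.0f) scale flipping y so that +y points up):
//   auto centre = ToNormSS(pr::v2(640, 360), pr::iv2(1280, 720)); // ~(0, 0)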
inline pr::v2 ToNormSS(pr::v2 const& pt_ss, pr::iv2 const& view_size)
{
auto view = pr::IRect(pr::iv2Zero, view_size);
return pr::NormalisePoint(view, pt_ss, 1.0f, -1.0f);
} |
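// Usage sketch (hypothetical synsets): returns 0.0 for identical synsets, otherwise
// the number of edges on the shortest path, or +infinity when no path exists:
//   double d = distance(dog, canine); // e.g. 1.0 for a direct hypernym link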
/**
* Calculates the distance between synsets.
*
* @param synset1 Synset 1
* @param synset2 Synset 2
* @return The distance
*/
public double distance(Synset synset1, Synset synset2) {
Validation.notNull(synset1);
Validation.notNull(synset2);
if(synset1.equals(synset2)) {
return 0d;
}
List<Synset> path = shortestPath(synset1, synset2);
return path.isEmpty()
? Double.POSITIVE_INFINITY
: path.size() - 1;
} |
import {Char} from "../Char";
import {Dispatch} from "react";
import {IState} from "./IState";
export class CharSelectedList implements IState<Char> {
set: Dispatch<Char[]>;
value: Char[];
constructor(hook: [Char[], Dispatch<Char[]>]) {
this.value = hook[0];
this.set = hook[1];
}
}
|
// run-pass
#![allow(stable_features)]
// ignore-windows - this is a unix-specific test
// ignore-emscripten no processes
// ignore-sgx no processes
#![feature(process_exec, rustc_private)]
extern crate libc;
use std::env;
use std::io::Error;
use std::os::unix::process::CommandExt;
use std::process::Command;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
fn main() {
if let Some(arg) = env::args().nth(1) {
match &arg[..] {
"test1" => println!("hello2"),
"test2" => assert_eq!(env::var("FOO").unwrap(), "BAR"),
"test3" => assert_eq!(env::current_dir().unwrap().to_str().unwrap(), "/"),
"empty" => {}
_ => panic!("unknown argument: {}", arg),
}
return;
}
let me = env::current_exe().unwrap();
let output = unsafe {
Command::new(&me)
.arg("test1")
.pre_exec(|| {
println!("hello");
Ok(())
})
.output()
.unwrap()
};
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert_eq!(output.stdout, b"hello\nhello2\n");
let output = unsafe {
Command::new(&me)
.arg("test3")
.pre_exec(|| {
env::set_current_dir("/").unwrap();
Ok(())
})
.output()
.unwrap()
};
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert!(output.stdout.is_empty());
let output = unsafe {
Command::new(&me)
.arg("bad")
.pre_exec(|| Err(Error::from_raw_os_error(102)))
.output()
.unwrap_err()
};
assert_eq!(output.raw_os_error(), Some(102));
let pid = unsafe { libc::getpid() };
assert!(pid >= 0);
let output = unsafe {
Command::new(&me)
.arg("empty")
.pre_exec(move || {
let child = libc::getpid();
assert!(child >= 0);
assert!(pid != child);
Ok(())
})
.output()
.unwrap()
};
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert!(output.stdout.is_empty());
let mem = Arc::new(AtomicUsize::new(0));
let mem2 = mem.clone();
let output = unsafe {
Command::new(&me)
.arg("empty")
.pre_exec(move || {
assert_eq!(mem2.fetch_add(1, Ordering::SeqCst), 0);
Ok(())
})
.output()
.unwrap()
};
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert!(output.stdout.is_empty());
assert_eq!(mem.load(Ordering::SeqCst), 0);
}
|
print("Don't know why you say goodbye i say hello")
|
/** Identify a common fraction that can be factored from all the Terms in this
* Expression and remove it. This includes variables raised to negative powers.
*
* @return a result that includes a new Expression and the Divisor used to create it.
*/
public DivisionResult eliminateFractions() {
DivisionResult r = new DivisionResult();
if(terms.length==0) {
r.divisor = new Term(1);
r.quotient = this;
return r;
}
Map<String, Fraction> variables = new HashMap<String, Fraction>();
Fraction commonDenominator = null;
		for(int a = 0; a<terms.length; a++) {
			Term t = terms[a];
			commonDenominator = commonDenominator==null ?
					t.coefficient.isolateDenominator() :
					commonDenominator.isolateLCD( t.coefficient.isolateDenominator() );
Iterator<String> iter = t.getVariables().iterator();
while(iter.hasNext()) {
String variable = iter.next();
Fraction power = t.getDegree(variable);
if(power.compareTo(0)<0) {
Fraction current = variables.get(variable);
if(current==null || power.negate().compareTo(current)>0) {
variables.put(variable, power.negate());
}
}
}
}
Term divisor = new Term( commonDenominator.invert(), variables);
r.divisor = divisor;
r.quotient = multiply(divisor);
return r;
} |
/**
 * True when at least one Report is active.
*
* @return true, if is active
*/
@Transient
public boolean isActive(){
for(Report report : getReports()){
if(report.isActive()) return true;
}
return false;
} |
import { WAFV2Client, CheckCapacityCommand, CheckCapacityCommandInput, DescribeManagedRuleGroupCommand, DescribeManagedRuleGroupCommandInput } from "@aws-sdk/client-wafv2";
import * as quota from "@aws-sdk/client-service-quotas";
import * as cloudformation from "@aws-sdk/client-cloudformation";
import { FMSClient, ListPoliciesCommand, ListPoliciesCommandInput } from "@aws-sdk/client-fms";
import { Rule } from "../types/fms";
import * as lodash from "lodash";
import { RuntimeProperties } from "../types/runtimeprops";
import { Config } from "../types/config";
/**
* Service Quota Code for Firewall Manager Total WAF WCU in account & region
*/
const WCU_QUOTA_CODE = "L-D86ED2F3";
/**
* Service Quota Code for Firewall Manager policies per organization per Region
*/
const POLICY_QUOTA_CODE = "L-0B28E140";
/**
* Get the current count of security policies in the deployment account and region
 * @param deploymentRegion AWS region, e.g. eu-central-1
* @returns A promise with the current policy count
*/
async function getPolicyCount(deploymentRegion: string): Promise<number> {
const client = new FMSClient({ region: deploymentRegion });
const input: ListPoliciesCommandInput = {
};
const command = new ListPoliciesCommand(input);
const response = await client.send(command);
return response.PolicyList?.length || 0;
}
/**
*
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param scope whether scope is REGIONAL or CLOUDFRONT
* @param rules rules for which you want to calculate the capacity
* @returns the total capacity of the supplied rules
*/
async function getTotalCapacityOfRules(deploymentRegion: string, scope: "REGIONAL" | "CLOUDFRONT", rules: Rule[]): Promise<number> {
const client = new WAFV2Client({ region: deploymentRegion });
const input: CheckCapacityCommandInput = {
Scope: scope,
Rules: convertPropValuesToUint8Array(rules, "SearchString")
};
const command = new CheckCapacityCommand(input);
const response : any = await client.send(command);
return response.Capacity || 0;
}
/**
*
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param quotaCode AWS Quota Code for the FMS Service Quota
* @returns returns the specified quota of the FMS Service
*/
async function getFmsQuota(deploymentRegion: string, quotaCode: string): Promise<number>{
  let current_quota = 0;
  const quota_client = new quota.ServiceQuotasClient({ region: deploymentRegion });
  const input: quota.GetAWSDefaultServiceQuotaCommandInput = {
    QuotaCode: quotaCode,
    ServiceCode: "fms"
  };
  const command = new quota.GetAWSDefaultServiceQuotaCommand(input);
  const responsequota = await quota_client.send(command);
  if(responsequota.Quota?.Adjustable === true){
    const historyinput: quota.ListRequestedServiceQuotaChangeHistoryByQuotaCommandInput = {
      QuotaCode: quotaCode,
      ServiceCode: "fms"
    };
    const historycommand = new quota.ListRequestedServiceQuotaChangeHistoryByQuotaCommand(historyinput);
    const newquota = await quota_client.send(historycommand);
    if((newquota.RequestedQuotas?.length ?? 0) > 0){
      // Use the most recently created quota change request, if it has been approved.
      const sortquota = lodash.sortBy(newquota.RequestedQuotas, ["Created"]);
      const latestrequest = sortquota[sortquota.length - 1];
      if(latestrequest.Status === "APPROVED"){
        current_quota = latestrequest.DesiredValue || 0;
        return current_quota;
      }
      console.log("ℹ️ There is an open Quota request for " + quotaCode + " but it is still not approved, using DEFAULT Quota.");
    }
  }
  current_quota = responsequota.Quota?.Value || 0;
  return current_quota;
}
/**
*
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param vendor vendor of the Managed Rule Group
 * @param rgName name of the Managed Rule Group
* @param scope whether scope is REGIONAL or CLOUDFRONT
* @param version version of the Managed Rule Group
* @returns returns the capacity of the Managed Rule Group
*/
async function getManagedRuleCapacity(deploymentRegion: string, vendor: string, rgName: string, scope: string, version: string): Promise<number>{
const client = new WAFV2Client({ region: deploymentRegion });
if(version === ""){
const input: DescribeManagedRuleGroupCommandInput = {
VendorName: vendor,
Name: rgName,
Scope: scope
};
const command = new DescribeManagedRuleGroupCommand(input);
const response: any = await client.send(command);
return response.Capacity || 0;
}
else{
const input: DescribeManagedRuleGroupCommandInput = {
VendorName: vendor,
Name: rgName,
Scope: scope,
VersionName: version
};
const command = new DescribeManagedRuleGroupCommand(input);
const response : any = await client.send(command);
return response.Capacity || 0;
}
}
/**
* Writes outputs from an existing stack into the specified runtime props
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param runtimeprops runtime properties, where to write stack outputs into
* @param config the config object from the values json
*/
export async function setOutputsFromStack(deploymentRegion: string, runtimeprops: RuntimeProperties, config: Config): Promise<void>{
const StackName =
config.General.Prefix.toUpperCase() +
"-WAF-" +
config.WebAcl.Name.toUpperCase() +
"-" +
config.General.Stage.toUpperCase() +
"-" +
config.General.DeployHash.toUpperCase();
const cloudformation_client = new cloudformation.CloudFormationClient({ region: deploymentRegion });
const params ={
StackName
};
const command = new cloudformation.DescribeStacksCommand(params);
const responsestack = await cloudformation_client.send(command);
if(responsestack.Stacks?.[0].StackName && responsestack.Stacks?.[0].Outputs !== undefined){
for(const output of responsestack.Stacks?.[0].Outputs){
if(output.OutputKey === "DeployedRuleGroupNames")
{
runtimeprops.PreProcess.DeployedRuleGroupNames = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "DeployedRuleGroupIdentifier")
{
runtimeprops.PreProcess.DeployedRuleGroupIdentifier = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "DeployedRuleGroupCapacities")
{
const arrayOfNumbers = output.OutputValue?.split(",",output.OutputValue?.length).map(Number) || [];
runtimeprops.PreProcess.DeployedRuleGroupCapacities = arrayOfNumbers;
}
if(output.OutputKey === "PreProcessDeployedRuleGroupNames")
{
runtimeprops.PreProcess.DeployedRuleGroupNames = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "PreProcessDeployedRuleGroupIdentifier")
{
runtimeprops.PreProcess.DeployedRuleGroupIdentifier = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "PreProcessDeployedRuleGroupCapacities")
{
const arrayOfNumbers = output.OutputValue?.split(",",output.OutputValue?.length).map(Number) || [];
runtimeprops.PreProcess.DeployedRuleGroupCapacities = arrayOfNumbers;
}
if(output.OutputKey === "PostProcessDeployedRuleGroupNames")
{
runtimeprops.PostProcess.DeployedRuleGroupNames = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "PostProcessDeployedRuleGroupIdentifier")
{
runtimeprops.PostProcess.DeployedRuleGroupIdentifier = output.OutputValue?.split(",",output.OutputValue?.length) || [];
}
else if(output.OutputKey === "PostProcessDeployedRuleGroupCapacities")
{
const arrayOfNumbers = output.OutputValue?.split(",",output.OutputValue?.length).map(Number) || [];
runtimeprops.PostProcess.DeployedRuleGroupCapacities = arrayOfNumbers;
}
}
}
}
/**
* calculate the capacities for managed and custom rules and apply them to runtime properties
* @param config configuration object of the values.json
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param runtimeProperties runtime properties object, where to store capacities
*/
async function calculateCapacities(
config: Config,
deploymentRegion: string,
runtimeProperties: RuntimeProperties
): Promise<void> {
let count = 0;
if (!config.WebAcl.PreProcess.CustomRules) {
console.log(
"\n ⏭ Skip Rule Capacity Calculation for PreProcess Custom Rules."
);
} else {
while (count < config.WebAcl.PreProcess.CustomRules.length) {
if ("Captcha" in config.WebAcl.PreProcess.CustomRules[count].Action) {
const rules : Rule[] = [];
const { CloudWatchMetricsEnabled, SampledRequestsEnabled } =
config.WebAcl.PreProcess.CustomRules[count].VisibilityConfig;
const rule: Rule = {
Statement: config.WebAcl.PreProcess.CustomRules[count].Statement,
Name: "Rule",
Action: config.WebAcl.PreProcess.CustomRules[count].Action,
CaptchaConfig:
config.WebAcl.PreProcess.CustomRules[count].CaptchaConfig,
VisibilityConfig: {
CloudWatchMetricsEnabled,
SampledRequestsEnabled,
MetricName: "Metric" + Math.random().toString(),
},
};
rules.push(rule);
const capacity = await getTotalCapacityOfRules(
deploymentRegion,
config.WebAcl.Scope,
rules
);
runtimeProperties.PreProcess.RuleCapacities.push(capacity);
} else {
const rule_calculated_capacity_json = [];
const { CloudWatchMetricsEnabled, SampledRequestsEnabled } =
config.WebAcl.PreProcess.CustomRules[count].VisibilityConfig;
const temp_template: Rule = {
Statement: config.WebAcl.PreProcess.CustomRules[count].Statement,
Name: "Rule",
Action: config.WebAcl.PreProcess.CustomRules[count].Action,
VisibilityConfig: {
CloudWatchMetricsEnabled,
SampledRequestsEnabled,
MetricName: "Metric" + Math.random().toString(),
},
};
rule_calculated_capacity_json.push(temp_template);
const capacity = await getTotalCapacityOfRules(
deploymentRegion,
config.WebAcl.Scope,
rule_calculated_capacity_json
);
runtimeProperties.PreProcess.RuleCapacities.push(capacity);
}
count++;
}
runtimeProperties.PreProcess.Capacity = runtimeProperties.PreProcess.RuleCapacities.reduce(
function (a, b) {
return a + b;
},
0
);
}
count = 0;
let PostProcessCapacity = 0;
if (!config.WebAcl.PostProcess.CustomRules) {
console.log(
"\n ⏭ Skip Rule Capacity Calculation for PostProcess Custom Rules."
);
} else {
while (count < config.WebAcl.PostProcess.CustomRules.length) {
const rule_calculated_capacity_json = [];
const { CloudWatchMetricsEnabled, SampledRequestsEnabled } =
config.WebAcl.PostProcess.CustomRules[count].VisibilityConfig;
const rule: Rule = {
Statement: config.WebAcl.PostProcess.CustomRules[count].Statement,
Name: "Rule",
Action: config.WebAcl.PostProcess.CustomRules[count].Action,
VisibilityConfig: {
CloudWatchMetricsEnabled,
SampledRequestsEnabled,
MetricName: "Metric" + Math.random().toString(),
},
};
if ("Captcha" in config.WebAcl.PostProcess.CustomRules[count].Action) {
rule.CaptchaConfig =
config.WebAcl.PostProcess.CustomRules[count].CaptchaConfig;
}
if (config.WebAcl.PostProcess.CustomRules[count].RuleLabels) {
rule.RuleLabels =
config.WebAcl.PostProcess.CustomRules[count].RuleLabels;
}
rule_calculated_capacity_json.push(rule);
const capacity = await getTotalCapacityOfRules(
deploymentRegion,
config.WebAcl.Scope,
rule_calculated_capacity_json
);
runtimeProperties.PostProcess.RuleCapacities.push(capacity);
count++;
}
PostProcessCapacity = runtimeProperties.PostProcess.RuleCapacities.reduce(
function (a, b) {
return a + b;
},
0
);
}
console.log("\n👀 Get ManagedRule Capacity:\n");
if (!config.WebAcl.PreProcess.ManagedRuleGroups) {
console.log("\n ℹ️ No ManagedRuleGroups defined in PreProcess.");
} else {
console.log(" 🥇 PreProcess: ");
for (const managedrule of config.WebAcl.PreProcess.ManagedRuleGroups) {
const capacity = await getManagedRuleCapacity(
deploymentRegion,
managedrule.Vendor,
managedrule.Name,
config.WebAcl.Scope,
managedrule.Version
);
managedrule.Capacity = capacity;
console.log(
" ➕ Capacity for " +
managedrule.Name +
" is [" +
managedrule.Capacity +
"]"
);
runtimeProperties.ManagedRuleCapacity += capacity;
}
}
if (!config.WebAcl.PostProcess.ManagedRuleGroups) {
console.log("\n ℹ️ No ManagedRuleGroups defined in PostProcess.");
} else {
console.log("\n 🥈 PostProcess: ");
for (const managedrule of config.WebAcl.PostProcess.ManagedRuleGroups) {
const capacity = await getManagedRuleCapacity(
deploymentRegion,
managedrule.Vendor,
managedrule.Name,
config.WebAcl.Scope,
managedrule.Version
);
managedrule.Capacity = capacity;
console.log(
" ➕ Capacity for " +
managedrule.Name +
" is [" +
managedrule.Capacity +
"]"
);
runtimeProperties.ManagedRuleCapacity += capacity;
}
}
runtimeProperties.PostProcess.Capacity = PostProcessCapacity;
}
/**
 * The function calculates the current security policy count in the account & region and checks whether it exceeds the current quota
* @param deploymentRegion AWS region, e.g. eu-central-1
* @returns whether policy limit is reached
*/
export async function isPolicyQuotaReached(deploymentRegion: string): Promise<boolean> {
const policyCount = await getPolicyCount(deploymentRegion);
const fmsPolicyQuota = await getFmsQuota(deploymentRegion, POLICY_QUOTA_CODE);
const policyLimitReached = fmsPolicyQuota <= policyCount;
if (policyLimitReached) {
console.log(
"\n🚨 You are about to exceed the limit for Policies per region.\n Region Quota: " +
fmsPolicyQuota +
"\n Deployed Policies: " +
policyCount +
"\n ﹗ Stopping deployment ﹗"
);
}
return policyLimitReached;
}
/**
* The function checks if the total WCU of all configured rules exceeds the WCU quota in account & region
* @param deploymentRegion AWS region, e.g. eu-central-1
* @param runtimeProps runtime properties object, where to store capacities
* @param config configuration object of the values.json
* @returns whether WCU limit is reached
*/
export async function isWcuQuotaReached(deploymentRegion: string, runtimeProps: RuntimeProperties, config: Config): Promise<boolean> {
await calculateCapacities(config, deploymentRegion, runtimeProps);
const custom_capacity = runtimeProps.PreProcess.Capacity + runtimeProps.PostProcess.Capacity;
const total_wcu = runtimeProps.PreProcess.Capacity + runtimeProps.PostProcess.Capacity + runtimeProps.ManagedRuleCapacity;
const quote_wcu = await getFmsQuota(deploymentRegion, WCU_QUOTA_CODE);
const wcuLimitReached = (total_wcu > Number(quote_wcu));
if (wcuLimitReached) {
console.log("\n🔎 Capacity Check result: 🔴 \n ﹗ Stopping deployment ﹗\n");
console.log(" 💡 Account WAF-WCU Quota: " +Number(quote_wcu).toString());
console.log(" 🧮 Calculated Custom Rule Capacity is: [" + custom_capacity + "] \n ➕ ManagedRulesCapacity: ["+ runtimeProps.ManagedRuleCapacity +"] \n = Total Waf Capacity: " + total_wcu.toString() + "\n");
}
else {
console.log("\n🔎 Capacity Check result: 🟢 \n");
console.log(" 💡 Account WAF-WCU Quota: " +Number(quote_wcu).toString());
console.log(" 🧮 Calculated Custom Rule Capacity is: [" + custom_capacity + "] (🥇[" + runtimeProps.PreProcess.Capacity + "] + 🥈[" + runtimeProps.PostProcess.Capacity + "]) \n ➕ ManagedRulesCapacity: ["+ runtimeProps.ManagedRuleCapacity +"] \n = Total Waf Capacity: " + total_wcu.toString() + "\n");
}
return wcuLimitReached;
}
/**
* initialize a runtime properties object
* @returns the runtime properties object
*/
export function initRuntimeProperties() : RuntimeProperties {
return {
ManagedRuleCapacity: 0,
PostProcess: {
Capacity: 0,
DeployedRuleGroupCapacities: [],
DeployedRuleGroupIdentifier: [],
DeployedRuleGroupNames: [],
RuleCapacities: []
},
PreProcess: {
Capacity: 0,
DeployedRuleGroupCapacities: [],
DeployedRuleGroupIdentifier: [],
DeployedRuleGroupNames: [],
RuleCapacities: []
},
};
}
/**
* The function converts the value of all properties with supplied name into a Uint8Array
* @param rulesObject Rules Object or Array of Rules Object
* @param propertyName name of the properties which have to be converted
* @returns converted Rules
*/
function convertPropValuesToUint8Array(rulesObject: any, propertyName: string): any {
const convertedObject: any = {};
let value: any;
if (rulesObject instanceof Array) {
return rulesObject.map(function (value) {
if (typeof value === "object") {
value = convertPropValuesToUint8Array(value, propertyName);
}
return value;
});
} else {
for (const origKey in rulesObject) {
if (Object.prototype.hasOwnProperty.call(rulesObject,origKey)) {
value = rulesObject[origKey];
if (value instanceof Array || (value !== null && value.constructor === Object)) {
value = convertPropValuesToUint8Array(value, propertyName);
}
if (origKey === propertyName) {
value = convertStringToUint8Array(rulesObject[origKey]);
}
convertedObject[origKey] = value;
}
}
}
return convertedObject;
}
/**
* The function returns Uint8 representation of a string
* @param stringToConvert string which has to be converted to Uint8Array
* @returns the desired Uint8Array representation of the string
*/
function convertStringToUint8Array(stringToConvert: string): Uint8Array {
  // One byte per character; assumes the rule strings contain single-byte characters.
  const bufView = new Uint8Array(stringToConvert.length);
  for (let i = 0, strLen = stringToConvert.length; i < strLen; i++) {
    bufView[i] = stringToConvert.charCodeAt(i);
  }
  return bufView;
}
}
/**
* Function to transform property names into camel case like AWS needs it
* @param o object which property names has to be transformed to camel case
* @returns the object with the transformed property names in camel case
*/
export function toAwsCamel(o: any): any {
let newO: any, origKey: any, newKey: any, value: any;
if (o instanceof Array) {
return o.map(function(value) {
if (typeof value === "object") {
value = toAwsCamel(value);
}
if(value === "aRN"){
value = "arn";
}
if(value === "iPSetReferenceStatement"){
value = "ipSetReferenceStatement";
}
return value;
});
} else {
newO = {};
for (origKey in o) {
if (Object.prototype.hasOwnProperty.call(o, origKey)) {
newKey = (origKey.charAt(0).toLowerCase() + origKey.slice(1) || origKey).toString();
if(newKey === "aRN"){
newKey = "arn";
}
if(newKey === "iPSetReferenceStatement"){
newKey = "ipSetReferenceStatement";
}
        value = o[origKey];
        if (value instanceof Array || (value !== null && value.constructor === Object)) {
          value = toAwsCamel(value);
        }
newO[newKey] = value;
}
}
}
return newO;
} |
package org.monora.coolsocket.core.response;
import org.jetbrains.annotations.Nullable;
import org.monora.coolsocket.core.protocol.ProtocolException;
/**
 * Thrown when an unsupported feature is requested; the request should not have been made and is
 * treated as an error.
*/
public class UnsupportedFeatureException extends ProtocolException
{
/**
* Create a new instance
*
* @param message To describe the issue.
*/
public UnsupportedFeatureException(@Nullable String message)
{
super(message);
}
}
|
import React from 'react';
import Layout from '../containers/Layout';
import { Country } from '../lib/types';
import client from '../apollo/client';
import { gql } from '@apollo/client';
import Header from '../components/Header/Header';
import HomeContent from '../components/HomeContent/HomeContent';
import Map from '../components/Map/Map';
export interface CountriesProps {
countries: Country[];
}
export default function Home({
countries,
}: CountriesProps): React.ReactElement {
return (
<Layout title="Home">
<Header />
<HomeContent />
<Map countries={countries} />
</Layout>
);
}
export async function getStaticProps() {
const { data } = await client.query({
query: gql`
query GetAllCountries {
countries {
id: _id
name
lat
long
alpha2
books {
cover
}
}
}
`,
});
return {
props: {
countries: data.countries,
},
};
}
|
"Chihuahuas" redirects here. For the baseball team, see El Paso Chihuahuas
Dog breed
The Chihuahua (Spanish: chihuahueño)[1] is the smallest breed of dog and is named after the state of Chihuahua in Mexico. Chihuahuas come in a wide variety of colors, and two coat lengths.
History
A Techichi, likely the ancestor of Chihuahuas
The Chihuahua's history is convoluted, and many theories surround the origin of the breed. Both folklore and archaeological finds show that the breed has origins in Mexico. The most common theory is that Chihuahua are descended from the Techichi, a companion dog favored by the Toltec civilization in Mexico.[2] No records of the Techichi are available before the 9th century, although dog pots from Colima, Mexico, buried as part of the western Mexico shaft tomb tradition, which date back to 300 BC, are thought to depict Techichis.[3] The earlier ancestors probably were present before the Mayas as dogs approximating the Chihuahua are found in materials from the Great Pyramid of Cholula, antedating 1530 and in the ruins of Chichen Itza on the Yucatán Peninsula.[2] However, genetic evidence reveals very little pre-European genetics in modern chihuahuas (<2%), suggesting that interbreeding with European dogs has left little of the original American lineage (while possibly retaining a similar form).[4]
Wheeled dog toys in Mesoamerica range from Mexico to El Salvador. The earliest of these were found at Tres Zapotes in Veracruz, Mexico, which date to 100 AD,[5] indirect evidence that a Chihuahua-like breed was in Mexico over 1400 years before the first Europeans arrived.[5]
Dog effigy pots dating to around 1325 AD discovered in Georgia and Tennessee also appear to represent the Chihuahua.[6] In 1850, a pot featuring the Chihuahua-like dogs was unearthed in old ruins at Casas Grandes in the Mexican state of Chihuahua, which dates from 1100–1300 AD showing the long history of such dogs at this site,[5] although most artifacts relating to its existence are found around Mexico City. It has been argued that these pots arrived with survivors from the Casas Grandes site in Chihuahua, Mexico, after it was attacked and destroyed around 1340 AD.
In a 1520 letter, Hernan Cortés wrote that the Aztecs raised and sold the little dogs as food.[7] Colonial records refer to small, nearly hairless dogs at the beginning of the 19th century, one of which claims 16th-century Conquistadores found them plentiful in the region later known as Chihuahua.[8] Small dogs such as Chihuahuas were also used as living hot-water bottles during illness or injury. Some believe this practice is where the idea of pain being transferred to animals from humans originated, which gave way to rituals such as burning the deceased with live dogs, such as the Techichi, to exonerate the deceased human's sins.[9] Chihuahuas as we know them today remained a rarity until the early 20th century; the American Kennel Club (AKC) did not register a Chihuahua until 1904.[10]
Description
A longhair apple head Chihuahua
A shorthair deer head Chihuahua
Chihuahuas are the smallest breed recognized by some kennel clubs.[11]
There are two varieties of Chihuahua recognized by the AKC – the Smooth Coat (shorthaired) and the Long Coat (longhaired). Both the Smooth and the Long Coats have their special attractions and are equally easy to keep clean and well groomed.[12] The UK Kennel Club considers the two to be distinct breeds; matings between the two are not eligible for KC registration.
Dogs of either coat type may be identified as either "apple head" or "deer head" Chihuahuas, particularly in the United States. Apple heads have rounded heads, close-set eyes, and relatively short ears and legs. Deer heads have flat-topped heads, more widely set eyes, larger ears, and longer, more slender legs. While deer heads were popular in the mid-20th century, current breed standards defined by registries such as the AKC specify the apple-head conformation.[13]
Appearance
Breed standards for this dog do not generally specify a height; only a weight and a description of their overall proportions. Generally, the height ranges between 6 and 9 in (15 and 23 cm);[11] however, some dogs grow as tall as 30 to 38 cm (12 to 15 in).[14] Both British and American breed standards state that a Chihuahua must not weigh more than 5.9 lb (2.7 kg) for conformation.[11] However, the British standard also states that a weight of 4–6 lb (1.8–2.7 kg) is preferred. A clause stating, "if two dogs are equally good in type, the more diminutive one is preferred", was removed in 2009.[15] The Fédération Cynologique Internationale standard calls for dogs ideally between 1.5 and 3.0 kg (3.3 and 6.6 lb), although smaller ones are acceptable in the show ring.[16]
Pet Chihuahuas (that is, those bred or purchased as companions rather than show dogs) often range above these weights, even above 10 lb if they have large bone structures or are allowed to become overweight.[11] This does not mean that they are not purebred Chihuahuas; they just do not meet the requirements to enter a conformation show. Oversized Chihuahuas are seen in some of the best, and worst, bloodlines. Chihuahuas do not breed true for size, and puppies from the same litter can mature in drastically different sizes from one another. Also, larger breeding females are less likely to experience dystocia (obstructed labor). Typically, the breed standard for both the Long and Short Coat Chihuahuas will be identical except for the description of the coat.[17] Chihuahuas have large, round eyes and large, erect ears, set in a high, dramatically rounded skull.[11]
The Kennel Club in the United Kingdom and the AKC in the United States recognize only two varieties of Chihuahua: the long coat, and the smooth coat, also referred to as longhaired and shorthaired.[18] The term smooth coat does not mean that the hair is necessarily smooth, as the hair can range from having a velvet touch to a whiskery feeling. Longhaired Chihuahuas are actually smoother to the touch, having soft, fine guard hairs and a downy undercoat, which gives them their fluffy appearance. Unlike many longhaired breeds, longhaired Chihuahuas require no trimming and minimal grooming. Contrary to popular belief, the longhaired breed also typically sheds less than its shorthaired counterparts. Up to three or more years may be needed before a full longhaired coat develops.
Chihuahuas occur in virtually any color combination, from solid to marked or splashed,[18] allowing for colors from solid black to solid white, spotted, sabled, or a variety of other colors and patterns. Colors and patterns can combine and affect each other, resulting in a very high degree of variation. Common colors are fawn, red, cream, chocolate, brown, mixed, white, and black. No color or pattern is considered more valuable than another.
The merle coat pattern, which appears mottled, is not traditionally considered part of the breed standard. In May 2007, The Kennel Club decided not to register puppies with this coloration due to the health risks associated with the responsible gene, and in December of that year, formally amended its breed standard to disqualify merle dogs.[19] The Fédération Cynologique Internationale, which represents the major kennel clubs of 84 countries, also disqualified merle.[16] Other countries' kennel clubs, including Canada, Australia, New Zealand, and Germany, have also disqualified merle. However, in May 2008, the Chihuahua Club of America voted that merles would not be disqualified in the United States, and would be fully registrable and able to compete in AKC events. Opponents of merle recognition suspect the coloration came about by modern crossbreeding with other dogs and not by natural genetic drift.[20]
Temperament
How a Chihuahua behaves depends on the genetic temperament of its parents and grandparents.[21] However, as with all dogs, socialization and training are very important. Like many small dogs, Chihuahuas are less likely than large dogs to be given obedience classes, socialization, or appropriate exercise and training. Frequent victims of "small dog syndrome", in which owners feel no need to provide the kind of training and socialization routinely provided for larger dogs, untrained Chihuahuas suffer the same predictable behavior problems as other untrained dogs regardless of the breed. However, they thrive when given appropriate socialization and training.[22]
Poorly socialized or frightened Chihuahuas can be easily provoked to attack, so they are generally unsuitable for homes with small and undisciplined children.[23] The breed tends to be fiercely loyal to one particular person and in some cases may become overprotective of that person, especially around other people or animals. They are frequently not introduced to or socialized with dogs of other breeds, and as a result do not interact with them as well as dogs that have been socialized to different breed types,[23] and they tend to have a "clannish" nature, often preferring the companionship of other Chihuahuas or Chihuahua mixes over other dogs.[24] These traits generally make them unsuitable for households with children who are not patient and calm.[18] If properly managed by older children, 13 and up, they can adapt to this kind of living with a dedicated owner. The temperament of the owner can make a difference in the temperament of the pup.
Chihuahuas love their dens and often burrow themselves in pillows, clothes hampers, and blankets. They are often found under the covers or at the bottom of the bed, deep in the dark and safety of what they perceive as their den. Chihuahuas also enjoy time in sunlight.[25]
Health
Chihuahua puppy
This breed requires expert veterinary attention in areas such as birthing and dental care. Dental care is a must for these small dogs, whose jaw size makes for weaker teeth. Although daily brushing provides the best preventive measure, feeding a dental diet or using dental chews for dogs is an effective approach pet owners can take to help prevent and control accumulation of plaque and tartar to avoid consequences of severe periodontal disease.[26] The best physical characteristics of dog food to contribute to cleaning a dog's teeth would be food that is large and dense, so more time is spent chewing, which leads to the surface of the teeth being cleaned.
Chihuahuas, and other toy breeds, can be affected by hydrocephalus.[27] Chihuahua puppies with hydrocephalus have an abnormally large head, are lethargic, and do not grow at the same pace as their siblings. A true case of hydrocephalus can be diagnosed by a veterinarian, though the prognosis is grim.
Apple head Chihuahuas can have moleras, or a soft spot in their skulls, and they are the only breed of dog to be born with an incomplete skull. This is not a defect; it is a normal adaptation facilitating the passage through the birth canal and growth and development of the domed type of forehead. The molera is predominant in the apple heads and is present in nearly all Chihuahua puppies. The molera fills in with age, but great care needs to be taken during the first six months until the skull is fully formed. Some moleras do not close completely and require extra care to prevent injury.[28]
Chihuahua puppies can be at risk for hypoglycemia (low blood sugar). Signs of hypoglycemia include lethargy, sleepiness, uncoordinated walking, unfocused eyes, spasms of the neck muscles or head pulling back or to the side, fainting, and seizures. Hypoglycemia can be avoided with adequate nutrition and frequent feedings, especially for Chihuahuas that are younger, smaller, or leaner. Chihuahua owners should have a simple-sugar supplement on hand to use in emergencies, such as Nutri-Cal or corn syrup. These supplements can be rubbed on the gums and roof of the mouth to rapidly raise the blood sugar level.
However, as with any dog, owners should take care not to overfeed their Chihuahua, since obesity can result in increased rates of joint injuries, tracheal collapse, chronic bronchitis, and shortened lifespan.
As in other breeds with large, protruding eyes, Chihuahuas are prone to eye infections and eye injury. The eyes may water in response to dry air, dust, or airborne allergens.
Collapsed trachea is a health concern characteristic of the Chihuahua breed.[29]
Chihuahuas may tremble or shiver when stressed, excited, or cold. These dogs, especially the short coat variety, are less tolerant of cold than larger breeds, and require a sweater/coat and/or boots in cold weather. They seek warmth in sunshine, under blankets, or on furniture, or human laps.
Some Chihuahuas may present separation anxiety as a result of being so close and loyal to their owners. This is a fairly common cause behind any pacing, excessive salivating, destructive chewing, or barking, howling, or whining in dogs. Many treatments and tips can help prevent separation anxiety in dogs.
The lifespan for a Chihuahua is usually between 12 and 20 years.[30]
Chihuahuas are also known for luxating patella, a genetic condition that can occur in all dogs. In some dogs, the ridges forming the patellar groove are not shaped correctly and a shallow groove is created, causing the patella to luxate or slip out of place, sideways. The kneecap sliding across the bony ridges of the femur can cause some pain. The affected Chihuahua will hold its leg flexed, with its foot off the ground, until the quadriceps muscle relaxes and lengthens, after which the animal feels no discomfort and continues with activity.
Chihuahuas are also prone to some heart-related disorders, such as heart murmurs and pulmonic stenosis, a condition in which the blood outflow from the heart's right ventricle is obstructed at the pulmonic valve.
Gallery
A smooth coat Chihuahua
A Long Coat white parti-color Chihuahua
A correct smooth coat Chihuahua
A shorthaired blue merle Chihuahua
A longhaired Chihuahua
Excellent example of a smooth coat Chihuahua
Chihuahua puppies
A longhaired brown Chihuahua with dark saddle
A chocolate and tan shorthaired Chihuahua portrait showing the characteristic traits of the breed
A sable longhaired Chihuahua portrait
A chocolate long coat Chihuahua
A cream-coloured long coat Chihuahua
Soft Nosed Chihuahua
A white Chihuahua on the beach
Shorthaired chocolate-tri-color Chihuahua
Longhaired black & white Chihuahua
|
/**
* Looks to see if we have a new distance text.
* Sets new distance text if found.
*
* @param model provides distance text
*/
private void addDistanceText(InstructionModel model) {
if (newDistanceText(model)) {
distanceText(model);
} else if (stepDistanceText.getText().toString().isEmpty()) {
distanceText(model);
}
} |
#ifndef INTERFACE_H
#define INTERFACE_H
#include <QObject>
#include "model.h"
#include "serialcommunicator.h"
#include "adc.h"
#include "motor.h"
#include "light.h"
class Interface : public QObject
{
Q_OBJECT
public:
Interface(unsigned short io8251, unsigned short io8253, unsigned short io8255, unsigned short io0809);
~Interface();
void setup();
signals:
void log(const QString & message);
void fatal(const QString & message);
void send(unsigned char data);
void changeSpeed(unsigned char rank);
void changeLightStatus(unsigned char data);
private:
unsigned short io8251, io8253, io8255, io0809;
SerialCommunicator * communicator;
ADC * adc;
Motor * motor;
Light * light;
void handleReceived(unsigned char receivedData);
void handleCollected(unsigned char collectedData);
};
#endif
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
#include "tink/aead/aes_gcm_siv_key_manager.h"
#include <stdint.h>
#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "tink/aead.h"
#include "tink/internal/ssl_util.h"
#include "tink/subtle/aead_test_util.h"
#include "tink/subtle/aes_gcm_siv_boringssl.h"
#include "tink/util/secret_data.h"
#include "tink/util/status.h"
#include "tink/util/statusor.h"
#include "tink/util/test_matchers.h"
#include "proto/aes_gcm_siv.pb.h"
#include "proto/tink.pb.h"
namespace crypto {
namespace tink {
namespace {
using ::crypto::tink::test::IsOk;
using ::crypto::tink::test::StatusIs;
using ::crypto::tink::util::StatusOr;
using ::google::crypto::tink::AesGcmSivKey;
using ::google::crypto::tink::AesGcmSivKeyFormat;
using ::testing::Eq;
using ::testing::Not;
TEST(AesGcmSivKeyManagerTest, Basics) {
EXPECT_THAT(AesGcmSivKeyManager().get_version(), Eq(0));
EXPECT_THAT(AesGcmSivKeyManager().get_key_type(),
Eq("type.googleapis.com/google.crypto.tink.AesGcmSivKey"));
EXPECT_THAT(AesGcmSivKeyManager().key_material_type(),
Eq(google::crypto::tink::KeyData::SYMMETRIC));
}
TEST(AesGcmSivKeyManagerTest, ValidateEmptyKey) {
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(AesGcmSivKey()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, ValidateValid16ByteKey) {
AesGcmSivKey key;
key.set_version(0);
key.set_key_value("0123456789abcdef");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key), IsOk());
}
TEST(AesGcmSivKeyManagerTest, ValidateValid32ByteKey) {
AesGcmSivKey key;
key.set_version(0);
key.set_key_value("01234567890123456789012345678901");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key), IsOk());
}
TEST(AesGcmSivKeyManagerTest, InvalidKeySizes17Bytes) {
AesGcmSivKey key;
key.set_version(0);
key.set_key_value("0123456789abcdefg");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, InvalidKeySizes24Bytes) {
AesGcmSivKey key;
key.set_version(0);
  key.set_key_value("012345678901234567890123");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, InvalidKeySizes31Bytes) {
AesGcmSivKey key;
key.set_version(0);
key.set_key_value("0123456789012345678901234567890");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, InvalidKeySizes33Bytes) {
AesGcmSivKey key;
key.set_version(0);
key.set_key_value("012345678901234567890123456789012");
EXPECT_THAT(AesGcmSivKeyManager().ValidateKey(key),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, ValidateKeyFormat) {
AesGcmSivKeyFormat format;
format.set_key_size(0);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
format.set_key_size(1);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
format.set_key_size(15);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
format.set_key_size(16);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format), IsOk());
format.set_key_size(17);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
format.set_key_size(31);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
format.set_key_size(32);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format), IsOk());
format.set_key_size(33);
EXPECT_THAT(AesGcmSivKeyManager().ValidateKeyFormat(format),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AesGcmSivKeyManagerTest, Create16ByteKey) {
AesGcmSivKeyFormat format;
format.set_key_size(16);
StatusOr<AesGcmSivKey> key_or = AesGcmSivKeyManager().CreateKey(format);
ASSERT_THAT(key_or, IsOk());
EXPECT_THAT(key_or.value().key_value().size(), Eq(format.key_size()));
}
TEST(AesGcmSivKeyManagerTest, Create32ByteKey) {
AesGcmSivKeyFormat format;
format.set_key_size(32);
StatusOr<AesGcmSivKey> key_or = AesGcmSivKeyManager().CreateKey(format);
ASSERT_THAT(key_or, IsOk());
EXPECT_THAT(key_or.value().key_value().size(), Eq(format.key_size()));
}
TEST(AesGcmSivKeyManagerTest, CreateAeadFailsWithOpenSsl) {
if (internal::IsBoringSsl()) {
GTEST_SKIP() << "OpenSSL-only test, skipping because Tink uses BoringSSL";
}
AesGcmSivKeyFormat format;
format.set_key_size(32);
StatusOr<AesGcmSivKey> key = AesGcmSivKeyManager().CreateKey(format);
ASSERT_THAT(key, IsOk());
EXPECT_THAT(AesGcmSivKeyManager().GetPrimitive<Aead>(*key).status(),
Not(IsOk()));
EXPECT_THAT(subtle::AesGcmSivBoringSsl::New(
util::SecretDataFromStringView(key->key_value()))
.status(),
Not(IsOk()));
}
TEST(AesGcmSivKeyManagerTest, CreateAeadSucceedsWithBoringSsl) {
if (!internal::IsBoringSsl()) {
GTEST_SKIP() << "AES-GCM-SIV is not supported when OpenSSL is used";
}
AesGcmSivKeyFormat format;
format.set_key_size(32);
StatusOr<AesGcmSivKey> key = AesGcmSivKeyManager().CreateKey(format);
ASSERT_THAT(key, IsOk());
StatusOr<std::unique_ptr<Aead>> aead =
AesGcmSivKeyManager().GetPrimitive<Aead>(*key);
ASSERT_THAT(aead, IsOk());
StatusOr<std::unique_ptr<Aead>> boring_ssl_aead =
subtle::AesGcmSivBoringSsl::New(
util::SecretDataFromStringView(key->key_value()));
ASSERT_THAT(boring_ssl_aead, IsOk());
EXPECT_THAT(EncryptThenDecrypt(**aead, **boring_ssl_aead, "message", "aad"),
IsOk());
}
} // namespace
} // namespace tink
} // namespace crypto
|
/**
 * This method checks if the end date is before the begin date.
 *
 * @param endDate   the end date to validate
 * @param beginDate the begin date to validate against
 * @return true if endDate is before beginDate, false otherwise
 */
public boolean validateDates(Timestamp endDate, Timestamp beginDate) {
boolean result = false;
try {
if ((endDate.before(beginDate))) {
result = true;
}
} catch (Exception e) {
errorList.add(DeErrorCodes.API_DE_000);
logger.error("Exception thrown in validateDates() method of BaseValidator " + e);
}
return result;
} |
Generating Self-Serendipity Preference in Recommender Systems for Addressing Cold Start Problems
Classical accuracy-oriented Recommender Systems (RSs) typically face the cold-start problem and the filter-bubble problem when users suffer the familiar, repeated, and even predictable recommendations, making them boring and unsatisfied. To address the above issues, serendipity-oriented RSs are proposed to recommend appealing and valuable items significantly deviating from users' historical interactions and thus satisfying them by introducing unexplored but relevant candidate items to them. In this paper, we devise a novel serendipity-oriented recommender system (Generative Self-Serendipity Recommender System, GS$^2$-RS) that generates users' self-serendipity preferences to enhance the recommendation performance. Specifically, this model extracts users' interest and satisfaction preferences, generates virtual but convincing neighbors' preferences from themselves, and achieves their self-serendipity preference. Then these preferences are injected into the rating matrix as additional information for RS models. Note that GS$^2$-RS can not only tackle the cold-start problem but also provide diverse but relevant recommendations to relieve the filter-bubble problem. Extensive experiments on benchmark datasets illustrate that the proposed GS$^2$-RS model can significantly outperform the state-of-the-art baseline approaches in serendipity measures with a stable accuracy performance.
Introduction
Recent decades have witnessed the magnificent success of recommender systems in both industry and academia. As an important tool to filter the enormous information, a proper recommender system aims to select relevant candidate items for object users while extracting their personalized preferences from their historical shopping logs simultaneously. To achieve this goal, collaborative filtering (CF) (Zou et al. 2020) and matrix factorization (MF) (Chen et al. 2020b) have been the most popular algorithms and were developed for decades. However, as shown in the recent research literature, conventional CF and MF models often suffer from cold-start situations (Chae et al. 2020), filter bubbles (Kapoor et al. 2015), and overfitting (Feldman, Frostig, and Hardt 2019), which results in inaccurate, irrelevant, and repeated recommendations. To make things worse, these recommendations might irritate users and hurt their shopping experience.
In terms of data sparsity (the sparsity of the user-item interaction matrix), the Cold-Start (CS) problem and the Filter-Bubble (FB) problem are the vital issues that limit the performance of existing recommendation models. The CS problem is a common but serious challenge for recommender systems. E-commerce websites, such as Amazon, Yelp, and Taobao, usually have millions of users and items. However, the feedback interactions (clicks, browses, purchases, ratings, etc.; in this paper, we focus on ratings) between them only account for a minimal fraction. Obviously, without enough historical interactions, it is difficult for CF models to understand the preferences of users who do not have enough ratings, thereby leading to inaccurate recommendations. To relieve this cold-start situation, some existing recommendation models prefer to recommend the most popular items to those cold-start users to achieve a better recommendation accuracy expectation. Nevertheless, the similar, repeated recommendations to most users without personalization result in another critical challenge, the filter-bubble problem, especially for MF models. With these homogeneous recommendations, the popular items' interaction weights increase in MF models. Naturally, those items have more chances to be explored by users than other items should, dominating the recommendation results, which is a typical scenario of the Matthew Effect. What is worse, the CS problem and the FB problem usually appear in pairs and reinforce each other, which seriously impacts the recommendation quality.
To address the filter-bubble problem, most researchers so far have focused on exploiting auxiliary information such as users' attributes (Bi et al. 2020), users' social relations (Fu et al. 2021), and items' description text (Chae et al. 2020) or reviews (Wang, Ounis, and Macdonald 2021). Along this line, serendipity-oriented recommender systems are proposed to recommend unexpected but valuable items to users (Yang et al. 2018; Ziarani and Ravanmehr 2021). However, their models are effective and useful only when this auxiliary information is available. Besides, over-utilizing this auxiliary information may cause privacy disclosure problems (Burbach et al. 2018). For the cold-start problem, some studies employed novel data frameworks to extend existing recommendation models, such as multi-layer perceptrons (He et al. 2017), recurrent neural networks (Xu et al. 2019), or the latest graph convolutional networks (Zhang et al. 2020), to find latent neighbors or similar latent representations of cold-start users/items. However, these neural network-based models require huge computing power and are costly to deploy. Our research focuses on original rating-based RS, a Top-K recommendation task with only one user-item rating matrix, without requiring any auxiliary information or changing existing recommendation models. In this context, data completion has been the most popular algorithm to tackle the CS problem (Chae et al. 2020; Kim and Suh 2019; Ziarani and Ravanmehr 2021). These models fill the sparse user-item rating matrix by inferring the data distribution from existing ratings to relieve the cold-start problem. Nevertheless, data completion models are often too coarse to give a reasonable recommendation (for example, they cannot extract users' preferences on items), so they cannot address the filter-bubble problem.
In this paper, we propose a novel recommender system framework that can tackle the cold-start problem and the filter-bubble problem at the same time. Different from existing neural network-based models or data completion models, our core idea is to generate virtual but convincing user preferences on items, drop the impossible items from the candidate item set, and thereby enhance most existing recommendation models. Specifically, GS$^2$-RS, which stands for Generative Self-Serendipity Recommendation System, consists of the following three modules: 1) preference modelling: we analyze the user-item rating matrix to form users' historical interest and satisfaction preferences, and then train different Conditional Generative Adversarial Nets (CGANs) to generate users' virtual preferences (interest and satisfaction) on candidate items; 2) self-serendipity fusion and matrix injection: we devise a gate mechanism to combine users' interests and satisfactions to form their self-serendipity preferences, and GS$^2$-RS then drops the impossible items from the candidate item set by filling 0s in the user-item rating matrix, which builds an enhanced rating matrix; 3) recommendation: GS$^2$-RS outputs this enhanced rating matrix to any existing recommendation model. Our proposed model can achieve a personalized, customized recommendation by tuning the gate threshold in the matrix injection stage.
To the best of our knowledge, our work is the first attempt to employ GANs for building a serendipity-oriented recommender system that tackles both CS and FB problems at the same time (as shown in Figure 1). Notably, the contributions of our proposed model are summarized as follows. (i) We propose a novel serendipity-oriented recommendation framework (GS$^2$-RS), which can tackle both the cold-start and filter-bubble problems without any auxiliary information. (ii) GS$^2$-RS utilizes GANs to generate users' serendipity preferences from only the user-item rating matrix and makes an explainable, personalized recommendation with a delicate matrix injection method. Specifically, GS$^2$-RS can be treated as preprocessing for any existing recommendation model. (iii) We conduct extensive empirical studies on three public datasets and find that GS$^2$-RS can achieve superior recommendation performance on both accuracy and serendipity metrics.
GS$^2$-RS: Generative Self-Serendipity Recommender System
Preliminary and Problem Statement
Given a recommender system that contains M users and N items, the user-item rating matrix R is an M × N low-rank sparse matrix, where entry $r_{ij}$ stands for the rating that user $i$ gave item $j$. Note that in real-world scenarios, R is usually extremely sparse (more than 90% of the data is unknown to learning models), meaning there are many "?" entries in R.
We focus on original rating-based RS, which means that to achieve a Top-K recommendation task, the input of our model is only the original user-item rating matrix R. However, the sparsity of R usually leads to the cold-start (CS) problem and the filter-bubble (FB) problem (Chae et al. 2020), as shown in Figure 2. Cold-start problem: the extreme sparsity may lead RS models to infer inaccurate preferences of users on candidate items, which causes item- and user-based CS problems. Filter-bubble problem: some models pay so much attention to existing ratings that the more ratings an item has, the more chances it has to be recommended to users. In Figure 2, as an FB problem, the item with more ratings dominates the other items to be recommended in some existing models, like CF models (Chen et al. 2020a). Both problems affect recommender systems' performance and users' shopping experience. To tackle the problems above, we are inspired by (Chae et al. 2020), which utilizes GANs to generate users' virtual neighbors for enhancing CF models. However, this model cannot consider the FB problem because it directly generates the virtual neighbors' ratings without inferring users' preferences. Along this line, we first introduce users' two preference types, interest and satisfaction: Interest: for a user $u$ and an item $i$ in the candidate item set $I^{can}_u$, if $r_{ui} \neq ?$, which means that $u$ had the interest to buy $i$, we define the interest preference $r^{in}_{ui} = 1$, with $r^{in}_{ui} \in R^{in}$ and $i \in I^{in}_u$. Satisfaction: for a user $u$ and an item $i \in I^{in}_u$, if $r_{ui} \geq \alpha_{ui}$, we define the satisfaction preference $r^{sa}_{ui} = 1$, $i \in I^{sa}_u$; else $r^{sa}_{ui} = 0$, with $r^{sa}_{ui} \in R^{sa}$. Note that the threshold $\alpha_{ui}$ can be defined as either $u$'s or $i$'s average rating, or another contextual value. In our proposed model, we first extract users' historical preferences, as shown in Figure 3.
Figure 3: Historical preference extraction. $\alpha_{ui}$ is $u$'s average rating. Note that for the fourth row of $R^{sa}$, we take both $u$'s and $i$'s average ratings ((2+2)/2 = 2; (5+2)/2 = 3.5) into consideration as the thresholds.
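As an illustration (not the authors' code), the following minimal NumPy sketch derives $R^{in}$ and $R^{sa}$ from a toy rating matrix, assuming unknown ratings are stored as NaN and using each user's average rating as the threshold $\alpha_{ui}$:

# Minimal sketch of the historical preference extraction; the matrix values
# and the per-user threshold are illustrative assumptions.
import numpy as np

R = np.array([[5.0, np.nan, 2.0],
              [np.nan, 4.0, 4.0],
              [1.0, 5.0, np.nan]])

rated = ~np.isnan(R)                             # interest: u rated (bought) item i
R_in = rated.astype(int)

user_avg = np.nanmean(R, axis=1, keepdims=True)  # per-user threshold alpha_u
R_filled = np.nan_to_num(R, nan=-np.inf)         # unknown ratings can never satisfy
R_sa = (R_filled >= user_avg).astype(int)        # satisfaction: rating >= threshold

Mixing the user and item averages, as in the fourth row of Figure 3, only changes how the threshold is computed.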
Meanwhile, we analyze users' preferences at a fine-grained level. We introduce users' serendipity items: items with high relevance but low shopping purpose (Yang et al. 2018). In common situations, these items can produce a wonderful purchasing experience once users buy them, so recommending serendipity items can achieve better diversity and satisfaction. By considering serendipity items, the filter-bubble problem can be relieved effectively.
Multiple Preference Modelling
This section introduces how we deduce users' multiple preferences, including users' virtual interest and satisfaction preferences, based on Conditional Generative Adversarial Nets (CGAN) (Mirza and Osindero 2014), a framework to train generative models with complicated, high-dimensional real-world data such as images. Specifically, CGAN is an extension of the original GAN (Goodfellow et al. 2014): it allows a generative model $G$ to produce data according to a specific condition vector $c$ by treating the desired condition vector as an additional input alongside the random noise input $z$. Thus, CGAN's objective function is formulated as follows:
$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{data}(x)}[\log D(x|c)] + \mathbb{E}_{z \sim p_z(z)}[\log(1 - D(G(z|c)|c))]$, (1)
where $x$ is ground-truth data from the data distribution $p_{data}$, $z$ is a noise input vector sampled from the known prior $p_z$, $G(z|c)$ is synthetic data from the generator distribution $p_g$, and $c$ corresponds to a condition vector such as a one-hot vector of a specific class label. Under the optimal objective $\min_G \max_D V(D, G)$, the completely trained $G$ is expected to generate data so realistic that the discriminative model $D$'s estimate of the probability that the data came from the ground truth rather than from $G$ equals 0.5.
After extracting users' interest and satisfaction preferences $R^{in}$, $R^{sa}$, we build two CGANs, one for each type of preference. The training procedure is shown in Figure 4. Note that we apply the same CGAN framework to interest and satisfaction preferences, respectively; for the sake of simplicity, we take interest preferences as the example and briefly introduce how the framework is deployed (satisfaction preferences follow identically). Formally, we train the interest CGANs by instantiating the objective in Eq. (1) for the rating pair $(G^{in}_r, D^{in}_r)$ and the indicator pair $(G^{in}_f, D^{in}_f)$ (Formula 2), where $c^{in}_u$ denotes $u$'s interest condition vector, $z^{in}_u$ denotes $u$'s noise vector, $r^{in}_u$ denotes $u$'s interest vector, and $f^{in}_u$ denotes the interest indicator vector. Note that $f^{in}_u$ has the same dimension as $r^{in}_u$, and each entry $f^{in}_{ui}$ of $f^{in}_u$ is 1/0 to indicate whether there is an interest value for item $i$.
In Formula 2, $G^{in}_r$ and $G^{in}_f$ are employed to produce $u$'s synthetic interest vector and interest indicator vector, denoted $\tilde{r}^{in}_u$, $\tilde{f}^{in}_u$, while $D^{in}_r$ and $D^{in}_f$ are employed to distinguish the real interest vector $r^{in}_u$ and indicator vector $f^{in}_u$ from the synthetic vectors, respectively. There are two specific designs in this framework: first, each G's output values are restricted to the range (0, 1), via a Layer Normalization, for the subsequent computation; second, a masking operation $\tilde{r}^{in}_u \odot f^{in}_u$ (an element-wise dot product) is employed to avoid useless computation. This element-wise dot operation forces the discriminators to focus on the observed values $r^{in}_{ui}$ in the interest vector $r^{in}_u$, which contribute to the computing results; besides, the masking operation relieves the data sparsity issue. All D, G are co-trained and deployed as DNNs, where stochastic gradient descent (SGD) with minibatches and the back-propagation algorithm are employed. Similarly to interest preferences, satisfaction preferences are modeled with the same objectives instantiated on $(G^{sa}_r, D^{sa}_r)$ and $(G^{sa}_f, D^{sa}_f)$ (Formula 3). After training our CGANs, we are ready to generate users' virtual preferences, including interest and satisfaction. With these virtual preferences, we aim to deduce users' self-serendipity preferences for accurate recommendation.
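The masking design can be pictured in two lines: the generator's output is multiplied element-wise with the real indicator vector, so unobserved positions contribute nothing to the discriminator's score. This is our NumPy rendering, not the authors' code:

import numpy as np

r_fake = np.array([0.9, 0.4, 0.7, 0.2])  # generator output, values in (0, 1)
f_real = np.array([1.0, 0.0, 1.0, 0.0])  # indicator: which items are observed

masked = r_fake * f_real                 # element-wise (Hadamard) product
# -> [0.9, 0.0, 0.7, 0.0]: the discriminator only scores observed slots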
Self-serendipity Fusion and Zero Matrix Injection
The usage of GS²-RS is shown in Figure 5. With the four well-trained generators $G^{in}_r$, $G^{in}_f$, $G^{sa}_r$, $G^{sa}_f$, we feed an objective user $u$'s condition vectors $c^{in}_u$, $c^{sa}_u$ and noise vectors $z^{in}_u$, $z^{sa}_u$ to obtain the synthetic vectors $\tilde{r}^{*}_u = G^{*}_r(z^{*}_u | c^{*}_u)$ and $\tilde{f}^{*}_u = G^{*}_f(z^{*}_u | c^{*}_u)$ (Formula 5), where $*$ denotes either $in$ or $sa$. For each objective user, our proposed model can generate several synthetic $(\tilde{r}^{*}_u, \tilde{f}^{*}_u)$ pairs. Generally, we treat each vector pair as a virtual user $u'$ for the objective user $u$, named a self neighbor, and each virtual user $u'$'s preferences are given by the corresponding synthetic pair. Note that the number of virtual users can be tuned for better performance; we discuss this in the experiment section. For the sake of simplicity, we only generate $t = 2$ self neighbors here for explanation, as shown in Figure 5. Then we feed the virtual users' preference vectors and the original user $u$'s preference vector $r^{*}_u$ into existing CF models to obtain the self preference $\hat{r}^{*}_u$ (Formula 6), for interest and satisfaction respectively. This operation generates two enhanced preference matrices $\hat{R}^{in}$, $\hat{R}^{sa}$. Note that these two matrices can be obtained with different operations over the original vectors and their virtual neighbors, such as averaging, a threshold mechanism, collaborative filtering, etc.
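As one concrete instance of the fusion step, the sketch below simply averages the original vector with its t generated self-neighbors; averaging is one of the operations the authors list, and the numbers are toy values:

import numpy as np

r_u = np.array([1.0, 0.0, 1.0, 0.0])       # user u's original interest vector
virtual = np.array([[0.8, 0.3, 0.9, 0.1],  # t = 2 generated self neighbors
                    [0.7, 0.2, 0.8, 0.2]])

# Fused self preference, one possible realization of Formula 6:
r_hat_u = np.vstack([r_u[None, :], virtual]).mean(axis=0)
# -> [0.833, 0.167, 0.9, 0.1]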
Then a serendipity fusion operation is employed to mark each potential candidate item for objective users. First, recall serendipity items: items with high relevance but low shopping purpose (Yang et al. 2018). With this in mind, we can intuitively deduce that items with high satisfaction but low interest should be marked as serendipity items, meaning the objective user would have a much better experience after buying them. To obtain the serendipity item set, we set thresholds $\theta^{in}$ and $\theta^{sa}$ for interest and satisfaction, respectively. An item with $\hat{r}^{sa}_{ui} \geq \theta^{sa}$ but $\hat{r}^{in}_{ui} < \theta^{in}$ is marked as $s_{ui} = 1$. For each user, there is an indicator vector $s_u$ marking his serendipity items.
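Marking serendipity items is then a pair of threshold comparisons; a sketch with theta_in = theta_sa = 0.5, the setting used later in the experiments:

import numpy as np

theta_in, theta_sa = 0.5, 0.5
r_hat_in = np.array([0.2, 0.8, 0.3])   # enhanced interest scores
r_hat_sa = np.array([0.9, 0.9, 0.1])   # enhanced satisfaction scores

# Serendipity: high satisfaction but low interest.
s_u = ((r_hat_sa >= theta_sa) & (r_hat_in < theta_in)).astype(int)
# -> [1, 0, 0]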
One of our contributions is to address the cold-start problem. The root cause of the cold-start issue is the sparsity of the user-item matrix, so we employ zero matrix injection to relieve it. Unlike existing matrix injection methods, we do not inject potential candidate items into the user-item matrix; instead, we pick impossible items to filter the candidate items. The reasons are that 1) deducing ratings from users' preferences is usually tricky and inaccurate, with many uncontrollable factors, and 2) in real-world scenarios, an objective user's potential items form a relatively small partition of the thousands or millions of unobserved items. So adding zeros for impossible items can greatly relieve the sparsity issue.
We filter items with the two vectors $\hat{r}^{in}_u$ and $\hat{r}^{sa}_u$ for user $u$'s zero injections; there are several situations with different values of $\hat{r}^{in}_{ui}$ and $\hat{r}^{sa}_{ui}$ (where $i$ indexes the same location in both vectors). Note that $\hat{r}^{in}_u$ and $\hat{r}^{sa}_u$ are computed by Formula 6; each known element value satisfies $0 < \hat{r}_{ui} \leq 1$, while entries without support remain ?. Generally, we obey the following principles to inject $r^{h}_{ui} = 0$ into the enhanced matrix $R^h$:
• If $\hat{r}^{in}_{ui} < \theta^{in}$ and $\hat{r}^{sa}_{ui} < \theta^{sa}$, inject $r^{h}_{ui} = 0$;
• If $\hat{r}^{in}_{ui} < \theta^{in}$ (resp. $\hat{r}^{sa}_{ui} < \theta^{sa}$) and $\hat{r}^{sa}_{ui} = ?$ (resp. $\hat{r}^{in}_{ui} = ?$), inject $r^{h}_{ui} = 0$;
• Else, we set $r^{h}_{ui} = r_{ui}$.
Intuitively, the impossible item set for recommendation consists of items with low interest and low satisfaction (below the thresholds). We also inject 0s for items with an unknown preference (?) on one side and a low interest/satisfaction preference on the other. These 0s relieve the user-item matrix's sparsity. Moreover, the enhanced user-item matrix $R^h$ gives more meaningful feedback indicating users' preferences on items and reduces unknown feedback, which relieves the cold-start problem. For items/users with few or zero feedbacks (new items/users in recommender systems) in the original user-item matrix $R$, our proposed model can inject some zeros as an initialization, which relieves the new-user/item cold-start problem. Note that these zeros are temporary, not fixed: once the original user-item matrix has been updated to a large extent (many new $r_{ui}$ replacing ?), we can employ GS²-RS again to update $R^h$.
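A direct transcription of these three principles, with NaN again standing in for "?"; this is our sketch, with thresholds following the paper's setting:

import numpy as np

theta_in, theta_sa = 0.5, 0.5

def inject_zeros(r_u, r_hat_in, r_hat_sa):
    """Build user u's row of R^h from the three injection principles."""
    r_h = r_u.copy()
    for i in range(len(r_u)):
        unk_in, unk_sa = np.isnan(r_hat_in[i]), np.isnan(r_hat_sa[i])
        lo_in = (not unk_in) and r_hat_in[i] < theta_in
        lo_sa = (not unk_sa) and r_hat_sa[i] < theta_sa
        if (lo_in and lo_sa) or (lo_in and unk_sa) or (lo_sa and unk_in):
            r_h[i] = 0.0   # impossible item: inject an explicit zero
        # else: keep the original entry (a rating or "?") untouched
    return r_h

row = inject_zeros(np.array([np.nan, 4.0, np.nan]),
                   np.array([0.2, 0.9, 0.3]),
                   np.array([0.1, 0.8, np.nan]))
# -> [0.0, 4.0, 0.0]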
With $R^h$ as input (replacing the original $R$), and the interest matrix $\hat{R}^{in}$, satisfaction matrix $\hat{R}^{sa}$ and self-serendipity matrix $S$ as side-information inputs, we can obtain different types of recommendation results $L$ (Top-K, CTR, next purchase, etc.) with different RS models:

$$L = \text{RSmodel}(R^h, \hat{R}^{in}, \hat{R}^{sa}, S) \quad (7)$$

The complete GS²-RS model is described in Algorithm 1.
Recommendation Analysis
This section introduces how our proposed model enhances the SOTA recommender systems and solves the filter-bubble problem.
Enhancing CF/MF/NN-based Recommenders Existing recommender systems usually fall into three specific categories: collaborative filtering-based RS (CF models), matrix factorization-based RS (MF models), and neural network-based RS (NN models). GS²-RS can enhance all three categories for the following reasons. For CF models, GS²-RS provides more valuable feedback in $R^h$ (the 0s injected by Zero Matrix Injection), which helps the model compute distances between different users/items and select more accurate neighbor users to filter the items.
For MF models, we employ WRMF or other matrix factorization to learn users'/items' latent vectors from the user-item matrix $R$, and then make recommendations. Note that all existing matrix factorization algorithms' performance is affected dramatically by the matrix's sparsity, while our proposed model GS²-RS can relieve the sparsity problem by replacing $R$ with $R^h$. Moreover, the 0s in $R^h$ can also be treated as ratings, which constrains the learned latent user/item vectors for accurate performance. Note that MF models are usually employed as preprocessing for NN models to produce the input of the neural network framework, and the input is vital for NN models; indeed, our proposed model can enhance NN models by offering a superior input. We will give a discussion in the experimental section.

Algorithm 1 Generative Self-Serendipity RS model
Require: Original user-item matrix R.
Ensure: Recommendation results L.
Step 1: Preference Modelling
2: Calculate interest matrix $R^{in}$ and satisfaction matrix $R^{sa}$ with threshold $\alpha_{ui}$ (Figure 3);
3: Train CGANs with Formula 2 and Formula 3;
4: Output generators $G^{in}_r$, $G^{in}_f$, $G^{sa}_r$, $G^{sa}_f$;
Step 2: Self-serendipity Fusion & Zero Injection
5: for each objective user u do
6:   Ensure self-neighbor number t;
7:   for each self-neighbor u' do
8:     Calculate $\tilde{r}^{in}_{u'}$, $\tilde{r}^{sa}_{u'}$ with Formula 5;
10:  Calculate $\hat{r}^{in}_u$, $\hat{r}^{sa}_u$ with Formula 6;
11:  Compare $\hat{r}^{in}_u$ with $\hat{r}^{sa}_u$ element-wise with thresholds $\theta^{in}$, $\theta^{sa}$;
Enhancing Personalized Recommendations for the Filter-Bubble Problem Personalized recommendation is an important factor in recommender systems, because a boring, homogeneous recommendation is not what each individual expects. To enhance personalized recommendations, GS²-RS employs users' preferences (interest, satisfaction, and self-serendipity) to decide the recommendation order in the recommendation results L. With the interest matrix $\hat{R}^{in}$, satisfaction matrix $\hat{R}^{sa}$ and self-serendipity matrix S, we can build fine-grained user profiles for personalized recommendation. For example, we could check the percentage of interest and satisfaction items in an objective user's historical records to characterize the user's preference distribution, and then rerank the recommendation list in accordance with this distribution. Moreover, self-serendipity items should also be considered to achieve personalized, accurate recommendations. Details are introduced in the experimental section. With this side information, the filter-bubble problem can be adequately addressed.
Experiments
This section validates our proposed framework in three aspects: 1) how GS²-RS enhances overall recommendation performance; 2) how GS²-RS solves the cold-start and filter-bubble problems; and 3) how the threshold affects GS²-RS's performance.
Datasets We utilize two publicly accessible datasets: Movielens¹ and Amazon². Details are given in Table 1. Grid search and 5-fold cross-validation are used to find the best parameters. In our proposed GS²-RS, the thresholds are set as $\alpha_u = \sum_i r_{ui} / \#\text{num}(r_{ui})$ and $\theta^{in} = \theta^{sa} = 0.5$. The learning rate is 0.01.
Baselines To validate GS²-RS, we select several classic and SOTA RS models for comparison: 1) Collaborative Filtering (CF) (Koren and Bell 2015) and 2) Weighted Matrix Factorization (WMF) (Koenigstein, Ram, and Shavitt 2012; Chen et al. 2020c), two widely applied RS models; 3) Neural Collaborative Filtering (NCF) (He et al. 2017), a general neural-network-based recommendation framework that employs GMF as its preprocessing; 4) Joint Variational Autoencoder (JoVA) (Askari, Szlichta, and Salehi-Abari 2021), an ensemble of two VAEs that captures user-user and item-item correlations simultaneously for recommendation; and 5) Augmented Reality CF (AR-CF) (Chae et al. 2020), a GAN-based CF model applied directly on ratings.
¹ http://grouplens.org/datasets/movielens/
² http://www.kaggle.com/snap/amazon-fine-food-reviews/
Metrics We employ standard metrics to validate overall recommendation performance: Precision, Recall, Normalized Discounted Cumulative Gain (NDCG), and Mean Reciprocal Rank (MRR). For cold-start effectiveness, we use Exposure Ratio (ER): formally, ER = B/A, where B is the number of cold-start items exposed to at least one user and A is the total number of cold-start items. For filter-bubble effectiveness, we use diversity (DI) and serendipity (SE) (Yang et al. 2018). Both metrics are applied on a recommendation item set, as follows ($I_{real}$ is the ground truth):

$$DI = \#\text{categorynum}(I_{rec}) / \#\text{num}(I_{rec}); \quad SE = \#\text{num}(I_{rec} \cap I_{real} \cap (I^{sa} - I^{in})) / \#\text{num}(I_{real}).$$
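For concreteness, the three problem-specific metrics rendered over Python sets; this is a sketch in which recommended_all (the union of every user's recommendation list) and category_of (an item-to-category mapping) are our own names:

def exposure_ratio(cold_items, recommended_all):
    exposed = cold_items & recommended_all       # B: cold items shown to >= 1 user
    return len(exposed) / len(cold_items)        # ER = B / A

def diversity(i_rec, category_of):
    return len({category_of[i] for i in i_rec}) / len(i_rec)

def serendipity(i_rec, i_real, i_sa, i_in):
    return len(i_rec & i_real & (i_sa - i_in)) / len(i_real)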
Overall Performance for Enhancing Recommendation
The overall performance on the two datasets is shown in Table 2. Generally speaking, GS²-RS outperforms all compared models on both datasets for all metrics. The improvement can be attributed to two aspects: 1) the improvement of the two GAN-based models (AR-CF and GS²-RS) over the other models indicates that the benefit of GANs on sparse matrices is clear for recommendation; 2) GS²-RS enhances performance by generating users' virtual preferences, which is more effective than directly generating their virtual ratings on items, as indicated by GS²-RS's improvement over AR-CF. Moreover, as we claimed, GS²-RS can be applied as preprocessing for SOTA RS models, as shown in Figure 6. We observe that our proposed model enhances the SOTA models' Precision and NDCG performance on Movielens. GS²-RS universally and consistently provides the best accuracy, and we believe the benefits are credited to GS²-RS's ability to exploit the performance gains coming from the generated virtual (but plausible) user preferences as qualified training data.
Performance for Solving Cold-Start Problem
The problem with cold-start items is that they are challenging to recommend, so we employ Exposure Ratio (ER) to evaluate performance in solving the cold-start problem, using NCF, JoVA, and AR-CF as baselines against GS²-RS, as shown in Figure 7. T% denotes the bottom percentage of items by number of interactions with users; we treat these items as cold-start items. We observe that NCF and JoVA have difficulty with the cold-start problem, while AR-CF and our model outperform them considerably. Meanwhile, because our proposed model generates users' preferences, not ratings, GS²-RS performs better than AR-CF. Jointly considered with the accuracy results in Figure 6, these results demonstrate the effectiveness of our model in solving cold-start issues while maintaining stable recommendation performance.
Performance for Solving Filter-Bubble Problem
We use Diversity and Serendipity to evaluate performance in solving the filter-bubble problem, as shown in Figure 8. From the results, we observe that 1) GS²-RS improves diversity and serendipity significantly, especially on the sparse Amazon dataset; and 2) JoVA achieves second-best accuracy but relatively low diversity and serendipity compared with the other baselines. With high diversity and serendipity, we can offer users attractive and exciting recommendations instead of boring, repeated ones, greatly relieving the filter-bubble problem.
Threshold Effect Analysis
We validate the effect of θ, the most vital threshold of GS²-RS, ranging over (0.0, 1.0) with step 0.1. Note that we set $\theta^{in} = \theta^{sa} = \theta$ for validation, as shown in Figure 9. From the results, we observe that GS²-RS achieves the best performance at θ = 0.5. As θ approaches 0, GS²-RS cannot filter any items and degenerates into a basic GAN-based model. As θ approaches 1, GS²-RS drops every item, which damages its performance.
Related Work
As well-known basic problems in recommendation, the cold-start and filter-bubble problems have been explored by many researchers recently. For the cold-start problem, (Chen et al. 2020d) proposed a tagging algorithm to tag unobserved items to relieve the cold-start issue. (Bi et al. 2020) utilized cross-domain information to reduce data sparsity for the cold-start problem and achieved SOTA recommendation performance. Meanwhile, some researchers explore the usage of GANs (Goodfellow et al. 2014) for the cold-start problem: (Chae et al. 2020) generated virtual neighbors for objective users and made accurate recommendations by reducing cold-start items; (Wang 2021) generated user embeddings for recommendation and improved NDCG@100 significantly. However, these frameworks do not consider generating users' preferences, especially fine-grained preferences, as our framework GS²-RS does.
For the filter-bubble problem, some researchers (Burbach et al. 2018; Koren and Bell 2015) tried to increase the diversity of the recommendation list to tackle it. (Kapoor et al. 2015) adapted users' novelty preferences into recommendations, adding diversity to relieve the filter-bubble problem. Recently, the idea of serendipity has been proposed to solve the filter-bubble problem by offering novel, diverse, and high-satisfaction recommendations: (Ziarani and Ravanmehr 2021) gave general explanations of why serendipity items work for tackling filter-bubble situations, and (Yang et al. 2018) proposed a matrix factorization-based model for enhancing serendipity for superior recommendations. However, the challenge remains to integrate serendipity into the recommender system framework to solve the cold-start and filter-bubble problems simultaneously.
async def fire_event(self, event: Event) -> Event:
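    """Dispatch `event` to every handler registered for its identifiers.

    Once an event is cancelled, only handlers registered with
    cancelled=True keep running; per-handler filters are applied before
    each call, and coroutine handlers are awaited.
    """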
for identifier in event.identifiers:
handlers = self.registered.get(identifier, [])
for handler in handlers:
if event.cancelled and not handler["cancelled"]:
continue
if handler["filter"] and not handler["filter"](event):
continue
func = handler["callable"]
args = handler["args"]
kwargs = handler["kwargs"]
try:
if iscoroutinefunction(func):
await func(event, *args, **kwargs)
else:
func(event, *args, **kwargs)
except Exception:
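                # propagate handler errors to the caller unchanged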
raise
return event |
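For context, here is a minimal sketch of how such a bus might be wired up. The handler-record keys (callable, filter, cancelled, args, kwargs) and the registered mapping are inferred from fire_event above; the EventBus/Event names and the on() helper are our own assumptions, not the original API:

from inspect import iscoroutinefunction  # fire_event above relies on this

class Event:
    def __init__(self, identifiers):
        self.identifiers = identifiers   # keys used to look up handlers
        self.cancelled = False

class EventBus:
    def __init__(self):
        self.registered = {}             # identifier -> list of handler records

    def on(self, identifier, func, *, filter=None, cancelled=False,
           args=(), kwargs=None):
        self.registered.setdefault(identifier, []).append({
            "callable": func,
            "filter": filter,            # optional predicate over the event
            "cancelled": cancelled,      # True: also run for cancelled events
            "args": args,
            "kwargs": kwargs or {},
        })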
/// Places the given piece on the board WITH NO ADDITIONAL CHECKS.
pub fn place(&mut self, piece: &Piece) {
for position in piece.coordinates() {
self.set(position, piece.color);
}
} |
package git
import (
"bufio"
"fmt"
"runtime"
"strings"
"github.com/meinto/glow/cmd"
"github.com/meinto/glow"
"github.com/pkg/errors"
)
// NativeGitAdapter implemented with native git
type nativeGitAdapter struct {
exec cmd.CmdExecutor
}
// SetCICDOrigin for pipeline
func (a nativeGitAdapter) SetCICDOrigin(origin string) (stdout, stderr string, err error) {
return a.exec.Command(fmt.Sprintf("git remote set-url origin %s", origin)).Run()
}
// GitRepoPath returns the path to the root with the .git folder
func (a nativeGitAdapter) GitRepoPath() (repoPath string, stdout, stderr string, err error) {
cmd := a.exec.Command("git rev-parse --show-toplevel")
stdout, stderr, err = cmd.Run()
return strings.TrimSuffix(stdout, "\n"), stdout, stderr, err
}
// CurrentBranch returns the current branch name
func (a nativeGitAdapter) CurrentBranch() (branch glow.Branch, stdout, stderr string, err error) {
cmdBranchList, stdout, stderr, err := getCMDBranchList(a.exec)
if err != nil {
return nil, stdout, stderr, err
}
for _, b := range cmdBranchList {
if b.IsCurrentBranch {
branch, err := glow.BranchFromBranchName(b.Name)
return branch, stdout, stderr, err
}
}
return nil, stdout, stderr, errors.New("cannot detect current branch")
}
// BranchList returns a list of avalilable branches
func (a nativeGitAdapter) BranchList() (branchList []glow.Branch, stdout, stderr string, err error) {
cmdBranchList, stdout, stderr, err := getCMDBranchList(a.exec)
if err != nil {
return nil, stdout, stderr, err
}
branchList = make([]glow.Branch, 0)
for _, b := range cmdBranchList {
gb := glow.NewBranch(b.Name)
branchList = append(branchList, gb)
}
return branchList, stdout, stderr, nil
}
// Fetch changes
func (a nativeGitAdapter) Fetch() (stdout, stderr string, err error) {
return a.exec.Command("git fetch").Run()
}
// Add all changes
func (a nativeGitAdapter) AddAll() (stdout, stderr string, err error) {
return a.exec.Command("git add -A").Run()
}
// Stash all changes
func (a nativeGitAdapter) Stash() (stdout, stderr string, err error) {
return a.exec.Command("git stash").Run()
}
// Pop all stashed changes
func (a nativeGitAdapter) StashPop() (stdout, stderr string, err error) {
return a.exec.Command("git stash pop").Run()
}
// Commit added changes
func (a nativeGitAdapter) Commit(message string) (stdout, stderr string, err error) {
return a.exec.Command(fmt.Sprintf("git commit -m '%s'", message)).Run()
}
// Push changes
func (a nativeGitAdapter) Push(setUpstream bool) (stdout, stderr string, err error) {
cmd := a.exec.Command("git push")
if setUpstream {
currentBranch, stdout, stderr, err := a.CurrentBranch()
if err != nil {
return stdout, stderr, err
}
cmd = a.exec.Command(fmt.Sprintf("git push -u origin %s", currentBranch.ShortBranchName()))
}
return cmd.Run()
}
// Create a new branch
func (a nativeGitAdapter) Create(b glow.Branch, skipChecks bool) (stdout, stderr string, err error) {
if !skipChecks {
sourceBranch, stdout, stderr, err := a.CurrentBranch()
if err != nil {
return stdout, stderr, err
}
if !b.CreationIsAllowedFrom(sourceBranch) {
return stdout, stderr, errors.New("creation not allowed from this branch")
}
}
return a.exec.Command(fmt.Sprintf("git branch %s", b.ShortBranchName())).Run()
}
// Checkout a branch
func (a nativeGitAdapter) Checkout(b glow.Branch) (stdout, stderr string, err error) {
return a.exec.Command(fmt.Sprintf("git checkout %s", b.ShortBranchName())).Run()
}
// CleanupBranches removes all unused branches
func (a nativeGitAdapter) CleanupBranches(cleanupGone, cleanupUntracked bool) (stdout, stderr string, err error) {
xargsCmd := "xargs -r git branch -D"
if runtime.GOOS == "darwin" {
xargsCmd = "xargs git branch -D"
}
if cleanupGone {
stdout, stderr, err = a.exec.Command("git remote prune origin").Run()
if err != nil {
return stdout, stderr, err
}
stdout, stderr, err = a.exec.Command(fmt.Sprintf("git branch -vv | grep 'origin/.*: gone]' | awk '{print $1}' | %s", xargsCmd)).Run()
if err != nil {
return stdout, stderr, err
}
}
if cleanupUntracked {
stdout, stderr, err = a.exec.Command(fmt.Sprintf("git branch -vv | cut -c 3- | grep -v detached | awk '$3 !~/\\[origin/ { print $1 }' | %s", xargsCmd)).Run()
if err != nil {
return stdout, stderr, err
}
}
return stdout, stderr, err
}
// CleanupTags removes tags from local repo
func (a nativeGitAdapter) CleanupTags(cleanupUntracked bool) (stdout, stderr string, err error) {
xargsCmd := "xargs -r git tag -d"
if runtime.GOOS == "darwin" {
xargsCmd = "xargs git tag -d"
}
if cleanupUntracked {
stdout, stderr, err = a.exec.Command(fmt.Sprintf("git tag -l | %s", xargsCmd)).Run()
if err != nil {
return stdout, stderr, err
}
stdout, stderr, err = a.exec.Command("git fetch --tags").Run()
if err != nil {
return stdout, stderr, err
}
}
return stdout, stderr, err
}
func (a nativeGitAdapter) RemoteBranchExists(branchName string) (exists bool, stdout, stderr string, err error) {
stdout, stderr, err = a.exec.Command(fmt.Sprintf("git ls-remote --heads $(git remote get-url origin) %s | wc -l | tr -d ' '", branchName)).Run()
if err != nil {
return false, stdout, stderr, err
}
branchCount := strings.TrimSpace(stdout)
if branchCount == "1" {
return true, stdout, stderr, err
}
err = errors.New(fmt.Sprintf("Remote Branch %s does not exist", branchName))
return false, stdout, stderr, err
}
type cmdBranch struct {
Name string
IsCurrentBranch bool
}
func getCMDBranchList(exec cmd.CmdExecutor) (branch []cmdBranch, stdout, stderr string, err error) {
cmd := exec.Command("git branch --list").Get()
stdoutReader, err := cmd.StdoutPipe()
if err != nil {
return []cmdBranch{}, "", "", err
}
c := make(chan []cmdBranch)
go func(c chan []cmdBranch) {
var branchList []cmdBranch
scanner := bufio.NewScanner(stdoutReader)
for scanner.Scan() {
line := strings.Trim(scanner.Text(), " ")
parts := strings.Split(line, " ")
name := parts[0]
isCurrentBranch := false
if len(parts) > 1 {
name = parts[1]
isCurrentBranch = true
}
branchList = append(branchList, cmdBranch{
name,
isCurrentBranch,
})
}
c <- branchList
close(c)
}(c)
err = cmd.Run()
branchList := <-c
return branchList, "", "", err
}
|
An involvement of rice allergy in the development and exacerbation of recalcitrant atopic dermatitis (AD) has been suggested in some cases, and it is also known that an elimination diet excluding rice results in improvement of AD and reduction of the doses of steroid ointment and anti-allergic drugs used for treatment. We prepared a hypoallergenic rice grain, AFT-R 1 (Allergen-free Technology Lab. Inc.-Rice 1), by alkali treatment of the rice material Koshihikari, which is popular in Japan. Its usefulness was evaluated clinically and serologically. The serological study with IgE-ELISA showed that the major allergenic proteins were remarkably reduced in AFT-R 1, to less than 1/6400 of their original levels. AFT-R 1 was then applied in a clinical trial and evaluated as useful, by clinical effect, in 14 (93%) of 15 patients who had been diagnosed with rice allergy by elimination (in 15 cases) and ingestion (in 12 cases) of regular rice. The serum taken from the one patient for whom AFT-R 1 was evaluated as not useful was shown by IgE-ELISA to have IgE antibodies reactive to protein remaining in AFT-R 1, and a 60 kd protein band was detected as an IgE-binding component of AFT-R 1 by IgE-immunoblotting with the same serum. This 60 kd rice protein was identified as ADP (UDP)-glucose-starch glycosyl transferase (EC: 2.4.1.21) by N-terminal amino acid analysis. These results indicate that AFT-R 1 is very useful as a substitute for rice in many AD patients with rice allergy, although an IgE-binding component such as the 60 kd protein remains in it.
// fboss/agent/hw/sai/switch/SaiSwitchManager.cpp
/*
* Copyright (c) 2004-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include "fboss/agent/hw/sai/switch/SaiSwitchManager.h"
#include "fboss/agent/FbossError.h"
#include "fboss/agent/hw/sai/api/AdapterKeySerializers.h"
#include "fboss/agent/hw/sai/api/SaiApiTable.h"
#include "fboss/agent/hw/sai/api/SwitchApi.h"
#include "fboss/agent/hw/sai/switch/SaiManagerTable.h"
#include "fboss/agent/platforms/sai/SaiPlatform.h"
#include "fboss/agent/state/DeltaFunctions.h"
#include "fboss/agent/state/LoadBalancer.h"
#include "fboss/agent/state/StateDelta.h"
#include <folly/logging/xlog.h>
extern "C" {
#include <sai.h>
}
namespace {
using namespace facebook::fboss;
SaiSwitchTraits::Attributes::HwInfo getHwInfo(SaiPlatform* platform) {
std::vector<int8_t> connectionHandle;
auto connStr = platform->getPlatformAttribute(
cfg::PlatformAttributes::CONNECTION_HANDLE);
if (connStr.has_value()) {
std::copy(
connStr->c_str(),
connStr->c_str() + connStr->size() + 1,
std::back_inserter(connectionHandle));
}
return connectionHandle;
}
SaiSwitchTraits::Attributes::SrcMac getSrcMac(const SaiPlatform* platform) {
return platform->getLocalMac();
}
// (TODO: srikrishnagopu) Move this to SaiPlatform ?
SaiSwitchTraits::CreateAttributes getSwitchAttributes(
SaiPlatform* platform,
bool mandatoryOnly) {
SaiSwitchTraits::Attributes::InitSwitch initSwitch(true);
std::optional<SaiSwitchTraits::Attributes::HwInfo> hwInfo;
std::optional<SaiSwitchTraits::Attributes::SrcMac> srcMac;
if (!mandatoryOnly) {
hwInfo = getHwInfo(platform);
srcMac = getSrcMac(platform);
}
return {
initSwitch,
hwInfo,
srcMac,
std::nullopt, // shell
std::nullopt, // ecmp hash v4
std::nullopt, // ecmp hash v6
std::nullopt, // ecmp hash seed
std::nullopt, // lag hash seed
std::nullopt, // ecmp hash algo
std::nullopt, // lag hash algo
std::nullopt, // restart warm
};
}
sai_hash_algorithm_t toSaiHashAlgo(cfg::HashingAlgorithm algo) {
switch (algo) {
case cfg::HashingAlgorithm::CRC16_CCITT:
return SAI_HASH_ALGORITHM_CRC_CCITT;
case cfg::HashingAlgorithm::CRC32_LO:
return SAI_HASH_ALGORITHM_CRC_32LO;
case cfg::HashingAlgorithm::CRC32_HI:
return SAI_HASH_ALGORITHM_CRC_32HI;
case cfg::HashingAlgorithm::CRC32_ETHERNET_LO:
case cfg::HashingAlgorithm::CRC32_ETHERNET_HI:
case cfg::HashingAlgorithm::CRC32_KOOPMAN_LO:
case cfg::HashingAlgorithm::CRC32_KOOPMAN_HI:
default:
throw FbossError("Unsupported hash algorithm :", algo);
}
}
} // namespace
namespace facebook::fboss {
SaiSwitchManager::SaiSwitchManager(
SaiManagerTable* managerTable,
SaiPlatform* platform,
const std::optional<SwitchSaiId>& switchId)
: managerTable_(managerTable), platform_(platform) {
if (switchId) {
// Extract switch adapter key and create switch only with the mandatory
// init attribute (warm boot path)
auto& switchApi = SaiApiTable::getInstance()->switchApi();
auto newSwitchId = switchApi.create<SaiSwitchTraits>(
getSwitchAttributes(platform, true),
*switchId /* switch id; ignored */);
CHECK_EQ(*switchId, newSwitchId);
// Load all switch attributes
switch_ = std::make_unique<SaiSwitchObj>(*switchId);
switch_->setOptionalAttribute(getHwInfo(platform));
switch_->setOptionalAttribute(getSrcMac(platform));
} else {
switch_ = std::make_unique<SaiSwitchObj>(
std::monostate(),
getSwitchAttributes(platform, false),
0 /* fake switch id; ignored */);
}
}
SwitchSaiId SaiSwitchManager::getSwitchSaiId() const {
if (!switch_) {
throw FbossError("failed to get switch id: switch not initialized");
}
return switch_->adapterKey();
}
void SaiSwitchManager::resetHashes() {
ecmpV4Hash_.reset();
ecmpV6Hash_.reset();
}
void SaiSwitchManager::programLoadBalancerParams(
cfg::LoadBalancerID /*id*/,
std::optional<sai_uint32_t> seed,
std::optional<cfg::HashingAlgorithm> algo) {
auto hashSeed = seed ? seed.value() : 0;
auto hashAlgo = algo ? toSaiHashAlgo(algo.value()) : SAI_HASH_ALGORITHM_CRC;
switch_->setOptionalAttribute(
SaiSwitchTraits::Attributes::EcmpDefaultHashSeed{hashSeed});
switch_->setOptionalAttribute(
SaiSwitchTraits::Attributes::EcmpDefaultHashAlgorithm{hashAlgo});
}
void SaiSwitchManager::addOrUpdateLoadBalancer(
const std::shared_ptr<LoadBalancer>& newLb) {
if (newLb->getID() == cfg::LoadBalancerID::AGGREGATE_PORT) {
throw FbossError("Hash configuration for aggregate ports is not supported");
}
programLoadBalancerParams(
newLb->getID(), newLb->getSeed(), newLb->getAlgorithm());
if (newLb->getIPv4Fields().size()) {
// v4 ECMP
cfg::Fields v4EcmpHashFields;
v4EcmpHashFields.ipv4Fields.insert(
newLb->getIPv4Fields().begin(), newLb->getIPv4Fields().end());
v4EcmpHashFields.transportFields.insert(
newLb->getTransportFields().begin(), newLb->getTransportFields().end());
ecmpV4Hash_ = managerTable_->hashManager().getOrCreate(v4EcmpHashFields);
// Set the new ecmp v4 hash attribute on switch obj
switch_->setOptionalAttribute(
SaiSwitchTraits::Attributes::EcmpHashV4{ecmpV4Hash_->adapterKey()});
}
if (newLb->getIPv6Fields().size()) {
// v6 ECMP
cfg::Fields v6EcmpHashFields;
v6EcmpHashFields.ipv6Fields.insert(
newLb->getIPv6Fields().begin(), newLb->getIPv6Fields().end());
v6EcmpHashFields.transportFields.insert(
newLb->getTransportFields().begin(), newLb->getTransportFields().end());
ecmpV6Hash_ = managerTable_->hashManager().getOrCreate(v6EcmpHashFields);
// Set the new ecmp v6 hash attribute on switch obj
switch_->setOptionalAttribute(
SaiSwitchTraits::Attributes::EcmpHashV6{ecmpV6Hash_->adapterKey()});
}
}
void SaiSwitchManager::removeLoadBalancer(
const std::shared_ptr<LoadBalancer>& oldLb) {
if (oldLb->getID() == cfg::LoadBalancerID::AGGREGATE_PORT) {
throw FbossError("Hash configuration for Agg ports is not supported");
}
programLoadBalancerParams(oldLb->getID(), std::nullopt, std::nullopt);
ecmpV4Hash_.reset();
ecmpV6Hash_.reset();
}
void SaiSwitchManager::processLoadBalancerDelta(const StateDelta& delta) {
DeltaFunctions::forEachChanged(
delta.getLoadBalancersDelta(),
[this](
const std::shared_ptr<LoadBalancer>& /*oldLb*/,
const std::shared_ptr<LoadBalancer>& newLb) {
addOrUpdateLoadBalancer(newLb);
},
[this](const std::shared_ptr<LoadBalancer>& add) {
addOrUpdateLoadBalancer(add);
},
[this](const std::shared_ptr<LoadBalancer>& remove) {
removeLoadBalancer(remove);
});
}
void SaiSwitchManager::gracefulExit() {
// On graceful exit we trigger the warm boot path on
// ASIC by destroying the switch (and thus calling the
// remove switch function
// https://github.com/opencomputeproject/SAI/blob/master/inc/saiswitch.h#L2514
// Other objects are left intact to preserve data plane
// forwarding during warm boot
switch_.reset();
}
} // namespace facebook::fboss
|
#include "StringHelper.h"
#include <string>
namespace GG
{
namespace StringHelper
{
void RemovePath( const std::string & in, std::string & out )
{
out = in;
const size_t lastSlashIndex = out.find_last_of( "\\/" );
if( std::string::npos != lastSlashIndex )
{
out.erase( 0, lastSlashIndex + 1 );
}
}
}
}
|
export interface IMain {
temp: number;
humidity: number;
} |
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <vector>
#include <cassert>
#include <af/array.h>
#include <af/data.h>
#include <af/index.h>
#include <af/seq.h>
#include <ArrayInfo.hpp>
#include <err_common.hpp>
#include <handle.hpp>
#include <backend.hpp>
#include <Array.hpp>
#include <lookup.hpp>
using namespace detail;
using std::vector;
using std::swap;
template<typename T>
static af_array flipArray(const af_array in, const unsigned dim)
{
const Array<T> &input = getArray<T>(in);
vector<af_seq> index(4);
for (int i = 0; i < 4; i++) {
index[i] = af_span;
}
// Reverse "dim"
dim4 in_dims = input.dims();
af_seq s = {(double)(in_dims[dim] - 1), 0, -1};
index[dim] = s;
Array<T> dst = createSubArray(input, index);
return getHandle(dst);
}
af_err af_flip(af_array *result, const af_array in, const unsigned dim)
{
af_array out;
try {
ArrayInfo in_info = getInfo(in);
if (in_info.ndims() <= dim) {
*result = retain(in);
return AF_SUCCESS;
}
af_dtype in_type = in_info.getType();
switch(in_type) {
case f32: out = flipArray<float> (in, dim); break;
case c32: out = flipArray<cfloat> (in, dim); break;
case f64: out = flipArray<double> (in, dim); break;
case c64: out = flipArray<cdouble> (in, dim); break;
case b8: out = flipArray<char> (in, dim); break;
case s32: out = flipArray<int> (in, dim); break;
case u32: out = flipArray<unsigned>(in, dim); break;
case u8: out = flipArray<uchar> (in, dim); break;
default: TYPE_ERROR(1, in_type);
}
}
CATCHALL
swap(*result, out);
return AF_SUCCESS;
}
|
pub mod vm;
pub mod instruction;
pub use self::vm::VM;
pub use self::instruction::{
Instruction,
Opcode,
}; |
A source with knowledge of the situation confirmed with Behind the Steel Curtain Wednesday the Steelers have strong interest in free agent cornerback Brandon Flowers, and a visit with team officials in the near future may be the next step.
The source said the team is trying to get Flowers out to Pittsburgh, but he is scheduled to meet with the Chargers first.
CBS Sports' Jason La Canfora also reported the team's interest, and confirmed Flowers will be in San Diego over the next two days.
Free agent CB Brandon Flowers not negotiating w/teams yet. Will spend the next 2 days in SD. BAL, PIT, MIN among those monitoring closely — Jason La Canfora (@JasonLaCanfora) June 18, 2014
The Steelers began the offseason workout program after having passed on a cornerback in the first four rounds of the draft. They selected Arizona's Shaquille Richardson in the fifth round, and flirted a bit with some free agent cornerbacks. Their pursuit of the recently released Flowers raises some eyebrows, considering the expense Flowers may bring. At the same time, considering how late it is in the offseason - training camp starts in roughly six weeks - Flowers may not be in the best position to ask for a large salary.
BTSC had learned immediately following his release that the team would watch the situation but was not expected to offer a contract in the immediate future. Simply put, the team is interested in Flowers, but only at the right price.
/**
* When you use this function, any sound played will bypass the player's
* volume setting, so make sure to only use this for like warnings or stuff like that.
*/
public void playLoudSound(String sound, double pitch) {
playingSound = true;
Minecraft.getMinecraft().thePlayer.playSound(sound, 1, (float) pitch);
playingSound = false;
} |
from decimal import Decimal as Dec
from datetime import datetime
import pytest
from ..types import Integer, Float, Decimal, String, Timestamp
from ..types import Map, Record, Field
@pytest.mark.parametrize('metric_cls, value', [
(Integer, 5),
(Float, 5.1),
(Decimal, Dec('5.1')),
(String, 'deaness'),
(Timestamp, datetime.now()),
])
def test_set(metric_cls, value):
metric = metric_cls()
assert metric.dirty is False
metric.set(value)
assert metric.dirty is True
assert metric.__value__ == value
metric.clear()
assert metric.dirty is False
@pytest.mark.parametrize('metric_cls, first, second, sum_', [
(Integer, 5, 3, 8),
(Float, 5.1, 3.2, pytest.approx(8.3)),
(Decimal, Dec('5.1'), Dec('3.2'), Dec('8.3')),
])
def test_acc(metric_cls, first, second, sum_):
metric = metric_cls()
assert metric.dirty is False
assert metric.flush is False
metric.inc(first)
assert metric.dirty is True
assert metric.flush is True
assert metric.__value__ == first
metric.inc(second)
assert metric.__value__ == sum_
metric.clear()
assert metric.dirty is False
assert metric.flush is False
def test_map():
metric_type = Map[str, Integer]
metric = metric_type()
assert not metric.__items__
assert metric['scheldt'].dirty is False
assert metric.__items__
metric['scheldt'].inc(1)
assert metric['scheldt'].dirty is True
metric.clear()
assert not metric.__items__
def test_record():
class Sickly(Record):
chevaux = Field('charqui', Integer)
metric = Sickly()
assert metric.__fields__ == {'charqui': metric.chevaux}
assert metric.chevaux.dirty is False
metric.chevaux.inc(1)
assert metric.chevaux.dirty is True
metric.clear()
assert metric.chevaux.dirty is False
|
Assessment of dental caries with Digital Imaging Fiber-Optic TransIllumination (DIFOTI): in vitro study.
This paper describes Digital Imaging Fiber-Optic TransIllumination (DIFOTI-TM), a new method for the reliable detection of dental caries. Images of teeth obtained through visible-light, fiber-optic transillumination (FOTI) are acquired with a digital CCD camera, and sent to a computer for analysis with dedicated algorithms. The algorithms were developed to facilitate the location and diagnosis of carious lesions by the operator in real time, and provide quantitative characterization for monitoring of the lesions. The DIFOTI method has been tested by imaging teeth in vitro. The results suggest the superior sensitivity of DIFOTI for detection of approximal, occlusal and smooth-surface caries vis-à-vis radiological imaging. |
/* ppedit/sexp.c */
/*
ppedit - A pattern plate editor for Spiro splines.
Copyright (C) 2007 <NAME>
Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
option. This file may not be copied, modified, or distributed
except according to those terms.
*/
#include <stdio.h>
#include <math.h>
#include "sexp.h"
/* This is handcoded to avoid locale problems. */
static int
parse_double(sexp_reader *sr)
{
double sign = 1.0, val = 0.0;
int i;
int numstart;
const char * const b = sr->tokbuf;
int is_double = 1;
i = 0;
if (b[i] == '+') {
i++;
} else if (b[i] == '-') {
sign = -1.0;
i++;
}
numstart = i;
while (b[i] >= '0' && b[i] <= '9')
val = val * 10.0 + b[i++] - '0';
if (b[i] == '.') {
double frac = 1.0;
for (i++; b[i] >= '0' && b[i] <= '9'; i++) {
frac *= 0.1;
val += (b[i] - '0') * frac;
}
/* A '.' without any digits on either side isn't valid. */
if (i == numstart + 1)
is_double = 0;
}
if (b[i] == 'e' || b[i] == 'E') {
int expsign = 1, exp = 0;
        int expstart;
        i++;  /* step past the 'e'/'E' before reading the exponent */
        if (b[i] == '+') {
i++;
} else if (b[i] == '-') {
expsign = -1;
i++;
}
expstart = i;
while (b[i] >= '0' && b[i] <= '9')
exp = exp * 10 + b[i++] - '0';
if (i == expstart)
is_double = 0;
val *= pow(10.0, expsign * exp);
}
val *= sign;
sr->d = val;
if (b[i] != 0) is_double = 0;
sr->is_double = is_double;
return is_double;
}
/* Return values: 0 = EOF, 1 = token but not double, 2 = valid double */
int
sexp_token(sexp_reader *sr)
{
int c;
int i;
sr->singlechar = -1;
if (sr->f == NULL)
return 0;
for (;;) {
c = getc(sr->f);
if (c == EOF) {
sr->f = NULL;
return 0;
} else if (c == '#') {
do {
c = getc(sr->f);
} while (c != EOF && c != '\r' && c != '\n');
} else if (c != ' ' && c != '\r' && c != '\n' && c != '\t')
break;
}
sr->tokbuf[0] = c;
i = 1;
if (c != '(' && c != ')') {
for (;;) {
c = getc(sr->f);
if (c == EOF) {
sr->f = NULL;
break;
} else if (c == ' ' || c == '\r' || c == '\n' || c == '\t') {
break;
} else if (c == '(' || c == ')' || c == '#') {
ungetc(c, sr->f);
break;
} else if (i < sizeof(sr->tokbuf) - 1)
sr->tokbuf[i++] = c;
}
}
sr->tokbuf[i] = 0;
if (i == 1)
sr->singlechar = sr->tokbuf[0];
return 1 + parse_double(sr);
}
|
Corey Brown, left, shot and killed Nicoleia Taylor Tuesday morning, cops said. View Full Caption DNAinfo/Janet Upadhye & NYPD
CLINTON HILL — A woman was shot and killed near the Pratt Institute Tuesday after she was caught in crossfire between two groups of shooters, cops and witnesses said.
Nicoleia Taylor, 24, and another victim were shot in the courtyard of Lafayette Gardens Houses at Franklin and DeKalb avenues about 11:30 a.m., police said.
Both victims were rushed to Brooklyn Hospital, where Taylor was pronounced dead from a gunshot wound to the chest, police said. The condition of the second victim was not immediately available.
The shooter, 21-year-old Corey Brown, fled the scene, cops said. He has five prior arrests, including menacing with a weapon, criminal trespassing, two robberies and reckless endangerment, officials said.
An aunt said Taylor was a nurse and mother of two who worked with disabled children.
"She was a very good girl," said the tear-stricken aunt, Gayle Thomas. "She had nothing to do with the shooting."
Robert Page, 51, who lives in the complex, said Taylor was caught in a hail of bullets coming from two groups, one shooting from the DeKalb Avenue side of the courtyard, and the other shooting from the Franklin Avenue side.
Other witnesses said they heard between four and five shots.
Cops concurred that Taylor was not the intended target, but could not confirm that there were two groups of shooters. |
/**
* Overridden to update our cached dialog's appearance also.
*/
@Override
protected void lookAndFeelChanged(LookAndFeel laf) {
super.lookAndFeelChanged(laf);
if (scriptSearchDialog!=null) {
SwingUtilities.updateComponentTreeUI(scriptSearchDialog);
}
} |
// RunAlphaJobTerminalConditionDomainMappingReasonEnumToProto converts a JobTerminalConditionDomainMappingReasonEnum enum to its proto representation.
func RunAlphaJobTerminalConditionDomainMappingReasonEnumToProto(e *alpha.JobTerminalConditionDomainMappingReasonEnum) alphapb.RunAlphaJobTerminalConditionDomainMappingReasonEnum {
if e == nil {
return alphapb.RunAlphaJobTerminalConditionDomainMappingReasonEnum(0)
}
if v, ok := alphapb.RunAlphaJobTerminalConditionDomainMappingReasonEnum_value["JobTerminalConditionDomainMappingReasonEnum"+string(*e)]; ok {
return alphapb.RunAlphaJobTerminalConditionDomainMappingReasonEnum(v)
}
return alphapb.RunAlphaJobTerminalConditionDomainMappingReasonEnum(0)
} |
def change_figure_number(self, current_num, new_num):
if self._is_new_number_valid(new_num):
self._figures[new_num] = self._figures.pop(current_num)
else:
raise ValueError(f"Figure {new_num} is already in use!") |
from __future__ import division
from __future__ import print_function
import math
import mxnet as mx
import mxnext as X
from symbol.builder import FasterRcnn, Bbox2fcHead
class CascadeRcnn(object):
def __init__(self):
pass
@staticmethod
def get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head, \
bbox_head_2nd, bbox_head_3rd):
gt_bbox = X.var("gt_bbox")
im_info = X.var("im_info")
rpn_feat = backbone.get_rpn_feature()
rcnn_feat = backbone.get_rcnn_feature()
rpn_feat = neck.get_rpn_feature(rpn_feat)
rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
rpn_head.get_anchor()
rpn_loss = rpn_head.get_loss(rpn_feat, gt_bbox, im_info)
# stage1
proposal, bbox_cls, bbox_target, bbox_weight = \
rpn_head.get_sampled_proposal(
rpn_feat,
gt_bbox,
im_info
)
roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
bbox_loss = bbox_head.get_loss(
roi_feat,
bbox_cls,
bbox_target,
bbox_weight
)
bbox_pred = bbox_head._bbox_delta
# stage2
# though call get_sampled_proposal, bbox_head does not sample rois
proposal_2nd, bbox_cls_2nd, bbox_target_2nd, bbox_weight_2nd = \
bbox_head.get_sampled_proposal(
proposal,
bbox_pred,
gt_bbox,
im_info
)
roi_feat_2nd = roi_extractor.get_roi_feature(rcnn_feat, proposal_2nd)
bbox_loss_2nd = bbox_head_2nd.get_loss(
roi_feat_2nd,
bbox_cls_2nd,
bbox_target_2nd,
bbox_weight_2nd
)
bbox_pred_2nd = bbox_head_2nd._bbox_delta
# stage3
# though call get_sampled_proposal, bbox_head does not sample rois
proposal_3rd, bbox_cls_3rd, bbox_target_3rd, bbox_weight_3rd = \
bbox_head_2nd.get_sampled_proposal(
proposal_2nd,
bbox_pred_2nd,
gt_bbox,
im_info
)
roi_feat_3rd = roi_extractor.get_roi_feature(rcnn_feat, proposal_3rd)
bbox_loss_3rd = bbox_head_3rd.get_loss(
roi_feat_3rd,
bbox_cls_3rd,
bbox_target_3rd,
bbox_weight_3rd
)
return X.group(rpn_loss + bbox_loss + bbox_loss_2nd + bbox_loss_3rd)
@staticmethod
def get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head, \
bbox_head_2nd, bbox_head_3rd):
rec_id, im_id, im_info, proposal, proposal_score = \
CascadeRcnn.get_rpn_test_symbol(backbone, neck, rpn_head)
rcnn_feat = backbone.get_rcnn_feature()
rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
# stage1
roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
_, bbox_xyxy = bbox_head.get_prediction(
roi_feat,
im_info,
proposal
)
# stage2
proposal_2nd = bbox_xyxy
roi_feat_2nd = roi_extractor.get_roi_feature(rcnn_feat, proposal_2nd)
_, bbox_xyxy_2nd = bbox_head_2nd.get_prediction(
roi_feat_2nd,
im_info,
proposal_2nd
)
# stage3
proposal_3rd = bbox_xyxy_2nd
roi_feat_3rd = roi_extractor.get_roi_feature(rcnn_feat, proposal_3rd)
cls_score_3rd, bbox_xyxy_3rd = bbox_head_3rd.get_prediction(
roi_feat_3rd,
im_info,
proposal_3rd
)
# passing feature from stage3 through stage1's weight
bbox_head.stage = "1st_3rd"
cls_score_1st_3rd, _ = bbox_head.get_prediction(
roi_feat_3rd,
im_info
)
# passing feature from stage3 through stage2's weight
bbox_head_2nd.stage = "2nd_3rd"
cls_score_2nd_3rd, _ = bbox_head_2nd.get_prediction(
roi_feat_3rd,
im_info
)
# average score between [1st_3rd, 2nd_3rd, 3rd]
cls_score_avg = mx.sym.add_n(cls_score_1st_3rd, cls_score_2nd_3rd, cls_score_3rd) / 3
return X.group([rec_id, im_id, im_info, cls_score_avg, bbox_xyxy_3rd])
@staticmethod
def get_rpn_test_symbol(backbone, neck, rpn_head):
return FasterRcnn.get_rpn_test_symbol(backbone, neck, rpn_head)
@staticmethod
def get_refined_proposal(backbone, neck, rpn_head, roi_extractor, bbox_head, bbox_head_2nd, bbox_head_3rd, stage):
rec_id, im_id, im_info, proposal, proposal_score = CascadeRcnn.get_rpn_test_symbol(backbone, neck, rpn_head)
rcnn_feat = backbone.get_rcnn_feature()
rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
# stage1
roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
_, bbox_xyxy = bbox_head.get_prediction(roi_feat, im_info, proposal)
# stage2
proposal_2nd = bbox_xyxy
roi_feat_2nd = roi_extractor.get_roi_feature(rcnn_feat, proposal_2nd)
_, bbox_xyxy_2nd = bbox_head_2nd.get_prediction(roi_feat_2nd, im_info, proposal_2nd)
# stage3
proposal_3rd = bbox_xyxy_2nd
roi_feat_3rd = roi_extractor.get_roi_feature(rcnn_feat, proposal_3rd)
cls_score_3rd, bbox_xyxy_3rd = bbox_head_3rd.get_prediction(roi_feat_3rd, im_info, proposal_3rd)
# AR does not need score, just pass a dummy one
if stage == 2:
return X.group([rec_id, im_id, im_info, proposal_2nd, cls_score_3rd])
elif stage == 3:
return X.group([rec_id, im_id, im_info, proposal_3rd, cls_score_3rd])
"""
1. rename symbol via stage
2. (decode_bbox -> proposal_target) rather than (proposal -> proposal_target)
3. add bias for getting bbox head logit
"""
class CascadeBbox2fcHead(Bbox2fcHead):
def __init__(self, pBbox):
super().__init__(pBbox)
self.stage = pBbox.stage
self._cls_logit = None
self._bbox_delta = None
self._proposal = None
# declare weight and bias
stage = self.stage
xavier_init = mx.init.Xavier(factor_type="in", rnd_type="uniform", magnitude=3)
self.fc1_weight = X.var("bbox_fc1_%s_weight" % stage, init=xavier_init)
self.fc2_weight = X.var("bbox_fc2_%s_weight" % stage, init=xavier_init)
self.cls_logit_weight = X.var("bbox_cls_logit_%s_weight" % stage, init=X.gauss(0.01))
self.bbox_delta_weight = X.var("bbox_reg_delta_%s_weight" % stage, init=X.gauss(0.001))
self.fc1_bias = X.var("bbox_fc1_%s_bias" % stage)
self.fc2_bias = X.var("bbox_fc2_%s_bias" % stage)
self.cls_logit_bias = X.var("bbox_cls_logit_%s_bias" % stage)
self.bbox_delta_bias = X.var("bbox_reg_delta_%s_bias" % stage)
def _get_bbox_head_logit(self, conv_feat):
# comment this for re-infer in test stage
# if self._head_feat is not None:
# return self._head_feat
p = self.p
stage = self.stage
flatten = X.flatten(conv_feat, name="bbox_feat_flatten_" + stage)
reshape = X.reshape(flatten, (0, 0, 1, 1), name="bbox_feat_reshape_" + stage)
if p.normalizer.__name__ == "fix_bn":
fc1 = X.convrelu(
reshape,
filter=1024,
weight=self.fc1_weight,
bias=self.fc1_bias,
no_bias=False,
name="bbox_fc1_" + stage
)
fc2 = X.convrelu(
fc1,
filter=1024,
weight=self.fc2_weight,
bias=self.fc2_bias,
no_bias=False,
name="bbox_fc2_" + stage
)
elif p.normalizer.__name__ in ["sync_bn", "gn"]:
fc1 = X.convnormrelu(
p.normalizer,
reshape,
filter=1024,
weight=self.fc1_weight,
bias=self.fc1_bias,
no_bias=False,
name="bbox_fc1_" + stage
)
fc2 = X.convnormrelu(
p.normalizer,
fc1,
filter=1024,
weight=self.fc2_weight,
bias=self.fc2_bias,
no_bias=False,
name="bbox_fc2_" + stage
)
else:
raise NotImplementedError("Unsupported normalizer: {}".format(p.normalizer.__name__))
self._head_feat = fc2
return self._head_feat
def get_output(self, conv_feat):
p = self.p
stage = self.stage
num_class = p.num_class
num_reg_class = 2 if p.regress_target.class_agnostic else num_class
head_feat = self._get_bbox_head_logit(conv_feat)
if not isinstance(head_feat, dict):
head_feat = dict(classification=head_feat, regression=head_feat)
if p.fp16:
head_feat["classification"] = X.to_fp32(head_feat["classification"], name="bbox_cls_head_to_fp32_" + stage)
head_feat["regression"] = X.to_fp32(head_feat["regression"], name="bbox_reg_head_to_fp32_" + stage)
cls_logit = X.fc(
head_feat["classification"],
filter=num_class,
weight=self.cls_logit_weight,
bias=self.cls_logit_bias,
name='bbox_cls_logit_' + stage
)
bbox_delta = X.fc(
head_feat["regression"],
filter=4 * num_reg_class,
weight=self.bbox_delta_weight,
bias=self.bbox_delta_bias,
name='bbox_reg_delta_' + stage
)
self._cls_logit = cls_logit
self._bbox_delta = bbox_delta
return cls_logit, bbox_delta
def get_prediction(self, conv_feat, im_info, proposal=None):
p = self.p
stage = self.stage
bbox_mean = p.regress_target.mean
bbox_std = p.regress_target.std
batch_image = p.batch_image
num_class = p.num_class
class_agnostic = p.regress_target.class_agnostic
num_reg_class = 2 if class_agnostic else num_class
cls_logit, bbox_delta = self.get_output(conv_feat)
if proposal is None:
bbox_xyxy = None
else:
bbox_delta = X.reshape(
bbox_delta,
shape=(batch_image, -1, 4 * num_reg_class),
name='bbox_delta_reshape_' + stage
)
bbox_xyxy = X.decode_bbox(
rois=proposal,
bbox_pred=bbox_delta,
im_info=im_info,
name='decode_bbox_' + stage,
bbox_mean=bbox_mean,
bbox_std=bbox_std,
class_agnostic=class_agnostic
)
cls_score = X.softmax(
cls_logit,
axis=-1,
name='bbox_cls_score_' + stage
)
cls_score = X.reshape(
cls_score,
shape=(batch_image, -1, num_class),
name='bbox_cls_score_reshape_' + stage
)
return cls_score, bbox_xyxy
def get_loss(self, conv_feat, cls_label, bbox_target, bbox_weight):
p = self.p
stage = self.stage
loss_weight = p.loss_weight
batch_roi = p.image_roi * p.batch_image
batch_image = p.batch_image
cls_logit, bbox_delta = self.get_output(conv_feat)
scale_loss_shift = 128.0 if p.fp16 else 1.0
# classification loss
cls_loss = X.softmax_output(
data=cls_logit,
label=cls_label,
normalization='batch',
grad_scale=loss_weight * scale_loss_shift,
name='bbox_cls_loss_' + stage
)
# bounding box regression
reg_loss = X.smooth_l1(
bbox_delta - bbox_target,
scalar=1.0,
name='bbox_reg_l1_' + stage
)
reg_loss = bbox_weight * reg_loss
reg_loss = X.loss(
reg_loss,
grad_scale=loss_weight / batch_roi * scale_loss_shift,
name='bbox_reg_loss_' + stage,
)
# append label
cls_label = X.reshape(
cls_label,
shape=(batch_image, -1),
name='bbox_label_reshape_' + stage
)
cls_label = X.block_grad(cls_label, name='bbox_label_blockgrad_' + stage)
# output
return cls_loss, reg_loss, cls_label
def get_all_proposal(self, rois, bbox_pred, im_info):
if self._proposal is not None:
return self._proposal
p = self.p
stage = self.stage
batch_image = p.batch_image
bbox_mean = p.regress_target.mean
bbox_std = p.regress_target.std
num_class = p.num_class
class_agnostic = p.regress_target.class_agnostic
num_reg_class = 2 if class_agnostic else num_class
bbox_pred = X.reshape(
bbox_pred,
shape=(batch_image, -1, 4 * num_reg_class),
name='bbox_delta_reshape_' + stage
)
proposal = X.decode_bbox(
rois=rois,
bbox_pred=bbox_pred,
im_info=im_info,
name='decode_bbox_' + stage,
bbox_mean=bbox_mean,
bbox_std=bbox_std,
class_agnostic=class_agnostic
)
# append None for dummy proposal score
proposal = (proposal, None)
self._proposal = proposal
return proposal
def get_sampled_proposal(self, rois, bbox_pred, gt_bbox, im_info):
p = self.p
stage = self.stage
batch_image = p.batch_image
proposal_wo_gt = p.subsample_proposal.proposal_wo_gt
image_roi = p.subsample_proposal.image_roi
fg_fraction = p.subsample_proposal.fg_fraction
fg_thr = p.subsample_proposal.fg_thr
bg_thr_hi = p.subsample_proposal.bg_thr_hi
bg_thr_lo = p.subsample_proposal.bg_thr_lo
num_reg_class = p.bbox_target.num_reg_class
class_agnostic = p.bbox_target.class_agnostic
bbox_target_weight = p.bbox_target.weight
bbox_target_mean = p.bbox_target.mean
bbox_target_std = p.bbox_target.std
(proposal, proposal_score) = self.get_all_proposal(rois, bbox_pred, im_info)
(bbox, label, bbox_target, bbox_weight) = X.proposal_target(
rois=proposal,
gt_boxes=gt_bbox,
num_classes=num_reg_class,
class_agnostic=class_agnostic,
batch_images=batch_image,
proposal_without_gt=proposal_wo_gt,
image_rois=image_roi,
fg_fraction=fg_fraction,
fg_thresh=fg_thr,
bg_thresh_hi=bg_thr_hi,
bg_thresh_lo=bg_thr_lo,
bbox_weight=bbox_target_weight,
bbox_mean=bbox_target_mean,
bbox_std=bbox_target_std,
name="subsample_proposal_" + stage
)
label = X.reshape(label, (-3, -2))
bbox_target = X.reshape(bbox_target, (-3, -2))
bbox_weight = X.reshape(bbox_weight, (-3, -2))
return bbox, label, bbox_target, bbox_weight
|
package nbntest
import (
"github.com/BurntSushi/toml"
gct "github.com/freman/go-commontypes"
)
type SpeedtestConfiguration struct {
Interval gct.Duration `toml:"interval"`
Site int `toml:"site"`
Fastest int `toml:"fastest"`
}
type ModemConfiguration struct {
Interval gct.Duration `toml:"interval"`
Interface string `toml:"type"`
Configuration map[string]toml.Primitive `toml:"config"`
}
type Configuration struct {
md toml.MetaData `toml:"-"`
Modem *ModemConfiguration `toml:"modem"`
SpeedTest *SpeedtestConfiguration `toml:"speedtest"`
Output map[string]toml.Primitive `toml:"output"`
}
func LoadConfiguration(file string) (*Configuration, error) {
var err error
config := Configuration{}
config.md, err = toml.DecodeFile(file, &config)
return &config, err
}
func (c *Configuration) UnifyModemConfiguration(name string, v interface{}) (err error) {
if c.md.IsDefined("modem", "config", name) {
err = c.md.PrimitiveDecode(c.Modem.Configuration[name], v)
}
return
}
func (c *Configuration) UnifyOutputConfiguration(name string, v interface{}) (err error) {
if c.md.IsDefined("output", name) {
err = c.md.PrimitiveDecode(c.Output[name], v)
}
return
}
|
/**
* A Material Design button.
*
* @see <a href="https://www.google.com/design/spec/components/buttons.html">Buttons (Google design guidelines)</a>
*/
public class MaterialButton extends JButton {
private RippleEffect ripple;
private ElevationEffect elevation;
private Type type = Type.DEFAULT;
private boolean isMousePressed = false;
private boolean isMouseOver = false;
private Color rippleColor = Color.WHITE;
private Cursor cursor = super.getCursor();
private int borderRadius = 2;
/**
* Creates a new button.
*/
public MaterialButton() {
ripple = RippleEffect.applyTo(this);
elevation = ElevationEffect.applyTo(this, 0);
setOpaque(false);
addMouseListener(new MouseAdapter() {
@Override
public void mousePressed(MouseEvent mouseEvent) {
isMousePressed = true;
}
@Override
public void mouseReleased(MouseEvent mouseEvent) {
isMousePressed = false;
repaint();
}
@Override
public void mouseEntered(MouseEvent e) {
isMouseOver = true;
repaint();
}
@Override
public void mouseExited(MouseEvent e) {
isMouseOver = false;
repaint();
}
});
setFont(Roboto.MEDIUM.deriveFont(14f));
setUI(new BasicButtonUI() {
@Override
public boolean contains(JComponent c, int x, int y) {
return x > MaterialShadow.OFFSET_LEFT && y > MaterialShadow.OFFSET_TOP
&& x < getWidth() - MaterialShadow.OFFSET_RIGHT && y < getHeight() - MaterialShadow.OFFSET_BOTTOM;
}
});
}
/**
* Gets the type of this button.
*
* @return the type of this button
* @see Type
*/
public Type getType() {
return type;
}
/**
* Sets the type of this button.
*
* @param type the type of this button
* @see Type
*/
public void setType(Type type) {
this.type = type;
repaint();
}
/**
* Sets the background color of this button.
* <p>
     * Keep in mind that setting a background color in a Material component like
     * this will also set the foreground color to either white or black and the
     * ripple color to a brighter or darker shade of the color, depending on how
     * bright or dark the chosen background color is. If you want to use a
     * custom foreground color and ripple color, ensure the background color has
     * been set first.
* <p>
* <b>NOTE:</b> It is up to the look and feel to honor this property, some
* may choose to ignore it. To avoid any conflicts, using the
* <a href="https://docs.oracle.com/javase/7/docs/api/javax/swing/plaf/metal/package-summary.html">
* Metal Look and Feel</a> is recommended.
*/
@Override
public void setBackground(Color bg) {
super.setBackground(bg);
        setForeground(Utils.isDark(bg) ? MaterialColor.WHITE : MaterialColor.BLACK);
        setRippleColor(Utils.isDark(bg) ? MaterialColor.WHITE : Utils.darken(Utils.darken(bg)));
}
/**
* Gets the ripple color.
* @return the ripple color
*/
public Color getRippleColor() {
return rippleColor;
}
/**
* Sets the ripple color. You should only do this for flat buttons.
* @param rippleColor the ripple color
*/
public void setRippleColor(Color rippleColor) {
this.rippleColor = rippleColor;
}
/**
* Gets the current border radius of this button.
* @return the current border radius of this button, in pixels.
*/
public int getBorderRadius() {
return borderRadius;
}
/**
* Sets the border radius of this button. You can define a custom radius in
* order to get some rounded corners in your button, making it look like a
* pill or even a circular action button.
* @param borderRadius the new border radius of this button, in pixels.
*/
public void setBorderRadius(int borderRadius) {
this.borderRadius = borderRadius;
elevation.setBorderRadius(borderRadius);
}
@Override
public void setEnabled(boolean b) {
super.setEnabled(b);
elevation.setLevel(getElevation());
super.setCursor(b ? cursor : Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));
}
@Override
public void setCursor(Cursor cursor) {
super.setCursor(cursor);
this.cursor = cursor;
}
@Override
protected void processFocusEvent(FocusEvent focusEvent) {
super.processFocusEvent(focusEvent);
elevation.setLevel(getElevation());
}
@Override
protected void processMouseEvent(MouseEvent mouseEvent) {
super.processMouseEvent(mouseEvent);
elevation.setLevel(getElevation());
}
private int getElevation() {
if (isMousePressed) {
return 2;
} else if (type == Type.RAISED || isFocusOwner() || isMouseOver) {
return 1;
} else {
return 0;
}
}
@Override
protected void paintComponent(Graphics g) {
Graphics2D g2 = (Graphics2D) g;
g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
g2.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
if (type != Type.FLAT && isEnabled()) {
elevation.paint(g);
}
g2.translate(MaterialShadow.OFFSET_LEFT, MaterialShadow.OFFSET_TOP);
final int offset_lr = MaterialShadow.OFFSET_LEFT + MaterialShadow.OFFSET_RIGHT;
final int offset_td = MaterialShadow.OFFSET_TOP + MaterialShadow.OFFSET_BOTTOM;
if (isEnabled()) {
g2.setColor(getBackground());
g2.fill(new RoundRectangle2D.Float(0, 0, getWidth() - offset_lr, getHeight() - offset_td, borderRadius*2, borderRadius*2));
            g2.setColor(new Color(rippleColor.getRed() / 255f, rippleColor.getGreen() / 255f, rippleColor.getBlue() / 255f, 0.12f));
if ((type == Type.FLAT && isMouseOver) || isFocusOwner()) {
g2.fill(new RoundRectangle2D.Float(0, 0, getWidth() - offset_lr, getHeight() - offset_td, borderRadius*2, borderRadius*2));
}
} else {
Color bg = getBackground();
g2.setColor(new Color(bg.getRed() / 255f, bg.getGreen() / 255f, bg.getBlue() / 255f, 0.6f));
g2.fill(new RoundRectangle2D.Float(0, 0, getWidth() - offset_lr, getHeight() - offset_td, borderRadius*2, borderRadius*2));
}
FontMetrics metrics = g.getFontMetrics(getFont());
int x = (getWidth() - offset_lr - metrics.stringWidth(getText().toUpperCase())) / 2;
int y = (getHeight() - offset_td - metrics.getHeight()) / 2 + metrics.getAscent();
g2.setFont(getFont());
if (isEnabled()) {
g2.setColor(getForeground());
} else {
Color fg = getForeground();
g2.setColor(new Color(fg.getRed() / 255f, fg.getGreen() / 255f, fg.getBlue() / 255f, 0.6f));
}
g2.drawString(getText().toUpperCase(), x, y);
if (isEnabled()) {
g2.setClip(new RoundRectangle2D.Float(0, 0, getWidth() - offset_lr, getHeight() - offset_td, Math.max(borderRadius*2 - 4, 0), Math.max(borderRadius*2 - 4, 0)));
g2.setColor(rippleColor);
ripple.paint(g2);
}
}
@Override
protected void paintBorder(Graphics g) {
//intentionally left blank
}
/**
* Button types.
*/
public enum Type {
/** A default button. */
DEFAULT,
/**
* A raised button. Raised buttons have a shadow even if they are not
* focused.
*/
RAISED,
/**
* A flat button. Flat buttons don't have shadows and are typically
* transparent.
*/
FLAT
}
} |
// game/ring/handler/fuse.go
package handler
import (
"fgame/fgame/common/codec"
uipb "fgame/fgame/common/codec/pb/ui"
"fgame/fgame/common/dispatch"
"fgame/fgame/common/lang"
commonlog "fgame/fgame/common/log"
"fgame/fgame/core/session"
coreutils "fgame/fgame/core/utils"
chatlogic "fgame/fgame/game/chat/logic"
chattypes "fgame/fgame/game/chat/types"
"fgame/fgame/game/common/common"
funcopentypes "fgame/fgame/game/funcopen/types"
"fgame/fgame/game/inventory/inventory"
inventorylogic "fgame/fgame/game/inventory/logic"
playerinventory "fgame/fgame/game/inventory/player"
inventorytypes "fgame/fgame/game/inventory/types"
"fgame/fgame/game/item/item"
itemtypes "fgame/fgame/game/item/types"
noticelogic "fgame/fgame/game/notice/logic"
"fgame/fgame/game/player"
playerlogic "fgame/fgame/game/player/logic"
playertypes "fgame/fgame/game/player/types"
"fgame/fgame/game/processor"
propertylogic "fgame/fgame/game/property/logic"
playerproperty "fgame/fgame/game/property/player"
ringlogic "fgame/fgame/game/ring/logic"
"fgame/fgame/game/ring/pbutil"
playerring "fgame/fgame/game/ring/player"
ringtemplate "fgame/fgame/game/ring/template"
ringtypes "fgame/fgame/game/ring/types"
gamesession "fgame/fgame/game/session"
skilltemplate "fgame/fgame/game/skill/template"
"fgame/fgame/pkg/mathutils"
"fmt"
log "github.com/Sirupsen/logrus"
)
func init() {
processor.Register(codec.MessageType(uipb.MessageType_CS_RING_FUSE_TYPE), dispatch.HandlerFunc(handleRingFuse))
}
func handleRingFuse(s session.Session, msg interface{}) (err error) {
	log.Debug("ring: start handling ring fuse request")
gcs := gamesession.SessionInContext(s.Context())
pl := gcs.Player()
tpl := pl.(player.Player)
csRingFuse := msg.(*uipb.CSRingFuse)
isBag := csRingFuse.GetIsBag()
typ := ringtypes.RingType(0)
index := int32(0)
if isBag {
index = csRingFuse.GetIndex()
} else {
		typ = ringtypes.RingType(csRingFuse.GetType())
		if !typ.Valid() {
			log.WithFields(
				log.Fields{
					"playerId": tpl.GetId(),
					"type":     int32(typ),
				}).Warn("ring: invalid ring type")
return
}
}
needIndex := csRingFuse.GetNeedIndex()
err = ringFuse(tpl, typ, isBag, index, needIndex)
if err != nil {
log.WithFields(
log.Fields{
"playerId": tpl.GetId(),
"err": err,
			}).Error("ring: failed to handle ring fuse request")
return
}
log.WithFields(
log.Fields{
"playerId": tpl.GetId(),
		}).Debug("ring: ring fuse request handled successfully")
return
}
func ringFuse(pl player.Player, typ ringtypes.RingType, isBag bool, index int32, needIndex int32) (err error) {
if !pl.IsFuncOpen(funcopentypes.FuncOpenTypeRingFuse) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
			}).Warn("ring: feature not unlocked")
playerlogic.SendSystemMessage(pl, lang.CommonFuncNoOpen)
return
}
inventoryManager := pl.GetPlayerDataManager(playertypes.PlayerInventoryDataManagerType).(*playerinventory.PlayerInventoryDataManager)
propertyManager := pl.GetPlayerDataManager(playertypes.PlayerPropertyDataManagerType).(*playerproperty.PlayerPropertyDataManager)
ringManager := pl.GetPlayerDataManager(playertypes.PlayerRingDataManagerType).(*playerring.PlayerRingDataManager)
itemId := int32(0)
var itemType itemtypes.ItemType
var propertyData inventorytypes.ItemPropertyData
	// Check whether the ring in the first slot comes from the bag
if isBag {
it := inventoryManager.FindItemByIndex(inventorytypes.BagTypePrim, index)
		// item does not exist
if it == nil || it.IsEmpty() {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"index": index,
				}).Warn("ring: use ring, item does not exist")
playerlogic.SendSystemMessage(pl, lang.InventoryItemNoExist)
return
}
itemId = it.ItemId
propertyData = it.PropertyData
		// Check whether the item is a special ring
itemTemplate := item.GetItemService().GetItem(int(itemId))
if !itemTemplate.IsTeRing() {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"index": index,
				}).Warn("ring: use ring, item is not a special ring")
playerlogic.SendSystemMessage(pl, lang.RingIsNotRing)
return
}
itemType = itemTemplate.GetItemType()
fuseTemp := ringtemplate.GetRingTemplateService().GetRingFuseSynthesisTemplate(itemId)
if fuseTemp == nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"index": index,
				}).Warn("ring: ring fuse template does not exist")
playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
return
}
createItemId := fuseTemp.ItemId
createItemNum := fuseTemp.ItemCount
		// Check whether the bag has enough free slots
if !inventoryManager.HasEnoughSlot(createItemId, createItemNum) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"createItemId": createItemId,
"createItemNum": createItemNum,
				}).Warn("ring: not enough bag space")
playerlogic.SendSystemMessage(pl, lang.InventorySlotNoEnough)
return
}
ringTemp := ringtemplate.GetRingTemplateService().GetRingTemplate(itemId)
if ringTemp == nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"itemId": itemId,
				}).Warn("ring: ring template does not exist")
playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
return
}
typ = ringTemp.GetRingType()
} else {
ringObj := ringManager.GetPlayerRingObject(typ)
if ringObj == nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
				}).Warn("ring: player has not equipped this ring")
playerlogic.SendSystemMessage(pl, lang.RingNotEquip)
return
}
itemId = ringObj.GetItemId()
}
needIt := inventoryManager.FindItemByIndex(inventorytypes.BagTypePrim, needIndex)
	// The item in the second slot does not exist
if needIt == nil || needIt.IsEmpty() {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"index": index,
			}).Warn("ring: use ring, item does not exist")
playerlogic.SendSystemMessage(pl, lang.InventoryItemNoExist)
return
}
needItemId := needIt.ItemId
	// Check whether the item in the second slot is a special ring
needItemTemplate := item.GetItemService().GetItem(int(needItemId))
if !needItemTemplate.IsTeRing() {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"needIndex": needIndex,
"needItemId": needItemId,
			}).Warn("ring: use ring, item is not a special ring")
playerlogic.SendSystemMessage(pl, lang.RingIsNotRing)
return
}
	// Fuse template
fuseTemp := ringtemplate.GetRingTemplateService().GetRingFuseSynthesisTemplate(itemId)
if fuseTemp == nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"index": index,
"itemId": itemId,
			}).Warn("ring: fuse template does not exist")
playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
return
}
if needItemId != fuseTemp.NeedItemId2 {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
"needIndex": needIndex,
"needItemId": needItemId,
"NeedItemId2": fuseTemp.NeedItemId2,
			}).Warn("ring: item required for ring fuse does not match the given item")
playerlogic.SendSystemMessage(pl, lang.RingFuseItemNotSuit)
return
}
	// Currency costs
costGold := int64(fuseTemp.NeedGold)
costSilver := int64(fuseTemp.NeedSilver)
costBindGold := int64(fuseTemp.NeedBindGold)
	// Check for enough silver
if costSilver != 0 {
flag := propertyManager.HasEnoughSilver(costSilver)
if !flag {
log.WithFields(log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
			}).Warn("ring: not enough silver, cannot fuse")
playerlogic.SendSystemMessage(pl, lang.PlayerSilverNoEnough)
return
}
}
	// Check for enough gold
if costGold != 0 {
flag := propertyManager.HasEnoughGold(costGold, false)
if !flag {
log.WithFields(log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
			}).Warn("ring: not enough gold, cannot fuse")
playerlogic.SendSystemMessage(pl, lang.PlayerGoldNoEnough)
return
}
}
	// Check for enough bound gold
needBindGold := costBindGold + costGold
if needBindGold != 0 {
flag := propertyManager.HasEnoughGold(needBindGold, true)
if !flag {
log.WithFields(log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
			}).Warn("ring: not enough bound gold, cannot fuse")
playerlogic.SendSystemMessage(pl, lang.PlayerGoldNoEnough)
return
}
}
	// Check whether there are enough materials
needItemNum := fuseTemp.NeedItemCount2
curNum := inventoryManager.NumOfItems(needItemId)
if curNum < needItemNum {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
			}).Warn("ring: not enough required items")
playerlogic.SendSystemMessage(pl, lang.InventoryItemNoEnough)
return
}
	// Spend the currency
goldUseReason := commonlog.GoldLogReasonRingFuse
goldUseReasonStr := fmt.Sprintf(goldUseReason.String(), typ.String())
silverUseReason := commonlog.SilverLogReasonRingFuse
silverUseReasonStr := fmt.Sprintf(silverUseReason.String(), typ.String())
flag := propertyManager.Cost(costBindGold, costGold, goldUseReason, goldUseReasonStr, costSilver, silverUseReason, silverUseReasonStr)
if !flag {
		panic(fmt.Errorf("ring: spending currency for ring fuse should succeed"))
}
	// Sync currency changes
if costGold != 0 || costSilver != 0 || costBindGold != 0 {
propertylogic.SnapChangedProperty(pl)
}
if needItemNum > 0 {
reason := commonlog.InventoryLogReasonRingAdvance
reasonText := fmt.Sprintf(reason.String(), typ.String())
flag, err = inventoryManager.RemoveIndex(inventorytypes.BagTypePrim, needIndex, needItemNum, reason, reasonText)
if !flag {
			panic("ring: removing items should succeed")
}
if err != nil {
return
}
}
success := mathutils.RandomHit(common.MAX_RATE, int(fuseTemp.SuccessRate))
	// On success, consume the item in the first slot
if success && isBag {
reason := commonlog.InventoryLogReasonRingAdvance
reasonText := fmt.Sprintf(reason.String(), typ.String())
flag, err = inventoryManager.RemoveIndex(inventorytypes.BagTypePrim, index, 1, reason, reasonText)
if !flag {
			panic("ring: removing items should succeed")
}
if err != nil {
return
}
}
createItemId := fuseTemp.ItemId
createItemNum := fuseTemp.ItemCount
if isBag {
if success {
createItemTemp := item.GetItemService().GetItem(int(createItemId))
if createItemTemp == nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"typ": typ.String(),
					}).Warn("ring: item template of the fuse result does not exist")
playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
return
}
reason := commonlog.InventoryLogReasonRingFuseGet
reasonText := fmt.Sprintf(reason.String(), typ.String())
flag = inventoryManager.AddItemLevelWithPropertyData(createItemId, createItemNum, createItemTemp.NeedLevel, createItemTemp.GetBindType(), propertyData, reason, reasonText)
if !flag {
				panic(fmt.Errorf("ring: adding items should succeed"))
}
}
} else {
if success {
ringManager.RingFuseSuccess(typ, createItemId)
}
}
	// Push inventory changes
inventorylogic.SnapInventoryChanged(pl)
	// Push property changes
ringlogic.RingPropertyChange(pl)
propertylogic.SnapChangedProperty(pl)
	// Broadcast notice
plName := coreutils.FormatColor(chattypes.ColorTypePlayerName, coreutils.FormatNoticeStr(pl.GetName()))
fuseNum := int32(0)
skillId := int32(0)
rongheTemp := ringtemplate.GetRingTemplateService().GetRingTemplate(createItemId)
if rongheTemp != nil {
fuseNum = rongheTemp.Level
skillId = rongheTemp.SkillId
}
itemTemp := item.GetItemService().GetItem(int(createItemId))
if itemTemp == nil {
		log.Warningf("ring: item template does not exist, itemId:%d", createItemId)
return
}
qualityType := itemtypes.ItemQualityType(itemTemp.Quality)
itemName := coreutils.FormatColor(qualityType.GetColor(), fmt.Sprintf("[%s]", typ.String()))
	data, ok := propertyData.(*ringtypes.RingPropertyData)
	if !ok {
		base := inventorytypes.CreateDefaultItemPropertyDataBase()
		propertyData = inventory.CreatePropertyDataInterface(itemType, base)
		// re-assert so the link args below do not dereference a nil pointer
		data, _ = propertyData.(*ringtypes.RingPropertyData)
	}
args := []int64{int64(chattypes.ChatLinkTypeItem), int64(createItemId), int64(data.StrengthLevel), int64(data.Advance), int64(data.JingLingLevel)}
infoLink := coreutils.FormatLink(itemName, args)
	// Compute the battle power granted by this fuse level
power := int64(0)
if skillId != 0 {
skillTemplate := skilltemplate.GetSkillTemplateService().GetSkillTemplate(skillId)
power += int64(skillTemplate.AddPower)
power += propertylogic.CulculateForce(rongheTemp.GetBattlePropertyMap())
}
content := fmt.Sprintf(lang.GetLangService().ReadLang(lang.RingFuseNotice), plName, infoLink, fuseNum, power)
chatlogic.SystemBroadcast(chattypes.MsgTypeText, []byte(content))
noticelogic.NoticeNumBroadcast([]byte(content), 0, 1)
	log.Debug("ring: sending ring fuse response")
scRingFuse := pbutil.BuildSCRingFuse(success, isBag, int32(typ), index, needIndex, createItemId, createItemNum)
pl.SendMsg(scRingFuse)
return
}
|
N = int(input())
s_list = [input() for _ in range(N)]
M = int(input())
t_list = [input() for _ in range(M)]
rev_list = []
for s in s_list:
    a = s_list.count(s)
    b = t_list.count(s)
    rev_list.append(max(a - b, 0))
print(max(rev_list)) |
# intro-to-sqlalchemy/simplest_relationship_example.py
from sqlalchemy import create_engine, Column, String, Integer, ForeignKey
from sqlalchemy.orm import declarative_base, relationship
engine = create_engine("sqlite+pysqlite:///:memory:", echo=True, future=True)
Base = declarative_base()
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String(30))
    fullname = Column(String)
    # one-to-many: a user has many addresses
    addresses = relationship("Address", back_populates="user")
class Address(Base):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
city = Column(String, nullable=False)
    user_id = Column(Integer, ForeignKey("users.id"))
    # many-to-one: each address belongs to one user
    user = relationship("User", back_populates="addresses")
|
Gene Expression Profiles Identify Features Common to Lobular and Ductal Premalignant Breast Lesions
Premalignant lesions have been identified in both the ductal and lobular units of the breast epithelium. These lesions have a 4-fold increase in risk of progression to invasive breast cancer, but 80% will remain indolent. This may be due, in part, to the uncertainty of diagnoses, as inter-observer reproducibility is poor. When treated with prophylactic hormone therapies blocking the estrogen receptor, up to 40% of women still develop tumors. Therefore the challenge is to develop diagnostic tests that identify the subset of high-risk lesions and provide appropriate prophylactic therapies. We undertook genome-wide expression studies to define sets of genes that show reproducible alterations in atypical hyperplastic lesions. Patients with sporadic atypical hyperplasias and no evidence of breast cancer for at least 2 years following the initial biopsy were selected. RNA quality from formalin-fixed, paraffin-embedded (FFPE) tissue was assessed using the ratio of 150 and 500 bp amplicons of β-actin determined by RT-qPCR. A total of 23 patients were included with diagnoses of pure flattened epithelial atypia (FEA, n=2), atypical lobular lesions (n=9), and atypical ductal lesions (n=12). The atypical lesions and histologically normal breast epithelium were microdissected separately from 6 µm thick tissue sections from each patient. RNA was amplified linearly, labeled and hybridized to Affymetrix ST1.0 arrays. Genes differentially expressed by >2-fold between the lesion and normal epithelium within each patient were used to identify gene expression signatures of atypical hyperplasias. Hierarchical clustering of a 512-gene signature yielded 3 major groupings: Benign, Intermediate, Atypia. These results reveal that atypia of the lobular and ductal structures shares common underlying transcriptional features. The gene profile provides markers that can improve the reproducibility of diagnoses of atypia. Expression profiling of individuals who subsequently progress to invasive carcinoma will provide biomarkers of high-risk premalignancies and assist selection of therapeutic choices. |
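Here is a minimal sketch, on synthetic data, of the two computational steps described above: the per-patient >2-fold selection and the hierarchical clustering into three groups. The array shapes, the 0.5 recurrence cutoff, and the scipy settings are illustrative assumptions, not the study's actual pipeline.
# Hypothetical sketch of the analysis described above (synthetic data).
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
lesion = rng.lognormal(size=(23, 20000))   # 23 patients x 20k genes (toy values)
normal = rng.lognormal(size=(23, 20000))   # matched normal epithelium

# Genes altered >2-fold between lesion and normal within each patient
fold = np.log2(lesion / normal)
altered = np.abs(fold) > 1                 # |log2 fold change| > 1 means >2-fold

# Keep genes recurrently altered across patients (0.5 cutoff is an assumption)
signature = np.where(altered.mean(axis=0) > 0.5)[0]

# Hierarchical clustering of patients on the signature genes, cut into
# 3 groups (cf. the Benign / Intermediate / Atypia groupings)
Z = linkage(fold[:, signature], method="average", metric="correlation")
groups = fcluster(Z, t=3, criterion="maxclust")
print(groups)
 |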
package main
import (
"context"
"fmt"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/google/uuid"
"go.etcd.io/etcd/clientv3"
"time"
)
var (
config clientv3.Config
client *clientv3.Client
kv clientv3.KV
err error
lease clientv3.Lease
leaseId clientv3.LeaseID
getResp *clientv3.GetResponse
leaseGrantResp *clientv3.LeaseGrantResponse
keepResp *clientv3.LeaseKeepAliveResponse
keepRespChan <-chan *clientv3.LeaseKeepAliveResponse
watchStartRevision int64
watchRespChan <-chan clientv3.WatchResponse
watchResp clientv3.WatchResponse
event *clientv3.Event
watcher clientv3.Watcher
)
func main() {
rootContext := context.Background()
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{"172.16.58.3:2379"},
DialTimeout: 2 * time.Second,
})
// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
if client == nil || err == context.DeadlineExceeded {
// handle errors
fmt.Println(err)
panic("invalid connection!")
}
defer client.Close()
//testFunc(client, rootContext)
//testLease(client, rootContext)
testWatch(client, rootContext)
//testOp(client, rootContext)
}
// Basic test (get and set values)
func testFunc(cli *clientv3.Client, rootContext context.Context) {
kvc := clientv3.NewKV(cli)
	// Get a value
ctx, cancelFunc := context.WithTimeout(rootContext, time.Duration(2)*time.Second)
response, err := kvc.Get(ctx, "cc")
cancelFunc()
if err != nil {
fmt.Println(err)
}
kvs := response.Kvs
if len(kvs) > 0 {
fmt.Printf("last value is :%s\r\n", string(kvs[0].Value))
} else {
fmt.Printf("empty for %s\n", kvs)
}
	// Set a value
uuid := uuid.New().String()
fmt.Printf("new value is :%s\r\n", uuid)
ctx2, cancelFunc2 := context.WithTimeout(rootContext, time.Duration(2)*time.Second)
_, err = kvc.Put(ctx2, "cc", uuid)
if delRes, err := kvc.Delete(ctx2, "cc"); err != nil {
fmt.Println(err)
} else {
fmt.Printf("delete %s for %d\n", "cc", delRes.Deleted)
}
cancelFunc2()
if err != nil {
fmt.Println(err)
}
}
func testLease(client *clientv3.Client, rootContext context.Context) {
	// Request a lease
lease := clientv3.NewLease(client)
if leaseGrantResp, err = lease.Grant(context.TODO(), 10); err != nil {
fmt.Println(err)
return
}
leaseId = leaseGrantResp.ID
	// Keep the lease alive
lease = clientv3.NewLease(client)
keepLease(lease, int64(leaseId))
	// Get the KV API subset
kv = clientv3.NewKV(client)
uuid := uuid.New().String()
ctx, cancelFunc := context.WithTimeout(rootContext, time.Duration(2)*time.Second)
if _, err = kv.Put(ctx, "dd", uuid, clientv3.WithLease(leaseId)); err != nil {
fmt.Println(err)
return
}
cancelFunc()
for {
ctx2, cancelFunc2 := context.WithTimeout(rootContext, time.Duration(2)*time.Second)
if getResp, err = kv.Get(ctx2, "dd"); err != nil {
fmt.Println(err)
return
}
cancelFunc2()
if getResp.Count == 0 {
			fmt.Println("kv expired")
break
}
		fmt.Println("not expired yet:", getResp.Kvs)
time.Sleep(2 * time.Second)
}
}
func keepLease(lease clientv3.Lease, leaseId int64) {
if keepRespChan, err = lease.KeepAlive(context.TODO(), clientv3.LeaseID(leaseId)); err != nil {
fmt.Println("error is : ", err)
return
}
	go func() {
		// The lease is renewed roughly once per second, so one response
		// arrives per renewal; the channel closes once the lease expires
		// or the keep-alive is cancelled.
		for keepResp = range keepRespChan {
			fmt.Println("received auto-renew response:", keepResp.ID)
		}
		fmt.Println("lease is no longer being kept alive")
	}()
}
func testWatch(client *clientv3.Client, rootContext context.Context) {
uuid := uuid.New().String()
kv := clientv3.NewKV(client)
	// Simulate KV changes
go func() {
for {
_, err = kv.Put(context.TODO(), "bb", uuid)
_, err = kv.Delete(context.TODO(), "bb")
time.Sleep(1 * time.Second)
}
}()
	// GET the current value first, then watch for subsequent changes
if getResp, err = kv.Get(context.TODO(), "bb"); err != nil {
fmt.Println(err)
return
}
	// If the key currently exists, print it
if len(getResp.Kvs) != 0 {
		fmt.Println("current value:", string(getResp.Kvs[0].Value))
}
	// Get the current revision
watchStartRevision = getResp.Header.Revision + 1
	// Create a watcher
watcher = clientv3.NewWatcher(client)
	fmt.Println("watching from revision:", watchStartRevision)
ctx, cancelFunc := context.WithCancel(context.TODO())
time.AfterFunc(5*time.Second, func() {
cancelFunc()
})
watchRespChan = watcher.Watch(ctx, "bb", clientv3.WithRev(watchStartRevision))
	// Handle KV change events
for watchResp = range watchRespChan {
for _, event = range watchResp.Events {
switch event.Type {
case mvccpb.PUT:
				fmt.Println("put:", string(event.Kv.Value), "Revision:", event.Kv.CreateRevision, event.Kv.ModRevision)
case mvccpb.DELETE:
				fmt.Println("deleted", "Revision:", event.Kv.ModRevision)
}
}
}
}
func testOp(client *clientv3.Client, rootContext context.Context) {
kv := clientv3.NewKV(client)
uuid := uuid.New().String()
putOp := clientv3.OpPut("aa", uuid)
if opResp, err := kv.Do(context.TODO(), putOp); err != nil {
panic(err)
} else {
		fmt.Println("put Revision:", opResp.Put().Header.Revision)
}
	getOp := clientv3.OpGet("aa")
if opResp, err := kv.Do(context.TODO(), getOp); err != nil {
panic(err)
} else {
		fmt.Println("data Revision:", opResp.Get().Kvs[0].ModRevision)
		fmt.Println("data value:", string(opResp.Get().Kvs[0].Value))
}
}
// Package clientv3 implements the official Go etcd client for v3.
//
// Create client using `clientv3.New`:
//
// // expect dial time-out on ipv4 blackhole
// _, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"http://254.0.0.1:12345"},
// DialTimeout: 2 * time.Second,
// })
//
// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
// if err == context.DeadlineExceeded {
// // handle errors
// }
//
// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
// if err == grpc.ErrClientConnTimeout {
// // handle errors
// }
//
// cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second,
// })
// if err != nil {
// // handle error!
// }
// defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines.
//
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
// cancel()
// if err != nil {
// // handle error!
// }
// // use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// etcd client returns 3 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
// resp, err := kvc.Put(ctx, "", "")
// if err != nil {
// if err == context.Canceled {
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
// } else if err == rpctypes.ErrEmptyKey {
// // client-side error: key is not provided
// } else if ev, ok := status.FromError(err); ok {
// code := ev.Code()
// if code == codes.DeadlineExceeded {
// // server-side context might have timed-out first (due to clock skew)
// // while original client-side context is not timed-out yet
// }
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
// }
//
// go func() { cli.Close() }()
// _, err := kvc.Get(ctx, "a")
// if err != nil {
// // with etcd clientv3 <= v3.3
// if err == context.Canceled {
// // grpc balancer calls 'Get' with an inflight client.Close
// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x
// // grpc balancer calls 'Get' after client.Close.
// }
// // with etcd clientv3 >= v3.4
// if clientv3.IsConnCanceled(err) {
// // gRPC client connection is closed
// }
// }
//
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
//
|
import { createReducer } from 'retreon';
import * as counter from './actions';
export default createReducer(0, (handleAction) => [
handleAction(counter.increment, (state) => state + 1),
handleAction(counter.decrement, (state) => state - 1),
]);
|
With six games left in the season, the Carolina Panthers are on pace to make history. Yes, there’s the opportunity to go 16-0, which would be an impressive feat indeed. But the Panthers are threatening to do something far more notable, something that was thought to be impossible in Roger Goodell’s NFL. They could be ... hold onto your butts ... an NFL team that is legitimately fun.
If you are not entertained by Cam Newton’s outstanding play this year, or if you don’t like that his performance on the field has allowed him to dance in the end zone and wear a fox tail, then you can save yourself some time and click away from this column straight away. Maybe there’s an article somewhere out there championing Andy Dalton for looking out for America’s children and the “integrity” of the game as it faces an onslaught of harmless post-game comments. A column like that, or your angry aunt’s Facebook feed, might be more your style.
For the rest of the sports world, Cam Newton is entertaining. He’s fun. Believe it or not, entertainment and fun is kind of what draws most fans to sports in the first place. No little kid falls in love with football over concussions, TV timeouts, uniform violations, charity initiatives that give little money to charity, daily fantasy controversies, Greg Hardy, tone deaf commissioners, incomprehensible rules on what constitutes a catch, torn ligaments, missed extra points, touchbacks, backup quarterbacks or endless video replay. Yet all that is increasingly what defines the NFL. If not for players and personalities like Cam Newton, who actually dare to openly enjoy the game, the modern NFL would easily supplant church as Sunday’s most yawn-inducing institution.
Some players dance after touchdowns, some players hand the ball directly to the ref. Neither one is wrong. Variety is the spice of life and all that, right? At least Cam Newton didn’t focus group test his post-TD celebration first like Russell Wilson likely would have for maximum branding. Yet there still remain people upset that Cam Newton “dabs.” He just played a game on Sunday against a team called the REDSKINS and Newton, the smiling guy on the undefeated team, is the big problem? It’s a remarkable world we live in.
The 2015 Panthers are so much more than Cam Newton, though. Even if you’re part of the vocal anti-dabbing community, or if you remain unable to get past him taking a laptop when he was 19, there’s plenty to like about Carolina.
Love old-time, hard-hitting defense? The Panthers are fifth in the NFL in points allowed. Prefer explosive offense? Despite losing star receiver Kelvin Benjamin for the season to injury in August, Carolina is somehow third in the league in scoring at 29.9 points per game (and that’s sure to go way up with another game against the Saints remaining on the schedule).
Are you a fan of fat guy touchdowns? The Panthers have Mike Tolbert. Enjoy the films of Sandy Bullock? The Panthers employ Michael Oher. Appreciate funny Twitter accounts or just anything that craps all over Dan Snyder? Then you should follow @Panthers. Want to watch a shutdown cornerback who isn’t completely overexposed? Check out Josh Norman. Like players who are active in the community? Carolina gives you Greg Olsen, Charles Tillman and Thomas Davis. (And Newton, too. Yes, it appears that post-TD dabbing doesn’t necessarily make a man rotten to his core.) Are you entertained by fun names? Carolina has a guy named Star and another called Fozzy. Entertained by fun names and have the sense of humor of an 11 year-old boy? Carolina’s practice squad features Cox and a Ball. And are you just a little bit worried that the Panthers have too much fun, considering even head coach Ron Rivera dabs on folks? Do not fear. They have Derek Anderson as Newton’s backup, so he’ll make sure they “take this shit serious”.
Barring Roger Goodell implementing emergency rules that ban non-league approved enjoyment, the Panthers could go down as a team as fun as the 1999 “Greatest Show On Turf” St Louis Rams. That team featured a supermarket stock boy turned NFL superstar in Kurt Warner, do-everything back Marshall Faulk and a head coach in Dick Vermeil who seemed to react to most every touchdown by sobbing uncontrollably. That was a fun NFL team to root for and watch.
Before the ‘99 Rams, the 1985 Chicago Bears were the NFL’s fun benchmark. They featured Walter Payton, Refrigerator Perry, Jim McMahon, Mike Ditka and The Super Bowl Shuffle, a song released three months before the team actually won a title. Just imagine Cam Newton dropping a rap anthem right now about winning the Super Bowl. The heat generated from the takes would boil Earth’s oceans. But in 1985, the general reaction to the Bears video was: “Ha. That’s fun.” Maybe Ronald Reagan’s America really was a utopia after all.
We need the Panthers. They are the NFL’s best shot at beating the Patriots and our greatest hope for making the NFL fun again. If that’s not enough for you, consider that they’re playing the Cowboys this week on Thanksgiving. Cam Newton can humiliate Greg Hardy and Jerry Jones in front of the entire nation. If that’s not worth dancing over, then nothing is. |
def computeSimGraph(self, label, R=None, n_edges=None, n_gnodes=None,
similarity='He2', g=1, th_gauss=0.1, rescale=False,
blocksize=25_000, useGPU=False, tmp_folder=None,
save_every=1e300, verbose=True):
if not self.is_active_snode(label):
self.activate_snode(label)
self.snodes[label].computeSimGraph(
R=R, n_gnodes=n_gnodes, n_edges=n_edges, similarity=similarity,
g=g, th_gauss=th_gauss, rescale=rescale, blocksize=blocksize,
useGPU=useGPU, tmp_folder=tmp_folder, save_every=save_every,
verbose=verbose)
df = self.metagraph.df_nodes
REF = self.metagraph.REF
df.loc[df[REF] == label, 'category'] = 'similarity'
return |
def populate_channel_game_videos(youtube, game, parents, num_vids):
print("------------------")
print("Starting retrieval of channel videos for", len(parents), "channels...")
channel_videos = {}
counter = 0
for par_chan in parents:
if counter % 5 == 0:
print("Channels completed: " + str(counter), 100*counter/len(parents), "%")
        if par_chan not in channel_videos:
            channel_videos[par_chan] = get_channel_game_videos(youtube, game, par_chan, num_vids)
        counter += 1
print("Done Retrieving Channel Videos!")
print("------------------")
return channel_videos |
// pkg/apis/direct.csi.min.io/v1beta1/drive_matcher.go
// This file is part of MinIO Direct CSI
// Copyright (c) 2021 MinIO, Inc.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package v1beta1
import (
"github.com/mb0/glob"
"github.com/minio/direct-csi/pkg/sys"
"path/filepath"
"strings"
)
func (drive *DirectCSIDrive) MatchGlob(nodes, drives, status []string) bool {
getBasePath := func(in string) string {
path := strings.ReplaceAll(in, sys.DirectCSIPartitionInfix, "")
path = strings.ReplaceAll(path, sys.DirectCSIDevRoot+"/", "")
path = strings.ReplaceAll(path, sys.HostDevRoot+"/", "")
return filepath.Base(path)
}
matchGlob := func(patternList []string, name string, transformF transformFunc) bool {
name = transformF(name)
for _, p := range patternList {
if ok, _ := glob.Match(p, name); ok {
return true
}
}
return false
}
nodeList, driveList, statusesList := checkWildcards(nodes),
fmap(checkWildcards(drives), getBasePath),
fmap(checkWildcards(status), strings.ToLower)
	noOp := func(a string) string {
		return a
	}
return matchGlob(nodeList, drive.Status.NodeName, noOp) &&
matchGlob(driveList, drive.Status.Path, getBasePath) &&
matchGlob(statusesList, string(drive.Status.DriveStatus), strings.ToLower)
}
func (drive *DirectCSIDrive) MatchAccessTier(accessTierList []AccessTier) bool {
if len(accessTierList) == 0 {
return true
}
for _, at := range accessTierList {
if drive.Status.AccessTier == at {
return true
}
}
return false
}
|
def search(self, lines):
text = "\n".join(lines)
        while True:
m = RE_FENCE.search(text)
if m:
self.lang = m.group('lang')
self.hl_lines = m.group('hl_lines')
                # NOTE: assumes at least one registered entry matches;
                # otherwise 'code' below would be unbound
                for entry in reversed(self.markdown.superfences):
if entry["test"](self.lang):
code = entry["formatter"](m.group('code'), self.lang)
break
placeholder = self.markdown.htmlStash.store(code, safe=True)
text = '%s\n%s\n%s' % (text[:m.start()], placeholder, text[m.end():])
else:
break
return text.split("\n") |
The ISO has decided not to approve two NSA-designed block encryption algorithms: Speck and Simon. It's because the NSA is not trusted to put security ahead of surveillance:
A number of them voiced their distrust in emails to one another, seen by Reuters, and in written comments that are part of the process. The suspicions stem largely from internal NSA documents disclosed by Snowden that showed the agency had previously plotted to manipulate standards and promote technology it could penetrate. Budget documents, for example, sought funding to "insert vulnerabilities into commercial encryption systems."
More than a dozen of the experts involved in the approval process for Simon and Speck feared that if the NSA was able to crack the encryption techniques, it would gain a "back door" into coded transmissions, according to the interviews and emails and other documents seen by Reuters.
"I don't trust the designers," Israeli delegate Orr Dunkelman, a computer science professor at the University of Haifa, told Reuters, citing Snowden's papers. "There are quite a lot of people in NSA who think their job is to subvert standards. My job is to secure standards." |
/*
* Copyright (c) 2013 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBMENC_H_
#define WEBMENC_H_
#include <stdio.h>
#include <stdlib.h>
#if defined(_MSC_VER)
/* MSVS doesn't define off_t */
typedef __int64 off_t;
#else
#include <stdint.h>
#endif
#include "tools_common.h"
#include "vpx/vpx_encoder.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef off_t EbmlLoc;
struct cue_entry {
unsigned int time;
uint64_t loc;
};
struct EbmlGlobal {
int debug;
FILE *stream;
int64_t last_pts_ms;
vpx_rational_t framerate;
/* These pointers are to the start of an element */
off_t position_reference;
off_t seek_info_pos;
off_t segment_info_pos;
off_t track_pos;
off_t cue_pos;
off_t cluster_pos;
/* This pointer is to a specific element to be serialized */
off_t track_id_pos;
/* These pointers are to the size field of the element */
EbmlLoc startSegment;
EbmlLoc startCluster;
uint32_t cluster_timecode;
int cluster_open;
struct cue_entry *cue_list;
unsigned int cues;
};
/* Stereo 3D packed frame format */
typedef enum stereo_format {
STEREO_FORMAT_MONO = 0,
STEREO_FORMAT_LEFT_RIGHT = 1,
STEREO_FORMAT_BOTTOM_TOP = 2,
STEREO_FORMAT_TOP_BOTTOM = 3,
STEREO_FORMAT_RIGHT_LEFT = 11
} stereo_format_t;
void write_webm_seek_element(struct EbmlGlobal *ebml,
unsigned int id,
off_t pos);
void write_webm_file_header(struct EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
const struct vpx_rational *fps,
stereo_format_t stereo_fmt,
unsigned int fourcc);
void write_webm_block(struct EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
const vpx_codec_cx_pkt_t *pkt);
void write_webm_file_footer(struct EbmlGlobal *glob, int hash);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBMENC_H_
|
def pypy_json_encode(value, pretty=False):
global _dealing_with_problem
if pretty:
return pretty_json(value)
try:
_buffer = UnicodeBuilder(2048)
_value2json(value, _buffer)
output = _buffer.build()
return output
except Exception as e:
from mo_logs import Log
if _dealing_with_problem:
Log.error("Serialization of JSON problems", e)
else:
Log.warning("Serialization of JSON problems", e)
_dealing_with_problem = True
try:
return pretty_json(value)
except Exception as f:
Log.error("problem serializing object", f)
finally:
_dealing_with_problem = False |
import { mapReverse, filterReverse } from './index';
describe('Helpers', () => {
it('mapReverse should work with an empty array', () => {
const result = mapReverse([], (item) => item);
expect(result).toEqual([]);
});
it('mapReverse should work with identity selector', () => {
const result = mapReverse([1, 2, 3, 4, 5], (item) => item);
expect(result).toEqual([5, 4, 3, 2, 1]);
});
it('mapReverse should work with any selector', () => {
const result = mapReverse([1, 2, 3, 4, 5], (item) => item * item);
expect(result).toEqual([25, 16, 9, 4, 1]);
});
it('mapReverse should use ordinary index', () => {
const result = mapReverse([1, 2, 3, 4, 5], (item, index) => index);
expect(result).toEqual([4, 3, 2, 1, 0]);
});
it('mapReverse should use provide reverse index', () => {
const result = mapReverse([1, 2, 3, 4, 5], (item, _, index) => index);
expect(result).toEqual([0, 1, 2, 3, 4]);
});
it('filterReverse should work with an empty array', () => {
const result = filterReverse([], (item) => false);
expect(result).toEqual([]);
});
it('filterReverse should work with false selector', () => {
const result = filterReverse([1, 2, 3, 4, 5], (item) => false);
expect(result).toEqual([]);
});
it('filterReverse should work with true selector', () => {
const result = filterReverse([1, 2, 3, 4, 5], (item) => true);
expect(result).toEqual([5, 4, 3, 2, 1]);
});
it('filterReverse should work with custom selector', () => {
const result = filterReverse([1, 2, 3, 4, 5], (item) => item % 2 === 1);
expect(result).toEqual([5, 3, 1]);
});
it('filterReverse should use ordinary index', () => {
const result = filterReverse([1, 2, 3, 4, 5], (item, index) => index !== 0);
expect(result).toEqual([5, 4, 3, 2]);
});
it('filterReverse should provide reverse index', () => {
const result = filterReverse([1, 2, 3, 4, 5], (item, _, index) => index !== 0);
expect(result).toEqual([4, 3, 2, 1]);
});
});
|
/**
* Return LLRP message as string in LTK XML format
*
* If there is an error during message encoding, the error message is
* returned.
*
     * @return LLRP message in LTK XML encoding
* @throws InvalidLLRPMessageException
*/
public String toXMLString() throws InvalidLLRPMessageException {
Document d = this.encodeXML();
XMLOutputter outputter = new XMLOutputter();
outputter.setFormat(Format.getPrettyFormat());
return outputter.outputString(d);
} |
/**
* Registers new {@link Command} with this splitter and returns a response {@link Publisher} for that command.
*
* <h2>Assumptions</h2>
* As defined by the class javadoc: {@link ReadStreamSplitter}.
*
* @param command {@link Command} to register.
* @return {@link Publisher} containing response for this command.
*/
Publisher<PubSubChannelMessage> registerNewCommand(Command command) {
return new Publisher<PubSubChannelMessage>() {
@Override
protected void handleSubscribe(Subscriber<? super PubSubChannelMessage> subscriber) {
TerminalMessagePredicate cmdPredicate = null;
if (command == PING) {
cmdPredicate = forCommand(command);
predicate.addPredicate(cmdPredicate);
}
if (!subscribers.offer(subscriber)) {
if (cmdPredicate != null) {
predicate.remove(cmdPredicate);
}
subscriber.onSubscribe(EMPTY_SUBSCRIPTION);
subscriber.onError(new QueueFullException("subscribers-queue", Integer.MAX_VALUE));
return;
}
if (state == STATE_TERMINATED) {
if (subscribers.remove(subscriber)) {
subscriber.onSubscribe(EMPTY_SUBSCRIPTION);
subscriber.onError(new IllegalStateException("Connection read stream has already terminated."));
}
return;
}
Subscription groupSub = groupSubscription;
if (groupSub == null) {
pendingGroupRequestedUpdater.incrementAndGet(ReadStreamSplitter.this);
requestPendingGroups(groupSubscription);
} else {
groupSub.request(1);
}
if (stateUpdater.compareAndSet(ReadStreamSplitter.this, STATE_INIT, STATE_READ_SUBSCRIBED)) {
subscribeToOriginal();
}
}
};
} |
def output_moments(momzero, momone, momtwo, momnine, dir, filename):
momzero.write(dir+'/'+filename+'_momzero.fits', format='fits', overwrite=True)
momone.write(dir+'/'+filename+'_momone.fits', format='fits', overwrite=True)
momtwo.write(dir+'/'+filename+'_momtwo.fits', format='fits', overwrite=True)
fits.writeto(dir+'/'+filename+'_momnine.fits', momnine.value, momtwo.header, overwrite=True) |
# Highest Value Palindrome
# Make a number palindromic in no more than $k$ moves, maximal.
#
# https://www.hackerrank.com/challenges/richie-rich/problem
#
#!/bin/python3
import os
import sys
#
# Complete the highestValuePalindrome function below.
#
def highestValuePalindrome(s, n, k):
pos = []
p = []
for i in range(0, n // 2):
if s[i] != s[-1 - i]:
if k == 0: return "-1"
k -= 1
# on retient le chiffre le plus grand
p += max(s[i], s[-1 - i])
# on mémorise la position du changement
# pour passer à 9 éventuellement plus tard
pos.append(i)
else:
p += s[i]
# autant que possible maximise le résultat en mettant des 9
# i.e. autant qu'on a des crédits k
i = 0
while k > 0 and i < len(p):
if p[i] != '9':
if i in pos:
# si c'était un chiffre déjà changé, le coût est 1 au lieu de 2
p[i] = '9'
k -= 1
elif k >= 2:
p[i] = '9'
k -= 2
i += 1
# ajoute le caractère du milieu isolé si nombre impair
if n % 2 == 1:
if k >= 1:
# il reste un crédit: on maximise
p += '9'
else:
p += s[n // 2]
# ajoute le p inversé pour faire le palindrome
if n % 2 == 1:
p = p + p[-2::-1]
else:
p = p + p[::-1]
return ''.join(p)
if __name__ == '__main__':
n, k = map(int, input().split())
s = input()
result = highestValuePalindrome(s, n, k)
print(result)
|
package com.example.xianghaapp.view.base;
/**
* Created by Administrator on 2016/8/10.
*/
public interface BaseView<T, V> {
    void showData(T t);
    void showHeader(V v);
}
|
/**
* * Services
*/
import { StoreService } from '@core/store/store.service';
/**
* * Modles and types
*/
import { BaseModel } from '@core/model/base.model';
import { Storages } from '../@types/interface';
/**
* * Constants
*/
import { APP_CONSTANTS } from '@src/constants';
class Model extends BaseModel {
#modelData: Storages = {};
get raw() {
return this.#modelData;
}
constructor(store: StoreService, storeKey: string) {
super(store, storeKey);
Object.assign(this.#modelData, super.get());
}
async save(): Promise<boolean> {
return super.save(this.#modelData);
}
async update(): Promise<void> {
await super.update();
this.#modelData = super.get() ?? {};
}
}
const StoragesModel = new Model(
new StoreService(APP_CONSTANTS.storagesDbFileName),
'storages',
);
Object.freeze(StoragesModel);
export { StoragesModel };
|
// Copyright (c) 2011 The Native Client Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef EXAMPLES_TUMBLER_CUBE_H_
#define EXAMPLES_TUMBLER_CUBE_H_
#include <GLES2/gl2.h>
#include <vector>
#include "opengl_context.h"
#include "opengl_context_ptrs.h"
namespace tumbler {
// The Cube class provides a place to implement 3D rendering. It has a
// frame that it occupies in a browser window.
class Cube {
public:
explicit Cube(SharedOpenGLContext opengl_context);
~Cube();
// Called once when a new RenderContext is first bound to the view. The
// bound context is guaranteed to be current and valid before calling this
// method.
void PrepareOpenGL();
// Called whenever the size of the browser view changes. This method is
// called at least once when the view is first made visible. Clamps the
// sizes to 1.
void Resize(int width, int height);
// Called every time the view need to be drawn. The bound context is
// guaranteed to be current and valid before this method is called. The
// visible portion of the context is flushed to the browser after this
// method returns.
void Draw();
// Accessor for width and height. To change these, call Resize.
  int width() const {
return width_;
}
  int height() const {
return height_;
}
// Accessor/mutator for the camera orientation.
void GetOrientation(std::vector<float>* orientation) const {
if (!orientation)
return;
(*orientation)[0] = static_cast<float>(orientation_[0]);
(*orientation)[1] = static_cast<float>(orientation_[1]);
(*orientation)[2] = static_cast<float>(orientation_[2]);
(*orientation)[3] = static_cast<float>(orientation_[3]);
}
void SetOrientation(const std::vector<float>& orientation) {
orientation_[0] = static_cast<GLfloat>(orientation[0]);
orientation_[1] = static_cast<GLfloat>(orientation[1]);
orientation_[2] = static_cast<GLfloat>(orientation[2]);
orientation_[3] = static_cast<GLfloat>(orientation[3]);
}
private:
// Create the shaders used to draw the cube, and link them into a program.
  // Initializes |shader_program_object_|, |position_location_| and
// |mvp_location_|.
bool CreateShaders();
// Generates a cube as a series of GL_TRIANGLE_STRIPs, and initializes
// |index_count_| to the number of indices in the index list used as a VBO.
  // Creates the |cube_vbos_| required for the vertex and index data and
  // uploads the VBO data.
void CreateCube();
  // Build up the model-view transform from the eye and orientation properties.
// Assumes that |model_view| is a 4x4 matrix.
void ComputeModelViewTransform(GLfloat* model_view);
SharedOpenGLContext opengl_context_;
int width_;
int height_;
GLuint shader_program_object_; // The compiled shaders.
GLint position_location_; // The position attribute location.
GLint color_location_; // The color attribute location.
GLint mvp_location_; // The Model-View-Projection composite matrix.
GLuint cube_vbos_[3];
GLfloat eye_[3]; // The eye point of the virtual camera.
// The orientation of the virtual camera stored as a quaternion. The
// quaternion is laid out as {{x, y, z}, w}.
GLfloat orientation_[4];
GLfloat perspective_proj_[16];
GLfloat mvp_matrix_[16];
};
} // namespace tumbler
#endif // EXAMPLES_TUMBLER_CUBE_H_
|
Influence of feeding fermented colostrum and Lactobacillus acidophilus on fecal flora of dairy calves.
Twenty Holstein calves were assigned alternately at birth to diets of 1) fermented colostrum, 2) colostrum treated with 1% propionic acid, 3) whole milk, or 4) whole milk treated with Lactobacillus acidophilus (frozen concentrate culture) at 5 × 10^8 organisms per liter. Diets were fed once daily for 3 wk at 10% of birth weight as the sole source of nutrients. Fecal samples were collected at 0, 7, 14, and 21 days of age and analyzed for coliform and lactobacilli numbers. Fermented colostrum diets did not alter coliform counts in feces of healthy calves. Fecal coliform counts of calves fed L. acidophilus decreased with time. Average fecal lactobacilli counts were lower for the colostrum diets than milk diets. The apparent lowered incidence of scours frequently reported in calves fed fermented colostrum diets was not reflected in major changes in fecal microflora under the conditions of this study. |