content
stringlengths 10
4.9M
|
---|
<gh_stars>1-10
import { Frame, MapPlayer, Trigger } from "w3ts";
import { ITalentSlot, TalentState } from "../Interfaces/ITalentSlot";
import { ITalentTreeView } from "../Interfaces/ITalentTreeView";
import { DependencyOrientation } from "../Interfaces/ITalentView";
import { Talent } from "../Models/Talent";
import { TalentTree } from "../Models/TalentTree";
/** Callback invoked after the tree model changes so the view can re-render for the given watcher. */
export type ViewChanged = (ttvm: BasicTalentTreeViewModel, watcher: MapPlayer) => void;
/**
 * Grid-neighbour lookup: given a slot index and the column count of the
 * talent grid, each entry returns the index of the adjacent slot in that
 * dependency direction (left/right are +-1, up/down are +-cols).
 */
export const TalentDependencyIndex: Record<DependencyOrientation, (index: number, cols: number) => number> = {
  left: (i, _cols) => i - 1,
  right: (i, _cols) => i + 1,
  up: (i, cols) => i + cols,
  down: (i, cols) => i - cols,
};
/**
 * Drives an ITalentTreeView for a single watching player: registers click
 * handlers for the window buttons and talent slots, lays the slots out in a
 * cols x rows grid, and pushes TalentTree model state into the slot views.
 */
export class BasicTalentTreeViewModel {

  private _watcher: MapPlayer;
  // True while the window is currently shown to the watcher.
  private _watched = false;
  private _tree: TalentTree | null = null;
  private _view: ITalentTreeView;
  private _showHideFrame: Frame;
  // Slot views, index-aligned with the tree's talents; created lazily in Show().
  private _slots: ITalentSlot[] = [];
  private _onViewChanged: ViewChanged;
  private _talentSlotFactory: (this: void, i: number) => ITalentSlot;
  // A single trigger receives every registered frame click; dispatch happens via _frameEvent.
  private _frameClickTrigger: Trigger;
  // Frame id -> handler invoked when that frame is clicked.
  private _frameEvent: Record<number, () => void> = {};

  /**
   * @param cfg layout configuration (box size and margins)
   * @param watcher the player this view model renders for
   * @param view the frames that make up the talent window
   * @param talentSlotFactory creates the i-th talent slot view on demand
   * @param onViewChanged invoked after any model change so the view can
   *        re-render; defaults to a full slot refresh
   */
  constructor(
    private cfg: IBasicTalentTreeViewModelConfig,
    watcher: MapPlayer,
    view: ITalentTreeView,
    talentSlotFactory: (i: number) => ITalentSlot,
    onViewChanged: ViewChanged = (ttvm, watcher) => ttvm.ResetTalentViewModels()
  ) {
    this._watcher = watcher;
    this._view = view;
    this._showHideFrame = view.window;
    this._onViewChanged = onViewChanged;
    this._talentSlotFactory = talentSlotFactory;
    this._frameClickTrigger = new Trigger();
    this._frameClickTrigger.triggerRegisterFrameEvent(view.confirm.buttonMain, FRAMEEVENT_CONTROL_CLICK);
    this._frameClickTrigger.triggerRegisterFrameEvent(view.cancel.buttonMain, FRAMEEVENT_CONTROL_CLICK);
    this._frameClickTrigger.triggerRegisterFrameEvent(view.close.buttonMain, FRAMEEVENT_CONTROL_CLICK);
    // Only react when the clicking player is this instance's watcher.
    this._frameEvent[view.confirm.buttonMain.id] = () => this._watcher.handle == GetTriggerPlayer() && this.OnConfirm();
    this._frameEvent[view.cancel.buttonMain.id] = () => this._watcher.handle == GetTriggerPlayer() && this.OnCancel();
    this._frameEvent[view.close.buttonMain.id] = () => this._watcher.handle == GetTriggerPlayer() && this.OnClose();
    this._frameClickTrigger.addAction(() => this.OnFrameButtonClicked());
  }

  /**
   * Applies one temporary rank to the clicked talent if the watcher can
   * afford its cost and it is not maxed yet, then refreshes the view.
   */
  OnTalentClicked(index: number) {
    if (!this._watched || !this._tree) return;
    const talent = this._tree.talents[index];
    const tempState = this._tree.tempRankState[index];
    if (this._tree.talentPoints >= talent.cost && tempState < talent.maxRank) {
      this._tree.ApplyTalentTemporary(index);
      // Applying a rank can change the link states of neighbouring talents.
      this._tree.UpdateLinkStates();
      this._onViewChanged(this, this._watcher);
    }
  }

  /** Commits the temporary rank state to the tree and refreshes the view. */
  OnConfirm() {
    if (!this._tree) return;
    this._tree.SaveTalentRankState();
    this._onViewChanged(this, this._watcher);
  }

  /** Discards the temporary rank state and refreshes the view. */
  OnCancel() {
    if (!this._tree) return;
    this._tree.ResetTempRankState();
    this._tree.UpdateLinkStates();
    this._onViewChanged(this, this._watcher);
  }

  /** Closes the window. */
  OnClose() {
    this.Hide();
  }

  /**
   * Updates the window title and background from the tree, locally for the
   * watcher only.
   * NOTE(review): despite the name, no talent-point counter is updated here —
   * confirm whether a points display was intended.
   */
  UpdatePointsAndTitle() {
    if (GetLocalPlayer() != this._watcher.handle) return;
    if (this._tree) {
      this._view.titleText.text = this._tree.title;
      this._view.backgroundArt.setTexture(this._tree.backgroundImage, 0, true);
    }
  }

  /** Re-syncs every slot view with the current tree model state. */
  ResetTalentViewModels() {
    if (!this._tree) return;
    try {
      const talents = this._tree.talents;
      for (let i = 0; i < this._slots.length; i++) {
        const slot = this._slots[i];
        const talent = talents[i];
        if (talent) {
          slot.talent = talents[i];
          this.UpdateTalentSlot(slot, talent, this._tree, i);
        } else {
          // Slot exists but has no talent behind it.
          slot.state = TalentState.Empty;
        }
      }
      this.UpdatePointsAndTitle();
    } catch (ex) {
      print(ex);
    }
  }

  /** Binds a talent tree model to this view model and refreshes the view. */
  SetTree(tree: TalentTree) {
    this._tree = tree;
    this.ResetTalentViewModels();
  }

  /** Whether the window is currently shown to the watcher. */
  IsWatched() {
    return this._watched;
  }

  /**
   * Builds any missing slot views, lays all slots out in a grid centred in
   * the talent container, syncs them with the model, and finally makes the
   * window visible (locally, for the watcher only).
   */
  Show(): void {
    if (!this._tree) return;
    this._watched = true;

    const tree = this._tree;
    const talents = tree.talents;
    const cols = tree.columns;
    const rows = tree.rows;
    // Grid cell size: the box minus its margins, split into (cols+1)/(rows+1) steps.
    const xIncrem = (this.cfg.boxWidth * (1 - this.cfg.sideMargin)) / (cols + 1);
    const yIncrem = (this.cfg.boxHeight * (1 - this.cfg.verticalMargin)) / (rows + 1);

    // Create additional talent views if there are not enough yet.
    // FIX: the loop previously ran with `i <= tree.maxTalents`, creating one
    // slot (and registering one extra click trigger) more than maxTalents.
    if (tree.maxTalents > this._slots.length) {
      for (let i = this._slots.length; i < tree.maxTalents; i++) {
        const index = i;
        const slot = this._talentSlotFactory(i);
        this._slots[i] = slot;
        // Set its watcher to this watcher.
        slot.watcher = this._watcher;
        this._frameClickTrigger.triggerRegisterFrameEvent(slot.buttonFrame, FRAMEEVENT_CONTROL_CLICK);
        this._frameEvent[slot.buttonFrame.id] = () => this.OnTalentClicked(index);
      }
    }

    // Update slots with talent tree data.
    this.ResetTalentViewModels();

    // Position every slot and hide those without a talent behind them.
    const maxTalents = this._slots.length;
    for (let i = 0; i < maxTalents; i++) {
      const slot = this._slots[i];
      const xPos = math.floor(math.fmod(i, cols));
      const yPos = math.floor(i / cols);
      const x = xPos * xIncrem - ((cols - 1) * 0.5) * xIncrem;
      const y = yPos * yIncrem - ((rows - 1) * 0.5) * yIncrem;
      slot.moveTo(FramePoint.C, this._view.talentTreeContainer, FramePoint.C, x, y, xIncrem, yIncrem);
      slot.visible = talents[i] ? true : false;
    }

    // Visibility is changed locally, for the watching player only.
    if (GetLocalPlayer() != this._watcher.handle) return;
    this._view.window.visible = true;
    this._view.talentTreeContainer.visible = true;
  }

  /** Hides the window (locally for the watcher) and stops reacting to clicks. */
  Hide(): void {
    this._watched = false;
    if (GetLocalPlayer() != this._watcher.handle) return;
    this._view.window.visible = false;
  }

  /**
   * Computes and applies a single slot's visual state (rank, dependency
   * links, enabled/disabled state and error text) from the tree model.
   */
  UpdateTalentSlot(slot: ITalentSlot, talent: Talent, tree: TalentTree, index: number) {
    const tempState = tree.GetTalentTempState(index);
    // Neighbour indices use the same grid arithmetic as TalentDependencyIndex.
    const depLeft = tree.CheckDependencyKey(talent.dependency.left, index, index - 1);
    const depUp = tree.CheckDependencyKey(talent.dependency.up, index, index + tree.columns);
    const depRight = tree.CheckDependencyKey(talent.dependency.right, index, index + 1);
    const depDown = tree.CheckDependencyKey(talent.dependency.down, index, index - tree.columns);

    slot.rank = tempState;
    slot.RenderLinks(depLeft, depUp, depRight, depDown);

    // Pure link nodes only render connections; they have no state of their own.
    if (talent.isLink) {
      slot.state = TalentState.Link;
      return;
    }

    slot.errorText = "";
    let depOk = true;
    let depError: string | undefined;
    if (!(depLeft.ok && depUp.ok && depRight.ok && depDown.ok)) {
      depOk = false;
      // Join whichever dependency errors are present with ", ".
      const concatErrs = (err1?: string, err2?: string): string | undefined =>
        err1 && err2 ? err1 + ", " + err2 : (err1 ? err1 : err2 ? err2 : undefined);
      depError = concatErrs(concatErrs(concatErrs(depLeft && depLeft.error, depUp && depUp.error), depRight && depRight.error), depDown && depDown.error);
    }

    const [reqOk, reqError] = tree.CalculateTalentRequirements(index, talent);
    if (tempState == talent.maxRank) {
      slot.state = TalentState.Maxed;
    } else if (depOk && reqOk && talent.cost <= tree.talentPoints) {
      slot.state = TalentState.Available;
    } else {
      slot.errorText = (depError ?? "") + (reqError ?? "");
      // NOTE(review): this preserves the original mapping — RequireDisabled
      // when requirements are met (so the blocker is a dependency or cost),
      // DependDisabled when only the dependencies are met. That looks
      // swapped; confirm the intended semantics of the two enum values.
      slot.state = !reqOk && depOk ? TalentState.DependDisabled : TalentState.RequireDisabled;
    }
  }

  /** Dispatches a frame click to the handler registered for that frame's id. */
  OnFrameButtonClicked() {
    const frameId = Frame.fromEvent().id;
    if (frameId in this._frameEvent) {
      this._frameEvent[frameId]();
    }
  }
}
/**
 * Layout configuration for BasicTalentTreeViewModel: overall box dimensions
 * and the fraction of each dimension reserved as margin. Slot cell sizes are
 * derived from these values in Show().
 */
export interface IBasicTalentTreeViewModelConfig {
/** Total width of the talent layout box. */
boxWidth: number;
/** Total height of the talent layout box. */
boxHeight: number;
/** Fraction of boxWidth removed before the grid is laid out (used as 1 - sideMargin). */
sideMargin: number;
/** Fraction of boxHeight removed before the grid is laid out (used as 1 - verticalMargin). */
verticalMargin: number;
}
const FramePoint = {
C: FRAMEPOINT_CENTER,
T: FRAMEPOINT_TOP,
B: FRAMEPOINT_BOTTOM,
TL: FRAMEPOINT_TOPLEFT,
TR: FRAMEPOINT_TOPRIGHT,
BL: FRAMEPOINT_BOTTOMLEFT,
BR: FRAMEPOINT_BOTTOMRIGHT,
L: FRAMEPOINT_LEFT,
R: FRAMEPOINT_RIGHT,
} |
It’s official! We will be getting more teenage horror stories come 2018.
This morning, Netflix has officially announced the renewal of their adult animated show “Big Mouth” for a second season. The show was created by Nick Kroll, Andrew Goldberg, Jennifer Flackett, and Mark Levin, and Season 2 is slated for 2018.
This vulgar yet introspective original series is a hilarious coming-of-age story that follows the adolescent experiences of three best friends as they navigate their teenage crises: Nick (voiced by Nick Kroll), Andrew (John Mulaney), and Jessi (Jessi Klein).
Season 1 was met with enthusiastically positive reviews and earned a Critic’s Pick from IndieWire’s TV Critic, Ben Travers. “There are story arcs on first crushes, friends who become more than friends, the sliding scale of human sexuality, love versus lust, peer pressure, growing up too fast, and getting your first period,” said Travers in his Season 1 review.
Andrew, for instance, is quite a shy boy who has his own version of Jiminy Cricket, but rather than being a voice of reason, his is a ghastly yellow creature that represents Andrew’s hormonal drive — and is aptly named The Hormone Monster. Jessi, on the other hand, embodies the struggles of becoming a woman, and she also has her own Hormone Monstress (voiced by Maya Rudolph).
“By disembodying Andrew, Nick, and Jessi’s most depraved thoughts, the show creates an obvious disconnect between who the kids are and what their bodies are doing to them,” Travers said in his review.
Watch the announcement below and look closely for the quote where the “legit actual” Netflix CEO calls “Big Mouth,” the streaming service’s “best show ever.” Really.
The show’s first 10 episodes are currently streaming on Netflix. |
For a movement that burst into life on the sleeping bags of college kids, some of the “Occupy Wall Street” protesters are getting downright long in the tooth.
This week alone, the Raging Grannies and the Granny Peace Brigade have turned up to show solidarity. And signature boomer anthems by Neil Young, Buffalo Springfield, and Woody Guthrie are being sung by AARP candidates at encampments around the country.
Is this spreading “Occupy” social action – now appearing in hundreds of towns and cities across the globe – being taken over by hordes of old lefties and aging ’60s radicals, in search of “somethin’ hap’nin’ here?” Or, as some suggest, is the steady influx of a wider demographic a sign of a broader systemic call to action with a cross-generational appeal?
“More and more middle-age people are showing up all the time,” says Robert Hockett, a professor at Cornell University Law School, who has a small apartment just around the corner from Zuccotti Park where the Wall Street protest began in New York. He attends the nightly general assembly meetings, he says with a laugh, adding, “They are my neighbors now.”
A student of social protest, he says that “this is different from many earlier movements such as the antiwar actions, because the issues don’t fall into partisan political or age divides.” Rather, he says, “these economic issues are hitting old and young across political lines.”
Veterans from earlier protest eras are putting in a good showing. Margaret Ratner Kunstler, widow of the iconic progressive attorney William Kunstler, has been in the heart of the fray from early on and represents many of the protesters arrested on the Brooklyn Bridge two weeks ago.
“I’m a grandmother and these are my children,” she says with a laugh, noting that the reach and organization she sees springing up around the movement “is giving me hope.”
She says her optimism about people power flagged after the 1999 Seattle protests at the World Trade Organization meeting, because police began to develop more stringent crowd-control tactics. These included rubber bullets, “pens,” and pepper spray.
But this time around, it was the YouTube video of police using the spray on young women in the Wall Street demonstration that broadened the media coverage from the underground press. Now, she says, more and more people feel empowered, and the movement is going mainstream.
Self-described “old lefty” Mark Naison, now a professor at Fordham University in New York, says he has been “hanging out with the protesters as often as possible.”
While it “warms his heart” to see all these students out on the streets in what he calls nonviolent, thoughtful protest, “this is not a young person’s issue by any means,” he says. The lack of jobs and opportunities affects everyone, he adds.
Atlanta-based Republican strategist David Johnson sees the influx of older protesters differently. This is just a strategic move, he says, adding that well-dressed, middle-aged Americans are just what a scruffy student movement needs to clean up its image.
“This is the first step towards co-opting the movement,” he says, pointing to this week’s endorsements from high-level Democrats, including President Obama and Sens. Charles Schumer and Harry Reid. Beyond that, he suggests, the older faces represent what he dubs “the mushy middle in our country – people who like to come out for events that get media coverage, but aren’t clear where they stand politically.”
The International Amalgamated Transit Union – with more than 190,000 members – is quite clear on its stance, says president Larry Hanley. The union issued a statement of solidarity with the protesters this week.
“We are the parents of all these kids,” he says, adding, “We see that this generation has gotten a raw deal.” At the same time, he says, “workers' pensions have been looted.” He "applauds the Occupy Wall Street activists for their courage and strength to expose the greed and corruption on Wall Street as the rest of America struggles to survive.”
Students appreciate the experience of the older voices, says Joe Briones, a film undergrad at Los Angeles City College. “The Occupy movement is trying to learn from past protests, what worked and what didn’t,” he says. “Every voice is equal, old or young, when it comes to decisions at meetings, but their experience has been invaluable.”
Experience also helps move the effort to the next level, says Occupy Los Angeles media organizer Lisa Clapier, who has seven children and a farm in Oregon.
Although she organizes social-media events for a living, she's donating her time to this activity. “I wouldn’t miss this for anything," she says. "We’ve read all our lives about pure democracy in action, and now we get a chance to live it.” |
<reponame>dlehdrjs36/Travel
package travel.image.command;
import java.util.ArrayList;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import travel.image.com.ImageListDAO;
import travel.image.com.ImageListDTO;
/**
 * Loads the complete main image list from the DAO and exposes it to the view
 * layer as the "list" request attribute.
 */
public class MainImageListCommand implements ImageListCommand {

    @Override
    public void execute(HttpServletRequest request, HttpServletResponse response) {
        ImageListDAO dao = new ImageListDAO();
        // FIX: removed the unused ImageListDTO instance and the throwaway
        // ArrayList that was immediately overwritten by dao.imageList(count).
        int count = dao.ImageListTotalCount();
        List<ImageListDTO> list = dao.imageList(count);
        request.setAttribute("list", list);
    }
}
|
An application of augmented reality (AR) in the manipulation of fanuc 200iC robot
In this paper, the simulation and manipulation of a 6 DOF robot manipulator is presented using the kinematic model and an AR (Augmented Reality) environment. In this context, the system is based on a multimodal user interface to overlay virtual objects onto the real world scene. The objective of this work is to provide the right information needed to perform a certain task. The determination of the camera pose in the AR system is solved by using the most popular algorithm in AR applications (ARToolKit). An experiment is carried out to verify the effectiveness of the simulator. The results show that it can satisfy the manipulation in terms of accuracy and stability. |
import { MediaProcessorConnectorInterface } from '../../lib/main'
import VideoMirrorHelper from './video-mirror-helper.js'
import VideoSink from './video-sink.js';
class CameraSource {
videoMirrorHelper_: VideoMirrorHelper;
stream_: MediaStream;
mediaProcessorConnector_: MediaProcessorConnectorInterface
sink_: VideoSink
videoTrack_: MediaStreamTrack
audioTrack_: MediaStreamTrack
constructor() {
this.videoMirrorHelper_ = new VideoMirrorHelper();
this.videoMirrorHelper_.setVisibility(true);
this.stream_ = null;
this.sink_ = new VideoSink();
this.videoTrack_ = null;
this.audioTrack_ = null;
}
async init() {
this.stream_ = await navigator.mediaDevices.getUserMedia({audio: false, video: true});
this.videoTrack_ = this.stream_.getVideoTracks()[0]
this.videoMirrorHelper_.setStream(this.stream_);
}
getMaxFrameRate():number{
return this.videoTrack_.getCapabilities().frameRate.max
}
setMediaProcessorConnector(mediaProcessorConnector: MediaProcessorConnectorInterface): Promise<void> {
return new Promise<void>(async (resolve, reject) => {
this.mediaProcessorConnector_ = mediaProcessorConnector;
if (!this.stream_)
{
console.log('[CameraSource] Requesting camera.');
reject("no stream")
}
this.mediaProcessorConnector_.setTrack(this.videoTrack_).then( newTrack => {
let processedStream = new MediaStream();
processedStream.addTrack(newTrack);
this.sink_.setMediaStream(processedStream);
resolve();
})
.catch(e => {
reject(e)
})
});
}
async stopMediaProcessorConnector() {
if(this.mediaProcessorConnector_){
this.mediaProcessorConnector_.destroy().then(() => {
let processedStream = new MediaStream();
processedStream.addTrack(this.videoTrack_);
this.sink_.setMediaStream(processedStream);
})
.catch(e => {
console.log(e);
});
}
}
destroy() {
console.log('[CameraSource] Stopping camera');
this.videoMirrorHelper_.destroy();
if (this.stream_) {
this.stream_.getTracks().forEach(t => t.stop());
}
}
}
export default CameraSource; |
/**
* Set the gripper to open position.
*/
void openWideGripper() {
if (leftGrip != null)
leftGrip.setPosition(LEFT_OPEN_WIDE_POSITION);
if (rightGrip != null)
rightGrip.setPosition(RIGHT_OPEN_WIDE_POSITION);
} |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package agent
import (
"context"
"os"
"path/filepath"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/agentproxyshared/auth"
token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file"
"github.com/hashicorp/vault/command/agentproxyshared/sink"
"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/vault"
)
// TestTokenFileEndToEnd runs the agent's token-file auth method end to end
// against a test Vault cluster: a client token is written to a file, the auth
// handler picks it up, and the file sink must write out a usable token while
// leaving the source token file in place.
func TestTokenFileEndToEnd(t *testing.T) {
	var err error
	logger := logging.NewVaultLogger(log.Trace)
	coreConfig := &vault.CoreConfig{
		DisableMlock: true,
		DisableCache: true,
		Logger:       log.NewNullLogger(),
	}

	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
	})
	cluster.Start()
	defer cluster.Cleanup()

	cores := cluster.Cores
	vault.TestWaitActive(t, cores[0].Core)
	client := cores[0].Client

	secret, err := client.Auth().Token().Create(nil)
	if err != nil || secret == nil {
		t.Fatal(err)
	}

	tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file"))
	if err != nil {
		t.Fatal(err)
	}
	tokenFileName := tokenFile.Name()
	tokenFile.Close() // WriteFile doesn't need it open
	// FIX: the error from os.WriteFile was silently discarded; a failed write
	// would previously surface later as a confusing empty-token failure.
	if err := os.WriteFile(tokenFileName, []byte(secret.Auth.ClientToken), 0o666); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tokenFileName)

	ahConfig := &auth.AuthHandlerConfig{
		Logger: logger.Named("auth.handler"),
		Client: client,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)

	am, err := token_file.NewTokenFileAuthMethod(&auth.AuthConfig{
		Logger: logger.Named("auth.method"),
		Config: map[string]interface{}{
			"token_file_path": tokenFileName,
		},
	})
	if err != nil {
		t.Fatal(err)
	}

	ah := auth.NewAuthHandler(ahConfig)
	errCh := make(chan error)
	go func() {
		errCh <- ah.Run(ctx, am)
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// We close these right away because we're just basically testing
	// permissions and finding a usable file name
	sinkFile, err := os.Create(filepath.Join(t.TempDir(), "auth.tokensink.test."))
	if err != nil {
		t.Fatal(err)
	}
	tokenSinkFileName := sinkFile.Name()
	sinkFile.Close()
	os.Remove(tokenSinkFileName)
	t.Logf("output: %s", tokenSinkFileName)

	config := &sink.SinkConfig{
		Logger: logger.Named("sink.file"),
		Config: map[string]interface{}{
			"path": tokenSinkFileName,
		},
		WrapTTL: 10 * time.Second,
	}

	fs, err := file.NewFileSink(config)
	if err != nil {
		t.Fatal(err)
	}
	config.Sink = fs

	ss := sink.NewSinkServer(&sink.SinkServerConfig{
		Logger: logger.Named("sink.server"),
		Client: client,
	})
	go func() {
		errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// This has to be after the other defers, so it happens first. It allows
	// successful test runs to immediately cancel all of the runner goroutines
	// and unblock any of the blocking defer calls by the runner's DoneCh that
	// comes before this and avoid successful tests from taking the entire
	// timeout duration.
	defer cancel()

	// The sink file must not exist until an auth result has come through.
	if stat, err := os.Lstat(tokenSinkFileName); err == nil {
		t.Fatalf("expected err but got %s", stat)
	} else if !os.IsNotExist(err) {
		t.Fatal("expected notexist err")
	}

	// Wait 2 seconds for the env variables to be detected and an auth to be generated.
	time.Sleep(time.Second * 2)

	token, err := readToken(tokenSinkFileName)
	if err != nil {
		t.Fatal(err)
	}

	if token.Token == "" {
		t.Fatal("expected token but didn't receive it")
	}

	// The token-file auth method must not delete its source file.
	_, err = os.Stat(tokenFileName)
	if err != nil {
		t.Fatal("Token file removed")
	}
}
|
package promql
import (
"github.com/influxdata/promql/v2"
"github.com/influxdata/promql/v2/pkg/labels"
)
// escapeLabelName maps a PromQL label name to its escaped form: the metric
// name label "__name__" becomes "_field", and names beginning with '_' or '~'
// are prefixed with '~' so they cannot collide with reserved names. Empty
// names pass through untouched (they occur in parameters to functions, e.g.
// an empty "src" parameter to label_replace()).
func escapeLabelName(ln string) string {
	if ln == "" {
		return ""
	}
	if ln == "__name__" {
		return "_field"
	}
	if c := ln[0]; c == '_' || c == '~' {
		return "~" + ln
	}
	return ln
}
// UnescapeLabelName reverses escapeLabelName: "_field" becomes "__name__" and
// a leading '~' escape character is stripped; anything else passes through.
// FIX: previously ln[0] paniced with an index-out-of-range on the empty
// string, even though escapeLabelName explicitly produces "".
func UnescapeLabelName(ln string) string {
	switch {
	case ln == "":
		return ""
	case ln == "_field":
		return "__name__"
	case ln[0] == '~':
		return ln[1:]
	default:
		return ln
	}
}
// escapeLabelNames applies escapeLabelName to every element, returning a new
// slice and leaving the input untouched.
func escapeLabelNames(in []string) []string {
	escaped := make([]string, len(in))
	for i := range in {
		escaped[i] = escapeLabelName(in[i])
	}
	return escaped
}
// escapeLabelMatchers returns a copy of the matchers with every label name
// escaped. Matcher construction is not expected to fail here, so an error is
// treated as a programming error and panics.
func escapeLabelMatchers(in []*labels.Matcher) []*labels.Matcher {
	out := make([]*labels.Matcher, 0, len(in))
	for _, m := range in {
		escaped, err := labels.NewMatcher(m.Type, escapeLabelName(m.Name), m.Value)
		if err != nil {
			panic("unable to create escaped label matcher")
		}
		out = append(out, escaped)
	}
	return out
}
// labelNameEscaper is a promql.Visitor that rewrites label references in an
// AST in place so they use escaped label names.
type labelNameEscaper struct{}

// Visit escapes label names in the node types that carry them; all other
// nodes pass through unchanged. It always returns the visitor itself so
// traversal continues into child nodes.
func (e labelNameEscaper) Visit(node promql.Node, path []promql.Node) (promql.Visitor, error) {
	switch expr := node.(type) {
	case *promql.AggregateExpr:
		expr.Grouping = escapeLabelNames(expr.Grouping)
	case *promql.BinaryExpr:
		if vm := expr.VectorMatching; vm != nil {
			vm.MatchingLabels = escapeLabelNames(vm.MatchingLabels)
			vm.Include = escapeLabelNames(vm.Include)
		}
	case *promql.Call:
		// Nothing to do here - there are only two functions that take label
		// names as string parameters (label_replace() and label_join()), and
		// those handle escaping by themselves.
	case *promql.MatrixSelector:
		// Clear the selector's metric name and escape its matchers.
		expr.Name = ""
		expr.LabelMatchers = escapeLabelMatchers(expr.LabelMatchers)
	case *promql.VectorSelector:
		expr.Name = ""
		expr.LabelMatchers = escapeLabelMatchers(expr.LabelMatchers)
	}
	return e, nil
}
|
<filename>src/it/fridrik/agent/Smith.java<gh_stars>1-10
/*
* Agent Smith - A java hot class redefinition implementation
* Copyright (C) 2007 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.fridrik.agent;
import it.fridrik.filemonitor.FileEvent;
import it.fridrik.filemonitor.FileModifiedListener;
import it.fridrik.filemonitor.FileMonitor;
import it.fridrik.filemonitor.JarEvent;
import it.fridrik.filemonitor.JarModifiedListener;
import it.fridrik.filemonitor.JarMonitor;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.instrument.ClassDefinition;
import java.lang.instrument.Instrumentation;
import java.lang.instrument.UnmodifiableClassException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Vector;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Agent Smith is an agent with just one aim: redefining classes as soon as they
 * are changed. Smith bundles together Instrumentation, FileMonitor and
 * JarMonitor.
 *
 * @author <NAME> (<EMAIL>)
 * @see FileMonitor
 * @see JarMonitor
 * @since 1.0
 */
public class Smith implements FileModifiedListener, JarModifiedListener {

  /** Min period allowed (ms) between two monitor runs */
  private static final int MONITOR_PERIOD_MIN_VALUE = 1000;

  /** Lists of active Smith agents */
  private static Vector<Smith> smiths = new Vector<Smith>();

  /** Called when the agent is initialized via command line */
  public static void premain(String agentArgs, Instrumentation inst) {
    initialize(agentArgs, inst);
  }

  /** Called when the agent is initialized after the jvm startup */
  public static void agentmain(String agentArgs, Instrumentation inst) {
    initialize(agentArgs, inst);
  }

  /** Parses the agent arguments and registers a new running Smith instance. */
  private static void initialize(String agentArgs, Instrumentation inst) {
    SmithArgs args = new SmithArgs(agentArgs);
    if (!args.isValid()) {
      throw new RuntimeException(
          "Your parameters are invalid! Check the documentation for the correct syntax");
    }
    Smith smith = new Smith(inst, args);
    smiths.add(smith);
  }

  /** Stops all active Smith agents */
  public static void stopAll() {
    for (Smith smith : smiths) {
      smith.stop();
    }
  }

  private static final Logger log = Logger.getLogger(Smith.class.getName());
  private final Instrumentation inst;
  private final List<String> classFolders;
  private final String jarFolder;
  private final ScheduledExecutorService service;
  /** Lazily built cache of loaded classes, keyed by fully qualified name. */
  private HashMap<String, Class<?>> loadedClassesMap;

  /**
   * Creates and starts a new Smith agent. Please note that periods smaller than
   * MONITOR_PERIOD_MIN_VALUE (1000 milliseconds) won't be considered.
   * (FIX: the javadoc previously claimed a 500 ms minimum while the code
   * enforces 1000 ms.)
   *
   * @param inst
   *          the instrumentation implementation
   * @param args
   *          the {@link SmithArgs} instance
   */
  public Smith(Instrumentation inst, SmithArgs args) {
    this.inst = inst;
    this.classFolders = args.getClassFolders();
    this.jarFolder = args.getJarFolder();
    int monitorPeriod = MONITOR_PERIOD_MIN_VALUE;
    if (args.getPeriod() > monitorPeriod) {
      monitorPeriod = args.getPeriod();
    }
    log.setLevel(args.getLogLevel());

    service = Executors.newScheduledThreadPool(2);

    FileMonitor fileMonitor = new FileMonitor(classFolders, "class");
    fileMonitor.addModifiedListener(this);
    service.scheduleWithFixedDelay(fileMonitor, 0, monitorPeriod,
        TimeUnit.MILLISECONDS);

    if (jarFolder != null) {
      JarMonitor jarMonitor = new JarMonitor(jarFolder);
      jarMonitor.addJarModifiedListener(this);
      service.scheduleWithFixedDelay(jarMonitor, 0, monitorPeriod,
          TimeUnit.MILLISECONDS);
    }

    log.info("Smith: watching class folders: " + classFolders);
    log.info("Smith: watching jars folder: " + jarFolder);
    log.info("Smith: period between checks (ms): " + monitorPeriod);
    log.info("Smith: log level: " + log.getLevel());
  }

  /**
   * Stops this Smith agent
   */
  public void stop() {
    service.shutdown();
  }

  /**
   * When the monitor notifies of a changed class file, Smith will redefine it
   */
  public void fileModified(FileEvent event) {
    File classFile = event.getSource();
    String className = toClassName(event.getBaseFolder().toString(), classFile.toString());
    try {
      byte[] classBytes = toByteArray(new FileInputStream(classFile));
      redefineClass(className, classBytes);
    } catch (Exception e) {
      log.log(Level.SEVERE, "fileModified", e);
    }
  }

  /**
   * When the monitor notifies of a changed jar file, Smith will redefine the
   * changed class file the jar contains
   */
  public void jarModified(JarEvent event) {
    String className = toClassName(null, event.getEntryName());
    JarFile jar = event.getSource();
    try {
      byte[] classBytes = toByteArray(
          jar.getInputStream(
              getJarEntry(jar, event.getEntryName())));
      redefineClass(className, classBytes);
    } catch (Exception e) {
      log.log(Level.SEVERE, "jarModified", e);
    }
  }

  /**
   * Redefines the specified class
   *
   * @param className
   *          the class name to redefine
   * @param classBytes
   *          the compiled class to redefine
   * @throws ClassNotFoundException
   *           if the class name cannot be found
   * @throws UnmodifiableClassException
   *           if the class is unmodifiable
   */
  protected void redefineClass(String className, byte[] classBytes)
      throws ClassNotFoundException, UnmodifiableClassException
  {
    Class<?> clazz = getLoadedClassesMap().get(className);
    if (clazz == null) {
      // FIX: the cache was built exactly once, so a class loaded after the
      // first redefinition was never found again. Rebuild the cache once on
      // a miss before giving up.
      loadedClassesMap = null;
      clazz = getLoadedClassesMap().get(className);
    }
    if (clazz != null) {
      ClassDefinition definition = new ClassDefinition(clazz, classBytes);
      inst.redefineClasses(new ClassDefinition[] { definition });
      log.info("Redefined " + className);
    }
  }

  /** Builds (on first use) and returns the name-to-class lookup cache. */
  private HashMap<String, Class<?>> getLoadedClassesMap() {
    if (loadedClassesMap == null) {
      loadedClassesMap = new HashMap<String, Class<?>>();
      Class<?>[] loadedClasses = inst.getAllLoadedClasses();
      for (Class<?> clazz : loadedClasses) {
        loadedClassesMap.put(clazz.getName(), clazz);
      }
    }
    return loadedClassesMap;
  }

  /**
   * Converts an absolute path to a file to a fully qualified class name
   *
   * @param baseFolder
   *          the base folder where the file was found
   * @param fullPath
   *          the absolute path of the file
   * @return a fully qualified class name
   */
  private static String toClassName(String baseFolder, String fullPath) {
    String className = fullPath;
    if (baseFolder != null) {
      className = className.substring(baseFolder.length() + 1);
    }
    return className.replace(".class", "").replace(File.separatorChar, '.');
  }

  /**
   * Gets the specified jar entry from the specified jar file
   *
   * @param jar
   *          the jar file that contains the jar entry
   * @param entryName
   *          the name of the entry contained in the jar file
   * @return a JarEntry
   * @throws IllegalArgumentException
   *           if the specified entryname is not contained in the specified jar
   *           file
   */
  private static JarEntry getJarEntry(JarFile jar, String entryName) {
    JarEntry entry = null;
    for (Enumeration<JarEntry> entries = jar.entries(); entries
        .hasMoreElements();) {
      entry = entries.nextElement();
      if (entry.getName().equals(entryName)) {
        return entry;
      }
    }
    throw new IllegalArgumentException("EntryName " + entryName
        + " does not exist in jar " + jar);
  }

  /**
   * Loads .class files as byte[]
   *
   * @param is
   *          the inputstream of the bytes to load
   * @return a byte[]
   * @throws IOException
   *           if an error occurs while reading file
   */
  private static byte[] toByteArray(InputStream is) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
      byte[] buffer = new byte[1024];
      int bytesRead;
      while ((bytesRead = is.read(buffer)) != -1) {
        // FIX: write straight from the read buffer instead of allocating and
        // copying a temporary array on every iteration.
        baos.write(buffer, 0, bytesRead);
      }
      return baos.toByteArray();
    } finally {
      // FIX: the input stream was previously left open when read() threw.
      is.close();
    }
  }
}
|
Illustration by Mark Alan Stamaty
The American consumer is back for the first holiday season since 2007. But while shoppers are hitting the malls, they’re also being choosy—and comparison shopping is more easily done online. Web sales will rise 11 percent in November and December, according to ComScore, compared with about 3 percent for sales in bricks-and-mortar stores. Online shoppers can get better prices and perks like free shipping. Online merchants, however, are not exactly defenseless. One way they fight back against picky customers is through “dynamic pricing,” also called “discriminatory,” “personalized,” or “variable” pricing. And, for the most part, customers have no idea it is happening.
In its most brazen form, it works like this: Retailers read the cookies kept on your browser or glean information from your past purchase history when you are logged into a site. That gives them a sense of what you search for and buy, how much you paid for it, and whether you might be willing and able to spend more. They alter their prices or offers accordingly. Consumers—in the few cases they recognize it is going on, by shopping in two browsers simultaneously, for instance—tend to go apoplectic. But the practice is perfectly legal, and increasingly common—pervasive, even, for some products.
Sellers of time-sensitive, highly price-variable goods (think airline tickets, hotel rooms, or car rentals) do it all the time, somewhat openly. If you have ever had the annoying experience of buying a plane ticket through a portal like Kayak, then seeing the final price jump $10 or $40 at check out, you have probably found yourself on the receiving end of dynamic pricing.
Banks do the same for products like mortgages and credit cards, where prices change depending on everything from the customer’s credit rating to the manager’s whims to what browser the searcher uses. This August, the Wall Street Journal reported on a company that helps Capital One determine what credit-card deals to offer customers when they land at the site. The deals change depending not on any credit-rating or salary information given to the firm by the customer—just on information skimmed off of their computer before the page loads. More recently, bloggers caught the bank offering different deals to users using different browsers. (Chrome users demand better deals than Firefox users, FYI.)
Online retailers also alter prices, deals, and offers on regular goods that do not traditionally have much price volatility. Groups like Consumers Union periodically track shopping sites to see how and how often they change prices, and find fairly frequent instances of dynamic pricing. “While surfing Barnes and Noble’s site, we selected a new hardcover book,” the watchdog noted in a 2007 investigation. “[We] placed it in our online ‘cart’ at a cost of $20.80. We added several other books as well but didn’t finalize the sale. Two days later, using the same browser, we found the cart had been emptied. We selected the same titles…[it] now cost $26.00.” Ditto for shoes on Zappos and a number of other products.
It is impossible to know exactly what stores change prices for customers after they have clicked to put a product in their shopping cart—or which stores change a price on a customer depending on their browser or cookies. Shoppers despise the practice, understandably, so stores rarely cop to it unless caught red-handed. But many offer disclaimers implying they are aware of price discrepancies. Pottery Barn, for example, answers the question, “Why is the price of an item in my saved shopping cart different from when I selected it?” on its site. The answer? “Prices are subject to change—including temporary reductions as well as permanent increases. The prices of items in your cart represent the current price for which you will be charged.” In short: Dynamic pricing.
The practice, if mysterious, is not new. Mega-retailer Amazon offered the same DVD for different prices to different customers in 2000, creating a public-relations disaster. The company claimed it was performing A/B price testing—seeing how many more folks would buy the DVD at a higher price—and said it would always give all customers the lower price at sale. But the incident fostered widespread concern about dynamic pricing, and spurred the first thorough study of the practice.
Concern about dynamic pricing resurged in 2005, when the University of Pennsylvania’s Annenberg Public Policy Center published a much-ballyhooed paper, entitled, somewhat provocatively, “Open to Exploitation.” Researchers conducted a 1,500-person survey and found about two in three respondents did not know it is legal “for an online store to charge different people different prices at the same time of day.” About 70 percent did not realize it is also legal for bricks-and-mortar stores to do so. But yes, as long as stores do not discriminate based on age, sex, location, or a few other characteristics, stores can price as capriciously as they want.
Fears aside, how pervasive is the phenomenon? How does it happen? And what can customers do about it?
The truth is, even retail analysts, academics, and industry groups do not have a good sense of the scope of dynamic pricing—not so much because of a lack of interest, but because companies aren’t forthcoming about how they price online. A 2000 Forrester Research report found “most merchants feel pressure to compete on price online and are testing a mix of strategies to appeal to price-conscious consumers,” but has not followed up in recent years—despite the explosive growth of online shopping and the growing sophistication of the Web marketplace.
But if analysts aren’t sure about the scope of the phenomenon, the tech guys building the pricing systems certainly are. One computer scientist who builds smart sites for online retailers—with a nondisclosure agreement, hence the anonymity—says that concerns about different customers getting different price quotes for the same good are probably overblown. Some retailers do it, particularly when gauging the market for certain items. But companies know that consumers watch for it, and will take their business elsewhere if they think they’re getting a raw deal.
That said, major retailers are getting much more sophisticated and subtle about ways to game their shoppers. It’s common for big retail Web sites to direct different users to different deals, offers, or items based on their purchase histories or cookies. They also alter their Web pages with internal ads, letting a shopper’s cookies or browser data influence which sale products pop up. And companies frequently offer special deals for customers with a few items in their shopping bags—from discounts on additional items, to free shipping, to coupons for future purchases. Ingenuity, rather than price-tampering, is now the name of the game.
And as much as retailers try to foil bargain shoppers, consumers actually do hold the upper hand online. Dynamic pricing is easy to counteract. Search multiple sites—including ones that collect prices from across the Internet as well as the sites themselves. Run searches on more than one browser, including one on which you have erased cookies. Leave items in a shopping cart for a few days, to gin up discount offers. And be prepared.
Like Slate on Facebook. Follow us on Twitter. |
<filename>src/Intraday/model.ts
import { IntradayTimeSeries } from "./IntradayTimeSeries.entity"
import { MarketDataSchema } from "../Common/interfaces";
type DayTypes = "UP" | "DOWN" | "INSIDE" | "OUTSIDE" | "GAPUP" | "GAPDOWN"
export class IntradayBatch {
public schema: MarketDataSchema = "INTRADAY"
public symbol: string = "";
public sourceName: string = ""
public queryTime = 0;
public timeSeriesInterval: string = 'P01M'
public ISODate: string = "";
public epoch = 0;
public open =0;
public high = 0;
public low = 0;
public close = 0;
public volume = 0;
public timeSeries: IntradayTimeSeries[] = [];
constructor(sym?: string,d?: Date, data?: IntradayTimeSeries[] ) {
if (sym) {
this.symbol = sym;
}
if (d) {
this.queryTime = d.valueOf();
}
if (data) {
this.timeSeries = data;
}
}
} |
<filename>src/util/logger.ts
// Shared application logger.
// NOTE(review): this uses the winston 1.x/2.x API — `Winston.cli()`,
// `LoggerInstance`, and `new Winston.Logger` were all removed in winston 3.x.
// Pin winston <3 or migrate to `winston.createLogger` when upgrading.
import * as Winston from "winston";
// Enables winston's CLI formatting (colorized, padded levels) globally.
Winston.cli();
// Single console transport at "info" level; lower-priority messages
// (debug/silly) are dropped.
const logger: Winston.LoggerInstance = new (Winston.Logger)({
transports:
[
new (Winston.transports.Console)({ level: "info" })
]
});
// Exported both as the default and as a named alias so callers can use
// either import style.
export default logger;
export {logger as Logger};
|
Impact of a multifaceted and multidisciplinary intervention on pain, agitation and delirium management in a Canadian community intensive care unit: a quality improvement study protocol
Background: Pain and agitation are closely linked to the development of delirium, which affects 60%–87% of critically ill patients. Delirium is associated with increased mortality and morbidity. Clinical guidelines suggest that routine assessment, treatment and prevention of pain, agitation and delirium (PAD) are crucial to improving patient outcomes. However, the adoption of and adherence to PAD guidelines remain suboptimal, especially in community hospitals. The aim of this quality improvement study is to evaluate the impact of a multifaceted and multidisciplinary intervention on PAD management in a Canadian community intensive care unit (ICU). Methods: This is a quality improvement, uncontrolled, before-and-after study of a multifaceted and multidisciplinary intervention targeting nurses (educational modules, visual reminders), family members (interviews, educational pamphlets and an educational video), physicians (multidisciplinary round script) and the multidisciplinary team as a whole (delirium poster). We will collect data every day for 6 weeks before implementing the intervention. Data collection will include clinical information and information on process of care. We will then implement the intervention. Four weeks after, we will collect data daily for 6 weeks to evaluate the effect of the intervention. On the basis of the volume of the ICU, we expect to enroll approximately 280 patients. We have obtained local ethics approval from the Hamilton Integrated Research Ethics Board (HiREB 18-040-C). Interpretation: The results of this quality improvement study will provide information on adherence to PAD guidelines in a Canadian community ICU setting. They will also supply information on the feasibility of implementing multifaceted and multidisciplinary PAD interventions in community ICUs. Abstract
D elirium is a harmful condition commonly encountered in the intensive care unit (ICU). 1,2 It can affect 60%-87% of critically ill patients and it is associated with multiple adverse outcomes such as increased mortality, 1-3,6,7 prolonged hospital length of stay, 2,3,6,8 increased health care costs 2,4,5,9 and long-term cognitive dysfunction. 5, Pain and agitation are closely linked to the development of delirium. 2 Pain is a common symptom in the ICU; the incidence of moderate to severe pain is up to 50%-80%. Agitation affects at least 71% of patients in the ICU. 16 The causes of pain, agitation and delirium (PAD) are multifactorial. 17 In 2013, the Society of Critical Care Medicine published clinical practice guidelines for the management of PAD in adult patients in the ICU, known as the PAD guidelines. 18 These guidelines strongly recommended the routine assessment of PAD using validated tools and the treatment and prevention of PAD. The validated tools for the assessment of PAD are the Critical Care Pain Observation Tool (CPOT), the Richmond Agitation-Sedation Scale (RASS) and the Confusion Assessment Method for the ICU (CAM-ICU). 18 The optimal screening rates are 4 times per 12-hour shift for pain and agitation and once per 12-hour shift for delirium. 18 Although there is well-established evidence demonstrating the advantages of implementing the PAD guidelines, the adoption of the guidelines and adherence to them remain poor, 18 especially in community ICUs. 19 Barriers to guideline implementation include organizational, professional and personal factors such as lack of training, Research skills, knowledge and motivation for culture change. Only 3% of ICU nurses ranked delirium as the most important condition to evaluate; by comparison, 44% of them ranked level of consciousness as the most important, 23% ranked the presence of pain as the most important and 21% ranked improper placement of invasive device as the most important. 
28 ICU nurses also reported many barriers to delirium assessment including intubation (38%), the complexity of the tools for assessing delirium (34%) and the inability to complete assessments of delirium in sedated patients (13%). 29 Moreover, health care professionals considered delirium to be a complex but nonurgent condition, and there were variable management strategies as a result. 30 These observations suggest that health care professionals' attitudes about delirium management need to change if the barriers to the implementation of the PAD guidelines are to be overcome.
Multiple studies have looked at various interventions to improve the management of PAD, including education, 31 monitoring 31,32 and sedation quality feedback. 31 The educational intervention was associated with a 50% relative reduction in sedation-related adverse event rates, 31 whereas pain and delirium monitoring was associated with a decrease in mortality. 32 Barnes-Daly and colleagues evaluated the ICU Liberation Collaborative PAD implementation method called the ABCDEF bundle (awakening and breathing coordination, choice of drugs, delirium monitoring and management, early mobility and family engagement) in a community setting. They found that compliance with the ABCDEF bundle was associated with improved survival and more days free of delirium and coma. 33 Furthermore, Black and colleagues found that patients who received psychological care from family members demonstrated improved psychological outcomes at 4-12 weeks after critical illness. 34 However, only a limited number of studies have investigated the impact of a multifaceted and multidisciplinary intervention on PAD management. Moreover, most of the existing studies were conducted in academic ICUs 28,32,35 and thus it is difficult to extrapolate their findings to com munity ICUs. We therefore set out to conduct a quality improvement study to evaluate the effect of a multifaceted and multidisciplinary intervention to improve the assessment and treatment of PAD in a Canadian community ICU setting. We hypothesize that the implementation of a multidisciplinary intervention codeveloped by front-line health care staff would improve PAD management in a community ICU.
Design
This is a quality improvement study, with an uncontrolled before-and-after design. This study examines the impact of a multifaceted and multidisciplinary intervention targeting nurses, family members and physicians to improve PAD management in a Canadian community ICU.
Setting
We will conduct this study in the ICU of the St. Catharines Site, Niagara Health, a community hospital. The hospital is located in a medium-sized city in Ontario, Canada. Approximately 90 registered nurses provide ICU care. The centre has 1 level III medical-surgical ICU capable of caring for 14 adult patients in private rooms. The ICU is a closed unit with 24-hour intensivist coverage; the intensivists act as the patients' primary physician during their ICU stay. The nurse to patient ratio is 1:1 to 1:2. There is a dedicated ICU pharmacist, respiratory therapist, physiotherapist and dietitian. The health care team provides care to general medical, cardiac, respiratory, nephrological, oncological, general surgical, orthopedic and vascular surgical patients. The ICU is a locked unit but without restriction during family visiting hours. There is a nursing policy in place that stipulates that pain is to be assessed using the Numeric Pain Rating Scale (NPRS) at the beginning of each shift, after analgesic administration every 4 hours and as needed. Sedation level is assessed using the RASS 39 at the beginning of each shift and as needed, and delirium is assessed using the CAM-ICU. 40 Nurses are trained to use these validated tools during their orientation when they join the ICU team. The ICU nurses are responsible for titrating the doses and frequency of analgesics and sedatives according to patients' clinical status. There is no preexisting protocol for medication titration.
Eligibility criteria and sample size calculation
All adult patients (aged 18 yr and above) admitted to the ICU for more than 24 hours will be included in this study. There are no exclusion criteria for this study, as the PAD guidelines can be applied to all adult patients in the ICU. This study will incorporate nonprobability consecutive sampling. Using a priori determination, we calculated the sample size for this study using a 95% confidence interval. 41 On the basis of a previously conducted nurse-focused quality improvement study, expected differences in pain (8.2%), agitation (14.4%) and delirium (14.8%) management are anticipated (Carolyn Tan, Mercedes Camargo, Franziska Miller, et al. Niagara Health: unpublished data, 2019). Using a power of 80% (Z β = 0.20), we calculated that a minimum sample size of 277 patients is required. As a conservative estimate, we plan to include at least 280 patients.
Preintervention data collection
A dedicated research assistant will collect prospective data daily on all admitted ICU patients for 6 weeks (Appendix 1 available at www.cmajopen.ca/content/7/2/E430/suppl/DC1). On the basis of the volume of this ICU, the estimated number of patients enrolled in this period will be approximately 140. See the data analysis section for the details of the data collection.
PAD Advisory Committee
The intervention for this project is multifaceted and multidisciplinary. It focuses on the social and medical needs of the patients in the ICU in relation to the PAD guidelines. 18 The intervention targets 3 groups: nurses, family members and OPEN Research physicians. To facilitate the development of the intervention, we formed a PAD Advisory Committee. It comprises the ICU manager, 5 nurses, 2 physicians (intensivists), an ICU pharmacist and the ICU research coordinator. In addition to informing the development of our intervention, the members of the PAD Advisory Committee will act as local champions to engage, motivate and support front-line health care staff during the implementation of the intervention. More importantly, the PAD Advisory Committee will cultivate positive relationships with end users of the program to enhance PAD protocol adherence, obtain feedback from front-line staff and stimulate collaborative practice among front-line staff. We will hold regular meetings with the PAD Advisory Committee to ensure that we receive regular feedback on the progress of the study, monitor the implementation of the PAD intervention and make changes according to their feedback.
Development of intervention
We designed and developed the intervention over a 1-year period (September 2017 to August 2018) using the Model for Improvement to allow for the development and refinement of the intervention through plan-do-study-act test cycles to optimize adherence. Specifically, the PAD Advisory Committee designed and developed the nurse-focused components (educational modules and visual reminders), the family member focused components (interviews, educational pamphlet and educational video), the physician-focused component (multidisciplinary round script) and the multidisciplinaryfocused component (delirium poster). We circulated the interventional materials among front-line health care staff to receive feedback and we have iteratively refined the contents and the layout of the interventional materials to ensure optimal adoption by front-line staff during implementation. We developed and refined the interview guide for the family member interviews through plan-do-study-act test cycles by conducting interviews with family members and iteratively revising the interview guide.
Educational modules
We will implement an online educational program for all ICU nurses with 4 modules that were developed using the 2013 PAD guidelines. 18 The first module is a PAD program overview. This module will include information on our local PAD research program, postintensive care syndrome and basic pharmacology and pharmacokinetic properties of common ICU drugs in critically ill patients. The second module is the pain module. This module will introduce the CPOT. 18 It will also provide nurses with information on the pharmacology of commonly used nonopioid and opioid analgesics in the ICU. The third module is the agitation module. This module will introduce the RASS. 39 It will also provide nurses with information on the pharmacology of commonly used sedatives. The fourth module is the delirium module. This module will give an overview on delirium and will introduce the CAM-ICU 40 as a validated tool to screen for delirium in the ICU. It will also provide nurses with information on pharmacological and nonpharmacological treatment of delirium.
Visual reminders
We will place CPOT, RASS and CAM-ICU cue cards by each bedside to remind front-line nurses how to properly use these validated tools to screen for pain, agitation and delirium, respectively (Appendix 2 available at www.cmajopen.ca/ content /7/2/E430/suppl/DC1).
Family member focused components
Interviews A team of PAD volunteers (undergraduate students) will conduct in-person interviews (approximately 20 minutes in duration) with family members of all newly admitted ICU patients within 48-72 hours of admission. We will obtain consent before all interviews. The purpose of this intervention is to empower family members to participate in the PAD care of the patients by providing us with important information about patients' baseline cognitive function, mobility and use of visual and hearing aids (Appendix 3 available at www.cmajopen.ca/ content/7/2/E430/suppl/DC1).
Educational pamphlet
We will provide an educational pamphlet (developed with permission from www.icudelirium.org) in the ICU waiting room for family members. The purpose of this component is to provide educational materials to family members about delirium (Appendix 4 available at www.cmajopen.ca/content/7/2/E430/ suppl/DC1).
Educational video
An educational video on delirium (produced by Osmosis) will be made available on a dedicated computer in the ICU waiting room for family members. The purpose of this component is to provide information on delirium to family members through a different educational medium.
Multidisciplinary round script
We will post a script (Appendix 5 available at www.cmajopen.ca/ content/7/2/E430/suppl/DC1) for use during multidisciplinary rounds on the workstation on wheels as a reminder to intensivists to order target RASS score, to discuss PAD assessment and treatment and to encourage nurses to achieve adequate pain control and light sedation.
Poster
We will post our delirium poster in the ICU to remind all front-line multidisciplinary health care staff, patients and family members about the importance of detection, treatment and prevention of delirium (Appendix 6 available at www.cmajopen.ca/ content/7/2/E430/suppl/DC1).
Postintervention data collection
Four weeks after the implementation of the intervention, a dedicated research assistant will collect data on all admitted ICU patients each day for 6 weeks. On the basis of the volume of this ICU, we estimated that approximately 140 patients will be enrolled in this period.
Data analysis
We will analyze quantitative data with descriptive and analytical statistics, using SPSS version 26. We will examine numerical data using means and standard deviations or medians and 25th to 75th percentiles according to data distribution. We will use paired t tests for parametric analyses and Wilcoxon signed-rank paired difference tests for nonparametric analyses comparing before-and-after intervention data of 2 related samples. As our objective is to evaluate the adherence to guideline recommendations on pain, agitation and delirium assessment by nurses per day, we will measure their adherence to our intervention in patient-day units. The level of significance is set at α = 0.05. Our primary outcomes will be (a) the proportion of patient-days with pain assessment using the NPRS (for patients who can verbally report pain) or the CPOT (for patients who cannot verbally report pain) at least 4 times per shift, (b) the proportion of patient-days with agitation assessment using the RASS at least 4 times per shift and (c) the proportion of patient-days with delirium assessment using the CAM-ICU at least once per shift.
Our secondary outcomes will be (a) the average number per patient-day of pain assessments using the NPRS (for patients who can verbally report pain) or the CPOT (for patient who cannot verbally report pain), (b) the average number per patient-day of agitation assessments using the RASS, (c) the average number per patient-day of delirium assessments using the CAM-ICU, (d) the proportion of patientdays with benzodiazepine use, (e) the proportion of patientdays with significant pain defined by NPRS scores of 4 or higher or CPOT scores of 3 or higher, (f) the proportion of patient-days with optimal sedation level defined by a RASS score of between -2 and 0 or a target RASS score at least 50% of the time, (g) the proportion of patient-days with oversedation defined by a RASS score less than -2 at least 50% of the time, (h) the proportion of patient-days with agitation defined by a RASS score greater than 0 at least 50% of the time and (i) daily percent patient-days with a positive delirium screen using the CAM-ICU.
Our balancing measures will include the proportion of patient-days with physical restraint use and inadvertent extubation. Our control measures will include the proportion of patient-days during which mechanically ventilated patients are on stress ulcer prophylaxis and the proportion of patient-days during which pharmacological or mechanical prophylaxis for deep vein thrombosis is used.
Ethics approval
We have obtained local ethics approval from the Hamilton Integrated Research Ethics Board (HiREB 18-040-C), with a waiver of the need to obtain consent as this is a quality improvement study. All data will be anonymized and stored in password-protected computers in a locked research office.
Knowledge translation
We will present our data at the annual scientific meeting of the European Society of Intensive Care Medicine and the Critical Care Canada Forum. We will also publish the results of this study in peer-reviewed academic journals. Should this multifaceted and multidisciplinary intervention be effective in improving PAD management, we will implement it in other Canadian community ICUs.
Interpretation
This quality improvement study will report on the baseline rate at which target RASS scores were ordered by physicians; the rate of PAD screening, the rate of optimal pain and sedation management and the rate of delirium documented by bedside nurses in a Canadian community ICU. We selected these process and outcome metrics on the basis of the recommendations of the 2013 PAD guidelines. 18 This study will also report on the impact of a multifaceted and multidisciplinary intervention on PAD management as reflected by the process and outcome metrics described above. The results of this study will provide information on the feasibility of implementing multifaceted and multidisciplinary PAD interventions in community ICUs.
Limitations
The uncontrolled before-and-after study design may overestimate the effect of the intervention. However, this study design represents a pragmatic approach and reflects real-life experience in a Canadian community ICU setting, where research infrastructure is generally lacking. We chose this study design because it allowed us to conduct a quality improvement study with the greatest possible degree of scientific rigour given the limitations of a community ICU. Lastly, because resources are limited in the community ICU setting, we will not collect information on patient demographics that would allow for adjustment for confounders (e.g., severity of illness, age, history of dementia, delirium, substance use). However, our primary outcomes should not be affected by any major patient-related confounders because according to the PAD guidelines all ICU patients, regardless of severity of illness, should have regular PAD assessments.
Conclusion
This uncontrolled before-and-after study in a Canadian community ICU proposes to determine the effect of a multifaceted and multidisciplinary intervention on the management of PAD. |
/*
 * zdbrestrpos() -- restore scan to last saved position
 *
 * Index access method "restrpos" callback.  Currently a stub: it only
 * emits a NOTICE (so invocations are visible in the server log) and
 * restores no scan state.
 * NOTE(review): if mark/restore is ever exercised against this AM, real
 * position restoration must be implemented here.
 */
Datum zdbrestrpos(PG_FUNCTION_ARGS) {
elog(NOTICE, "zdbrestrpos()");
PG_RETURN_VOID();
}
<filename>Intro/program_12/testsortDowning.cpp
#include <iostream.h>
#include <iomanip.h>
#include <conio.h>
//the three functions:
// Forward declarations. All three operate on the parallel 10-row arrays
// built in main(): `name` holds 20-char names, `grades` holds one score
// per student (a 10x1 array).
void findaverage(float grades[][1]);
void highaverage(char name[][20], float grades[][1]);
void lowaverage(char name[][20], float grades[][1]);
// Interactive driver: reads ten grades for the fixed roster, then reports
// the class average, the top scorer's full name, and the low scorer's
// initials. Repeats until the user chooses to exit.
int main()
{
    char name[10][20] = { {"<NAME>"}, {"<NAME>"}, {"<NAME>"}, {"<NAME>"}, {"<NAME>"},
    {"<NAME>"}, {"<NAME>"}, {"<NAME>"}, {"<NAME>"}, {"<NAME>"} };
    float grades[10][1];
    int choice = 1;

    do {
        cout<<"TEST SORT \n";
        cout<<"This program has ten names stored in a character array. \n";
        cout<<"You may enter grades for each of these people and then \n";
        cout<<"this program will run three functions: \n";
        cout<<"1. Find the class average. \n";
        cout<<"2. Find the highest score and print that person's full name. \n";
        cout<<"3. Find the lowest score and print that person's initials. \n\n";

        // Collect one grade per student.
        for (int student = 0; student < 10; student++)
        {
            cout<<"Enter the grade that "<<name[student]<<" recieved: ";
            cin>>grades[student][0];
        }
        cout<<endl;

        // Produce the three reports.
        findaverage(grades);
        highaverage(name, grades);
        lowaverage(name, grades);

        // Re-prompt until a valid menu option is entered.
        do {
            cout<<"\nTo continue press 1. To exit press 2. -> ";
            cin>>choice;
        } while (choice != 1 && choice != 2);
        clrscr();
    } while (choice == 1);

    cout<<"Thanks for using the TEST SORT PROGRAM.";
    return 0;
}
// Prints the mean of the ten stored grades, formatted to two decimal places.
void findaverage(float grades[][1])
{
    float sum = 0;
    for (int i = 0; i < 10; i++)
        sum += grades[i][0];
    // Class size is fixed at ten students.
    float average = sum / 10;
    cout<<"The average for this group of people is: "<<setiosflags(ios::fixed)<<setprecision(2)<<average<<endl;
}
// Prints the full name and score of the student with the highest grade.
void highaverage(char name[][20], float grades[][1])
{
    // Track the index of the best score; strictly-greater comparison keeps
    // the earliest student on ties, matching the original scan.
    int best = 0;
    for (int i = 1; i < 10; i++)
    {
        if (grades[i][0] > grades[best][0])
            best = i;
    }
    cout<<"The person with the highest grade is: "<<name[best]<<" with a score of "<<grades[best][0]<<endl;
}
// Prints the initials and score of the student with the lowest grade.
//
// Bug fix vs. the original: the initials scan walked all 20 bytes of the
// name buffer regardless of string length, reading uninitialized bytes past
// the NUL terminator and — for a space at index 19 — reading name[lname][20],
// one byte past the array. The scan now stops at the terminator.
void lowaverage(char name[][20], float grades[][1])
{
    float min = grades[0][0];
    int lname = 0;
    // Locate the lowest grade; the earliest student wins ties.
    for (int a = 1; a < 10; a++)
    {
        if (grades[a][0] < min)
        {
            min = grades[a][0];
            lname = a;
        }
    }
    cout<<"The person with the lowest grade is: ";
    // First initial is simply the first character of the name.
    cout<<name[lname][0];
    // Then print the character following each space (so middle names also
    // contribute an initial, as the original intended).
    for (int a = 0; name[lname][a] != '\0'; a++)
    {
        if (name[lname][a] == ' ' && name[lname][a + 1] != '\0')
        {
            cout<<name[lname][a + 1];
        }
    }
    cout<<" with a score of "<<min<<endl;
}
|
These days we have drug fiends, wife-beaters, wimps, traitors, and perverts for role models. We used to have heroes. But heroes were found to be “racist”:
The California Assembly refused Thursday to honor actor John Wayne after a sharp debate in which he was accused of being a racist. Assembly Concurrent Resolution 137, which would have declared May 26 – his birthday – as “John Wayne Day,” garnered just 35 votes, six short of the required majority, while 20 members voted against the measure and 25 members refused to vote. The division was largely along partisan lines, although several Democrats joined minority Republicans in voting for the Wayne resolution, which was offered by Assemblyman Matthew Harper, R-Huntington Beach and had been approved unanimously by the Assembly Rules Committee three days earlier.
Despite his role in helping Vietnamese refugees resettle in Orange County after Democrats turned Southeast Asia over to genocidal communist maniacs, several black and Hispanic assembly members proclaimed him to be “racist” — most likely because he personified a healthier America that they hate and are in the process of eradicating.
Assemblywoman Lorena Gonzalez, D-San Diego, also complained that in Wayne’s Western movies, “there was a lot of slaughtering of Native Americans…” and that he personally sanctioned white occupation of Indian lands.
Why not just skip to the chase and abolish the USA? Why do progressives have to flush it down the toilet by these excruciating increments?
The Duke stares in horror at what is becoming of his country.
On a tip from Byron. |
//creates a new group entry in firestore. group_ID is set here. Other values must be set before calling createGroup.
public void createGroup(Group group){
this.databaseGroup = FirebaseFirestore.getInstance().collection("Groups").document();
group.setGroupId(databaseGroup.getId());
Map<String, Object> map = group.toMap();
databaseGroup.set(map);
} |
Characterization of Indoor Air Quality in Relation to Ventilation Practices in Hospitals of Lahore, Pakistan
Temporal variations of particulate matter ( PM) and carbon dioxide ( CO 2 ) in orthopedic wards and emergency rooms of different hospitals of Lahore, Pakistan were investigated. Hospitals were classified into two groups, I (centrally air-conditioned) and II (non-central air-conditioned) based on the ventilation system. Statistical analysis indicated significantly lower PM and CO 2 levels in centrally air-conditioned hospitals in comparison to non-central air-conditioned. The low indoor-outdoor (I/O) ratio of PM 2.5 in the ward and emergency rooms of group I (0.62, 0.45) as compared to group II (0.70, 0.83), respectively, suggested that indoor spaces equipped with central air-conditioning systems efficiently filter particulates as compared to non- central air conditioning systems. Apart from the ventilation type, increased visitor and doctors’ activities, and cleaning sessions were observed to contribute significantly to indoor air quality. This study adds up to the understanding of temporal variations in PM emissions and the role of ventilation systems in context of hospitals in the urban centers in Pakistan. The findings can inform the development of intervention strategies to maintain the appropriate air quality in health care built environment in developing countries. particulates, carbon dioxide (Co 2 ) , volatile organic compounds (VoCs) , carbon monoxide (Co) , formaldehyde (CH 2 o) , nitrous oxide (N 2 o), glutaraldehyde (C 5 H 8 o 2 ), allergens, and bioaerosols (Śmiełowska
The typical sources of these emissions include cleaning activities such as floor sweeping, dusting of surfaces, movement of people, medical procedures, indoor activities, infiltration from outdoor air, and ventilation practices (Baurès et al. 2018;Capolongo 2016). These emissions are significantly affected by meteorological factors, human activities, building design, management practices, seasonal variations, ventilation mode, and its maintenance (Moscato et al. 2017;Pereira et al. 2017). Among various air pollutants, particulate matter (PM) is of major concern. Apart from the indoor origin, particles from ambient sources can also become a vehicle for infectious aerosols and other adsorbed pollutants while being irritants on their own (Morakinyo et al. 2019;Tellier et al. 2019). The most concerning fractions of PM are fine (≤ to PM 2.5 ) and ultra-fine (≤ to PM 0.1 ) particles (Sturm 2016), and high levels of these whether of indoor or outdoor origin, contribute to the four leading causes of deaths in the world: heart diseases, Chronic obstructive Pulmonary Diseases (CoPD), strokes, and cancer (WHo 2016). Apart from direct health implications, PM 2.5 can be a potential indicator of the possible existence of contaminants that could be risky for patients and health care workers (Ghio 2014;Milton et al. 2013). Therefore, limiting and controlling these particles can be helpful to manage hospital-acquired infections (Morawska & He 2014).
Ventilation plays an important role in controlling and removing contaminants from indoor and outdoor sources. Different type of ventilation system can affect the PM 10 dispersal in the buildings and related risks to indoor air quality and occupant's health (Ali et al. 2017a). Many studies have discussed the role of different ventilation types in managing air quality in health care settings (Beggs et al. 2008;yau et al. 2011). Moreover, various studies have used Co 2 emissions as an indicator of ventilation adequacy in health care environments (Gilkeson et al. 2013;Sribanurekha et al. 2016). The outdoor pollutants infiltrating indoor air are diluted or removed by the ventilation system in place. Hence, another indicator frequently used to quantify the adequacy of ventilation is the indoor-outdoor ratio (I/o). The I/o ratio of PM has been broadly used in several studies to describe the association between indoor and outdoor air by offering a direct, simple understanding of the relationship (Bucur & Danet 2019;He et al. 2019). However, this ratio is flexible depending upon various factors such as building design, indoor pollutant sources, particle deposition, penetration frequency, and air exchange rates (Shrestha et al. 2019). Most of the hospital facilities in Pakistan are localized in the urban areas, where high levels of air pollution have been consistently reported (Ahmad et al. 2019;Ali et al. 2017b). The country has faced over 128,000 deaths related to air pollution during 2017, and undergone a sheer increase in PM 2.5 pollution since 2010 with the population-weighted and annual exposure levels of PM 2.5 measured to be 76 and 58 µg/m 3 , respectively (Health Effects Institute 2019).
Although ambient air quality is monitored via fixed-site stations in urban centers, data on air quality in health care environments is scarce (Asif et al. 2018;Gulshan et al. 2015;Nimra et al. 2015).
Presently, available evidence shows that the design and management of buildings, along with temperature, humidity, and ventilation rate can strongly influence the particulate and gaseous emission in health care facilities (Gola et al. 2019). The improved operations, housekeeping, and maintenance can help to reduce pollutant emissions in the microenvironments (Idris et al. 2020). There is a need to characterize indoor air quality in health care built environments to gain a better understanding of air pollutant emissions dynamics under different ventilation strategies in the context of hospitals in the urban centers in Pakistan. The current investigation was carried out as a case study to gain insights into the temporal characterization of particulate and gaseous emissions in the orthopedic wards and emergency rooms of public and private sector hospitals of Lahore, Pakistan with different ventilation systems in place.
MATERIALS AND METHODS
Six hospitals (four public and two private) were selected from Lahore, based on their ventilation system as described by Jung et al. (2015). Group I hospitals used central air conditioning by the Air Handling Unit (AHU) while Group II hospitals used non-central air conditioning by split type. Permission was obtained from the hospital administration before sampling. The orthopedic wards and emergency rooms were selected for monitoring based on the findings of our previous study (Nimra et al. 2015) and a high risk of infection in orthopedic patients' rooms. figure 1 depicts the general setup of the central air-conditioned (AHU) and noncentral air (split type) conditioned hospital room. Brief characteristics of hospitals, collected from administration and official websites are given in Table 1. From each hospital, two sites i.e. orthopedic wards and emergency rooms were monitored for PM 2.5 , PM 10, Co 2 emissions along with relative humidity (%RH) and temperature (°C). While the outdoor (ambient) site was monitored only for PM 2.5 . The study was carried during January -December 2017 with each site monitored four times during the whole year at an interval of three months.
Mass concentration of PM 2.5 and PM 10 was measured using real-time monitors: TSI DUST TRAK TM DRX 8533, and TSI DustTrak, Model 8520. Relative humidity (%RH), temperature (°C), and carbon dioxide (Co 2 ) indoors were monitored using Aeroqual 500 series. DustTrak DRX model 8533 was employed for indoor sampling while ambient sampling was conducted for PM 2.5 only using DustTrak model 8520. Both DRX model 8533 and Aeroqual 500 series were factory calibrated before initiation of the study while DustTrak 8520 was calibrated against DRX by running both instruments side by side for four hours and a correction factor of 0.40 was calculated.
Each site was sampled from 9 a.m. to 5 p.m. to characterize the air quality and assess air hygiene levels in hospitals. The zero calibration for each instrument was done before sampling at each site. The instrument was placed at a height of 1 m and a distance of 1.5 m away from doors and/or windows. PM 2.5 monitoring in the outdoors was conducted in parallel to indoor sampling where the equipment was positioned at a height of 1 m above the ground and 50 m away from the main entrance of the building.
Time activity diaries were maintained for each sampling. The major defining activities identified at the selected sites were visiting hours, cleaning activities, doctor's round, and peak emergency hours (described below) which were observed to be conducted at specified times hence making it easy to study their impact upon air quality. One-hour data during which these specific activities were performed in each sampling campaign was separated for further analysis. Visiting hours: since the visiting hours are defined in the hospitals, the presence of the highest number of visitors and their physical movement were observed. Cleaning: including housekeeping, cleaning of floors and surfaces. Doctor's round: includes doctors visitation hours and movement of nurses for the general administration of medicine. Peak emergency hours: includes peak emergencies dealing hour with maximum visitors in an emergency.
The data was confirmed to be non-parametric by Kolmogorov-Simonov and Levene test which was analyzed by parametric test after normalizing data. An independent t-test was used to compare mean levels of PM and Co 2 between two groups. for the activities, one way ANoVA by Tukey HSD, LSD, and Games-Howell post hoc was used for analysis in the wards, while an independent t-test was used for emergency rooms. Moreover, to access the impact of outdoor PM 2.5 , hierarchical regression was performed controlling for confounding variables using SPSS v. 21.0. ReSULTS PM 2.5 and PM 10 concentration in the wards and emergency rooms varied with the type of ventilation system (Table 2(a) -2(b)) and figure 2(a) -2(b)). In the wards, mean PM 2.5 and PM 10 were higher in group II (119 ±61 and 150 ±75 µg/m 3 ), as compared to group I hospitals (89 ±56 and 117 ± 74 µg/m 3 ) (Table 2(a)). Similarly, in the emergency rooms, mean PM 2.5 and PM 10 were higher in group II (151± 85 and 183 ± 90 µg/m 3 ), compared to group I hospitals (82 ± 31 and 94 ± 30 µg/m 3 ) (Table 2(b)). Independent sample t-test indicated a statistically significant difference between groups I and II of wards as well as emergency rooms at 0.05 significance level. Different activities in the wards and emergency rooms were observed to produce a pronounced impact upon PM 2.5 and PM 10 concentrations. In the wards, mean PM 2.5 and PM 10 were highest during visiting hours in both groups as compared to doctor's round and cleaning activities (Table 2(a) and figure 2(c) -2(d)). one-way ANoVA showed statistically significant differences among the three different activities in the wards for both groups. The independent sample t-test also showed statistically significant differences between the groups. In the emergency rooms, mean PM 2.5 and PM 10 were highest during peak emergency hours in both groups as compared to cleaning activity (Table 2(b) and figure 2(e) -2(f)). 
one sample t-test and independent sample t-test showed a statistically significant difference in PM 2.5 and PM 10 concentrations during different activities, both within and between groups I and II, respectively. c d e f The mean PM 2.5 concentration in the outdoor environment monitored in parallel to the wards indoor was 152 ± 33 and 176 ± 59 µg/m 3 for the group I and II, respectively, while the mean PM 2.5 concentration in outdoor parallel to emergency room indoor was 179 ± 52 and 184 ± 51 µg/m 3 , respectively (Table 2(a) -2(b)). A significant difference was observed for PM 2.5 outdoor concentration for the group I and II in the wards, but not for emergency rooms. The average I/o ratio of PM 2.5 in the wards was 0.62 ± 0.40 and 0.70 ± 0.32 for the group I and II, respectively. However, the average I/o ratio of PM 2.5 in the emergency rooms was 0.45 ± 0.10 and 0.83 ± 0.40 for the group I and II, respectively (Table 2(a) -2(b) and figure 3). Hierarchical regression analysis was employed, considering outdoor as an independent and indoor PM 2.5 concentration as a dependent variable while controlling indoor confounding variables (relative humidity, carbon dioxide, temperature, and building age). It showed 38% variations in the PM 2.5 levels indoors were contributed by outdoor in group II, while for group I the impact of outdoor PM 2.5 was not statistically significant (p = 0.687). In the emergency rooms, 54% variations in group II were contributed by outdoor whereas, for group I the impact of outdoor PM 2.5 was also non-significant (p = 0.138) ( Table 3). Average relative humidity and temperature levels in the wards were 47 ± 04 and 26 ± 02 in group I, and 37 ± 09 and 29 ± 05, respectively, in group II. In emergency rooms, the average relative humidity and temperature in the wards were 37 ± 14 and 27 ± 02 in group I, and 45 ± 06 and 28 ± 04, respectively, in group II (Table 4). The %RH showed a positive direct relation with PM 2.5 at a significance level of 0.05.
Co 2 emissions in the wards and emergency rooms varied with the form of ventilation system. In the wards, the average concentration of Co 2 in groups I and II was 712 ± 273 and 1093 ± 510 ppm, respectively; while in the emergency rooms, the mean levels in groups I and II were 782 ± 329 and 939 ± 421 ppm, respectively. The independent sample t-test showed a statistically significant difference in Co 2 levels in the wards and emergencies of both groups at a 0.05 significance level (figure 4).
DISCUSSION
Monitoring and control of PM 2.5 particles in hospitals can assist health care personnel to gauge air hygiene and the efficacy of control measures such as ventilation systems (Pankhurst et al. 2012;Verkkala et al. 1998). Being capable of penetrating deep into the alveoli and hence the bloodstream, these fine particles play a major role in the transmission of microbial infections by adherence (Kressel et al. 2004); owing to their small size they can remain airborne for longer durations and could potentially carry infectious or other potential diseases causing agents (Armadans-Gil et al. 2013;Macher et al. 2019). Therefore, PM 2.5 particles may serve as a potential indicator of the existence of contaminants that can be fatal to immunecompromised patients in the hospital. Consequently, studies have been done to assess levels of particulate matter in relation to ventilation practices. Jung et al. (2015) discussed that centrally air-conditioned significantly reduce PM 2.5 and PM 10 as compared to non-centrally airconditioned (split and window type). Moreover, they suggested that increased human activities and poor management practices result in high levels of particulates. (2008) reported that ventilation type, human activities, and management practices significantly affect PM and Co 2 emissions. Indoor PM levels in the hospitals have been reported in various neighboring countries in South east Asia including China and the Philippines. In a study conducted in the urban tertiary care hospital of the Philippines, higher levels of PM 2.5 were reported in naturally ventilated areas as compared to mechanically ventilated (centrally airconditioned) areas (Lomboy et al. 2015).
In China (Wang et al. 2006a), four public hospitals with different ventilation modes were evaluated for air quality. They reported average PM 2.5 and PM 10 ranged between 80-108 and 93-145 µg/m 3 respectively, which are almost the same for minimum levels but lower than observed levels (82-151 and 94-183 µg/m 3 , respectively) in the present study. In another study in Iran (Mohammadyan et al. 2019), the average PM 2.5 levels (range: 38-55 µg/m 3 ) were considerably less, but PM 10 (range: 112-227 µg/m 3 ) was significantly higher than the levels reported in the current study.
Different activities performed in indoor environments have a major effect on the generation and re-suspension of PM (ferro et al. 2004) as seen in the current study as well. Activities like cleaning, high visitor density, and doctors round increased the airborne particulates in hospitals. Movement of people can lead to resuspension of settled dust in different indoor environments (Gaidajis & Angelakoglou 2014;Jung et al. 2018;Pereira et al. 2017;Sidra et al. 2015;Tang et al. 2009). In the present study, elevated levels of PM 2.5 and PM 10 in the wards and emergency rooms were observed during high visitor density and peak emergency times, respectively (Table 2(a) -2(b)); and coarse particles (PM 10 ) mass concentration was higher than fine particles (PM 2.5 ); a trend repeatedly observed in various previous studies (Ahwah et al. 2015;Doğan 2019;El-Sharkawy et al. 2014;Wang et al. 2006aWang et al. , 2006b. Additional sources contributing to PM levels include the curtains and carpets in the hospitals (Verma & Taneja 2011). In another study, the average levels of PM 2.5 in different wards of a centrally air-conditioned hospital of Lahore ranged between 69-488 µg/m 3 . These levels were far higher as compared to PM 2.5 levels of 89 µg/m 3 in centrally air-conditioned wards in the present study. This was probably due to ongoing staff strikes, cracks in buildings, and decreased frequency of cleaning activities as reported by Gulshan et al. (2015).
The air pollutants generated outdoors from anthropogenic sources such as traffic have a significant association with indoor air (Radaideh et al. 2016). This was observed in the current study where the hierarchical regression showed a significant impact of outdoor PM 2.5 on indoor levels in non-centrally air-conditioned hospitals and, no significant impact in centrally airconditioned hospitals (Table 3). This could be due to the efficient filtration of outdoor PM 2.5 in mechanically ventilated hospitals as also reported by Montgomery et al. (2015). These findings are also supported by the I/o ratios used to estimate the difference between indoor and corresponding outdoor concentrations which are dependent upon the location, different activities, building design, and ventilation type (Diapouli et al. 2013;yang et al. 2018). In this study, the I/o of PM 2.5 (0.45-0.62) were lower in centrally air-conditioned hospitals as compared to 0.70-0.83 in non-centrally air-conditioned hospitals which is suggestive of improved filtration of particulates by filtration system as reported in various studies (Cavallo et al. 1993;Chen & Zhao 2011;Peng et al. 2017). I/o ratios can be predictive of the IAQ of the hospitals and other buildings since naturally ventilated buildings or ones with inadequate air filtration systems have a higher I/o ratio (ediagbonya et al. 2013;Mohammadyan et al. 2019;Wang et al. 2006a). Apart from ventilation, the other factor observed to affect I/o for PM 2.5 in the present study was high visitor density. The hospitals with high visitor density and non-centrally air-conditioned were found to have a high I/o ratio for particulates as compared to low visitor density and centrally air-conditioned hospitals. This is consistent with the observations of Mohammadyan et al. (2017Mohammadyan et al. ( , 2016, and Tang et al. (2009). 
This situation calls for stringent air quality management practices in hospitals, particularly located in urban centers with high levels of particulate pollution.
These results can be useful in the development of efficient emissions control strategies in hospitals. The particle composition, concentration, shape, and size, hygroscopic growth, deposition, and re-suspension have been reported to be dependent on RH, although the process is complex and involves various other factors (Qian et al. 2014). The management practices, particularly, ventilation play a significant role in controlling the humidity, gases, and PM as reported by various studies (Escombe et al. 2007;Seppänen & Kurnitski 2009) and it was observed in this study that RH exhibited a direct relation with PM 2.5 at a significance level of 0.01.
Another parameter that predicts IAQ is Co 2 . Since occupant density is one of the prime Co 2 sources, the indoor concentration of Co 2 can be used to assess the adequacy of ventilation, pollutant concentration associated with occupant activity, and airborne infection risk (Rudnick & Milton 2003). In the present study, the mean Co 2 concentration was 712-782 ppm in centrally air-conditioned sites, whereas 939-1093 ppm in the non-centrally conditioned sites while, ASHRAe 2017 recommends permissible levels to be 1,000 ppm. Similar levels of Co 2 i.e. 643-875 ppm were reported in centrally air-conditioned ambulatory care centers of Malaysia, where the HVAC system was reported to provide adequate ventilation and improved IAQ (Sari et al. 2019). In another study conducted in a government hospital in Thailand, Co 2 levels reported to be 267-1351 ppm, suggesting high patient numbers and insufficient ventilation to cause impaired air quality (Luksamijarulkul et al. 2019). Moreover, comparatively higher Co 2 levels in non-centrally air-conditioned hospitals were also reported by many researchers as well (fonseca et al. 2019; zhou et al. 2015). on the contrary, some studies have reported a high concentration of Co 2 in mechanically ventilated indoor spaces as compared to naturally ventilated indoors (Jurado et al. 2014;Sribanurekha et al. 2016). This could be because other factors in addition to ventilation, such as occupant density and activities of the occupants also affect the indoor concentration of Co 2 . It was noticed in the current study that hospitals can be a high source of PM that may pose serious health implications to immune-compromised patients as well as health care personnel. Various other studies have reported serious health problems in the health care facilities having a high concentration of PM and gaseous emissions like Co 2 and VoCs (Bessonneau et al. 2013;Su et al. 2018).
The exposure risk to the patients and health care personnel can be significantly reduced by enforcement of existing ventilation guidelines and improved management practices in the hospitals.
CONCLUSION
The effective management of air quality in the hospitals needs knowledge of spatio-temporal variations in pollutants. The centrally air-conditioned hospitals were found to improve IAQ by reducing PM 2.5 and PM 10 and Co 2 emissions as compared to non-centrally airconditioned hospitals. Among various activities conducted in hospital premises, the highest PM and Co 2 concentrations were observed during visiting hours, suggesting a pronounced effect of human activities in determining air quality. The indoor PM 2.5 concentrations in non-central air-conditioned hospitals showed a significant association with outdoor concentrations, signifying the impact of ambient air quality in urban centers on indoor quality. The real-time monitoring of particulates and Co 2 can help to inform and evaluate the intervention strategies to maintain air hygiene in health care built environments. However, this study delivers a snapshot view of particulate and Co 2 concentration from hospitals. further studies should be conducted to understand the nature and magnitude of emissions in hospitals. Specifically, the biological and chemical characterization of PM emissions should be done for better air quality management in hospitals.
ACKNOWLEDGEMENTS
The present study was partially funded by the Higher Education Commission, Pakistan under grant number IRSIP 35 BMS 06, awarded to Afzal Nimra for her doctoral research. The authors declare no conflict of interest. |
/// Takes a pointer to a string from C and copies it into a Rust-owned `CString`.
pub unsafe fn ptr_to_cstring(ptr: *mut c_char) -> CString {
// expect that no strings are longer than 100000 bytes
let end_ptr = memchr(ptr as *const c_void, 0, 100000);
let len: usize = end_ptr as usize - ptr as usize;
let slice: &[u8] = slice::from_raw_parts(ptr as *const u8, len);
CString::new(slice).expect("Unable to convert the slice into a CString")
} |
def generate_polyphonic_sequence(
    self, num_steps, primer_sequence, temperature=1.0, beam_size=1,
    branch_factor=1, steps_per_iteration=1, modify_events_callback=None):
  """Generate a polyphonic event sequence.

  Thin wrapper that forwards every generation setting unchanged to the
  shared `_generate_events` routine.

  Args:
    num_steps: Total number of steps to generate.
    primer_sequence: Sequence used to prime generation.
    temperature: Sampling temperature; higher values give more random output.
    beam_size: Beam width used during search.
    branch_factor: Number of candidates expanded per beam entry.
    steps_per_iteration: Steps generated per search iteration.
    modify_events_callback: Optional hook invoked to mutate events between
      iterations.

  Returns:
    Whatever `_generate_events` returns (the generated event sequence).
  """
  return self._generate_events(
      num_steps, primer_sequence, temperature, beam_size, branch_factor,
      steps_per_iteration, modify_events_callback=modify_events_callback)
def _set_parameter(self, name, value):
try:
parameter = self.get_api().getParameter(name)
parameter.setValue(value)
self.get_api().updateParameter(parameter)
except ParameterNotFound:
parameter = Parameter(name=name, value=value)
self.get_api().createParameter(parameter) |
#include <iostream>
#include <algorithm>
#include <cmath>
#include <vector>
#include <set>
#include <map>
#include <string>
#include <numeric>
#include <cstdlib>
using namespace std;
using ll = long long;
using vi = vector<int>;
using vl = vector<ll>;
using pii = pair<int, int>;
using ull = unsigned long long;
#define FOR(i,a,b) for(int i = a; i < b; i++)
#define read(a, n) FOR(iiiii, 0, n) cin >> a[iiiii]
#define write(a, n) FOR(iiiii, 0, n) cout << a[iiiii] << " "
#define all(a) a.begin(), a.end()
// Inclusive range test: true iff l <= v <= r.
bool in_range(int v, int l, int r) {
    return !(v < l) && !(v > r);
}
// Modulus for all combinatorial computations.
long long mod = 1e9 + 7;
// Memo table for C(n, k) mod `mod`. -1 marks "not computed yet": the old
// sentinel 0 collided with legitimate results that are 0 modulo a prime,
// silently forcing full recomputation of those entries.
std::vector<std::vector<long long>> nk(2000, std::vector<long long>(2000, -1));
// Binomial coefficient C(n, k) modulo `mod`, via Pascal's rule with
// memoization. Requires 0 <= n, k < 2000; returns 0 when k > n.
long long cnk(int n, int k) {
    if (k > n) return 0;
    if (k == 0 || n == k) return 1LL;
    long long& memo = nk[n][k];
    if (memo != -1) return memo;
    memo = (cnk(n - 1, k - 1) + cnk(n - 1, k)) % mod;
    return memo;
}
void solve() {
int n, m, x;
cin >> n >> m >> x;
vector<pii> a(n);
FOR(i, 0, n) {
cin >> a[i].first;
a[i].second = i;
}
sort(all(a));
reverse(all(a));
vi res(n);
set<pii> b;
FOR(i, 0, m) {
b.insert({0, i});
}
FOR(i, 0, n) {
auto it = b.begin();
pii n = (*it);
n.first += a[i].first;
res[a[i].second] = n.second + 1;
b.erase(it);
b.insert(n);
}
int mmin = INT32_MAX;
int mmax = -1;
for(auto& p: b) {
mmin = min(mmin, p.first);
mmax = max(mmax, p.first);
}
if (mmax - mmin > x) {
cout << "NO\n";
}
else {
cout << "YES\n";
write(res, n);
cout << "\n";
}
}
// Entry point: read the number of test cases, then solve each in turn.
int main() {
    int cases = 1;
    cin >> cases;
    while (cases-- > 0) {
        solve();
    }
}
|
// HasUndefined returns whether the call has undefined arguments
func (c *call) HasUndefined() bool {
for i := range c.Args {
if c.Args[i] == nil {
return true
}
if basic, ok := c.Args[i].(*types.Basic); ok {
if basic.Kind() == types.Invalid {
return true
}
}
if strings.Index(c.Args[i].String(), "invalid type") >= 0 {
return true
}
}
return false
} |
def apply(self, solution):
    """Return a mutated copy of `solution`.

    A single uniform draw is compared against a 1/size rate: when the draw
    falls at or below the rate, every element is shifted by a sigma-scaled
    fraction of the [mini, maxi] range; otherwise the data is left as-is.
    The result is passed through `_repair` before returning.

    NOTE(review): the random draw happens once for the whole solution, so
    mutation is all-or-nothing per call rather than per element — confirm
    this is the intended operator.
    """
    size = solution.size
    rate = float(1 / size)
    mutated = solution.clone()
    draw = random.uniform(0, 1)
    new_data = []
    for value in solution.data:
        if draw > rate:
            new_data.append(value)
        else:
            new_data.append(value + self._sigma(size) * (self.maxi - self.mini))
    mutated.data = new_data
    # Clamp/repair values back into the valid domain.
    mutated.data = self._repair(mutated)
    return mutated
package config
import (
"fmt"
"path/filepath"
"github.com/d3ta-go/system/system/utils"
"github.com/spf13/viper"
)
// NewConfig loads the application configuration from `path` (with fallback
// lookup locations), starts watching the file for changes, and returns the
// unmarshalled Config together with the underlying viper handle.
func NewConfig(path string) (*Config, *viper.Viper, error) {
	defaultConfigFile, err := GetConfigFilePath(path)
	if err != nil {
		return nil, nil, err
	}

	v := viper.New()
	v.SetConfigFile(defaultConfigFile)
	if err := v.ReadInConfig(); err != nil {
		// Return the error instead of panicking: a missing or malformed
		// config file is an expected, recoverable failure, and every other
		// path in this function already reports errors to the caller.
		return nil, nil, fmt.Errorf("fatal error config file: %w", err)
	}
	v.WatchConfig()

	c := new(Config)
	if err := v.Unmarshal(c); err != nil {
		return nil, nil, err
	}
	return c, v, nil
}
// GetConfigFilePath resolves the location of "config.yaml", trying in order:
// the caller-supplied baseDir, the binary's working directory ("./"), and
// the conventional configuration directory ("./conf"). The first existing
// file wins. For the last candidate the path is returned even when the file
// is absent, unless the existence check itself reported an error — this
// preserves the original fallback behavior callers may depend on.
func GetConfigFilePath(baseDir string) (string, error) {
	const filename = "config.yaml"
	dirs := []string{baseDir, "./", "./conf"}

	var file string
	for i, dir := range dirs {
		file = filepath.Join(dir, filename)
		exist, err := utils.FileIsExist(file)
		if exist {
			return file, nil
		}
		// Last candidate: give up and surface the underlying error, if any.
		if i == len(dirs)-1 && err != nil {
			return "", err
		}
	}
	return file, nil
}
|
import * as MaskedText from 'react-native-masked-text'
import * as RN from 'react-native'
import * as React from 'react'
import {
StyleSheet,
TextInput as _TextInput,
} from 'react-native'
import { TextInputMaskOptionProp } from 'react-native-masked-text/index'
/** Props for the unmasked input: standard RN TextInput props plus a change hook. */
interface UnmaskedTextInputProps extends RN.TextInputProps {
  /** Optional transform applied to the raw text on every change. */
  format?: (text: string) => string
  /** Receives the (possibly formatted) text after each edit. */
  onChangeText: (text: string) => void
}
/**
 * Unmasked free-text input.
 *
 * Mirrors the typed text into throwaway local state (so the component
 * re-renders on every keystroke), applies the optional `format` transform,
 * and forwards the result to `onChangeText`.
 */
const Unmasked: React.FunctionComponent<UnmaskedTextInputProps> = (props: UnmaskedTextInputProps) => {
  // Identity transform used when the caller supplies no `format` prop.
  // Fixed: this was annotated as MaskedTextInputProps['format'], coupling
  // the unmasked component to the masked variant's props for no reason.
  const format: UnmaskedTextInputProps['format'] = (text: string) => text
  const [, setVal] = React.useState(props.value)
  const update: (text: string) => void = (text: string) => {
    text = (props.format ?? format)(text)
    setVal(text)
    props.onChangeText(text)
  }
  return (
    <_TextInput
      value={props.value}
      onChangeText={update}
      keyboardType={props.keyboardType}
      style={props.style}
      secureTextEntry={props.secureTextEntry}
      // NOTE(review): RN's TextInput has no `render` prop (hence the
      // suppression below); confirm this inner input is ever rendered.
      // @ts-ignore
      render={(innerProps: RN.TextInputProps) => <_TextInput {...innerProps} style={styles.inputText} />}
    />
  )
}
/** Props for the masked input: RN TextInput props plus mask configuration. */
interface MaskedTextInputProps extends RN.TextInputProps {
  /** Optional transform applied to the raw text on every change. */
  format?: (text: string) => string
  /** Receives the (possibly formatted) text after each edit. */
  onChangeText: (text: string) => void
  /** Mask preset understood by react-native-masked-text. */
  type: 'cel-phone' | 'cnpj' | 'cpf' | 'credit-card' | 'custom' | 'datetime' | 'money' | 'only-numbers' | 'zip-code'
  /** Extra options for the chosen mask type (pattern, precision, etc.). */
  options: TextInputMaskOptionProp
}
/**
 * Masked text input backed by react-native-masked-text.
 *
 * Same state-mirroring behavior as `Unmasked`, but the text is rendered
 * through TextInputMask using the given mask `type` and `options`.
 */
const Masked: React.FunctionComponent<MaskedTextInputProps> = (props: MaskedTextInputProps) => {
  // Identity transform used when the caller supplies no `format` prop.
  const format: MaskedTextInputProps['format'] = (text: string) => text
  const [, setVal] = React.useState(props.value)
  const update: (text: string) => void = (text: string) => {
    text = (props.format ?? format)(text)
    setVal(text)
    props.onChangeText(text)
  }
  return (
    <MaskedText.TextInputMask
      type={props.type}
      value={props.value}
      onChangeText={update}
      keyboardType={props.keyboardType}
      style={props.style}
      secureTextEntry={props.secureTextEntry}
      options={props.options}
      // Fixed: the render callback receives TextInput props to spread onto
      // the inner input — it was mistyped as `string`.
      // @ts-ignore
      render={(innerProps: RN.TextInputProps) => <_TextInput {...innerProps} style={styles.inputText} />}
    />
  )
}
// Shared style for the inner TextInput rendered by both input variants.
const styles = StyleSheet.create({
  inputText: {
    fontSize: 20,
    padding: 6,
    marginTop: 16,
  },
})
export {
Masked,
Unmasked,
}
|
In the wake of numerous reports of Windows 10's forced updates causing problems with drivers, Microsoft has backtracked on the feature and has released a tool that allows you to block or uninstall updates that are suspected to be causing issues.
Here's Microsoft's stance on why it released the tool:
In Windows 10 Insider Preview, your device is always kept up to date with the latest features and fixes. Updates are installed automatically, with no need to select which updates are needed or not needed. In rare cases, a specific driver or update might temporarily cause issues with your device, and in this case you will need a way to prevent the problematic driver or update from reinstalling automatically the next time Windows Updates are installed.
Over the last few days, the Internet has been awash with reports of issues with Windows 10's forced updates interfering with other applications, as my colleague Gordon Kelly reported here. Specifically, it's the way it installs drivers, overriding any other driver-based update applications that are the issue, but the way in which Windows 10's updates are automatically installed has also been rather unpopular with enthusiasts.
In the past, Windows Update has been optional and you can select which updates to install. Not so with Windows 10 as by default it installs updates automatically and doesn't allow you to deselect them and this is the same for both security-based updates as well as those for drivers.
Today, though, ZDNet spotted a tool located on an official Microsoft update webpage that allows you to block certain Windows updates and prevent the OS from installing them in future. This is a potential lifeline for users that feared this to be a critical flaw with Windows 10, which is due to be released in two days' time.
The ability to avoid all driver updates is potentially very useful as there's no guarantee that the ones provided by Microsoft will always be the latest versions and also that they won't cause issues such as those we've seen over the last few days between Windows Update and Nvidia's GeForce Experience.
You can download the tool from this webpage. I'm still investigating it in a pre-release version of Windows 10 - while it doesn't seem to sync up with Windows Update and allow you to view and block updates before they're downloaded, it does allow you to see everything that has been installed and to both uninstall it and block future installs too.
By hiding updates, you essentially prevent Windows 10 from installing them and you can see a list of updates you've already 'hidden' using the tool too. You can uninstall updates already via control panel, but the likelihood is that Windows 10 would just download them again. The tool prevents this, although it's obviously not ideal having to use a separate program that's not integrated into the OS itself somehow.
I'd definitely like to see this written into the OS at some point. While the masses will likely benefit from a more spoon-fed update program, meaning more PCs will be up-to-date, for those of us that are a little more hands-on with our systems and prefer to update graphics card drivers, for example, manually, then having the option to abstain from certain updates makes Windows 10 a lot more attractive than it was yesterday.
Follow @antonyleather |
/**
* Of course we can call {@link #$listOf(int, samples.Builder)} inline and assign the produced
* list of item builders directly to the order builder.
* <p>
* That means, that on every call to some($Order()) we will get a new {@link Order} with a
* complete unique list of items.
*/
@Test
public void testSome$Order_With$ListOf5$Items() {
Order act = some($Order().withItems($listOf(5, $Item())));
assertThat(act.recipient).isNotNull();
assertThat(act.items).hasSize(5);
} |
/*
Database
The sole purpose of this module is to provide dead-simple persistence of data that has been processed / needs to be processed.
It should:
- make interacting with the data as simple as possible
It shouldn't:
- leak abstractions or otherwise require consumers to work with the data store's syntax for querying / retrieving data
*/
import { AmazonOrdersRepository } from './repositories/amazon-orders-repository'
import { TransactionsRepository } from './repositories/transactions-repository'
/**
 * Aggregates every repository behind one context object so that consumers
 * interact with repositories only and never with the data store's own
 * query/retrieval syntax.
 */
class Context {
  private readonly amazonOrders: AmazonOrdersRepository
  private readonly transactions: TransactionsRepository

  constructor () {
    this.amazonOrders = new AmazonOrdersRepository()
    this.transactions = new TransactionsRepository()
  }

  /** Repository holding Amazon order data. */
  get AmazonOrdersRepository (): AmazonOrdersRepository {
    return this.amazonOrders
  }

  /** Repository holding transaction data. */
  get TransactionsRepository (): TransactionsRepository {
    return this.transactions
  }

  /** Compacts the backing storage of every repository. */
  Compact (): void {
    this.amazonOrders.Compact()
    this.transactions.Compact()
  }
}

/** Shared singleton database context for the application. */
export const Database: Context = new Context()
|
Not to be confused with Cortlandt, New York
City in New York, United States
Cortland is a city in Cortland County, New York, United States of America. Known as the Crown City, Cortland is located in New York's Southern Tier region. As of the 2010 census, the city had a population of 19,204.[2] It is the county seat of Cortland County.[3]
The city of Cortland, near the western border of the county, is surrounded by the town of Cortlandville.
History [ edit ]
The city is within the former Central New York Military Tract. The city is named after Pierre Van Cortlandt, the first lieutenant governor of the state of New York.[4]
Cortland, settled in 1791, was made a village in 1853 (rechartered in 1864), and was incorporated as a city in 1900 as the 41st city in New York state. When the county was formed in 1808, Cortland vied with other villages and won the status of becoming the county seat. Known as the "Crown City" because of its location on a plain formed by the convergence of seven valleys, Cortland is situated at 1,130 feet (340 m) above sea level. Forty stars representing the 40 cities incorporated before Cortland circle the State of New York and Crown on the city's official seal. The seven points of the crown create seven valleys depicting Cortland's seven surrounding valleys. The 41st star in the center of the crown illustrates Cortland as the closest incorporated city to the geographic center of New York.
The leading industry in Cortland in the late nineteenth and early twentieth century was the Wickwire Brothers wire drawing mill, noted for its production of wire hardware cloth for use as window screens. Durkee's Bakery, founded by Michael C. Antil and Albert Durkee in 1931 from the failed Durkee's Retail bake store - became an employer of more than 250. The extent of their wealth is commemorated in a pair of magnificent mansions. The Victorian Chateauesque style home of Chester Wickwire is now the 1890 House Museum & Center for Victorian Arts, while the 1912 home of Charles Wickwire is now owned and operated by the SUNY Cortland Alumni Association.[5] It is open to the public as well as being used by the Alumni Association to host college-related events and house visiting dignitaries.[6]
Cortland was also home to Brockway Motor Company, a pioneering truck maker. Begun in 1875 as Brockway Carriage Works, it was taken over by Mack Trucks in 1956, and survived until 1977. The city continues to host an annual show of Brockway trucks.[7]
Cortland also boasts a classic octagon house[8] and the still-operating, garden-type Cortland Rural Cemetery.
In 1868 Cortland became the home of the Cortland Normal School, now the State University of New York at Cortland.
From 1960 to 1992, Smith Corona typewriters were manufactured in Cortland.[9]
In 2006, Cortland's historic clock tower burned down. It was later re-built, with spaces for both businesses and apartment style housing.[10]
The Cortland County Courthouse, Cortland County Poor Farm, Cortland Fire Headquarters, Cortland Free Library, First Presbyterian Church Complex, William J. Greenman House, Randall Farm, Tompkins Street Historic District, Unitarian Universalist Church, and United States Post Office are listed on the National Register of Historic Places.[11][12]
Notable people [ edit ]
Geography [ edit ]
Cortland is in west-central Cortland County at (42.600658, −76.181284).[13] Cortland lies between Syracuse, New York and Binghamton, New York. It is surrounded by the town of Cortlandville.
According to the United States Census Bureau, the city has an area of 3.92 square miles (10.14 km2), of which 3.90 square miles (10.09 km2) is land and 0.02 square miles (0.05 km2), or 0.51%, is water.[2]
The Tioughnioga River, a tributary of the Susquehanna River, flows southward past the city.
Transportation [ edit ]
Roads and highways [ edit ]
Interstate 81, U.S. Route 11, and New York State Route 281 are north-south highways servicing the city. New York State Route 13 and New York State Route 41 also serve the city. Via I-81 it is 40 miles (64 km) north to Syracuse and 40 miles (64 km) south to Binghamton. NY-13 leads southwest 18 miles (29 km) to Ithaca.
Bus [ edit ]
Local public transportation by bus is provided by Cortland Transit.[14] Greyhound provides the city with intercity bus service with connections to Syracuse, Binghamton, and points beyond. The closest Amtrak train station is in Syracuse.
Air [ edit ]
Air service is provided by Cortland County Airport located west of the city.
Climate [ edit ]
Climate data for Cortland, New York Month Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec Year Record high °F (°C) 68
(20) 65
(18) 85
(29) 90
(32) 93
(34) 96
(36) 100
(38) 98
(37) 100
(38) 90
(32) 81
(27) 68
(20) 100
(38) Average high °F (°C) 30.6
(−0.8) 32.8
(0.4) 41.9
(5.5) 54.1
(12.3) 67.6
(19.8) 76.3
(24.6) 81.0
(27.2) 79.4
(26.3) 70.7
(21.5) 59.0
(15.0) 46.2
(7.9) 35.1
(1.7) 56.2
(13.5) Average low °F (°C) 15.2
(−9.3) 15.7
(−9.1) 24.1
(−4.4) 34.4
(1.3) 45.3
(7.4) 54.3
(12.4) 58.8
(14.9) 56.9
(13.8) 49.3
(9.6) 39.3
(4.1) 31.7
(−0.2) 21.5
(−5.8) 37.2
(2.9) Record low °F (°C) −25
(−32) −26
(−32) −13
(−25) 11
(−12) 23
(−5) 32
(0) 39
(4) 35
(2) 27
(−3) 18
(−8) 2
(−17) −17
(−27) −26
(−32) Average precipitation inches (mm) 2.74
(70) 2.49
(63) 3.12
(79) 3.22
(82) 3.28
(83) 4.08
(104) 3.37
(86) 2.98
(76) 3.97
(101) 3.17
(81) 3.49
(89) 3.41
(87) 39.32
(999) Average snowfall inches (cm) 19.7
(50) 19.2
(49) 13.2
(34) 4.0
(10) 0
(0) 0
(0) 0
(0) 0
(0) 0
(0) .3
(0.76) 8.2
(21) 22.3
(57) 86.9
(221) Average precipitation days (≥ 0.01 in) 17.4 14.3 14.3 13.4 12.1 11.8 10.6 10.2 11.5 12.6 15.2 16.8 160.2 Average snowy days (≥ 0.1 in) 9.1 7.0 4.5 1.7 0 0 0 0 0 .1 3.2 7.6 33.2 Source #1: NOAA (normals 1971–2000),[15] Source #2: The Weather Channel (extremes)[16]
Demographics [ edit ]
Historical population Census Pop. %± 1870 3,066 — 1880 4,050 32.1% 1890 8,590 112.1% 1900 9,014 4.9% 1910 11,504 27.6% 1920 13,294 15.6% 1930 15,043 13.2% 1940 15,881 5.6% 1950 18,152 14.3% 1960 19,181 5.7% 1970 19,621 2.3% 1980 20,138 2.6% 1990 19,801 −1.7% 2000 18,740 −5.4% 2010 19,204 2.5% Est. 2016 18,795 [1] −2.1% U.S. Decennial Census[17]
As of the census[18] of 2000, there were 18,740 people, 6,922 households, and 3,454 families residing in the city. The population density was 4,778.6 people per square mile (1,845.8/km²). There were 7,550 housing units at an average density of 1,925.2 per square mile (743.6/km²). The racial makeup of the city was 95.72% White, 1.56% African American, 0.25% Native American, 0.57% Asian, 0.02% Pacific Islander, 0.56% from other races, and 1.33% from two or more races. Hispanic or Latino of any race were 1.72% of the population.
There were 6,922 households out of which 24.8% had children under the age of 18 living with them, 34.7% were married couples living together, 11.4% had a female householder with no husband present, and 50.1% were non-families. 36.0% of all households were made up of individuals and 13.0% had someone living alone who was 65 years of age or older. The average household size was 2.28 and the average family size was 2.95.
In the city, the population was spread out with 18.3% under the age of 18, 28.4% from 18 to 24, 23.6% from 25 to 44, 16.8% from 45 to 64, and 12.9% who were 65 years of age or older. The median age was 28 years. For every 100 females, there were 88.4 males. For every 100 females age 18 and over, there were 84.5 males.
The median income for a household in the city was $26,478, and the median income for a family was $39,167. Males had a median income of $29,857 versus $21,614 for females. The per capita income for the city was $14,267. About 13.9% of families and 24.7% of the population were below the poverty line, including 24.8% of those under age 18 and 15.2% of those age 65 or over.
As of 2015 the largest self-reported ancestry groups in Cortland, New York were:
English - 15.6%
Irish - 10.8%
Italian - 9.7%
German - 8.4%
"American" - 5.7%
Dutch - 2.2%
Scottish - 2.1%
French (except Basque) - 1.8%
Polish - 1.8%[19]
Downtown Cortland
Government [ edit ]
The government of Cortland consists of a mayor, who is elected at large, and a city council consisting of eight members. One member is elected from each of the eight voting wards.
Sports [ edit ]
In summer 2009, the New York Jets training camp was moved to Cortland from its traditional home at Hofstra University in Hempstead. The team located their operations at the State University of New York, Cortland campus. The camp drew in 34,000 visitors and brought nearly $4.26 million to the local economy.[20] In 2010, the Jets signed a 3-year contract with SUNY Cortland to continue their partnership.
See also [ edit ] |
/**
 * Vaadin scopes configuration.
 * <p>
 * Registers the Vaadin session, UI and route scopes as
 * {@link BeanFactoryPostProcessor} beans. The factory methods are
 * {@code static} so that Spring can register the post-processors early,
 * without instantiating this configuration class first.
 *
 * @author Vaadin Ltd
 *
 */
@Configuration
public class VaadinScopesConfig {
    /**
     * Creates a Vaadin session scope.
     *
     * @return the Vaadin session scope
     */
    @Bean
    public static BeanFactoryPostProcessor vaadinSessionScope() {
        return new VaadinSessionScope();
    }

    /**
     * Creates a Vaadin UI scope.
     *
     * @return the Vaadin UI scope
     */
    @Bean
    public static BeanFactoryPostProcessor vaadinUIScope() {
        return new VaadinUIScope();
    }

    /**
     * Creates a Vaadin route scope.
     *
     * @return the Vaadin route scope
     */
    @Bean
    public static BeanFactoryPostProcessor vaadinRouteScope() {
        return new VaadinRouteScope();
    }
}
Election 2012: Nevada President
Nevada: Obama 50%, Romney 48%
President Obama still receives 50% of the vote in Nevada’s tight presidential race.
The latest Rasmussen Reports telephone survey of Likely Nevada voters, taken the night after the final presidential debate, shows Obama with 50% support to Mitt Romney’s 48%. One percent (1%) prefers some other candidate, and one percent (1%) is undecided. (To see survey question wording, click here.)
Win an iPad: Take the Rasmussen Challenge. This week's entries will be accepted until 11:59pm ET tonight.
This Nevada survey of 500 Likely Voters was conducted on October 23, 2012 by Rasmussen Reports. The margin of sampling error is +/- 4.5 percentage points with a 95% level of confidence. Field work for all Rasmussen Reports surveys is conducted by Pulse Opinion Research, LLC. See methodology.
OR |
Functional and Molecular Imaging: Key Components of Personalized Healthcare
The ongoing revolution in biomedical imaging has produced technologies capable of depicting both tissue structure and function with high fidelity. Advances in nuclear medicine, magnetic resonance imaging, computed tomography and optical imaging facilitate multimodality, multiscale imaging ranging from display of gross pathology to the products of gene expression. Use of multiple modalities also permits a multidimensional look at tissues, based on the way in which they interact with X-rays, magnetic fields or sound waves, as well as how they metabolize a variety of components. The convergence of biomedical imaging with bioengineering, genomics and bioinformatics is creating a climate in which "personalized healthcare" becomes feasible. Sophisticated analysis and combination of imaging and genomic data enhance our ability to: (1) predict the lifetime risk of an individual's developing a given disease; (2) detect the disease in its pre-clinical phase; (3) diagnose the disease thoroughly, down to its molecular level; (4) treat the disease minimally invasively; and (5) monitor the effectiveness of treatment. This presentation will showcase the tools of functional and molecular imaging and demonstrate how they can be used for personalized healthcare.
from datetime import date


def mensagem_alistamento(ano_nascimento: int, ano_atual: int) -> str:
    """Return the enlistment message for someone born in ``ano_nascimento``.

    Enlistment is due in the year the person turns 18: before that the
    message says how many years remain, at exactly 18 it is time to enlist,
    and afterwards it says how many years overdue the person is.
    """
    idade = ano_atual - ano_nascimento
    if idade < 18:
        return f'Ainda faltam {18 - idade} anos para seu alistamento obrigatorio'
    if idade == 18:
        return 'Está na epoca de se alistar!'
    return f'Já passou da epoca de se alistar. Você deveria ter se alistado há {idade - 18} anos'


if __name__ == '__main__':
    ano = int(input('Digite o ano do seu nascimento: '))
    print(mensagem_alistamento(ano, date.today().year))
<filename>opengl-text/src/gl_vertex_array.cpp
#include "gl_vertex_array.h"
#include "gl_get_proc_address.h"
#include "gl_validate.h"
#include "gl_vertex_buffer.h"
#include <stdexcept>
namespace opengl {
namespace gl {

// RAII wrapper around an OpenGL vertex array object (VAO), driven through
// the direct-state-access entry points (glCreateVertexArrays,
// glVertexArrayVertexBuffer, ...).  Every entry point is resolved at
// construction time and stored in members; all calls require an active GL
// context on the current thread.
VertexArray::VertexArray( ) :
   vertex_array_ { },
   glCreateVertexArrays { GetProcAddress("glCreateVertexArrays", glCreateVertexArrays) },
   glBindVertexArray { GetProcAddress("glBindVertexArray", glBindVertexArray) },
   glDeleteVertexArrays { GetProcAddress("glDeleteVertexArrays", glDeleteVertexArrays) },
   glVertexArrayVertexBuffer { GetProcAddress("glVertexArrayVertexBuffer", glVertexArrayVertexBuffer) },
   glEnableVertexArrayAttrib { GetProcAddress("glEnableVertexArrayAttrib", glEnableVertexArrayAttrib) },
   glDisableVertexArrayAttrib { GetProcAddress("glDisableVertexArrayAttrib", glDisableVertexArrayAttrib) },
   glVertexArrayAttribFormat { GetProcAddress("glVertexArrayAttribFormat", glVertexArrayAttribFormat) },
   glVertexArrayAttribBinding { GetProcAddress("glVertexArrayAttribBinding", glVertexArrayAttribBinding) }
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   // A null pointer means the driver / context does not expose that entry
   // point; fail construction instead of crashing on a later call.
   if (!glCreateVertexArrays || !glBindVertexArray ||
       !glDeleteVertexArrays || !glVertexArrayVertexBuffer ||
       !glEnableVertexArrayAttrib || !glDisableVertexArrayAttrib ||
       !glVertexArrayAttribFormat || !glVertexArrayAttribBinding)
   {
      throw
         std::runtime_error(
            "Not all gl buffer entry points defined!");
   }

   glCreateVertexArrays(
      1,
      &vertex_array_);

   // A zero name means the driver failed to allocate the object.
   if (!vertex_array_)
   {
      throw
         std::runtime_error(
            "Unable to create a buffer object!");
   }

   VALIDATE_NO_GL_ERROR();
}

// Releases the VAO owned by this wrapper.
VertexArray::~VertexArray( ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glDeleteVertexArrays(
      1,
      &vertex_array_);

   VALIDATE_NO_GL_ERROR();
}

// Returns the raw GL object name; ownership stays with this wrapper.
GLuint VertexArray::GetID( ) const noexcept
{
   return
      vertex_array_;
}

// Attaches `buffer` to the VAO's `binding_index`, starting `base_offset`
// bytes into the buffer with `element_stride` bytes between elements.
// DSA-style: the VAO does not need to be bound first.
void VertexArray::BindVertexBuffer(
   const VertexBuffer & buffer,
   const GLuint binding_index,
   const std::ptrdiff_t base_offset,
   const GLsizei element_stride ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glVertexArrayVertexBuffer(
      vertex_array_,
      binding_index,
      buffer.GetID(),
      base_offset,
      element_stride);

   VALIDATE_NO_GL_ERROR();
}

// Describes attribute `attribute_index` (component count / type /
// normalization / offset within an element) and associates it with the
// vertex buffer bound at `binding_index`.
void VertexArray::BindVertexAttribute(
   const GLuint binding_index,
   const GLuint attribute_index,
   const GLint component_count,
   const GLenum component_type,
   const GLboolean normalize_data,
   const GLuint relative_offset ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glVertexArrayAttribFormat(
      vertex_array_,
      attribute_index,
      component_count,
      component_type,
      normalize_data,
      relative_offset);

   glVertexArrayAttribBinding(
      vertex_array_,
      attribute_index,
      binding_index);

   VALIDATE_NO_GL_ERROR();
}

// Enables the given attribute index on this VAO.
void VertexArray::EnableVertexAttribute(
   const GLuint attribute_index ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glEnableVertexArrayAttrib(
      vertex_array_,
      attribute_index);

   VALIDATE_NO_GL_ERROR();
}

// Disables the given attribute index on this VAO.
void VertexArray::DisableVertexAttribute(
   const GLuint attribute_index ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glDisableVertexArrayAttrib(
      vertex_array_,
      attribute_index);

   VALIDATE_NO_GL_ERROR();
}

// Makes this VAO the currently bound vertex array.
void VertexArray::Activate( ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glBindVertexArray(
      vertex_array_);

   VALIDATE_NO_GL_ERROR();
}

// Unbinds any currently bound vertex array (binds VAO 0).
void VertexArray::Deactivate( ) noexcept
{
   VALIDATE_ACTIVE_GL_CONTEXT();

   glBindVertexArray(
      0);

   VALIDATE_NO_GL_ERROR();
}

}} // namespace opengl::gl
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
pub mod error;
pub mod linux_plugin_transforms;
use device_types::{
devices::{Device, DeviceId},
mount::Mount,
};
pub use error::ImlDeviceError;
use futures::{future::try_join_all, lock::Mutex, TryStreamExt};
use im::HashSet;
use iml_change::*;
use iml_postgres::sqlx::{self, PgPool};
use iml_tracing::tracing;
use iml_wire_types::{Fqdn, FsType};
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
sync::Arc,
};
/// Shared, mutex-guarded map of each host's (FQDN) reported device tree.
pub type Cache = Arc<Mutex<HashMap<Fqdn, Device>>>;
/// Maps a key (presumably a target/fs name; not used in this portion of the
/// module — TODO confirm against callers) to (host, string) pairs.
pub type TargetFsRecord = HashMap<String, Vec<(Fqdn, String)>>;
// Serialization shape relating a host/target to filesystems; not
// constructed anywhere in this portion of the module.
#[derive(serde::Serialize, serde::Deserialize)]
struct FsRecord {
    host: String,
    target: String,
    fs: Option<String>,
    mgs_fs: Option<String>,
}
/// Given a db pool, create a new cache and fill it with initial data.
/// This will start the device tree with the previous items it left off with.
pub async fn create_cache(pool: &PgPool) -> Result<Cache, ImlDeviceError> {
let data = sqlx::query!("select * from chroma_core_device")
.fetch_all(pool)
.await?
.into_iter()
.map(|x| -> Result<(Fqdn, Device), ImlDeviceError> {
let d = serde_json::from_value(x.devices)?;
Ok((Fqdn(x.fqdn), d))
})
.collect::<Result<_, _>>()?;
Ok(Arc::new(Mutex::new(data)))
}
/// Loads every row of the `target` table into a list of `Target`s.
pub async fn create_target_cache(pool: &PgPool) -> Result<Vec<Target>, ImlDeviceError> {
    let xs: Vec<Target> = sqlx::query_as!(Target, r#"SELECT state, name, active_host_id, host_ids, filesystems, uuid, mount_path, dev_path, fs_type AS "fs_type: FsType" FROM target"#)
        .fetch(pool)
        .try_collect()
        .await?;

    Ok(xs)
}
/// Upserts the full device tree reported by `host` into
/// `chroma_core_device` (one JSON blob per fqdn).
pub async fn update_devices(
    pool: &PgPool,
    host: &Fqdn,
    devices: &Device,
) -> Result<(), ImlDeviceError> {
    tracing::info!("Inserting devices from host '{}'", host);
    tracing::debug!("Inserting {:?}", devices);

    sqlx::query!(
        r#"
        INSERT INTO chroma_core_device
        (fqdn, devices)
        VALUES ($1, $2)
        ON CONFLICT (fqdn) DO UPDATE
        SET devices = EXCLUDED.devices
    "#,
        host.to_string(),
        serde_json::to_value(devices)?
    )
    .execute(pool)
    .await?;

    Ok(())
}
/// Looks up the django content type id for `lustreclientmount` rows, if any.
pub async fn client_mount_content_id(pool: &PgPool) -> Result<Option<i32>, ImlDeviceError> {
    let row = sqlx::query!("select id from django_content_type where model = 'lustreclientmount'")
        .fetch_optional(pool)
        .await?;

    Ok(row.map(|x| x.id))
}
/// Reconciles `chroma_core_lustreclientmount` rows for `host` with the
/// read-write lustre client mounts that host currently reports: current
/// mounts are upserted as `mounted`, and every other row for the host is
/// flipped to `unmounted`.  Unknown hosts are skipped with a warning.
pub async fn update_client_mounts(
    pool: &PgPool,
    ct_id: Option<i32>,
    host: &Fqdn,
    mounts: &HashSet<Mount>,
) -> Result<(), ImlDeviceError> {
    // Resolve the host's row id; quietly succeed if the host is unknown.
    let host_id: Option<i32> = sqlx::query!(
        "select id from chroma_core_managedhost where fqdn = $1 and not_deleted = 't'",
        host.to_string()
    )
    .fetch_optional(pool)
    .await?
    .map(|x| x.id);

    let host_id = match host_id {
        Some(id) => id,
        None => {
            tracing::warn!("Host '{}' is unknown", host);

            return Ok(());
        }
    };

    // Build fs name -> set of local mountpoints, keeping only read-write
    // lustre mounts.  The fs name is the part after "<mgsnode>:/" in the
    // mount source.
    let mount_map = mounts
        .into_iter()
        .filter(|m| m.fs_type.0 == "lustre")
        .filter(|m| m.opts.0.split(',').find(|x| x == &"ro").is_none())
        .filter_map(|m| {
            m.source
                .0
                .to_str()
                .and_then(|p| p.splitn(2, ":/").nth(1))
                .map(|fs| (fs.to_string(), m.target.0.to_string_lossy().to_string()))
        })
        .fold(HashMap::new(), |mut acc, (fs_name, mountpoint)| {
            let mountpoints = acc.entry(fs_name).or_insert_with(BTreeSet::new);

            mountpoints.insert(mountpoint);

            acc
        });

    tracing::debug!("Client mounts at {}({}): {:?}", host, host_id, &mount_map);

    // Upsert one row per filesystem, collecting the ids of rows that
    // correspond to a currently-present mount.
    let xs = mount_map.into_iter().map(|(fs_name, mountpoints)| async move {
        let mountpoints:Vec<String> = mountpoints.into_iter().collect();

        let x = sqlx::query!(
            r#"
                INSERT INTO chroma_core_lustreclientmount
                (host_id, filesystem, mountpoints, state, state_modified_at, immutable_state, not_deleted, content_type_id)
                VALUES ($1, $2, $3, 'mounted', now(), 'f', 't', $4)
                ON CONFLICT (host_id, filesystem, not_deleted) DO UPDATE
                SET
                    mountpoints = excluded.mountpoints,
                    state = excluded.state,
                    state_modified_at = excluded.state_modified_at
                RETURNING id
            "#,
            host_id,
            &fs_name,
            &mountpoints,
            ct_id,
        ).fetch_all(pool).await?;

        Ok::<_, ImlDeviceError>(x)
    });

    let xs: Vec<_> = try_join_all(xs)
        .await?
        .into_iter()
        .flatten()
        .map(|x| x.id)
        .collect();

    // Any row for this host that was not just upserted is no longer mounted.
    let updated = sqlx::query!(
        r#"
            UPDATE chroma_core_lustreclientmount
            SET
                mountpoints = array[]::text[],
                state = 'unmounted',
                state_modified_at = now()
            WHERE host_id = $1
            AND id != ALL($2)
        "#,
        host_id,
        &xs
    )
    .execute(pool)
    .await?;

    tracing::debug!("Updated client mounts: {:?}", updated);

    Ok(())
}
/// For one host: each known device id mapped to the set of id-paths
/// (ancestor chains from the tree root) at which that device appears.
#[derive(Debug, serde::Serialize)]
pub struct DeviceMap(BTreeMap<DeviceId, BTreeSet<Vec<DeviceId>>>);
impl DeviceMap {
    /// For devices that live on a shared pool — LVM logical volumes (`lv_*`)
    /// on volume groups (`vg_*`) and ZFS datasets (`dataset_*`) on zpools
    /// (`zpool_*`) — return the pool-level parent id and its set of paths.
    /// Returns `None` for any other device kind, or when no matching parent
    /// is recorded.
    fn get_shared_parent(&self, id: &DeviceId) -> Option<(&DeviceId, &BTreeSet<Vec<DeviceId>>)> {
        let item = self.0.get(id)?;

        // The two pooled device kinds only differ in the id prefix of their
        // parent; the lookup logic is otherwise identical.
        let parent_prefix = if id.0.starts_with("lv_") {
            "vg_"
        } else if id.0.starts_with("dataset_") {
            "zpool_"
        } else {
            return None;
        };

        // Find the first recorded path whose immediate parent carries the
        // expected prefix.
        let parent = item.iter().find_map(|xs| {
            let x = xs.last()?;

            if x.0.starts_with(parent_prefix) {
                Some(x)
            } else {
                None
            }
        })?;

        Some((parent, self.0.get(parent)?))
    }
}
/// A `DeviceMap` per host, keyed by the host's FQDN.
#[derive(Debug, serde::Serialize)]
pub struct DeviceIndex<'a>(HashMap<&'a Fqdn, DeviceMap>);
/// Builds a per-host index of device ids to their tree paths, then grafts
/// shareable devices (LVs, datasets, mdraid) onto every other host that can
/// see all of their parents (see `add_shared_storage`).
pub fn build_device_index<'a>(x: &'a HashMap<Fqdn, Device>) -> DeviceIndex<'a> {
    let mut device_index: HashMap<&Fqdn, DeviceMap> = x
        .iter()
        .map(|(fqdn, device)| {
            let mut map = DeviceMap(BTreeMap::new());

            build_device_map(device, &mut map, &[]);

            (fqdn, map)
        })
        .collect();

    // Collect (host, device id, paths) triples to insert, comparing each
    // host's map against all of the other hosts' maps.
    let xs = device_index.iter().fold(vec![], |mut acc, (fqdn, map)| {
        let others = device_index.iter().filter(|(x, _)| &fqdn != x).collect();

        acc.extend(add_shared_storage(map, others));

        acc
    });

    // Apply the collected insertions after iteration to satisfy the borrow
    // checker (no mutation while iterating above).
    for (fqdn, device_id, paths) in xs {
        let map = match device_index.get_mut(&fqdn) {
            Some(x) => x,
            None => continue,
        };

        map.0.insert(device_id.clone(), paths.clone());
    }

    DeviceIndex(device_index)
}
/// For every potentially shared device (`lv_*`, `dataset_*`, `mdraid_*`) in
/// `map`, returns the entries that should also appear on other hosts: a host
/// qualifies when its own map contains all of the device's parent ids.
fn add_shared_storage<'a>(
    map: &'a DeviceMap,
    other: HashMap<&'a &Fqdn, &DeviceMap>,
) -> Vec<(Fqdn, DeviceId, BTreeSet<Vec<DeviceId>>)> {
    let xs: Vec<_> = map
        .0
        .iter()
        .filter(|(device_id, _)| {
            device_id.0.starts_with("lv_")
                || device_id.0.starts_with("dataset_")
                || device_id.0.starts_with("mdraid_")
        })
        .collect();

    let mut matches = vec![];

    for (existing_id, parents) in xs {
        if existing_id.0.starts_with("lv_") || existing_id.0.starts_with("dataset_") {
            // Pool-backed devices: replicate both the pool (vg / zpool) and
            // the device itself onto hosts that see all the pool's parents.
            let shared = map.get_shared_parent(existing_id);

            if let Some((shared_id, shared_parents)) = shared {
                let parent_ids = shared_parents
                    .iter()
                    .filter_map(|xs| xs.last())
                    .collect::<Vec<_>>();

                matches = other
                    .iter()
                    .filter(|(_, map)| parent_ids.iter().all(|p| map.0.get(p).is_some()))
                    .map(|(fqdn, _)| {
                        vec![
                            ((**fqdn).clone(), shared_id.clone(), shared_parents.clone()),
                            ((**fqdn).clone(), existing_id.clone(), parents.clone()),
                        ]
                    })
                    .flatten()
                    .chain(matches)
                    .collect();
            }
        } else {
            // mdraid devices: replicate the device itself onto hosts that
            // see all of its direct parents.
            let parent_ids = parents
                .iter()
                .filter_map(|xs| xs.last())
                .collect::<Vec<_>>();

            matches = other
                .iter()
                .filter(|(_, map)| parent_ids.iter().all(|p| map.0.get(p).is_some()))
                .map(|(fqdn, _)| ((**fqdn).clone(), existing_id.clone(), parents.clone()))
                .chain(matches)
                .collect();
        }
    }

    matches
}
/// Recursively records, for `device` and all of its children, every id-path
/// (chain of ancestor ids starting at the root) at which each device occurs.
fn build_device_map(device: &Device, map: &mut DeviceMap, path: &[DeviceId]) {
    let device_id = match device.get_id() {
        Some(x) => x,
        None => return,
    };

    map.0
        .entry(device_id.clone())
        .or_insert_with(BTreeSet::new)
        .insert(path.to_vec());

    // Children are reached through this device, so extend the path with it.
    let mut child_path = path.to_vec();
    child_path.push(device_id);

    if let Some(children) = device.children() {
        for child in children {
            build_device_map(child, map, &child_path);
        }
    }
}
/// Derives the list of lustre `Target`s from the reported mounts.
///
/// For every local (non-client) lustre mount — identified by a device path
/// without an `@` — the target name and osd type are parsed out of the mount
/// options, the backing device is resolved in the host's device tree, and
/// every other host that can see the same device is added to `host_ids`
/// (presumably failover peers — confirm against consumers).  MGS targets get
/// their filesystem list from `mgs_fs_cache`.
pub fn find_targets<'a>(
    x: &'a HashMap<Fqdn, Device>,
    mounts: &HashMap<Fqdn, HashSet<Mount>>,
    host_map: &HashMap<Fqdn, i32>,
    device_index: &DeviceIndex<'a>,
    mgs_fs_cache: &HashMap<Fqdn, Vec<String>>,
) -> Vec<Target> {
    // (fqdn, mountpoint, source device, svname, osd) per local lustre mount;
    // an MGS mount is additionally emitted as a synthetic "MGS" entry.
    let xs: Vec<_> = mounts
        .iter()
        .map(|(k, xs)| xs.into_iter().map(move |x| (k, x)))
        .flatten()
        .filter(|(_, x)| x.fs_type.0 == "lustre")
        .filter(|(_, x)| !x.source.0.to_string_lossy().contains('@'))
        .filter_map(|(fqdn, x)| {
            let opts: Vec<_> = x.opts.0.split(',').collect();

            let is_mgs = opts.contains(&"mgs");

            let svname = opts.into_iter().find(|x| x.starts_with("svname="))?;
            let svname = svname.split('=').nth(1)?;

            let osd = x.opts.0.split(',').find(|x| x.starts_with("osd="))?;
            let osd = osd.split('=').nth(1)?;

            let mut xs = vec![(fqdn, &x.target, &x.source, svname, osd)];

            if is_mgs {
                xs.push((fqdn, &x.target, &x.source, "MGS", osd));
            }

            Some(xs)
        })
        .flatten()
        .collect();

    // Resolve each mount's source path to a device in the host's tree, to
    // obtain a stable device id and the fs uuid.
    let xs: Vec<_> = xs
        .into_iter()
        .filter_map(|(fqdn, mntpnt, dev, target, osd)| {
            let dev_tree = x.get(&fqdn)?;

            let device = dev_tree.find_device_by_devpath(dev)?;

            let dev_id = device.get_id()?;

            let fs_uuid = device.get_fs_uuid()?;

            Some((fqdn, mntpnt, dev_id, dev, fs_uuid, target, osd))
        })
        .collect();

    // Work out which other hosts can also see the backing device.
    let xs: Vec<_> = xs
        .into_iter()
        .filter_map(|(fqdn, mntpnt, dev_id, dev_path, fs_uuid, target, osd)| {
            let ys: Vec<_> = device_index
                .0
                .iter()
                .filter(|(k, _)| *k != &fqdn)
                .filter_map(|(k, x)| {
                    tracing::debug!("Searching for device {:?} on {}", dev_id, &k);

                    x.0.get(&dev_id)?;

                    tracing::debug!("found device on {}!", &k);

                    let host_id = host_map.get(&k)?;

                    Some(*host_id)
                })
                .collect();

            tracing::debug!("ys: {:?}", ys);

            let host_id = host_map.get(&fqdn)?;

            tracing::debug!("host id: {:?}", host_id);

            Some((
                fqdn,
                host_id,
                [vec![*host_id], ys].concat(),
                mntpnt,
                fs_uuid,
                dev_path,
                target,
                osd,
            ))
        })
        .collect();

    xs.into_iter()
        .map(
            |(fqdn, host_id, ids, mntpnt, fs_uuid, dev_path, target, osd)| {
                // An MGS lists every filesystem it serves; a regular target's
                // fs name is the prefix of its "<fs>-<target>" name.
                let filesystems = if target == "MGS" {
                    mgs_fs_cache.get(&fqdn).cloned().unwrap_or_default()
                } else {
                    target
                        .rsplitn(2, '-')
                        .nth(1)
                        .map(String::from)
                        .map(|x| vec![x])
                        .unwrap_or_default()
                };

                Target {
                    state: "mounted".into(),
                    active_host_id: Some(*host_id),
                    host_ids: ids,
                    dev_path: Some(dev_path.0.to_string_lossy().to_string()),
                    filesystems,
                    name: target.into(),
                    uuid: fs_uuid.into(),
                    mount_path: Some(mntpnt.0.to_string_lossy().to_string()),
                    fs_type: match osd {
                        osd if osd.contains("zfs") => Some(FsType::Zfs),
                        osd if osd.contains("ldiskfs") => Some(FsType::Ldiskfs),
                        _ => None,
                    },
                }
            },
        )
        .fold(HashMap::new(), |mut acc: HashMap<String, Target>, x| {
            // We may have multiple incoming mounts for the same uuid.
            // This could happen when a target moves quickly but not all agents have reported new
            // data yet. Handle this case by indexing by name:uuid and only overwriting
            // if the current target has no associated filesystems.
            let key = format!("{}:{}", x.name, x.uuid);

            match acc.get(&key) {
                Some(y) if y.filesystems.is_empty() => {
                    acc.insert(key, x);
                }
                None => {
                    acc.insert(key, x);
                }
                Some(y) => {
                    tracing::info!("Skipping insert for {:?}, because we have {:?}", x, y);
                }
            };

            acc
        })
        .into_iter()
        .map(|(_, x)| x)
        .collect::<Vec<_>>()
}
/// A lustre target as derived from mounts and the device index.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone)]
pub struct Target {
    /// "mounted" or "unmounted".
    pub state: String,
    /// The target service name (svname), or "MGS" for the synthetic MGS entry.
    pub name: String,
    /// Path of the backing device, when known.
    pub dev_path: Option<String>,
    /// Host currently mounting the target; `None` when unmounted.
    pub active_host_id: Option<i32>,
    /// All hosts that can see the backing device (active host first).
    pub host_ids: Vec<i32>,
    /// Filesystems this target belongs to (all of them for an MGS).
    pub filesystems: Vec<String>,
    /// Filesystem uuid of the backing device; used as the identity for diffs.
    pub uuid: String,
    /// Local mountpoint on the active host, when mounted.
    pub mount_path: Option<String>,
    /// Backing store flavor (zfs / ldiskfs), parsed from the osd mount option.
    pub fs_type: Option<FsType>,
}
/// Targets are identified by their filesystem uuid when computing changes.
impl Identifiable for Target {
    type Id = String;
    fn id(&self) -> Self::Id {
        self.uuid.clone()
    }
}
impl Target {
    /// Marks the target as not mounted on any host.
    pub fn set_unmounted(&mut self) {
        self.state = "unmounted".into();
        self.active_host_id = None;
    }
}
/// Turns a (upserts, deletions) change set into the list of `Target` rows to
/// write back: deleted targets are emitted first, flipped to `unmounted`,
/// followed by the upserted targets as-is.
pub fn build_updates(x: Changes<'_, Target>) -> Vec<Target> {
    let (upserts, deletions) = x;

    let mut out: Vec<Target> = Vec::new();

    if let Some(Deletions(del)) = deletions {
        out.extend(del.into_iter().cloned().map(|mut t| {
            t.set_unmounted();

            t
        }));
    }

    if let Some(Upserts(up)) = upserts {
        out.extend(up.into_iter().cloned());
    }

    out
}
#[cfg(test)]
mod tests {
    use super::*;
    use device_types::devices::Device;
    use insta::assert_json_snapshot;
    /// Builds the device index from a fixture cluster and snapshots it.
    #[test]
    fn test_index() {
        let cluster: HashMap<Fqdn, Device> =
            serde_json::from_slice(include_bytes!("../fixtures/devtrees.json")).unwrap();
        let index = build_device_index(&cluster);
        insta::with_settings!({sort_maps => true}, {
            assert_json_snapshot!(index);
        });
    }
    /// Upserts with no deletions should pass targets through unchanged.
    #[test]
    fn test_upserts_only() {
        let ups = vec![
            Target {
                state: "mounted".into(),
                name: "mdt1".into(),
                dev_path: None,
                active_host_id: Some(1),
                host_ids: vec![2],
                filesystems: vec!["fs1".to_string()],
                uuid: "123456".into(),
                mount_path: Some("/mnt/mdt1".into()),
                fs_type: Some(FsType::Ldiskfs),
            },
            Target {
                state: "mounted".into(),
                name: "ost1".into(),
                dev_path: Some("/dev/mapper/mpathz".to_string()),
                active_host_id: Some(3),
                host_ids: vec![4],
                filesystems: vec!["fs1".to_string()],
                uuid: "567890".into(),
                mount_path: Some("/mnt/ost1".into()),
                fs_type: Some(FsType::Ldiskfs),
            },
        ];
        let upserts = Upserts(ups.iter().collect());
        let xs = build_updates((Some(upserts), None));
        insta::assert_debug_snapshot!(xs);
    }
    /// Mixed changes: the deleted target should come out unmounted,
    /// followed by the upserted targets unchanged.
    #[test]
    fn test_upserts_and_deletions() {
        let t = Target {
            state: "mounted".into(),
            name: "mdt1".into(),
            dev_path: None,
            active_host_id: Some(1),
            host_ids: vec![2],
            filesystems: vec!["fs1".to_string()],
            uuid: "123456".into(),
            mount_path: Some("/mnt/mdt1".into()),
            fs_type: Some(FsType::Ldiskfs),
        };
        let deletions = Deletions(vec![&t]);
        let ups = vec![
            Target {
                state: "mounted".into(),
                name: "mdt2".into(),
                dev_path: None,
                active_host_id: Some(2),
                host_ids: vec![1],
                filesystems: vec!["fs1".to_string()],
                uuid: "654321".into(),
                mount_path: Some("/mnt/mdt2".into()),
                fs_type: Some(FsType::Ldiskfs),
            },
            Target {
                state: "mounted".into(),
                name: "ost1".into(),
                dev_path: None,
                active_host_id: Some(3),
                host_ids: vec![4],
                filesystems: vec!["fs1".to_string()],
                uuid: "567890".into(),
                mount_path: Some("/mnt/ost1".into()),
                fs_type: Some(FsType::Ldiskfs),
            },
        ];
        let upserts = Upserts(ups.iter().collect());
        let xs = build_updates((Some(upserts), Some(deletions)));
        insta::assert_debug_snapshot!(xs);
    }
    /// Deletions alone should produce the same targets, but unmounted.
    #[test]
    fn test_deletions_only() {
        let t = Target {
            state: "mounted".into(),
            name: "mdt1".into(),
            dev_path: None,
            active_host_id: Some(1),
            host_ids: vec![2],
            filesystems: vec!["fs1".into()],
            uuid: "123456".into(),
            mount_path: Some("/mnt/mdt1".into()),
            fs_type: Some(FsType::Ldiskfs),
        };
        let deletions = Deletions(vec![&t]);
        let xs = build_updates((None, Some(deletions)));
        insta::assert_debug_snapshot!(xs);
    }
    /// No changes at all should yield an empty update list.
    #[test]
    fn test_no_upserts_or_deletions() {
        let xs = build_updates((None, None));
        assert_eq!(xs, vec![]);
    }
}
|
// Copyright (c) 2016 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ops::{Deref, DerefMut};
use std::thread;
use std::time::Duration;
use std::fmt;
use fnv::FnvHasher;
use rand::{self, Rng};
use r2d2;
use r2d2_postgres::{self, PostgresConnectionManager, TlsMode};
use config::DataStoreCfg;
use error::{Error, Result};
use protocol::{Routable, RouteKey, ShardId, SHARD_COUNT};
#[derive(Clone)]
pub struct Pool {
    // Underlying r2d2 connection pool for Postgres (cloning is cheap; the
    // r2d2 pool is internally reference-counted).
    inner: r2d2::Pool<PostgresConnectionManager>,
    /// Shard ids this pool serves; used to pick a schema per request.
    pub shards: Vec<ShardId>,
}
impl fmt::Debug for Pool {
    /// Formats the pool by delegating to the Debug impls of its parts.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "Pool {{ inner: {:?}, shards: {:?} }}", self.inner, self.shards)
    }
}
impl Pool {
    /// Creates a new pool, retrying forever until r2d2 can establish the
    /// initial connections to Postgres.
    ///
    /// NOTE(review): `PostgresConnectionManager::new(...)?` is inside the
    /// loop but propagates its error immediately instead of retrying —
    /// confirm that manager-construction failures are meant to be fatal.
    pub fn new(config: &DataStoreCfg, shards: Vec<ShardId>) -> Result<Pool> {
        loop {
            let pool_config_builder =
                r2d2::Config::builder()
                    .pool_size(config.pool_size)
                    .connection_timeout(Duration::from_secs(config.connection_timeout_sec));
            let pool_config = pool_config_builder.build();
            let manager = PostgresConnectionManager::new(config, TlsMode::None)?;
            match r2d2::Pool::new(pool_config, manager) {
                Ok(pool) => {
                    return Ok(Pool {
                        inner: pool,
                        shards: shards,
                    })
                }
                Err(e) => {
                    // Initial pool creation failed (e.g. Postgres not up yet);
                    // log and retry after a configured delay.
                    error!(
                        "Error initializing connection pool to Postgres, will retry: {}",
                        e
                    )
                }
            }
            thread::sleep(Duration::from_millis(config.connection_retry_ms));
        }
    }
    /// Checks out a connection without selecting any shard schema.
    pub fn get_raw(
        &self,
    ) -> Result<r2d2::PooledConnection<r2d2_postgres::PostgresConnectionManager>> {
        let conn = self.inner.get().map_err(Error::ConnectionTimeout)?;
        Ok(conn)
    }
    /// Checks out a connection and points its `search_path` at the schema
    /// for `shard_id` (schema name is derived as `shard_<id>`, so it is not
    /// attacker-controlled text).
    pub fn get_shard(
        &self,
        shard_id: u32,
    ) -> Result<r2d2::PooledConnection<r2d2_postgres::PostgresConnectionManager>> {
        let conn = self.inner.get().map_err(Error::ConnectionTimeout)?;
        debug!("Switching to shard {}", shard_id);
        let schema_name = format!("shard_{}", shard_id);
        let sql_search_path = format!("SET search_path TO {}", schema_name);
        conn.execute(&sql_search_path, &[]).map_err(
            Error::SchemaSwitch,
        )?;
        Ok(conn)
    }
    /// Routes a message to a shard: hash the route key with FNV when one is
    /// present, otherwise pick a random shard served by this pool (falling
    /// back to shard 0 if the pool serves none).
    pub fn get<T: Routable>(
        &self,
        routable: &T,
    ) -> Result<r2d2::PooledConnection<r2d2_postgres::PostgresConnectionManager>> {
        let optional_shard_id = routable.route_key().map(
            |k| k.hash(&mut FnvHasher::default()),
        );
        let shard_id = match optional_shard_id {
            Some(id) => (id % SHARD_COUNT as u64) as u32,
            None => {
                let mut rng = rand::thread_rng();
                match rng.choose(&self.shards) {
                    Some(shard) => *shard,
                    None => 0,
                }
            }
        };
        self.get_shard(shard_id)
    }
}
impl Deref for Pool {
    type Target = r2d2::Pool<PostgresConnectionManager>;

    /// Lets `Pool` be used anywhere the inner r2d2 pool is expected.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
impl DerefMut for Pool {
    /// Mutable access to the wrapped r2d2 pool.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
|
/**
* Decodes an application/x-www-form-urlencoded string encoded in UTF-8.
*
* @return the decoded key trimmed and in lower case
* @throws DatastoreException
*/
public static String urlDecode(String id) throws DatastoreException {
try {
String decodedId = URLDecoder.decode(id, "UTF-8").trim().toLowerCase();
if (decodedId.startsWith(SYNAPSE_ID_PREFIX)) {
decodedId = decodedId.substring(SYNAPSE_ID_PREFIX.length());
}
return decodedId;
} catch (UnsupportedEncodingException e) {
throw new DatastoreException(e);
}
} |
// ResetUserPassword is handler for PUT /api/user/password/reset.
// It reads a bare JSON integer (the user id) from the request body,
// generates a new random 10-character password, stores its bcrypt hash,
// logs the user out of all sessions, and returns the plain-text password
// to the caller as gzipped JSON. Errors are surfaced by panicking via
// checkError; the deferred recover rolls the transaction back and
// re-panics so an outer handler can translate the panic into an HTTP error.
func (h *Handler) ResetUserPassword(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	h.auth.MustAuthenticateUser(r)
	// Body is expected to be a single JSON integer, e.g. `42`.
	var id int
	err := json.NewDecoder(r.Body).Decode(&id)
	checkError(err)
	tx := h.db.MustBegin()
	// Roll back on any panic raised below, then re-panic for the caller.
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
			panic(r)
		}
	}()
	stmtGet, err := tx.Preparex(`SELECT username FROM user WHERE id = ?`)
	checkError(err)
	stmtUpdate, err := tx.Preparex(`UPDATE user SET password = ? WHERE id = ?`)
	checkError(err)
	// Fetch the username first; it is needed for the mass logout below.
	var username string
	err = stmtGet.Get(&username, id)
	checkError(err)
	// Generate and hash the replacement password (bcrypt cost 10).
	password := []byte(randomString(10))
	hashedPassword, err := bcrypt.GenerateFromPassword(password, 10)
	checkError(err)
	stmtUpdate.MustExec(hashedPassword, id)
	// Invalidate every existing session for this user.
	h.auth.MassLogout(username)
	err = tx.Commit()
	checkError(err)
	// Respond with the id and the new plain-text password so an admin can
	// hand it to the user.
	result := struct {
		ID       int    `json:"id"`
		Password string `json:"password"`
	}{id, string(password)}
	w.Header().Add("Content-Encoding", "gzip")
	w.Header().Add("Content-Type", "application/json")
	err = encodeGzippedJSON(w, &result)
	checkError(err)
}
<filename>src/camera.cpp
#include "lib/universal_include.h"
#include <math.h>
#include <string.h>
#include <float.h>
#include "lib/gfx/debug_render.h"
#include "lib/debug_utils.h"
#include "lib/hi_res_time.h"
#include "lib/input.h"
#include "lib/window_manager.h"
#include "lib/math_utils.h"
#include "lib/matrix33.h"
#include "lib/profiler.h"
#include "app.h"
#include "camera.h"
#include "level.h"
#include "renderer.h"
#include "ship.h"
#define MIN_GROUND_CLEARANCE 10.0f // Minimum height relative to land
#define MIN_HEIGHT 10.0f // Height above sea level (which is y=0)
#define MAX_HEIGHT 5000.0f // Height above sea level (which is y=0)
// ***************
// Private Methods
// ***************
// Free-fly debug camera: WASD/QE to move, middle mouse button to look
// around (and to move 10x faster); Shift moves 10x slower.
void Camera::AdvanceDebugMode()
{
    m_targetFov = 60.0f;
    float speed = 60.0f;
    if (g_inputManager->m_mmb)
        speed *= 10.0f;             // MMB held: fast movement
    if (g_keys[KEY_SHIFT])
        speed /= 10.0f;             // Shift held: precision movement
    Vector3 right = m_front.CrossProduct(m_up);
    // Accumulate a movement direction from the held keys.
    Vector3 move;
    if (g_keys[KEY_A]) move -= right;
    if (g_keys[KEY_D]) move += right;
    if (g_keys[KEY_Q]) move -= m_up;
    if (g_keys[KEY_E]) move += m_up;
    if (g_keys[KEY_W]) move += m_front;
    if (g_keys[KEY_S]) move -= m_front;
    m_pos += g_advanceTime * speed * move;
    int mx = g_inputManager->m_mouseVelX;
    int my = g_inputManager->m_mouseVelY;
    if (g_inputManager->m_mmb)
    {
        // Mouse-look: yaw around the world Y axis, then pitch around the
        // camera's own right axis. Note this `right` shadows the one above.
        Matrix33 mat(1);
        mat.RotateAroundY((float)mx * -0.005f);
        m_up = m_up * mat;
        m_front = m_front * mat;
        Vector3 right = GetRight();
        mat.SetToIdentity();
        mat.FastRotateAround(right, (float)my * -0.005f);
        m_up = m_up * mat;
        m_front = m_front * mat;
    }
}
// Locks the camera to the player ship: same facing, positioned slightly
// above the ship's origin.
void Camera::AdvancePlayerShipMode()
{
    Ship *ship = (Ship*)g_app->m_level->m_playerShip;
    m_front = ship->m_front;
    // NOTE(review): up is set to world-up without re-orthogonalizing
    // against m_front here; Advance() rebuilds the basis afterwards.
    m_up.Set(0, 1, 0);
    m_pos = ship->m_pos;
    m_pos.y += 6.0f;    // eye height above the ship's origin
    // NOTE(review): writes m_fov directly while other modes set
    // m_targetFov; this bypasses any FOV smoothing -- confirm intended.
    m_fov = 55.0f;
}
// Projects a world-space point to window coordinates using the current GL
// modelview/projection matrices. The projected depth (outZ) is discarded,
// so callers cannot tell whether the point is in front of or behind the
// camera; no clipping or bounds check is performed.
void Camera::Get2dScreenPos(Vector3 const &_vector, float *_screenX, float *_screenY)
{
    double outX, outY, outZ;
    int viewport[4];
    double viewMatrix[16];
    double projMatrix[16];
    // Read the current GL state rather than caching it.
    glGetIntegerv(GL_VIEWPORT, viewport);
    glGetDoublev(GL_MODELVIEW_MATRIX, viewMatrix);
    glGetDoublev(GL_PROJECTION_MATRIX, projMatrix);
    gluProject(_vector.x, _vector.y, _vector.z,
               viewMatrix,
               projMatrix,
               viewport,
               &outX,
               &outY,
               &outZ);
    *_screenX = outX;
    *_screenY = outY;
}
// Sets the target field of view only; m_fov presumably eases toward this
// value elsewhere -- TODO confirm where the interpolation happens.
void Camera::SetFov(float _fov)
{
    m_targetFov = _fov;
}
// **************
// Public Methods
// **************
// Starts in player-ship mode at a default vantage point, looking slightly
// downward, and builds an orthonormal (front, right, up) basis from the
// global up vector.
Camera::Camera()
:	m_fov(60.0f),
	m_targetFov(60.0f),
	m_vel(0,0,0),
	m_mode(ModePlayerShip),
	m_shakeAmount(0.0f)
{
    m_pos = Vector3(0.0f,
                    10.0f,
                    50.0f);
    m_front.Set(0, -0.1f, -1);
    m_front.Normalize();
    m_up = g_upVector;
    // Re-orthogonalize up against front via two cross products.
    Vector3 right = m_up.CrossProduct(m_front);
    right.Normalize();
    m_up = m_front.CrossProduct(right);
    m_up.Normalize();
}
// Starts (or intensifies) a camera shake. Using max() means a stronger
// shake overrides a weaker one already in progress, never the reverse.
void Camera::CreateShake(float _intensity)
{
    m_shakeAmount = max(m_shakeAmount, _intensity);
}
// Switches camera mode. Entering debug mode resets the camera to a fixed
// overview position looking diagonally down at the scene.
void Camera::SetMode(int mode)
{
    m_mode = mode;
    if (m_mode == ModeDebug)
    {
        m_pos.Set(-550, 400, 0);
        m_front.Set(1, -1, 0);
        m_front.Normalize();
        // Rebuild an orthonormal up vector from world-up.
        Vector3 right = m_front.CrossProduct(g_upVector);
        m_up = right.CrossProduct(m_front);
        m_up.Normalize();
    }
}
// Loads the GL modelview matrix from the camera basis via gluLookAt.
// Asserts (debug builds only) that front/up are unit length and mutually
// perpendicular before use.
void Camera::SetupModelviewMatrix()
{
    float dot = m_front * m_up;
    DebugAssert(NearlyEquals(m_front.LenSquared(), 1.0f));
    DebugAssert(NearlyEquals(m_up.LenSquared(), 1.0f));
    DebugAssert(NearlyEquals(dot, 0.0f));
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    // Scale front/up by |pos| before calling gluLookAt -- presumably to
    // keep the look-at point numerically well-separated from the eye when
    // the camera is far from the origin. TODO confirm this rationale.
    float magOfPos = m_pos.Len();
    Vector3 front = m_front * magOfPos;
    Vector3 up = m_up * magOfPos;
    Vector3 forwards = m_pos + front;
    gluLookAt(m_pos.x, m_pos.y, m_pos.z,
              forwards.x, forwards.y, forwards.z,
              up.x, up.y, up.z);
}
// Conservative visibility test: returns true when _pos lies within a cone
// around the view direction roughly matching the field of view.
bool Camera::PosInViewFrustum(Vector3 const &_pos)
{
    // Unit direction from the camera to the point.
    Vector3 dirToPos = (_pos - m_pos).Normalize();

    // Dot product of two unit vectors is the cosine of the angle between them.
    float cosAngle = dirToPos * m_front;

    // BUG FIX: the old code compared this cosine (range [-1, 1]) against
    // m_fov in *degrees* (e.g. 55-60), so the comparison always failed and
    // the function always returned false. Compare against the cosine of the
    // half field-of-view instead, widened by a small tolerance so objects
    // near the edge of the screen are still accepted.
    float tolerance = 0.2f;
    float halfFovRadians = m_fov * 0.5f * 3.14159265f / 180.0f;
    if (cosAngle < cosf(halfFovRadians) - tolerance)
        return false;
    return true;
}
// Per-frame camera update: handles mode cycling (Tab), runs the active
// mode's advance routine, applies screen shake, and re-orthonormalizes
// the basis so accumulated rotations never skew it.
void Camera::Advance()
{
    START_PROFILE(g_app->m_profiler, "Advance Camera");
    // Tab cycles through the camera modes.
    if (g_keyDeltas[KEY_TAB])
    {
        int newMode = m_mode + 1;
        newMode %= ModeNumModes;
        SetMode(newMode);
    }
    switch (m_mode)
    {
        case ModePlayerShip:
            AdvancePlayerShipMode();
            break;
        default:
            // Any other mode falls back to the free-fly debug camera.
            AdvanceDebugMode();
    }
    if (m_shakeAmount > 0.0f)
    {
        // Random yaw plus a random tilt of the up vector; the basis is
        // rebuilt from the perturbed vectors below. Shake decays linearly
        // with time.
        m_front.RotateAroundY( sfrand(m_shakeAmount * 0.2f) );
        Vector3 up = g_upVector;
        up.RotateAround( m_front * sfrand(m_shakeAmount) * 0.3f );
        Vector3 right = m_front.CrossProduct(up);
        right.Normalize();
        m_front.Normalize();
        m_up = right.CrossProduct(m_front);
        m_shakeAmount -= g_advanceTime;
    }
    // Re-normalize
    m_front.Normalize();
    Vector3 right = m_up.CrossProduct(m_front);
    right.Normalize();
    m_up = m_front.CrossProduct(right);
    m_up.Normalize();
    ASSERT_VECTOR3_IS_SANE(m_pos);
    float dot = m_front * m_up;
    DebugAssert(NearlyEquals(dot, 0.0f));
    END_PROFILE(g_app->m_profiler, "Advance Camera");
}
// Builds a world-space picking ray under the mouse cursor by unprojecting
// the cursor position at the near (z=0) and far (z=1) clip planes using
// the current GL matrices. rayStart is on the near plane; rayDir is the
// normalized direction from near to far.
void Camera::GetClickRay(Vector3 *rayStart, Vector3 *rayDir)
{
    int x = g_inputManager->m_mouseX;
    int y = g_inputManager->m_mouseY;
    GLint viewport[4];
    GLdouble mvMatrix[16], projMatrix[16];
    GLdouble objx, objy, objz;
    glGetIntegerv(GL_VIEWPORT, viewport);
    glGetDoublev(GL_MODELVIEW_MATRIX, mvMatrix);
    glGetDoublev(GL_PROJECTION_MATRIX, projMatrix);
    // GL window coordinates have y increasing upward; flip the mouse y.
    int realY = viewport[3] - y - 1;
    gluUnProject(x, realY, 0.0f,
                 mvMatrix,
                 projMatrix,
                 viewport,
                 &objx,
                 &objy,
                 &objz);
    rayStart->Set(objx, objy, objz);
    gluUnProject(x, realY, 1.0f,
                 mvMatrix,
                 projMatrix,
                 viewport,
                 &objx,
                 &objy,
                 &objz);
    rayDir->Set(objx, objy, objz);
    *rayDir -= *rayStart;
    rayDir->Normalize();
}
|
Comparison of two active surveillance programs for the detection of clinical dengue cases in Iquitos, Peru.
Endemic dengue transmission has been documented in the Amazonian city of Iquitos, Peru, since the early 1990s. To better understand the epidemiology of dengue transmission in Iquitos, we established multiple active surveillance systems to detect symptomatic infections. Here we compare the efficacy of distinct community-based (door to door) and school absenteeism-based febrile surveillance strategies in detecting active cases of dengue. Febrile episodes were detected by both systems with equal rapidity after disease onset. However, during the period that both programs were running simultaneously in 2004, a higher number of febrile cases in general (4.52/100 versus 1.64/100 person-years) and dengue cases specifically (2.35/100 versus 1.29/100 person-years) were detected in school-aged children through the community-based surveillance program. Similar results were obtained by direct comparison of 435 participants concurrently enrolled in both programs (P < 0.005). We conclude that, in Iquitos, community-based door-to-door surveillance is a more efficient and sensitive design for detecting active dengue cases than programs based on school absenteeism. |
The Market Ticker
September 29, 2008
You are being asked to pass a $700 billion “bailout” or “rescue” package and are told by your leadership that it is “necessary” to prevent a catastrophe in the financial markets and, by extension, on Main Street.
Please think carefully about the following facts before you vote:
Public opinion is running anywhere from 100:1 to 300:1 against passing this bill, according to sources on Capitol Hill. You must return home after you pass this package to ANGRY constituents with an election less than a month away. Given the massive size of this package, and the fact that it rewards the guilty on Wall Street and does nothing to address the cause, that anger is fully justified.
Non-financial private debt is $32.4 trillion dollars1 as of 2Q 2008. Household debt is $14.0 trillion. Households lost 400 billion dollars last quarter. You wish to add $700 billion more in losses (via government obligations that taxpayers must cover) this quarter; this package is insignificant against the total bad credit outstanding. Federal capacity to “bail the system out” is insufficient.
It will not and cannot work because the issue is trust, not money. There is lots of money (and credit) but it is being hoarded throughout the system. Consumer savings have gone from nothing to the highest rate ever in American history – in the space of a few months. Money is flying into Treasuries because of lack of trust, not lack of money. You must fix the cause of the problem, not apply band-aids.
Commercial paper is being cited as the “lockup” that threatens an imminent financial train wreck. The truth is that commercial paper rates for “AA” rated non-financial firms is placing at a rate half that of a year ago as the Fed Funds target has been dropped from 5.25 to 2%2. With risk having increased the rate of return offered is lower? This is where the stress is coming from; at last summer’s rates this paper would roll. You are being gamed by Paulson and Bernanke; look at the table in the reference and you will see that even for “threatened sectors” rates are not materially higher than last year .
If you pass this bill and the market implodes you will be held directly responsible. There are records of thousands of signatures across seven petitions faxed to you (at my expense) dating back to October of 2007 on this topic. Many experts, including Nouriel Roubini, “Mish” Shedlock, Dr. Faber, The Weiss Institute and over 160 economists have warned Congress that this proposed plan will not work. Are you prepared to face a full-page ad in the Wall Street Journal and/or USA Today exposing these facts?
There are alternatives that will work ; they all involve restoring trust and using existing market mechanisms to resolve insolvent institutions.3 While I am not particularly partial to my view on how we resolve “failed” institutions, addressing the root of the problem – lack of trust – is paramount. Three elements are involved here, they are obvious, and they must be fixed or you will FAIL. |
def update_packet_filter(self, context, id, packet_filter):
        """Update a packet filter and sync the change to the OFC.

        Persists the update via the parent class first, then decides
        whether (and how) the OpenFlow controller needs to be told:
        only a real change in filter attributes -- or a previous ERROR
        status -- triggers an OFC update.
        """
        LOG.debug("update_packet_filter() called, "
                  "id=%(id)s packet_filter=%(packet_filter)s .",
                  {'id': id, 'packet_filter': packet_filter})
        pf_data = packet_filter['packet_filter']
        # Give the driver a chance to reject unsupported updates early.
        if hasattr(self.ofc.driver, 'validate_filter_update'):
            self.ofc.driver.validate_filter_update(context, pf_data)
        # Snapshot the pre-update state so we can diff against it below.
        pf_old = self.get_packet_filter(context, id)
        pf = super(PacketFilterMixin, self).update_packet_filter(
            context, id, packet_filter)
        def _packet_filter_changed(old_pf, new_pf):
            # True when any filter-relevant attribute changed, or when the
            # previous sync ended in ERROR (forcing a retry).
            LOG.debug('old_pf=%(old_pf)s, new_pf=%(new_pf)s',
                      {'old_pf': old_pf, 'new_pf': new_pf})
            if old_pf['status'] == pf_db.PF_STATUS_ERROR:
                LOG.debug('update_packet_filter: Force filter update '
                          'because the previous status is ERROR.')
                return True
            for key in new_pf:
                # These keys are immutable or bookkeeping-only; ignore them.
                if key in ('id', 'name', 'tenant_id', 'network_id',
                           'in_port', 'status'):
                    continue
                if old_pf[key] != new_pf[key]:
                    return True
            return False
        if _packet_filter_changed(pf_old, pf):
            if hasattr(self.ofc.driver, 'update_filter'):
                # Driver supports in-place updates on the OFC.
                if pf_old['admin_state_up'] != pf['admin_state_up']:
                    # Admin state flipped: activate or deactivate the filter.
                    LOG.debug('update_packet_filter: admin_state '
                              'is changed to %s', pf['admin_state_up'])
                    if pf['admin_state_up']:
                        self.activate_packet_filter_if_ready(context, pf)
                    else:
                        self.deactivate_packet_filter(context, pf)
                elif pf['admin_state_up']:
                    LOG.debug('update_packet_filter: admin_state is '
                              'unchanged (True)')
                    if self.ofc.exists_ofc_packet_filter(context, id):
                        # Filter already on the OFC: update it in place.
                        pf = self._update_packet_filter(context, pf, pf_data)
                    else:
                        pf = self.activate_packet_filter_if_ready(context, pf)
                else:
                    # Filter is administratively down; nothing to sync.
                    LOG.debug('update_packet_filter: admin_state is unchanged '
                              '(False). No need to update OFC filter.')
            else:
                # Driver cannot update in place: recreate via
                # deactivate-then-activate.
                pf = self.deactivate_packet_filter(context, pf)
                pf = self.activate_packet_filter_if_ready(context, pf)
        return pf
import { BotonicMessageEvent, MessageEventTypes } from './message-event'
/**
 * A message event carrying a geographic location.
 */
export interface LocationMessageEvent extends BotonicMessageEvent {
  /** Discriminator: always {@link MessageEventTypes.LOCATION}. */
  type: MessageEventTypes.LOCATION
  /** Latitude in decimal degrees. */
  lat: number
  /** Longitude in decimal degrees. */
  long: number
}
|
/**
 * Content values wrapper for the {@code favourites} table.
 * Builder-style: every {@code putXxx} method stores one column value and
 * returns {@code this} so calls can be chained before insert/update.
 */
public class FavouritesContentValues extends AbstractContentValues {
    @Override
    public Uri uri() {
        return FavouritesColumns.CONTENT_URI;
    }
    /**
     * Update row(s) using the values stored by this object and the given selection.
     *
     * @param contentResolver The content resolver to use.
     * @param where The selection to use (can be {@code null}).
     */
    public int update(ContentResolver contentResolver, @Nullable FavouritesSelection where) {
        return contentResolver.update(uri(), values(), where == null ? null : where.sel(), where == null ? null : where.args());
    }
    /**
     * Update row(s) using the values stored by this object and the given selection.
     *
     * @param contentResolver The content resolver to use.
     * @param where The selection to use (can be {@code null}).
     */
    public int update(Context context, @Nullable FavouritesSelection where) {
        return context.getContentResolver().update(uri(), values(), where == null ? null : where.sel(), where == null ? null : where.args());
    }
    /**
     * Unique TMDB Movie id
     */
    public FavouritesContentValues putMovieId(long value) {
        mContentValues.put(FavouritesColumns.MOVIE_ID, value);
        return this;
    }
    // Required column: rejects null explicitly rather than storing it.
    public FavouritesContentValues putOriginalTitle(@NonNull String value) {
        if (value == null) throw new IllegalArgumentException("originalTitle must not be null");
        mContentValues.put(FavouritesColumns.ORIGINAL_TITLE, value);
        return this;
    }
    // Required column: rejects null explicitly rather than storing it.
    public FavouritesContentValues putOverview(@NonNull String value) {
        if (value == null) throw new IllegalArgumentException("overview must not be null");
        mContentValues.put(FavouritesColumns.OVERVIEW, value);
        return this;
    }
    public FavouritesContentValues putBackdropPath(@Nullable String value) {
        mContentValues.put(FavouritesColumns.BACKDROP_PATH, value);
        return this;
    }
    public FavouritesContentValues putBackdropPathNull() {
        mContentValues.putNull(FavouritesColumns.BACKDROP_PATH);
        return this;
    }
    public FavouritesContentValues putPosterPath(@Nullable String value) {
        mContentValues.put(FavouritesColumns.POSTER_PATH, value);
        return this;
    }
    public FavouritesContentValues putPosterPathNull() {
        mContentValues.putNull(FavouritesColumns.POSTER_PATH);
        return this;
    }
    public FavouritesContentValues putReleaseDate(@Nullable String value) {
        mContentValues.put(FavouritesColumns.RELEASE_DATE, value);
        return this;
    }
    public FavouritesContentValues putReleaseDateNull() {
        mContentValues.putNull(FavouritesColumns.RELEASE_DATE);
        return this;
    }
    public FavouritesContentValues putTagline(@Nullable String value) {
        mContentValues.put(FavouritesColumns.TAGLINE, value);
        return this;
    }
    public FavouritesContentValues putTaglineNull() {
        mContentValues.putNull(FavouritesColumns.TAGLINE);
        return this;
    }
    public FavouritesContentValues putPopularity(@Nullable Float value) {
        mContentValues.put(FavouritesColumns.POPULARITY, value);
        return this;
    }
    public FavouritesContentValues putPopularityNull() {
        mContentValues.putNull(FavouritesColumns.POPULARITY);
        return this;
    }
    public FavouritesContentValues putVoteAverage(@Nullable Float value) {
        mContentValues.put(FavouritesColumns.VOTE_AVERAGE, value);
        return this;
    }
    public FavouritesContentValues putVoteAverageNull() {
        mContentValues.putNull(FavouritesColumns.VOTE_AVERAGE);
        return this;
    }
    /**
     * Boolean wether movie is adult rated or not
     */
    public FavouritesContentValues putAdult(@Nullable Boolean value) {
        mContentValues.put(FavouritesColumns.ADULT, value);
        return this;
    }
    public FavouritesContentValues putAdultNull() {
        mContentValues.putNull(FavouritesColumns.ADULT);
        return this;
    }
    // Reviews are stored pre-serialized (presumably JSON -- confirm with
    // the serializer used by callers).
    public FavouritesContentValues putSerializedReviews(@Nullable String value) {
        mContentValues.put(FavouritesColumns.SERIALIZED_REVIEWS, value);
        return this;
    }
    public FavouritesContentValues putSerializedReviewsNull() {
        mContentValues.putNull(FavouritesColumns.SERIALIZED_REVIEWS);
        return this;
    }
    // Trailers are stored pre-serialized, same convention as reviews.
    public FavouritesContentValues putSerializedTrailers(@Nullable String value) {
        mContentValues.put(FavouritesColumns.SERIALIZED_TRAILERS, value);
        return this;
    }
    public FavouritesContentValues putSerializedTrailersNull() {
        mContentValues.putNull(FavouritesColumns.SERIALIZED_TRAILERS);
        return this;
    }
    public FavouritesContentValues putIsFavourite(boolean value) {
        mContentValues.put(FavouritesColumns.IS_FAVOURITE, value);
        return this;
    }
}
ON THE ISSUE OF THE GESER EPIC IN CHINA IN THE 21st CENTURY
The study of “Geser” in China began in the late fifties of the last century and entered the development stage in the late eighties. In the new century, research on the origin of the Mongolian “Geser”, the translation of the Mongolian “Geser”, the Mongolian version of the “Geser” and the traditional protection of the Mongolian “Geser” was carried out. The content and art of the Mongolian “Geser” have also been intensively discussed. This article aims to systematically introduce the situation of Mongolian Geser research in China in the new century.
3d–4p Transitions in the zinclike and copperlike ions Y x, xi; Zr xi, xii; Nb xii, xiii; and Mo xiii, xiv
Lines occurring as satellites on the long-wavelength side of the 3d10–3d94p resonance lines of Ni-like ions have been investigated with a low-inductance vacuum spark and a 10.7-m spectrograph for the elements Y, Zr, Nb, and Mo. The lines are interpreted as 3d104s–3d94s4p and 3d104p–3d94p2 transitions in the Cu-like ions Y xi, Zr xii, Nb xiii, and Mo xiv and 3d104s2–3d94s24p transitions in the Zn-like ions Y x, Zr xi, Nb xii, and Mo xiii. The spectra of the Cu-like ions were interpreted by generalized least-squares fits for the energy levels of the sequence of four ions. Thirty-nine levels of 3d94s4p were interpreted simultaneously with a root-mean-square deviation of 122 cm−1; forty-four levels of 3d94p2 were interpreted in the same way with a root-mean-square deviation of 200 cm−1. Line identifications and energy levels were obtained for the 3d107p configuration of the Cu-like ions Y xi–Mo xiv. |
def __BSMlambda(self):
        """Black-Scholes option elasticity (lambda): delta scaled by the
        ratio of underlying price to option value, rounded to 4 places.

        Reads ``self.V`` (option value), ``self.S`` (underlying price) and
        ``self.delta`` from the instance.
        """
        # Keep the original grouping delta * (S / V) so floating-point
        # results are bit-identical to the previous implementation.
        return round(self.delta * (self.S / self.V), 4)
<filename>src/main/java/com/afmobi/cassandra/datastax/pojo/User.java
package com.afmobi.cassandra.datastax.pojo;
/**
 * Simple user record: id, display name and password.
 */
public class User {
    private int uid;
    private String name;
    private String passwd;
    public int getUid() {
        return uid;
    }
    public void setUid(int uid) {
        this.uid = uid;
    }
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public String getPasswd() {
        return passwd;
    }
    public void setPasswd(String passwd) {
        this.passwd = passwd;
    }
    @Override
    public String toString() {
        // BUG FIX: the body contained a "<PASSWORD>" redaction placeholder,
        // which is not valid Java; restore the field reference.
        // NOTE(review): including the password in toString() can leak
        // credentials into logs -- confirm whether this is intended.
        return "uid:" + uid + " name:" + name + " passwd:" + passwd;
    }
}
|
Computing Nash equilibria for scheduling on restricted parallel links
We consider the problem of routing n users on m parallel links, under the restriction that each user may only be routed on a link from a certain set of allowed links for the user. Thus, the problem is equivalent to the correspondingly restricted problem of assigning n jobs to m parallel machines. In a pure Nash equilibrium, no user may improve its own individual cost (delay) by unilaterally switching to another link from its set of allowed links. As our main result, we introduce a polynomial time algorithm to compute from any given assignment a pure Nash equilibrium with non-increased makespan. The algorithm gradually changes a given assignment by pushing unsplittable user traffics through a network that is defined by the users and the links. Here, we use ideas from blocking flows. Furthermore, we use similar techniques as in the generic Preflow-Push algorithm to approximate a schedule with minimum makespan, gaining an improved approximation factor of 2 - 1/w_1 for identical links, where w_1 is the largest user traffic. We extend this result to related links, gaining an approximation factor of 2. Our approximation algorithms run in polynomial time. We close with tight upper bounds on the coordination ratio for pure Nash equilibria.
package org.hamcrest.reflection;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeDiagnosingMatcher;
import java.util.Arrays;
/**
 * Hamcrest matcher that checks whether a class directly implements the
 * expected interface (via {@link Class#getInterfaces()}, i.e. interfaces
 * inherited from superclasses are not considered).
 */
public class ImplementsInterface extends TypeSafeDiagnosingMatcher<Class<?>> {
    private final Class<?> expectedInterface;
    public ImplementsInterface(Class<?> expectedInterface) {
        this.expectedInterface = expectedInterface;
    }
    @Override
    protected boolean matchesSafely(Class<?> item, Description mismatchDescription) {
        // FIX: use Class<?> instead of the raw Class type so the override
        // matches the generic superclass and no unchecked warnings arise.
        final Class<?>[] implementedInterfaces = item.getInterfaces();
        mismatchDescription
                .appendValue(item)
                .appendText(" only implements the following interfaces")
                .appendValueList("(", ",", ")", implementedInterfaces);
        return Arrays.stream(implementedInterfaces).anyMatch(c -> c.equals(expectedInterface));
    }
    @Override
    public void describeTo(Description description) {
        description.appendText("a type that implements").appendValue(expectedInterface);
    }
}
|
// agentDiscoveryLoop finds all netplugin nodes and adds them to the ofnet
// master. It watches the "netplugin" service in the object store and, for
// each add/delete event, registers or unregisters the corresponding node.
// The loop never returns; a failure to establish the watch is fatal.
func (d *MasterDaemon) agentDiscoveryLoop() {
	agentEventCh := make(chan objdb.WatchServiceEvent, 1)
	watchStopCh := make(chan bool, 1)
	err := d.objdbClient.WatchService("netplugin", agentEventCh, watchStopCh)
	if err != nil {
		log.Fatalf("Could not start a watch on netplugin service. Err: %v", err)
	}
	for {
		agentEv := <-agentEventCh
		log.Debugf("Received netplugin watch event: %+v", agentEv)
		// Translate the service record into an ofnet node descriptor.
		nodeInfo := ofnet.OfnetNode{
			HostAddr: agentEv.ServiceInfo.HostAddr,
			HostPort: uint16(agentEv.ServiceInfo.Port),
		}
		if agentEv.EventType == objdb.WatchServiceEventAdd {
			err = d.ofnetMaster.AddNode(nodeInfo)
			if err != nil {
				// Log and keep watching; a single bad node should not
				// stop discovery.
				log.Errorf("Error adding node %v. Err: %v", nodeInfo, err)
			}
		} else if agentEv.EventType == objdb.WatchServiceEventDel {
			// NOTE(review): UnRegisterNode's error return and `res` are
			// discarded -- confirm failures here are safe to ignore.
			var res bool
			log.Infof("Unregister node %+v", nodeInfo)
			d.ofnetMaster.UnRegisterNode(&nodeInfo, &res)
		}
		// Small delay to avoid hot-looping on bursts of events.
		time.Sleep(100 * time.Millisecond)
	}
}
<filename>aeron-cluster/src/test/java/io/aeron/cluster/ClusterTest.java
/*
* Copyright 2014-2019 Real Logic Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.aeron.cluster;
import io.aeron.cluster.service.Cluster;
import org.agrona.collections.MutableInteger;
import org.agrona.concurrent.IdleStrategy;
import org.agrona.concurrent.YieldingIdleStrategy;
import org.agrona.concurrent.status.CountersReader;
import org.junit.*;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import static io.aeron.Aeron.NULL_VALUE;
import static io.aeron.cluster.service.CommitPos.COMMIT_POSITION_TYPE_ID;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
@Ignore
public class ClusterTest
{
private static final String MSG = "Hello World!";
    // Stops one follower, restarts it, and verifies it rejoins as a
    // follower. NOTE(review): the fixed Thread.sleep waits make this
    // timing-sensitive; consider polling for state instead.
    @Test(timeout = 30_000)
    public void shouldStopFollowerAndRestartFollower() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            TestNode follower = cluster.followers().get(0);
            cluster.stopNode(follower);
            Thread.sleep(1_000);
            follower = cluster.startStaticNode(follower.index(), false);
            Thread.sleep(1_000);
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
        }
    }
    // Kills the current leader and verifies a connected client observes
    // the leadership change event for the next term.
    @Test(timeout = 30_000)
    public void shouldNotifyClientOfNewLeader() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            cluster.connectClient();
            cluster.stopNode(leader);
            cluster.awaitLeadershipEvent(1);
        }
    }
    // Takes a snapshot on all nodes, stops the whole cluster, restarts it,
    // and verifies every node reloads state from its snapshot.
    @Test(timeout = 30_000)
    public void shouldStopLeaderAndFollowersThenRestartAllWithSnapshot() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            cluster.takeSnapshot(leader);
            cluster.awaitSnapshotCounter(cluster.node(0), 1);
            cluster.awaitSnapshotCounter(cluster.node(1), 1);
            cluster.awaitSnapshotCounter(cluster.node(2), 1);
            cluster.stopNode(cluster.node(0));
            cluster.stopNode(cluster.node(1));
            cluster.stopNode(cluster.node(2));
            Thread.sleep(1_000);
            cluster.startStaticNode(0, false);
            cluster.startStaticNode(1, false);
            cluster.startStaticNode(2, false);
            cluster.awaitLeader();
            assertThat(cluster.followers().size(), is(2));
            cluster.awaitSnapshotLoadedForService(cluster.node(0));
            cluster.awaitSnapshotLoadedForService(cluster.node(1));
            cluster.awaitSnapshotLoadedForService(cluster.node(2));
        }
    }
    // Performs an orderly shutdown (which snapshots every node), restarts
    // the cluster, and verifies each node loads from its snapshot.
    @Test(timeout = 30_000)
    public void shouldShutdownClusterAndRestartWithSnapshots() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            cluster.node(0).terminationExpected(true);
            cluster.node(1).terminationExpected(true);
            cluster.node(2).terminationExpected(true);
            cluster.shutdownCluster(leader);
            cluster.awaitNodeTermination(cluster.node(0));
            cluster.awaitNodeTermination(cluster.node(1));
            cluster.awaitNodeTermination(cluster.node(2));
            assertTrue(cluster.node(0).service().wasSnapshotTaken());
            assertTrue(cluster.node(1).service().wasSnapshotTaken());
            assertTrue(cluster.node(2).service().wasSnapshotTaken());
            cluster.stopNode(cluster.node(0));
            cluster.stopNode(cluster.node(1));
            cluster.stopNode(cluster.node(2));
            Thread.sleep(1_000);
            cluster.startStaticNode(0, false);
            cluster.startStaticNode(1, false);
            cluster.startStaticNode(2, false);
            cluster.awaitLeader();
            assertThat(cluster.followers().size(), is(2));
            cluster.awaitSnapshotLoadedForService(cluster.node(0));
            cluster.awaitSnapshotLoadedForService(cluster.node(1));
            cluster.awaitSnapshotLoadedForService(cluster.node(2));
        }
    }
    /**
     * Aborting the cluster must terminate all nodes WITHOUT snapshotting; a restart
     * from existing state must therefore not load any snapshot.
     */
    @Test(timeout = 30_000)
    public void shouldAbortClusterAndRestart() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            cluster.node(0).terminationExpected(true);
            cluster.node(1).terminationExpected(true);
            cluster.node(2).terminationExpected(true);
            cluster.abortCluster(leader);
            cluster.awaitNodeTermination(cluster.node(0));
            cluster.awaitNodeTermination(cluster.node(1));
            cluster.awaitNodeTermination(cluster.node(2));
            // Abort, unlike shutdown, skips the snapshot phase.
            assertFalse(cluster.node(0).service().wasSnapshotTaken());
            assertFalse(cluster.node(1).service().wasSnapshotTaken());
            assertFalse(cluster.node(2).service().wasSnapshotTaken());
            cluster.stopNode(cluster.node(0));
            cluster.stopNode(cluster.node(1));
            cluster.stopNode(cluster.node(2));
            Thread.sleep(1_000);
            cluster.startStaticNode(0, false);
            cluster.startStaticNode(1, false);
            cluster.startStaticNode(2, false);
            cluster.awaitLeader();
            assertThat(cluster.followers().size(), is(2));
            assertFalse(cluster.node(0).service().wasSnapshotLoaded());
            assertFalse(cluster.node(1).service().wasSnapshotLoaded());
            assertFalse(cluster.node(2).service().wasSnapshotLoaded());
        }
    }
    /**
     * With one follower already down, abort must still terminate the two remaining
     * members (the stopped follower can never acknowledge the termination).
     */
    @Test(timeout = 30_000)
    public void shouldAbortClusterOnTerminationTimeout() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            assertThat(followers.size(), is(2));
            final TestNode followerA = followers.get(0);
            final TestNode followerB = followers.get(1);
            leader.terminationExpected(true);
            followerA.terminationExpected(true);
            // followerB is stopped first so it cannot take part in the abort.
            cluster.stopNode(followerB);
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            cluster.abortCluster(leader);
            cluster.awaitNodeTermination(leader);
            cluster.awaitNodeTermination(followerA);
            cluster.stopNode(leader);
            cluster.stopNode(followerA);
        }
    }
    /**
     * Messages echoed before a leader failure must be preserved, and the client must
     * fail over to the new leader and continue echoing afterwards.
     */
    @Test(timeout = 30_000)
    public void shouldEchoMessagesThenContinueOnNewLeader() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode originalLeader = cluster.awaitLeader();
            cluster.connectClient();
            final int preFailureMessageCount = 10;
            final int postFailureMessageCount = 7;
            cluster.sendMessages(preFailureMessageCount);
            cluster.awaitResponses(preFailureMessageCount);
            cluster.awaitMessageCountForService(cluster.node(0), preFailureMessageCount);
            cluster.awaitMessageCountForService(cluster.node(1), preFailureMessageCount);
            cluster.awaitMessageCountForService(cluster.node(2), preFailureMessageCount);
            // Client should be tracking the current leader before the failure...
            assertThat(cluster.client().leaderMemberId(), is(originalLeader.index()));
            cluster.stopNode(originalLeader);
            final TestNode newLeader = cluster.awaitLeader(originalLeader.index());
            cluster.sendMessages(postFailureMessageCount);
            cluster.awaitResponses(preFailureMessageCount + postFailureMessageCount);
            // ...and the new one after fail-over.
            assertThat(cluster.client().leaderMemberId(), is(newLeader.index()));
            final TestNode follower = cluster.followers().get(0);
            cluster.awaitMessageCountForService(newLeader, preFailureMessageCount + postFailureMessageCount);
            cluster.awaitMessageCountForService(follower, preFailureMessageCount + postFailureMessageCount);
        }
    }
    /**
     * A stopped leader restarted from its state must rejoin as a follower with no
     * election in progress.
     */
    @Test(timeout = 30_000)
    public void shouldStopLeaderAndRestartAsFollower() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode originalLeader = cluster.awaitLeader();
            cluster.stopNode(originalLeader);
            cluster.awaitLeader(originalLeader.index());
            final TestNode follower = cluster.startStaticNode(originalLeader.index(), false);
            // Fixed wait for the restarted node to settle; null election state means
            // the election has completed.
            Thread.sleep(5_000);
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
            assertThat(follower.electionState(), is((Election.State)null));
        }
    }
    /**
     * As shouldStopLeaderAndRestartAsFollower, but additionally verifies the cluster
     * still echoes messages once the restarted node has rejoined.
     */
    @Test(timeout = 30_000)
    public void shouldStopLeaderAndRestartAsFollowerWithSendingAfter() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode originalLeader = cluster.awaitLeader();
            cluster.stopNode(originalLeader);
            cluster.awaitLeader(originalLeader.index());
            final TestNode follower = cluster.startStaticNode(originalLeader.index(), false);
            // Poll until the restarted node's election has completed (state goes null).
            while (follower.electionState() != null)
            {
                Thread.sleep(1000);
            }
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
        }
    }
    /**
     * Restart a stopped leader as follower, echo messages, then stop the current
     * leader and verify yet another election succeeds.
     */
    @Test(timeout = 60_000)
    public void shouldStopLeaderAndRestartAsFollowerWithSendingAfterThenStopLeader() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode originalLeader = cluster.awaitLeader();
            cluster.stopNode(originalLeader);
            cluster.awaitLeader(originalLeader.index());
            final TestNode follower = cluster.startStaticNode(originalLeader.index(), false);
            // Fixed wait for the restarted node to complete its election.
            Thread.sleep(5_000);
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
            assertThat(follower.electionState(), is((Election.State)null));
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            // Second failure: the cluster must elect again with the restarted member present.
            final TestNode leader = cluster.awaitLeader();
            cluster.stopNode(leader);
            cluster.awaitLeader(leader.index());
        }
    }
    /**
     * A follower restarted with a CLEAN state directory (cleanStart = true) must
     * rejoin as follower and catch up to newly echoed messages.
     */
    @Test(timeout = 30_000)
    public void shouldAcceptMessagesAfterSingleNodeCleanRestart() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            TestNode follower = cluster.followers().get(0);
            cluster.stopNode(follower);
            // Long pause so the node is well and truly gone before the clean restart.
            Thread.sleep(10_000);
            follower = cluster.startStaticNode(follower.index(), true);
            Thread.sleep(1_000);
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            cluster.awaitMessageCountForService(follower, messageCount);
        }
    }
    /**
     * A follower that was down while a snapshot was taken and messages were echoed
     * must, on restart, retrieve the snapshot and replay up to the live message count
     * without errors.
     */
    @Test(timeout = 30_000)
    public void shouldReplaySnapshotTakenWhileDown() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final TestNode followerA = cluster.followers().get(0);
            TestNode followerB = cluster.followers().get(1);
            cluster.stopNode(followerB);
            Thread.sleep(10_000);
            // Snapshot and traffic happen while followerB is down.
            cluster.takeSnapshot(leader);
            cluster.awaitSnapshotCounter(leader, 1);
            cluster.awaitSnapshotCounter(followerA, 1);
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            followerB = cluster.startStaticNode(followerB.index(), false);
            cluster.awaitSnapshotCounter(followerB, 1);
            assertThat(followerB.role(), is(Cluster.Role.FOLLOWER));
            cluster.awaitMessageCountForService(followerB, messageCount);
            assertThat(followerB.errors(), is(0L));
        }
    }
    /**
     * Two successive leader failures: the first failed leader rejoins and catches up,
     * then the second leader is stopped; the cluster must still elect and echo.
     */
    @Test(timeout = 45_000)
    public void shouldTolerateMultipleLeaderFailures() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode firstLeader = cluster.awaitLeader();
            cluster.stopNode(firstLeader);
            final TestNode secondLeader = cluster.awaitLeader();
            final long commitPos = secondLeader.commitPosition();
            final TestNode newFollower = cluster.startStaticNode(firstLeader.index(), false);
            // The rejoined node must reach the commit position before the next failure,
            // otherwise a quorum may not survive stopping the second leader.
            cluster.awaitCommitPosition(newFollower, commitPos);
            cluster.awaitNotInElection(newFollower);
            cluster.stopNode(secondLeader);
            cluster.awaitLeader();
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
        }
    }
    /**
     * Both followers restarted with clean state directories must rejoin as followers
     * and catch up to newly echoed messages.
     */
    @Test(timeout = 30_000)
    public void shouldAcceptMessagesAfterTwoNodeCleanRestart() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            TestNode followerA = followers.get(0), followerB = followers.get(1);
            cluster.stopNode(followerA);
            cluster.stopNode(followerB);
            Thread.sleep(5_000);
            // Clean restart: cleanStart = true wipes each node's persisted state.
            followerA = cluster.startStaticNode(followerA.index(), true);
            followerB = cluster.startStaticNode(followerB.index(), true);
            Thread.sleep(1_000);
            assertThat(followerA.role(), is(Cluster.Role.FOLLOWER));
            assertThat(followerB.role(), is(Cluster.Role.FOLLOWER));
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            cluster.awaitMessageCountForService(followerA, messageCount);
            cluster.awaitMessageCountForService(followerB, messageCount);
        }
    }
    /**
     * After a leader failure and re-election, each surviving node must expose exactly
     * one commit-position counter (no stale counter left from the previous term).
     */
    @Test(timeout = 30_000)
    public void shouldHaveOnlyOneCommitPositionCounter() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            final TestNode followerA = followers.get(0), followerB = followers.get(1);
            cluster.stopNode(leader);
            cluster.awaitLeader(leader.index());
            assertThat(countersOfType(followerA.countersReader(), COMMIT_POSITION_TYPE_ID), is(1));
            assertThat(countersOfType(followerB.countersReader(), COMMIT_POSITION_TYPE_ID), is(1));
        }
    }
    /**
     * The service's role-change callback must fire with LEADER only on the node that
     * actually becomes leader, both at start-up and after a fail-over.
     */
    @Test(timeout = 30_000)
    public void shouldCallOnRoleChangeOnBecomingLeader() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            TestNode leader = cluster.awaitLeader();
            List<TestNode> followers = cluster.followers();
            final TestNode followerA = followers.get(0);
            final TestNode followerB = followers.get(1);
            // Followers never saw a role change, so their last-seen role is null.
            assertThat(leader.service().roleChangedTo(), is(Cluster.Role.LEADER));
            assertThat(followerA.service().roleChangedTo(), is((Cluster.Role)null));
            assertThat(followerB.service().roleChangedTo(), is((Cluster.Role)null));
            cluster.stopNode(leader);
            leader = cluster.awaitLeader(leader.index());
            followers = cluster.followers();
            final TestNode follower = followers.get(0);
            assertThat(leader.service().roleChangedTo(), is(Cluster.Role.LEADER));
            assertThat(follower.service().roleChangedTo(), is((Cluster.Role)null));
        }
    }
    /**
     * A leader that loses both followers (and hence quorum) must step down, with the
     * service observing a role change back to FOLLOWER.
     */
    @Test(timeout = 30_000)
    public void shouldLoseLeadershipWhenNoActiveQuorumOfFollowers() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            final TestNode followerA = followers.get(0);
            final TestNode followerB = followers.get(1);
            assertThat(leader.service().roleChangedTo(), is(Cluster.Role.LEADER));
            cluster.stopNode(followerA);
            cluster.stopNode(followerB);
            // Busy-wait until the service sees the demotion; the interrupt check keeps
            // the loop from spinning past the test timeout silently.
            while (leader.service().roleChangedTo() == Cluster.Role.LEADER)
            {
                TestUtil.checkInterruptedStatus();
                Thread.yield();
            }
            assertThat(leader.service().roleChangedTo(), is(Cluster.Role.FOLLOWER));
        }
    }
    /**
     * A follower is bounced while a background thread keeps offering messages;
     * afterwards no node may have recorded errors and the restarted follower must be
     * out of election.
     */
    @Test(timeout = 60_000)
    public void shouldRecoverWhileMessagesContinue() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            final TestNode followerA = followers.get(0);
            TestNode followerB = followers.get(1);
            cluster.connectClient();
            final Thread messageThread = startMessageThread(cluster, TimeUnit.MICROSECONDS.toNanos(500));
            try
            {
                cluster.stopNode(followerB);
                Thread.sleep(10_000);
                followerB = cluster.startStaticNode(followerB.index(), false);
                // Extended window for catch-up under continuous load.
                Thread.sleep(30_000);
            }
            finally
            {
                // Always stop the traffic generator, even if the bounce failed.
                messageThread.interrupt();
                messageThread.join();
            }
            assertThat(leader.errors(), is(0L));
            assertThat(followerA.errors(), is(0L));
            assertThat(followerB.errors(), is(0L));
            assertThat(followerB.electionState(), is((Election.State)null));
        }
    }
    /**
     * A follower restarted with a clean state directory (empty log) must catch up to
     * messages echoed while it was away.
     */
    @Test(timeout = 10_000)
    public void shouldCatchupFromEmptyLog() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            TestNode followerB = followers.get(1);
            cluster.stopNode(followerB);
            cluster.connectClient();
            final int messageCount = 10;
            cluster.sendMessages(messageCount);
            cluster.awaitResponses(messageCount);
            // cleanStart = true forces a catch-up from position zero.
            followerB = cluster.startStaticNode(followerB.index(), true);
            cluster.awaitMessageCountForService(followerB, messageCount);
        }
    }
@Test(timeout = 30_000)
public void shouldCatchupFromEmptyLogThenSnapshotAfterShutdownAndFollowerCleanStart() throws Exception
{
try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
{
final TestNode leader = cluster.awaitLeader();
final List<TestNode> followers = cluster.followers();
final TestNode followerA = followers.get(0);
final TestNode followerB = followers.get(1);
cluster.connectClient();
final int messageCount = 10;
cluster.sendMessages(messageCount);
cluster.awaitResponses(messageCount);
leader.terminationExpected(true);
followerA.terminationExpected(true);
followerB.terminationExpected(true);
cluster.shutdownCluster(leader);
cluster.awaitNodeTermination(cluster.node(0));
cluster.awaitNodeTermination(cluster.node(1));
cluster.awaitNodeTermination(cluster.node(2));
assertTrue(cluster.node(0).service().wasSnapshotTaken());
assertTrue(cluster.node(1).service().wasSnapshotTaken());
assertTrue(cluster.node(2).service().wasSnapshotTaken());
cluster.stopNode(cluster.node(0));
cluster.stopNode(cluster.node(1));
cluster.stopNode(cluster.node(2));
Thread.sleep(1_000);
cluster.startStaticNode(0, false);
cluster.startStaticNode(1, false);
cluster.startStaticNode(2, true);
final TestNode newLeader = cluster.awaitLeader();
assertNotEquals(newLeader.index(), is(2));
assertTrue(cluster.node(0).service().wasSnapshotLoaded());
assertTrue(cluster.node(1).service().wasSnapshotLoaded());
assertFalse(cluster.node(2).service().wasSnapshotLoaded());
cluster.awaitMessageCountForService(cluster.node(2), messageCount);
cluster.awaitSnapshotCounter(cluster.node(2), 1);
assertTrue(cluster.node(2).service().wasSnapshotTaken());
}
}
    /** Follower that misses a single plain message must catch up on restart. */
    @Test(timeout = 30_000)
    public void shouldCatchUpAfterFollowerMissesOneMessage() throws Exception
    {
        shouldCatchUpAfterFollowerMissesMessage(TestMessages.NO_OP);
    }
    /** Follower that misses a timer-registration message must catch up on restart. */
    @Test(timeout = 30_000)
    public void shouldCatchUpAfterFollowerMissesTimerRegistration() throws Exception
    {
        shouldCatchUpAfterFollowerMissesMessage(TestMessages.REGISTER_TIMER);
    }
    /**
     * After a large log is built and the cluster aborted, restart the old leader from
     * state and both old followers with clean directories; all must rejoin without
     * errors and with elections completed.
     */
    @Test(timeout = 30_000)
    public void shouldCatchUpTwoFreshNodesAfterRestart() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            cluster.connectClient();
            // Large message count builds a non-trivial log for the fresh nodes to fetch.
            final int messageCount = 50_000;
            for (int i = 0; i < messageCount; i++)
            {
                cluster.msgBuffer().putStringWithoutLengthAscii(0, TestMessages.NO_OP);
                cluster.sendMessage(TestMessages.NO_OP.length());
            }
            cluster.awaitResponses(messageCount);
            cluster.node(0).terminationExpected(true);
            cluster.node(1).terminationExpected(true);
            cluster.node(2).terminationExpected(true);
            cluster.abortCluster(leader);
            cluster.awaitNodeTermination(cluster.node(0));
            cluster.awaitNodeTermination(cluster.node(1));
            cluster.awaitNodeTermination(cluster.node(2));
            cluster.stopNode(cluster.node(0));
            cluster.stopNode(cluster.node(1));
            cluster.stopNode(cluster.node(2));
            // Old leader keeps its state; the two followers start fresh.
            final TestNode oldLeader = cluster.startStaticNode(leader.index(), false);
            final TestNode oldFollower1 = cluster.startStaticNode(followers.get(0).index(), true);
            final TestNode oldFollower2 = cluster.startStaticNode(followers.get(1).index(), true);
            cluster.awaitLeader();
            assertThat(oldLeader.errors(), is(0L));
            assertThat(oldFollower1.errors(), is(0L));
            assertThat(oldFollower2.errors(), is(0L));
            assertThat(oldFollower1.electionState(), is((Election.State)null));
            assertThat(oldFollower2.electionState(), is((Election.State)null));
        }
    }
    /**
     * Two snapshot generations (one explicit, one from shutdown) followed by a mixed
     * restart: nodes 0/1 from state, node 2 clean. Node 2 must rebuild the full
     * message history and all nodes must continue echoing afterwards.
     */
    @Test(timeout = 30_000)
    public void shouldReplayMultipleSnapshotsWithEmptyFollowerLog() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            final TestNode leader = cluster.awaitLeader();
            final List<TestNode> followers = cluster.followers();
            final TestNode followerA = followers.get(0);
            final TestNode followerB = followers.get(1);
            cluster.connectClient();
            cluster.sendMessages(2);
            cluster.awaitResponses(2);
            cluster.awaitMessageCountForService(cluster.node(2), 2);
            // First snapshot generation covers the first 2 messages.
            cluster.takeSnapshot(leader);
            final int memberCount = 3;
            for (int memberId = 0; memberId < memberCount; memberId++)
            {
                final TestNode node = cluster.node(memberId);
                cluster.awaitSnapshotCounter(node, 1);
                assertTrue(node.service().wasSnapshotTaken());
                // Reset so the shutdown snapshot below can be observed independently.
                node.service().resetSnapshotTaken();
            }
            cluster.sendMessages(1);
            cluster.awaitResponses(3);
            cluster.awaitMessageCountForService(cluster.node(2), 3);
            leader.terminationExpected(true);
            followerA.terminationExpected(true);
            followerB.terminationExpected(true);
            // Wait for the control toggle to be neutral before requesting shutdown.
            cluster.awaitNeutralControlToggle(leader);
            cluster.shutdownCluster(leader);
            cluster.awaitNodeTermination(cluster.node(0));
            cluster.awaitNodeTermination(cluster.node(1));
            cluster.awaitNodeTermination(cluster.node(2));
            assertTrue(cluster.node(0).service().wasSnapshotTaken());
            assertTrue(cluster.node(1).service().wasSnapshotTaken());
            assertTrue(cluster.node(2).service().wasSnapshotTaken());
            cluster.stopNode(cluster.node(0));
            cluster.stopNode(cluster.node(1));
            cluster.stopNode(cluster.node(2));
            Thread.sleep(1_000);
            cluster.startStaticNode(0, false);
            cluster.startStaticNode(1, false);
            cluster.startStaticNode(2, true);
            final TestNode newLeader = cluster.awaitLeader();
            assertNotEquals(2, newLeader.index());
            assertTrue(cluster.node(0).service().wasSnapshotLoaded());
            assertTrue(cluster.node(1).service().wasSnapshotLoaded());
            assertFalse(cluster.node(2).service().wasSnapshotLoaded());
            // All nodes, including the clean-started one, hold the full 3 messages.
            assertEquals(3, cluster.node(0).service().messageCount());
            assertEquals(3, cluster.node(1).service().messageCount());
            assertEquals(3, cluster.node(2).service().messageCount());
            cluster.reconnectClient();
            final int msgCountAfterStart = 4;
            final int totalMsgCount = 2 + 1 + 4;
            cluster.sendMessages(msgCountAfterStart);
            cluster.awaitResponses(totalMsgCount);
            cluster.awaitMessageCountForService(newLeader, totalMsgCount);
            assertEquals(totalMsgCount, newLeader.service().messageCount());
            cluster.awaitMessageCountForService(cluster.node(1), totalMsgCount);
            assertEquals(totalMsgCount, cluster.node(1).service().messageCount());
            cluster.awaitMessageCountForService(cluster.node(2), totalMsgCount);
            assertEquals(totalMsgCount, cluster.node(2).service().messageCount());
        }
    }
    /**
     * Kill both followers while traffic is flowing, wait for the leader to step down
     * (no quorum), then restart one follower clean and verify a leader re-emerges.
     */
    @Test(timeout = 30_000)
    public void shouldRecoverQuicklyAfterKillingFollowersThenRestartingOne() throws Exception
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            final TestNode leader = cluster.findLeader();
            final TestNode follower = cluster.followers().get(0);
            final TestNode follower2 = cluster.followers().get(1);
            cluster.connectClient();
            cluster.sendMessages(10);
            cluster.stopNode(follower);
            cluster.stopNode(follower2);
            // Keep offering until the quorum loss demotes the leader; these sends are
            // expected to go unacknowledged.
            while (leader.role() != Cluster.Role.FOLLOWER)
            {
                Thread.sleep(1_000);
                cluster.sendMessages(1);
            }
            cluster.startStaticNode(follower2.index(), true);
            cluster.awaitLeader();
        }
    }
    /**
     * Common body for the "follower misses one message" tests: stop a follower, echo
     * the given message, restart the follower from state and verify it rejoins with
     * its election completed.
     *
     * @param message payload the follower misses while it is down.
     */
    private void shouldCatchUpAfterFollowerMissesMessage(final String message) throws InterruptedException
    {
        try (TestCluster cluster = TestCluster.startThreeNodeStaticCluster(NULL_VALUE))
        {
            cluster.awaitLeader();
            TestNode follower = cluster.followers().get(0);
            cluster.stopNode(follower);
            Thread.sleep(1_000);
            cluster.connectClient();
            cluster.msgBuffer().putStringWithoutLengthAscii(0, message);
            cluster.sendMessage(message.length());
            cluster.awaitResponses(1);
            Thread.sleep(1_000);
            follower = cluster.startStaticNode(follower.index(), false);
            Thread.sleep(1_000);
            assertThat(follower.role(), is(Cluster.Role.FOLLOWER));
            assertThat(follower.electionState(), is((Election.State)null));
        }
    }
private int countersOfType(final CountersReader countersReader, final int typeIdToCount)
{
final MutableInteger count = new MutableInteger();
countersReader.forEach(
(counterId, typeId, keyBuffer, label) ->
{
if (typeId == typeIdToCount)
{
count.value++;
}
});
return count.get();
}
private Thread startMessageThread(final TestCluster cluster, final long intervalNs)
{
final Thread thread = new Thread(
() ->
{
final IdleStrategy idleStrategy = YieldingIdleStrategy.INSTANCE;
cluster.msgBuffer().putStringWithoutLengthAscii(0, MSG);
while (!Thread.interrupted())
{
if (cluster.client().offer(cluster.msgBuffer(), 0, MSG.length()) < 0)
{
LockSupport.parkNanos(intervalNs);
}
idleStrategy.idle(cluster.client().pollEgress());
}
});
thread.setDaemon(true);
thread.setName("message-thread");
return thread;
}
}
|
Reversible strain effect on the magnetization of LaCoO3 films
The magnetization of ferromagnetic LaCoO3 films grown epitaxially on piezoelectric substrates has been found to systematically decrease with the reduction of tensile strain. The magnetization change induced by the reversible strain variation reveals an increase of the Co magnetic moment with tensile strain. The biaxial strain dependence of the Curie temperature is estimated to be below 4K/% in the as-grown tensile strain state of our films. This is in agreement with results from statically strained films on various substrates.
grown on PMN-PT(001) by pulsed laser deposition (KrF 248 nm) from a stoichiometric target. The deposition temperature (T) and the oxygen background pressure were 650 °C and 0.45 mbar, respectively. After deposition, the films were annealed for 10 minutes and cooled down in oxygen atmosphere of 800 mbar. Structure and film thickness were characterized by X-ray diffraction (XRD) measurements with a Philips X'Pert MRD diffractometer using Cu Kα radiation. The magnetization (M) was measured in a SQUID magnetometer. $T_C$ is estimated by extrapolating $M^2$ for $T < T_C$ to $M = 0$. For strain-dependent measurements, an electrical voltage is supplied to the substrate between the magnetic film and a bottom electrode on the opposite (001) surface of the substrate. The current in the piezo-circuit is below $10^{-6}$ A.
X-ray Θ−2Θ scans show clear (00l) film reflections characteristic of a pseudocubic structure (Fig. 1). PMN-PT has a pseudocubic lattice parameter of 4.02Å and weak rhombohedral or monoclinic distortions 19 . Bulk LCO is rhombohedral with a pseudocubic lattice parameter of 3.805Å 21 . Despite the large misfit of 5.7 %, LCO grows epitaxially oriented on PMN-PT(001). In-plane X-ray reciprocal space mapping around the (013) asymmetric reflection reveals partial relaxation of the LCO film, i.e. the reflections of the film and the substrate have different Q x values (Fig. 1 inset). The derived out-of-plane (c) and in-plane (a) lattice parameters are c = 3.79Å and a = 3.88Å for the 50 nm thick film. The tetragonal distortion estimated as t = c/a is 0.977.
In Fig. 2 we plot M vs T of the film from Fig. 1 measured in field-cooled (FC) and zero-field-cooled (ZFC) modes in a magnetic field (H) of µ 0 H = 200 mT applied along the in-plane direction. Magnetic ordering is observed below about 87 K. A cusp in the ZFC magnetization is found at ∼ 50 K, possibly indicating a glasslike behaviour as in bulk cobaltites. The inset shows the magnetization loop M(H) at 10 K. Clearly, the LCO film is ferromagnetic at low temperatures, with T C ≃ 87 K. This value agrees well with recently published data for strained LCO films 12,13 . The coercive field (H c ) at 10 K is 450 mT.
In the following we discuss the effect of reversible biaxial strain on the magnetization of LCO. Fig. 3a shows the M dependence on the applied substrate electric field (E) at 75 K, recorded in 200 mT after field cooling. The increasing electric field leads to linear in-plane compression of the substrate 19 and, hence, to reduction of the tensile film strain. The value of the piezoelectrical substrate strain at 10 kV/cm is about −0.1 % at 90 K 22 . A roughly linear, low-hysteresis decrease of M is observed with increasing E, i.e. with decreasing tensile strain. A similar behaviour occurs at various temperatures; the resulting M change measured at E = 10 kV/cm is summarized in Fig. 3b. An M change of ≈ 9% is obtained near $T_C$. In order to clarify the strain effect also for the saturated magnetization, full M(H) loops (H < 5 T) were recorded at 70 K in different strain states. They revealed a similar strain-induced change of the saturated magnetization; see, e.g., the data point for 70 K inserted in Fig. 3b. The observed decrease of M with reduced tensile strain confirms the observation of Fuchs et al. 12,13 that tensile film strain stabilizes ferromagnetism in LaCoO 3 . This may involve strain-induced enhancements of (i) the magnetic moment of the Co ions and (ii) $T_C$ with tensile strain. In the following we attempt to separate both effects.
As a first step, the T C shift induced by the reversible strain is estimated. The M change ∆M TC resulting from a T C shift can be approximated by shifting the M (T ) curve (recorded under equal conditions as for the reversible strain runs) by an assumed temperature interval ∆T C and taking the difference to the original data. The thus obtained M change, denoted as ∆M 1 (T ), overestimates ∆M TC at lower T and converges to its real value close to T C . Fig. 3b shows the ∆M 1 (T ) curve calculated for ∆T C = 0.4 K. This value of ∆T C is chosen to fit the measured ∆M at 80 K, close to T C . Therefore, it provides an upper limit for the real T C shift caused by the reversible strain of 0.1% in the LCO film. Hence, for the as-grown state of a = 3.88Å of the LCO film, the biaxial strain change of the transition temperature is estimated as dT C /da ≤ 4 K/%. As a consequence of the above arguments, the measured strain-induced M change in the range of T = 30 ÷ 70 K cannot originate from a T C shift alone, since it substantially exceeds ∆M 1 (T ) giving an upper limit to the M change caused by the T C shift. Clearly, a decrease of the Co magnetic moment itself is needed to explain the data.
The above estimated maximum shift of T C of 4 K/% of biaxial strain for the as-grown state of a(LCO) = 3.88Å can be compared to the Curie temperature vs strain for LCO films grown on various substrates, see Tab. 1 and Ref. 13 . We find little variation of T C in the range of 2 K for a = 3.867 ÷ 3.896Å, indicating a weaker than the estimated maximum strain response of T C . It is worth noting that a compressively strained LCO film (a = 3.789Å) grown on LaAlO 3 shows a remanent magnetization up to 75 K, too. Hence, the strain effect on T C appears to be rather moderate.
Finally, it is interesting to consider the roles played by (i) the tetragonal distortion of the film characterized by the c/a ratio and (ii) the strain-induced volume (V ) change of the unit cell. Tensile strain typically increases V . Thus, its effect is opposite to hydrostatic pressure. Co ions in LaCoO 3 have been reported to transfer to the lowspin state under hydrostatic pressure 23 , as is also found for the doped La 0.82 Sr 0.18 CoO 3 24 . This is consistent with the enlarged ionic radius of Co ions in the excited, i.e.
intermediate/high, spin states 21 . Hence, a V increase is likely to stabilize excited spin states of Co ions. Zhou et al. 14 indicated that volume increase may underlie the ferromagnetism observed in the LCO nanoparticles investigated in their work. The tetragonal distortion, on the other hand, seems to be less important for establishing ferromagnetism in LCO, (even though it may have an influence), since a high T C of about 85 K has been reported for samples without tetragonal distortion, e.g. for nanoparticles 14 and the LCO/SrLaAlO 4 film discussed in Ref. 13 .
Summarizing, the influence of reversible biaxial strain on the magnetization of epitaxially grown LaCoO 3 films has been investigated. The strain-induced increase of T C is estimated to be below 4 K/% of strain in the asgrown state of a = 3.88Å. Our data give evidence for an enhanced magnetic moment of Co ions under tensile strain. Both results confirm that tensile strain strengthens the ferromagnetism in LaCoO 3 films. Dominance of the effect of an enlarged unit cell volume over that of a tetragonal distortion is suggested for inducing ferromagnetism by tensile strain. Soft X-ray absorption experiments, which may clarify the effect of strain on the electronic structure, and particularly on the Co spin state, are in progress.
We thank R. Hühne for stimulating discussions. This work was supported by Deutsche Forschungsgemeinschaft, FOR 520. |
#![allow(unused_variables, dead_code)]
use std::{collections::HashMap, io, thread, net::SocketAddr, sync::{Arc, atomic::AtomicBool, mpsc}};
use thiserror::Error;
use mio::{Poll, Waker, net::{TcpListener, TcpStream}};
use vru_session::{
self as session,
Command,
Event,
NodeDisconnected,
handshake::{PublicKey, SecretKey, Identity},
};
/// Handle returned to the caller of `Node::spawn`; receives the events the
/// node emits on its internal channel.
pub struct NodeRef(mpsc::Receiver<Event<NodeError>>);
/// Errors surfaced by this node implementation.
#[derive(Debug, Error)]
pub enum NodeError {
    /// Wraps any I/O failure from the listener, poller, waker or streams.
    #[error("io error: {}", _0)]
    Io(io::Error),
}
/// Blocking and non-blocking access to the node's event stream.
impl session::NodeRef<NodeError> for NodeRef {
    /// Blocks until the node emits an event; fails once the node side of the
    /// channel has been dropped.
    fn recv(&self) -> Result<Event<NodeError>, NodeDisconnected> {
        match self.0.recv() {
            Ok(event) => Ok(event),
            Err(mpsc::RecvError) => Err(NodeDisconnected),
        }
    }
    /// Returns a pending event if one is queued, `Ok(None)` when the queue is
    /// empty, and an error once the node side has been dropped.
    fn try_recv(&self) -> Result<Option<Event<NodeError>>, NodeDisconnected> {
        match self.0.try_recv() {
            Ok(event) => Ok(Some(event)),
            Err(mpsc::TryRecvError::Empty) => Ok(None),
            Err(mpsc::TryRecvError::Disconnected) => Err(NodeDisconnected),
        }
    }
}
/// A running node: owns the poll thread, the waker used to stop it, and the
/// channels connecting it to its owner.
pub struct Node<P>
where
    P: session::ProcessorFactory,
{
    /// Join handle for the thread executing `NodeState::run`.
    main_thread: thread::JoinHandle<()>,
    /// Wakes the poll loop (token 0) so it can observe a shutdown request.
    waker: Waker,
    /// Sending half of the event channel whose receiving half is the `NodeRef`.
    sender: mpsc::Sender<Event<NodeError>>,
    /// Peers accepted by the main thread, paired with their remote address.
    incoming: mpsc::Receiver<(SocketAddr, Peer<P::Processor>)>,
    /// Established peers by identity.
    // NOTE(review): never populated in the code visible here.
    peers: HashMap<Identity, Peer<P::Processor>>,
    processor_factory: P,
}
impl<P> session::Node<P> for Node<P>
where
    P: session::ProcessorFactory + Clone + Send + 'static,
    P::Processor: Send,
{
    type Error = NodeError;
    type Ref = NodeRef;
    type Address = SocketAddr;
    /// Binds a TCP listener on `address`, registers it (token 1) together with
    /// a waker (token 0) on a fresh poll, and runs the accept loop on a
    /// dedicated "node-main" thread. Returns the owner handle plus a `NodeRef`
    /// for consuming events.
    fn spawn(
        sk: SecretKey,
        pk: PublicKey,
        address: Self::Address,
        processor_factory: P,
        running: Arc<AtomicBool>,
    ) -> Result<(Self, Self::Ref), Self::Error> {
        use mio::{Interest, Token};
        // Events flow node -> `sender`/`rx` -> NodeRef; accepted peers flow
        // main thread -> `peer_tx`/`peer_rx` -> Node.
        let (sender, rx) = mpsc::channel();
        let (peer_tx, peer_rx) = mpsc::channel();
        let poll = Poll::new().map_err(NodeError::Io)?;
        // Token 0 is reserved for the waker so `join` can stop the poll loop.
        let waker = Waker::new(poll.registry(), Token(0)).map_err(NodeError::Io)?;
        let main_thread = {
            let mut listener = TcpListener::bind(address).map_err(NodeError::Io)?;
            poll.registry().register(&mut listener, Token(1), Interest::READABLE)
                .map_err(NodeError::Io)?;
            let state = NodeState::<P> {
                sk,
                pk,
                listener,
                poll,
                sender: sender.clone(),
                incoming: peer_tx,
                processor_factory: processor_factory.clone(),
            };
            thread::Builder::new()
                .name("node-main".to_string())
                .spawn(move || state.run(running))
                .expect("failed to spawn main thread")
        };
        Ok((
            Node {
                main_thread,
                waker,
                sender,
                incoming: peer_rx,
                peers: HashMap::new(),
                processor_factory,
            },
            NodeRef(rx),
        ))
    }
    /// Handles an owner command.
    // NOTE(review): only `Connect` is recognised and it is incomplete -- the
    // connected stream is dropped immediately and the `unwrap` panics on a
    // connect failure instead of reporting a `NodeError`.
    fn command(&self, command: Command<Self::Address>) {
        match command {
            Command::Connect { peer_pi, address } => {
                let stream = TcpStream::connect(address).unwrap();
            },
            _ => (),
        }
    }
    /// Signals the poll loop via the waker and waits for the thread to exit.
    fn join(self) {
        self.waker.wake().unwrap();
        self.main_thread.join().unwrap()
    }
}
/// State owned by the "node-main" thread while the accept loop runs.
struct NodeState<P>
where
    P: session::ProcessorFactory,
{
    // Key pair handed to each accepted peer's handshake.
    sk: SecretKey,
    pk: PublicKey,
    /// Listener registered on the poll with token 1.
    listener: TcpListener,
    poll: Poll,
    /// Reports events (primarily errors) back to the `NodeRef` receiver.
    sender: mpsc::Sender<Event<NodeError>>,
    /// Hands accepted peers back to the owning `Node`.
    incoming: mpsc::Sender<(SocketAddr, Peer<P::Processor>)>,
    processor_factory: P,
}
impl<P> NodeState<P>
where
    P: session::ProcessorFactory,
{
    /// Event loop for the node: polls for wake-ups (token 0) and incoming
    /// connections (token 1) until `running` is cleared or a wake-up arrives.
    fn run(mut self, running: Arc<AtomicBool>) {
        use std::{time::Duration, sync::atomic::Ordering};
        use mio::Events;

        let mut events = Events::with_capacity(2);
        while running.load(Ordering::Acquire) {
            // Poll until a batch of events is ready, re-checking the running
            // flag on timeouts so shutdown is not delayed indefinitely.
            loop {
                match self.poll.poll(&mut events, Some(Duration::from_secs(1))) {
                    // Fixed: was `Ok(events) => break events`, which bound the
                    // unit value under a name shadowing the events buffer; the
                    // readiness events are written into `events` by `poll`.
                    Ok(()) => break,
                    Err(error) if error.kind() == io::ErrorKind::TimedOut => {
                        if !running.load(Ordering::Acquire) {
                            return;
                        }
                    },
                    Err(error) if error.kind() == io::ErrorKind::Interrupted => return,
                    // NOTE(review): any other persistent error is reported and
                    // the loop retries immediately, which may spin.
                    Err(error) => self.report(Event::Error(NodeError::Io(error))),
                }
            }
            for event in &events {
                match event.token().0 {
                    // Token 0 is the waker registered in `Node::spawn`: an
                    // orderly shutdown request.
                    0 => {
                        log::info!("wake");
                        return;
                    },
                    // Token 1 is the listener: accept and hand the new peer
                    // back to the owner thread.
                    1 => match self.listener.accept() {
                        Ok((stream, address)) => {
                            let processor = self.processor_factory.spawn_processor(None);
                            let peer = Peer::spawn(
                                self.sk.clone(),
                                self.pk.clone(),
                                stream,
                                processor,
                                running.clone(),
                            );
                            self.incoming.send((address, peer)).unwrap();
                        },
                        Err(error) => self.report(Event::Error(NodeError::Io(error))),
                    },
                    _ => unreachable!(),
                }
            }
        }
    }

    /// Forwards an event to the owner; logs a warning if the receiver is gone.
    fn report(&self, event: Event<NodeError>) {
        match self.sender.send(event) {
            Ok(()) => (),
            Err(mpsc::SendError(event)) => log::warn!("failed to send event: {:?}", event),
        }
    }
}
/// Handle to a spawned per-connection worker.
struct Peer<P>
where
    P: session::Processor,
{
    /// Worker thread driving this peer's connection.
    worker_thread: thread::JoinHandle<()>,
    /// Protocol processor owned by this peer.
    processor: P,
    /// Waker used to interrupt the worker's poll loop.
    waker: Waker,
    /// Channel of byte buffers handed to the worker — presumably outbound
    /// data; TODO confirm once `spawn` is implemented.
    sender: mpsc::Sender<Vec<u8>>,
}
impl<P> Peer<P>
where
    P: session::Processor,
{
    /// Spawns a worker thread that drives `stream` with `processor`.
    ///
    /// NOTE(review): not implemented — calling this panics via
    /// `unimplemented!()`, which also means `NodeState::run` panics on the
    /// first accepted connection.
    fn spawn(
        sk: SecretKey,
        pk: PublicKey,
        stream: TcpStream,
        processor: P,
        running: Arc<AtomicBool>,
    ) -> Self {
        unimplemented!()
    }
    /// Blocks until the worker thread exits; panics if it panicked.
    fn join(self) {
        self.worker_thread.join().unwrap()
    }
}
|
<reponame>compomics/peptizer
package com.compomics.peptizer.gui.renderer;
import org.apache.log4j.Logger;
import javax.swing.*;
import java.awt.*;
/**
* Created by IntelliJ IDEA.
* User: kenny
* Date: Jan 20, 2009
* Time: 3:21:22 PM
* To change this template use File | Settings | File Templates.
*/
public class ProjectListRenderer implements ListCellRenderer {
    // Class specific log4j logger for ProjectListRenderer instances.
    private static Logger logger = Logger.getLogger(ProjectListRenderer.class);

    /** Maximum number of characters shown before the title is truncated. */
    private static final int MAX_TITLE_LENGTH = 30;

    // Single shared label reused for every cell ("rubber stamp" pattern);
    // safe because Swing renders on the EDT only.
    private static JLabel lbl = new JLabel();

    /**
     * Renders a project list cell as its (possibly truncated) string value.
     * Fixes over the previous version: a null value no longer throws a
     * NullPointerException, and the selection state is reflected using the
     * list's selection colors (the shared label is made opaque so the
     * background actually paints).
     */
    public Component getListCellRendererComponent(final JList list, final Object value, final int index, final boolean isSelected, final boolean cellHasFocus) {
        String title = (value == null) ? "" : value.toString();
        if (title.length() > MAX_TITLE_LENGTH) {
            // Keep the total length at MAX_TITLE_LENGTH including the ellipsis.
            title = title.substring(0, MAX_TITLE_LENGTH - 3) + "...";
        }
        lbl.setText(title);
        lbl.setOpaque(true);
        if (isSelected) {
            lbl.setBackground(list.getSelectionBackground());
            lbl.setForeground(list.getSelectionForeground());
        } else {
            lbl.setBackground(list.getBackground());
            lbl.setForeground(list.getForeground());
        }
        return lbl;
    }
}
|
/**
 * Responsible for injecting decorated object and its decorated "children" of {@link AbstractDecorator} type.
 * Injection is performed eagerly, at construction time.
 *
 * @author Alex Objelean
 * @created 2 May 2012
 * @since 1.4.6
 */
public class InjectorAwareDecorator<T>
    extends AbstractDecorator<T> {
  /** Injector used to populate the decorated object's dependencies. */
  private final Injector injector;
  /**
   * Wraps {@code decorated} and immediately injects its dependencies.
   *
   * @param decorated the object to decorate; injected on construction.
   * @param injector the injector to use; must not be null.
   */
  public InjectorAwareDecorator(final T decorated, final Injector injector) {
    super(decorated);
    // Fail fast on a missing injector before touching the decorated object.
    Validate.notNull(injector);
    injector.inject(decorated);
    this.injector = injector;
  }
  /** @return the injector held by this decorator, for use by subclasses. */
  protected final Injector getInjector() {
    return injector;
  }
}
<reponame>fredwangwang/linear-programming-example
import numpy as np
import cvxopt as co
import cvxpy as cp
import numpy.testing as npt
co.solvers.options['show_progress'] = False
co.solvers.options['glpk'] = {'msg_lev': 'GLP_MSG_ERR'}
# https://www.analyzemath.com/linear_programming/linear_prog_applications.html
def example1_cvxpy():
    """Maximize 2A + 3B s.t. A + B <= 2000, 8A + 14B <= 20000, A, B >= 0.

    Unit resource usage is 8 for A and 14 for B; unit profit is 2 and 3.
    Returns the optimal [A, B] as reported by the GLPK solver via cvxpy.
    """
    a = cp.Variable()
    b = cp.Variable()
    constraints = [
        a + b <= 2000,
        8 * a + 14 * b <= 20000,
        a >= 0,
        b >= 0,
    ]
    objective = cp.Maximize(2 * a + 3 * b)
    problem = cp.Problem(objective, constraints=constraints)
    problem.solve(solver='GLPK')
    return [a.value, b.value]
def example1_cvxopt():
    """Same LP as example1_cvxpy, expressed in cvxopt's ``G x <= h`` form.

    Rows of G/h encode A >= 0, B >= 0, A + B <= 2000, 8A + 14B <= 20000;
    c negates the profit vector because cvxopt minimizes.
    Returns the optimal [A, B] as a flat numpy array.
    """
    inequality_lhs = np.array(
        [[-1.0, 0.0],
         [0.0, -1.0],
         [1.0, 1.0],
         [8.0, 14.0]])
    G = co.matrix(inequality_lhs)
    c = co.matrix([-2.0, -3.0])
    h = co.matrix([-0.0, -0.0, 2000.0, 20000.0])
    solution = co.solvers.lp(c, G, h, solver='glpk')
    return np.array(solution['x']).flatten()
def example4_cvxpy():
    """
    Solve example 4 (fund allocation) with cvxpy.

    Fund returns:
        F1 2%
        F2 4%
        F3 5%
    Constraints:
        F3 < 3000
        F2 < 2*F1
        F1 + F2 + F3 <= 20000
    Objective:
        Max 2 F1 + 4 F2 + 5 F3

    Returns the optimal [f1, f2, f3].
    """
    f1 = cp.Variable()
    f2 = cp.Variable()
    f3 = cp.Variable()
    constr = [
        f1 >= 0,
        f2 >= 0,
        f3 >= 0,
        f2 <= 2 * f1,
        f3 <= 3000,
        f1 + f2 + f3 <= 20000
    ]
    cp.Problem(cp.Maximize(2 * f1 + 4 * f2 + 5 * f3),
               constraints=constr).solve(solver='GLPK')
    return [f1.value, f2.value, f3.value]
def example4_cvxopt():
    """Solve example 4 with cvxopt in ``G x <= h`` form.

    Rows of G/h encode non-negativity, f2 <= 2*f1, f3 <= 3000 and the total
    budget; c negates the return vector because cvxopt minimizes.
    Returns the optimal [f1, f2, f3] as a flat numpy array.
    """
    G = co.matrix(np.array([
        [-1, 0, 0],
        [0, -1, 0],
        [0, 0, -1],
        [-2, 1, 0],  # f2 <= 2 * f1 --> -2 *f1 + f2 <= 0
        [0, 0, 1],
        [1, 1, 1]
    ], dtype=float))
    c = co.matrix([-2.0, -4.0, -5.0])
    h = co.matrix([-0.0, -0.0, -0.0, -0.0, 3000.0, 20000.0])
    # default solver results in a diff @ 3 decimal
    sol = co.solvers.lp(c, G, h, solver='glpk')
    return np.array(sol['x']).flatten()
def example4_cvxopt_explit_z():
    """
    Same LP as example4_cvxopt, but with the objective value carried as an
    explicit extra variable z.

    Saw in some examples that the objective is explicitly listed as one of
    the variables. Not exactly sure why it is done this way, as it makes the
    system of equations longer and harder to understand... but anyway, it is
    listed here so I can remember how it works. This is essentially doing
    minmax.

    Returns (z, [f1, f2, f3]) at the optimum.
    """
    G = np.array([
        [-1, 0, 0],
        [0, -1, 0],
        [0, 0, -1],
        [-2, 1, 0],  # f2 <= 2 * f1 --> -2 *f1 + f2 <= 0
        [0, 0, 1],
        [1, 1, 1]
    ], dtype=float)
    # Prepend a zero column for z: it takes no part in the original constraints.
    G = np.hstack((
        np.zeros((G.shape[0], 1)),
        G
    ))
    G = np.vstack((
        G,
        # maximize z st: z <= 2 F1 + 4 F2 + 5 F3
        # is the same as:
        # minimize -z st: -z >= -2f1 - 4f2 - 5f3 ---> z - 2f1 - 4f2 - 5f3 <= 0
        np.array([1, -2, -4, -5])
    ))
    G = co.matrix(G)
    # resulting G
    # first col is the objective, it does not participate in any of the constraints, other than
    # the actual minimize objective listed in the very last row.
    # |
    # v
    # [[ 0. -1.  0.  0.]
    #  [ 0.  0. -1.  0.]
    #  [ 0.  0.  0. -1.]
    #  [ 0. -2.  1.  0.]
    #  [ 0.  0.  0.  1.]
    #  [ 0.  1.  1.  1.]
    #  [ 1. -2. -4. -5.]]
    c = co.matrix([-1.0, 0.0, 0.0, 0.0])  # minimize -z
    h = co.matrix([-0.0, -0.0, -0.0, -0.0, 3000.0, 20000.0, -0.0])
    # default solver results in a diff @ 3 decimal
    sol = co.solvers.lp(c, G, h, solver='glpk')
    return sol['x'][0], np.array(sol['x'][1:]).flatten()
def rock_paper_scissors_cvxpy():
    """Solve the rock-paper-scissors zero-sum game with cvxpy (maxmin LP).

    Solves the game twice -- once with scalar variables and once in matrix
    form -- and prints the game value and the optimal mixed strategy for
    each formulation.
    """
    r = cp.Variable()
    p = cp.Variable()
    s = cp.Variable()
    z = cp.Variable()  # obj
    constr = [
        r >= 0,
        p >= 0,
        s >= 0,
        r + p + s == 1,
        # rps rules using maxmin
        # max Z, s.t.:
        z <= +0*r - 1*p + 1*s,
        z <= +1*r + 0*p - 1*s,
        z <= -1*r + 1*p + 0*s,
    ]
    cp.Problem(cp.Maximize(z), constraints=constr).solve(solver='GLPK')
    print('rock paper scissors solution using cvxpy')
    print('expected value of the game: ', z.value)
    print('best stragegy: ', np.array([r.value, p.value, s.value]).flatten())
    # matrix form: payoff matrix for the row player.
    rpsrule = np.array([[0, -1, 1],
                        [1, 0, -1],
                        [-1, 1, 0]], dtype=float)
    rps = cp.Variable(3)
    z1 = cp.Variable()
    constr1 = [
        rps >= 0,
        sum(rps) == 1,
        # rps rules using maxmin
        # max Z, s.t.:
        z1 <= rpsrule @ rps
    ]
    cp.Problem(cp.Maximize(z1), constraints=constr1).solve(solver='GLPK')
    print('rock paper scissors solution using cvxpy Matrix')
    print('expected value of the game: ', z1.value)
    print('best stragegy: ', np.array([rps.value]).flatten())
def rock_paper_scissors_cvxopt():
    """Solve the rock-paper-scissors zero-sum game with cvxopt (maxmin LP).

    Prints the game value and the optimal mixed strategy.
    """
    rpsrule = np.array([[0, -1, 1],
                        [1, 0, -1],
                        [-1, 1, 0]], dtype=float)
    G = co.matrix(np.vstack((
        # negating 'rpsrule' or not would generate the same result.
        # Without negating it is calculating the probability for the column player.
        # Since this is a zero sum game, the strategy for both row and column player would be identical.
        # Also note that here negating has the same effect as transposing (switch row and col).
        np.hstack((np.ones((3, 1)), - rpsrule)),
        np.hstack((np.zeros((3, 1)), - np.eye(3))),  # each P >= 0
    )))
    c = co.matrix([-1.0, 0.0, 0.0, 0.0])
    h = co.matrix(np.zeros(G.size[0]))
    # sum P == 1
    A = co.matrix(np.array([[0.0, 1.0, 1.0, 1.0]]))
    b = co.matrix([1.0])
    sol = co.solvers.lp(c, G, h, A, b, solver='glpk')
    print('rock paper scissors solution using cvxopt')
    print('expected value of the game: ', sol['x'][0])
    print('best stragegy: ', np.array(sol['x'][1:]).flatten())
if __name__ == "__main__":
    # Cross-check: cvxpy and cvxopt must agree on example 1's optimal
    # objective value (to 10 decimal places).
    ex1_coeff = np.array([2, 3])
    npt.assert_almost_equal(
        np.dot(ex1_coeff, example1_cvxpy()),
        np.dot(ex1_coeff, example1_cvxopt()), decimal=10)
    # Same cross-check for example 4.
    ex4_coeff = np.array([2, 4, 5])
    npt.assert_almost_equal(
        np.dot(ex4_coeff, example4_cvxpy()),
        np.dot(ex4_coeff, example4_cvxopt()), decimal=10)
    # Compare the plain formulation against the explicit-z variant.
    sol = example4_cvxopt()
    obj = np.dot(ex4_coeff, sol)
    obj_z, sol_z = example4_cvxopt_explit_z()
    print('normal vs explicit z:')
    print(f'sol: \n{sol}\n{sol_z}\n')
    print(f'obj: \n{obj}\n{obj_z}')
    print()
    # Print the game-theory examples (no asserts; visual comparison only).
    rock_paper_scissors_cvxopt()
    print()
    rock_paper_scissors_cvxpy()
|
.
Bronchopulmonary complications are one of the leading causes of morbidity after cardiac surgery; they lengthen a patient's hospital stay and increase the cost of treatment. The most common postoperative bronchopulmonary complications include pneumonia, atelectasis, respiratory failure, pneumothorax, and bronchospasm. These complications are the consequences of anesthesia and surgical trauma aggravated by the presence of risk factors in the patient in the preoperative period such as any chronic disease involving the lungs, smoking history, persistent cough and / or wheezing, chest and spinal deformities, obesity, senior age. In addition, the presence of chronic heart failure, diabetes mellitus, and chronic kidney disease also increase the risk of developing bronchopulmonary complications. In the prevention and treatment of bronchopulmonary complications the clinical effectiveness of rehabilitation programs after coronary artery bypass grafting is undeniable. The effectiveness of the programs has been proven on the basis of both domestic and foreign long-term in-practice and scientific research. However, despite the significant advances in cardiac rehabilitation there are a number of unresolved issues. Is it possible in a short period of time of the first stationary rehabilitation stage to form the patient's skill to perform breathing exercises and, accordingly, to obtain the maximum effect in the prevention of bronchopulmonary complications? What factors can affect the speed of motor skill formation in the patient's mastering of breathing exercises? What should be the frequency of procedures per day and the number of exercises when a physical therapy instructor works with a patient to increase the effectiveness of the prevention of bronchopulmonary complications in the postoperative period? What category of patients is strictly required for the pre-rehabilitation stage? How should the pre-rehabilitation stage be organized and how long should it take? 
All these questions require the development and implementation of scientifically grounded, individualized rehabilitation programs with a step-by-step algorithm for managing the patient by a multidisciplinary rehabilitation team from the first hours after surgery, with the mandatory inclusion of pre-rehabilitation and with due regard to the social, anamnestic, clinical, and psychological characteristics of the patient.
<filename>test/test_pgmagick_api.py
from __future__ import print_function
import hashlib
import sys
import unittest
import pgmagick
from pgmagick.api import Image, Draw
# Log the GraphicsMagick version and parse it for version-gated tests below.
print(pgmagick.gminfo().version)
LIBGM_VERSION = [int(v) for v in pgmagick.gminfo().version.split('.')]
class ImageTestCase(unittest.TestCase):
    """Smoke tests for pgmagick.api.Image.

    Most tests only verify that the operation runs and a file can be
    written; test_scale_with_filtertype additionally compares MD5 digests
    of the written files to prove the filter type affects the output.
    """
    def setUp(self):
        self.img = Image((600, 400), 'red')
    def test_unicodefilename(self):
        self.img.write('unicode.png')
        img = Image(u'unicode.png')
        img.scale(0.5)
        img.write(u'unicode.jpg')
    def test_nonarg(self):
        Image()
    def test_arg(self):
        Image((600, 400), 'red')
    def test_arg_float(self):
        Image((600.5, 400.4), 'red')
    def test_blur(self):
        self.img.blur(2, 2.3)
        self.img.write('t.jpg')
    def test_blur_channel(self):
        self.img.blur_channel('cyan')
        self.img.write('t.jpg')
    def test_scale(self):
        img = Image((600, 400), 'gradient:#ffffff-#000000')
        img.scale(0.6)
        img.write('t.jpg')
    def test_scale_with_filtertype(self):
        # Catrom-filtered scale.
        img = Image((600, 400), 'gradient:#ffffff-#000000')
        img.scale(0.6, 'Catrom')
        img.write('t.jpg')
        m = hashlib.md5()
        with open('t.jpg', 'rb') as fp:
            m.update(fp.read())
        scale_with_filtertype_catrom_digest = m.hexdigest()
        # Cubic-filtered scale.
        img = Image((600, 400), 'gradient:#ffffff-#000000')
        img.scale(0.6, 'Cubic')
        img.write('t.jpg')
        m = hashlib.md5()
        with open('t.jpg', 'rb') as fp:
            m.update(fp.read())
        scale_with_filtertype_cubic_digest = m.hexdigest()
        # Default-filter scale.
        img = Image((600, 400), 'gradient:#ffffff-#000000')
        img.scale(0.6)
        img.write('t.jpg')
        m = hashlib.md5()
        with open('t.jpg', 'rb') as fp:
            m.update(fp.read())
        scale_digest = m.hexdigest()
        # Each filter must produce a distinct output file.
        self.assertNotEqual(scale_with_filtertype_catrom_digest, scale_digest)
        self.assertNotEqual(scale_with_filtertype_catrom_digest, scale_with_filtertype_cubic_digest)
    def test_composite_arg_list(self):
        base = Image((300, 200), 'green')
        layer = Image((300, 200), 'transparent')
        drawer = Draw()
        drawer.circle(50, 50, 50, 100)
        layer.draw(drawer)
        base.composite(layer, (10, 10), 'over')
        base.write('t.png')
    def test_composite_arg_gravity(self):
        base = Image((300, 200), 'green')
        layer = Image((150, 100), 'transparent')
        drawer = Draw()
        drawer.circle(50, 50, 20, 20)
        layer.draw(drawer)
        base.composite(layer, 'center', 'over')
        base.write('t.png')
    def test_crop(self):
        img = Image((300, 200), 'gradient:#ffff00-#00ffff')
        img.crop(20, 20, 50, 100)
        img.write('t.png')
    def test_fontpointsize(self):
        img = Image((300, 200), 'red')
        img.font_pointsize(60)
        self.assertEqual(60, img.font_pointsize())
        self.assertEqual(float, type(img.font_pointsize()))
        if sys.platform.lower() == 'darwin':
            img.font("/Library/Fonts/Arial.ttf")
            img.annotate("hello", (100, 100))
            img.write('t.png')
    def test_size_property(self):
        img = Image((500, 300), 'red')
        self.assertEqual(img.width, 500)
        self.assertEqual(img.height, 300)
        img.scale(0.5)
        self.assertEqual(img.width, 250)
        self.assertEqual(img.height, 150)
class DrawTestCase(unittest.TestCase):
    """Smoke tests for pgmagick.api.Draw.

    Each test builds draw primitives, applies them to a fresh red 600x400
    image and writes the result; success means "no exception raised".
    Font-related tests are skipped on macOS where DrawableFont() is broken.
    """
    def setUp(self):
        self.img = Image((600, 400), 'red')
        self.d = Draw()
    def test_affine(self):
        self.d.affine(10, 10, 20, 20, 40, 40)
        self.img.draw(self.d.drawer)
        self.img.write('t.jpg')
    def test_arc(self):
        self.d.arc(30, 30, 40, 40, 40, 40)
        self.img.draw(self.d.drawer)
        self.img.write('t.jpg')
    def test_bezier(self):
        points = ((30, 30), (50, 75), (200, 100))
        self.d.bezier(points)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_circle(self):
        self.d.circle(40, 40, 50, 100)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_color(self):
        self.d.color(40, 40, 'point')
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_composite(self):
        img1 = Image((20, 20), 'plasma:blue')
        self.d.composite(10, 10, 0, 0, img1)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_draw_for_draw_class(self):
        self.d.color(40, 40, 'point')
        self.d.circle(100, 100, 50, 100)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_ellipse(self):
        self.d.ellipse(150, 150, 120, 120, 0, 120)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_fill_color(self):
        self.d.fill_color('#f09060')
        self.d.ellipse(150, 150, 120, 120, 0, 120)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_fill_rule(self):
        self.d.fill_rule('evenodd')
        self.d.circle(150, 150, 50, 180)
        self.d.fill_rule('nonzero')
        self.d.circle(350, 150, 250, 180)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_fill_opacity(self):
        self.im = Image((600, 400), 'transparent')
        self.d.fill_color('red')
        self.d.fill_opacity(0.5)
        self.d.circle(150, 150, 50, 180)
        self.d.fill_color('green')
        self.d.fill_opacity(0.8)
        self.d.circle(160, 160, 50, 180)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_font_style_italic(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', 'italic')
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_font_style_oblique(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', 'oblique')
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_font_stretch_ultracondensed(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', 'oblique', stretch='ultracondensed')
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_font_stretch_extraexpanded(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', 'oblique', stretch='extraexpanded')
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_font_weight100(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', weight=100)
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_font_bold(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('vera.ttf', weight='bold')
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_gravity(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.gravity('center')
        self.d.text(0, 0, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_line(self):
        self.d.line(10, 10, 40, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_matte(self):
        self.d.matte(30, 30, 'filltoborder')
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_miterlimit(self):
        # Draw the same "V" twice with different miter limits to compare joins.
        self.d.miterlimit(1)
        self.d.stroke_color('black')
        self.d.stroke_width(3)
        self.d.line(10, 200, 100, 10)
        self.d.line(100, 10, 210, 200)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.miterlimit(18)
        self.d.stroke_color('black')
        self.d.stroke_width(3)
        self.d.stroke_opacity(0.5)
        self.d.fill_opacity(0.5)
        self.d.line(210, 200, 300, 10)
        self.d.line(300, 10, 410, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_path(self):
        paths = ((40, 30),)
        self.d.path(paths)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_point(self):
        for i in range(50):
            self.d.point(i, i + 10)
        for i in range(50, 200, 2):
            self.d.point(i, i + 10)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_pointsize(self):
        self.d.pointsize(10)
        self.d.pointsize(30.)
        self.d.circle(150, 150, 50, 180)
        self.img.draw(self.d.drawer)
        self.img.write('t.png')
    def test_polygon(self):
        coordinate = ((10, 10), (20, 30), (40, 50))
        self.d.polygon(coordinate)
        coordinate = [(100, 100), (150, 100), [150, 150], (100, 150)]
        self.d.polygon(coordinate)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_polyline(self):
        coordinate = ((10, 10), (20, 30), (40, 50))
        self.d.polyline(coordinate)
        coordinate = [(100, 100), (150, 100), [150, 150], (100, 150)]
        self.d.polyline(coordinate)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_rectangle(self):
        self.d.rectangle(50, 50, 100, 100)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_rotation(self):
        # Overlay the same rectangle at different rotations/opacities.
        self.d.rectangle(150, 150, 200, 200)
        self.d.rotation(40)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.fill_opacity(0.8)
        self.d.rectangle(150, 150, 200, 200)
        self.d.rotation(20)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.fill_opacity(0.6)
        self.d.rectangle(150, 150, 200, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_round_rectangle(self):
        self.d = Draw()
        self.d.round_rectangle(20, 20, 30, 30, 150, 150)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_scaling(self):
        self.d.scaling(0.4, 0.4)
        self.d.circle(150, 150, 150, 200)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.fill_opacity(0.6)
        self.d.circle(150, 150, 150, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_skew(self):
        self.d = Draw()
        self.d.fill_opacity(0.6)
        self.d.circle(50, 50, 50, 100)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.fill_opacity(0.6)
        self.d.circle(50, 50, 50, 100)
        self.d.skewx(40)
        self.img.draw(self.d)
        self.d = Draw()
        self.d.fill_opacity(0.6)
        self.d.circle(50, 50, 50, 100)
        self.d.skewy(40)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_stroke_antialias(self):
        self.d.stroke_color('black')
        self.d.stroke_width(20)
        self.d.stroke_antialias(True)
        self.d.line(10, 10, 40, 200)
        self.d.stroke_antialias(False)
        self.d.line(50, 10, 80, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_stroke_linecap(self):
        self.d.stroke_color('lime')
        self.d.stroke_linecap('butt')
        self.d.stroke_linecap('round')
        self.d.stroke_linecap('square')
        self.d.line(10, 10, 40, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    @unittest.skipIf(LIBGM_VERSION <= [1, 3, 18], "bug in gm version: %s" % str(LIBGM_VERSION))
    def test_stroke_linejoin(self):
        self.d.stroke_color('lime')
        self.d.stroke_linejoin('round')
        self.d.stroke_linejoin('bevel')
        self.d.stroke_linejoin('miter')
        self.d.line(10, 10, 40, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_stroke_color(self):
        self.d.stroke_color('lime')
        self.d.line(10, 10, 40, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_stroke_width(self):
        self.d.stroke_width(20)
        self.d.line(20, 20, 50, 210)
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_text(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.text(30, 30, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_text_antialias(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.font('courier', weight='bold')
        self.d.pointsize(70)
        self.d.text_antialias(False)
        self.d.text(30, 100, "hello pgmagick")
        self.d.text_antialias(True)
        self.d.text(30, 200, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_text_decoration(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.pointsize(70)
        self.d.text_decoration('overline')
        self.d.text(30, 100, "hello pgmagick")
        self.d.text_decoration('linethrough')
        self.d.text(30, 200, "hello pgmagick")
        self.d.text_decoration('underline')
        self.d.text(30, 300, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_text_undercolor(self):
        if sys.platform.lower() == 'darwin':
            self.skipTest("DrawableFont() is broken")
        self.d.pointsize(70)
        self.d.text_undercolor('lime')
        self.d.text(30, 100, "hello pgmagick")
        self.img.draw(self.d)
        self.img.write('t.png')
    def test_translate(self):
        self.d.translate(10, 200)
        self.img.draw(self.d)
        self.img.write('t.png')
# Only run the test suite when executed directly; the previous unguarded
# call triggered a full test run on a mere `import` of this module.
if __name__ == '__main__':
    unittest.main()
|
    /**
     * Whether input SodukuMatrix is solvable.
     * TODO
     *
     * NOTE(review): this is an unimplemented stub — it reports every matrix
     * as solvable regardless of its contents.
     *
     * @param originMatrix input SodukuMatrix
     * @return whether the matrix can be solved (currently always true)
     */
    boolean solvable(SodukuMatrix originMatrix) {
        return true;
    }
Full Order Observer With Unmatched Constraint: Unknown Parameters Identification
This letter concerns both state estimation and parameter identification for linear systems with unmatched unknown parts. It deals with a full-order output-delayed unknown-input observer (DUIO), in which the time-delay concept is used to define a new augmented dynamic system that includes delayed state and output vectors. This estimation approach makes it possible to recover the matching condition that can arise in the observer design problem. The resulting observer has been improved with respect to the restrictive decoupling condition, so as to guarantee the estimation of states and parameters with asymptotic convergence. Finally, a simulation example based on parametric identification is provided to highlight the feasibility of the suggested method.
<filename>ui/ui-app/src/main/resources/static/js/ops-mgr/alerts/AlertDetailsController.ts
import * as angular from "angular";
import {moduleName} from "../module-name";
import * as _ from "underscore";
import OpsManagerRestUrlService from "../services/OpsManagerRestUrlService";
import {AccessControlService} from "../../services/AccessControlService";
import AccessConstants from '../../constants/AccessConstants';
import {Transition} from "@uirouter/core";
import "./module-require";
import {FEED_DEFINITION_SUMMARY_STATE_NAME} from "../../feed-mgr/model/feed/feed-constants";
/** Manages the Alert Details page.
* @constructor
* @param $scope the Angular scope
* @param $http the HTTP service
* @param $mdDialog the dialog server
* @param AccessControlService the access control service
* @param OpsManagerRestUrlService the REST URL service
*/
export class AlertDetailsDirectiveController implements ng.IComponentController{
    allowAdmin: boolean = false; //Indicates that admin operations are allowed. {boolean}
    alertData: any; //The alert details. {Object}
    alertId: any; //Id of the alert to display, bound by the tbaAlertDetails directive.
    static readonly $inject=["$scope","$http","$mdDialog","AccessControlService","OpsManagerRestUrlService"];
    /**
     * AngularJS lifecycle hook, invoked after bound properties (`alertId`)
     * have been assigned. The previous version called ngOnInit() from the
     * constructor, but since AngularJS 1.6 bindings are assigned *after*
     * construction, so `this.alertId` was still undefined and loadAlert()
     * silently skipped loading the alert.
     */
    $onInit(){
        this.ngOnInit();
    }
    ngOnInit(){
        this.loadAlert(this.alertId); // Fetch alert details
        this.accessControlService.getUserAllowedActions() // Fetch allowed permissions
            .then((actionSet: any) =>{
                this.allowAdmin = this.accessControlService.hasAction(AccessConstants.OPERATIONS_ADMIN, actionSet.actions);
            });
    }
    constructor(private $scope: angular.IScope,
                private $http: angular.IHttpService,
                private $mdDialog: angular.material.IDialogService,
                private accessControlService: AccessControlService,
                private OpsManagerRestUrlService: OpsManagerRestUrlService){
        // Intentionally empty: initialization happens in $onInit, once the
        // component bindings have been populated.
    }// end of constructor
    /**
     * Gets the class for the specified state.
     * @param {string} state the name of the state
     * @returns {string} class name
     */
    getStateClass =(state: string)=> {
        switch (state) {
            case "UNHANDLED":
                return "error";
            case "IN_PROGRESS":
                return "warn";
            case "HANDLED":
                return "success";
            default:
                return "unknown";
        }
    };
    /**
     * Gets the icon for the specified state.
     * @param {string} state the name of the state
     * @returns {string} icon name
     */
    getStateIcon = (state: string)=> {
        switch (state) {
            case "CREATED":
            case "UNHANDLED":
                return "error_outline";
            case "IN_PROGRESS":
                return "schedule";
            case "HANDLED":
                return "check_circle";
            default:
                return "help_outline";
        }
    };
    /**
     * Gets the display text for the specified state.
     * @param {string} state the name of the state
     * @returns {string} display text
     */
    getStateText = (state: string)=> {
        if (state === "IN_PROGRESS") {
            return "IN PROGRESS";
        } else {
            return state;
        }
    };
    //Hides this alert on the list page.
    hideAlert = ()=> {
        this.alertData.cleared = true;
        this.$http.post(this.OpsManagerRestUrlService.ALERT_DETAILS_URL(this.alertData.id), {state: this.alertData.state, clear: true});
    };
    //Shows the alert removing the 'cleared' flag
    showAlert = ()=> {
        this.alertData.cleared = false;
        this.$http.post(this.OpsManagerRestUrlService.ALERT_DETAILS_URL(this.alertData.id), {state: this.alertData.state, clear: false, unclear:true});
    };
    /** Loads the data for the specified alert and derives display fields
     * (age, state class/icon/text, and detail links by alert type).
     * @param {string} alertId the id of the alert
     */
    loadAlert = (alertId: string)=> {
        if(alertId) {
            this.$http.get(this.OpsManagerRestUrlService.ALERT_DETAILS_URL(alertId))
                .then( (response: any)=> {
                    this.alertData = response.data;
                    // Set time since created
                    if (angular.isNumber(this.alertData.createdTime)) {
                        this.alertData.createdTimeSince = Date.now() - this.alertData.createdTime;
                    }
                    // Set state information
                    if (angular.isString(this.alertData.state)) {
                        this.alertData.stateClass = this.getStateClass(this.alertData.state);
                        this.alertData.stateIcon = this.getStateIcon(this.alertData.state);
                        this.alertData.stateText = this.getStateText(this.alertData.state);
                    }
                    var isStream = false;
                    if (angular.isArray(this.alertData.events)) {
                        angular.forEach(this.alertData.events, (event: any)=> {
                            event.stateClass = this.getStateClass(event.state);
                            event.stateIcon = this.getStateIcon(event.state);
                            event.stateText = this.getStateText(event.state);
                            event.contentSummary = null;
                            if(angular.isDefined(event.content)){
                                // Event content is an optional JSON payload; ignore parse errors.
                                try {
                                    var alertEventContent = angular.fromJson(event.content);
                                    if(alertEventContent && alertEventContent.content){
                                        event.contentSummary = angular.isDefined(alertEventContent.content.failedCount) ? alertEventContent.content.failedCount +" failures" : null;
                                        if(!isStream && angular.isDefined(alertEventContent.content.stream)){
                                            isStream = alertEventContent.content.stream;
                                        }
                                    }
                                }catch(err){
                                }
                            }
                        });
                    }
                    this.alertData.links = [];
                    //add in the detail URLs
                    if(this.alertData.type == 'http://kylo.io/alert/job/failure') {
                        if(angular.isDefined(this.alertData.content) && !isStream) {
                            var jobExecutionId = this.alertData.content;
                            this.alertData.links.push({label: "Job Execution", value: "job-details({executionId:'" + jobExecutionId + "'})"});
                        }
                        this.alertData.links.push({label:"Feed Details", value:FEED_DEFINITION_SUMMARY_STATE_NAME+".feed-activity"+"({feedId:'"+this.alertData.entityId+"'})"});
                    }
                    else if(this.alertData.type == 'http://kylo.io/alert/alert/sla/violation') {
                        if(angular.isDefined(this.alertData.content)) {
                            this.alertData.links.push({label: "Service Level Assessment", value: "service-level-assessment({assessmentId:'" + this.alertData.content + "'})"});
                        }
                        this.alertData.links.push({label:"Service Level Agreement", value:"service-level-agreements({slaId:'"+this.alertData.entityId+"'})"});
                    }
                    else if(this.alertData.type == 'http://kylo.io/alert/service') {
                        this.alertData.links.push({label:"Service Details", value:"service-details({serviceName:'"+this.alertData.subtype+"'})"});
                    }
                });
        }
    };
    /**
     * Shows a dialog for adding a new event; reloads the alert if the
     * dialog reports a change was saved.
     * @param $event the event that triggered this dialog
     */
    showEventDialog = ($event: any)=> {
        this.$mdDialog.show({
            controller: 'EventDialogController',
            locals: {
                alert: this.alertData
            },
            parent: angular.element(document.body),
            targetEvent: $event,
            templateUrl: "./event-dialog.html"
        }).then((result: any)=> {
            if (result) {
                this.loadAlert(this.alertData.id);
            }
        });
    };
}
/**
 * Scope shape used by the Update Alert dialog (EventDialogController).
 */
export interface IMyScope extends ng.IScope {
    saving?: boolean;   // true while the update is being persisted
    state?: string;     // new alert state selected in the dialog
    closeDialog?: any;  // closes the dialog, discarding changes
    saveDialog?: any;   // persists the update, then closes the dialog
    description?: any;  // free-text description entered by the user
}
/**
 * Manages the Update Alert dialog.
 * @constructor
 * @param $scope the Angular scope
 * @param $http the HTTP service
 * @param $mdDialog the dialog service
 * @param OpsManagerRestUrlService the REST URL service
 * @param alert the alert to update
 */
export class EventDialogController implements ng.IComponentController{
    static readonly $inject=["$scope","$http","$mdDialog","OpsManagerRestUrlService","alert"];
    constructor(private $scope: IMyScope, //the Angular scope
                private $http: angular.IHttpService, //the HTTP service
                private $mdDialog: angular.material.IDialogService, //the dialog service
                private OpsManagerRestUrlService: OpsManagerRestUrlService, //the REST URL service
                private alert: any //the alert to update
    ){
        // Unlike the route/directive controllers, everything this controller
        // needs is constructor-injected (there are no bindings), so calling
        // ngOnInit() from the constructor is safe here.
        this.ngOnInit();
    }
    ngOnInit(){
        this.$scope.saving = false; //Indicates that this update is currently being saved {boolean}
        this.$scope.state = (this.alert.state === "HANDLED") ? "HANDLED" : "IN_PROGRESS"; //The new state for the alert{string}
        /**
         * Closes this dialog and discards any changes.
         */
        this.$scope.closeDialog = ()=> {
            this.$mdDialog.hide(false);
        };
        /**
         * Saves this update and closes this dialog.
         */
        this.$scope.saveDialog = ()=> {
            this.$scope.saving = true;
            var event = {state: this.$scope.state, description: this.$scope.description, clear: false};
            this.$http.post(this.OpsManagerRestUrlService.ALERT_DETAILS_URL(this.alert.id), event)
                .then(()=> {
                    // Resolve with true so the caller knows to reload the alert.
                    this.$mdDialog.hide(true);
                }, () =>{
                    this.$scope.saving = false;
                });
        };
    }
}
export class AlertDetailsController implements ng.IComponentController{
    alertId: any;
    $transition$: Transition;
    /**
     * Lifecycle hook called by AngularJS once the `$transition$` binding
     * has been assigned. The previous version invoked ngOnInit() from the
     * constructor, at which point `this.$transition$` was still undefined
     * (component bindings are assigned before $onInit, not before the
     * constructor, since AngularJS 1.6), causing a TypeError on `.params()`.
     */
    $onInit(){
        this.ngOnInit();
    }
    // Derives the alert id from the current UI-Router transition parameters.
    ngOnInit(){
        this.alertId = this.$transition$.params().alertId;
    }
}
// Route-level component for the Alert Details page; the alert id comes from
// the UI-Router transition bound as `$transition$`.
const module = angular.module(moduleName).component("alertDetailsController", {
    bindings: {
        $transition$: '<'
    },
    controller: AlertDetailsController,
    controllerAs: "vm",
    templateUrl: "./alert-details.html"
});
export default module;
// Register the details controller by name for template-driven usage.
angular.module(moduleName).controller("alertDetailsDirectiveController",
    AlertDetailsDirectiveController,
);
// Element/attribute directive rendering the details card for a given alert id.
angular.module(moduleName).directive("tbaAlertDetails",
    [
        ()=> {
            return {
                restrict: "EA",
                bindToController: {
                    cardTitle: "@",
                    alertId:"="
                },
                controllerAs: "vm",
                scope: true,
                templateUrl: "./alert-details-template.html",
                controller: AlertDetailsDirectiveController
            };
        }
    ]);
// Dialog controller used by showEventDialog() to record a change event.
angular.module(moduleName).controller("EventDialogController",
    EventDialogController
);
    // RunState is used to tell if the run loop should continue
    /// Executes one request against the open transaction and replies on the
    /// request's `respond_to` channel.
    ///
    /// Send results are deliberately discarded (`let _ =`): the requester may
    /// have dropped its receiver, and that must not abort the run loop.
    /// Returns `RunState::Continue` for single/batch operations and
    /// `RunState::Finished` once the transaction is committed or rolled back.
    async fn process_msg(&mut self, op: TxOpRequest) -> RunState {
        match op.msg {
            TxOpRequestMsg::Single(ref operation, trace_id) => {
                let result = self.execute_single(&operation, trace_id).await;
                let _ = op.respond_to.send(TxOpResponse::Single(result));
                RunState::Continue
            }
            TxOpRequestMsg::Batch(ref operations, trace_id) => {
                let result = self.execute_batch(&operations, trace_id).await;
                let _ = op.respond_to.send(TxOpResponse::Batch(result));
                RunState::Continue
            }
            TxOpRequestMsg::Commit => {
                let resp = self.commit().await;
                let _ = op.respond_to.send(TxOpResponse::Committed(resp));
                RunState::Finished
            }
            TxOpRequestMsg::Rollback => {
                // `false` presumably distinguishes an explicit rollback from a
                // timeout-triggered one — TODO confirm against `rollback`'s signature.
                let resp = self.rollback(false).await;
                let _ = op.respond_to.send(TxOpResponse::RolledBack(resp));
                RunState::Finished
            }
        }
    }
// waitUntilWorkflowIsDone will wait until the workflow for the given operation is done (successfully or not) for the node.
// It polls the API every 7 seconds until the node appears in the workflow's
// "ok" (success) or "ko" (failure) buckets.
// NOTE(review): there is no overall timeout or cancellation — if the node
// never leaves the "processing" state this loop polls forever.
func (d *Driver) waitUntilWorkflowIsDone(operation string, wid string, node string) error {
	log.Infof("Waiting for workflow of '%s' operation to finish, it will take a few minutes...", operation)
	for {
		workflow, err := d.g5kAPI.GetOperationWorkflow(operation, wid)
		if err != nil {
			return err
		}
		// Success: the node completed the workflow.
		if ArrayContainsString(workflow.Nodes["ok"], node) {
			break
		}
		// Failure: the workflow ended in error for this node.
		if ArrayContainsString(workflow.Nodes["ko"], node) {
			return fmt.Errorf("Workflow for '%s' operation failed for the '%s' node", operation, node)
		}
		if ArrayContainsString(workflow.Nodes["processing"], node) {
			log.Debugf("Workflow for '%s' operation is in processing state for the '%s' node", operation, node)
		}
		time.Sleep(7 * time.Second)
	}
	log.Infof("Workflow for '%s' operation finished successfully for the '%s' node", operation, node)
	return nil
}
<reponame>joshyamal/cloudproxy
import boto3
import os
import json
import botocore as botocore
from cloudproxy.providers.config import set_auth
from cloudproxy.providers.settings import config
# Absolute path of this module's directory (kept for resource lookups).
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Module-level boto3 handles, bound to the region configured for the AWS provider.
ec2 = boto3.resource("ec2", region_name=config["providers"]["aws"]["region"])
ec2_client = boto3.client("ec2", region_name=config["providers"]["aws"]["region"])
# Tag applied to every proxy instance so it can be found/stopped/terminated later.
tags = [{"Key": "cloudproxy", "Value": "cloudproxy"}]
tag_specification = [
    {"ResourceType": "instance", "Tags": tags},
]
def create_proxy():
    """Launch a new CloudProxy EC2 instance in the account's default VPC.

    Ensures a 'cloudproxy' security group exists (opening ports 8899 and 22
    to the world), then starts a single tagged instance whose user data
    configures the proxy's authentication.

    Returns:
        list: the created ``ec2.Instance`` objects (length 1).

    Raises:
        RuntimeError: if the account/region has no default VPC.
    """
    default_vpc = None
    for vpc in ec2.vpcs.filter():
        response = ec2_client.describe_vpcs(VpcIds=[vpc.id])
        if response["Vpcs"][0]["IsDefault"]:
            default_vpc = response["Vpcs"][0]["VpcId"]
            break
    if default_vpc is None:
        # Bug fix: previously this fell through and raised a confusing
        # NameError on the create_security_group call below.
        raise RuntimeError("No default VPC found in region; cannot create the cloudproxy security group")
    user_data = set_auth(config["auth"]["username"], config["auth"]["password"])
    try:
        sg = ec2.create_security_group(
            Description="SG for CloudProxy", GroupName="cloudproxy", VpcId=default_vpc
        )
        sg.authorize_ingress(
            CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=8899, ToPort=8899
        )
        sg.authorize_ingress(
            CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=22, ToPort=22
        )
    except botocore.exceptions.ClientError:
        # The group (or its ingress rules) already exists from a previous
        # run; fall through and reuse it.
        pass
    sg_id = ec2_client.describe_security_groups(GroupNames=["cloudproxy"])
    sg_id = sg_id["SecurityGroups"][0]["GroupId"]
    instance = ec2.create_instances(
        # NOTE(review): hard-coded, region-specific AMI id — verify it exists
        # in config["providers"]["aws"]["region"].
        ImageId="ami-096cb92bb3580c759",
        MinCount=1,
        MaxCount=1,
        InstanceType=config["providers"]["aws"]["size"],
        NetworkInterfaces=[
            {"DeviceIndex": 0, "AssociatePublicIpAddress": True, "Groups": [sg_id]}
        ],
        TagSpecifications=tag_specification,
        UserData=user_data,
    )
    return instance
def delete_proxy(instance_id):
    """Terminate the EC2 instance identified by ``instance_id``."""
    return ec2.instances.filter(InstanceIds=[instance_id]).terminate()
def stop_proxy(instance_id):
    """Stop (without terminating) the EC2 instance identified by ``instance_id``."""
    return ec2.instances.filter(InstanceIds=[instance_id]).stop()
def start_proxy(instance_id):
    """Start the (previously stopped) EC2 instance identified by ``instance_id``."""
    return ec2.instances.filter(InstanceIds=[instance_id]).start()
def list_instances():
    """Return EC2 reservations for cloudproxy-tagged instances.

    Only non-terminated states are included (pending/running/stopped/stopping).
    """
    response = ec2_client.describe_instances(
        Filters=[
            {"Name": "tag:cloudproxy", "Values": ["cloudproxy"]},
            {"Name": "instance-state-name", "Values": ["pending", "running", "stopped", "stopping"]},
        ]
    )
    return response["Reservations"]
|
// Competitive-programming boilerplate: umbrella header, short aliases and
// loop/pair macros used by the solution below.
#include<bits/stdc++.h>
typedef long long int ull;  // NOTE(review): despite the name, this is SIGNED long long
typedef long long int ll;
#define ub upper_bound
#define lb lower_bound
#define pb push_back
#define m_p make_pair
#define int_char_index(a) a-'0'  // digit character -> numeric value
#define neginf -1000000001
#define inf 1000000000000000001
#define mod 1000000007  // common prime modulus for modular arithmetic
#define F first
#define S second
#define fr(j,x,y) for(j=x;j<=y;j++)   // inclusive ascending loop
#define N 2001  // max problem size; bound for the global answer array a[]
#define frn(i,x,y) for(i=x;i>=y;i--)  // inclusive descending loop
using namespace std;
// Modular exponentiation: returns (x^y) mod `mod` via binary exponentiation.
ll power(ll x, ll y)
{
    ll result = 1;
    for (x %= mod; y > 0; y >>= 1)
    {
        if (y & 1)
            result = (result * x) % mod;
        x = (x * x) % mod;
    }
    return result;
}
// Binary search for `item` in sorted a[index..n]; returns the index where the
// search stopped. NOTE(review): when `item` is absent the loop exits on b>e and
// the returned mid does NOT hold `item` (and may be out of range) — callers
// must verify a[mid]==item themselves.
ll bst(ll a[],ll item,ll index,ll n){ ll b=index,e=n,mid; mid=(b+e)/2; while(a[mid]!=item&&b<=e) { if(a[mid]<item) b=mid+1; else e=mid-1; mid=(b+e)/2; } return mid; }
// Maximum of three values.
ll max3(ll x, ll y, ll z) { return max({x, y, z}); }
// Minimum of three values.
ll min3(ll x, ll y, ll z) { return min({x, y, z}); }
// Left-rotate a string by one position: "abcd" -> "bcda".
string cir_shift(string s)
{
    return s.substr(1) + s.front();
}
// Exchange the values of a and b (thin wrapper over std::swap).
void swap1(ll &a, ll &b) { std::swap(a, b); }
// Answer buffer: a[i] holds the time slot assigned to interval i (0 = skipped).
ll a[N];
// Sort intervals by left endpoint; ties broken by original input index so
// earlier-read intervals are served first.
bool comp(pair<pair<ll,ll>,ll> a,pair<pair<ll,ll>,ll> b)
{
    if(a.F.F==b.F.F)
        return a.S<b.S;
    return a.F.F<b.F.F;
}
// For each test case: given n intervals [l, r], greedily assign each interval
// (in order of increasing l, ties by input index) the earliest free integer
// time within its range, or 0 if none is available, then print assignments.
int main()
{
    ios::sync_with_stdio(false);
    // Bug fix: tie() with no argument is a *getter* and changes nothing;
    // pass NULL to actually untie cin/cout for fast I/O.
    cin.tie(NULL);cout.tie(NULL);
    ll t,n,time,i,l,r;
    cin>>t;
    vector<pair<pair<ll,ll>,ll> > v;
    while(t--)
    {
        cin>>n;
        fr(i,1,n)
        {
            cin>>l>>r;
            v.pb(m_p(m_p(l,r),i));
        }
        sort(v.begin(),v.end(),comp);
        // Current candidate time slot, starting at the earliest left endpoint.
        time = v[0].F.F;
        fr(i,0,n-1)
        {
            // Jump forward if the next interval starts after the current slot.
            if(time<v[i].F.F)
            {
                time = v[i].F.F;
            }
            if(time<=v[i].F.S)
            {
                // Slot fits inside [l, r]: assign it and advance.
                a[v[i].S]=time;
                time++;
            }
            else
            {
                // Interval already passed: mark as unassigned.
                a[v[i].S]=0;
            }
        }
        fr(i,1,n)
            cout<<a[i]<<" ";
        cout<<endl;
        v.clear();
    }
}
|
def check_accessible(self, trans, history):
    """Return ``history`` when the current user may access it.

    Raises:
        exceptions.ItemAccessibilityException: when access is denied.
    """
    if not self.is_accessible(trans, history):
        raise exceptions.ItemAccessibilityException( "History is not accessible to the current user", type='error' )
    return history
Dirty Little Secret: Almost Nobody Cleans Contacts Properly
Enlarge this image toggle caption Marek Brzezinski/iStockPhoto.com Marek Brzezinski/iStockPhoto.com
People who wear contact lenses say they're diligent about keeping them clean. But press them for details, and it turns out that hardly anyone is doing it the right way.
"It's horrible," says Dwight Cavanagh, a clinical professor of ophthalmology at UT Southwestern Medical Center who surveyed contact wearers' hygiene habits. "It was like, 'Mom, I cleaned up my room.' If you go up on the second floor and open the door and look under the bed, what are you going to find?"
In a survey of more than 400 contact lens wearers, Cavanagh found that just 2 percent of them are following the rules for safe contact lens use. Chief among the sins is showering or swimming while wearing contacts, sleeping in them and using them longer than recommended before throwing them out.
People also commit "solution misuse," topping off the disinfectant solution in the case rather than starting afresh, and 47 percent of the people asked said they never replace their lens case, or only do so when the eye doctor gives them a new one at the annual visit. The research was published in the December issue of Optometry and Vision Science.
A separate new survey found that people have turned to beer, baby oil, Coke, petroleum jelly, lemonade, fruit juice, and butter as oh-so-wrong alternatives to contact lens solution. That was from an August 2011 survey in the United Kingdom by Bausch + Lomb, a lens solution manufacturer.
"Do you want to be one of those people who is going blind and it hurts like hell and you can't work for three months?" Cavanagh asked Shots. "Once you've got a serious eye infection going in your cornea, you're in trouble."
Eye infections caused by contact lenses are relatively rare; the risk ranges from 1 in 7,500 for hard-lens wearers to 1 in 500 for people who sleep in daily wear lenses. But multiply that by the 40 million people who put lenses in their eyes every day, and you can see why Cavanagh, a corneal surgeon who has to try to fix the damage, gets agitated.
"We see patients all the time with pseudomonas ulcers, gray green pus, they go blind," he continues. "We see amoeba infections from people showering in their contacts, going swimming in lakes. These infections are horrible."
OK, OK, you got our attention. Shots promises to no longer lick a contact lens before inserting. And showering or swimming with contacts is clearly a big no-no. That exposes eyes to Acanthamoeba, an organism that commonly lives in tap water and lakes. Some infections have involved contaminated contact lens solution, but other people have been infected by showering or swimming.
The American Academy of Ophthalmology has good information about the pluses and minuses of types of contact lenses, and how to keep lenses clean.
"""
TCF CLI VERSION
"""
__version__ = '0.1.1'
|
import { Component } from '@angular/core';
@Component({
  templateUrl: './header.page.html',
})
/**
 * Documentation/demo page for the `aui-header` component. The fields below
 * hold verbatim code samples rendered in the page template; their contents
 * (including whitespace) are displayed to the user and must not be reformatted.
 */
export class LayoutHeaderDemoPageComponent {
  /** Sample: importing HeaderModule into an Angular app module. */
  public header1 = `import { HeaderModule } from '@acpaas-ui/ngx-layout';
@NgModule({
  imports: [
    HeaderModule,
  ]
});
export class AppModule {};`;
  /** Sample: aui-header markup with logo, content and menu-item slots. */
  public header2 = `<aui-header>
  <div auiHeaderLogo>
    <aui-logo title="Antwerp logo." src="./assets/a-logo.svg" [link]="'/'"></aui-logo>
  </div>
  <div auiHeaderContent><!-- Optional --></div>
  <div auiHeaderMenuItem>
    <a class="a-button-negative o-header__button has-icon-left" href="http://github.com/digipolisantwerp/antwerp-ui_angular">
      <aui-icon name="ai-developer-community-github-1"></aui-icon>GitHub
    </a>
  </div>
</aui-header>
`;
}
|
<gh_stars>0
package uk.gov.gchq.gaffer.types;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TypeSubTypeValueTest {
    // Verifies compareTo ordering: equal triples compare as 0; any populated
    // instance sorts after null and after an empty instance; and the fields
    // are compared in order type, then subType, then value.
    @Test
    public void testComparisonsAreAsExpected() {
        TypeSubTypeValue typeSubTypeValue = new TypeSubTypeValue("a", "b", "c");
        // Identical fields -> equal.
        assertEquals(0, typeSubTypeValue.compareTo(new TypeSubTypeValue("a", "b", "c")));
        // null and empty sort before any populated value.
        assertTrue(typeSubTypeValue.compareTo(null) > 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue()) > 0);
        // Each field in turn decides the ordering when it differs.
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("1", "b", "c")) > 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("a", "a", "c")) > 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("a", "b", "a")) > 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("b", "a", "c")) < 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("a", "c", "c")) < 0);
        assertTrue(typeSubTypeValue.compareTo(new TypeSubTypeValue("a", "b", "d")) < 0);
    }
}
|
// UnmarshalJSON decode json time.Unix to t.
func (t *Time) UnmarshalJSON(b []byte) (err error) {
var sec int64
if err := json.Unmarshal(b, &sec); err != nil {
return err
}
*(*time.Time)(t) = time.Unix(sec, 0)
return nil
} |
PIV measurements and CFD simulations of a hydrofoil at lock-in
As part of an ongoing investigation into the mitigation of vortex induced vibrations of hydrofoils, a combined experimental and numerical study of the fluid-structure interactions and wake of a hydrofoil at lock-in has been conducted at the Waterpower laboratory of the Norwegian University of Science and Technology. The hydrofoil has a blunt trailing edge and Von Karman vortex shedding induces a lock-in effect at a chord based Reynolds number of about 2,7·106. The present paper presents the initial measurements of vortex shedding frequencies going through lock-in, along with CFD simulations at lock-off conditions as well as some empirical estimates of vortex shedding. Experimentally the hydrofoil wake was studied in detail using particle image velocimetry (PIV). Hydrofoil vibration frequencies were measured by both a strain gauge positioned near the trailing edge of the foil as well as by a laser doppler vibrometer (LD-V). Numerically the phenomena was simulated using ANSYS CFX. Several different turbulence models was tested, from the two-equation standard k − ϵ model to the scale adaptive SST-SAS model, with considerably different results. It is observed that the vibrations induced at lock-in considerably shifts and reduces the hydrofoil wake velocity deficit. Further, the CFD results suggest that the driving parameter influencing the shedding frequency is the cross flow separation distance at the trailing edge.
Introduction
In order to avoid unnecessary fatigue and risk of failure when designing hydraulic turbines, it is an established guideline that the shedding frequency of guide vanes, stay vanes, and runner blades should not overlap with the natural frequencies of the blades in the range of operation . According to Blake et al. , the recognition of the link between the trailing edge geometry and vortex shedding became widespread in the 1960's. However, as has been indicated by several researchers, the task of predicting the shedding frequency from a blunt trailing edge strut or hydrofoil can be difficult due to it being highly sensitive to both tip geometry as well the surrounding flow conditions affecting the boundary layers . Nevertheless, for trailing edges with sharp edges and clearly defined separation points modern CFD methods have proven effective . In modern installments of hydraulic turbines limits are constantly being pushed with respect to increasing the performance of components, while at the same time reducing material-and manufacturing costs. Accurate prediction of component behavior becomes more important because safety and expected lifespan must be kept at acceptable levels. For components which objective is to transfer forces to or from fluids, this continuous process might push the components into designs where the structure and fluid are mutually changing the behavior of each other. This is when prediction of the behavior of the dynamics of the system becomes more involved . In an effort to shed more light on the topic at hand, a combined numerical and experimental investigation is underway at the Norwegian University of Science and Technology (NTNU) for a hydrofoil under lock-in conditions. We present here some initial results.
2. Methods 2.1. Experimental Setup 2.1.1. PIV and frequency measurements setup The general layout of the experimental setup is illustrated in figure 1 a) and b). All measurements were done with the hydrofoil centerline angled parallel to the incoming flow field, i.e. 0 degrees angle of attack. The test section volumetric flow rate was measured by an ABB electromagnetic flow-meter located downstream of the test section. The maximum standard deviation in the mean flow during measurements was approximately 0.11 %, while the average temperature was 20.5±1.5 Celsius, giving an approximate chord based Reynolds number uncertainty of 3.65 %.
The hydrofoil vibration frequencies and amplitudes were measured with a strain-gauge located near the trailing edge at approximately mid-span, as well as with a surface laser doppler vibrometer (LD-V) pointing at the trailing edge. Data acquisition was managed with National Instruments (NI) LabVIEW and NI data acquisition devices (DAQ's). Sensor output voltages were sampled at 10 kHz, giving more than 10 samples per period for the frequencies investigated. For a more detailed explanation of the hydrofoil instrumentation, material properties and natural frequencies see Bergan et. al . The frequency amplitude spectra later presented were obtained by performing a P.D. Welch power spectrum analysis in MATLAB with a Hanning window. As a means of smoothing the amplitude spectra the sensor signal were split into varying lengths relative to the approximate shedding frequencies measured, with a 50 % segment overlap. The recording of the 2D PIV vector field in the wake of the hydrofoil was conducted with a high speed system provided by LaVision GmBH. Full resolution images of (1284 x 1024) pixels (px) were recorded in double frame mode at a sampling rate 2.4 kHz, with the camera Table 1 gives a summary of the recording parameters. The recording parameters were balanced such that the average image size of a tracer particle was about 2.4 px and the estimated particle displacement between each frame was about 5-6 px. Vector fields were evaluated using a multipass method, stepping from a 96 px x 96 px interrogation area (IA) with a 50 % overlap to a 64 px x 64 px IA with a 75 % overlap. This gave an average number of illuminated particle image pairs within each area of roughly 10. According to the synthetic PIV image generation evaluation described by Raffel et. al these are image parameters that should give relatively low root mean square (RMS) random errors in the cross correlation evaluation of the vector fields. 
The degree of peak locking was investigated and found to be acceptably low. The image scaling calibration RMS error of the 3'rd order polynomial fit was about 0.38 px for the reported measurements. To compute the uncertainty, U in the time-averaged stream wise velocity U distributions later presented, the following estimate was applied ; where σ U denotes the standard deviation in U across all samples during a measurement series and N ef f is the effective number of independent samples of U. N ef f involves the computation of the auto-correlation of the time-series of the instantaneous stream wise velocity vectors u(x, y, t) and approaches the total number of samples N in a signal in the case that the samples of u are completely independent. Finally, the error in U due to uncertainty in the the laser-plane span-wise position was investigated by measuring the hydrofoil wake at a parallel 10 mm offset plane. While the test clearly indicated 3D effects coming from the test section channel walls, the relative uncertainty in the positioning of the laser plane on the scale of 0.5 mm should have negligible impact on the time-averaged wake velocity distributions.
Hydrofoil Profile Geometry and Surface Roughness
To facilitate comparison with CFD simulations the hydrofoil surface roughness was measured with a profilometer at different chordwise positions, giving a maximum arithmetic roughness average R a of about 5.8 µm near the leading edge. Following Shclichting & Gersten , we approximate the technical roughness height by k tech = 3.5R a . For a maximum chord based Reynolds number of approximately Re C = 4.5·10 6 this gives for a fully turbulent boundary layer an estimated k + value of approximately 11. Hence we assume that the surface roughness height exceeds the viscous sub-layer near the leading edge and may play an important effect in the development of the boundary layer. Figure 2 shows the blade geometry. The hydrofoil surface position data was measured with a Leitz PMM-C 600 coordinate machine, capable of a repeat-ability range of less than 0.6 µm. These measurements were performed after the foil had been coated with a thin layer of matte black paint, to avoid unnecessary laser reflections in the PIV measurements. Figure 2(b) shows the measured trailing edge along with the numerical grid points along the blade used in the CFD simulations. A noticeable difference is only visible at the steepest part of the trailing edge, where the flow is assumed to be separated. Hence any considerable differences in between the results from the measurements and the simulations are assumed not to stem from differences in the profile geometry.
Numerical Setup
A numerical study was performed to investigate how well the shedding phenomena is predicted in the lock-off region. The numerical simulations was purely Computational Fluid Dynamics (CFD), i.e. no structural response. The simulations was performed in ANSYS CFX. The numerical domain and computational grid is illustrated in figure 3. The channel extends >> 10D h upstream of the blade to ensure that the flow entering the test section is fully developed . The domain was also extended downstream to minimize the risk of back-flow and outlet conditions affecting the blade vortex dynamics. The inlet boundary condition was static pressure, and the outlet condition was a mass flow corresponding to the different flow velocities. The turbulence intensity at the inlet was tested in the range I ∈ , with no noticeable difference in the turbulence levels in the test section.
The mesh was created in ANSYS ICEM CFD, and contained about 13 million, all hexahedral elements. When refining the mesh, it was observed that the coarser mesh simulations underpredicted the shedding frequency. On the final mesh, for a typical flow velocity tested in this article, U ref =11 m/s, the maximal y + value was 1 and ≤ 0.5 at the leading and trailing edge, respectively. As an implicit numerical solver was used the time-step was chosen to be 8e-5 s, giving a corresponding Courant number of 3. Shedding frequencies in the order of f = 500 Hz were expected, and the time-step used corresponded to about 25 points per period. For the Reynolds Averaged Navier-Stokes based simulations several different turbulence models have been tested in order to investigate the effects on the predicted shedding dynamics. The standard k − ε model was expected in this case likely to struggle to give an accurate result, due to the known problems with separation and streamline curvature. Another two-equation model, the k − ω model, was also tested, along with the k − ω SST model, a combination of the two. Additionally, the scale-adaptive SAS-SST model was tested. Given an adequate computational grid the SAS model resolves the larger turbulent structures, at increased computational costs. Further, 2-dimensional simulations were performed on a simplified, shortened test section to investigate means of speeding up the simulations.
Empirical estimates for vortex shedding
It may be interesting to compare the measured and CFD predicted shedding frequencies with some empirical estimates. The first empirical approach utilized in the present study is the traditional Strouhal shedding frequency , f s , here defined as where D in this case is approximated as the blade thickness at the trailing edge, St denotes the Strouhal number and U ∞ is the free stream velocity. For the chord based Reynolds number range encountered in this study, the Strouhal number was chosen to be St = 0.22, a commonly used value . The thickness of the trailing edge, D = 4.8 mm, is measured at the point where the curved surface starts, see figure 2 An empirical formula more specific for the Francis turbine and different trailing edge geometries is described in the paper by Brekke , where the frequency of the vortex shedding is approximated by: Here B is a constant linked to the trailing edge geometry, U ∞ is the free stream velocity, and t is the blade thickness in . The constant B = 131 is chosen from , and is related to a trailing edge geometry very close to the one tested here. Note however that in Brekke's considerations, all blade geometries had parallel upper and lower surfaces. This is not the case for the blade geometry in this study which has tapered surfaces toward the trailing edge. Hence, strictly t = D. Nevertheless equation 3 is used here in its current form.
Wall effects
The hydrofoil tested in this study has a front section area to test section area blockage ratio of 8 %, and requires a correction for the measured Strouhal number, St meas , due to added wall effects . Following the considerations of Ota et al. for incompressible flow over a 2D-geometry experimental setup, we estimate the correction factor, , by the following relation for the free stream Strouhal number St, Here t denotes the height of the hydrofoil (12mm) and h the height of the measuring section (150mm), giving t/h = 8%. The observed correction factor is here estimated by the assumed free stream Strouhal number of 0.22, and the measured Strouhal number outside the range of lock-in. Note that for the rough empirical estimates for the shedding frequencies later presented the free stream velocity is approximated as the average velocity across the undisturbed test section, such that U ∞ = U ref , since, to the authors knowledge there is no reliable way to estimate the correction factor a-priori for the geometry tested in this study. There is a precise agreement between the measuring techniques in lock-in, with resonance starting to occur at around 11.1 m/s as indicated by the sharp rise in the vibrational amplitude. The first, standing peak found in the straingauge frequency amplitude spectrum, presented for some velocities outside lock-in in figure 5, is identified as the hydrofoil natural frequency . The second, broad ranged traveling peak found in the frequency spectrum in the strain measurements can be identified as the shedding frequencies in lock-off conditions, as indicated by the agreement with PIV measurements in the wake. Since the shedding frequency is assumed to be inherently gaussian about it's mean value, the size of the error bars given in figure 4 in the strain-gauge 2'nd peak was estimated by the half width at half maximum (HWHM) for the smoothed frequency distribution curves given in figure 5. 
In lock-in, the uncertainty in the hydrofoil vibrational frequency was estimated by the standard deviation in peak frequency from repeated measurements at 11.1 m/s and found to be approximately 1.6 Hz. Figure 6 gives the normalised time-averaged streamwise velocity distribution in the hydrofoil wake measured by PIV at two downstream positions x 1 = 9.9D and x 2 = 13.3D, measured from the trailing edge tip. Sets of varying reference velocities are included, both for lock-off (6(a)-(b)) and lock-in conditions (6(c)-(d)). It is noted that the wake velocity distributions varies considerably more during lock-in. Figure 7 shows the stream-wise velocity at two vertical lines, x 1 = 9.9D and x 2 = 13.3D, downstream of the blade at U ref = 9.1 m/s. The velocities are time averaged over 2 s and ≈ 100 shedding periods in the experiment and simulations, respectively.
Numerical results
Next, the frequency of the shedding is compared in figure 8(a). The uncertainty in the fast Fourier transform of the simulated time-signal is estimated to be ξ ≈ 5 Hz. We observe that a linear trend is found for all turbulence models, as is reported by most sources e.g. , and thus it is assumed that the general vortex shedding phenomena is captured.
Discussion
From Heskestad and Olberts we note that according to their measurements, 3 geometries all similar in design to the foil under investigation here, gave a relative standard deviation in the shedding frequency Strouhal number of about 12%. This indicates that the trailing edge shedding frequency is quite sensitive to small changes in the geometry. Hence the error in the rough empirical estimates (equation 2 & equation 3) for the shedding frequencies of about 20% and the wall correction factor estimated to = 2.46 is assumed to contain both wall effects as well as boundary layer separation effects specific to the hydrofoil and trailing edge geometry tested here. The level of the error is sobering, and illustrates the need for either model measurements or accurate case dependent numerical tools in the design phase of hydraulic turbine blade components, even in lock-of conditions. Comparing experimental and numerical results in figure 7 we observe that the wake center is consistently shifted slightly below the hydrofoil center line, with about 0.2D in the experiments at lock-off conditions. It is believed that this is due to the asymmetry of the trailing edge, as the upper separation point is allowed to travel closer to the hydrofoil's centerline due to the relatively gentle curvature, effectively shifting the wake profile. In the simulations points of zero wall shear stress was investigated in order to study the separation points impact on the wake and shedding frequencies obtained. When different flow velocities are compared, the separation occurs later the higher the flow velocity. Correspondingly, the "perceived" thickness of the trailing edge is also thinner at higher speeds. When comparing the results from different 10 turbulence models at a fixed reference velocity it was also found that a delayed separation point corresponded to an increase in the shedding frequency. 
This indicates that the differences in the turbulence models lie in the simulated boundary layer and subsequently the numerical prediction of the boundary layer separation points. As an example, comparing results in figure 7-9, the k − model separated latest along the trailing edge, resulted in the highest shedding frequency and the most vertically shifted wake.
As an overall trend, the numerical simulations tended to underestimate the velocity deficit, except for the k− model, where the deficit was overestimated. The wake is considerably different depending on which turbulence model is used. From figure 9 we can start to understand why the different velocity profiles in figure 7 look like they do. In the SAS model, the wake breaks down into smaller vortexes, and thus the mean flow in the wake is higher and flatter. This corresponds to the lower velocity deficit in figure 7. Similarly, it can seem like the k − model have the strongest velocity deficit and widest wake. It is also observed that the oscillations in the k − wake are very small. Compared to the PIV measurements, the SAS model performs qualitatively best (not shown), while interestingly, figure 7 indicates that the mean velocity distribution is farthest from the experimental values for this model. Figure 8 (a) show the trends of the shedding frequency for different turbulence models. A linear best fit is performed and the slopes are used as comparison. The slope is varying with ≈ 22%, from k − to SAS. The SST and the SAS model perform very similarly. 2-dimensional simulations with the SST turbulence model is also included in figure 8 (a). Clearly, they provide some predictive value while also providing a significant speedup factor, in this case, of ≈ 70×. When comparing the numerical and experimental results in figure 8 (b), it is seen that there is a general under prediction of the shedding frequency by the numerical simulations. Based on the slope of the experimental results after a linear best fit the offset between the SST and SAS model compared to the experimental values are < 4%, while the numerically observed Strouhal number is about 10 % lower than the comparatively obtained experimental Strouhal number of 0.274 in lock-off conditions. 
A closer investigation of the possible reasons for the relative offset between the CFD calculations and the experimentally obtained results are part of future work, along with simulations utilizing a fluid-structure interaction coupling for numerical investigation of the hydrofoil behaviour in lock-in.
Conclusion
The wake and shedding frequencies from a hydrofoil with a blunt, asymmetrical trailing edge has been investigated for free-stream velocities where turbulent von-karman vortexes incites a lock-in effect. At lock-in we observe larger stream-wise velocity fluctuations in the hydrofoil wake, than in lock-off conditions, likely due to wandering of the upper separation point at the trailing edge tip. Experimentally obtained shedding frequencies has been compared to numerical simulations as well as empirical estimates. The relative differences between simulations with different turbulence models clearly indicate the difficulties in the modelling of the separation points and subsequent wake characteristics crucial to estimating the risk of lock-in at the design phase for a hydraulic turbine blade component. The numerically obtained results for the trend in the shedding frequencies are in relative agreement with previous studies for similar trailing edge geometries , indicating that a delayed separation point leads to increased shedding frequencies. |
<filename>tests/test_actor.rs
use std::sync;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use actori::prelude::*;
use tokio::time::{delay_for, Duration, Instant};
/// Message carrying a single value to be added to a shared counter.
#[derive(Debug)]
struct Num(usize);
impl Message for Num {
    // Fire-and-forget: handlers produce no reply value.
    type Result = ();
}
/// Actor that sums streamed `Num` values into the `AtomicUsize` (field 0)
/// and flags stream completion via the `AtomicBool` (field 1).
struct MyActor(Arc<AtomicUsize>, Arc<AtomicBool>, Running);
impl Actor for MyActor {
    type Context = actori::Context<Self>;
    fn stopping(&mut self, _: &mut Self::Context) -> Running {
        // When this actor shuts down the test is over: stop the whole system.
        System::current().stop();
        Running::Stop
    }
}
impl StreamHandler<Num> for MyActor {
    /// Accumulate each streamed value into the shared counter.
    fn handle(&mut self, msg: Num, _: &mut Context<MyActor>) {
        self.0.fetch_add(msg.0, Ordering::Relaxed);
    }
    /// Invoked once the attached stream is exhausted; records completion.
    fn finished(&mut self, _: &mut Context<MyActor>) {
        self.1.store(true, Ordering::Relaxed);
    }
}
#[actori_rt::test]
async fn test_stream() {
    // Shared state observed after the actor has processed the stream.
    let count = Arc::new(AtomicUsize::new(0));
    let err = Arc::new(AtomicBool::new(false));
    let items = vec![Num(1), Num(1), Num(1), Num(1), Num(1), Num(1), Num(1)];
    let act_count = Arc::clone(&count);
    let act_err = Arc::clone(&err);
    MyActor::create(move |ctx| {
        // Attach the 7-item stream before the actor starts handling messages.
        MyActor::add_stream(futures::stream::iter::<_>(items), ctx);
        MyActor(act_count, act_err, Running::Stop)
    });
    // NOTE(review): fixed 1 ms sleep assumes the stream drains in time;
    // potentially flaky on a loaded machine — confirm acceptable.
    delay_for(Duration::new(0, 1_000_000)).await;
    // All seven Num(1) values were summed...
    assert_eq!(count.load(Ordering::Relaxed), 7);
    // ...and `finished` ran (despite the name, `err` records completion).
    assert!(err.load(Ordering::Relaxed));
}
// Sync-context actor used to observe lifecycle callbacks: each counter
// records how often the corresponding hook fired across (re)starts.
struct MySyncActor {
    started: Arc<AtomicUsize>,
    stopping: Arc<AtomicUsize>,
    stopped: Arc<AtomicUsize>,
    msgs: Arc<AtomicUsize>,
    // When true, the actor stops itself after handling a message.
    stop: bool,
}
impl Actor for MySyncActor {
    type Context = actori::SyncContext<Self>;
    fn started(&mut self, _: &mut Self::Context) {
        self.started.fetch_add(1, Ordering::Relaxed);
    }
    fn stopping(&mut self, _: &mut Self::Context) -> Running {
        self.stopping.fetch_add(1, Ordering::Relaxed);
        // Returning Continue asks the framework to keep the actor alive.
        Running::Continue
    }
    fn stopped(&mut self, _: &mut Self::Context) {
        self.stopped.fetch_add(1, Ordering::Relaxed);
    }
}
impl actori::Handler<Num> for MySyncActor {
    type Result = ();
    fn handle(&mut self, msg: Num, ctx: &mut Self::Context) {
        // Sum payloads, then optionally trigger a self-stop (which the
        // SyncArbiter answers by constructing a replacement actor).
        self.msgs.fetch_add(msg.0, Ordering::Relaxed);
        if self.stop {
            ctx.stop();
        }
    }
}
#[test]
fn test_restart_sync_actor() {
    let started = Arc::new(AtomicUsize::new(0));
    let stopping = Arc::new(AtomicUsize::new(0));
    let stopped = Arc::new(AtomicUsize::new(0));
    let msgs = Arc::new(AtomicUsize::new(0));
    let started1 = Arc::clone(&started);
    let stopping1 = Arc::clone(&stopping);
    let stopped1 = Arc::clone(&stopped);
    let msgs1 = Arc::clone(&msgs);
    System::run(move || {
        // The factory closure runs once per actor (re)start. The first
        // instance sees started == 0 and so gets stop == true, making it shut
        // itself down after its first message; the replacement instance sees
        // started == 1 and keeps running.
        let addr = SyncArbiter::start(1, move || MySyncActor {
            started: Arc::clone(&started1),
            stopping: Arc::clone(&stopping1),
            stopped: Arc::clone(&stopped1),
            msgs: Arc::clone(&msgs1),
            stop: started1.load(Ordering::Relaxed) == 0,
        });
        addr.do_send(Num(2));
        actori_rt::spawn(async move {
            let _ = addr.send(Num(4)).await;
            // NOTE(review): fixed 1 ms sleep to let counters settle before
            // stopping the system - timing-dependent.
            delay_for(Duration::new(0, 1_000_000)).await;
            System::current().stop();
        });
    })
    .unwrap();
    // Two actor incarnations ran, and both messages (2 + 4) were handled.
    assert_eq!(started.load(Ordering::Relaxed), 2);
    assert_eq!(stopping.load(Ordering::Relaxed), 2);
    assert_eq!(stopped.load(Ordering::Relaxed), 2);
    assert_eq!(msgs.load(Ordering::Relaxed), 6);
}
// Actor that starts a 110 ms interval timer and, once the requested number of
// ticks has elapsed, reports the Instant captured at startup over the channel.
struct IntervalActor {
    elapses_left: usize,
    sender: sync::mpsc::Sender<Instant>,
    // Set in started(); taken (consumed) when the countdown reaches zero.
    instant: Option<Instant>,
}
impl IntervalActor {
    pub fn new(elapses_left: usize, sender: sync::mpsc::Sender<Instant>) -> Self {
        Self {
            //We stop at 0, so add 1 to make number of intervals equal to elapses_left
            elapses_left: elapses_left + 1,
            sender,
            instant: None,
        }
    }
}
impl Actor for IntervalActor {
    type Context = actori::Context<Self>;
    fn started(&mut self, ctx: &mut Self::Context) {
        self.instant = Some(Instant::now());
        ctx.run_interval(Duration::from_millis(110), move |act, ctx| {
            act.elapses_left -= 1;
            if act.elapses_left == 0 {
                // Hand the start Instant back to the test thread, then shut
                // down both this actor and the whole System.
                act.sender
                    .send(act.instant.take().expect("To have Instant"))
                    .expect("To send result");
                ctx.stop();
                System::current().stop();
            }
        });
    }
}
#[test]
fn test_run_interval() {
    const MAX_WAIT: Duration = Duration::from_millis(10_000);
    let (sender, receiver) = sync::mpsc::channel();
    // Run the actor system on its own thread so this test thread can block on
    // the channel below.
    std::thread::spawn(move || {
        System::run(move || {
            let _addr = IntervalActor::new(10, sender).start();
        })
        .unwrap();
    });
    let result = receiver
        .recv_timeout(MAX_WAIT)
        .expect("To receive response in time");
    //We wait 10 intervals by ~100ms
    assert_eq!(result.elapsed().as_secs(), 1);
}
|
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
//
// A number of test-cases based on:
//
// https://golang.org/src/fmt/fmt_test.go
// BSD: Copyright (c) 2009 The Go Authors. All rights reserved.
import { sprintf } from "./printf.ts";
import { assertEquals } from "../testing/asserts.ts";
const S = sprintf;
// Plain text, literal percent, booleans, and binary/character integer verbs.
Deno.test("noVerb", (): void => {
  assertEquals(S("bla"), "bla");
});
Deno.test("percent", (): void => {
  assertEquals(S("%%"), "%");
  assertEquals(S("!%%!"), "!%!");
  assertEquals(S("!%%"), "!%");
  assertEquals(S("%%!"), "%!");
});
Deno.test("testBoolean", (): void => {
  assertEquals(S("%t", true), "true");
  assertEquals(S("%10t", true), "      true");
  assertEquals(S("%-10t", false), "false     ");
  assertEquals(S("%t", false), "false");
  assertEquals(S("bla%t", true), "blatrue");
  assertEquals(S("%tbla", false), "falsebla");
});
Deno.test("testIntegerB", (): void => {
  assertEquals(sprintf("%b", 4), "100");
  assertEquals(sprintf("%b", -4), "-100");
  assertEquals(
    sprintf("%b", 4.1),
    "100.0001100110011001100110011001100110011001100110011",
  );
  assertEquals(
    sprintf("%b", -4.1),
    "-100.0001100110011001100110011001100110011001100110011",
  );
  assertEquals(
    sprintf("%b", Number.MAX_SAFE_INTEGER),
    "11111111111111111111111111111111111111111111111111111",
  );
  assertEquals(
    sprintf("%b", Number.MIN_SAFE_INTEGER),
    "-11111111111111111111111111111111111111111111111111111",
  );
  // width
  assertEquals(sprintf("%4b", 4), " 100");
});
Deno.test("testIntegerC", (): void => {
  assertEquals(sprintf("%c", 0x31), "1");
  assertEquals(sprintf("%c%b", 0x31, 1), "11");
  assertEquals(sprintf("%c", 0x1f4a9), "💩");
  //width
  assertEquals(sprintf("%4c", 0x31), "   1");
});
Deno.test("testIntegerD", function (): void {
assertEquals(S("%d", 4), "4");
assertEquals(S("%d", -4), "-4");
assertEquals(S("%d", Number.MAX_SAFE_INTEGER), "9007199254740991");
assertEquals(S("%d", Number.MIN_SAFE_INTEGER), "-9007199254740991");
});
Deno.test("testIntegerO", function (): void {
assertEquals(S("%o", 4), "4");
assertEquals(S("%o", -4), "-4");
assertEquals(S("%o", 9), "11");
assertEquals(S("%o", -9), "-11");
assertEquals(S("%o", Number.MAX_SAFE_INTEGER), "377777777777777777");
assertEquals(S("%o", Number.MIN_SAFE_INTEGER), "-377777777777777777");
// width
assertEquals(S("%4o", 4), " 4");
});
Deno.test("testIntegerx", function (): void {
assertEquals(S("%x", 4), "4");
assertEquals(S("%x", -4), "-4");
assertEquals(S("%x", 9), "9");
assertEquals(S("%x", -9), "-9");
assertEquals(S("%x", Number.MAX_SAFE_INTEGER), "1fffffffffffff");
assertEquals(S("%x", Number.MIN_SAFE_INTEGER), "-1fffffffffffff");
// width
assertEquals(S("%4x", -4), " -4");
assertEquals(S("%-4x", -4), "-4 ");
// plus
assertEquals(S("%+4x", 4), " +4");
assertEquals(S("%-+4x", 4), "+4 ");
});
Deno.test("testIntegerX", function (): void {
assertEquals(S("%X", 4), "4");
assertEquals(S("%X", -4), "-4");
assertEquals(S("%X", 9), "9");
assertEquals(S("%X", -9), "-9");
assertEquals(S("%X", Number.MAX_SAFE_INTEGER), "1FFFFFFFFFFFFF");
assertEquals(S("%X", Number.MIN_SAFE_INTEGER), "-1FFFFFFFFFFFFF");
});
Deno.test("testFloate", function (): void {
assertEquals(S("%e", 4), "4.000000e+00");
assertEquals(S("%e", -4), "-4.000000e+00");
assertEquals(S("%e", 4.1), "4.100000e+00");
assertEquals(S("%e", -4.1), "-4.100000e+00");
assertEquals(S("%e", Number.MAX_SAFE_INTEGER), "9.007199e+15");
assertEquals(S("%e", Number.MIN_SAFE_INTEGER), "-9.007199e+15");
});
Deno.test("testFloatE", function (): void {
assertEquals(S("%E", 4), "4.000000E+00");
assertEquals(S("%E", -4), "-4.000000E+00");
assertEquals(S("%E", 4.1), "4.100000E+00");
assertEquals(S("%E", -4.1), "-4.100000E+00");
assertEquals(S("%E", Number.MAX_SAFE_INTEGER), "9.007199E+15");
assertEquals(S("%E", Number.MIN_SAFE_INTEGER), "-9.007199E+15");
assertEquals(S("%E", Number.MIN_VALUE), "5.000000E-324");
assertEquals(S("%E", Number.MAX_VALUE), "1.797693E+308");
});
Deno.test("testFloatfF", function (): void {
assertEquals(S("%f", 4), "4.000000");
assertEquals(S("%F", 4), "4.000000");
assertEquals(S("%f", -4), "-4.000000");
assertEquals(S("%F", -4), "-4.000000");
assertEquals(S("%f", 4.1), "4.100000");
assertEquals(S("%F", 4.1), "4.100000");
assertEquals(S("%f", -4.1), "-4.100000");
assertEquals(S("%F", -4.1), "-4.100000");
assertEquals(S("%f", Number.MAX_SAFE_INTEGER), "9007199254740991.000000");
assertEquals(S("%F", Number.MAX_SAFE_INTEGER), "9007199254740991.000000");
assertEquals(S("%f", Number.MIN_SAFE_INTEGER), "-9007199254740991.000000");
assertEquals(S("%F", Number.MIN_SAFE_INTEGER), "-9007199254740991.000000");
assertEquals(S("%f", Number.MIN_VALUE), "0.000000");
assertEquals(
S("%.324f", Number.MIN_VALUE),
// eslint-disable-next-line max-len
"0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005",
);
assertEquals(S("%F", Number.MIN_VALUE), "0.000000");
assertEquals(
S("%f", Number.MAX_VALUE),
// eslint-disable-next-line max-len
"179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000",
);
assertEquals(
S("%F", Number.MAX_VALUE),
// eslint-disable-next-line max-len
"179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000",
);
});
Deno.test("testString", function (): void {
assertEquals(S("%s World%s", "Hello", "!"), "Hello World!");
});
Deno.test("testHex", function (): void {
assertEquals(S("%x", "123"), "313233");
assertEquals(S("%x", "n"), "6e");
});
Deno.test("testHeX", function (): void {
assertEquals(S("%X", "123"), "313233");
assertEquals(S("%X", "n"), "6E");
});
Deno.test("testType", function (): void {
assertEquals(S("%T", new Date()), "object");
assertEquals(S("%T", 123), "number");
assertEquals(S("%T", "123"), "string");
assertEquals(S("%.3T", "123"), "str");
});
Deno.test("testPositional", function (): void {
assertEquals(S("%[1]d%[2]d", 1, 2), "12");
assertEquals(S("%[2]d%[1]d", 1, 2), "21");
});
Deno.test("testSharp", function (): void {
assertEquals(S("%#x", "123"), "0x313233");
assertEquals(S("%#X", "123"), "0X313233");
assertEquals(S("%#x", 123), "0x7b");
assertEquals(S("%#X", 123), "0X7B");
assertEquals(S("%#o", 123), "0173");
assertEquals(S("%#b", 4), "0b100");
});
Deno.test("testWidthAndPrecision", function (): void {
assertEquals(
S("%9.99d", 9),
// eslint-disable-next-line max-len
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009",
);
assertEquals(S("%1.12d", 9), "000000000009");
assertEquals(S("%2s", "a"), " a");
assertEquals(S("%2d", 1), " 1");
assertEquals(S("%#4x", 1), " 0x1");
assertEquals(
S("%*.99d", 9, 9),
// eslint-disable-next-line max-len
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009",
);
assertEquals(
S("%9.*d", 99, 9),
// eslint-disable-next-line max-len
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009",
);
assertEquals(S("%*s", 2, "a"), " a");
assertEquals(S("%*d", 2, 1), " 1");
assertEquals(S("%#*x", 4, 1), " 0x1");
});
Deno.test("testDash", function (): void {
assertEquals(S("%-2s", "a"), "a ");
assertEquals(S("%-2d", 1), "1 ");
});
Deno.test("testPlus", function (): void {
assertEquals(S("%-+3d", 1), "+1 ");
assertEquals(S("%+3d", 1), " +1");
assertEquals(S("%+3d", -1), " -1");
});
Deno.test("testSpace", function (): void {
assertEquals(S("% -3d", 3), " 3 ");
});
Deno.test("testZero", function (): void {
assertEquals(S("%04s", "a"), "000a");
});
// relevant test cases from fmt_test.go
// Each entry is [format, argument, expected]; executed by "testThorough" below.
// deno-lint-ignore no-explicit-any
const tests: Array<[string, any, string]> = [
  ["%d", 12345, "12345"],
  ["%v", 12345, "12345"],
  ["%t", true, "true"],
  // basic string
  ["%s", "abc", "abc"],
  // ["%q", "abc", `"abc"`], // TODO: need %q?
  ["%x", "abc", "616263"],
  ["%x", "\xff\xf0\x0f\xff", "fff00fff"],
  ["%X", "\xff\xf0\x0f\xff", "FFF00FFF"],
  ["%x", "", ""],
  ["% x", "", ""],
  ["%#x", "", ""],
  ["%# x", "", ""],
  ["%x", "xyz", "78797a"],
  ["%X", "xyz", "78797A"],
  ["% x", "xyz", "78 79 7a"],
  ["% X", "xyz", "78 79 7A"],
  ["%#x", "xyz", "0x78797a"],
  ["%#X", "xyz", "0X78797A"],
  ["%# x", "xyz", "0x78 0x79 0x7a"],
  ["%# X", "xyz", "0X78 0X79 0X7A"],
  // basic bytes : TODO special handling for Buffer? other std types?
  // escaped strings : TODO decide whether to have %q
  // characters
  ["%c", "x".charCodeAt(0), "x"],
  ["%c", 0xe4, "ä"],
  ["%c", 0x672c, "本"],
  ["%c", "日".charCodeAt(0), "日"],
  // Specifying precision should have no effect.
  ["%.0c", "⌘".charCodeAt(0), "⌘"],
  ["%3c", "⌘".charCodeAt(0), "  ⌘"],
  ["%-3c", "⌘".charCodeAt(0), "⌘  "],
  // Runes that are not printable.
  // {"%c", '\U00000e00', "\u0e00"}, // TODO check if \U escape exists in js
  //["%c", '\U0010ffff'.codePointAt(0), "\U0010ffff"],
  // Runes that are not valid.
  ["%c", -1, "�"],
  // TODO surrogate half, doesn't make sense in itself, how
  // to determine in JS?
  // ["%c", 0xDC80, "�"],
  ["%c", 0x110000, "�"],
  ["%c", 0xfffffffff, "�"],
  // TODO
  // escaped characters
  // Runes that are not printable.
  // Runes that are not valid.
  // width
  ["%5s", "abc", "  abc"],
  ["%2s", "\u263a", " ☺"],
  ["%-5s", "abc", "abc  "],
  ["%05s", "abc", "00abc"],
  ["%5s", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"],
  ["%.5s", "abcdefghijklmnopqrstuvwxyz", "abcde"],
  ["%.0s", "日本語日本語", ""],
  ["%.5s", "日本語日本語", "日本語日本"],
  ["%.10s", "日本語日本語", "日本語日本語"],
  // ["%08q", "abc", `000"abc"`], // TODO verb q
  // ["%-8q", "abc", `"abc" `],
  //["%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`],
  ["%.5x", "abcdefghijklmnopqrstuvwxyz", "6162636465"],
  //["%.3q", "日本語日本語", `"日本語"`],
  //["%.1q", "日本語", `"日"`]
  // change of go testcase utf-8([日]) = 0xe697a5, utf-16= 65e5 and
  // our %x takes lower byte of string "%.1x", "日本語", "e6"],,
  ["%.1x", "日本語", "e5"],
  //["%10.1q", "日本語日本語", ` "日"`],
  // ["%10v", null, " <nil>"], // TODO null, undefined ...
  // ["%-10v", null, "<nil> "],
  // integers
  ["%d", 12345, "12345"],
  ["%d", -12345, "-12345"],
  // ["%d", ^uint8(0), "255"],
  //["%d", ^uint16(0), "65535"],
  //["%d", ^uint32(0), "4294967295"],
  //["%d", ^uint64(0), "18446744073709551615"],
  ["%d", -1 << 7, "-128"],
  ["%d", -1 << 15, "-32768"],
  ["%d", -1 << 31, "-2147483648"],
  //["%d", (-1 << 63), "-9223372036854775808"],
  ["%.d", 0, ""],
  ["%.0d", 0, ""],
  ["%6.0d", 0, "      "],
  ["%06.0d", 0, "      "], // 0 flag should be ignored
  ["% d", 12345, " 12345"],
  ["%+d", 12345, "+12345"],
  ["%+d", -12345, "-12345"],
  ["%b", 7, "111"],
  ["%b", -6, "-110"],
  // ["%b", ^uint32(0), "11111111111111111111111111111111"],
  // ["%b", ^uint64(0),
  //  "1111111111111111111111111111111111111111111111111111111111111111"],
  // ["%b", int64(-1 << 63), zeroFill("-1", 63, "")],
  // 0 octal notation not allowed in struct node...
  ["%o", parseInt("01234", 8), "1234"],
  ["%#o", parseInt("01234", 8), "01234"],
  // ["%o", ^uint32(0), "37777777777"],
  // ["%o", ^uint64(0), "1777777777777777777777"],
  ["%#X", 0, "0X0"],
  ["%x", 0x12abcdef, "12abcdef"],
  ["%X", 0x12abcdef, "12ABCDEF"],
  // ["%x", ^uint32(0), "ffffffff"],
  // ["%X", ^uint64(0), "FFFFFFFFFFFFFFFF"],
  ["%.20b", 7, "00000000000000000111"],
  ["%10d", 12345, "     12345"],
  ["%10d", -12345, "    -12345"],
  ["%+10d", 12345, "    +12345"],
  ["%010d", 12345, "0000012345"],
  ["%010d", -12345, "-000012345"],
  ["%20.8d", 1234, "            00001234"],
  ["%20.8d", -1234, "           -00001234"],
  ["%020.8d", 1234, "            00001234"],
  ["%020.8d", -1234, "           -00001234"],
  ["%-20.8d", 1234, "00001234            "],
  ["%-20.8d", -1234, "-00001234           "],
  ["%-#20.8x", 0x1234abc, "0x01234abc          "],
  ["%-#20.8X", 0x1234abc, "0X01234ABC          "],
  ["%-#20.8o", parseInt("01234", 8), "00001234            "],
  // Test correct f.intbuf overflow checks. // TODO, lazy
  // unicode format // TODO, decide whether unicode verb makes sense %U
  // floats
  ["%+.3e", 0.0, "+0.000e+00"],
  ["%+.3e", 1.0, "+1.000e+00"],
  ["%+.3f", -1.0, "-1.000"],
  ["%+.3F", -1.0, "-1.000"],
  //["%+.3F", float32(-1.0), "-1.000"],
  ["%+07.2f", 1.0, "+001.00"],
  ["%+07.2f", -1.0, "-001.00"],
  ["%-07.2f", 1.0, "1.00   "],
  ["%-07.2f", -1.0, "-1.00  "],
  ["%+-07.2f", 1.0, "+1.00  "],
  ["%+-07.2f", -1.0, "-1.00  "],
  ["%-+07.2f", 1.0, "+1.00  "],
  ["%-+07.2f", -1.0, "-1.00  "],
  ["%+10.2f", +1.0, "     +1.00"],
  ["%+10.2f", -1.0, "     -1.00"],
  ["% .3E", -1.0, "-1.000E+00"],
  ["% .3e", 1.0, " 1.000e+00"],
  ["%+.3g", 0.0, "+0"],
  ["%+.3g", 1.0, "+1"],
  ["%+.3g", -1.0, "-1"],
  ["% .3g", -1.0, "-1"],
  ["% .3g", 1.0, " 1"],
  // //["%b", float32(1.0), "8388608p-23"],
  // ["%b", 1.0, "4503599627370496p-52"],
  // // Test sharp flag used with floats.
  ["%#g", 1e-323, "1.00000e-323"],
  ["%#g", -1.0, "-1.00000"],
  ["%#g", 1.1, "1.10000"],
  ["%#g", 123456.0, "123456."],
  //["%#g", 1234567.0, "1.234567e+06"],
  // the line above is incorrect in go (according to
  // my posix reading) %f-> prec = prec-1
  ["%#g", 1234567.0, "1.23457e+06"],
  ["%#g", 1230000.0, "1.23000e+06"],
  ["%#g", 1000000.0, "1.00000e+06"],
  ["%#.0f", 1.0, "1."],
  ["%#.0e", 1.0, "1.e+00"],
  ["%#.0g", 1.0, "1."],
  ["%#.0g", 1100000.0, "1.e+06"],
  ["%#.4f", 1.0, "1.0000"],
  ["%#.4e", 1.0, "1.0000e+00"],
  ["%#.4g", 1.0, "1.000"],
  ["%#.4g", 100000.0, "1.000e+05"],
  ["%#.0f", 123.0, "123."],
  ["%#.0e", 123.0, "1.e+02"],
  ["%#.0g", 123.0, "1.e+02"],
  ["%#.4f", 123.0, "123.0000"],
  ["%#.4e", 123.0, "1.2300e+02"],
  ["%#.4g", 123.0, "123.0"],
  ["%#.4g", 123000.0, "1.230e+05"],
  ["%#9.4g", 1.0, "    1.000"],
  // The sharp flag has no effect for binary float format.
  // ["%#b", 1.0, "4503599627370496p-52"], // TODO binary for floats
  // Precision has no effect for binary float format.
  //["%.4b", float32(1.0), "8388608p-23"], // TODO s.above
  // ["%.4b", -1.0, "-4503599627370496p-52"],
  // Test correct f.intbuf boundary checks.
  //["%.68f", 1.0, zeroFill("1.", 68, "")], // TODO zerofill
  //["%.68f", -1.0, zeroFill("-1.", 68, "")], //TODO s.a.
  // float infinites and NaNs
  ["%f", Number.POSITIVE_INFINITY, "+Inf"],
  ["%.1f", Number.NEGATIVE_INFINITY, "-Inf"],
  ["% f", NaN, " NaN"],
  ["%20f", Number.POSITIVE_INFINITY, "                +Inf"],
  // ["% 20F", Number.POSITIVE_INFINITY, " Inf"], // TODO : wut?
  ["% 20e", Number.NEGATIVE_INFINITY, "                -Inf"],
  ["%+20E", Number.NEGATIVE_INFINITY, "                -Inf"],
  ["% +20g", Number.NEGATIVE_INFINITY, "                -Inf"],
  ["%+-20G", Number.POSITIVE_INFINITY, "+Inf                "],
  ["%20e", NaN, "                 NaN"],
  ["% +20E", NaN, "                +NaN"],
  ["% -20g", NaN, " NaN                "],
  ["%+-20G", NaN, "+NaN                "],
  // Zero padding does not apply to infinities and NaN.
  ["%+020e", Number.POSITIVE_INFINITY, "                +Inf"],
  ["%-020f", Number.NEGATIVE_INFINITY, "-Inf                "],
  ["%-020E", NaN, "NaN                 "],
  // complex values // go specific
  // old test/fmt_test.go
  ["%e", 1.0, "1.000000e+00"],
  ["%e", 1234.5678e3, "1.234568e+06"],
  ["%e", 1234.5678e-8, "1.234568e-05"],
  ["%e", -7.0, "-7.000000e+00"],
  ["%e", -1e-9, "-1.000000e-09"],
  ["%f", 1234.5678e3, "1234567.800000"],
  ["%f", 1234.5678e-8, "0.000012"],
  ["%f", -7.0, "-7.000000"],
  ["%f", -1e-9, "-0.000000"],
  // ["%g", 1234.5678e3, "1.2345678e+06"],
  // I believe the above test from go is incorrect according to posix, s. above.
  ["%g", 1234.5678e3, "1.23457e+06"],
  //["%g", float32(1234.5678e3), "1.2345678e+06"],
  //["%g", 1234.5678e-8, "1.2345678e-05"], // posix, see above
  ["%g", 1234.5678e-8, "1.23457e-05"],
  ["%g", -7.0, "-7"],
  ["%g", -1e-9, "-1e-09"],
  //["%g", float32(-1e-9), "-1e-09"],
  ["%E", 1.0, "1.000000E+00"],
  ["%E", 1234.5678e3, "1.234568E+06"],
  ["%E", 1234.5678e-8, "1.234568E-05"],
  ["%E", -7.0, "-7.000000E+00"],
  ["%E", -1e-9, "-1.000000E-09"],
  //["%G", 1234.5678e3, "1.2345678E+06"], // posix, see above
  ["%G", 1234.5678e3, "1.23457E+06"],
  //["%G", float32(1234.5678e3), "1.2345678E+06"],
  //["%G", 1234.5678e-8, "1.2345678E-05"], // posic, see above
  ["%G", 1234.5678e-8, "1.23457E-05"],
  ["%G", -7.0, "-7"],
  ["%G", -1e-9, "-1E-09"],
  //["%G", float32(-1e-9), "-1E-09"],
  ["%20.5s", "qwertyuiop", "               qwert"],
  ["%.5s", "qwertyuiop", "qwert"],
  ["%-20.5s", "qwertyuiop", "qwert               "],
  ["%20c", "x".charCodeAt(0), "                   x"],
  ["%-20c", "x".charCodeAt(0), "x                   "],
  ["%20.6e", 1.2345e3, "        1.234500e+03"],
  ["%20.6e", 1.2345e-3, "        1.234500e-03"],
  ["%20e", 1.2345e3, "        1.234500e+03"],
  ["%20e", 1.2345e-3, "        1.234500e-03"],
  ["%20.8e", 1.2345e3, "      1.23450000e+03"],
  ["%20f", 1.23456789e3, "         1234.567890"],
  ["%20f", 1.23456789e-3, "            0.001235"],
  ["%20f", 12345678901.23456789, "  12345678901.234568"],
  ["%-20f", 1.23456789e3, "1234.567890         "],
  ["%20.8f", 1.23456789e3, "       1234.56789000"],
  ["%20.8f", 1.23456789e-3, "          0.00123457"],
  // ["%g", 1.23456789e3, "1234.56789"],
  // posix ... precision(2) = precision(def=6) - (exp(3)+1)
  ["%g", 1.23456789e3, "1234.57"],
  // ["%g", 1.23456789e-3, "0.00123456789"], posix...
  ["%g", 1.23456789e-3, "0.00123457"], // see above prec6 = precdef6 - (-3+1)
  //["%g", 1.23456789e20, "1.23456789e+20"],
  ["%g", 1.23456789e20, "1.23457e+20"],
  // arrays // TODO
  // slice : go specific
  // TODO decide how to handle deeper types, arrays, objects
  // byte arrays and slices with %b,%c,%d,%o,%U and %v
  // f.space should and f.plus should not have an effect with %v.
  // f.space and f.plus should have an effect with %d.
  // Padding with byte slices.
  // Same for strings
  ["%2x", "", "  "], // 103
  ["%#2x", "", "  "],
  ["% 02x", "", "00"],
  ["%# 02x", "", "00"],
  ["%-2x", "", "  "],
  ["%-02x", "", "  "],
  ["%8x", "\xab", "      ab"],
  ["% 8x", "\xab", "      ab"],
  ["%#8x", "\xab", "    0xab"],
  ["%# 8x", "\xab", "    0xab"],
  ["%08x", "\xab", "000000ab"],
  ["% 08x", "\xab", "000000ab"],
  ["%#08x", "\xab", "00000xab"],
  ["%# 08x", "\xab", "00000xab"],
  ["%10x", "\xab\xcd", "      abcd"],
  ["% 10x", "\xab\xcd", "     ab cd"],
  ["%#10x", "\xab\xcd", "    0xabcd"],
  ["%# 10x", "\xab\xcd", " 0xab 0xcd"],
  ["%010x", "\xab\xcd", "000000abcd"],
  ["% 010x", "\xab\xcd", "00000ab cd"],
  ["%#010x", "\xab\xcd", "00000xabcd"],
  ["%# 010x", "\xab\xcd", "00xab 0xcd"],
  ["%-10X", "\xab", "AB        "],
  ["% -010X", "\xab", "AB        "],
  ["%#-10X", "\xab\xcd", "0XABCD    "],
  ["%# -010X", "\xab\xcd", "0XAB 0XCD "],
  // renamings
  // Formatter
  // GoStringer
  // %T TODO possibly %#T object(constructor)
  ["%T", {}, "object"],
  ["%T", 1, "number"],
  ["%T", "", "string"],
  ["%T", undefined, "undefined"],
  ["%T", null, "object"],
  ["%T", S, "function"],
  ["%T", true, "boolean"],
  ["%T", Symbol(), "symbol"],
  // %p with pointers
  // erroneous things
  // {"", nil, "%!(EXTRA <nil>)"},
  // {"", 2, "%!(EXTRA int=2)"},
  // {"no args", "hello", "no args%!(EXTRA string=hello)"},
  // {"%s %", "hello", "hello %!(NOVERB)"},
  // {"%s %.2", "hello", "hello %!(NOVERB)"},
  // {"%017091901790959340919092959340919017929593813360", 0,
  //  "%!(NOVERB)%!(EXTRA int=0)"},
  // {"%184467440737095516170v", 0, "%!(NOVERB)%!(EXTRA int=0)"},
  // // Extra argument errors should format without flags set.
  // {"%010.2", "12345", "%!(NOVERB)%!(EXTRA string=12345)"},
  //
  // // Test that maps with non-reflexive keys print all keys and values.
  // {"%v", map[float64]int{NaN: 1, NaN: 1}, "map[NaN:1 NaN:1]"},
  // more floats
  ["%.2f", 1.0, "1.00"],
  ["%.2f", -1.0, "-1.00"],
  ["% .2f", 1.0, " 1.00"],
  ["% .2f", -1.0, "-1.00"],
  ["%+.2f", 1.0, "+1.00"],
  ["%+.2f", -1.0, "-1.00"],
  ["%7.2f", 1.0, "   1.00"],
  ["%7.2f", -1.0, "  -1.00"],
  ["% 7.2f", 1.0, "   1.00"],
  ["% 7.2f", -1.0, "  -1.00"],
  ["%+7.2f", 1.0, "  +1.00"],
  ["%+7.2f", -1.0, "  -1.00"],
  ["% +7.2f", 1.0, "  +1.00"],
  ["% +7.2f", -1.0, "  -1.00"],
  ["%07.2f", 1.0, "0001.00"],
  ["%07.2f", -1.0, "-001.00"],
  ["% 07.2f", 1.0, " 001.00"], //153 here
  ["% 07.2f", -1.0, "-001.00"],
  ["%+07.2f", 1.0, "+001.00"],
  ["%+07.2f", -1.0, "-001.00"],
  ["% +07.2f", 1.0, "+001.00"],
  ["% +07.2f", -1.0, "-001.00"],
];
Deno.test("testThorough", function (): void {
tests.forEach((t, i): void => {
// p(t)
const is = S(t[0], t[1]);
const should = t[2];
assertEquals(
is,
should,
`failed case[${i}] : is >${is}< should >${should}<`,
);
});
});
Deno.test("testWeirdos", function (): void {
assertEquals(S("%.d", 9), "9");
assertEquals(
S("dec[%d]=%d hex[%[1]d]=%#x oct[%[1]d]=%#o %s", 1, 255, "Third"),
"dec[1]=255 hex[1]=0xff oct[1]=0377 Third",
);
});
Deno.test("formatV", function (): void {
const a = { a: { a: { a: { a: { a: { a: { a: {} } } } } } } };
assertEquals(S("%v", a), "[object Object]");
assertEquals(S("%#v", a), `{ a: { a: { a: { a: [Object] } } } }`);
assertEquals(
S("%#.8v", a),
"{ a: { a: { a: { a: { a: { a: { a: {} } } } } } } }",
);
assertEquals(S("%#.1v", a), `{ a: [Object] }`);
});
Deno.test("formatJ", function (): void {
const a = { a: { a: { a: { a: { a: { a: { a: {} } } } } } } };
assertEquals(S("%j", a), `{"a":{"a":{"a":{"a":{"a":{"a":{"a":{}}}}}}}}`);
});
Deno.test("flagLessThan", function (): void {
const a = { a: { a: { a: { a: { a: { a: { a: {} } } } } } } };
const aArray = [a, a, a];
assertEquals(
S("%<#.1v", aArray),
`[ { a: [Object] }, { a: [Object] }, { a: [Object] } ]`,
);
const fArray = [1.2345, 0.98765, 123456789.5678];
assertEquals(S("%<.2f", fArray), "[ 1.23, 0.99, 123456789.57 ]");
});
Deno.test("testErrors", function (): void {
// wrong type : TODO strict mode ...
//assertEquals(S("%f", "not a number"), "%!(BADTYPE flag=f type=string)")
assertEquals(S("A %h", ""), "A %!(BAD VERB 'h')");
assertEquals(S("%J", ""), "%!(BAD VERB 'J')");
assertEquals(S("bla%J", ""), "bla%!(BAD VERB 'J')");
assertEquals(S("%Jbla", ""), "%!(BAD VERB 'J')bla");
assertEquals(S("%d"), "%!(MISSING 'd')");
assertEquals(S("%d %d", 1), "1 %!(MISSING 'd')");
assertEquals(S("%d %f A", 1), "1 %!(MISSING 'f') A");
assertEquals(S("%*.2f", "a", 1.1), "%!(BAD WIDTH 'a')");
assertEquals(S("%.*f", "a", 1.1), "%!(BAD PREC 'a')");
assertEquals(
S("%.[2]*f", 1.23, "p"),
`%!(BAD PREC 'p')%!(EXTRA '1.23')`,
);
assertEquals(S("%.[2]*[1]f Yippie!", 1.23, "p"), "%!(BAD PREC 'p') Yippie!");
assertEquals(S("%[1]*.2f", "a", "p"), "%!(BAD WIDTH 'a')");
assertEquals(S("A", "a", "p"), `A%!(EXTRA '"a"' '"p"')`);
assertEquals(S("%[2]s %[2]s", "a", "p"), `p p%!(EXTRA '"a"')`);
// remains to be determined how to handle bad indices ...
// (realistically) the entire error handling is still up for grabs.
assertEquals(S("%[hallo]s %d %d %d", 1, 2, 3, 4), "%!(BAD INDEX) 2 3 4");
assertEquals(
S("%[5]s", 1, 2, 3, 4),
`%!(BAD INDEX)%!(EXTRA '2' '3' '4')`,
);
assertEquals(S("%[5]f"), "%!(BAD INDEX)");
assertEquals(S("%.[5]f"), "%!(BAD INDEX)");
assertEquals(S("%.[5]*f"), "%!(BAD INDEX)");
});
|
import React from 'react';
import createSvgIcon from './helpers/createSvgIcon';
// Auto-generated Material-UI icon component ("LocalLaundryService", outlined
// variant). The SVG path data below is machine-produced; do not hand-edit.
export default createSvgIcon(
  <React.Fragment><path d="M18 2.01L6 2c-1.11 0-2 .89-2 2v16c0 1.11.89 2 2 2h12c1.11 0 2-.89 2-2V4c0-1.11-.89-1.99-2-1.99zM18 20H6L5.99 4H18v16z" /><circle cx="8" cy="6" r="1" /><circle cx="11" cy="6" r="1" /><path d="M12 19c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm2.36-7.36c1.3 1.3 1.3 3.42 0 4.72-1.3 1.3-3.42 1.3-4.72 0l4.72-4.72z" /></React.Fragment>,
  'LocalLaundryServiceOutlined',
);
|
package io.github.shenbinglife.validators.anno.meta;
import java.lang.annotation.*;
/**
 * Marks an attribute of a validation annotation as a message-interpolation
 * placeholder.
 * <p>
 * By default the framework provides two placeholders: {@code field} (the field
 * name declared on the annotation) and {@code val} (the string form of the
 * object being validated).
 * </p>
 * <p>
 * Put this annotation on a method declared inside a validation annotation to
 * make that method's value usable as a string placeholder inside the
 * annotation's {@code emsg} template.
 * </p>
 * Example:
 *
 * <pre>
 * public @interface Equals {
 *
 *     &#64;MsgFiller("value")
 *     String value();
 *
 *     String emsg() default "${field} must not equal ${value}";
 * }
 * </pre>
 *
 * @author shenbing
 * @version 2017/11/27
 * @since since
 */
@Documented
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface MsgFiller {
    /** Placeholder name made available inside {@code emsg}. */
    String value();
}
|
/**
 * Creates a formatted String listing the undone commands.
 *
 * @param undoneRecords the history records that were just undone
 * @return a message built from {@code MESSAGE_UNDO_SUCCESS}, filled with the
 *         number of undone records, a plural suffix ("s" when more than one),
 *         and one line per undone command
 */
public static String makeResultString(List<HistoryRecord> undoneRecords) {
    StringBuilder sb = new StringBuilder();
    for (HistoryRecord record : undoneRecords) {
        // Prefer the original command text; fall back to the record's string form.
        sb.append(record.getCommand().getCommandText().orElse(record.toString()))
                .append("\n");
    }
    return String.format(MESSAGE_UNDO_SUCCESS,
            undoneRecords.size(), undoneRecords.size() > 1 ? "s" : "", sb.toString());
}
<reponame>IsaiahPressman/Kaggle_Hungry_Geese<filename>handcrafted_agents/public/debug.py
from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, row_col
def agent(obs_dict, config_dict):
    """Debugging agent: dumps its inputs and always moves NORTH."""
    parsed_obs = Observation(obs_dict)
    parsed_conf = Configuration(config_dict)
    # Print the raw dicts and their typed wrappers, then a separator line.
    for dump in (obs_dict, config_dict, parsed_obs, parsed_conf):
        print(dump)
    print()
    return Action.NORTH.name
|
package wrap_test
import (
"testing"
"github.com/lucacasonato/wrap"
)
// createDatabase opens the shared test connection and returns a handle to the
// "testing" database.
func createDatabase() (*wrap.Database, error) {
	c, err := connect()
	if err != nil {
		return nil, err
	}
	return c.Database("testing"), nil
}
// TestDatabase verifies that a database handle can be obtained.
func TestDatabase(t *testing.T) {
	db, err := createDatabase()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(db)
}
// TestDatabaseDelete verifies that the test database can be dropped.
func TestDatabaseDelete(t *testing.T) {
	db, err := createDatabase()
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Delete(); err != nil {
		t.Fatal(err)
	}
}
|
<reponame>baian1/NeteaseMusiceClient
import {
ADD_SONG,
SET_INDEX,
DELETE_SONG,
SET_SONG_SRC,
DELETE_SONG_BY_INDEX,
SET_TIMER,
CHANGE_SONG_PLAYING,
SET_VOLUME,
SET_PLAY_MODE,
NXET_SONG,
} from "./constants"
import { Play } from "@/api/request"
/**
 * Queue one song, or a batch of songs, onto the play list.
 * @param data a single Play or an array of Plays
 */
export const addSong = (data: Play[] | Play) => {
  const songs: Play[] = Array.isArray(data) ? data : [data]
  return {
    type: ADD_SONG,
    data: songs,
  } as const
}
/**
 * Remove one song id, or a batch of ids, from the play list.
 */
export const deleteSong = (data: { id: number[] | number }) => {
  const ids: number[] = Array.isArray(data.id) ? data.id : [data.id]
  return {
    type: DELETE_SONG,
    data: { id: ids },
  } as const
}
/** Remove the song at the given list position. */
export const deleteSongByIndex = (data: { index: number }) =>
  ({
    type: DELETE_SONG_BY_INDEX,
    data,
  } as const)
/**
 * Playback-position actions: jump to an index, step to the previous or next
 * track, or switch the play mode.
 */
export const setIndex = (data: { index: number }) => {
  return {
    type: SET_INDEX,
    data,
  } as const
}
export const nextSong = (direction: "pre" | "next") => {
  return {
    // NOTE(review): NXET_SONG looks like a typo of NEXT_SONG, but the name is
    // declared in ./constants and must match it.
    type: NXET_SONG,
    data: {
      direction,
    },
  } as const
}
export const setPlayMode = (mode: 0 | 1 | 2) => {
  return {
    type: SET_PLAY_MODE,
    data: {
      mode,
    },
  } as const
}
/**
 * Actions for the currently loaded song: source url, playback clock,
 * play/pause flag, and volume.
 */
export const setSong = (song: Play, src: string) => {
  return {
    type: SET_SONG_SRC,
    data: {
      song,
      src,
    },
  } as const
}
export const setTimer = (timing: { current: number; duration: number }) => {
  return {
    type: SET_TIMER,
    data: {
      current: timing.current,
      duration: timing.duration,
    },
  } as const
}
export const changeSongPlaying = (playing?: boolean) => {
  return {
    type: CHANGE_SONG_PLAYING,
    data: {
      playing,
    },
  } as const
}
export const setVolume = (volume: number) => {
  return {
    type: SET_VOLUME,
    data: {
      volume,
    },
  } as const
}
|
/**
* Returns the matching symbol to a given character that is relevant for the
* 41++ language.
*/
public static char matchingSymbol(char start) {
switch (start) {
case '[':
return ']';
case ']':
return '[';
case '(':
return ')';
case ')':
return '(';
case '\'':
return '\'';
case '\"':
return '\"';
}
return 0;
} |
/* (c) <NAME>. See "licence DDRace.txt" and the readme.txt in the root of the distribution for more information. */
#ifndef DDRACE_H
#define DDRACE_H
#include <game/server/gamecontroller.h>
#include <game/server/teams.h>
#include <game/server/entities/door.h>
#include <vector>
#include <map>
// Game controller for the DDRace mode, layered on the generic IGameController.
class CGameControllerDDRace : public IGameController
{
public:
	CGameControllerDDRace(class CGameContext *pGameServer);
	~CGameControllerDDRace();
	// Team bookkeeping for this game instance.
	CGameTeams m_Teams;
	// Maps a teleporter number to every "out" position it can place players at.
	std::map < int , std::vector < vec2 > > m_TeleOuts;
	// Populates m_TeleOuts. NOTE(review): implementation not visible here;
	// presumably called once after map load - confirm against the caller.
	void InitTeleporter();
	// Per-tick game-state update (overrides IGameController).
	virtual void Tick();
};
#endif
|
def split_format(self, ev, data):
    """Pick the partition timestamp for an event and build the partition name.

    The timestamp source is selected by ``self.conf.part_mode``:
    ``batch_time`` (end of current batch), ``event_time`` (the event's own
    time), ``current_time`` (wall clock), or ``date_field`` (a timestamp
    string taken from ``data``).  Returns a ``(partition_name, dtm)`` tuple.
    """
    mode = self.conf.part_mode
    if mode == 'batch_time':
        dtm = self.batch_info['batch_end']
    elif mode == 'event_time':
        dtm = ev.ev_time
    elif mode == 'current_time':
        dtm = datetime.datetime.now()
    elif mode == 'date_field':
        raw = data[self.conf.part_field]
        if raw is None:
            raise Exception('part_field(%s) is NULL: %s' % (self.conf.part_field, ev))
        # Only the first 19 chars are parsed: "YYYY-MM-DD HH:MM:SS".
        dtm = datetime.datetime.strptime(raw[:19], "%Y-%m-%d %H:%M:%S")
    else:
        raise UsageError('Bad value for part_mode: %s' % self.conf.part_mode)
    subst = {
        'parent': self.dest_table,
        'year': "%04d" % dtm.year,
        'month': "%02d" % dtm.month,
        'day': "%02d" % dtm.day,
        'hour': "%02d" % dtm.hour,
    }
    return (self.get_part_name() % subst, dtm)
Alternative Media and Citizen Journalists across the web are in a storm over the latest developments on the Korean peninsula.
North Korea test-fired another missile on April 5th, 2017, ahead of the meeting between Chinese President Xi Jinping and President Donald Trump.
Strong language has been coming from Secretary of State Rex Tillerson over the last few days warning that a preemptive military strike on North Korea’s nuclear facilities was not off the table. As news on North Korea’s latest missile test broke, Rex Tillerson was quoted as saying,
“The United States has spoken enough about North Korea. We have no further comment.”
What most people don’t understand is that the Korean War actually never ended. Although there was an armistice between U.S. and North Korean forces, no peace treaty was ever really signed.
This armistice signaled the end of hostilities in the Korean peninsula until a final peace agreement could be found and it established the Korean Demilitarized Zone (DMZ). Negotiators could not agree on conditions for top commanders to sign the armistice together in person, so the document was signed on July 27, 1953, by two delegates: U.S. Army Lieutenant General William Harrison, representing the United Nations Command, and North Korean General Nam Il, representing both the Supreme Commander of the Korean People’s Army and the Commander of the Chinese People’s Volunteers. Copies of the agreement were signed separately by UN General Mark W. Clark, Supreme Commander of the Korean People’s Army Kim Il Sung, and Commander of the Chinese People’s Volunteers Peng Dehuai. Several times, North Korea has stated that it no longer recognizes the armistice, in 1994, 1996, 2003, 2006, 2009, and 2013.
Korean War Armistice Agreement – CFR Official Website
This is all happening amid rising tensions between China and its U.S. allied neighbors. China has been taking aggressive stances towards South Korea, Japan, and Taiwan. This coupled with threats from North Korea has triggered the U.S. to take steps to safeguard its allies in the region.
If North Korea strikes South Korea or Japan, the U.S. will retaliate. China won’t sit by with its fingers crossed, they will get involved.
This is just a small sampling of news stories showcasing the kind of situation that is developing in the Asian theater.
My Perspective On A Russia-China Alliance
This all reminds me of the time period leading up to World War 1. Although the players are different, the geopolitical implications and signals are very similar.
China is seeking to become the preeminent global power and there are rumors that they are working with Russia to undermine the western run globalist factions.
We have leadership here in the United States that is priming the pump for war with Russia. They claim that Russia rigged our elections and have stood behind allegations that President Trump is an agent of Vladimir Putin while ignoring the looming Chinese threat.
Instead of seeking to improve relations with Russia, both the Democratic and Republican establishments have looked to scapegoat Russia with the purpose of covering up their own corruption and criminal activities. The globalist deep state within our own government attempted to hack the 2016 Presidential Election to secure a Hillary Clinton victory and still failed. It was agents inside our own intelligence services that leaked information to Wikileaks however it is Russia that is taking the blame for the coup and counter-coup that we saw unfold.
If this continues and we do not change our attitude on how we deal with Russia, there is a real risk that we could be pushing them into a military and economic partnership with China. Although the governments of both nations have disdain for each other, and would actively like to see each other fall, both understand that they have the potential to undermine the western globalists and their agenda and are increasingly looking to the United States as an enemy.
“The Enemy of my Enemy is my Friend”
The economic and military power of an alliance between Russia and China would be enormous and formidable. They would more than likely threaten open warfare with the West and would feel as if they had sufficient strength to begin acting aggressively. Russia would most likely seek to unify with its former Soviet satellite states, and China would probably feel comfortable invading Taiwan or intensifying its military push into the South China Sea, possibly even attempting an invasion of Japan.
A North Korean strike on South Korea or Japan would immediately reignite the war between North Korea and the United States. It would be open warfare.
I fear that with current circumstances as they are, preemptive military action against North Korea could literally spawn open hostilities between the United States and China. If China strikes one of our allies in retaliation it would warrant a military response from American forces and would trigger a conventional or thermonuclear war. Russia would find itself free to act on the European continent as the U.S. and NATO would be occupied with China and North Korea.
The United States and their allies do not have the manpower and equipment to fight a war on so many fronts.
The West is not currently equipped to deal with such a threat.
Image Credit: Stefan Krasowski
Advertisements |
<gh_stars>0
use rayon::prelude::*;
/// ChaCha8 cipher state: the 16-word (512-bit) input block holding the
/// constants, the key, and the block counter (IV words stay zero here).
#[derive(Default)]
pub struct ChaCha8 {
    input: [u32; 16],
}

// ChaCha constant used with 256-bit keys ("expand 32-byte k").
const SIGMA: [u8; 16] = *b"expand 32-byte k";
// Constant for 128-bit keys; unused while only 256-bit keys are supported.
// const TAU: [u8; 16] = *b"expand 16-byte k";
/// Place byte `i` into a u32 at byte slot `idx`, where idx 3 lands in the
/// least-significant byte (shift = 24 - idx*8) and idx 0 in the most-significant.
/// NOTE(review): `.to_le()` is a no-op on little-endian targets; verify the
/// intended behavior on big-endian targets before relying on it there.
#[inline]
const fn u8to32_idx_little(i: u8, idx: u8) -> u32 {
    (i as u32).to_le() << (24 - idx * 8)
}
/// Assemble four bytes into a u32 in little-endian order:
/// `i1` becomes the least-significant byte, `i4` the most-significant.
#[inline]
const fn u8sto32_little(i1: u8, i2: u8, i3: u8, i4: u8) -> u32 {
    u8to32_idx_little(i1, 3)
        | u8to32_idx_little(i2, 2)
        | u8to32_idx_little(i3, 1)
        | u8to32_idx_little(i4, 0)
}
/// Serialise `i` into the first four bytes of `o` in little-endian order.
#[inline]
fn u32to8_little(i: u32, o: &mut [u8]) {
    o[..4].copy_from_slice(&i.to_le_bytes());
}
/// Rotate `v` left by `n` bits; the ChaCha rounds only call this with
/// n in {16, 12, 8, 7} (n = 0 would overflow the right shift).
/// NOTE(review): the pervasive `.to_le()` calls are no-ops on little-endian
/// targets, where this equals `v.rotate_left(n)`; big-endian behavior unverified.
#[inline]
const fn rotl32(v: u32, n: u32) -> u32 {
    (v.to_le() << n.to_le()) | (v.to_le() >> (32_u32.to_le().wrapping_sub(n.to_le())))
}
/// Alias for `rotl32`, matching the reference implementation's naming.
#[inline]
const fn rotate(v: u32, c: u32) -> u32 {
    rotl32(v, c)
}
/// Bitwise XOR of two words (`.to_le()` is a no-op on little-endian targets).
#[inline]
const fn xor(v: u32, w: u32) -> u32 {
    v.to_le() ^ w.to_le()
}
/// Modular (wrapping) 32-bit addition, as required by the ChaCha rounds.
#[inline]
const fn plus(v: u32, w: u32) -> u32 {
    v.wrapping_add(w)
}
/// The ChaCha quarter round: mixes four state words in place using
/// add / xor / rotate with the fixed rotation amounts 16, 12, 8, 7.
macro_rules! quarter_round {
    ($a:expr, $b:expr, $c:expr, $d:expr) => {
        $a = plus($a, $b);
        $d = rotate(xor($d, $a), 16);
        $c = plus($c, $d);
        $b = rotate(xor($b, $c), 12);
        $a = plus($a, $b);
        $d = rotate(xor($d, $a), 8);
        $c = plus($c, $d);
        $b = rotate(xor($b, $c), 7);
    };
}
impl ChaCha8 {
    /// Initialise the cipher state from a 256-bit key: the SIGMA constant
    /// fills words 0..4 and the key fills words 4..12. Words 12..16
    /// (block counter and IV) remain zero from `Default`.
    pub fn new_from_256bit_key(k: &[u8; 32]) -> Self {
        let mut x = Self::default();

        x.input[0] = u8sto32_little(SIGMA[0], SIGMA[1], SIGMA[2], SIGMA[3]);
        x.input[1] = u8sto32_little(SIGMA[4], SIGMA[5], SIGMA[6], SIGMA[7]);
        x.input[2] = u8sto32_little(SIGMA[8], SIGMA[9], SIGMA[10], SIGMA[11]);
        x.input[3] = u8sto32_little(SIGMA[12], SIGMA[13], SIGMA[14], SIGMA[15]);
        x.input[4] = u8sto32_little(k[0], k[1], k[2], k[3]);
        x.input[5] = u8sto32_little(k[4], k[5], k[6], k[7]);
        x.input[6] = u8sto32_little(k[8], k[9], k[10], k[11]);
        x.input[7] = u8sto32_little(k[12], k[13], k[14], k[15]);
        x.input[8] = u8sto32_little(k[16], k[17], k[18], k[19]);
        x.input[9] = u8sto32_little(k[20], k[21], k[22], k[23]);
        x.input[10] = u8sto32_little(k[24], k[25], k[26], k[27]);
        x.input[11] = u8sto32_little(k[28], k[29], k[30], k[31]);

        // IV is always None
        // x.input[14] = 0;
        // x.input[15] = 0;

        x
    }

    // pub fn new_from_128bit_key(k: &[u8; 32]) -> Self {
    //     let mut x = Self::default();
    //
    //     x.input[0] = u8sto32_little(TAU[0], TAU[1], TAU[2], TAU[3]);
    //     x.input[1] = u8sto32_little(TAU[4], TAU[5], TAU[6], TAU[7]);
    //     x.input[2] = u8sto32_little(TAU[8], TAU[9], TAU[10], TAU[11]);
    //     x.input[3] = u8sto32_little(TAU[12], TAU[13], TAU[14], TAU[15]);
    //     x.input[4] = u8sto32_little(k[0], k[1], k[2], k[3]);
    //     x.input[5] = u8sto32_little(k[4], k[5], k[6], k[7]);
    //     x.input[6] = u8sto32_little(k[8], k[9], k[10], k[11]);
    //     x.input[7] = u8sto32_little(k[12], k[13], k[14], k[15]);
    //     x.input[8] = u8sto32_little(k[16], k[17], k[18], k[19]);
    //     x.input[9] = u8sto32_little(k[20], k[21], k[22], k[23]);
    //     x.input[10] = u8sto32_little(k[24], k[25], k[26], k[27]);
    //     x.input[11] = u8sto32_little(k[28], k[29], k[30], k[31]);
    //
    //     // IV is always None
    //     // x.input[14] = 0;
    //     // x.input[15] = 0;
    //
    //     x
    // }

    /// Generate keystream into `inp`, starting at 64-byte block number `pos`.
    /// Chunks are processed in parallel with rayon; each chunk derives its own
    /// block counter (state words 12/13) from `pos + chunk_index`, so the output
    /// is deterministic and independent of thread scheduling.
    /// NOTE(review): `par_chunks_exact_mut(64)` skips a trailing partial chunk,
    /// so any final `inp.len() % 64` bytes are left unmodified — confirm callers
    /// always pass buffers sized in multiples of 64.
    pub fn get_keystream(&self, pos: u64, inp: &mut [u8]) {
        inp.par_chunks_exact_mut(64)
            .enumerate()
            .for_each(|(i, chunk)| {
                // Split the 64-bit block counter into low (word 12) and high (word 13).
                let j12_13 = pos + i as u64;
                let j12 = j12_13 as u32;
                let j13 = (j12_13 >> 32) as u32;
                // Per-block input state (the `as u32` casts are redundant no-ops).
                let j = [
                    self.input[0] as u32,
                    self.input[1] as u32,
                    self.input[2] as u32,
                    self.input[3] as u32,
                    self.input[4] as u32,
                    self.input[5] as u32,
                    self.input[6] as u32,
                    self.input[7] as u32,
                    self.input[8] as u32,
                    self.input[9] as u32,
                    self.input[10] as u32,
                    self.input[11] as u32,
                    j12,
                    j13,
                    self.input[14] as u32,
                    self.input[15] as u32,
                ];
                let mut x = j.clone();
                // 4 double rounds = 8 ChaCha rounds: a column round then a diagonal round.
                for _i in 0..4 {
                    quarter_round!(x[0], x[4], x[8], x[12]);
                    quarter_round!(x[1], x[5], x[9], x[13]);
                    quarter_round!(x[2], x[6], x[10], x[14]);
                    quarter_round!(x[3], x[7], x[11], x[15]);
                    quarter_round!(x[0], x[5], x[10], x[15]);
                    quarter_round!(x[1], x[6], x[11], x[12]);
                    quarter_round!(x[2], x[7], x[8], x[13]);
                    quarter_round!(x[3], x[4], x[9], x[14]);
                }
                // Feed-forward: add the input block back into the working state.
                for i in 0..16 {
                    x[i] = plus(x[i], j[i]);
                }
                // Serialise the 16 output words little-endian into the chunk.
                u32to8_little(x[0], &mut chunk[0..=3]);
                u32to8_little(x[1], &mut chunk[4..=7]);
                u32to8_little(x[2], &mut chunk[8..=11]);
                u32to8_little(x[3], &mut chunk[12..=15]);
                u32to8_little(x[4], &mut chunk[16..=19]);
                u32to8_little(x[5], &mut chunk[20..=23]);
                u32to8_little(x[6], &mut chunk[24..=27]);
                u32to8_little(x[7], &mut chunk[28..=31]);
                u32to8_little(x[8], &mut chunk[32..=35]);
                u32to8_little(x[9], &mut chunk[36..=39]);
                u32to8_little(x[10], &mut chunk[40..=43]);
                u32to8_little(x[11], &mut chunk[44..=47]);
                u32to8_little(x[12], &mut chunk[48..=51]);
                u32to8_little(x[13], &mut chunk[52..=55]);
                u32to8_little(x[14], &mut chunk[56..=59]);
                u32to8_little(x[15], &mut chunk[60..=63]);
            })
    }
}
|
def power_spectrum(self, time, radius=None, **kwargs):
    """Return the power spectrum of the model coefficients at ``time``.

    ``radius`` defaults to the configured surface radius
    (``basicConfig['params.r_surf']``). Extra keyword arguments are
    forwarded to :meth:`synth_coeffs`.
    """
    if radius is None:
        radius = basicConfig['params.r_surf']
    coeffs = self.synth_coeffs(time, **kwargs)
    return mu.power_spectrum(coeffs, radius)
package org.mitre.synthea.export.rif;
import java.io.IOException;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import org.mitre.synthea.export.rif.enrollment.PartDContractHistory;
import org.mitre.synthea.export.rif.identifiers.PartDContractID;
import org.mitre.synthea.helpers.Config;
import org.mitre.synthea.helpers.SimpleCSV;
import org.mitre.synthea.helpers.Utilities;
import org.mitre.synthea.world.agents.Person;
import org.mitre.synthea.world.concepts.Claim;
import org.mitre.synthea.world.concepts.HealthRecord;
/**
* Exporter for RIF PDE (prescription) file.
*/
public class PDEExporter extends RIFExporter {

  // Part D out-of-pocket attachment points by calendar year, loaded once
  // from costs/pde_oop_thresholds.csv at class-load time.
  private static final Map<Integer, Double> pdeOutOfPocketThresholds = getThresholds();
  // PDE claim IDs are allocated by counting down from the configured start value.
  public static final AtomicLong nextPdeId = new AtomicLong(Config.getAsLong(
      "exporter.bfd.pde_id_start", -1));

  /**
   * Load the yearly Part D out-of-pocket thresholds from
   * costs/pde_oop_thresholds.csv (columns YEAR and THRESHOLD).
   * @return map of year to threshold amount
   */
  private static Map<Integer, Double> getThresholds() {
    Map<Integer, Double> pdeOutOfPocketThresholds = new HashMap<>();
    try {
      String csv = Utilities.readResourceAndStripBOM("costs/pde_oop_thresholds.csv");
      for (LinkedHashMap<String, String> row : SimpleCSV.parse(csv)) {
        int year = Integer.parseInt(row.get("YEAR"));
        double threshold = Double.parseDouble(row.get("THRESHOLD"));
        pdeOutOfPocketThresholds.put(year, threshold);
      }
      return pdeOutOfPocketThresholds;
    } catch (IOException e) {
      // A missing or malformed resource is fatal since the thresholds are
      // required for static initialization.
      throw new RuntimeException(e);
    }
  }

  /**
   * Construct an exporter for PDE claims.
   * @param exporter the exporter instance that will be used to access code mappers
   */
  public PDEExporter(BB2RIFExporter exporter) {
    super(exporter);
  }

  /**
   * Export PDE claims details for a single person.
   * @param person the person to export
   * @param startTime earliest claim date to export
   * @param stopTime end time of simulation
   * @return count of claims exported
   * @throws IOException if something goes wrong
   */
  long export(Person person, long startTime, long stopTime) throws IOException {
    long claimCount = 0;
    PartDContractHistory partDContracts =
        (PartDContractHistory) person.attributes.get(RIFExporter.BB2_PARTD_CONTRACTS);
    // Build a chronologically ordered list of prescription fills (including refills where
    // specified).
    List<PrescriptionFill> prescriptionFills = new LinkedList<>();
    for (HealthRecord.Encounter encounter : person.record.encounters) {
      // Skip encounters outside the export window or before the claim cutoff.
      if (encounter.stop < startTime || encounter.stop < CLAIM_CUTOFF) {
        continue;
      }
      if (!hasPartABCoverage(person, encounter.stop)) {
        continue;
      }
      if (RIFExporter.isVAorIHS(encounter)) {
        continue;
      }
      for (HealthRecord.Medication medication : encounter.medications) {
        if (!exporter.medicationCodeMapper.canMap(medication.codes.get(0))) {
          continue; // skip codes that can't be mapped to NDC
        }
        long supplyDaysMax = 90; // TBD - 30, 60, 90 day refil schedules?
        long supplyInterval = supplyDaysMax * 24 * 60 * 60 * 1000;
        long finishTime = medication.stop == 0L ? stopTime : Long.min(medication.stop, stopTime);
        String medicationCode = exporter.medicationCodeMapper.map(medication.codes.get(0),
            person);
        long time = medication.start;
        int fillNo = 1;
        // Walk forward through refills until the medication or simulation ends.
        while (time < finishTime) {
          PartDContractID partDContractID = partDContracts.getContractID(time);
          PrescriptionFill fill = new PrescriptionFill(time, encounter, medication,
              medicationCode, fillNo, partDContractID, supplyInterval, finishTime);
          // Only fills during periods with a Part D contract are exported.
          if (partDContractID != null) {
            prescriptionFills.add(fill);
          }
          if (!fill.refillsRemaining()) {
            break;
          }
          time += Long.min((long)fill.days * 24 * 60 * 60 * 1000, supplyInterval);
          fillNo++;
        }
      }
    }
    Collections.sort(prescriptionFills);

    // Export each prescription fill to RIF format
    HashMap<BB2RIFStructure.PDE, String> fieldValues = new HashMap<>();
    BigDecimal costs = Claim.ZERO_CENTS;
    int costYear = 0;
    String catastrophicCode = "";
    for (PrescriptionFill fill: prescriptionFills) {
      long pdeId = nextPdeId.getAndDecrement();
      long claimGroupId = RIFExporter.nextClaimGroupId.getAndDecrement();

      fieldValues.clear();
      exporter.staticFieldConfig.setValues(fieldValues, BB2RIFStructure.PDE.class, person);

      // The REQUIRED fields
      fieldValues.put(BB2RIFStructure.PDE.PDE_ID, "" + pdeId);
      fieldValues.put(BB2RIFStructure.PDE.CLM_GRP_ID, "" + claimGroupId);
      fieldValues.put(BB2RIFStructure.PDE.BENE_ID,
          (String)person.attributes.get(RIFExporter.BB2_BENE_ID));
      fieldValues.put(BB2RIFStructure.PDE.SRVC_DT, RIFExporter.bb2DateFromTimestamp(fill.time));
      fieldValues.put(BB2RIFStructure.PDE.SRVC_PRVDR_ID, fill.encounter.provider.cmsProviderNum);
      fieldValues.put(BB2RIFStructure.PDE.PRSCRBR_ID,
          "" + (9_999_999_999L - fill.encounter.clinician.identifier));
      fieldValues.put(BB2RIFStructure.PDE.RX_SRVC_RFRNC_NUM, "" + pdeId);
      fieldValues.put(BB2RIFStructure.PDE.PROD_SRVC_ID, fill.medicationCode);
      // The following field was replaced by the PartD contract ID, leaving this here for now
      // until this is validated
      // H=hmo, R=ppo, S=stand-alone, E=employer direct, X=limited income
      // fieldValues.put(PrescriptionFields.PLAN_CNTRCT_REC_ID,
      //     ("R" + Math.abs(
      //         UUID.fromString(medication.claim.payer.uuid)
      //         .getMostSignificantBits())).substring(0, 5));
      fieldValues.put(BB2RIFStructure.PDE.PLAN_CNTRCT_REC_ID, fill.partDContractID.toString());
      fieldValues.put(BB2RIFStructure.PDE.DAW_PROD_SLCTN_CD, "" + (int) person.rand(0, 9));
      fieldValues.put(BB2RIFStructure.PDE.QTY_DSPNSD_NUM, "" + fill.quantity);
      fieldValues.put(BB2RIFStructure.PDE.DAYS_SUPLY_NUM, "" + fill.days);
      fieldValues.put(BB2RIFStructure.PDE.FILL_NUM, "" + fill.fillNo);
      int year = Utilities.getYear(fill.time);
      // Out-of-pocket accumulation resets at each new calendar year.
      if (year != costYear) {
        costYear = year;
        costs = Claim.ZERO_CENTS;
        catastrophicCode = ""; // Blank = Attachment point not met
      }
      BigDecimal threshold = getDrugOutOfPocketThreshold(year);
      costs = costs.add(fill.medication.claim.getTotalPatientCost());
      if (costs.compareTo(threshold) < 0) {
        // Below the attachment point: all accumulated cost is "below OOPT".
        fieldValues.put(BB2RIFStructure.PDE.GDC_BLW_OOPT_AMT, String.format("%.2f", costs));
        fieldValues.put(BB2RIFStructure.PDE.GDC_ABV_OOPT_AMT, "0");
        fieldValues.put(BB2RIFStructure.PDE.CTSTRPHC_CVRG_CD, catastrophicCode);
      } else {
        if (catastrophicCode.equals("")) {
          catastrophicCode = "A"; // A = Attachment point met on this event
        } else if (catastrophicCode.equals("A")) {
          catastrophicCode = "C"; // C = Above attachment point
        }
        fieldValues.put(BB2RIFStructure.PDE.GDC_BLW_OOPT_AMT, String.format("%.2f", threshold));
        fieldValues.put(BB2RIFStructure.PDE.GDC_ABV_OOPT_AMT,
            String.format("%.2f", costs.subtract(threshold)));
        fieldValues.put(BB2RIFStructure.PDE.CTSTRPHC_CVRG_CD, catastrophicCode);
      }
      fieldValues.put(BB2RIFStructure.PDE.TOT_RX_CST_AMT,
          String.format("%.2f", fill.medication.claim.getTotalClaimCost()));
      // Under normal circumstances, the following fields summed together,
      // should equal TOT_RX_CST_AMT:
      // - PTNT_PAY_AMT : what the patient paid
      // - OTHR_TROOP_AMT : what 3rd party paid out of pocket
      // - LICS_AMT : low income subsidized payment
      // - PLRO_AMT : what other 3rd party insurances paid
      // - CVRD_D_PLAN_PD_AMT : what Part D paid
      // - NCVRD_PLAN_PD_AMT : part of total not covered by Part D whatsoever
      // OTHR_TROOP_AMT and LICS_AMT are always 0, set in field value spreadsheet
      // TODO: make claim copay match the designated cost sharing code, see
      // PartDContractHistory.getPartDCostSharingCode
      fieldValues.put(BB2RIFStructure.PDE.PTNT_PAY_AMT,
          String.format("%.2f", fill.medication.claim.getTotalPatientCost()));
      fieldValues.put(BB2RIFStructure.PDE.PLRO_AMT,
          String.format("%.2f", fill.medication.claim.getTotalPaidBySecondaryPayer()));
      fieldValues.put(BB2RIFStructure.PDE.CVRD_D_PLAN_PD_AMT,
          String.format("%.2f", fill.medication.claim.getTotalCoveredCost()));
      fieldValues.put(BB2RIFStructure.PDE.NCVRD_PLAN_PD_AMT,
          String.format("%.2f", fill.medication.claim.getTotalAdjustment()));
      fieldValues.put(BB2RIFStructure.PDE.PHRMCY_SRVC_TYPE_CD, "0" + (int) person.rand(1, 8));
      fieldValues.put(BB2RIFStructure.PDE.PD_DT, RIFExporter.bb2DateFromTimestamp(fill.time));
      String residenceCode = getResidenceCode(person, fill.encounter);
      fieldValues.put(BB2RIFStructure.PDE.PTNT_RSDNC_CD, residenceCode);
      exporter.rifWriters.writeValues(BB2RIFStructure.PDE.class, fieldValues);
      claimCount++;
    }
    return claimCount;
  }

  /**
   * Look up the Part D out-of-pocket threshold for the supplied year,
   * falling back to 4550.0 for years absent from the CSV.
   * @param year calendar year
   * @return threshold amount
   */
  private static BigDecimal getDrugOutOfPocketThreshold(int year) {
    double threshold = pdeOutOfPocketThresholds.getOrDefault(year, 4550.0);
    return BigDecimal.valueOf(threshold);
  }

  /**
   * Pick a patient residence code (PTNT_RSDNC_CD) based on the claim types
   * of the encounter, using a random draw where a claim type maps to
   * several possible codes.
   * @param person source of randomness and the "homeless" attribute
   * @param encounter the encounter the fill belongs to
   * @return two-character residence code
   */
  private static String getResidenceCode(Person person, HealthRecord.Encounter encounter) {
    Set<ClaimType> claimTypes = RIFExporter.getClaimTypes(encounter);
    String residenceCode = "00"; // 00=not specified
    double roll = person.rand();
    if (claimTypes.contains(ClaimType.SNF)) {
      residenceCode = "03"; // 03=long-term
    } else if (claimTypes.contains(ClaimType.HHA)) {
      if (roll <= 0.95) {
        residenceCode = "01"; // 01=home
      } else {
        residenceCode = "04"; // 04=assisted living
      }
    } else if (claimTypes.contains(ClaimType.HOSPICE)) {
      residenceCode = "11"; // 11=hospice
    } else if (claimTypes.contains(ClaimType.INPATIENT)) {
      if (roll <= 0.95) {
        residenceCode = "03"; // 03=nursing
      } else if (roll <= 0.99) {
        residenceCode = "04"; // 04=assisted living
      } else {
        residenceCode = "13"; // 13=inpatient rehab
      }
    } else if (claimTypes.contains(ClaimType.CARRIER)
        || claimTypes.contains(ClaimType.OUTPATIENT)) {
      if (roll <= 0.87) {
        residenceCode = "01"; // 01=home
      } else if (roll <= 0.95) {
        residenceCode = "00"; // 00=not specified
      } else if (roll <= 0.99) {
        residenceCode = "04"; // 04=assisted living
      } else if (person.attributes.containsKey("homeless")
          && ((Boolean) person.attributes.get("homeless") == true)) {
        residenceCode = "14"; // 14=homeless, rare in actual data
      }
    } else {
      // Other
    }
    return residenceCode;
  }
}
|
/**
 * Created by Benedikt Linke on 23.11.15.
 *
 * Controller for reading, creating, deleting and downloading documents
 * in a CMIS repository via the session provided by {@link CMSController}.
 */
public class DocumentController {

    private CMSController cmsController;

    /**
     * Constructor; initializes a DocumentController.
     * @param cmsController controller providing the CMIS session
     */
    public DocumentController(CMSController cmsController){
        this.cmsController = cmsController;
    }

    /**
     * Returns a specific document from the CMIS repository.
     * @param parentFolder the folder that contains the document
     * @param fileName name of the document
     * @return document
     */
    public Document getDocumentByPath(Folder parentFolder, String fileName){
        return (Document) cmsController.getSession().getObjectByPath(parentFolder.getPath()+ "/"+ fileName);
    }

    /**
     * Returns a specific document from the CMIS repository.
     * @param documentId ID of the document to return
     * @return document
     */
    public Document getDocumentById(String documentId){
        return (Document) cmsController.getSession().getObject(documentId);
    }

    /**
     * Creates a document in the CMIS repository based on an already existing file.
     * The file exists either only temporarily or in a directory of the operating system.
     * @param parentFolder target folder in which the document should be created
     * @param file the existing file
     * @param fileType content (MIME) type of the document
     * @return document, the created document from the CMIS repository
     * @throws FileNotFoundException if the source file cannot be opened
     */
    public Document createDocument(Folder parentFolder, File file, String fileType) throws FileNotFoundException {
        String fileName = file.getName();

        Map<String, Object> properties = new HashMap<String, Object>();
        properties.put(PropertyIds.NAME, fileName);
        properties.put(PropertyIds.OBJECT_TYPE_ID, "cmis:document");

        ContentStream contentStream = cmsController.getSession().getObjectFactory().createContentStream(
                fileName,
                file.length(),
                fileType,
                new FileInputStream(file)
        );

        Document document = null;
        try {
            document = parentFolder.createDocument(properties, contentStream, null);
            Logger.info("Created new document: " + document.getId() + " " + document.getPaths());
        } catch (CmisContentAlreadyExistsException ccaee) {
            // Name already taken: fall back to the existing document at that path.
            document = (Document) cmsController.getSession().getObjectByPath(parentFolder.getPath() + "/" + fileName);
            Logger.info("Document already exists: " + fileName);
        }
        document.getPaths();
        return document;
    }

    /**
     * Deletes a document from the CMIS repository.
     * @param object id of the document to delete
     * @return true if the deletion was successful
     */
    public boolean deleteDocument(String object){
        try {
            Document document = (Document) cmsController.getSession().getObject(object);
            document.delete();
            Logger.info("Deleted document");
            return true;
        } catch (CmisObjectNotFoundException ccaee) {
            // NOTE(review): log message contains a typo ("Dokument"); left
            // unchanged here since it is runtime output.
            Logger.info("Dokument not found");
            return false;
        }
    }

    /**
     * Downloads a document from the CMIS repository.
     * @param object id of the document
     * @param destinationPath path where the downloaded document is stored
     * @return true if the download was successful
     */
    public boolean downloadDocument(String object, String destinationPath){
        try {
            FileUtils.download((Document) cmsController.getSession().getObject(object), destinationPath);
            return true;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return false;
    }

    /**
     * Reads a (document) image from the CMIS repository and creates a BufferedImage.
     * @param object id of the (document) image
     * @return BufferedImage
     */
    public BufferedImage readingImage(String object){
        Document document = (Document) cmsController.getSession().getObject(object);
        InputStream stream = document.getContentStream().getStream();

        BufferedImage bufferedImage = null;
        try {
            bufferedImage = ImageIO.read(stream);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return bufferedImage;
    }

    /**
     * Loads a JSON file from the CMIS repository.
     * @param object id of the JSON file
     * @return the JSON file from the CMIS repository as a stream
     */
    public InputStream readingJSON(String object){
        Document document = (Document) cmsController.getSession().getObject(object);
        InputStream stream = document.getContentStream().getStream();
        return stream;
    }
}
<filename>benches/graphemes.rs
#[macro_use]
extern crate bencher;
extern crate unicode_segmentation;
use bencher::Bencher;
use unicode_segmentation::UnicodeSegmentation;
use std::fs;
/// Benchmark grapheme-cluster iteration over the text file at `path`,
/// reporting throughput in bytes of input per iteration.
fn graphemes(bench: &mut Bencher, path: &str) {
    let text = fs::read_to_string(path).unwrap();
    bench.bytes = text.len() as u64;
    bench.iter(|| {
        text.graphemes(true).for_each(|g| {
            bencher::black_box(g);
        });
    });
}
// One benchmark per sample text; each delegates to `graphemes` with the
// corresponding file under benches/texts/.
fn graphemes_arabic(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/arabic.txt");
}

fn graphemes_english(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/english.txt");
}

fn graphemes_hindi(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/hindi.txt");
}

fn graphemes_japanese(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/japanese.txt");
}

fn graphemes_korean(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/korean.txt");
}

fn graphemes_mandarin(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/mandarin.txt");
}

fn graphemes_russian(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/russian.txt");
}

fn graphemes_source_code(bench: &mut Bencher) {
    graphemes(bench, "benches/texts/source_code.txt");
}
// Register all benchmarks with bencher's harness and generate main().
benchmark_group!(
    benches,
    graphemes_arabic,
    graphemes_english,
    graphemes_hindi,
    graphemes_japanese,
    graphemes_korean,
    graphemes_mandarin,
    graphemes_russian,
    graphemes_source_code,
);
benchmark_main!(benches);
|
/**
 *	sis_old_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring for.
 *
 *	Set PIO mode for device, in host controller PCI config space. This
 *	function handles PIO set up for all chips that are pre ATA100 and
 *	also early ATA100 devices.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int port = sis_old_port_base(adev);
	u8 t1, t2;
	/* Index into the timing tables: 0 for XFER_PIO_0 .. 4 for XFER_PIO_4. */
	int speed = adev->pio_mode - XFER_PIO_0;
	/* Active and recovery timing values per PIO mode 0-4. */
	const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
	const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };

	/* Configure the FIFO for this device (helper defined elsewhere). */
	sis_set_fifo(ap, adev);

	/* Read-modify-write the two timing bytes: the low nibble of the first
	   holds the active timing, the low 3 bits of the second the recovery. */
	pci_read_config_byte(pdev, port, &t1);
	pci_read_config_byte(pdev, port + 1, &t2);

	t1 &= ~0x0F;	/* Clear active timing bits */
	t2 &= ~0x07;	/* Clear recovery timing bits */
	t1 |= active[speed];
	t2 |= recovery[speed];

	pci_write_config_byte(pdev, port, t1);
	pci_write_config_byte(pdev, port + 1, t2);
}
/**
* This class represents the watermark source that parse the input and emits punctuated watermark.
* If the input represents a punctuated watermark, it generate the MistWatermarkEvent.
* If not, extract the timestamp and make it as MistDataEvent.
*/
public final class PunctuatedEventGenerator<I, V> extends EventGeneratorImpl<I, V> {
/**
* The function check whether the input is watermark or not.
*/
private final MISTPredicate<I> isWatermark;
/**
* The function get input which is watermark and parse the timestamp.
*/
private final WatermarkTimestampFunction<I> parseTimestamp;
@Inject
private PunctuatedEventGenerator(
@Parameter(SerializedTimestampParseUdf.class) final String timestampParseObj,
@Parameter(SerializedWatermarkPredicateUdf.class) final String isWatermarkObj,
final ClassLoader classLoader,
@Parameter(PeriodicCheckpointPeriod.class) final long checkpointPeriod,
final TimeUnit timeUnit,
final ScheduledExecutorService scheduler) throws IOException, ClassNotFoundException {
this(SerializeUtils.deserializeFromString(isWatermarkObj, classLoader),
SerializeUtils.deserializeFromString(timestampParseObj, classLoader),
checkpointPeriod, timeUnit, scheduler);
}
@Inject
private PunctuatedEventGenerator(
@Parameter(SerializedTimestampExtractUdf.class) final String timestampExtractObj,
@Parameter(SerializedTimestampParseUdf.class) final String timestampParseObj,
@Parameter(SerializedWatermarkPredicateUdf.class) final String isWatermarkObj,
final ClassLoader classLoader,
@Parameter(PeriodicCheckpointPeriod.class) final long checkpointPeriod,
final TimeUnit timeUnit,
final ScheduledExecutorService scheduler) throws IOException, ClassNotFoundException {
this((MISTFunction)SerializeUtils.deserializeFromString(timestampExtractObj, classLoader),
(MISTPredicate)SerializeUtils.deserializeFromString(timestampParseObj, classLoader),
(WatermarkTimestampFunction)SerializeUtils.deserializeFromString(isWatermarkObj, classLoader),
checkpointPeriod, timeUnit, scheduler);
}
@Inject
public PunctuatedEventGenerator(
final MISTPredicate<I> isWatermark,
final WatermarkTimestampFunction<I> parseTimestamp,
@Parameter(PeriodicCheckpointPeriod.class) final long checkpointPeriod,
final TimeUnit timeUnit,
final ScheduledExecutorService scheduler) {
this(null, isWatermark, parseTimestamp,
checkpointPeriod, timeUnit, scheduler);
}
@Inject
public PunctuatedEventGenerator(
final MISTFunction<I, Tuple<V, Long>> extractTimestampFunc,
final MISTPredicate<I> isWatermark,
final WatermarkTimestampFunction<I> parseTimestamp,
@Parameter(PeriodicCheckpointPeriod.class) final long checkpointPeriod,
final TimeUnit timeUnit,
final ScheduledExecutorService scheduler) {
super(extractTimestampFunc, checkpointPeriod, timeUnit, scheduler);
this.isWatermark = isWatermark;
this.parseTimestamp = parseTimestamp;
}
@Override
public void emitData(final I input) {
if (isWatermark.test(input)) {
latestWatermarkTimestamp = parseTimestamp.apply(input);
outputEmitter.emitWatermark(new MistWatermarkEvent(latestWatermarkTimestamp));
} else {
MistDataEvent newInputEvent = generateEvent(input);
if (newInputEvent != null) {
outputEmitter.emitData(newInputEvent);
}
}
}
} |
/**
 * The Output is where the elements of the query output their bits of SQL to.
 *
 * @author <a href="mailto:[email protected]">Joe Walnes</a>
 */
public class Output {

    /** Accumulated SQL text. (StringBuilder: this class is not shared across threads.) */
    private final StringBuilder result = new StringBuilder();

    /** Indentation prefix written after each pending newline. */
    private final StringBuilder currentIndent = new StringBuilder();

    /** Set by println(); the newline plus indent is written lazily before the next token. */
    private boolean newLineComing;

    /** String appended once per indentation level (e.g. "", "  ", "    ", "\t"). */
    private final String indent;

    /**
     * @param indent String to be used for indenting (e.g. "", "  ", "    ", "\t")
     */
    public Output(String indent) {
        this.indent = indent;
    }

    public String toString() {
        return result.toString();
    }

    public Output print(Object o) {
        writeNewLineIfNeeded();
        result.append(o);
        return this;
    }

    public Output print(char c) {
        writeNewLineIfNeeded();
        result.append(c);
        return this;
    }

    /** Appends o, then schedules a newline before whatever is printed next. */
    public Output println(Object o) {
        writeNewLineIfNeeded();
        result.append(o);
        newLineComing = true;
        return this;
    }

    public Output println() {
        newLineComing = true;
        return this;
    }

    /**
     * Appends a single space.
     * NOTE(review): unlike print(), this does not flush a pending newline first,
     * so a space after println() lands before the newline — confirm intended.
     */
    public Output space() {
        result.append(' ');
        return this;
    }

    /** Increases the indentation applied after subsequent newlines. */
    public void indent() {
        currentIndent.append(indent);
    }

    /** Reverses one indent() level. */
    public void unindent() {
        currentIndent.setLength(currentIndent.length() - indent.length());
    }

    // Lazily emits the newline (plus current indentation) requested by println().
    private void writeNewLineIfNeeded() {
        if (newLineComing) {
            result.append('\n').append(currentIndent);
            newLineComing = false;
        }
    }
}
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dashbuilder.common.client.widgets;
import javax.inject.Inject;
import org.jboss.errai.common.client.api.IsElement;
import org.jboss.errai.common.client.dom.HTMLElement;
import org.jboss.errai.ioc.client.container.SyncBeanManager;
import org.uberfire.client.mvp.UberElement;
import org.uberfire.mvp.Command;
/**
 * A set of removable filter labels with a "clear all" action; the clear-all
 * control is only enabled while more than one label is displayed.
 */
public class FilterLabelSet implements IsElement {

    /** MVP view contract for the label set widget. */
    public interface View extends UberElement<FilterLabelSet> {

        void clearAll();

        void setClearAllEnabled(boolean enabled);

        void addLabel(FilterLabel label);
    }

    private View view;
    private SyncBeanManager beanManager;
    // Invoked after the set is cleared via the clear-all action; may be null.
    private Command onClearAllCommand;
    // Number of labels currently displayed.
    private int numberOfLabels = 0;

    @Inject
    public FilterLabelSet(View view, SyncBeanManager beanManager) {
        this.view = view;
        this.beanManager = beanManager;
        this.view.init(this);
        this.view.setClearAllEnabled(false);
    }

    @Override
    public HTMLElement getElement() {
        return view.getElement();
    }

    /** Removes every label and disables the clear-all control. */
    public void clear() {
        view.clearAll();
        view.setClearAllEnabled(false);
        numberOfLabels = 0;
    }

    /**
     * Creates and displays a new label.
     * @param label the text to show
     * @return the newly created FilterLabel
     */
    public FilterLabel addLabel(String label) {
        FilterLabel filterLabel = beanManager.lookupBean(FilterLabel.class).newInstance();
        filterLabel.setLabel(label);
        view.addLabel(filterLabel);
        numberOfLabels++;
        // Clearing "all" only makes sense once more than one label exists.
        view.setClearAllEnabled(numberOfLabels>1);
        return filterLabel;
    }

    /** Registers a callback to run after the user clears all labels. */
    public void setOnClearAllCommand(Command onClearAllCommand) {
        this.onClearAllCommand = onClearAllCommand;
    }

    // Called by the view when the clear-all control is activated.
    void onClearAll() {
        this.clear();
        if (onClearAllCommand != null) {
            onClearAllCommand.execute();
        }
    }
}
|
<reponame>CodeFreezr/rosettacode-to-go<gh_stars>1-10
package main
import (
"fmt"
"os"
"os/exec"
)
// main prints the current terminal dimensions as "rows cols" by invoking
// `stty size`, which reads the terminal attributes from stdin.
func main() {
	var h, w int
	cmd := exec.Command("stty", "size")
	// stty inspects the terminal attached to its stdin, so it must be
	// the process's own stdin rather than the default (a pipe).
	cmd.Stdin = os.Stdin
	d, err := cmd.Output()
	if err != nil {
		// Previously this error was silently discarded, which made the
		// program print "0 0" when stty failed (e.g. no tty attached).
		fmt.Fprintln(os.Stderr, "stty:", err)
		os.Exit(1)
	}
	if _, err := fmt.Sscan(string(d), &h, &w); err != nil {
		fmt.Fprintln(os.Stderr, "parsing stty output:", err)
		os.Exit(1)
	}
	fmt.Println(h, w)
}
//\Terminal-control-Dimensions\terminal-control-dimensions-2.go
|
// If we have detected a problem with the RSL, we need to reset the fetched
// origin/RSL to the last trusted revision of our local RSL.
pub fn reset_remote_to_local(&mut self) -> Result<()> {
// find reference of origin/RSL
let mut reference = self.repo.find_reference("refs/remotes/origin/RSL")?;
let msg = "Resetting RSL to last trusted state";
println!("{}", &msg);
reference.set_target(self.local_head, &msg)?;
self.remote_head = self.local_head;
Ok(())
} |
<gh_stars>10-100
/// <reference path="../../../../../../types/styled-system__core.d.ts" />
import { HeightProps } from './height';
import { MaxHeightProps } from './max_height';
import { MaxWidthProps } from './max_width';
import { MinHeightProps } from './min_height';
import { MinWidthProps } from './min_width';
import { WidthProps } from './width';
/**
 * Style props for the dimensions of an element.
 *
 * @docsPath UI/Style System/Dimensions
 */
export interface DimensionsSetProps extends HeightProps, MaxHeightProps, MaxWidthProps, MinHeightProps, MinWidthProps, WidthProps {
}
// styled-system style function covering the dimension props declared above.
export declare const dimensionsSet: import("@styled-system/core").styleFn;
// Runtime prop-types validators, keyed by prop name — presumably one entry
// per dimension prop; generated declaration, see the source .ts for details.
export declare const dimensionsSetPropTypes: import("../../../private_utils").ObjectMap<string, import("prop-types").Validator<any>>;
//# sourceMappingURL=dimensions_set.d.ts.map |
<reponame>kailag/KiddieCare
import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, ViewController } from 'ionic-angular';
import { FormGroup, FormBuilder, Validators } from '@angular/forms';
@IonicPage()
@Component({
  selector: 'page-add-schedule',
  templateUrl: 'add-schedule.html',
})
export class AddSchedulePage {
  // Schedule model bound from the template; the reactive form below only
  // validates a subset of these fields.
  schedule = {
    title: '',
    startDate: '',
    endDate: new Date(),
    notes: '',
    location: '',
    options: { firstReminderMinutes: 15, secondReminderMinutes: 5, id: 'childRecord' }
  }
  // Year bounds for the date picker: current year through ten years out.
  minDate = new Date().getFullYear();
  maxDate = new Date().getFullYear() + 10;
  children: any;
  selectedChild: any;
  // NOTE(review): addForm appears never assigned or read in this class —
  // confirm the template does not reference it before removing.
  addForm: FormGroup;
  addScheduleForm: FormGroup;

  constructor(public navParams: NavParams, public navCtrl: NavController, public viewCtrl: ViewController, private fb: FormBuilder, ) {
    // title/startDate/notes are required; location is optional.
    this.addScheduleForm = this.fb.group({
      title: ['', Validators.required],
      startDate: ['', Validators.required],
      notes: ['', Validators.required],
      location: ['']
    });
  }

  /** Close the modal without returning a schedule. */
  cancel() {
    this.viewCtrl.dismiss();
  }

  /** Convenience accessor for the form controls, for template error display. */
  get f() { return this.addScheduleForm.controls };

  /**
   * Validate the form, derive the end date from the start date, prefix the
   * title, and dismiss the modal passing the schedule back to the caller.
   */
  save() {
    if (this.addScheduleForm.invalid) {
      return;
    }
    const start = new Date(this.schedule.startDate);
    // BUG FIX: the previous code built endDate from "now" and copied only the
    // start's day-of-month and hour, so a start date in a different month or
    // year produced an end date in the wrong month, with wall-clock minutes
    // and seconds. Copy the full start timestamp instead.
    this.schedule.endDate = new Date(start.getTime());
    this.schedule.title = `KiddieCare-${this.schedule.title}`;
    console.log(this.schedule);
    this.viewCtrl.dismiss(this.schedule);
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.