import java.util.List;

/**
 * Created by einandartun on 6/15/18.
 */
public class TedPodCastsVO {

    private String podCastId;
    private String title;
    private String imageUrl;
    private String description;
    private List<SegmentsVO> segments;

    public String getPodCastId() {
        return podCastId;
    }

    public String getTitle() {
        return title;
    }

    public String getImageUrl() {
        return imageUrl;
    }

    public String getDescription() {
        return description;
    }

    public List<SegmentsVO> getSegments() {
        return segments;
    }
}
package AutumnRecruitment.chap1;

/**
 * @Classname Ex15
 * @Description Computes a histogram of the values in an array.
 * @Date 2019/9/21 10:51
 * @Created by 14241
 */
public class Ex15 {

    public static int[] histogram(int[] a, int M) {
        int[] result = new int[M];
        // Count every element of a that falls in [0, M).
        // (The loop must run over a.length, not M; bounding it by M
        // silently ignored any elements past index M - 1.)
        for (int i = 0; i < a.length; i++) {
            if (a[i] >= 0 && a[i] < M) {
                result[a[i]]++;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        int[] a = { 1, 1, 2, 3, 1, 7, 5, 3, 2, 2, 2 };
        int[] result = histogram(a, 8);
        for (int i = 0; i < result.length; i++) {
            System.out.printf("%3d", result[i]);
        }
    }
}
// components/DateCreate/story.tsx
import styled from '@emotion/styled';
import { Story } from '@storybook/react';
import { DateCreate } from '.';
import { Language } from '../../config/locale';

export default {
  title: 'Date Create',
};

const StoryWrapper = styled.div`
  padding: 1.5rem;
  display: flex;
  flex-direction: column;
  align-items: stretch;
`;

export const DateCreateDefaultStory: Story = () => (
  <StoryWrapper>
    <DateCreate
      onSubmit={() => undefined}
      offerTitles={{
        [Language.de]: 'Angebot',
        [Language.en]: 'Offer',
      }}
    />
  </StoryWrapper>
);
def summarize(self, sent: str, lemma_func: Callable[[str], str] = None,
              keep_stop_words: bool = True,
              scoring_func: Callable[[EmojiSummarizationResult], float] = None) -> EmojiSummarizationResult:
    if lemma_func is None:
        lemma_func = self.lemma_func
    if scoring_func is None:
        scoring_func = self.score_summarization_result_average

    sent = self.clean_sentence(sent, lemma_func=lemma_func, keep_stop_words=keep_stop_words)
    sent_combos = self.combinations_of_sent(sent)

    best_summarization = EmojiSummarizationResult()
    best_summarization_score = 100_000_000
    for sent_combo in sent_combos:
        local_summarization = EmojiSummarizationResult()
        for n_gram in sent_combo:
            close_emoji, cos_diff, close_desc = self.closest_emoji(n_gram)
            local_summarization.emojis += close_emoji
            local_summarization.emojis_n_grams.append(close_desc)
            local_summarization.uncertainty_scores.append(cos_diff)
        local_summarization.n_grams = sent_combo

        if scoring_func(local_summarization) < best_summarization_score:
            best_summarization = local_summarization
            best_summarization_score = scoring_func(best_summarization)

    self.closest_emoji.cache_clear()
    return best_summarization
def mutabletree(f):
    # Placeholder stub: currently a no-op.
    pass
__version__ = "0.1.0"
# e.g. "0.1.0" -> (0, 1, 0)
__version_info__ = tuple(int(num) for num in __version__.split('.'))
// src/extension/kernel/provider.ts
import {
    NotebookDocument,
    notebooks,
    NotebookCell,
    NotebookCellOutput,
    NotebookCellOutputItem,
    workspace,
    WorkspaceEdit,
    NotebookController,
    ExtensionContext,
    Disposable
} from 'vscode';
import { Client } from '../kusto/client';
import { getChartType } from '../output/chart';
import { createPromiseFromToken } from '../utils';

export class KernelProvider {
    public static register(context: ExtensionContext) {
        context.subscriptions.push(new Kernel());
    }
}

export class Kernel extends Disposable {
    controller: NotebookController;

    constructor() {
        super(() => {
            this.dispose();
        });
        this.controller = notebooks.createNotebookController(
            'kusto',
            'kusto-notebook',
            'Kusto',
            this.execute.bind(this),
            []
        );
        this.controller.supportedLanguages = ['kusto'];
        this.controller.supportsExecutionOrder = true;
        this.controller.description = 'Execute Kusto Queries';
    }

    dispose() {
        this.controller.dispose();
    }

    public execute(cells: NotebookCell[], notebook: NotebookDocument, controller: NotebookController) {
        cells.forEach((cell) => {
            this.executeCell(cell, controller);
        });
    }

    private async executeCell(cell: NotebookCell, controller: NotebookController): Promise<void> {
        const task = controller.createNotebookCellExecution(cell);
        const client = await Client.create(cell.notebook);
        if (!client) {
            task.end(false);
            return;
        }
        const edit = new WorkspaceEdit();
        edit.replaceNotebookCellMetadata(cell.notebook.uri, cell.index, { statusMessage: '' });
        const promise = workspace.applyEdit(edit);
        task.start(Date.now());
        task.clearOutput();
        let success = false;
        try {
            const results = await Promise.race([
                createPromiseFromToken(task.token, { action: 'resolve', value: undefined }),
                client.execute(cell.document.getText())
            ]);
            if (task.token.isCancellationRequested || !results) {
                return;
            }
            success = true;
            promise.then(() => {
                const rowCount = results.primaryResults.length ? results.primaryResults[0]._rows.length : undefined;
                if (rowCount) {
                    const edit = new WorkspaceEdit();
                    edit.replaceNotebookCellMetadata(cell.notebook.uri, cell.index, {
                        statusMessage: `${rowCount} records`
                    });
                    workspace.applyEdit(edit);
                }
            });
            // Drop the primary results table from the list of tables.
            // We already have that information as a separate property named `primaryResults`.
            // This reduces the amount of JSON saved in the .knb file.
            if (!Array.isArray(results.primaryResults) || results.primaryResults.length === 0) {
                results.primaryResults = results.tables.filter((item) => item.name === 'PrimaryResult');
            }
            const chartType = getChartType(results);
            results.tables = results.tables.filter((item) => item.name !== 'PrimaryResult');
            results.tableNames = results.tableNames.filter((item) => item !== 'PrimaryResult');
            const outputItems: NotebookCellOutputItem[] = [];
            if (chartType && chartType !== 'table') {
                outputItems.push(NotebookCellOutputItem.json(results, 'application/vnd.kusto.result.viz+json'));
            } else {
                outputItems.push(NotebookCellOutputItem.json(results, 'application/vnd.kusto.result+json'));
            }
            task.appendOutput(new NotebookCellOutput(outputItems));
        } catch (ex) {
            console.error('Failed to execute query', ex);
            if (!ex) {
                const error = new Error('Failed to execute query');
                task.appendOutput(new NotebookCellOutput([NotebookCellOutputItem.error(error)]));
            } else if (ex instanceof Error) {
                task.appendOutput(new NotebookCellOutput([NotebookCellOutputItem.error(ex)]));
            } else if (ex && 'message' in ex) {
                const innerError = 'innererror' in ex && ex.innererror.message ? ` (${ex.innererror.message})` : '';
                const message = `${ex.message}${innerError}`;
                task.appendOutput(new NotebookCellOutput([NotebookCellOutputItem.error({ message, name: '' })]));
            } else {
                const error = new Error('Failed to execute query');
                task.appendOutput(new NotebookCellOutput([NotebookCellOutputItem.error(error)]));
            }
        } finally {
            task.end(success, Date.now());
        }
    }
}

export function isJupyterNotebook(document?: NotebookDocument) {
    return document?.notebookType === 'jupyter-notebook';
}

export function isKustoNotebook(document: NotebookDocument) {
    return document.notebookType === 'kusto-notebook';
}
mod kafka;

pub use self::kafka::Kafka;
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0

// snippet-start:[ses.go.list_addresses]
package main

// snippet-start:[ses.go.list_addresses.imports]
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
	"github.com/aws/aws-sdk-go/service/ses/sesiface"
)
// snippet-end:[ses.go.list_addresses.imports]

// GetAddresses retrieves the valid email addresses.
// Inputs:
//     svc is an SES service client
// Output:
//     If successful, information about the valid addresses and nil
//     Otherwise, nil and an error from the call to ListIdentities
func GetAddresses(svc sesiface.SESAPI) ([]string, error) {
	// snippet-start:[ses.go.list_addresses.list_identities]
	var addresses []string

	result, err := svc.ListIdentities(&ses.ListIdentitiesInput{
		IdentityType: aws.String("EmailAddress"),
	})
	// snippet-end:[ses.go.list_addresses.list_identities]
	if err != nil {
		return addresses, err
	}

	// snippet-start:[ses.go.list_addresses.get_attributes]
	for _, email := range result.Identities {
		var e = []*string{email}

		verified, err := svc.GetIdentityVerificationAttributes(&ses.GetIdentityVerificationAttributesInput{
			Identities: e,
		})
		// snippet-end:[ses.go.list_addresses.get_attributes]
		if err != nil {
			fmt.Println("Got an error retrieving an identity attribute:")
			fmt.Println(err)
			continue
		}

		// snippet-start:[ses.go.list_addresses.add_attributes]
		for _, va := range verified.VerificationAttributes {
			if *va.VerificationStatus == "Success" {
				addresses = append(addresses, *email)
			}
		}
		// snippet-end:[ses.go.list_addresses.add_attributes]
	}

	return addresses, nil
}

func main() {
	// snippet-start:[ses.go.list_addresses.session]
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))

	svc := ses.New(sess)
	// snippet-end:[ses.go.list_addresses.session]

	addresses, err := GetAddresses(svc)
	if err != nil {
		fmt.Println("Got an error retrieving addresses:")
		fmt.Println(err)
		return
	}

	// snippet-start:[ses.go.list_addresses.display]
	for _, address := range addresses {
		fmt.Println(address)
	}
	// snippet-end:[ses.go.list_addresses.display]
}
// snippet-end:[ses.go.list_addresses]
/*
Copyright 2019 matrix-appservice-discord

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import * as Chai from "chai";
import { DiscordStore, CURRENT_SCHEMA } from "../../src/store";
import { RemoteStoreRoom, MatrixStoreRoom } from "../../src/db/roomstore";

// we are a test file and thus need those
/* tslint:disable: no-any no-unused-expression */

const expect = Chai.expect;
// const assert = Chai.assert;

let store: DiscordStore;

describe("RoomStore", () => {
    before(async () => {
        store = new DiscordStore(":memory:");
        await store.init();
    });
    describe("upsertEntry|getEntriesByMatrixId", () => {
        it("will create a new entry", async () => {
            await store.roomStore.upsertEntry({
                id: "test1",
                matrix: new MatrixStoreRoom("!abc:def.com"),
                remote: new RemoteStoreRoom("123456_789", {discord_guild: "123", discord_channel: "456"}),
            });
            const entry = (await store.roomStore.getEntriesByMatrixId("!abc:def.com"))[0];
            expect(entry.id).to.equal("test1");
            expect(entry.matrix!.roomId).to.equal("!abc:def.com");
            expect(entry.remote!.roomId).to.equal("123456_789");
            expect(entry.remote!.get("discord_guild")).to.equal("123");
            expect(entry.remote!.get("discord_channel")).to.equal("456");
        });
        it("will update an existing entry's rooms", async () => {
            await store.roomStore.upsertEntry({
                id: "test2",
                matrix: new MatrixStoreRoom("test2_m"),
                remote: new RemoteStoreRoom("test2_r", {discord_guild: "123", discord_channel: "456"}),
            });
            await store.roomStore.upsertEntry({
                id: "test2",
                matrix: new MatrixStoreRoom("test2_2m"),
                remote: new RemoteStoreRoom("test2_2r", {discord_guild: "555", discord_channel: "999"}),
            });
            const entry = (await store.roomStore.getEntriesByMatrixId("test2_2m"))[0];
            expect(entry.id).to.equal("test2");
            expect(entry.matrix!.roomId).to.equal("test2_2m");
            expect(entry.remote!.roomId).to.equal("test2_2r");
            expect(entry.remote!.get("discord_guild")).to.equal("555");
            expect(entry.remote!.get("discord_channel")).to.equal("999");
        });
        it("will add new data to an existing entry", async () => {
            await store.roomStore.upsertEntry({
                id: "test3",
                matrix: new MatrixStoreRoom("test3_m"),
                remote: new RemoteStoreRoom("test3_r", {discord_guild: "123", discord_channel: "456"}),
            });
            await store.roomStore.upsertEntry({
                id: "test3",
                matrix: new MatrixStoreRoom("test3_m"),
                remote: new RemoteStoreRoom("test3_r", {discord_guild: "123", discord_channel: "456", update_topic: 1}),
            });
            const entry = (await store.roomStore.getEntriesByMatrixId("test3_m"))[0];
            expect(entry.id).to.equal("test3");
            expect(entry.matrix!.roomId).to.equal("test3_m");
            expect(entry.remote!.roomId).to.equal("test3_r");
            expect(entry.remote!.get("update_topic")).to.equal(1);
        });
        it("will replace data on an existing entry", async () => {
            await store.roomStore.upsertEntry({
                id: "test3.1",
                matrix: new MatrixStoreRoom("test3.1_m"),
                remote: new RemoteStoreRoom("test3.1_r", {discord_guild: "123", discord_channel: "456"}),
            });
            await store.roomStore.upsertEntry({
                id: "test3.1",
                matrix: new MatrixStoreRoom("test3.1_m"),
                remote: new RemoteStoreRoom("test3.1_r", {discord_guild: "-100", discord_channel: "seventythousand"}),
            });
            const entry = (await store.roomStore.getEntriesByMatrixId("test3.1_m"))[0];
            expect(entry.id).to.equal("test3.1");
            expect(entry.matrix!.roomId).to.equal("test3.1_m");
            expect(entry.remote!.roomId).to.equal("test3.1_r");
            expect(entry.remote!.get("discord_guild")).to.equal("-100");
            expect(entry.remote!.get("discord_channel")).to.equal("seventythousand");
        });
        it("will delete data on an existing entry", async () => {
            await store.roomStore.upsertEntry({
                id: "test3.2",
                matrix: new MatrixStoreRoom("test3.2_m"),
                remote: new RemoteStoreRoom("test3.2_r", {
                    discord_channel: "456",
                    discord_guild: "123",
                    update_icon: true,
                }),
            });
            await store.roomStore.upsertEntry({
                id: "test3.2",
                matrix: new MatrixStoreRoom("test3.2_m"),
                remote: new RemoteStoreRoom("test3.2_r", {discord_guild: "123", discord_channel: "456"}),
            });
            const entry = (await store.roomStore.getEntriesByMatrixId("test3.2_m"))[0];
            expect(entry.id).to.equal("test3.2");
            expect(entry.matrix!.roomId).to.equal("test3.2_m");
            expect(entry.remote!.roomId).to.equal("test3.2_r");
            expect(entry.remote!.get("update_icon")).to.be.eq(0);
        });
    });
    describe("getEntriesByMatrixIds", () => {
        it("will get multiple entries", async () => {
            const EXPECTED_ROOMS = 2;
            await store.roomStore.upsertEntry({
                id: "test4_1",
                matrix: new MatrixStoreRoom("!test_mOne:eggs.com"),
                remote: new RemoteStoreRoom("test4_r", {discord_guild: "five", discord_channel: "five"}),
            });
            await store.roomStore.upsertEntry({
                id: "test4_2",
                matrix: new MatrixStoreRoom("!test_mTwo:eggs.com"),
                remote: new RemoteStoreRoom("test4_r", {discord_guild: "nine", discord_channel: "nine"}),
            });
            const entries = await store.roomStore.getEntriesByMatrixIds(["!test_mOne:eggs.com", "!test_mTwo:eggs.com"]);
            expect(entries).to.have.lengthOf(EXPECTED_ROOMS);
            expect(entries[0].id).to.equal("test4_1");
            expect(entries[0].matrix!.roomId).to.equal("!test_mOne:eggs.com");
            expect(entries[1].id).to.equal("test4_2");
            expect(entries[1].matrix!.roomId).to.equal("!test_mTwo:eggs.com");
        });
    });
    describe("linkRooms", () => {
        it("will link a room", async () => {
            const matrix = new MatrixStoreRoom("test5_m");
            const remote = new RemoteStoreRoom("test5_r", {discord_guild: "five", discord_channel: "five"});
            await store.roomStore.linkRooms(matrix, remote);
            const entries = await store.roomStore.getEntriesByMatrixId("test5_m");
            expect(entries[0].matrix!.roomId).to.equal("test5_m");
            expect(entries[0].remote!.roomId).to.equal("test5_r");
            expect(entries[0].remote!.get("discord_guild")).to.equal("five");
            expect(entries[0].remote!.get("discord_channel")).to.equal("five");
        });
    });
    describe("getEntriesByRemoteRoomData", () => {
        it("will get an entry", async () => {
            await store.roomStore.upsertEntry({
                id: "test6",
                matrix: new MatrixStoreRoom("test6_m"),
                remote: new RemoteStoreRoom("test6_r", {discord_guild: "find", discord_channel: "this"}),
            });
            const entries = await store.roomStore.getEntriesByRemoteRoomData({
                discord_channel: "this",
                discord_guild: "find",
            });
            expect(entries[0].matrix!.roomId).to.equal("test6_m");
            expect(entries[0].remote!.roomId).to.equal("test6_r");
            expect(entries[0].remote!.get("discord_guild")).to.equal("find");
            expect(entries[0].remote!.get("discord_channel")).to.equal("this");
        });
    });
    describe("removeEntriesByRemoteRoomId", () => {
        it("will remove a room", async () => {
            await store.roomStore.upsertEntry({
                id: "test7",
                matrix: new MatrixStoreRoom("test7_m"),
                remote: new RemoteStoreRoom("test7_r", {discord_guild: "find", discord_channel: "this"}),
            });
            await store.roomStore.removeEntriesByRemoteRoomId("test7_r");
            const entries = await store.roomStore.getEntriesByMatrixId("test7_m");
            expect(entries).to.be.empty;
        });
    });
    describe("removeEntriesByMatrixRoomId", () => {
        it("will remove a room", async () => {
            await store.roomStore.upsertEntry({
                id: "test8",
                matrix: new MatrixStoreRoom("test8_m"),
                remote: new RemoteStoreRoom("test8_r", {discord_guild: "find", discord_channel: "this"}),
            });
            // was removeEntriesByRemoteRoomId("test8_m") / getEntriesByMatrixId("test8_r"),
            // which swapped the matrix and remote IDs and never exercised the method under test
            await store.roomStore.removeEntriesByMatrixRoomId("test8_m");
            const entries = await store.roomStore.getEntriesByMatrixId("test8_m");
            expect(entries).to.be.empty;
        });
    });
});
/**
 * An object of this class encapsulates a pair consisting of an abbreviation
 * (key) and an array of the meanings associated with it (value for the key).
 */
export class AbkBedeutung {

    constructor( public abkuerzung : string,
                 public bedeutungen: string[]
               ) {}
}
// Copyright 2015 <NAME> and <NAME>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"flag"

	"github.com/cpmech/gofem/fem"
	"github.com/cpmech/gofem/inp"
	"github.com/cpmech/gosl/io"
)

func main() {

	// input data
	simfile := "simfile.sim"
	zmin := 0.0
	zmax := 3.0
	npts := 11

	// parse flags
	flag.Parse()
	if len(flag.Args()) > 0 {
		simfile = flag.Arg(0)
	}
	if len(flag.Args()) > 1 {
		zmin = io.Atof(flag.Arg(1))
	}
	if len(flag.Args()) > 2 {
		zmax = io.Atof(flag.Arg(2)) // was io.Atob (a bool parser), which cannot yield a float
	}
	if len(flag.Args()) > 3 {
		npts = io.Atoi(flag.Arg(3))
	}

	// print input data
	io.Pf("\nInput data\n")
	io.Pf("==========\n")
	io.Pf("  simfile = %30s // simulation filename\n", simfile)
	io.Pf("  zmin    = %30v // min elevation\n", zmin) // %30s was wrong: zmin is a float64
	io.Pf("  zmax    = %30v // max elevation\n", zmax)
	io.Pf("  npts    = %30v // number of points\n", npts)
	io.Pf("\n")

	// sim file
	sim := inp.ReadSim("", simfile, false)
	if sim == nil {
		io.PfRed("cannot read sim file\n")
		return
	}

	// layer
	var lay fem.GeoLayer
	lay.Zmin = zmin
	lay.Zmax = zmax
	lay.Cl = sim.WaterRho0 / sim.WaterBulk
	//if !lay.ReadPorousParameters(sim.Regions[0],
	// TODO
}
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::middle::cstore::{CrateStore, ChildItem, DefLike};
use rustc::middle::privacy::{AccessLevels, AccessLevel};
use rustc::hir::def::Def;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
use rustc::ty::Visibility;
use syntax::ast;

use std::cell::RefMut;

use clean::{Attributes, Clean};

// FIXME: this may not be exhaustive, but is sufficient for rustdoc's current uses

/// Similar to `librustc_privacy::EmbargoVisitor`, but also takes
/// specific rustdoc annotations into account (i.e. `doc(hidden)`)
pub struct LibEmbargoVisitor<'a, 'b: 'a, 'tcx: 'b> {
    cx: &'a ::core::DocContext<'b, 'tcx>,
    cstore: &'a CrateStore<'tcx>,
    // Accessibility levels for reachable nodes
    access_levels: RefMut<'a, AccessLevels<DefId>>,
    // Previous accessibility level, None means unreachable
    prev_level: Option<AccessLevel>,
}

impl<'a, 'b, 'tcx> LibEmbargoVisitor<'a, 'b, 'tcx> {
    pub fn new(cx: &'a ::core::DocContext<'b, 'tcx>) -> LibEmbargoVisitor<'a, 'b, 'tcx> {
        LibEmbargoVisitor {
            cx: cx,
            cstore: &*cx.sess().cstore,
            access_levels: cx.access_levels.borrow_mut(),
            prev_level: Some(AccessLevel::Public),
        }
    }

    pub fn visit_lib(&mut self, cnum: ast::CrateNum) {
        let did = DefId { krate: cnum, index: CRATE_DEF_INDEX };
        self.update(did, Some(AccessLevel::Public));
        self.visit_mod(did);
    }

    // Updates node level and returns the updated level
    fn update(&mut self, did: DefId, level: Option<AccessLevel>) -> Option<AccessLevel> {
        let attrs: Vec<_> = self.cx.tcx().get_attrs(did).iter()
                                                        .map(|a| a.clean(self.cx))
                                                        .collect();
        let is_hidden = attrs.list("doc").has_word("hidden");

        let old_level = self.access_levels.map.get(&did).cloned();
        // Accessibility levels can only grow
        if level > old_level && !is_hidden {
            self.access_levels.map.insert(did, level.unwrap());
            level
        } else {
            old_level
        }
    }

    pub fn visit_mod(&mut self, did: DefId) {
        for item in self.cstore.item_children(did) {
            if let DefLike::DlDef(def) = item.def {
                match def {
                    Def::Mod(did) |
                    Def::ForeignMod(did) |
                    Def::Trait(did) |
                    Def::Struct(did) |
                    Def::Union(did) |
                    Def::Enum(did) |
                    Def::TyAlias(did) |
                    Def::Fn(did) |
                    Def::Method(did) |
                    Def::Static(did, _) |
                    Def::Const(did) => self.visit_item(did, item),
                    _ => {}
                }
            }
        }
    }

    fn visit_item(&mut self, did: DefId, item: ChildItem) {
        let inherited_item_level = match item.def {
            DefLike::DlImpl(..) | DefLike::DlField => unreachable!(),
            DefLike::DlDef(def) => {
                match def {
                    Def::ForeignMod(..) => self.prev_level,
                    _ => if item.vis == Visibility::Public { self.prev_level } else { None }
                }
            }
        };

        let item_level = self.update(did, inherited_item_level);

        if let DefLike::DlDef(Def::Mod(did)) = item.def {
            let orig_level = self.prev_level;

            self.prev_level = item_level;
            self.visit_mod(did);
            self.prev_level = orig_level;
        }
    }
}
// pkg/model/ingress/ingress_table.go
package ingress

import (
	"strings"

	"github.com/containerum/chkit/pkg/model"
)

var (
	_ model.TableRenderer = Ingress{}
	_ model.TableItem     = Ingress{}
)

func (ingress Ingress) RenderTable() string {
	return model.RenderTable(ingress)
}

func (ingress Ingress) TableHeaders() []string {
	return []string{
		"Name",
		"Host",
		"Service",
	}
}

func (ingress Ingress) TableRows() [][]string {
	return [][]string{{
		ingress.Name,
		strings.Join(ingress.Rules.Hosts(), "\n"),
		strings.Join(ingress.Rules.ServicesTableView(), "\n"),
	}}
}

func (ingress Ingress) String() string {
	return ingress.RenderTable()
}
// driver/unpack_test.go
package driver_test

import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"code.cloudfoundry.org/groot-windows/driver"
	"code.cloudfoundry.org/groot-windows/driver/fakes"
	hcsfakes "code.cloudfoundry.org/groot-windows/hcs/fakes"
	"code.cloudfoundry.org/lager"
	"code.cloudfoundry.org/lager/lagertest"
	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/hcsshim"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Unpack", func() {
	var (
		storeDir              string
		d                     *driver.Driver
		hcsClientFake         *fakes.HCSClient
		tarStreamerFake       *fakes.TarStreamer
		privilegeElevatorFake *fakes.PrivilegeElevator
		logger                lager.Logger
		layerID               string
		buffer                *bytes.Buffer
		layerWriterFake       *hcsfakes.LayerWriter
	)

	BeforeEach(func() {
		var err error
		storeDir, err = ioutil.TempDir("", "driver")
		Expect(err).To(Succeed())

		hcsClientFake = &fakes.HCSClient{}
		tarStreamerFake = &fakes.TarStreamer{}
		privilegeElevatorFake = &fakes.PrivilegeElevator{}
		limiterFake := &fakes.Limiter{}

		d = driver.New(hcsClientFake, tarStreamerFake, privilegeElevatorFake, limiterFake)
		d.Store = storeDir

		logger = lagertest.NewTestLogger("driver-unpack-test")
		layerID = "aaa"
		buffer = bytes.NewBuffer([]byte("tar ball contents"))

		tarStreamerFake.NextReturns(nil, io.EOF)
		tarStreamerFake.WriteBackupStreamFromTarFileReturns(nil, io.EOF)

		layerWriterFake = &hcsfakes.LayerWriter{}
		hcsClientFake.NewLayerWriterReturns(layerWriterFake, nil)
	})

	AfterEach(func() {
		Expect(os.RemoveAll(storeDir)).To(Succeed())
	})

	It("passes the correct DriverInfo to LayerExists", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(hcsClientFake.LayerExistsCallCount()).To(Equal(1))
		di, id := hcsClientFake.LayerExistsArgsForCall(0)
		Expect(di).To(Equal(hcsshim.DriverInfo{HomeDir: d.LayerStore(), Flavour: 1}))
		Expect(id).To(Equal(layerID))
	})

	It("creates an associated layerId path", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		expectedDir := filepath.Join(d.LayerStore(), layerID)
		Expect(expectedDir).To(BeADirectory())
	})

	It("elevates itself with the backup and restore privileges", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(privilegeElevatorFake.EnableProcessPrivilegesCallCount()).To(Equal(1))
		Expect(privilegeElevatorFake.EnableProcessPrivilegesArgsForCall(0)).To(Equal([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}))
	})

	Context("when the backup/restore privileges cannot be acquired", func() {
		var expectedErr error

		BeforeEach(func() {
			expectedErr = errors.New("Failed to elevate privileges")
			privilegeElevatorFake.EnableProcessPrivilegesReturns(expectedErr)
		})

		It("errors", func() {
			_, err := d.Unpack(logger, layerID, []string{}, buffer)
			Expect(err).To(MatchError(expectedErr))
		})
	})

	It("releases the backup and restore privileges on exit", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(privilegeElevatorFake.DisableProcessPrivilegesCallCount()).To(Equal(1))
		Expect(privilegeElevatorFake.DisableProcessPrivilegesArgsForCall(0)).To(Equal([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}))
	})

	It("creates a layer writer with the correct layer id", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(hcsClientFake.NewLayerWriterCallCount()).To(Equal(1))
		di, actualLayerID, parentIDs := hcsClientFake.NewLayerWriterArgsForCall(0)
		Expect(di).To(Equal(hcsshim.DriverInfo{HomeDir: d.LayerStore(), Flavour: 1}))
		Expect(actualLayerID).To(Equal(layerID))
		Expect(parentIDs).To(BeEmpty())
	})

	It("closes the layer writer on exit", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(layerWriterFake.CloseCallCount()).To(Equal(1))
	})

	It("sets up a tar reader with the layer tarball contents, clearing it at the end", func() {
		_, err := d.Unpack(logger, layerID, []string{}, buffer)
		Expect(err).To(Succeed())

		Expect(tarStreamerFake.SetReaderCallCount()).To(Equal(2))
		Expect(tarStreamerFake.SetReaderArgsForCall(0)).To(Equal(buffer))

		r := tarStreamerFake.SetReaderArgsForCall(1)
		b, ok := r.(*bytes.Reader)
		Expect(ok).To(BeTrue())
		Expect(b.Size()).To(Equal(int64(0)))
	})

	Context("when the layer contains files", func() {
		var (
			whiteoutFileHeader, linkFileHeader, regularFileHeader *tar.Header
		)

		BeforeEach(func() {
			whiteoutFileHeader = &tar.Header{Name: "something/somethingelse/.wh.filename"}
			linkFileHeader = &tar.Header{
				Name:     "something/somethingelse/linkfile",
				Typeflag: tar.TypeLink,
				Linkname: "link/name/file",
			}
			regularFileHeader = &tar.Header{Name: "regular/file/name"}
		})

		Context("the driver store is unset", func() {
			BeforeEach(func() {
				d.Store = ""
			})

			It("returns an error", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError("driver store must be set"))
			})
		})

		Context("when there are multiple files", func() {
			BeforeEach(func() {
				tarStreamerFake.NextReturnsOnCall(0, whiteoutFileHeader, nil)
				tarStreamerFake.NextReturnsOnCall(1, linkFileHeader, nil)
				tarStreamerFake.NextReturnsOnCall(2, regularFileHeader, nil)
				tarStreamerFake.NextReturnsOnCall(3, linkFileHeader, nil)
				tarStreamerFake.NextReturnsOnCall(4, regularFileHeader, nil)

				tarStreamerFake.WriteBackupStreamFromTarFileReturnsOnCall(0, whiteoutFileHeader, nil)

				tarStreamerFake.FileInfoFromHeaderReturnsOnCall(0, "regular/file/name", 100, &winio.FileBasicInfo{}, nil)
				tarStreamerFake.FileInfoFromHeaderReturnsOnCall(1, "regular/file/other-name", 200, &winio.FileBasicInfo{}, nil)
			})

			It("reads files from the layer tarball until EOF", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				Expect(tarStreamerFake.NextCallCount()).To(Equal(5))
			})

			It("returns the size of the layer", func() {
				size, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())
				Expect(size).To(Equal(int64(300)))
			})

			It("writes the size to the size file", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				content, err := ioutil.ReadFile(filepath.Join(d.LayerStore(), layerID, "size"))
				Expect(err).NotTo(HaveOccurred())
				Expect(string(content)).To(Equal("300"))
			})
		})

		Context("the file is a whiteout file", func() {
			BeforeEach(func() {
				tarStreamerFake.NextReturnsOnCall(0, &tar.Header{
					Name: "something/somethingelse/.wh.filename",
				}, nil)
			})

			It("removes the file and finds the next file", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				Expect(tarStreamerFake.NextCallCount()).To(Equal(2))
				Expect(layerWriterFake.RemoveCallCount()).To(Equal(1))
				Expect(layerWriterFake.RemoveArgsForCall(0)).To(Equal("something\\somethingelse\\filename"))
			})

			Context("when removing the file fails", func() {
				var expectedErr error

				BeforeEach(func() {
					expectedErr = errors.New("Failed to remove file!")
					layerWriterFake.RemoveReturns(expectedErr)
				})

				It("errors", func() {
					_, err := d.Unpack(logger, layerID, []string{}, buffer)
					Expect(err).To(MatchError(expectedErr))
				})
			})
		})

		Context("the file is a link", func() {
			BeforeEach(func() {
				tarStreamerFake.NextReturnsOnCall(0, &tar.Header{
					Name:     "something/somethingelse/linkfile",
					Typeflag: tar.TypeLink,
					Linkname: "link/name/file",
				}, nil)
			})

			It("adds the file as a link", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				Expect(tarStreamerFake.NextCallCount()).To(Equal(2))
				Expect(layerWriterFake.AddLinkCallCount()).To(Equal(1))
				nameArg, linknameArg := layerWriterFake.AddLinkArgsForCall(0)
				Expect(nameArg).To(Equal("something\\somethingelse\\linkfile"))
				Expect(linknameArg).To(Equal("link\\name\\file"))
			})

			Context("when adding the link fails", func() {
				var expectedErr error

				BeforeEach(func() {
					expectedErr = errors.New("Failed to add link")
					layerWriterFake.AddLinkReturns(expectedErr)
				})

				It("errors", func() {
					_, err := d.Unpack(logger, layerID, []string{}, buffer)
					Expect(err).To(MatchError(expectedErr))
				})
			})
		})

		Context("the file is a regular file", func() {
			var (
				tarHeader *tar.Header
				fileInfo  *winio.FileBasicInfo
			)

			BeforeEach(func() {
				tarHeader = &tar.Header{
					Name: "regular/file/name",
				}
				tarStreamerFake.NextReturnsOnCall(0, tarHeader, nil)

				fileInfo = &winio.FileBasicInfo{}
				tarStreamerFake.FileInfoFromHeaderReturns("regular/file/name", 100, fileInfo, nil)
			})

			It("adds the file to the layer", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				Expect(tarStreamerFake.NextCallCount()).To(Equal(1))
				Expect(tarStreamerFake.FileInfoFromHeaderCallCount()).To(Equal(1))
				Expect(tarStreamerFake.FileInfoFromHeaderArgsForCall(0)).To(Equal(tarHeader))

				Expect(layerWriterFake.AddCallCount()).To(Equal(1))
				actualName, actualFileInfo := layerWriterFake.AddArgsForCall(0)
				Expect(actualName).To(Equal("regular\\file\\name"))
				Expect(actualFileInfo).To(Equal(fileInfo))

				Expect(tarStreamerFake.WriteBackupStreamFromTarFileCallCount()).To(Equal(1))
				actualWriter, actualTarHeader := tarStreamerFake.WriteBackupStreamFromTarFileArgsForCall(0)
				Expect(actualWriter).To(Equal(layerWriterFake))
				Expect(actualTarHeader).To(Equal(tarHeader))
			})

			It("returns the size of the layer", func() {
				size, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())
				Expect(size).To(Equal(int64(100)))
			})

			It("writes the size to the size file", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(Succeed())

				content, err := ioutil.ReadFile(filepath.Join(d.LayerStore(), layerID, "size"))
				Expect(err).NotTo(HaveOccurred())
				Expect(string(content)).To(Equal("100"))
			})

			Context("when getting the file info fails", func() {
				var expectedErr error

				BeforeEach(func() {
					expectedErr = errors.New("Failed to get file info")
					tarStreamerFake.FileInfoFromHeaderReturns("", 0, nil, expectedErr)
				})

				It("errors", func() {
					_, err := d.Unpack(logger, layerID, []string{}, buffer)
					Expect(err).To(MatchError(expectedErr))
				})
			})

			Context("when adding the file to the layer fails", func() {
				var expectedErr error

				BeforeEach(func() {
					expectedErr = errors.New("Failed to add file")
					layerWriterFake.AddReturns(expectedErr)
				})

				It("errors", func() {
					_, err := d.Unpack(logger, layerID, []string{}, buffer)
					Expect(err).To(MatchError(expectedErr))
				})
			})
		})

		Context("when getting the next file fails", func() {
			var expectedErr error

			BeforeEach(func() {
				expectedErr = errors.New("Failed to get next file")
				tarStreamerFake.NextReturns(nil, expectedErr)
			})

			It("errors", func() {
				_, err := d.Unpack(logger, layerID, []string{}, buffer)
				Expect(err).To(MatchError(expectedErr))
			})
		})
	})

	Context("when the layer being unpacked has parents", func() {
		It("creates a layer writer with its parent layer paths from newest to oldest", func() {
			parentIDs := []string{"oldest-parent-id", "newest-parent-id"}
			_, err := d.Unpack(logger, layerID, parentIDs, buffer)
			Expect(err).To(Succeed())

			_, _, hcsParentIds := hcsClientFake.NewLayerWriterArgsForCall(0)
			Expect(hcsParentIds).To(Equal([]string{filepath.Join(d.LayerStore(), "newest-parent-id"), filepath.Join(d.LayerStore(), "oldest-parent-id")}))
		})
	})

	Context("when creating the layer writer fails", func() {
		var expectedErr error

		BeforeEach(func() {
			expectedErr = errors.New("Failed to create layer writer!")
			hcsClientFake.NewLayerWriterReturns(nil, expectedErr)
		})

		It("errors", func() {
			_, err := d.Unpack(logger, layerID, []string{}, buffer)
			Expect(err).To(MatchError(expectedErr))
		})
	})

	Context("the layer has already been unpacked", func() {
		BeforeEach(func() {
			Expect(os.MkdirAll(filepath.Join(d.LayerStore(), layerID), 0755)).To(Succeed())
			Expect(ioutil.WriteFile(filepath.Join(d.LayerStore(), layerID, "size"), []byte("300"), 0644)).To(Succeed())
			hcsClientFake.LayerExistsReturnsOnCall(0, true, nil)
		})

		It("does not unpack the layer and returns the size", func() {
			size, err := d.Unpack(logger, layerID, []string{}, buffer)
			Expect(err).NotTo(HaveOccurred())
			Expect(size).To(Equal(int64(300)))

			Expect(privilegeElevatorFake.EnableProcessPrivilegesCallCount()).To(Equal(1))
			Expect(privilegeElevatorFake.DisableProcessPrivilegesCallCount()).To(Equal(1))
			Expect(hcsClientFake.NewLayerWriterCallCount()).To(Equal(0))
			Expect(tarStreamerFake.SetReaderCallCount()).To(Equal(0))
			Expect(tarStreamerFake.NextCallCount()).To(Equal(0))
			Expect(tarStreamerFake.FileInfoFromHeaderCallCount()).To(Equal(0))
			Expect(tarStreamerFake.WriteBackupStreamFromTarFileCallCount()).To(Equal(0))
		})
	})

	Context("the layer has already been unpacked without size file", func() {
		var (
			tarHeader *tar.Header
			fileInfo  *winio.FileBasicInfo
		)

		BeforeEach(func() {
			tarHeader = &tar.Header{
				Name: "regular/file/name",
			}
			tarStreamerFake.NextReturnsOnCall(0, tarHeader, nil)

			fileInfo = &winio.FileBasicInfo{}
			tarStreamerFake.FileInfoFromHeaderReturns("regular/file/name", 300, fileInfo, nil)

			Expect(os.MkdirAll(filepath.Join(d.LayerStore(), layerID), 0755)).To(Succeed())
			hcsClientFake.LayerExistsReturnsOnCall(0, true, nil)
		})

		It("destroys the layer and re-unpacks", func() {
			size, err := d.Unpack(logger, layerID, []string{}, buffer)
			Expect(err).NotTo(HaveOccurred())
			Expect(size).To(Equal(int64(300)))

			Expect(privilegeElevatorFake.EnableProcessPrivilegesCallCount()).To(Equal(1))
			Expect(privilegeElevatorFake.DisableProcessPrivilegesCallCount()).To(Equal(1))
			Expect(hcsClientFake.DestroyLayerCallCount()).To(Equal(1))
			Expect(hcsClientFake.NewLayerWriterCallCount()).To(Equal(1))
			Expect(tarStreamerFake.SetReaderCallCount()).To(Equal(2))
			Expect(tarStreamerFake.NextCallCount()).To(Equal(1))
			Expect(tarStreamerFake.FileInfoFromHeaderCallCount()).To(Equal(1))
			Expect(tarStreamerFake.WriteBackupStreamFromTarFileCallCount()).To(Equal(1))

			contents, err := ioutil.ReadFile(filepath.Join(d.LayerStore(), layerID, "size"))
			Expect(err).NotTo(HaveOccurred())
			Expect(string(contents)).To(Equal("300"))
		})
	})

	Context("LayerExists returns an error", func() {
		BeforeEach(func() {
			hcsClientFake.LayerExistsReturnsOnCall(0, false, errors.New("LayerExists failed"))
		})

		It("returns an error", func() {
			_, err := d.Unpack(logger, layerID, []string{}, buffer)
			Expect(err).To(MatchError("LayerExists failed"))
		})
	})
})
/** Asserts that generic class names are parsed correctly. */
public void testParseGenericClassNames() throws Exception {
    CodeEntityBase root = parseTestFile("generictypes.cs");
    CodeEntityBase namespace = root.getChildren().get(0);
    assertTypeAndFqName(namespace, NAMESPACE, "GenericsTestLibrary");
    assertEquals(7, namespace.getChildren().size());
    assertTypeAndFqName(namespace.getChildren().get(0), ETokenType.STRUCT, "GenericsTestLibrary.Test<T0>");
    assertTypeAndFqName(namespace.getChildren().get(1), ETokenType.DELEGATE, "GenericsTestLibrary.SampleDelegate");
    assertTypeAndFqName(namespace.getChildren().get(2), ETokenType.CLASS, "GenericsTestLibrary.Class1");
    assertTypeAndFqName(namespace.getChildren().get(3), ETokenType.CLASS, "GenericsTestLibrary.Class1<T0,T1>");
    assertTypeAndFqName(namespace.getChildren().get(4), ETokenType.CLASS, "GenericsTestLibrary.Class1<T0,T1>");
    assertTypeAndFqName(namespace.getChildren().get(5), ETokenType.CLASS, "GenericsTestLibrary.Class1<T0>");
    assertTypeAndFqName(namespace.getChildren().get(6), ETokenType.INTERFACE, "GenericsTestLibrary.IInterface<T0,T1,T2>");
}
#ifndef MONTE_CARLO_SEARCH_TREE
#define MONTE_CARLO_SEARCH_TREE

#include "node.hpp"
#include <random>
#include <chrono>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <mpi.h>
// It may not work for Windows; try something like:
// #include "C:\Program Files (x86)\IntelSWTools\mpi\2019.0.117\intel64\include\mpi.h"

// DEBUG
#include <cassert>

/* - - - - - - - - - - - - - - - - - */
// Declaration of the template class //
/* - - - - - - - - - - - - - - - - - */

template<class Game, class Move>
class MonteCarloSearchTree {

public:

  // Shared pointer to Node class type
  typedef typename Node<Game,Move>::NodePointerType NodePointerType;

  /* List of public methods */
  // Performs the UCT search and finds the best next move
  Move uct_search(void);
  // Updates the pointer to the current game state
  void change_current_status(const Move&);
  // Setters
  void set_outer_iter(unsigned it) { outer_iter = it; }
  void set_inner_iter(unsigned it) { inner_iter = it; }
  void set_ucb_constant(double c) { ucb_constant = c; }
  // Constructors
  MonteCarloSearchTree(unsigned, unsigned);
  MonteCarloSearchTree(unsigned, unsigned, double);
  MonteCarloSearchTree(int, unsigned, unsigned);
  MonteCarloSearchTree(int, unsigned, unsigned, double);
  // For the moment copy assignment and constructors are prevented (not used)
  // In the future a deep-copy constructor and assignment may be implemented
  MonteCarloSearchTree& operator = (const MonteCarloSearchTree&) = delete;
  MonteCarloSearchTree(const MonteCarloSearchTree&) = delete;
  // Print info (DEBUG)
  void print_current_status_info(void) const;
  // Default destructor
  ~MonteCarloSearchTree() = default;

private:

  /* List of private methods */
  // Given a node, computes its UCB
  double compute_ucb(const NodePointerType&) const;
  // Given a parent node, returns the child with best UCB
  NodePointerType best_child_ucb(const NodePointerType&) const;
  // Steps of the MCST algorithm
  // (1) selects the best UCB node and returns it, via pointer
  // (2) produces a leaf from the selected node and returns it
  // (3) performs multiple MC simulations and returns the summed score
  // (4) updates visits and scores of the nodes all the way back to the root
  NodePointerType select(void) const;
  NodePointerType expand(const NodePointerType);
  double rollout(const NodePointerType);
  void back_propagation(const NodePointerType, double);
  // RNG management
  int gen_rand_seed(void);
  void set_rand_seed(void);

  /* List of private members */
  NodePointerType root;
  NodePointerType current_game_node;
  bool is_parallel = false;
  unsigned outer_iter;
  unsigned inner_iter;
  double ucb_constant = sqrt(2.0);
  // Utilities for seed generation
  int seed;
  std::default_random_engine rng;
  std::chrono::steady_clock rng_time;
  int seed_increment = 0;

};

/* - - - - - - - - - - - - - - - - - */
// Definition of methods             //
/* - - - - - - - - - - - - - - - - - */

// *************
// Constructors:
// *************

template<class Game, class Move>
MonteCarloSearchTree<Game,Move>::MonteCarloSearchTree(unsigned oi, unsigned ii):
  root(), current_game_node(root), outer_iter(oi), inner_iter(ii) {
  set_rand_seed();
  rng.seed(seed);
  int tmp;
  MPI_Initialized( &tmp );
  is_parallel = (bool)tmp;
  root = std::make_shared< Node<Game, Move> >();
  current_game_node = root;
}

template<class Game, class Move>
MonteCarloSearchTree<Game,Move>::MonteCarloSearchTree(unsigned oi, unsigned ii, double c):
  root(), current_game_node(root), outer_iter(oi), inner_iter(ii), ucb_constant(c) {
  set_rand_seed();
  rng.seed(seed);
  int tmp;
  MPI_Initialized( &tmp );
  is_parallel = (bool)tmp;
  root = std::make_shared< Node<Game, Move> >();
  current_game_node = root;
}

template<class Game, class Move>
MonteCarloSearchTree<Game,Move>::MonteCarloSearchTree(int s, unsigned oi, unsigned ii):
  root(), current_game_node(root), seed(s), rng(s), outer_iter(oi), inner_iter(ii) {
  int tmp;
  MPI_Initialized( &tmp );
  is_parallel = (bool)tmp;
  root = std::make_shared< Node<Game, Move> >();
  current_game_node = root;
}

template<class Game, class Move>
MonteCarloSearchTree<Game,Move>::MonteCarloSearchTree(int s, unsigned oi, unsigned ii, double c):
  root(), current_game_node(root), seed(s), rng(s), outer_iter(oi), inner_iter(ii), ucb_constant(c) {
  int tmp;
  MPI_Initialized( &tmp );
  is_parallel = (bool)tmp;
  root = std::make_shared< Node<Game, Move> >();
  current_game_node = root;
}

// ********
// Methods:
// ********

template<class Game, class Move>
double
MonteCarloSearchTree<Game,Move>::compute_ucb(const NodePointerType& target_node) const {
  // DEBUG
  assert ( target_node->get_visits()>0 );
  return ( target_node->get_wins() / (double)(target_node->get_visits()) ) +
    ucb_constant * sqrt( log( (double)(target_node->get_parent()->get_visits()) )
      / (double)(target_node->get_visits()) );
}

template<class Game, class Move>
typename MonteCarloSearchTree<Game,Move>::NodePointerType
MonteCarloSearchTree<Game,Move>::best_child_ucb(const NodePointerType& target_parent) const {
  std::vector<NodePointerType> children = target_parent->get_children();
  NodePointerType select = (*(children.cbegin()));
  double best_ucb = compute_ucb(select);
  double temp_ucb = 0.0;
  for (auto it = children.cbegin()+1; it != children.cend(); ++it) {
    temp_ucb = compute_ucb(*it);
    if( temp_ucb >= best_ucb ) {
      select = (*it);
      best_ucb = temp_ucb;
    }
  }
  return select;
}

template<class Game, class Move>
typename MonteCarloSearchTree<Game,Move>::NodePointerType
MonteCarloSearchTree<Game,Move>::select() const {
  NodePointerType selected_node = current_game_node;
  while( selected_node->all_moves_tried() && selected_node->has_children() ) {
    // DEBUG
    assert( best_child_ucb(selected_node)!=nullptr );
    selected_node = best_child_ucb(selected_node);
  }
  return selected_node;
}

template<class Game, class Move>
typename Node<Game,Move>::NodePointerType
MonteCarloSearchTree<Game,Move>::expand(const NodePointerType current_parent) {
  // NodePointerType expanded_node = nullptr;
  // If there are still moves that have not been tried:
  if( !(current_parent->all_moves_tried()) ) {
    std::vector<Move> available_moves = current_parent->get_moves();
    std::size_t tot_moves = available_moves.size();
    std::uniform_int_distribution<> choose(0, tot_moves-1);
    int idx = 0;
    if ( is_parallel ) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank==0) idx = choose(rng);
      MPI_Bcast(&idx, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    else
      idx = choose(rng);
    return current_parent->make_child(available_moves[idx]);
  }
  // If all moves have been tried and it's not a leaf
  else if ( current_parent->has_children() ) {
    std::vector<NodePointerType> available_children = current_parent->get_children();
    std::size_t tot_moves = available_children.size();
    std::uniform_int_distribution<> choose(0, tot_moves-1);
    int idx = 0;
    if ( is_parallel ) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank==0) idx = choose(rng);
      MPI_Bcast(&idx, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    else
      idx = choose(rng);
    return available_children[idx];
  }
  // If it is a leaf
  else {
    return current_parent;
  }
}

template<class Game, class Move>
double
MonteCarloSearchTree<Game,Move>::rollout(const NodePointerType current_leaf) {
  double total_score(0.0);
  if ( is_parallel ) {
    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    unsigned local_iter = inner_iter/size;
    int remainder = inner_iter%size;
    if (rank<remainder) local_iter++;
    for (unsigned i = 0; i<local_iter; ++i) {
      Game temp_game = current_leaf->get_game();
      temp_game.set_seed(gen_rand_seed());
      while ( !temp_game.get_terminal_status() )
        temp_game.apply_action(temp_game.random_action());
      double result = (double)temp_game.evaluate();
      result = 0.5*(result+1) * ( current_game_node->get_player()==1 ) +
        0.5*(1-result) * ( current_game_node->get_player()==2 );
      total_score += result; /* 1.0 win; 0.5 draw; 0.0 lose */
    }
    MPI_Allreduce(MPI_IN_PLACE, &total_score, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  }
  else {
    for (unsigned i = 0; i<inner_iter; ++i) {
      Game temp_game = current_leaf->get_game();
      temp_game.set_seed(gen_rand_seed());
      while ( !temp_game.get_terminal_status() )
        temp_game.apply_action( temp_game.random_action() );
      double result = (double)temp_game.evaluate();
      result = 0.5*(result+1) * ( current_game_node->get_player()==1 ) +
        0.5*(1-result) * ( current_game_node->get_player()==2 );
      total_score += result; /* 1.0 win; 0.5 draw; 0.0 lose */
    }
  }
  return total_score;
}

template<class Game, class Move>
void
MonteCarloSearchTree<Game,Move>::back_propagation(const NodePointerType current_leaf, double score) {
  current_leaf->update(score, inner_iter);
  Node<Game,Move>* temp_ptr = current_leaf->get_parent();
  while ( temp_ptr!=nullptr ) {
    temp_ptr->update(score, inner_iter);
    temp_ptr = temp_ptr->get_parent();
  }
}

template<class Game, class Move>
Move
MonteCarloSearchTree<Game,Move>::uct_search() {
  for (unsigned i = 0; i<outer_iter; ++i) {
    // Select
    NodePointerType selected_parent = select();
    // Expand
    NodePointerType selected_leaf = expand(selected_parent);
    // MC simulate
    double score = rollout(selected_leaf);
    // Back propagate
    back_propagation(selected_leaf, score);
  }
  // Select best move (each node stores the move it came from)
  std::vector<NodePointerType> candidate_nodes = current_game_node->get_children();
  auto best_node_it = candidate_nodes.begin();
  for (auto it = candidate_nodes.begin(); it != candidate_nodes.end(); it++) {
    if ( ( (*it)->get_wins() )/(double)( (*it)->get_visits() ) >
      ( (*best_node_it)->get_wins() )/(double)( (*best_node_it)->get_visits() ) ) {
      best_node_it = it;
    }
  }
  current_game_node = *best_node_it;
  return (*best_node_it)->get_last_move();
}

template<class Game, class Move>
void
MonteCarloSearchTree<Game,Move>::change_current_status(const Move& opponent_move) {
  std::vector<Move> possible_opponent_moves = current_game_node->get_moves();
  auto it_out = std::find( possible_opponent_moves.cbegin(),
    possible_opponent_moves.cend(), opponent_move );
  // NOTE: the iterator must be compared against the container it was obtained
  // from; the original compared it against a fresh get_moves() copy, which is
  // undefined behaviour.
  if( it_out == possible_opponent_moves.cend() ) {
    std::vector<NodePointerType> possible_opponent_games = current_game_node->get_children();
    for (auto it_in = possible_opponent_games.cbegin();
      it_in != possible_opponent_games.cend(); ++it_in) {
      if ( (*it_in)->get_last_move() == opponent_move ) {
        current_game_node = *it_in;
        return;
      }
    }
  }
  else {
    current_game_node = current_game_node->make_child(opponent_move);
  }
}

template<class Game, class Move>
void
MonteCarloSearchTree<Game,Move>::set_rand_seed() {
  auto now_time = rng_time.now();
  std::chrono::duration<double> diff = now_time.time_since_epoch();
  if ( is_parallel ) {
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    seed = ( (int)((diff.count())*100) + rank*10 + seed_increment );
  }
  else
    seed = ( (int)((diff.count())*100) + seed_increment );
  rng.seed(seed);
  seed_increment++;
}

template<class Game, class Move>
int
MonteCarloSearchTree<Game,Move>::gen_rand_seed() {
  seed_increment++;
  if ( is_parallel ) {
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    return ( seed + seed_increment + rank*10 );
  }
  else
    return ( seed + seed_increment );
}

template<class Game, class Move>
void
MonteCarloSearchTree<Game,Move>::print_current_status_info() const {
  current_game_node->print_node();
  std::cout << "Moves left: ";
  // Iterate over a single copy of the moves; the original called get_moves()
  // twice, producing iterators into two different temporaries.
  const auto moves_left = current_game_node->get_moves();
  for( auto it = moves_left.cbegin(); it != moves_left.cend(); it++ )
    std::cout << (*it).to_string() << " ";
  std::cout << std::endl;
  Game tmp = current_game_node->get_game();
  tmp.print_board();
}

#endif /* MONTE_CARLO_SEARCH_TREE */
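// --- Usage sketch (not part of the original header) -----------------------
// Assumes a user-supplied Game type exposing the interface used above
// (get_terminal_status(), apply_action(), random_action(), evaluate(),
// set_seed(), get_player()) and a matching Move type; the names TicTacToe,
// TicTacToeMove and opponent_reply below are illustrative only.
//
//   #include "monte_carlo_search_tree.hpp"
//
//   int main(int argc, char** argv) {
//     MPI_Init(&argc, &argv);  // optional: enables the parallel rollout path
//     MonteCarloSearchTree<TicTacToe, TicTacToeMove>
//       tree(/*outer_iter=*/1000, /*inner_iter=*/100);
//     TicTacToeMove best = tree.uct_search();     // run UCT, pick best move
//     tree.change_current_status(opponent_reply); // advance after the reply
//     MPI_Finalize();
//   }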
/* DWARF 2 section names.

   Copyright (C) 1990-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDB_DWARF2_SECT_NAMES_H
#define GDB_DWARF2_SECT_NAMES_H

#include <cstring>

/* Names for a dwarf2 debugging section.  The field NORMAL is the normal
   section name (usually from the DWARF standard), while the field COMPRESSED
   is the name of compressed sections.  If your object file format doesn't
   support compressed sections, the field COMPRESSED can be NULL.  Likewise,
   if the debugging section is not supported, the field NORMAL can be NULL
   too.  It doesn't make sense to have a NULL NORMAL field but a non-NULL
   COMPRESSED field.  */

struct dwarf2_section_names {
  const char *normal;
  const char *compressed;

  /* Return true if NAME matches either of this section's names.  */
  bool matches (const char *name) const
  {
    return ((normal != nullptr && strcmp (name, normal) == 0)
	    || (compressed != nullptr && strcmp (name, compressed) == 0));
  }
};

/* List of names for dwarf2 debugging sections.  While most object file
   formats use the standardized (i.e. ELF) names, some (e.g. XCOFF) have
   customized names due to restrictions.
   The table for the standard names is defined in dwarf2read.c.  Please
   update all instances of dwarf2_debug_sections if you add a field to
   this structure.  It is always safe to use { NULL, NULL } in this case.  */

struct dwarf2_debug_sections {
  struct dwarf2_section_names info;
  struct dwarf2_section_names abbrev;
  struct dwarf2_section_names line;
  struct dwarf2_section_names loc;
  struct dwarf2_section_names loclists;
  struct dwarf2_section_names macinfo;
  struct dwarf2_section_names macro;
  struct dwarf2_section_names str;
  struct dwarf2_section_names str_offsets;
  struct dwarf2_section_names line_str;
  struct dwarf2_section_names ranges;
  struct dwarf2_section_names rnglists;
  struct dwarf2_section_names types;
  struct dwarf2_section_names addr;
  struct dwarf2_section_names frame;
  struct dwarf2_section_names eh_frame;
  struct dwarf2_section_names gdb_index;
  struct dwarf2_section_names debug_names;
  struct dwarf2_section_names debug_aranges;
  /* This field has no meaning, but exists solely to catch changes to
     this structure which are not reflected in some instance.  */
  int sentinel;
};

/* Section names for ELF.  */
extern const struct dwarf2_debug_sections dwarf2_elf_names;

#endif /* GDB_DWARF2_SECT_NAMES_H */
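/* Illustrative sketch (not from GDB) of how a name pair is typically defined
   and queried with matches(); the section names here are just the standard
   ELF/compressed-ELF spellings.

     static const struct dwarf2_section_names info_names =
       { ".debug_info", ".zdebug_info" };

     info_names.matches (".zdebug_info");   // true (compressed name)
     info_names.matches (".debug_line");    // false
*/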
// do is used to issue the HTTP request with client to the TapPay server and
// read the http.Response body into a byte slice.
func (c *client) do(req *http.Request) ([]byte, error) {
	rawResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer rawResp.Body.Close()

	var b []byte
	buf := bytes.NewBuffer(b)
	if _, err = io.Copy(buf, rawResp.Body); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
// Leetcode/67_Add_Binary.cpp
// Question link : https://leetcode.com/problems/add-binary/

#include <bits/stdc++.h>
using namespace std;

class Solution {
public:
    string addBinary(string a, string b) {
        // l1 = length of string a
        // l2 = length of string b
        int l1 = a.size(), l2 = b.size(), carry = 0;
        string an;
        // the loop terminates once l1, l2 and carry are all zero
        while (l1 || l2 || carry) {
            // s1 & s2 are the digits being added
            int s1, s2;
            if (l1) { s1 = a[l1 - 1] - '0'; l1--; }
            else s1 = 0;
            if (l2) { s2 = b[l2 - 1] - '0'; l2--; }
            else s2 = 0;
            int sum = s1 + s2 + carry;
            if (sum == 0) {
                an += "0";
                carry = 0;
            } else if (sum == 1) {
                an += "1";
                carry = 0;
            } else if (sum == 2) {
                an += "0";
                carry = 1;
            } else {
                an += "1";
                carry = 1;
            }
        }
        // reverse the result, since digits were appended least-significant first
        string ans;
        for (int i = an.size() - 1; i >= 0; i--) {
            ans += an[i];
        }
        return ans;
    }
};

int main() {
    Solution s;
    string a, b;
    cin >> a >> b;
    cout << s.addBinary(a, b);
    return 0;
}
/**
 * Returns {@code true} if all the specifications defined in the given {@link ParamsSchema}
 * exist in this parameters schema, and if they are all identical as defined by
 * {@link ParamSpec#equals(Object)}.
 *
 * @param otherSchema
 *            The schema to compare against.
 * @return <ul>
 *         <li>{@code true} if all the specifications defined in the given {@link ParamsSchema}
 *         exist in this parameters schema, and if they are all identical as defined by
 *         {@link ParamSpec#equals(Object)}.</li>
 *         <li>{@code false} if at least one {@link ParamSpec} of {@code otherSchema} is absent
 *         from this schema.</li>
 *         <li>{@code false} if at least one {@link ParamSpec} appears with the same key in this
 *         schema and {@code otherSchema} but is not the same as defined by
 *         {@link ParamSpec#equals(Object)}.</li>
 *         </ul>
 */
public boolean isSuperSetOf(ParamsSchema otherSchema) {
    if (otherSchema == null) {
        throw new IllegalArgumentException("The specified schema cannot be null.");
    }
    for (String key : otherSchema.specs.keySet()) {
        if (!specs.containsKey(key)) {
            return false;
        }
        if (!otherSchema.specs.get(key).equals(specs.get(key))) {
            return false;
        }
    }
    return true;
}
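// A minimal usage sketch (illustrative only); how specs are registered is not
// shown in this excerpt, so the construction of the two schemas is elided.
//
//   ParamsSchema full = ...;    // defines specs "host" and "port"
//   ParamsSchema partial = ...; // defines only "host", with an identical ParamSpec
//
//   full.isSuperSetOf(partial);  // true: every spec of partial exists in full
//   partial.isSuperSetOf(full);  // false: "port" is absent from partial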
#define KUSEG 0x00000000
#define KSEG0 0x80000000
#define KSEG1 0xA0000000
#define KSEG2 0xC0000000

#define PAGE_SIZE  0x1000
#define PAGE_SHIFT 0xC

#define CPUADDR 0xFFFFE000
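/* A minimal sketch (not from the original header) of how these segment bases
   are conventionally used on MIPS: KSEG0 and KSEG1 both map the low 512 MB of
   physical memory directly, cached and uncached respectively.  The helper
   macro names below are illustrative, not part of this header. */
#define PHYS_TO_KSEG0(pa) ((pa) | KSEG0)  /* cached, unmapped */
#define PHYS_TO_KSEG1(pa) ((pa) | KSEG1)  /* uncached, unmapped */
#define PAGE_ALIGN(addr)  (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))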
use crate::key; use crate::value::*; use chain_crypto::{Ed25519Extended, PublicKey}; use imhamt::{Hamt, InsertError, UpdateError}; use std::collections::hash_map::DefaultHasher; pub type AccountAlg = Ed25519Extended; /// Possible errors during an account operation #[derive(Debug, Clone, PartialEq, Eq)] pub enum LedgerError { NonExistent, AlreadyExists, MismatchCounter, NeedTotalWithdrawal, NonZero, ValueError(ValueError), } impl From<ValueError> for LedgerError { fn from(e: ValueError) -> Self { LedgerError::ValueError(e) } } impl From<UpdateError<LedgerError>> for LedgerError { fn from(e: UpdateError<LedgerError>) -> Self { match e { UpdateError::KeyNotFound => LedgerError::NonExistent, UpdateError::ValueCallbackError(v) => v, } } } impl From<InsertError> for LedgerError { fn from(e: InsertError) -> Self { match e { InsertError::EntryExists => LedgerError::AlreadyExists, } } } /// Account Identifier (also used as Public Key) #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Identifier(PublicKey<AccountAlg>); impl From<PublicKey<AccountAlg>> for Identifier { fn from(pk: PublicKey<AccountAlg>) -> Self { Identifier(pk) } } impl From<Identifier> for PublicKey<AccountAlg> { fn from(i: Identifier) -> Self { i.0 } } /// Account Secret Key pub type Secret = key::AccountSecretKey; #[derive(Clone)] pub struct State { counter: SpendingCounter, value: Value, } impl State { pub fn new(v: Value) -> State { State { counter: SpendingCounter(0), value: v, } } /// Add a value to an account state /// /// Only error if value is overflowing pub fn add(&self, v: Value) -> Result<State, LedgerError> { let new_value = (self.value + v)?; let mut st = self.clone(); st.value = new_value; Ok(st) } /// Subtract a value from an account state, and return the new state. /// /// Note that this *also* increment the counter, as this function would be usually call /// for spending. /// /// If the counter is also reaching the extremely rare of max, we only authorise /// a total withdrawal of fund otherwise the fund will be stuck forever in limbo. pub fn sub(&self, v: Value) -> Result<Option<State>, LedgerError> { let new_value = (self.value - v)?; match self.counter.increment() { None => { if new_value == Value::zero() { Ok(None) } else { Err(LedgerError::NeedTotalWithdrawal) } } Some(new_counter) => Ok(Some(State { counter: new_counter, value: new_value, })), } } pub fn get_value(&self) -> Value { self.value } } /// Spending counter associated to an account. /// /// every time the owner is spending from an account, /// the counter is incremented. A matching counter /// needs to be used in the spending phase to make /// sure we have non-replayability of a transaction. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SpendingCounter(u32); impl SpendingCounter { fn increment(&self) -> Option<Self> { self.0.checked_add(1).map(SpendingCounter) } pub fn to_bytes(&self) -> [u8; 4] { self.0.to_le_bytes() } } /// Account Spending witness, which contains a /// cryptographic signature and a counter. /// The counter need to be matched with the current state of this account in the ledger, /// otherwise the transaction will not be valid. 
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SpendingWitness {
    counter: SpendingCounter,
    signature: u32, // FIXME
}

/// The public ledger of all accounts associated with their current state
#[derive(Clone)]
pub struct Ledger(Hamt<DefaultHasher, Identifier, State>);

impl Ledger {
    /// Create a new empty account ledger
    pub fn new() -> Self {
        Ledger(Hamt::new())
    }

    /// Add a new account into this ledger.
    ///
    /// If the identifier is already present, error out.
    pub fn add_account(
        &self,
        account: &Identifier,
        initial_value: Value,
    ) -> Result<Self, LedgerError> {
        self.0
            .insert(account.clone(), State::new(initial_value))
            .map(Ledger)
            .map_err(|e| e.into())
    }

    /// Remove an account from this ledger
    ///
    /// If the account still has a value > 0, then error out.
    pub fn remove_account(&self, account: &Identifier) -> Result<Self, LedgerError> {
        self.0
            .update(account, |st| {
                if st.value == Value::zero() {
                    Ok(None)
                } else {
                    Err(LedgerError::NonZero)
                }
            })
            .map(Ledger)
            .map_err(|e| e.into())
    }

    /// Add value to an existing account.
    ///
    /// If the account doesn't exist, error out.
    pub fn add_value(&self, account: &Identifier, value: Value) -> Result<Self, LedgerError> {
        self.0
            .update(account, |st| st.add(value).map(Some))
            .map(Ledger)
            .map_err(|e| e.into())
    }

    /// Subtract value from an existing account.
    ///
    /// If the account doesn't exist, or the value would become negative, error out.
    pub fn remove_value(
        &self,
        account: &Identifier,
        value: Value,
    ) -> Result<(Self, SpendingCounter), LedgerError> {
        // ideally we don't need 2 calls to do this
        let counter = self
            .0
            .lookup(account)
            .map_or(Err(LedgerError::NonExistent), |st| Ok(st.counter))?;
        self.0
            .update(account, |st| st.sub(value))
            .map(|ledger| (Ledger(ledger), counter))
            .map_err(|e| e.into())
    }

    pub fn get_total_value(&self) -> Result<Value, ValueError> {
        let values = self.0.iter().map(|(_, state)| state.get_value());
        Value::sum(values)
    }
}
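The spending-counter scheme documented above (increment on every spend, reject witnesses with a stale counter) can be illustrated with a toy stand-in. This Python sketch is only an analogy for the Rust ledger's behavior; the class, method names, and error messages are invented, not part of the crate:

```python
class Account:
    def __init__(self, value):
        self.counter = 0          # mirrors SpendingCounter(0)
        self.value = value

    def spend(self, amount, witness_counter):
        if witness_counter != self.counter:
            raise ValueError("MismatchCounter: stale or replayed witness")
        if amount > self.value:
            raise ValueError("cannot spend more than the account holds")
        self.value -= amount
        self.counter += 1         # replaying the same witness now fails


acct = Account(100)
acct.spend(30, witness_counter=0)      # ok; counter becomes 1
try:
    acct.spend(30, witness_counter=0)  # same witness again: rejected
except ValueError as e:
    print(e)
```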
module UnitB.Expr ( module Logic.Expr , module Logic.Expr.Printable , Expr, ExprP, RawExpr , raw ) where import Logic.Expr hiding (Expr,ExprP,RawExpr) import qualified Logic.Expr import Logic.Expr.Printable type Expr = DispExpr type RawExpr = Logic.Expr.Expr type ExprP = Either [String] Expr raw :: (Functor m, HasExpr expr) => m expr -> m RawExpr raw = fmap getExpr
#ifndef KMEANSML_H #define KMEANSML_H 1 double *new_init_DOUBLE( int n ); void init_INT( int n, int array[] ); void init_DOUBLE( int n, double array[] ); void init_array2D_DOUBLE ( int d, int c, double *arr[] ); void kmeans( int no_of_dimension, int no_of_sample, int no_of_cluster, int it_max, int it_num, double *sample[], int cluster[], double *cluster_center[],int cluster_population[], double cluster_entropy[] ); #endif
def pr_filter_list_layout(list_id, item_id, resource, rfields, record):
    record_id = record["pr_filter.id"]
    item_class = "thumbnail"

    raw = record._row
    T = current.T
    resource_name = raw["pr_filter.resource"]
    resource = current.s3db.resource(resource_name)
    crud_strings = current.response.s3.crud_strings.get(resource.tablename)
    if crud_strings:
        resource_name = crud_strings.title_list
    else:
        resource_name = " ".join(s.capitalize() for s in resource.name.split("_"))

    title = record["pr_filter.title"]
    fstring = S3FilterString(resource, raw["pr_filter.query"])
    query = fstring.represent()
    actions = filter_actions(resource, raw["pr_filter.url"], fstring.get_vars)

    item = DIV(DIV(DIV(actions,
                       _class="action-bar fleft"),
                   SPAN(T("%(resource)s Filter") % \
                        dict(resource=resource_name),
                        _class="card-title"),
                   DIV(A(I(" ", _class="icon icon-trash"),
                         _title=T("Delete this Filter"),
                         _class="dl-item-delete"),
                       _class="edit-bar fright"),
                   _class="card-header"),
               DIV(DIV(H5(title,
                          _id="filter-title-%s" % record_id,
                          _class="media-heading jeditable"),
                       DIV(query),
                       _class="media-body"),
                   _class="media"),
               _class=item_class,
               _id=item_id,
               )
    return item
#ifndef HaskinoFirmwareH
#define HaskinoFirmwareH

#define FIRMWARE_MAJOR 0
#define FIRMWARE_MINOR 6

#endif /* HaskinoFirmwareH */
// Construct the ZKP that the commitment contains 0 or +/-c
public void construct() {
    if(val == null)
        throw new RuntimeException("Must commit to a value first "
                + "before constructing the proof!");

    commitment = new BigInteger[1];
    commitment[0] = commit(val, r);

    BitCommitment bc1 = new BitCommitment(g, h);
    BitCommitment bc2 = new BitCommitment(g, h);

    /**
     * We need to make sure that the random numbers in bc1 and bc2 sum
     * to the random number used in this commitment. So we generate
     * only 1 random number. Note that since the commitment should be
     * C1^c / C2^c no matter what val is, the random numbers associated
     * with C1, C2, and this commitment should satisfy
     *
     *     r = (r1 - r2)*c
     */
    BigInteger rc = r.multiply(CONST.modInverse(q)).mod(q);
    if(val.equals(BigInteger.ZERO)) {
        bc2.commit(0);
        bc1.commit(0, rc.add(bc2.getRandomness()).mod(q));
    }
    else if(val.equals(CONST)) {
        bc2.commit(0);
        bc1.commit(1, rc.add(bc2.getRandomness()).mod(q));
    }
    else if(val.equals(CONST.negate())) {
        bc2.commit(1);
        bc1.commit(0, rc.add(bc2.getRandomness()).mod(q));
    }

    /**
     * The first element in the BitCommitmentProof is the commitment
     * itself so we don't need to store the bit commitment.
     */
    bcp1 = (BitCommitment.BitCommitmentProof)bc1.getProof();
    bcp2 = (BitCommitment.BitCommitmentProof)bc2.getProof();
    BigInteger C1 = bcp1.getCommitment()[0];
    BigInteger C2 = bcp2.getCommitment()[0];
}
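The randomness relation r = (r1 - r2)*c from the comment above can be checked numerically. A toy Python sketch with deliberately small, insecure Pedersen parameters (p = 23, q = 11, g = 2, h = g^3; none of these constants come from the Java class, they exist only to make the identity visible):

```python
p, q = 23, 11            # q divides p - 1; far too small for real use
g, h = 2, 8              # h = g**3 mod p; in practice log_g(h) must be unknown

def commit(v, r):
    """Pedersen commitment C = g^v * h^r mod p."""
    return (pow(g, v, p) * pow(h, r, p)) % p

c = 3                               # the public constant CONST
r = 7                               # randomness of the outer commitment
rc = (r * pow(c, -1, q)) % q        # r * c^{-1} mod q, as in the Java code
r2 = 2                              # randomness chosen by bc2
r1 = (rc + r2) % q                  # so that (r1 - r2) * c == r (mod q)

C = commit(c, r)                    # commitment to val = +c
C1 = commit(1, r1)                  # bit commitment to 1
C2 = commit(0, r2)                  # bit commitment to 0

# The verifier's identity: C == (C1 / C2)^c mod p
assert C == pow(C1 * pow(C2, -1, p) % p, c, p)
print("relation r = (r1 - r2) * c verified")
```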
// Calls to C land // // When entering C land, the rbp, & rsp of the last Java frame have to be recorded // in the (thread-local) JavaThread object. When leaving C land, the last Java fp // has to be reset to 0. This is required to allow proper stack traversal. void MacroAssembler::set_last_Java_frame(Register java_thread, Register last_java_sp, Register last_java_fp, address last_java_pc) { vzeroupper(); if (!java_thread->is_valid()) { java_thread = rdi; get_thread(java_thread); } if (!last_java_sp->is_valid()) { last_java_sp = rsp; } if (last_java_fp->is_valid()) { movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); } if (last_java_pc != NULL) { lea(Address(java_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()), InternalAddress(last_java_pc)); } movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); }
package com.telRan.Manager; import org.openqa.selenium.By; import org.openqa.selenium.WebDriver; public class NavigationHelper extends HelperBase { public NavigationHelper(WebDriver wd) { super(wd); } public void clickLoginButton(){ click(By.xpath("//ul[@class='menu-btns']//button[@type='button'][contains(text(),'Log in')]")); } public void clickLogout(){ click(By.xpath("//ul[@class='menu-btns']//button[@type='button'][contains(text(),'Sign Out')]")); } public void clickRegisterButton(){ click(By.xpath("//ul[@class='menu-btns']//button[@type='button'][contains(text(),'Register')]")); } public void clickHomepageButton(){ click(By.xpath("//div[@class='header-logo p-2 text-left cv-logo title-header-logo']//span")); } public void clickProfilButton(){ click(By.cssSelector("div.grid-container div.main ul.menu-btns app-btns-logged:nth-child(2) li:nth-child(2) > button.btn.btn-danger")); } public void clickCloseButton(){ click(By.xpath("//button[@class='btn btn-close']")); } public void fillLogIn(String email, String password){ type(By.name("username"), email); type(By.name("password"), password); click(By.xpath("//button[@type='submit']")); } public void logIn() { clickLoginButton(); fillLogIn("<EMAIL>","mickael55"); clickCloseButton(); } }
/*	FMI Interface for Model Exchange and CoSimulation Version 2

This file is part of FMICodeGenerator (https://github.com/ghorwin/FMICodeGenerator)

BSD 3-Clause License

Copyright (c) 2018, <NAME>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include <memory>
#include <iostream>
#include <sstream>
#include <cstring> // for memcpy

#ifdef DEBUG
#define FMI_ASSERT(p)	if (!(p)) \
	{ std::cerr << "Assertion failure\nCHECK: " << #p << "\nFILE: " << myFilename(__FILE__) << "\nLINE: " << __LINE__ << '\n'; \
	return fmi2Error; }
#else
#define FMI_ASSERT(p) (void)0;
#endif // DEBUG

#ifdef _WIN32

#if _WIN32_WINNT < 0x0501
#define _WIN32_WINNT 0x0501
#endif

#include <windows.h>

#endif // _WIN32

#include "fmi2Functions.h"
#include "InstanceData.h"

// *** FMI Interface Functions ***

/* Inquire version numbers of header files */

const char* fmi2GetTypesPlatform() {
	// returns platform type, currently "default"
	return fmi2TypesPlatform;
}

const char* fmi2GetVersion() {
	// returns fmi version, currently "2.0"
	return "2.0";
}

// Enables/disables debug logging
fmi2Status fmi2SetDebugLogging(void* c, fmi2Boolean loggingOn, size_t nCategories, const char* const categories[]) {
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	modelInstance->logger(fmi2OK, "logAll", std::string("fmi2SetDebugLogging: logging switched ") + (loggingOn ? "on."
: "off.")); modelInstance->m_loggingOn = (loggingOn == fmi2True); if (modelInstance->m_loggingOn) { modelInstance->m_loggingCategories.clear(); for (size_t i=0; i<nCategories; ++i) modelInstance->m_loggingCategories.push_back(std::string(categories[i])); } return fmi2OK; } /* Creation and destruction of FMU instances */ void* fmi2Instantiate(fmi2String instanceName, fmi2Type fmuType, fmi2String guid, fmi2String fmuResourceLocation, const fmi2CallbackFunctions* functions, fmi2Boolean, fmi2Boolean loggingOn) { // initial checks if (functions == NULL) return NULL; if (functions->logger == NULL) return NULL; std::string instanceNameString = instanceName; if (instanceNameString.empty()) { if (loggingOn) functions->logger(functions->componentEnvironment, instanceName, fmi2Error, "logStatusError", "fmi2Instantiate: Missing instance name."); return NULL; } // check for correct model if (std::string(InstanceData::GUID) != guid) { functions->logger(functions->componentEnvironment, instanceName, fmi2Error, "logStatusError", "fmi2Instantiate: Invalid/mismatching guid."); return NULL; } // instantiate data structure for instance-specific data InstanceData * data = InstanceData::create(); // transfer function arguments data->m_callbackFunctions = functions; data->m_instanceName = instanceName; data->m_modelExchange = (fmuType == fmi2ModelExchange); data->m_resourceLocation = fmuResourceLocation; data->m_loggingOn = loggingOn; // return data pointer return data; } // Free allocated instance data structure void fmi2FreeInstance(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); modelInstance->logger(fmi2OK, "logAll", "fmi2FreeInstance: Model instance deleted."); delete modelInstance; } /* Enter and exit initialization mode, terminate and reset */ // Overrides project settings? 
fmi2Status fmi2SetupExperiment(void* c, int, double, double, int, double) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->logger(fmi2OK, "logAll", "fmi2SetupExperiment: Call of setup experiment."); // transfer experiment specs to Therakles return fmi2OK; } // All scalar variables with initial="exact" or "approx" can be set before // fmi2SetupExperiment has to be called at least once before fmi2Status fmi2EnterInitializationMode(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->logger(fmi2OK, "logAll", "fmi2EnterInitializationMode: Go into initialization mode."); modelInstance->m_initializationMode = true; // let instance data initialize everything that's needed // now the output directory parameter should be set try { modelInstance->init(); // compute and cache serialization size, might be zero if serialization is not supported if (!modelInstance->m_modelExchange) modelInstance->computeFMUStateSize(); // init successful return fmi2OK; } catch (std::exception & ex) { std::string err = ex.what(); err += "\nModel initialization failed."; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } // Switch off all initialization equations fmi2Status fmi2ExitInitializationMode(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->logger(fmi2OK, "logAll", "fmi2ExitInitializationMode: Go out from initialization mode."); modelInstance->m_initializationMode = false; return fmi2OK; } fmi2Status fmi2Terminate(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->clearBuffers(); modelInstance->logger(fmi2OK, "logAll", "fmi2Terminate: Terminate model."); return fmi2OK; } fmi2Status fmi2Reset(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->logger(fmi2Warning, "logStatusWarning", "fmi2Reset: Reset the whole model to default. 
Not implemented yet."); return fmi2OK; } /* Getting and setting variables values */ fmi2Status fmi2GetReal(void* c, const fmi2ValueReference vr[], size_t nvr, fmi2Real value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->getReal(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2GetReal()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2GetInteger(void* c, const fmi2ValueReference vr[], size_t nvr, fmi2Integer value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->getInt(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2GetInteger()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2GetBoolean(void* c, const fmi2ValueReference vr[], size_t nvr, fmi2Boolean value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { bool val; modelInstance->getBool(vr[i], val); value[i] = val; } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2GetBoolean()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2GetString(void* c, const fmi2ValueReference vr[], size_t nvr, fmi2String value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->getString(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2GetString()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2SetReal (void* c, const fmi2ValueReference vr[], size_t nvr, const fmi2Real value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->setReal(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2SetReal()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2SetInteger(void* c, const fmi2ValueReference vr[], size_t nvr, const fmi2Integer value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->setInt(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2SetInteger()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2SetBoolean(void* c, const fmi2ValueReference vr[], size_t nvr, const fmi2Boolean value[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->setBool(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2SetBoolean()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } fmi2Status fmi2SetString(void* c, const fmi2ValueReference vr[], size_t nvr, const fmi2String value[]) { InstanceData * modelInstance = 
static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); for (size_t i=0; i<nvr; ++i) { try { modelInstance->setString(vr[i], value[i]); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2SetString()"; modelInstance->logger(fmi2Error, "logStatusError", err); return fmi2Error; } } return fmi2OK; } /* Getting and setting the internal FMU state */ fmi2Status fmi2GetFMUstate(void* c, fmi2FMUstate* FMUstate) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); if (modelInstance->m_fmuStateSize == 0) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2GetFMUstate is called though FMU was not yet completely set up " "or serialization is not supported by this FMU."); return fmi2Error; } // check if new alloc is needed if (*FMUstate == NULL) { // alloc new memory fmi2FMUstate fmuMem = malloc(modelInstance->m_fmuStateSize); // remember this memory array modelInstance->m_fmuStates.insert(fmuMem); // store size of memory in first 8 bytes of fmu memory *(size_t*)(fmuMem) = modelInstance->m_fmuStateSize; // return newly created FMU mem *FMUstate = fmuMem; } else { // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(*FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2GetFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } } // now copy FMU state into memory array modelInstance->serializeFMUstate(*FMUstate); return fmi2OK; } fmi2Status fmi2SetFMUstate(void* c, fmi2FMUstate FMUstate) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2SetFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } // now copy FMU state into memory array if (!modelInstance->deserializeFMUstate(FMUstate)) return fmi2Error; return fmi2OK; } fmi2Status fmi2FreeFMUstate(void* c, fmi2FMUstate* FMUstate) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); if (FMUstate == NULL) { // similar to "delete NULL" this is a no-op return fmi2OK; } // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(*FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2FreeFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } // free memory free(*FMUstate); // and remove pointer from list of own fmu state pointers modelInstance->m_fmuStates.erase(*FMUstate); *FMUstate = NULL; // set pointer to zero return fmi2OK; } fmi2Status fmi2SerializedFMUstateSize(fmi2Component c, fmi2FMUstate FMUstate, size_t* s) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2FreeFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } // if the state of stored previously, then we must have a valid fmu size FMI_ASSERT(modelInstance->m_fmuStateSize != 0); // store size of memory to copy *s = 
modelInstance->m_fmuStateSize; return fmi2OK; } fmi2Status fmi2SerializeFMUstate(fmi2Component c, fmi2FMUstate FMUstate, fmi2Byte serializedState[], size_t /*s*/) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2FreeFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } // if the state of stored previously, then we must have a valid fmu size FMI_ASSERT(modelInstance->m_fmuStateSize != 0); // copy memory std::memcpy(serializedState, FMUstate, modelInstance->m_fmuStateSize); return fmi2OK; } fmi2Status fmi2DeSerializeFMUstate(void* c, const char serializedState[], size_t s, fmi2FMUstate* FMUstate) { (void)s; InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); // check if FMUstate is in list of stored FMU states if (modelInstance->m_fmuStates.find(FMUstate) == modelInstance->m_fmuStates.end()) { modelInstance->logger(fmi2Error, "logStatusError", "fmi2FreeFMUstate is called with invalid FMUstate (unknown or already released pointer)."); return fmi2Error; } // if the state of stored previously, then we must have a valid fmu size FMI_ASSERT(modelInstance->m_fmuStateSize == s); // copy memory std::memcpy(*FMUstate, serializedState, modelInstance->m_fmuStateSize); return fmi2OK; } /* Getting partial derivatives */ // 33 // optional possibility to evaluate partial derivatives for the FMU fmi2Status fmi2GetDirectionalDerivative(void* c, const unsigned int[], size_t, const unsigned int[], size_t, const double[], double[]) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); modelInstance->logger(fmi2Warning, "logStatusWarning", "fmi2GetDirectionalDerivative is called but not implemented"); return fmi2Warning; } /* Enter and exit the different modes */ // Model-Exchange only fmi2Status fmi2EnterEventMode(void* c){ InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); std::string text = "fmi2EnterEventMode: Enter into event mode."; modelInstance->logger(fmi2OK, "logAll", text.c_str()); return fmi2OK; } // Model-Exchange only fmi2Status fmi2NewDiscreteStates(void*, fmi2EventInfo* eventInfo) { eventInfo->newDiscreteStatesNeeded = false; return fmi2OK; } // Model-Exchange only fmi2Status fmi2EnterContinuousTimeMode(void* c) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); modelInstance->logger(fmi2OK, "logAll", "fmi2EnterContinuousTimeMode: Enter into continuous mode."); return fmi2OK; } // Model-Exchange only fmi2Status fmi2CompletedIntegratorStep (void* c, fmi2Boolean, fmi2Boolean* enterEventMode, fmi2Boolean* terminateSimulation) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); // Currently, we never enter Event mode *enterEventMode = false; modelInstance->logger(fmi2OK, "logAll", "Integrator step completed."); try { modelInstance->completedIntegratorStep(); } catch (std::exception & ex) { std::string err = ex.what(); err += "\nError in fmi2CompletedIntegratorStep()"; modelInstance->logger(fmi2Error, "logStatusError", err); *terminateSimulation = true; 
return fmi2Error; } *terminateSimulation = false; return fmi2OK; } /* Providing independent variables and re-initialization of caching */ // Sets a new time point // Model-Exchange only fmi2Status fmi2SetTime (void* c, fmi2Real time) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); std::stringstream strm; strm << "fmi2SetTime: Set time point: " << time << " s"; modelInstance->logger(fmi2OK, "logAll", strm.str()); // cache new time point modelInstance->m_tInput = time; modelInstance->m_externalInputVarsModified = true; return fmi2OK; } // Model-Exchange only fmi2Status fmi2SetContinuousStates(void* c, const fmi2Real x[], size_t nx) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); std::stringstream strm; strm << "fmi2SetContinuousStates: Setting continuous states with size " << nx << " with model size " << modelInstance->m_yInput.size(); modelInstance->logger(fmi2OK, "logAll", strm.str()); FMI_ASSERT(nx == modelInstance->m_yInput.size()); // cache input Y vector std::memcpy( &(modelInstance->m_yInput[0]), x, nx*sizeof(double) ); modelInstance->m_externalInputVarsModified = true; return fmi2OK; } /* Evaluation of the model equations */ // Model-Exchange only fmi2Status fmi2GetDerivatives(void* c, fmi2Real derivatives[], size_t nx) { InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(modelInstance->m_modelExchange); std::stringstream strm; strm << "fmi2GetDerivatives: Getting derivatives with size " << nx << " with model size " << modelInstance->m_ydot.size(); modelInstance->logger(fmi2OK, "logAll", strm.str()); // Update model state if any of the inputs have been modified. // Does nothing, if the model state is already up-to-date after a previous call // to updateIfModified(). 
	try {
		modelInstance->updateIfModified();
	}
	catch (std::exception & ex) {
		std::string err = ex.what();
		err += "\nfmi2GetDerivatives: Exception while updating model";
		modelInstance->logger(fmi2Error, "logStatusError", err);
		return fmi2Error;
	}
	// return derivatives currently cached in model
	std::memcpy( derivatives, &(modelInstance->m_ydot[0]), nx * sizeof(double) );
	return fmi2OK;
}

// Model-Exchange only
fmi2Status fmi2GetEventIndicators (void*, fmi2Real[], size_t){
	return fmi2OK;
}

// Model-Exchange only
fmi2Status fmi2GetContinuousStates(void* c, fmi2Real x[], size_t nx) {
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	FMI_ASSERT(modelInstance->m_modelExchange);
	std::stringstream strm;
	strm << "fmi2GetContinuousStates: Getting continuous states with size " << nx << " with model size " << modelInstance->m_yInput.size();
	modelInstance->logger(fmi2OK, "logAll", strm.str());
	FMI_ASSERT(nx == modelInstance->m_yInput.size());
	std::memcpy( x, &(modelInstance->m_yInput[0]), nx * sizeof(double) );
	return fmi2OK;
}

// Model-Exchange only
fmi2Status fmi2GetNominalsOfContinuousStates(void*, fmi2Real[], size_t) {
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2SetRealInputDerivatives(void*, const fmi2ValueReference vr[], size_t nvr, const fmi2Integer order[], const fmi2Real value[]) {
	(void)order; (void)value; (void)vr; (void)nvr;
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2GetRealOutputDerivatives(void*, const fmi2ValueReference vr[], size_t nvr, const fmi2Integer order[], fmi2Real value[]) {
	(void)order; (void)value; (void)vr; (void)nvr;
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2DoStep(void* c, double currentCommunicationPoint, double communicationStepSize, int noSetFMUStatePriorToCurrentPoint) {
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	FMI_ASSERT(!modelInstance->m_modelExchange);
	if (noSetFMUStatePriorToCurrentPoint == fmi2True) {
		modelInstance->clearBuffers();
	}
	//modelInstance->logger(fmi2OK, "logAll", IBK::FormatString("fmi2DoStep: %1 += %2").arg(currentCommunicationPoint).arg(communicationStepSize));
	// if currentCommunicationPoint < current time of integrator, restore
	try {
		modelInstance->integrateTo(currentCommunicationPoint + communicationStepSize);
	}
	catch (std::exception & ex) {
		std::string err = ex.what();
		err += "\nfmi2DoStep: Exception while integrating model";
		modelInstance->logger(fmi2Error, "logStatusError", err);
		return fmi2Error;
	}
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2CancelStep(void* c) {
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	FMI_ASSERT(!modelInstance->m_modelExchange);
	modelInstance->logger(fmi2OK, "logAll", "fmi2CancelStep: cancel current step.");
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2GetStatus(void* c, const fmi2StatusKind s, fmi2Status* value) {
	(void)s;(void)value;
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	FMI_ASSERT(!modelInstance->m_modelExchange);
	modelInstance->logger(fmi2OK, "logAll", "fmi2GetStatus: get current status.");
	return fmi2OK;
}

// CoSim only
fmi2Status fmi2GetRealStatus(void* c, const fmi2StatusKind s, fmi2Real* value) {
	(void)s;(void)value;
	InstanceData * modelInstance = static_cast<InstanceData*>(c);
	FMI_ASSERT(modelInstance != NULL);
	FMI_ASSERT(!modelInstance->m_modelExchange);
	modelInstance->logger(fmi2OK, "logAll", "fmi2GetRealStatus: get real status.");
	return fmi2OK;
}

// CoSim only
fmi2Status
fmi2GetIntegerStatus(void* c, const fmi2StatusKind s, fmi2Integer* value) { (void)s;(void)value; InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(!modelInstance->m_modelExchange); modelInstance->logger(fmi2OK, "logAll", "fmi2GetIntegerStatus: get integer status."); return fmi2OK; } // CoSim only fmi2Status fmi2GetBooleanStatus(void* c, const fmi2StatusKind s, fmi2Boolean* value) { (void)s;(void)value; InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(!modelInstance->m_modelExchange); modelInstance->logger(fmi2OK, "logAll", "fmi2GetBooleanStatus: get boolean status."); return fmi2OK; } // CoSim only fmi2Status fmi2GetStringStatus(void* c, const fmi2StatusKind s, fmi2String* value) { (void)s;(void)value; InstanceData * modelInstance = static_cast<InstanceData*>(c); FMI_ASSERT(modelInstance != NULL); FMI_ASSERT(!modelInstance->m_modelExchange); modelInstance->logger(fmi2OK, "logAll", "fmi2GetStringStatus: get string status."); return fmi2OK; }
After reaching +20000% in just two weeks on Bittrex, Monaco today launches its new mobile app to bring cryptocurrencies to every wallet. The Monaco app allows users to spend bitcoin and ether conveniently in everyday life. The app is available in the App Store and the Play Store. By downloading the app you can pre-register to reserve your Monaco card today.

Monaco released several cards in order to satisfy the needs of each customer. Here you can find a recap of the listed products. The most expensive one is the Obsidian Black, which has an unlimited Interbank Exchange Rates Limit and a $1,000 Free ATM Withdrawal Limit. The Precious Metal costs 500 MCO and has a $10,000 Interbank Exchange Rates Limit and an $800 Free ATM Withdrawal Limit. The Ruby Steel card costs 10 times less than the previous one, only 50 MCO, and consequently has lower limits for Interbank Exchange Rates and Free ATM Withdrawal. The last one, Midnight Blue, is FREE! It's a plastic card with all the basic features, no monthly or annual fees and free shipping. It's very difficult to find a competitor that offers a card with similar features at no cost.

The amazing features of the cards combined with the efficiency of the easy-to-use app make this the perfect solution for every need. The Monaco app can be described in these few lines:

– Industry leading credit card security
– Earn 0.75% cashback on ALL transactions with Monaco Platinum cards
– Up to 8% savings on exchanges compared to the high street banks
– Send and receive money with any Monaco user instantly with 0 fees
– Fund your card via bank transfer, credit card or cryptocurrencies like Bitcoin and Ether
– Spend your cryptocurrency like cash anywhere
– Customize notifications and alerts
– Next generation card security to instantly block or unblock your card with a tap
– 24/7 customer support
– Make and manage all your transactions seamlessly via the Monaco mobile app
– Absolutely positively no hidden fees

Read our presentation here, from when Monaco announced the launch of a new Ethereum and Bitcoin Debit Card.
CLASSIFICATION OF STRAWBERRY FRUIT SHAPE BY MACHINE LEARNING

Abstract. Shape is one of the most important traits of agricultural products due to its relationships with the quality, quantity, and value of the products. For strawberries, nine types of fruit shape were defined and classified by humans based on sample patterns of the nine types. In this study, we tested the classification of strawberry shapes by machine learning in order to increase classification accuracy and to introduce automated classification to this field. Four types of descriptors were extracted from digital images of strawberries: (1) the Measured Values (MVs), including the length of the contour line, the area, the fruit length and width, and the fruit width/length ratio; (2) the Ellipse Similarity Index (ESI); (3) Elliptic Fourier Descriptors (EFDs); and (4) Chain Code Subtraction (CCS). We used these descriptors for the classification test together with the random forest approach, and eight of the nine shape types were classified correctly with combinations of MVs + CCS + EFDs. CCS is a descriptor that adds human knowledge to the chain codes, and it showed higher robustness in classification than the other descriptors. Our results suggest that machine learning can classify fruit shapes accurately. We will attempt to increase the classification accuracy further and to apply these machine learning methods to other plant species.
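As a rough illustration of the pipeline the abstract describes (shape descriptors fed to a random forest), here is a minimal, self-contained sketch with synthetic descriptor vectors. scikit-learn stands in for whatever toolkit the authors used, the feature layout is invented, and the labels are random, so the score is only chance level; the point is the shape of the pipeline, not the result:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Synthetic stand-ins for the paper's descriptors: a few measured values
# (contour length, area, width/length ratio, ...) plus EFD/CCS coefficients.
n_samples, n_features, n_shape_types = 900, 20, 9
X = rng.normal(size=(n_samples, n_features))
y = rng.integers(0, n_shape_types, size=n_samples)   # the nine shape classes

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_train, y_train)
print("held-out accuracy:", clf.score(X_test, y_test))
```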
import { Router } from 'express';
import vFuncionario from '../validators/vFuncionario';
import cFuncionario from '../controllers/cFuncionario';

const funcionariosRoutes = Router();

funcionariosRoutes.post('/funcionarios', vFuncionario.validarCadastroFuncionario, cFuncionario.criarFuncionario);
funcionariosRoutes.get('/funcionarios', vFuncionario.validarBuscarFuncionarios, cFuncionario.buscarFuncionarios);

export default funcionariosRoutes;
/// Returns a module function bodies builder. pub fn code_section( &mut self, ) -> Result<(&ModuleResources, ModuleFunctionBodiesBuilder), String> { self.ensure_section_in_order(ModuleSection::FunctionBodies)?; let Self { res, bodies, .. } = self; let res = &*res; let builder = ModuleFunctionBodiesBuilder::new(res, bodies); Ok((res, builder)) }
package contagiongo

// import (
// 	"fmt"
// 	"testing"
// )

// func TestNewGenotype(t *testing.T) {
// 	defer func() {
// 		if err := recover(); err != nil {
// 			t.Fatalf(UnexpectedErrorWhileError, "calling NewGenotype constructor", err)
// 		}
// 	}()
// 	sequence := []uint8{0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1}
// 	NewGenotype(sequence)
// }

// func TestGenotype_Fitness(t *testing.T) {
// 	sites := 100
// 	fm := NeutralMultiplicativeFM(0, "m", sites, 2)
// 	genotype := NewGenotype(sampleSequence(sites))
// 	logFitness := genotype.Fitness(fm)
// 	if logFitness != 0.0 {
// 		t.Errorf(UnequalFloatParameterError, "log fitness", 0.0, logFitness)
// 	}
// 	// Fitness has been assigned during the first call
// 	// If fitness model has the same ID, then just recalls previous value.
// 	logFitness = genotype.Fitness(fm)
// 	if logFitness != 0.0 {
// 		t.Errorf(UnequalFloatParameterError, "log fitness", 0.0, logFitness)
// 	}
// }

// func TestEmptyGenotypeSet(t *testing.T) {
// 	defer func() {
// 		if err := recover(); err != nil {
// 			t.Fatalf(UnexpectedErrorWhileError, "calling EmptyGenotypeSet constructor", err)
// 		}
// 	}()
// 	EmptyGenotypeSet()
// }

// func TestGenotypeSet_Add(t *testing.T) {
// 	g := sampleGenotype()
// 	set := EmptyGenotypeSet()
// 	set.Add(g)
// 	if l := set.Size(); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "size of genotype set", 1, l)
// 	}
// 	set.Add(g)
// 	if l := set.Size(); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "size of genotype set", 1, l)
// 	}
// }

// func TestGenotypeSet_AddSequence(t *testing.T) {
// 	sequence := sampleSequence(1000)
// 	set := EmptyGenotypeSet()
// 	set.AddSequence(sequence)
// 	if l := set.Size(); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "size of genotype set", 1, l)
// 	}
// 	set.AddSequence(sequence)
// 	if l := set.Size(); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "size of genotype set", 1, l)
// 	}
// }

// func TestGenotypeSet_Remove(t *testing.T) {
// 	sequence := sampleSequence(1000)
// 	set := EmptyGenotypeSet()
// 	set.AddSequence(sequence)
// 	set.Remove(sequence)
// 	if l := set.Size(); l != 0 {
// 		t.Errorf(UnequalIntParameterError, "size of genotype set", 0, l)
// 	}
// }

// func TestNewGenotypeNode(t *testing.T) {
// 	defer func() {
// 		if err := recover(); err != nil {
// 			t.Fatalf(UnexpectedErrorWhileError, "calling NewGenotype constructor", err)
// 		}
// 	}()
// 	sequence := sampleSequence(1000)
// 	set := EmptyGenotypeSet()
// 	newGenotypeNode(sequence, set)
// }

// func TestNewGenotypeNode_Getters(t *testing.T) {
// 	set := EmptyGenotypeSet()
// 	p1 := newGenotypeNode(sampleSequence(1000), set)
// 	p2 := newGenotypeNode(sampleSequence(1000), set, p1)
// 	p3 := newGenotypeNode(sampleSequence(1000), set, p2)
// 	if l := len(p2.Parents()); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "number of parents", 1, l)
// 	}
// 	if n := p2.Parents()[0]; n.UID() != p1.UID() {
// 		t.Errorf(UnequalStringParameterError, "parent UID", fmt.Sprint(p1.UID()), fmt.Sprint(n.UID()))
// 	}
// 	if l := len(p2.Children()); l != 1 {
// 		t.Errorf(UnequalIntParameterError, "number of childen", 1, l)
// 	}
// 	if n := p2.Children()[0]; n.UID() != p3.UID() {
// 		t.Errorf(UnequalStringParameterError, "child UID", fmt.Sprint(p3.UID()), fmt.Sprint(n.UID()))
// 	}
// }

// func TestEmptyGenotypeTree(t *testing.T) {
// 	defer func() {
// 		if err := recover(); err != nil {
// 			t.Fatalf(UnexpectedErrorWhileError, "calling EmptyGenotypeSet constructor", err)
// 		}
// 	}()
// 	EmptyGenotypeTree()
// }

// func TestGenotypeTree_NewNode(t *testing.T) {
// 	tree :=
EmptyGenotypeTree() // sequence := sampleSequence(1000) // p1 := tree.NewNode(sequence, 0) // if l := tree.Set().Size(); l != 1 { // t.Errorf(UnequalIntParameterError, "size of genotype set", 1, l) // } // if l := len(tree.NodeMap()); l != 1 { // t.Errorf(UnequalIntParameterError, "size of genotype map", 1, l) // } // sequence[0] = 1 // tree.NewNode(sequence, 0, p1) // if l := tree.Set().Size(); l != 2 { // t.Errorf(UnequalIntParameterError, "size of genotype set", 2, l) // } // if l := len(tree.NodeMap()); l != 2 { // t.Errorf(UnequalIntParameterError, "size of genotype map", 2, l) // } // }
As millions of Americans file their income tax returns, their chances of getting audited by the IRS have rarely been so low. The number of people audited by the IRS in 2016 dropped for the sixth straight year, to just over 1 million. The last time so few people were audited was 2004. Since then, the U.S. has added about 30 million people.

The IRS blames budget cuts as money for the agency shrank from $12.2 billion in 2010 to $11.2 billion last year. Over that period, the agency has lost more than 17,000 employees, including nearly 7,000 enforcement agents. A little more than 80,000 people work at the IRS.

IRS Commissioner John Koskinen said budget cuts are costing the federal government between $4 billion and $8 billion a year in uncollected taxes. "We are the only agency if you give us more people and money, we give you more money back," Koskinen said in an interview.

So is it safe to cheat on your taxes? Not necessarily, according to tax experts. "I don't think it's open season for people to cheat," said Joseph Perry, a partner at the accounting firm Marcum. "I think there are a certain group of people that will always try to push the envelope to get away with things that they think they can get away with." As Koskinen put it: "If you're a taxpayer, you don't want to roll the roulette wheel and have the little white ball land on your number because then we're not very happy."

Most people don't have much of an opportunity to cheat on their taxes because the IRS collects a lot of information to verify taxpayers' finances. Employers report wages, banks report interest, brokerages report capital gains and lenders report mortgage interest.

In 2016, the number of people audited by the IRS dropped by 16 percent from the year before. Just 0.7 percent of individuals were audited, either in person or by mail. That's the lowest audit rate since 2003. The higher your income, the more likely you are to get audited. The IRS audited 1.7 percent of returns that reported more than $200,000 in income. Agents audited 5.8 percent of returns that reported more than $1 million in income. Both audit rates were steep declines from the year before.

The most well-known audit in Washington is one on President Donald Trump, who has cited it in refusing to release his tax returns. The IRS, however, has said an audit would not prevent an individual from releasing the returns. Corporate audits were down by 17 percent last year. Just 0.49 percent of corporations were audited, the lowest rate in at least a decade.

Republicans in Congress began cutting money at the IRS after they took control of both the House and Senate in the 2010 elections. They became more enthusiastic about the spending cuts after it became public that the agency had improperly singled out conservative political groups for extra scrutiny when they applied for tax-exempt status during the 2010 and 2012 elections. Koskinen was not at the IRS when the political groups were mistreated, but some Republicans in Congress have been unhappy with his cooperation in their investigations. When asked about IRS money, many Republican lawmakers inevitably mention the mistreatment of conservative groups. "Go look at all the areas where they've wasted money, mismanaged taxpayer resources," said Rep. Jim Jordan, R-Ohio. "Not to mention the fact that, you know, one of the reasons we went after them so hard is they did target people for their political views." Democrats argue that it's a costly move.
"You know, when somebody doesn't pay, that means that others pay more," said Rep. Richard Neal of Massachusetts, the top Democrat on tax-writing House Ways and Means Committee. "But I think there's no question that the majority (Republicans) here and apparently the president, they have had a target on the IRS." Tony Reardon, president of the National Treasury Employees Union, which represents IRS workers, questioned Trump's proposal to increase military spending by billions while the GOP targets the IRS. "The IRS collects 93 percent of our nation's revenue. You cannot increase defense spending and cut IRS funding at the same time. It does not add up," Reardon said. Most federal agencies are bracing for budget cuts under the Trump administration, though Koskinen said he is making the case that the IRS already "gave at the office." It's unclear, however, how much influence Koskinen will have in the Trump administration. He was appointed by President Barack Obama and his term ends in November. Treasury Secretary Steven Mnuchin offered the IRS a bone at his confirmation hearing when he acknowledged that adding agents would increase tax revenues. The department oversees the IRS, and Mnuchin was appointed by Trump. "I can assure you that the president-elect understands the concept of where we add people and we make money," Mnuchin said at the hearing, which was held before Trump's inauguration. "He'll get that completely. That's a very quick conversation with Donald Trump."
/** * Clear out the contents of the bag, both on disk and in memory. * Any attempts to read after this is called will produce undefined * results. */ @Override public void clear() { synchronized (mContents) { mContents.clear(); if (mSpillFiles != null) { for (int i = 0; i < mSpillFiles.size(); i++) { boolean res = mSpillFiles.get(i).delete(); if (!res) warn ("DefaultAbstractBag.clear: failed to delete " + mSpillFiles.get(i), PigWarning.DELETE_FAILED, null); } mSpillFiles.clear(); } mSize = 0; aggSampleTupleSize = 0; sampled = 0; } }
def partitions(self, on: str) -> Dict[str, pd.DataFrame]:
    partitioned_by = set(self._obj[on])
    if len(partitioned_by) < 2:
        raise errors.SnowFrameInternalError(
            msg=(
                f"Found one distinct value, '{list(partitioned_by)[0]}' "
                f"within '{on}' column of DataFrame. A minimum of 2 is required."
            )
        )
    base_partitions = {p: self._obj[self._obj[on] == p] for p in partitioned_by}
    return {p: df.drop(columns=[on]) for p, df in base_partitions.items()}
import { RuntimeType } from "RuntimeType"

const typecheck = (f: Function, type: RuntimeType) => {
    return (...varargs: any[]) => {
        let lastArg = null
        let currentType = type
        try {
            for (const argument of varargs) {
                lastArg = argument
                currentType = currentType.evaluate(argument)
                currentType = currentType.evaluate(f(argument))
            }
        } catch (e) {
            e.message = `${e.message} ${type.toString()} ${type.toUnderline()} Argument ${lastArg} must be a ${currentType.type}. `
            throw e
        }
    }
}

export default typecheck
package soccar.physics.models; import org.jbox2d.collision.shapes.PolygonShape; import org.jbox2d.dynamics.Body; import org.jbox2d.dynamics.BodyDef; import org.jbox2d.dynamics.FixtureDef; import soccar.physics.Game; /** * @author Marc */ public class Wall implements Updateable { private Body body; // The Box2D body private float width; private float height; public Wall(float x, float y, float width, float height, float angle) { this.width = width; this.height = height; PolygonShape ps = new PolygonShape(); ps.setAsBox(width / 2, height / 2); FixtureDef fd = new FixtureDef(); fd.friction = 0; fd.shape = ps; BodyDef bd = new BodyDef(); bd.position.set(x, y); bd.angle = (float) Math.toRadians(angle); body = Game.WORLD.createBody(bd); body.createFixture(fd); } public Body getBody() { return body; } public float getX() { return body.getPosition().x; } public float getY() { return body.getPosition().y; } public float getWidth() { return width; } public float getHeight() { return height; } public double getAngle() { return Math.toDegrees(body.getAngle()); } @Override public void update() { } }
// Spin off a thread to periodically clean up expired component locks. func (s *SmD) CompReservationCleanup() { go func() { for { xnames, err := s.db.DeleteCompReservationsExpired() if err != nil { s.LogAlways("CompReservationCleanup(): Lookup failure: %s", err) time.Sleep(10 * time.Second) } else { if len(xnames) > 0 { s.LogAlways("CompReservationCleanup(): Release %d expired component reservations for: %v", len(xnames), xnames) } time.Sleep(30 * time.Second) } } }() }
from typing import Any, Dict
from uuid import UUID

# BaseElement is assumed to be defined elsewhere in the project; it keeps a
# registry of constructors in BaseElement.factory keyed by the "type" field.

def simulation_decoder(dct: Dict[str, Any]) -> Dict[str, Any]:
    if "guid" in dct:
        dct["guid"] = UUID(dct["guid"])
    if "type" in dct and "data" in dct:
        factory = BaseElement.factory[dct["type"]]
        return factory(**dct["data"])
    return dct
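Decoders of this shape are typically passed to json.loads as an object_hook, so nested dicts are converted bottom-up as the parser unwinds. A minimal usage sketch (only the "guid" handling mirrors the function above; the helper name and payload are made up):

```python
import json
from uuid import UUID

def decode(dct):
    # same idea as simulation_decoder: rewrite recognized keys in place
    if "guid" in dct:
        dct["guid"] = UUID(dct["guid"])
    return dct

payload = '{"guid": "12345678-1234-5678-1234-567812345678", "name": "run-1"}'
obj = json.loads(payload, object_hook=decode)
assert isinstance(obj["guid"], UUID)
```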
/** Called when the activity is first created. */ @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); cx = (TextView)findViewById(R.id.txtcx); cy = (TextView)findViewById(R.id.txtcy); cz = (TextView)findViewById(R.id.txtcz); sx = (TextView)findViewById(R.id.txtsx); sy = (TextView)findViewById(R.id.txtsy); sz = (TextView)findViewById(R.id.txtsz); findViewById(R.id.Button01).setOnClickListener(new OnClickListener(){ @Override public void onClick(View v) { sx.setText(cx.getText()); sy.setText(cy.getText()); sz.setText(cz.getText()); } }); registerAccelerometer(); }
import { Component, OnInit } from '@angular/core'; import * as d3 from 'd3'; @Component({ selector: 'app-makingbarchart', templateUrl: '../../common/common.html', styleUrls: ['../../common/common.css'] }) export class MakingbarchartComponent implements OnInit { private code: string; constructor() { } ngOnInit() { // Width and height const w = 500; const h = 100; const barPadding = 1; const dataset = [5, 10, 13, 19, 21, 25, 22, 18, 15, 13, 11, 12, 15, 20, 18, 17, 16, 18, 23, 25]; // Create SVG element const svg = d3.select('.result') .append('svg') .attr('width', w) .attr('height', h); svg.selectAll('rect') .data(dataset) .enter() .append('rect') .attr('x', function (d, i) { return i * (w / dataset.length); }) .attr('y', function (d) { return h - (d * 4); }) .attr('width', w / dataset.length - barPadding) .attr('height', function (d) { return d * 4; }) .attr('fill', function (d) { return 'rgb(0, 0, ' + Math.round(d * 10) + ')'; }); svg.selectAll('text') .data(dataset) .enter() .append('text') .text(function (d) { return d; }) .attr('text-anchor', 'middle') .attr('x', function (d, i) { return i * (w / dataset.length) + (w / dataset.length - barPadding) / 2; }) .attr('y', function (d) { return h - (d * 4) + 14; }) .attr('font-family', 'sans-serif') .attr('font-size', '11px') .attr('fill', 'white'); this.code = `// Width and height const w = 500; const h = 100; const barPadding = 1; const dataset = [5, 10, 13, 19, 21, 25, 22, 18, 15, 13, 11, 12, 15, 20, 18, 17, 16, 18, 23, 25]; // Create SVG element const svg = d3.select('.result') .append('svg') .attr('width', w) .attr('height', h); svg.selectAll('rect') .data(dataset) .enter() .append('rect') .attr('x', function (d, i) { return i * (w / dataset.length); }) .attr('y', function (d) { return h - (d * 4); }) .attr('width', w / dataset.length - barPadding) .attr('height', function (d) { return d * 4; }) .attr('fill', function (d) { return 'rgb(0, 0, ' + Math.round(d * 10) + ')'; }); svg.selectAll('text') .data(dataset) .enter() .append('text') .text(function (d) { return d; }) .attr('text-anchor', 'middle') .attr('x', function (d, i) { return i * (w / dataset.length) + (w / dataset.length - barPadding) / 2; }) .attr('y', function (d) { return h - (d * 4) + 14; }) .attr('font-family', 'sans-serif') .attr('font-size', '11px') .attr('fill', 'white');`; } }
import numpy as np


class ErrorClass(object):
    def __init__(self, x_opt=None, f_opt=None):
        self.is_x_opt = x_opt is not None
        self.is_f_opt = f_opt is not None
        self.x_opt = x_opt
        self.f_opt = f_opt

    def function_error(self, grad_f, f):
        # The gradient norm is always available; the optimality gap
        # |f - f_opt| can only be computed when f_opt was supplied.
        error_2 = np.linalg.norm(grad_f)
        error_1 = -1
        if self.is_f_opt:
            error_1 = np.abs(f - self.f_opt)
        return [error_1, error_2]

    def x_error(self, x):
        x_error = -1
        if self.is_x_opt:
            x_error = np.linalg.norm(x - self.x_opt)
        return x_error
n = int(input())
ar = sorted(map(int, input().split()))
arr = []
used = [0] * n  # marks indices already placed

# Take every second element from the sorted low half (indices 0, 2, 4, ...),
# then append all remaining elements in descending order.
for i in range(n // 2):
    arr.append(ar[i * 2])
    used[i * 2] = 1
for i in range(n - 1, -1, -1):
    if used[i] != 1:
        arr.append(ar[i])
print(*arr)
def publish_results(self, dist_dir, use_basename_prefix, vt, bundle_dir, archivepath, id, archive_ext):
    name = vt.target.basename if use_basename_prefix else id
    bundle_copy = os.path.join(dist_dir, '{}-bundle'.format(name))
    absolute_symlink(bundle_dir, bundle_copy)
    self.context.log.info(
        'created bundle copy {}'.format(os.path.relpath(bundle_copy, get_buildroot())))

    if archivepath:
        ext = archive.archive_extensions.get(archive_ext, archive_ext)
        archive_copy = os.path.join(dist_dir, '{}.{}'.format(name, ext))
        safe_mkdir_for(archive_copy)
        atomic_copy(archivepath, archive_copy)
        self.context.log.info(
            'created archive copy {}'.format(os.path.relpath(archive_copy, get_buildroot())))
import * as path from 'path'; import { TextDocument } from 'vscode'; import { ReadonlyDocument } from '../../project/readOnlyDocument'; var assert = require('chai').assert; let project_path = path.join(__dirname + "\\..\\..\\..\\test_case\\pdfMarkups.lsp"); suite("ReadonlyDocument Tests", function () { let doc: ReadonlyDocument; test("Creating with: open()", function () { try { doc = ReadonlyDocument.open(project_path); assert.equal(doc.languageId, 'autolisp'); } catch (err) { assert.fail("Could not create document"); } }); test("Creating with: getMemoryDocument()", function () { try { doc = ReadonlyDocument.getMemoryDocument(doc as TextDocument); assert.equal(doc.languageId, 'autolisp'); } catch (err) { assert.fail("Could not create document"); } }); test("Creating with: createMemoryDocument()", function () { try { doc = ReadonlyDocument.createMemoryDocument(doc.getText(), 'autolisp'); assert.notEqual(doc.lines, 0); doc.fileName = project_path; } catch (err) { assert.fail("Could not create document"); } }); test("Generating: atomsForest", function () { try { assert.notEqual(doc.atomsForest.length, 0); } catch (err) { assert.fail("Could not get atomsForest value"); } }); });
def FindItForCrash(stacktrace_list, callstack, component_to_regression_dict, component_to_crash_revision_dict):
    if not component_to_regression_dict:
        result = GenerateAndFilterBlameList(callstack, component_to_crash_revision_dict, component_to_regression_dict)
        if result:
            return_message = ('Regression information is not available. The result is '
                              'the blame information.')
        else:
            return_message = ('Findit could not find any suspected CLs.')
        return (return_message, result)

    for stacktrace in stacktrace_list:
        if not stacktrace.stack_list:
            continue
        main_stack = stacktrace.GetCrashStack()
        components = ParseCrashComponents(main_stack)
        result_for_stacktrace = FindMatchForStacktrace(
            stacktrace, components, component_to_regression_dict)
        filtered_result = FilterAndGenerateReasonForMatches(result_for_stacktrace)
        if not filtered_result:
            continue
        return_message = (
            'The result is a list of CLs that change the crashed files.')
        return (return_message, filtered_result)

    result = GenerateAndFilterBlameList(
        callstack, component_to_crash_revision_dict, component_to_regression_dict)
    if result:
        return_message = ('No CL in the regression range changes the crashed files. '
                          'The result is the blame information.')
    else:
        return_message = ('Findit could not find any suspected CLs.')
    return (return_message, result)
def from_webmias(query, webmias):
    assert isinstance(query, Query)
    assert isinstance(webmias, WebMIaSIndex)

    response = webmias.query(query)
    assert isinstance(response, _Element)

    response_text = etree.tostring(response, pretty_print=True).decode("utf-8")
    assert isinstance(response_text, str)
    return ExecutedQuery(query, response_text)
def diff(
    self, datablock: T.ID, key: Union[int, str], datablock_property: T.Property, context: Context
) -> Optional[DeltaReplace]:
    if datablock is None:
        return DeltaReplace(DatablockRefProxy())

    value = read_attribute(datablock, key, datablock_property, None, context)
    assert isinstance(value, DatablockRefProxy)
    if value._datablock_uuid != self._datablock_uuid:
        return DeltaReplace(value)
    else:
        return None
// BUG: Diagnostic contains: remove @Bar1 // BUG: Diagnostic contains: remove @Bar2 public class TestClass2 { // BUG: Diagnostic contains: remove @Bar1 // BUG: Diagnostic contains: remove @Bar2 private int n; // BUG: Diagnostic contains: remove @Bar1 // BUG: Diagnostic contains: remove @Bar2 public TestClass2() {} // BUG: Diagnostic contains: remove @Bar1 // BUG: Diagnostic contains: remove @Bar2 public void setN( // BUG: Diagnostic contains: remove @Bar1 // BUG: Diagnostic contains: remove @Bar2 int n) {} }
/** * Method to add a node category. * * @param nodeCategory Node category to add. */ public void addNodeCategory(final String nodeCategory) { List<String> _nodeCategories = this.getNodeCategories(); boolean _contains = _nodeCategories.contains(nodeCategory); boolean _not = (!_contains); if (_not) { List<String> _nodeCategories_1 = this.getNodeCategories(); _nodeCategories_1.add(nodeCategory); } else { ConfigSpaceGenerator.LOGGER.info((nodeCategory + " node category already exists")); } }
import solveutils
import scramble
import cube.vectors as vc

BUFFERORDER = {
    "corner": ["UFR", "UFL", "UBL", "UBR", "DFR", "DFL", "DBR"],
    "edge": ["UF", "UB", "UR", "UL", "DF", "DB", "FR", "FL", "DR", "DL", "BR"],
}

PSEUDOS = {
    "UFR": ("UF", "UR"),
    "UFL": ("UF", "UL"),
    "UBR": ("UB", "UR"),
    "UBL": ("UB", "UL"),
    "DFR": ("DF", "DR"),
    "DFL": ("DF", "DL"),
    "DBR": ("DB", "DR"),
}


def solve(scram):
    cube = solveutils.Solver()
    cube = scramble.scramble(cube, scram)

    edge_buffers = iter(BUFFERORDER["edge"])
    corner_buffers = iter(BUFFERORDER["corner"])
    edge_buffer = vc.get_vector(next(edge_buffers))
    corner_buffer = vc.get_vector(next(corner_buffers))

    cyclebreaks = 0
    flips = 0
    twists = 0

    while cube.count_solved("corner") < 8:
        if cube.is_permuted(corner_buffer):
            if cube.is_solved(corner_buffer):
                if not cube.corner_parity:
                    corner_buffer = vc.get_vector(next(corner_buffers))
                continue
            try:
                cube.cycle_break(corner_buffer)
                cyclebreaks += 1
            except Exception:
                cube.flip_or_twist(corner_buffer)
                twists += 1
        else:
            cube.solve_piece(corner_buffer)

    if cube.corner_parity:
        a, b = PSEUDOS[vc.get_name(corner_buffer)]
        a, b = vc.get_vector(a), vc.get_vector(b)
        try:
            if cube.log['UFR'][-1] == 'BUL':
                # Use vectors here, not names: pseudoswap expects the same
                # vector type produced by the PSEUDOS lookup above.
                a = vc.get_vector('UB')
                b = vc.get_vector('UL')
        except (KeyError, IndexError):
            pass
        cube.pseudoswap(a, b)

    # AFTER THIS DONT EDIT SO WE CAN SAVE
    while cube.count_solved("edge") < 12:
        if cube.is_permuted(edge_buffer):
            if cube.is_solved(edge_buffer):
                if not cube.edge_parity:
                    edge_buffer = vc.get_vector(next(edge_buffers))
                continue
            try:
                cube.cycle_break(edge_buffer)
                cyclebreaks += 1
            except Exception:
                cube.flip_or_twist(edge_buffer)
                flips += 1
        else:
            cube.solve_piece(edge_buffer)

    values = list(map(list, (ele for ele in cube.log.values())))
    values = sum(values, [])
    algs = int(len(values) / 2) + len(list(values)) % 2
    corner_parity = 1 if cube.corner_parity else 0
    buffers = [x for x in cube.log.keys() if x not in {'twist', 'flip'}]
    cornerbuffers = [x for x in buffers if len(x) == 3]

    return algs, corner_parity, cyclebreaks, flips, twists, len(cornerbuffers)


def test():
    x = solve("U2 D F U B R2 L2 D B2 L2 B2 L' F2 D2 R2 U2 R' D2 L F2")
    print(x)


def main():
    data = []
    with open("scrambles.txt") as f:
        scrams = f.read().splitlines()
    for scram in scrams:
        data.append(list(solve(scram)))
    return data


if __name__ == "__main__":
    test()
package io.flowing.retail.kafka.order.process;

import java.util.Collections;
import java.util.Map;
import java.util.UUID;

import io.camunda.zeebe.spring.client.annotation.ZeebeWorker;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import io.flowing.retail.kafka.order.domain.Order;
import io.flowing.retail.kafka.order.process.payload.RetrievePaymentCommandPayload;
import io.flowing.retail.kafka.order.messages.Message;
import io.flowing.retail.kafka.order.messages.MessageSender;
import io.flowing.retail.kafka.order.persistence.OrderRepository;
import io.camunda.zeebe.client.api.response.ActivatedJob;

@Component
public class RetrievePaymentAdapter {

  @Autowired
  private MessageSender messageSender;

  @Autowired
  private OrderRepository orderRepository;

  @ZeebeWorker(type = "retrieve-payment", autoComplete = true)
  public Map<String, String> handle(ActivatedJob job) {
    OrderFlowContext context = OrderFlowContext.fromMap(job.getVariablesAsMap());
    Order order = orderRepository.findById(context.getOrderId()).get();

    // generate a UUID for this communication
    String correlationId = UUID.randomUUID().toString();

    messageSender.send( //
        new Message<RetrievePaymentCommandPayload>( //
            "RetrievePaymentCommand", //
            context.getTraceId(), //
            new RetrievePaymentCommandPayload() //
                .setRefId(order.getId()) //
                .setReason("order") //
                .setAmount(order.getTotalSum())) //
            .setCorrelationid(correlationId));

    return Collections.singletonMap("CorrelationId_RetrievePayment", correlationId);
  }
}
// MockCookieGenerator for further tests class MockCookieGenerator : public CookieGenerator { public: MockCookieGenerator() {} MOCK_CONST_METHOD1(GenerateCookie, StatusOr<string>(pid_t pid)); private: DISALLOW_COPY_AND_ASSIGN(MockCookieGenerator); }
def refresh(self): self.update_datetime() self.plane_name = self.choice_plane_var.get() self.inp_dt.update( from_date=self.from_date, to_date=self.to_date, plane_name=self.plane_name, var_to_update=self.choice_math_val.get() ) self.update_state_png(save=False) self.draw_cells() self.draw_colormap_bar(*self.cellmap.mapper.get_clim())
#include <bits/stdc++.h>
using namespace std;

int main()
{
	#ifndef ONLINE_JUDGE
	freopen("input.txt", "r", stdin);
	freopen("output.txt", "w", stdout);
	#endif

	int n;
	cin >> n;
	int arr1[n + 1], arr2[n + 1];
	for (int i = 1; i <= n; i++)
		cin >> arr1[i];
	for (int i = 1; i <= n; i++)
		cin >> arr2[i];

	// rota[v] = position of value v in the first permutation;
	// rotb[i] = cyclic shift needed to align arr2[i] with its position in arr1.
	int rota[n + 1], rotb[n + 1];
	unordered_map<int, int> mp;
	for (int i = 1; i <= n; i++)
		rota[arr1[i]] = i;
	for (int i = 1; i <= n; i++)
		rotb[i] = (rota[arr2[i]] - i + n) % n;
	for (int i = 1; i <= n; i++)
		mp[rotb[i]]++;

	// The answer is the count of the most common shift value.
	int ans = 0;
	for (auto x : mp)
		ans = max(ans, x.second);
	cout << ans << endl;
}
// It's not really easy to write a full up general tester for this. // Even the simplest commands on Linux have dozens of system calls. // The Go assembler should in principle let us write a 3 line assembly // program that just does an exit system call: // MOVQ $exit, RARG // SYSCALL // But that's for someone else to do :-) func TestNoCommandFail(t *testing.T) { Debug = t.Logf c, err := New() if err != nil { t.Fatal(err) } c.Raw = true go c.RunTracerFromCmd(exec.Command("hi", "/etc/hosts")) r := <-c.Records if r.Err == nil { t.Fatalf("Got nil, want a non-nil error") } }
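// A hedged sketch of the exit-syscall idea mentioned above, assuming
// linux/amd64 Plan 9 assembly syntax and syscall number 60 for exit; the
// file and function names are hypothetical and this is not wired into the
// tests.
//
//	exit_amd64.s:
//
//	#include "textflag.h"
//
//	// func exitNow(code int)
//	TEXT ·exitNow(SB), NOSPLIT, $0-8
//		MOVQ code+0(FP), DI // exit status
//		MOVQ $60, AX        // SYS_exit on linux/amd64
//		SYSCALL
//
// Tracing a binary built around such a stub should show only the single
// exit system call, which would make a much smaller test fixture than any
// real command.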
/**
 * Test ThriftClientConfigParser parse method to check whether it
 * reads the correct xml field values.
 *
 * @throws Exception
 */
@Test
public void testThriftClientConfigParser() throws Exception {
    URL configFileUrl = ThriftClientConfigParserTest.class.getResource("/thrift-client-config.xml");
    System.setProperty(ThriftClientConfig.THRIFT_CLIENT_CONFIG_FILE_PATH, configFileUrl.getPath());
    ThriftClientConfig thriftClientConfig = ThriftClientConfig.getInstance();
    assertEquals("Incorrect Username", "admin", thriftClientConfig.getThriftClientInfo().getUsername());
    assertEquals("Incorrect Password", "1234", thriftClientConfig.getThriftClientInfo().getPassword());
    assertEquals("Incorrect IP", "192.168.10.10", thriftClientConfig.getThriftClientInfo().getIp());
    assertEquals("Incorrect Port", "9300", thriftClientConfig.getThriftClientInfo().getPort());
}
/** * Check if the provided transfer is prepared for deletion. * * @param pTransfer The transfer to check. * * @return TRUE if the transfer is prepared for deletion. */ public boolean isTransferDeleted(ITransferInformation pTransfer) { boolean result; LOGGER.debug("Obtaining staging folder for transfer with id '{}'", pTransfer.getTransferId()); File stagingFolder = getLocalStagingFolder(pTransfer, getContext(pTransfer)); if (stagingFolder.exists()) { File deleteFile = new File(stagingFolder, Constants.STAGING_DELETED_FILENAME); result = deleteFile.exists(); } else { LOGGER.info("Staging folder was not found. Probably the transfer was already deleted."); result = true; } return result; }
#!/usr/bin/python3 import simplejson from loguru import logger from tornado.gen import coroutine from tornado.concurrent import run_on_executor from .BaseRequestHandler import BaseRequestHandler from utils import unloadRequestParams, Success from configs import default_get_response class Main(BaseRequestHandler): @coroutine def get(self): logger.info("[/] GET Request from: {}".format(self.request.remote_ip)) result = yield self.handleGet() self.response(result) @coroutine def post(self): logger.info("[/] POST Request from: {}".format(self.request.remote_ip)) result = yield self.handlePost() self.response(result) @run_on_executor def handleGet(self): if not self.request.arguments: return default_get_response result = unloadRequestParams(self.request.arguments) # do something with the data return Success(200, result) @run_on_executor def handlePost(self): data = simplejson.loads(self.request.body) # do something with the data return Success(200, data)
import string from typing import Literal from torch.utils.data import ConcatDataset, DataLoader, SequentialSampler from aligner.data.multi_source_sampler import RoundRobinMultiSourceSampler def _create_sample_data_loader(mode: Literal["min_size", "max_size_cycle"]) -> DataLoader: dataset1 = string.ascii_lowercase dataset2 = range(10) dataset = ConcatDataset([dataset1, dataset2]) # noqa sampler = RoundRobinMultiSourceSampler([SequentialSampler(dataset1), SequentialSampler(dataset2)], sequence_sizes=[4, 3], mode=mode) return DataLoader(dataset, sampler=sampler, batch_size=None) def test_multi_source_sampler_min_size() -> None: data_loader = _create_sample_data_loader(mode="min_size") expected_list = ["a", "b", "c", "d", 0, 1, 2, "e", "f", "g", "h", 3, 4, 5, "i", "j", "k", "l", 6, 7, 8, "m", "n", "o", "p", 9] assert len(data_loader) == len(expected_list) assert list(data_loader) == expected_list def test_multi_source_sampler_max_size_cycle() -> None: data_loader = _create_sample_data_loader(mode="max_size_cycle") expected_list = ["a", "b", "c", "d", 0, 1, 2, "e", "f", "g", "h", 3, 4, 5, "i", "j", "k", "l", 6, 7, 8, "m", "n", "o", "p", 9, 0, 1, "q", "r", "s", "t", 2, 3, 4, "u", "v", "w", "x", 5, 6, 7, "y", "z"] assert len(data_loader) == len(expected_list) assert list(data_loader) == expected_list
/*
 * @lc app=leetcode.cn id=305 lang=rust
 *
 * [305] Number of Islands II
 */

// @lc code=start
struct UnionFind {
    count: usize,
    sz: Vec<usize>,
    id: Vec<usize>,
    land: Vec<bool>,
}

impl UnionFind {
    pub fn new(size: usize) -> UnionFind {
        UnionFind {
            count: 0,
            sz: vec![1usize; size],
            id: (0usize..size).collect::<Vec<usize>>(),
            land: vec![false; size],
        }
    }

    pub fn add(&mut self, id: usize) {
        if !self.land[id] {
            self.land[id] = true;
            self.count += 1;
        }
    }

    pub fn connected(&self, p: usize, q: usize) -> bool {
        self.find(p) == self.find(q)
    }

    pub fn find(&self, mut p: usize) -> usize {
        while p != self.id[p] {
            p = self.id[p]
        }
        p
    }

    pub fn try_union(&mut self, p: usize, q: usize) {
        if self.land[p] && self.land[q] {
            let pid = self.find(p);
            let qid = self.find(q);
            if pid == qid {
                return;
            }
            // Union by size: attach the smaller tree under the larger one.
            if self.sz[pid] > self.sz[qid] {
                self.id[qid] = pid;
                self.sz[pid] += self.sz[qid];
            } else {
                self.id[pid] = qid;
                self.sz[qid] += self.sz[pid];
            }
            self.count -= 1;
        }
    }

    #[inline(always)]
    pub fn count(&self) -> usize {
        self.count
    }
}

const MOVEMENTS: [(isize, isize); 4] = [(1, 0), (-1, 0), (0, 1), (0, -1)];

impl Solution {
    #[inline(always)]
    fn pos2id(x: usize, y: usize, _rows: usize, cols: usize) -> usize {
        x * cols + y
    }

    pub fn num_islands2(m: i32, n: i32, positions: Vec<Vec<i32>>) -> Vec<i32> {
        let m = m as usize;
        let n = n as usize;
        let points = m * n;
        let mut counts = vec![];
        let mut uf = UnionFind::new(points);
        for pos in positions {
            let x = pos[0] as usize;
            let y = pos[1] as usize;
            let id = Solution::pos2id(x, y, m, n);
            uf.add(id);
            for movement in &MOVEMENTS {
                let next_x = x as isize + movement.0;
                let next_y = y as isize + movement.1;
                if next_x >= 0 && next_y >= 0 && next_x < (m as isize) && next_y < (n as isize) {
                    let next_id = Solution::pos2id(next_x as usize, next_y as usize, m, n);
                    uf.try_union(id, next_id);
                }
            }
            counts.push(uf.count() as i32);
        }
        counts
    }
}
// @lc code=end

struct Solution;

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_num_islands2() {
        let positions = [[0, 0], [0, 1], [1, 2], [2, 1]]
            .iter()
            .map(|x| x.to_vec())
            .collect::<Vec<Vec<i32>>>();
        let expected = vec![1, 1, 2, 3];
        assert_eq!(Solution::num_islands2(3, 3, positions), expected);
    }
}
/** * Resolver that uses the "on demand" import statements. */ public static class ImportOnDemandResolver extends AbstractResolver { private Set<String> importStmts; /** * Creates a {@link ImportOnDemandResolver} * * @param pmdClassLoader * the class loader to use * @param importStmts * the import statements */ public ImportOnDemandResolver(PMDASMClassLoader pmdClassLoader, Set<String> importStmts) { super(pmdClassLoader); this.importStmts = new HashSet<>(); for (final String stmt : importStmts) { if (stmt.endsWith("*")) { this.importStmts.add(stmt); } } } @Override public Class<?> resolve(String name) throws ClassNotFoundException { if (name == null) { throw new ClassNotFoundException(); } name = name.replace('.', '$'); for (String importStmt : importStmts) { final String fqClassName = new StringBuilder(importStmt.length() + name.length()).append(importStmt) .replace(importStmt.length() - 1, importStmt.length(), name).toString(); if (pmdClassLoader.couldResolve(fqClassName)) { try { return pmdClassLoader.loadClass(fqClassName); } catch (ClassNotFoundException ignored) { // ignored, we'll throw a custom exception later } } } throw new ClassNotFoundException("Type " + name + " not found"); } @Override public boolean couldResolve(String name) { name = name.replace('.', '$'); for (String importStmt : importStmts) { final String fqClassName = new StringBuilder(importStmt.length() + name.length()).append(importStmt) .replace(importStmt.length() - 1, importStmt.length(), name).toString(); // can any class be resolved / was never attempted? if (pmdClassLoader.couldResolve(fqClassName)) { return true; } } return false; } }
S = input()
T = input()

# Slide T across S and count how many characters already match at each
# offset; the answer is the number of characters of T that still differ
# at the best offset.
def similar(str1, str2):
    cnt = 0
    for i in range(len(str1)):
        if str1[i] == str2[i]:
            cnt += 1
    return cnt

max_value = 0
for i in range(0, len(S) - len(T) + 1):
    t = similar(S[i:len(T) + i], T)
    if t > max_value:
        max_value = t

print(len(T) - max_value)
#include <eepp/window/cengine.hpp> #include <eepp/system/cpackmanager.hpp> #include <eepp/system/cinifile.hpp> #include <eepp/graphics/ctexturefactory.hpp> #include <eepp/graphics/cfontmanager.hpp> #include <eepp/graphics/cglobalbatchrenderer.hpp> #include <eepp/graphics/cshaderprogrammanager.hpp> #include <eepp/graphics/ctextureatlasmanager.hpp> #include <eepp/graphics/cframebuffermanager.hpp> #include <eepp/graphics/cvertexbuffermanager.hpp> #include <eepp/ui/cuimanager.hpp> #include <eepp/audio/caudiolistener.hpp> #include <eepp/graphics/renderer/cgl.hpp> #include <eepp/helper/haikuttf/hkfontmanager.hpp> #include <eepp/physics/cphysicsmanager.hpp> #include <eepp/network/ssl/csslsocket.hpp> #include <eepp/window/cbackend.hpp> #include <eepp/window/backend/SDL/cbackendsdl.hpp> #include <eepp/window/backend/SDL2/cbackendsdl2.hpp> #include <eepp/window/backend/SFML/cbackendsfml.hpp> #define BACKEND_SDL 1 #define BACKEND_SDL2 2 #define BACKEND_SFML 3 #ifndef DEFAULT_BACKEND #if defined( EE_BACKEND_SDL2 ) #define DEFAULT_BACKEND BACKEND_SDL2 #elif defined( EE_BACKEND_SDL_1_2 ) #define DEFAULT_BACKEND BACKEND_SDL #elif defined( EE_BACKEND_SFML_ACTIVE ) #define DEFAULT_BACKEND BACKEND_SFML #endif #endif namespace EE { namespace Window { SINGLETON_DECLARE_IMPLEMENTATION(cEngine) cEngine::cEngine() : mBackend( NULL ), mWindow( NULL ), mSharedGLContext( false ), mMainThreadId( 0 ) { cTextureAtlasManager::CreateSingleton(); } cEngine::~cEngine() { Physics::cPhysicsManager::DestroySingleton(); Graphics::Private::cFrameBufferManager::DestroySingleton(); Graphics::Private::cVertexBufferManager::DestroySingleton(); cGlobalBatchRenderer::DestroySingleton(); cTextureFactory::DestroySingleton(); cTextureAtlasManager::DestroySingleton(); cFontManager::DestroySingleton(); UI::cUIManager::DestroySingleton(); Graphics::cGL::DestroySingleton(); cShaderProgramManager::DestroySingleton(); cPackManager::DestroySingleton(); cLog::DestroySingleton(); HaikuTTF::hkFontManager::DestroySingleton(); #ifdef EE_SSL_SUPPORT Network::SSL::cSSLSocket::End(); #endif Destroy(); eeSAFE_DELETE( mBackend ); } void cEngine::Destroy() { std::list<cWindow*>::iterator it; for ( it = mWindows.begin(); it != mWindows.end(); it++ ) { eeSAFE_DELETE( *it ); } mWindow = NULL; } Backend::cBackend * cEngine::CreateSDLBackend( const WindowSettings &Settings ) { #if defined( EE_SDL_VERSION_1_2 ) return eeNew( Backend::SDL::cBackendSDL, () ); #else return NULL; #endif } Backend::cBackend * cEngine::CreateSDL2Backend( const WindowSettings &Settings ) { #if defined( EE_SDL_VERSION_2 ) return eeNew( Backend::SDL2::cBackendSDL2, () ); #else return NULL; #endif } Backend::cBackend * cEngine::CreateSFMLBackend( const WindowSettings &Settings ) { #if defined( EE_BACKEND_SFML_ACTIVE ) return eeNew( Backend::SFML::cBackendSFML, () ); #else return NULL; #endif } cWindow * cEngine::CreateSDLWindow( const WindowSettings& Settings, const ContextSettings& Context ) { #if defined( EE_SDL_VERSION_1_2 ) if ( NULL == mBackend ) { mBackend = CreateSDLBackend( Settings ); } return eeNew( Backend::SDL::cWindowSDL, ( Settings, Context ) ); #else return NULL; #endif } cWindow * cEngine::CreateSDL2Window( const WindowSettings& Settings, const ContextSettings& Context ) { #if defined( EE_SDL_VERSION_2 ) if ( NULL == mBackend ) { mBackend = CreateSDL2Backend( Settings ); } return eeNew( Backend::SDL2::cWindowSDL, ( Settings, Context ) ); #else return NULL; #endif } cWindow * cEngine::CreateSFMLWindow( const WindowSettings& Settings, const ContextSettings& Context ) { 
#if defined( EE_BACKEND_SFML_ACTIVE ) if ( NULL == mBackend ) { mBackend = CreateSFMLBackend( Settings ); } return eeNew( Backend::SFML::cWindowSFML, ( Settings, Context ) ); #else return NULL; #endif } cWindow * cEngine::CreateDefaultWindow( const WindowSettings& Settings, const ContextSettings& Context ) { #if DEFAULT_BACKEND == BACKEND_SDL return CreateSDLWindow( Settings, Context ); #elif DEFAULT_BACKEND == BACKEND_SDL2 return CreateSDL2Window( Settings, Context ); #elif DEFAULT_BACKEND == BACKEND_SFML return CreateSFMLWindow( Settings, Context ); #endif } cWindow * cEngine::CreateWindow( WindowSettings Settings, ContextSettings Context ) { cWindow * window = NULL; if ( NULL != mWindow ) { Settings.Backend = mWindow->GetWindowInfo()->WindowConfig.Backend; } else { mMainThreadId = cThread::GetCurrentThreadId(); } switch ( Settings.Backend ) { case WindowBackend::SDL: window = CreateSDLWindow( Settings, Context ); break; case WindowBackend::SDL2: window = CreateSDL2Window( Settings, Context ); break; case WindowBackend::SFML: window = CreateSFMLWindow( Settings, Context ); break; case WindowBackend::Default: default: window = CreateDefaultWindow( Settings, Context ); break; } if ( NULL == window ) { window = CreateDefaultWindow( Settings, Context ); } if ( NULL == mWindow ) { mWindow = window; } mWindows.push_back( mWindow ); return window; } void cEngine::DestroyWindow( cWindow * window ) { mWindows.remove( window ); if ( window == mWindow ) { if ( mWindows.size() > 0 ) { mWindow = mWindows.back(); } else { mWindow = NULL; } } eeSAFE_DELETE( window ); } bool cEngine::ExistsWindow( cWindow * window ) { std::list<cWindow*>::iterator it; for ( it = mWindows.begin(); it != mWindows.end(); it++ ) { if ( (*it) == window ) return true; } return false; } cWindow * cEngine::GetCurrentWindow() const { return mWindow; } void cEngine::SetCurrentWindow( cWindow * window ) { if ( NULL != window && window != mWindow ) { mWindow = window; mWindow->SetCurrent(); } } Uint32 cEngine::GetWindowCount() const { return mWindows.size(); } bool cEngine::Running() const { return NULL != mWindow; } cTime cEngine::Elapsed() const { eeASSERT( Running() ); return mWindow->Elapsed(); } const Uint32& cEngine::GetWidth() const { eeASSERT( Running() ); return mWindow->GetWidth(); } const Uint32& cEngine::GetHeight() const { eeASSERT( Running() ); return mWindow->GetHeight(); } Uint32 cEngine::GetDefaultBackend() const { #if DEFAULT_BACKEND == BACKEND_SDL return WindowBackend::SDL; #elif DEFAULT_BACKEND == BACKEND_SDL2 return WindowBackend::SDL2; #elif DEFAULT_BACKEND == BACKEND_SFML return WindowBackend::SFML; #endif } WindowSettings cEngine::CreateWindowSettings( cIniFile * ini, std::string iniKeyName ) { eeASSERT ( NULL != ini ); ini->ReadFile(); int Width = ini->GetValueI( iniKeyName, "Width", 800 ); int Height = ini->GetValueI( iniKeyName, "Height", 600 ); int BitColor = ini->GetValueI( iniKeyName, "BitColor", 32); bool Windowed = ini->GetValueB( iniKeyName, "Windowed", true ); bool Resizeable = ini->GetValueB( iniKeyName, "Resizeable", true ); std::string Backend = ini->GetValue( iniKeyName, "Backend", "" ); Uint32 WinBackend = GetDefaultBackend(); String::ToLower( Backend ); if ( "sdl2" == Backend ) WinBackend = WindowBackend::SDL2; else if ( "sdl" == Backend ) WinBackend = WindowBackend::SDL; else if ( "sfml" == Backend ) WinBackend = WindowBackend::SFML; Uint32 Style = WindowStyle::Titlebar; if ( !Windowed ) Style |= WindowStyle::Fullscreen; if ( Resizeable ) Style |= WindowStyle::Resize; std::string Icon = 
ini->GetValue( iniKeyName, "WinIcon", "" );
	std::string Caption	= ini->GetValue( iniKeyName, "WinCaption", "" );

	WindowSettings WinSettings( Width, Height, Caption, Style, WinBackend, BitColor, Icon );

#if EE_PLATFORM == EE_PLATFORM_IOS
	//! @TODO: Check if SDL2 default win settings are being forced ( it wasn't working fine some time ago )
	WinSettings.Width	= 960;
	WinSettings.Height	= 640;
	WinSettings.Style	= WindowStyle::NoBorder;
#endif

	return WinSettings;
}

WindowSettings cEngine::CreateWindowSettings( std::string iniPath, std::string iniKeyName ) {
	cIniFile Ini( iniPath );

	return CreateWindowSettings( &Ini, iniKeyName );
}

ContextSettings cEngine::CreateContextSettings( cIniFile * ini, std::string iniKeyName ) {
	eeASSERT ( NULL != ini );

	ini->ReadFile();

	bool VSync = ini->GetValueB( iniKeyName, "VSync", true );
	std::string GLVersion = ini->GetValue( iniKeyName, "GLVersion", "0" );

	String::ToLower( GLVersion );

	EEGL_version GLVer;
	if ( "3" == GLVersion || "opengl 3" == GLVersion || "gl3" == GLVersion || "opengl3" == GLVersion )
		GLVer = GLv_3;
	else if ( "4" == GLVersion || "opengl es 2" == GLVersion || "gles2" == GLVersion || "opengles2" == GLVersion || "es2" == GLVersion )
		GLVer = GLv_ES2;
	else if ( "5" == GLVersion || "opengl 3 core profile" == GLVersion || "gl3cp" == GLVersion || "opengl3cp" == GLVersion || "opengl core profile" == GLVersion || "core profile" == GLVersion || "cp" == GLVersion )
		GLVer = GLv_3CP;
	else if ( "opengl es 1" == GLVersion || "gles1" == GLVersion || "gl es 1" == GLVersion || "opengl es1" == GLVersion || "opengles1" == GLVersion || "es1" == GLVersion || "gles 1" == GLVersion )
		GLVer = GLv_ES1;
	else if ( "2" == GLVersion || "opengl 2" == GLVersion || "gl2" == GLVersion || "gl 2" == GLVersion )
		GLVer = GLv_2;
	else
		GLVer = GLv_default;

	bool doubleBuffering = ini->GetValueB( iniKeyName, "DoubleBuffering", true );
	int depthBufferSize = ini->GetValueI( iniKeyName, "DepthBufferSize", 24 );
	int stencilBufferSize = ini->GetValueI( iniKeyName, "StencilBufferSize", 1 );

	return ContextSettings( VSync, GLVer, doubleBuffering, depthBufferSize, stencilBufferSize );
}

ContextSettings cEngine::CreateContextSettings( std::string iniPath, std::string iniKeyName ) {
	cIniFile Ini( iniPath );

	// Forward the section name too; previously iniKeyName was silently dropped here.
	return CreateContextSettings( &Ini, iniKeyName );
}

void cEngine::EnableSharedGLContext() {
	mSharedGLContext = true;
}

void cEngine::DisableSharedGLContext() {
	mSharedGLContext = false;
}

bool cEngine::IsSharedGLContextEnabled() {
	return mSharedGLContext;
}

Uint32 cEngine::GetMainThreadId() {
	return mMainThreadId;
}

}}
package processor

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_HostMatcher_Append(t *testing.T) {
	var rule = HostMatchRule{
		Host:       "example.com",
		HostFilter: "mytest.local",
	}

	var matcher = NewHostMatcher()
	assert.Equal(t, 0, len(matcher.rules), "HostMatcher.rules must be empty after create")

	matcher.Append(rule)
	assert.Equal(t, 1, len(matcher.rules), "HostMatcher.rules must have a single rule after add")
}

func Test_HostMatcher_Count(t *testing.T) {
	var rule = HostMatchRule{
		Host:       "example.com",
		HostFilter: "mytest.local",
	}

	var matcher = NewHostMatcher()
	assert.Equal(t, 0, matcher.Count(), "HostMatcher.Count() must be 0 after create")

	matcher.Append(rule)
	assert.Equal(t, 1, matcher.Count(), "HostMatcher.Count() must be 1 after add")
}

func Test_HostMatcher_GetHostName_NoPort(t *testing.T) {
	var matcher = NewHostMatcher()
	var request, _ = http.NewRequest(http.MethodGet, "http://localtest/add/item", nil)

	assert.Equal(t, "localtest", matcher.getHostName(*request))
}

func Test_HostMatcher_GetHostName_WithPort(t *testing.T) {
	var matcher = NewHostMatcher()
	var request, _ = http.NewRequest(http.MethodGet, "http://localtest:7080/add/item", nil)

	assert.Equal(t, "localtest", matcher.getHostName(*request))
}

func Test_MatchRule_MatchHost_MatchFull(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		HostFilter: "mytest.local",
	}

	assert.Equal(t, RULE_FILTER_HOST, rule.MatchHost("mytest.local"))
}

func Test_MatchRule_MatchHost_MatchPartial(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		HostFilter: "mytest.l",
	}

	assert.Equal(t, RULE_FILTER_HOST, rule.MatchHost("mytest.local"))
}

func Test_MatchRule_MatchHost_Match_None(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		HostFilter: "mytest.local",
	}

	assert.Equal(t, RULE_FILTER_NONE, rule.MatchHost("mytest.33"))
}

func Test_MatchRule_MatchPort_Match_Star(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		PortFilter: "*",
	}

	assert.Equal(t, RULE_FILTER_PORT, rule.MatchPort("8091"))
}

func Test_MatchRule_MatchPort_Match_Success(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		PortFilter: "8080",
	}

	assert.Equal(t, RULE_FILTER_PORT, rule.MatchPort("8080"))
}

func Test_MatchRule_MatchPort_Match_None(t *testing.T) {
	rule := HostMatchRule{
		Host:       "example.com",
		PortFilter: "8080",
	}

	assert.Equal(t, RULE_FILTER_NONE, rule.MatchPort("80"))
}
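// Hedged usage sketch for the matcher exercised above; it relies only on the
// API the tests touch (NewHostMatcher, Append, Count, MatchHost, MatchPort),
// and the rule values are hypothetical.
func ExampleHostMatcher_usage() {
	rule := HostMatchRule{Host: "example.com", HostFilter: "mytest.local", PortFilter: "*"}

	matcher := NewHostMatcher()
	matcher.Append(rule)
	_ = matcher.Count() // 1 after the Append above

	// A request to mytest.local:8080 passes both the host and port filters.
	if rule.MatchHost("mytest.local") == RULE_FILTER_HOST && rule.MatchPort("8080") == RULE_FILTER_PORT {
		// forward the request to rule.Host here
	}
}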
import { ISlashCommand, SlashCommandContext } from "@rocket.chat/apps-engine/definition/slashcommands";
import { SkypeCallingApp } from "../SkypeCallingApp";
import { IRead, IModify, IHttp } from "@rocket.chat/apps-engine/definition/accessors";
import { shareLink } from "../lib/ShareLink";

export class SkypeCallingSendCommand implements ISlashCommand {
    public command = 'skype-send-link';
    public i18nParamsExample = 'SkypeCallingSend_Command_Example';
    public i18nDescription = 'SkypeCallingSend_Command_Description';
    public providesPreview = false;

    constructor(private readonly app: SkypeCallingApp) {}

    public async executor(context: SlashCommandContext, read: IRead, modify: IModify, http: IHttp): Promise<void> {
        const skypeId = context.getArguments().slice().join(' ');
        // Encode the target URL so Skype IDs with special characters survive the query string.
        const target = encodeURIComponent(`skype:${skypeId}?call`);
        const result = await http.post(`https://is.gd/create.php?format=simple&url=${target}`);
        await shareLink(result.content, context, read, modify);
    }
}
import React from "react"; import NextLink from "next/link"; import styled from "@emotion/styled"; import { linkHoverEffect } from "./LongStoryExternalLink"; import smoothScrollTo from "../../../utils/smoothScrollTo"; import navLinks from "../../../data/navLinks"; const Link = styled.a` text-decoration: none; ${linkHoverEffect} `; const ProjectsScrollLink = ({ children }: { children?: React.ReactNode }) => { return ( <NextLink href={navLinks[1].href} passHref> {/* eslint-disable-next-line jsx-a11y/anchor-is-valid */} <Link onClick={() => smoothScrollTo(navLinks[1].scrollName)}> {children} </Link> </NextLink> ); }; export default ProjectsScrollLink;
package vib.app.module; import ij.ImagePlus; import vib.NaiveResampler; public class ResampleLabels extends Module { public String getName() { return "ResampleLabels"; } protected String getMessage() { return "Resampling label"; } protected void run(State state, int index) { if (index < 0) new Label().runOnAllImagesAndTemplate(state); prereqsDone(state, index); if (state.options.resamplingFactor == 1) return; String labelPath = state.getImagePath(-1, index); String resampledPath = state.getResampledPath(-1, index); if (state.upToDate(labelPath, resampledPath)) return; ImagePlus image = state.getImage(labelPath); ImagePlus resampled = NaiveResampler.resample(image, state.options.resamplingFactor); if(!state.save(resampled, resampledPath)) throw new RuntimeException("Could not save " + resampledPath); } }
def register(func: Callable, name: Optional[str] = None, sort_value: int = 0) -> Callable: package_name, _, plugin_name = func.__module__.rpartition(".") package_name = _PLUGINS["__aliases__"].get(package_name, package_name) file_path = pathlib.Path(sys.modules[func.__module__].__file__) _PLUGINS["__packages__"].setdefault(package_name, [package_name]) plugin_info = _PLUGINS.setdefault(package_name, dict()).setdefault(plugin_name, dict()) if name is None: name = func.__name__ plugin_info.setdefault("__parts__", list()).append(name) plugin = Plugin(f"{plugin_name}.{name}", func, file_path, sort_value) plugin_info[name] = plugin log.debug(f"Registering {plugin.name} ({plugin.file_path}) as a {package_name}-plugin") if "__parts__" in plugin_info: plugin_info["__default__"] = plugin_info[plugin_info["__parts__"][0]] return func
pub mod day_4_module {
    use std::fs;

    #[derive(Clone, Debug)]
    struct Board {
        board: Vec<Vec<i32>>,
    }

    /// Sum of all board numbers that have not been drawn yet.
    fn sum_board(numbers: &[i32], board: &Board) -> i32 {
        let mut total_sum = 0;
        for row in &board.board {
            let sum: i32 = row.iter().filter(|x| !numbers.contains(x)).sum();
            total_sum += sum;
        }
        total_sum
    }

    /// A board wins when any full row or column is contained in the drawn numbers.
    fn test_board(numbers: &[i32], board: &Board) -> bool {
        for row in &board.board {
            let count = row.len();
            let valid: usize = row.iter().filter(|x| numbers.contains(x)).count();
            if valid == count {
                return true;
            }
        }

        for i in 0..5 {
            let column: Vec<&i32> = board.board.iter().fold(Vec::new(), |acc, x| {
                let mut c = acc.clone();
                c.push(x.get(i).unwrap());
                c
            });
            let count = column.len();
            let valid: usize = column.iter().filter(|x| numbers.contains(x)).count();
            if valid == count {
                return true;
            }
        }
        false
    }

    pub fn run() {
        println!("day 4 exercise-----------");
        let content = fs::read_to_string("input4.txt").expect("Something went wrong");
        let items: Vec<&str> = content.split("\n").collect();

        // The first line holds the drawn numbers, comma separated.
        let chosen_number: Vec<i32> = items
            .first()
            .unwrap()
            .split(",")
            .map(|n| n.parse::<i32>().unwrap())
            .collect();

        let mut boards: Vec<Board> = Vec::new();
        let mut current_board: Board = Board { board: Vec::new() };

        println!("---- generating boards");
        // Boards start after the first blank line; a blank line separates boards.
        for item in items.iter().skip(2) {
            if item.len() == 0 {
                boards.push(current_board.clone());
                current_board = Board { board: Vec::new() };
            } else {
                let row: Vec<i32> = item
                    .split(" ")
                    .filter(|x| x.len() > 0)
                    .map(|x| x.parse::<i32>().unwrap())
                    .collect();
                current_board.board.push(row);
            }
        }
        boards.push(current_board.clone());

        println!("----- calculating winners {}", boards.len());
        'outer: for (index, _number) in chosen_number.iter().enumerate() {
            for board in boards.iter() {
                let current_numbers = &chosen_number[0..index];
                if test_board(current_numbers, board) {
                    let sum = sum_board(current_numbers, board);
                    let last_number = chosen_number.get(index - 1).unwrap();
                    println!("final result {} {} > {}", last_number, sum, (sum * last_number));
                    break 'outer;
                }
            }
        }
    }
}
New album, Mister Mellow, is out now via Stones Throw Records.

There’s no way I could describe Washed Out’s music better than the description on his Facebook page. Washed Out is Ernest Greene, a young guy from Georgia (via South Carolina) who makes bedroom synthpop that sounds blurred and woozily evocative, like someone smeared Vaseline all over an early OMD demo tape, then stayed up all night trying to recreate what they heard.

I spoke to Ernest about the irony of creating flawed, human music with computers and why it is important to him to continue to progress and develop as an artist.

Download: MP3 • iTunes • Stitcher

Show notes

- Quick fix, throwaway music culture
- “I’m jealous of songwriters who can create characters and alternative universes”
- Combating working alone/perspective
- Tricks to step outside of the process
- Plunderphonics / musique concrete
- An album of music as an entity
- Has to be tangibly made
- Worries of audience rejection
- “I have a hard time doing the same thing over and over”

Music by Ealadha. Sponsored by Indietracks and Vinylify (10% discount with code ’77HG3H’). Follow Overblown on Facebook and Twitter. Subscribe to receive email notification when each new episode of 'The Creative Process' is released every Monday.
package telegram

import (
	"bytes"
	"io"
	"log"
	"mime/multipart"
	"net"
	"path"
	"path/filepath"
	"strings"
	"time"

	json "github.com/json-iterator/go"
	"github.com/kirillDanshin/dlog"
	http "github.com/valyala/fasthttp"
)

// Bot represents a bot user with an access token obtained from @BotFather.
type Bot struct {
	*User
	AccessToken string
	Updates     UpdatesChannel

	client   *http.Client
	marshler json.API
}

// New creates a new default Bot structure based on the input access token.
func New(accessToken string) (b *Bot, err error) {
	b = new(Bot)
	b.marshler = json.ConfigFastest
	b.SetClient(&http.Client{})
	b.AccessToken = accessToken

	b.User, err = b.GetMe()

	return b, err
}

// SetClient allows setting a custom fasthttp.Client (for proxy traffic, for example).
func (b *Bot) SetClient(newClient *http.Client) {
	if b == nil {
		b = new(Bot)
	}

	b.client = newClient
}

func (b Bot) Do(method string, payload interface{}) ([]byte, error) {
	u := http.AcquireURI()
	defer http.ReleaseURI(u)
	u.SetScheme("https")
	u.SetHost("api.telegram.org")
	u.SetPath(path.Join("bot"+b.AccessToken, method))

	var buf bytes.Buffer
	if err := b.marshler.NewEncoder(&buf).Encode(payload); err != nil {
		return nil, err
	}

	req := http.AcquireRequest()
	defer http.ReleaseRequest(req)
	req.Header.SetUserAgent("toby3d/telegram")
	req.Header.SetMethod(http.MethodPost)
	req.Header.SetContentType("application/json")
	req.SetHostBytes(u.Host())
	req.SetRequestURI(u.String())
	req.SetBody(buf.Bytes())

	resp := http.AcquireResponse()
	defer http.ReleaseResponse(resp)

	if err := b.client.Do(req, resp); err != nil {
		return nil, err
	}

	return resp.Body(), nil
}

func (b Bot) Upload(method string, payload map[string]string, files ...*InputFile) ([]byte, error) {
	if len(files) == 0 {
		return b.Do(method, payload)
	}

	body := new(bytes.Buffer)
	w := multipart.NewWriter(body)

	for i := range files {
		_, fileName := filepath.Split(files[i].Attachment.Name())

		part, err := w.CreateFormFile(fileName, fileName)
		if err != nil {
			return nil, err
		}

		if _, err = io.Copy(part, files[i].Attachment); err != nil {
			return nil, err
		}
	}

	for key, val := range payload {
		if err := w.WriteField(key, val); err != nil {
			return nil, err
		}
	}

	if err := w.Close(); err != nil {
		return nil, err
	}

	u := http.AcquireURI()
	defer http.ReleaseURI(u)
	u.SetScheme("https")
	u.SetHost("api.telegram.org")
	u.SetPath(path.Join("bot"+b.AccessToken, method))

	req := http.AcquireRequest()
	defer http.ReleaseRequest(req)
	req.Header.SetMethod(http.MethodPost)
	req.SetRequestURIBytes(u.RequestURI())
	req.Header.SetContentType(w.FormDataContentType())
	req.Header.SetMultipartFormBoundary(w.Boundary())

	if _, err := body.WriteTo(req.BodyWriter()); err != nil {
		return nil, err
	}

	resp := http.AcquireResponse()
	defer http.ReleaseResponse(resp)

	if err := b.client.Do(req, resp); err != nil {
		return nil, err
	}

	return resp.Body(), nil
}

// IsMessageFromMe checks that the input message is a message from the current bot.
func (b Bot) IsMessageFromMe(m Message) bool {
	return b.User != nil && m.From != nil && m.From.ID == b.ID
}

// IsForwardFromMe checks that the input message is a forwarded message from the current bot.
func (b Bot) IsForwardFromMe(m Message) bool {
	return b.User != nil && m.IsForward() && m.ForwardFrom.ID == b.ID
}

// IsReplyToMe checks that the input message is a reply to the current bot.
func (b Bot) IsReplyToMe(m Message) bool {
	return m.Chat.IsPrivate() || (m.IsReply() && b.IsMessageFromMe(*m.ReplyToMessage))
}

// IsCommandToMe checks that the input message is a command for the current bot.
func (b Bot) IsCommandToMe(m Message) bool {
	if !m.IsCommand() {
		return false
	}

	if m.Chat.IsPrivate() {
		return true
	}

	parts := strings.Split(m.RawCommand(), "@")
	if len(parts) <= 1 {
		return false
	}

	return strings.EqualFold(parts[1], b.User.Username)
}

// IsMessageMentionsMe checks that the input message mentions the current bot.
func (b Bot) IsMessageMentionsMe(m Message) bool {
	if b.User == nil {
		return false
	}

	if b.IsCommandToMe(m) {
		return true
	}

	var entities []*MessageEntity

	switch {
	case m.HasMentions():
		entities = m.Entities
	case m.HasCaptionMentions():
		entities = m.CaptionEntities
	}

	for _, entity := range entities {
		if entity.IsMention() && entity.User.ID == b.ID {
			return true
		}
	}

	return false
}

// IsForwardMentionsMe checks that the input forwarded message mentions the current bot.
func (b Bot) IsForwardMentionsMe(m Message) bool {
	return m.IsForward() && b.IsMessageMentionsMe(m)
}

// IsReplyMentionsMe checks that the input message mentions the current bot.
func (b Bot) IsReplyMentionsMe(m Message) bool {
	return m.IsReply() && b.IsMessageMentionsMe(*m.ReplyToMessage)
}

// IsMessageToMe checks that the input message is addressed to the current bot.
func (b Bot) IsMessageToMe(m Message) bool {
	return m.Chat != nil && (m.Chat.IsPrivate() || b.IsCommandToMe(m) || b.IsReplyToMe(m) || b.IsMessageMentionsMe(m))
}

// NewFileURL creates a fasthttp.URI to a file with the path obtained from the GetFile method.
func (b Bot) NewFileURL(filePath string) *http.URI {
	if b.AccessToken == "" || filePath == "" {
		return nil
	}

	result := http.AcquireURI()
	result.SetScheme("https")
	result.SetHost("api.telegram.org")
	result.SetPath(path.Join("file", "bot"+b.AccessToken, filePath))

	return result
}

// NewRedirectURL creates a new fasthttp.URI for redirecting from one chat to another.
func (b Bot) NewRedirectURL(param string, group bool) *http.URI {
	if b.User == nil || b.User.Username == "" {
		return nil
	}

	link := http.AcquireURI()
	link.SetScheme("https")
	link.SetHost("t.me")
	link.SetPath(b.User.Username)

	q := link.QueryArgs()

	key := "start"
	if group {
		key += "group"
	}

	q.Set(key, param)
	link.SetQueryStringBytes(q.QueryString())

	return link
}

// NewLongPollingChannel creates a channel for receiving incoming updates using long polling.
func (b *Bot) NewLongPollingChannel(params *GetUpdates) UpdatesChannel {
	if params == nil {
		params = &GetUpdates{
			Offset:  0,
			Limit:   100,
			Timeout: 60,
		}
	}

	b.Updates = make(UpdatesChannel, params.Limit)

	go func() {
		for {
			updates, err := b.GetUpdates(params)
			if err != nil {
				dlog.Ln(err.Error())
				dlog.Ln("Failed to get updates, retrying in 3 seconds...")
				time.Sleep(time.Second * 3)

				continue
			}

			for _, update := range updates {
				if update.ID < params.Offset {
					continue
				}

				params.Offset = update.ID + 1
				b.Updates <- update
			}
		}
	}()

	return b.Updates
}

// NewWebhookChannel creates a channel for receiving incoming updates via an outgoing webhook. Returns the updates
// channel and a shutdown func.
//
// If the cert argument is provided by two strings (["path/to/cert.file", "path/to/cert.key"]), then a TLS server will
// be created from these filepaths.
func (b *Bot) NewWebhookChannel(u *http.URI, p SetWebhook, ln net.Listener, crt ...string) (UpdatesChannel, func() error) { b.Updates = make(UpdatesChannel, 100) // NOTE(toby3d): channel size by default GetUpdates.Limit parameter handleFunc := func(ctx *http.RequestCtx) { dlog.Ln("Request path:", string(ctx.Path())) if !bytes.HasPrefix(ctx.Path(), u.Path()) { dlog.Ln("Unsupported request path:", string(ctx.Path())) return } dlog.Ln("Catched supported request path:", string(ctx.Path())) upd := new(Update) if err := b.marshler.Unmarshal(ctx.Request.Body(), upd); err != nil { return } b.Updates <- upd } srv := http.Server{ Name: b.Username, Concurrency: p.MaxConnections, Handler: handleFunc, ReduceMemoryUsage: true, } go func() { var err error switch { case len(crt) == 2: dlog.Ln("Creating TLS router...") err = srv.ServeTLS(ln, crt[0], crt[1]) default: dlog.Ln("Creating simple router...") err = srv.Serve(ln) } if err != nil { log.Fatalln(err.Error()) } }() if _, err := b.SetWebhook(p); err != nil { log.Fatalln(err.Error()) } return b.Updates, srv.Shutdown }
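// Hedged usage sketch for the long-polling channel above: the token is a
// placeholder, and the Message field on Update is assumed from this
// package's other helpers.
//
//	bot, err := New("123456:ABC-DEF_hypothetical-token")
//	if err != nil {
//		log.Fatalln(err.Error())
//	}
//
//	updates := bot.NewLongPollingChannel(nil) // nil selects the defaults above
//	for update := range updates {
//		if update.Message != nil && bot.IsMessageToMe(*update.Message) {
//			// handle the message here
//		}
//	}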
#include <eputils.h>
#include <fx2regs.h>
#include <fx2macros.h>
#include <delay.h>
#include <gpif.h>

#include <fx2lafw.h>
#include <gpif-acquisition.h>

__bit gpif_acquiring;

static void gpif_reset_waveforms(void)
{
	int i;

	/* Reset WAVEDATA. */
	AUTOPTRSETUP = 0x03;
	AUTOPTRH1 = 0xe4;
	AUTOPTRL1 = 0x00;

	for (i = 0; i < 128; i++)
		EXTAUTODAT1 = 0;
}

static void gpif_setup_registers(void)
{
	/* TODO. Value probably irrelevant, as we don't use RDY* signals? */
	GPIFREADYCFG = 0;

	/*
	 * Set TRICTL = 0, thus CTL0-CTL5 are CMOS outputs.
	 * TODO: Probably irrelevant, as we don't use CTL0-CTL5?
	 */
	GPIFCTLCFG = 0;

	/* When GPIF is idle, tri-state the data bus. */
	/* Bit 7: DONE, bit 0: IDLEDRV. TODO: Set/clear DONE bit? */
	GPIFIDLECS = (0 << 0);

	/* When GPIF is idle, set CTL0-CTL5 to 0. */
	GPIFIDLECTL = 0;

	/*
	 * Map index 0 in WAVEDATA to FIFORD. The rest is assigned too,
	 * but not used by us.
	 *
	 * GPIFWFSELECT: [7:6] = SINGLEWR index, [5:4] = SINGLERD index,
	 * [3:2] = FIFOWR index, [1:0] = FIFORD index
	 */
	GPIFWFSELECT = (uint8_t)(0x3 << 6) | (uint8_t)(0x2 << 4) | (uint8_t)(0x1 << 2) | (uint8_t)(0x0 << 0);

	/* Contains RDY* pin values. Read-only according to TRM. */
	GPIFREADYSTAT = 0;

	/* Make GPIF stop on transaction count not flag. */
	EP2GPIFPFSTOP = (0 << 0);
}

static void gpif_init_addr_pins(void)
{
	/*
	 * Configure the 9 GPIF address pins (GPIFADR[8:0], which consist of
	 * PORTC[7:0] and PORTE[7]), and output an initial address (zero).
	 * TODO: Probably irrelevant, the 56pin FX2 has no ports C and E.
	 */
	PORTCCFG = 0xff;    /* Set PORTC[7:0] as alt. func. (GPIFADR[7:0]). */
	OEC = 0xff;         /* Configure PORTC[7:0] as outputs. */
	PORTECFG |= 0x80;   /* Set PORTE[7] as alt. func. (GPIFADR[8]). */
	OEE |= 0x80;        /* Configure PORTE[7] as output. */

	SYNCDELAY();
	GPIFADRL = 0x00;    /* Clear GPIFADR[7:0]. */
	SYNCDELAY();
	GPIFADRH = 0x00;    /* Clear GPIFADR[8]. */
}

static void gpif_init_flowstates(void)
{
	/* Clear all flowstate registers, we don't use this functionality. */
	FLOWSTATE = 0;
	FLOWLOGIC = 0;
	FLOWEQ0CTL = 0;
	FLOWEQ1CTL = 0;
	FLOWHOLDOFF = 0;
	FLOWSTB = 0;
	FLOWSTBEDGE = 0;
	FLOWSTBHPERIOD = 0;
}

void gpif_init_la(void)
{
	/*
	 * Setup the FX2 in GPIF master mode, using the external clock
	 * (not inverted), and using async sampling.
	 */
	IFCONFIG = 0x4e; //0xee

	/* Abort currently executing GPIF waveform (if any). */
	GPIFABORT = 0xff;

	/* Setup the GPIF registers. */
	gpif_setup_registers();

	/* Reset WAVEDATA. */
	gpif_reset_waveforms();

	/* Initialize GPIF address pins, output initial values. */
	gpif_init_addr_pins();

	/* Initialize flowstate registers (not used by us). */
	gpif_init_flowstates();

	/* Reset the status. */
	gpif_acquiring = FALSE;
}

static void gpif_make_delay_state(volatile BYTE *pSTATE, uint8_t delay)
{
	/*
	 * DELAY
	 * Delay cmd->sample_delay clocks.
	 */
	pSTATE[0] = delay;

	/*
	 * OPCODE
	 * SGL=0, GIN=0, INCAD=0, NEXT=0, DATA=0, DP=0
	 * Collect data in this state.
	 */
	pSTATE[8] = 0x00;

	/*
	 * OUTPUT
	 * OE[0:3]=0, CTL[0:3]=0
	 */
	pSTATE[16] = 0x00;

	/*
	 * LOGIC FUNCTION
	 * Not used.
	 */
	pSTATE[24] = 0x00;
}

static void gpid_make_data_dp_state(volatile BYTE *pSTATE)
{
	/*
	 * BRANCH
	 * Branch to IDLE if condition is true, back to S5 otherwise.
	 * re-execute
	 */
	pSTATE[0] = (uint8_t)(7 << 3) | (uint8_t)(5 << 0) | (uint8_t)(1 << 7);

	/*
	 * OPCODE
	 * SGL=0, GIN=0, INCAD=0, NEXT=0, DATA=1, DP=1
	 */
	pSTATE[8] = (1 << 1) | (1 << 0);

	/*
	 * OUTPUT
	 * OE[0:3]=0, CTL[0:3]=0
	 */
	pSTATE[16] = 0x00;

	/*
	 * LOGIC FUNCTION
	 * Evaluate if the FIFO full flag is set.
	 * LFUNC=0 (AND), TERMA=6 (FIFO Flag), TERMB=6 (FIFO Flag)
	 */
	pSTATE[24] = (6 << 3) | (6 << 0);
}

bool gpif_acquisition_start(const struct cmd_start_acquisition *cmd)
{
	volatile BYTE *pSTATE = &GPIF_WAVE_DATA;

	/* Ensure GPIF is idle before reconfiguration. */
	while (!(GPIFTRIG & 0x80));

	/* Configure the EP2 FIFO. */
	if (cmd->flags & CMD_START_FLAGS_SAMPLE_16BIT) {
		EP2FIFOCFG = bmAUTOIN | bmWORDWIDE;
	} else {
		EP2FIFOCFG = bmAUTOIN;
	}
	SYNCDELAY();

	/* Set IFCONFIG to the correct clock source. */
	if (cmd->flags & CMD_START_FLAGS_INV_CLK) {
		IFCONFIG = 0x5e;
	}

	/* Populate delay states S0-S4 (a delay of 0 wraps to 256 ticks). */
	gpif_make_delay_state(pSTATE++, 0); // 256 ticks delay
	gpif_make_delay_state(pSTATE++, 0); // 256 ticks delay
	gpif_make_delay_state(pSTATE++, 0); // 256 ticks delay
	gpif_make_delay_state(pSTATE++, 0); // 256 ticks delay
	gpif_make_delay_state(pSTATE++, 0); // 256 ticks delay

	/* Populate S5 - the decision point. */
	gpid_make_data_dp_state(pSTATE++);

	/* Execute the whole GPIF waveform once. */
	gpif_set_tc16(1);

	/* Perform the initial GPIF read. */
	gpif_fifo_read(GPIF_EP2);

	/* Update the status. */
	gpif_acquiring = TRUE;

	return true;
}

void gpif_poll(void)
{
	/* Detect if acquisition has completed. */
	if (gpif_acquiring && (GPIFTRIG & 0x80)) {
		/* Activate NAK-ALL to avoid race conditions. */
		FIFORESET = 0x80;
		SYNCDELAY();

		/* Switch to manual mode. */
		EP2FIFOCFG = 0;
		SYNCDELAY();

		/* Reset EP2. */
		FIFORESET = 0x02;
		SYNCDELAY();

		/* Return to auto mode. */
		EP2FIFOCFG = bmAUTOIN;
		SYNCDELAY();

		/* Release NAK-ALL. */
		FIFORESET = 0x00;
		SYNCDELAY();

		gpif_acquiring = FALSE;
	}
}
// Copyright 2019-2023 The Liqo Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package discovery import ( "context" "time" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" discoveryv1alpha1 "github.com/liqotech/liqo/apis/discovery/v1alpha1" discoveryPkg "github.com/liqotech/liqo/pkg/discovery" foreignclusterutils "github.com/liqotech/liqo/pkg/utils/foreignCluster" ) func (discovery *Controller) startGarbageCollector(ctx context.Context) { for { select { case <-time.After(30 * time.Second): _ = discovery.collectGarbage(ctx) case <-ctx.Done(): return } } } // The GarbageCollector deletes all ForeignClusters discovered with LAN and WAN that have expired TTL. func (discovery *Controller) collectGarbage(ctx context.Context) error { req, err := labels.NewRequirement(discoveryPkg.DiscoveryTypeLabel, selection.In, []string{ string(discoveryPkg.LanDiscovery), }) utilruntime.Must(err) var fcs discoveryv1alpha1.ForeignClusterList if err := discovery.List(ctx, &fcs, &client.ListOptions{ LabelSelector: labels.NewSelector().Add(*req), }); err != nil { klog.Error(err) return err } for i := range fcs.Items { if foreignclusterutils.IsExpired(&fcs.Items[i]) { klog.V(4).Infof("delete foreignCluster %v (TTL expired)", fcs.Items[i].Name) klog.Infof("delete foreignCluster %v", fcs.Items[i].Name) if err := discovery.Delete(ctx, &fcs.Items[i]); err != nil { klog.Error(err) continue } } } return nil }
UCLA cornerback Fabian Moreau, who suffered a chest injury during the school's pro day on Tuesday, had surgery Wednesday evening to repair a torn pectoral muscle, a source told ESPN. The defensive back suffered the injury while working on the bench press. NFL Network first reported the news. Fabian Moreau should be ready for training camp despite having surgery for a torn pectoral muscle. Ric Tapia/Icon SMI Moreau, projected to be in the first 40 picks of this year's NFL draft, is expected to be fully recovered by the start of training camps in late July. He will continue to take pre-draft visits. Moreau is the second top cornerback to suffer a significant injury during a pro day workout this year. Sidney Jones, the top projected cornerback, suffered a torn Achilles tendon during the University of Washington's pro day a few weeks ago. He underwent surgery earlier this week and tweeted that his doctor told him he should be able to return at some point in the 2017 season.
string=input() if len(string)==0: print(string) else: A=[] for i in range(len(string)//2): A.append(string[i*2]) A.append(string[len(string)-1]) A.sort() print("+".join(A))
78 Healthcare workers and bloodborne pathogen exposure incidents

Introduction
Healthcare workers are at risk of infection caused by bloodborne pathogens, particularly hepatitis B (HBV), hepatitis C (HCV) and human immunodeficiency virus (HIV) due to sharps injuries and skin and mucous membrane contacts with blood or other potentially infectious materials. Our aim was to evaluate the reporting, management and consequences of bloodborne pathogen exposure incidents in healthcare workers.

Methods
The study included all healthcare workers of the largest University Medical Centre in Slovenia (UMCL) who reported bloodborne incidents and were treated from 1 January 2008 to 31 December 2016 according to the guidelines. The data were collected from medical records.

Results
The average number of employed healthcare workers was 5492. The mean incidence rate of annually reported and treated incidents was 2.22 per 100 health workers. Average annual injuries incidence rates were the highest at the Dental Clinic (9.83 per 100), Department of Surgery (2.86 per 100) and Department of Internal Medicine (2.25 per 100). Incidents occurred most frequently in nurse’s aides (5.79 per 100), followed by doctors (2.28 per 100) and nurses (1.69 per 100). The most common were sharps injuries (1.93 per 100), followed by contact of eye (0.11 per 100) and skin (0.04 per 100) with blood. The most frequent cause was contact when disposing of used needles (39.83%). Incidents most commonly happened on Fridays. Approximately 81% of exposed workers were vaccinated against HBV before the incident. Among the reported cases, one became HBsAg positive after the incident, while none of them was anti-HCV or anti-HIV reactive during the follow-up.

Discussion
More work-related interventions are needed to prevent bloodborne incidents among healthcare workers. Therefore, we are developing an educational campaign to raise awareness of the importance of prevention, reporting and treating bloodborne exposure incidents and vaccination against HBV.
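As a rough consistency check (my own back-of-envelope arithmetic, not a figure from the abstract): a mean rate of 2.22 incidents per 100 workers across an average of 5492 employees corresponds to roughly 5492 × 2.22 / 100 ≈ 122 reported and treated incidents per year.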
import redis

r = redis.StrictRedis(host='X.X.X.X', port=XXXX, db=0, password='<PASSWORD>')
print r

# TODO: Add verification of access_key, user and cluster to be onboarded
access_key = ""
user = ""
target_cluster = ""

userkey = access_key + "_user_agl"
userhash = {
    "epoch": "21",
    "version": "1.0",
    "target_write_cluster": target_cluster,
    "user_name": user,
    "placement_policy": "Default",
    "bootstrap_cluster": target_cluster
}

print "Updating User -->", user
print "Updating User Key as -->", userkey
print "Updating hash as -->"
print userhash

res = r.hmset(userkey, userhash)
print "Updating User Result-->", res
print "Exiting"
I Said No

Research is an always already whole-self endeavour. As researchers we do not get to choose what parts of us to leave behind at home when we go to work; this is especially clear in the doing of fieldwork. Additionally, what happens in “the field” does not stay there. In fact that is the point. We move between fieldwork and reflection at varying intervals; it is through this corrugated process that research emerges. Research institutions need to recognize and provide appropriate preparation and support systems for researchers when their work takes them outside of the institutions’ walls. What follows is an account of the fieldwork experience that led me to think about these dynamics of research and a window into those thoughts.
/**
 * Copyright (c) 2017-present, Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once

#include <chrono>
#include <cstddef>
#include <vector>

#include <folly/dynamic.h>

#include "logdevice/common/Sockaddr.h"

namespace facebook { namespace logdevice { namespace configuration {

class ZookeeperConfig {
 public:
  // maximum length of cluster name string
  constexpr static const size_t MAX_CLUSTER_NAME = 127;

  // URI scheme for ip-based quorum specifications
  constexpr static const char* URI_SCHEME_IP = "ip";

  explicit ZookeeperConfig() {}

  ZookeeperConfig(std::vector<Sockaddr> quorum,
                  std::chrono::milliseconds session_timeout)
      : quorum_(std::move(quorum)),
        session_timeout_(session_timeout),
        uri_scheme_(URI_SCHEME_IP),
        quorum_string_(makeQuorumString(quorum_)),
        zookeeper_properties_(folly::dynamic::object) {}

  ZookeeperConfig(std::vector<Sockaddr> quorum,
                  std::chrono::milliseconds session_timeout,
                  std::string uri_scheme,
                  std::string quorum_string,
                  folly::dynamic properties)
      : quorum_(std::move(quorum)),
        session_timeout_(session_timeout),
        uri_scheme_(std::move(uri_scheme)),
        quorum_string_(std::move(quorum_string)),
        zookeeper_properties_(std::move(properties)) {}

  ZookeeperConfig(std::string quorum_string,
                  std::string uri_scheme,
                  std::chrono::milliseconds session_timeout,
                  folly::dynamic properties = folly::dynamic::object)
      : session_timeout_(session_timeout),
        uri_scheme_(uri_scheme),
        quorum_string_(quorum_string),
        zookeeper_properties_(std::move(properties)) {}

  /**
   * @return A comma-separated list of ip:ports of the ZK servers
   */
  const std::string& getQuorumString() const {
    return quorum_string_;
  }

  /**
   * @return The session timeout in milliseconds
   */
  std::chrono::milliseconds getSessionTimeout() const {
    return session_timeout_;
  }

  /**
   * @return The quorum as an unsorted vector
   * DEPRECATED
   */
  const std::vector<Sockaddr>& getQuorum() const {
    return quorum_;
  }

  /**
   * @return The scheme of the URI used in the zookeeper_uri property.
   *
   * The scheme indicates the syntax of the address part of the URI. In other
   * words the scheme indicates the format of the quorum string used to
   * initialize Zookeeper clients.
   * Currently, the only supported scheme is "ip". The associated syntax of
   * a quorum is a comma-separated list of IP address and port pairs. For
   * instance, a valid URI may look like the following:
   * "ip://1.2.3.4:2181,192.168.127.12:2181,192.168.127.12:2181"
   */
  const std::string& getUriScheme() const {
    return uri_scheme_;
  }

  /**
   * @return The URI resolving to the configured zookeeper ensemble.
   */
  std::string getZookeeperUri() const {
    return uri_scheme_ + "://" + quorum_string_;
  }

  /**
   * @return The properties that were left out by the parser
   *         as a map of key/value pairs
   */
  const folly::dynamic& getProperties() const {
    return zookeeper_properties_;
  }

  /**
   * @return Zookeeper config as folly dynamic suitable to be serialized as
   *         JSON in the main config.
   */
  folly::dynamic toFollyDynamic() const;

  static std::unique_ptr<ZookeeperConfig>
  fromJson(const folly::dynamic& parsed);

  /**
   * Equality operator to compare two Zookeeper config objects
   */
  bool operator==(const ZookeeperConfig& other) const;

 private:
  static std::string makeQuorumString(const std::vector<Sockaddr>& quorum);

  // Addresses of all ZK servers in the quorum we use to store and increment
  // next epoch numbers for logs
  const std::vector<Sockaddr> quorum_;

  // session timeout to pass to zookeeper_init
  std::chrono::milliseconds session_timeout_{0};

  std::string uri_scheme_;
  std::string quorum_string_;

  folly::dynamic zookeeper_properties_;
};

}}} // namespace facebook::logdevice::configuration
package cronsifter

import (
	"bytes"
	"fmt"
	"os"
	"path"
	"time"
)

// Simple logger for writing to a file. This is heavily
// based on https://github.com/siddontang/go-log.

// SimpleLogger is a basic logging struct that provides log rotating.
type SimpleLogger struct {
	file      *os.File
	filename  string
	maxBytes  int
	fileCount int
}

// NewSimpleLogger takes the filename, maxBytes and fileCount to create a
// SimpleLogger. maxBytes and fileCount are used to determine when to rotate
// the files.
func NewSimpleLogger(filename string, maxBytes, fileCount int) (*SimpleLogger, error) {
	dir := path.Dir(filename)
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}

	sl := &SimpleLogger{filename: filename, maxBytes: maxBytes, fileCount: fileCount}

	var err error
	sl.file, err = os.OpenFile(sl.filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return nil, err
	}

	return sl, nil
}

// Write the given bytes to the file, prefixed with a timestamp and terminated
// with a newline. Rotation errors are deliberately ignored so that a failed
// rotation does not drop the log line.
func (l *SimpleLogger) Write(b []byte) (int, error) {
	l.rotate()

	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("%s: ", time.Now().Format("Mon Jan 2 15:04:05 -0700 MST 2006")))
	buf.Write(b)
	if len(b) > 0 && b[len(b)-1] != '\n' {
		buf.WriteString("\n")
	}

	return l.file.Write(buf.Bytes())
}

// Close the SimpleLogger's file handle.
func (l *SimpleLogger) Close() error {
	if l.file != nil {
		return l.file.Close()
	}
	return nil
}

func (l *SimpleLogger) rotate() error {
	f, err := l.file.Stat()
	if err != nil {
		return err
	}

	if l.maxBytes <= 0 {
		return fmt.Errorf("Max Bytes needs to be greater than 0 instead of: %d", l.maxBytes)
	}

	if f.Size() < int64(l.maxBytes) {
		return nil
	}

	if l.fileCount > 0 {
		l.file.Close()

		// Shift file.1 -> file.2, ..., then the live file -> file.1.
		for i := l.fileCount - 1; i > 0; i-- {
			prev := fmt.Sprintf("%s.%d", l.filename, i)
			next := fmt.Sprintf("%s.%d", l.filename, i+1)
			os.Rename(prev, next)
		}

		next := fmt.Sprintf("%s.1", l.filename)
		os.Rename(l.filename, next)

		var err error
		l.file, err = os.OpenFile(l.filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
		if err != nil {
			return err
		}
	}

	return nil
}
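A minimal usage sketch for the logger above (the import path is hypothetical; adjust it to wherever this package actually lives): rotate at 1 MiB, keeping three old files.

package main

import (
	"log"

	"example.com/cronsifter" // hypothetical import path
)

func main() {
	sl, err := cronsifter.NewSimpleLogger("logs/app.log", 1<<20, 3)
	if err != nil {
		log.Fatal(err)
	}
	defer sl.Close()

	// Written as "<timestamp>: job started\n".
	sl.Write([]byte("job started"))
}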
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ // Base-class & helper classes for testing RewriteContext and its // interaction with various subsystems. #include "net/instaweb/rewriter/public/rewrite_context_test_base.h" #include "base/logging.h" #include "net/instaweb/http/public/http_cache.h" #include "net/instaweb/http/public/logging_proto_impl.h" #include "net/instaweb/rewriter/cached_result.pb.h" #include "net/instaweb/rewriter/input_info.pb.h" #include "net/instaweb/rewriter/public/output_resource.h" #include "net/instaweb/rewriter/public/rewrite_options.h" #include "net/instaweb/rewriter/public/rewrite_result.h" #include "pagespeed/kernel/base/function.h" #include "pagespeed/kernel/base/gtest.h" #include "pagespeed/kernel/base/stl_util.h" #include "pagespeed/kernel/http/google_url.h" #include "pagespeed/kernel/http/http_names.h" #include "pagespeed/kernel/http/response_headers.h" #include "pagespeed/kernel/thread/mock_scheduler.h" namespace net_instaweb { const char TrimWhitespaceRewriter::kFilterId[] = "tw"; const char TrimWhitespaceSyncFilter::kFilterId[] = "ts"; const char UpperCaseRewriter::kFilterId[] = "uc"; const char NestedFilter::kFilterId[] = "nf"; const char CombiningFilter::kFilterId[] = "cr"; // This is needed to prevent link error due to EXPECT_EQ on this field in // RewriteContextTest::TrimFetchHashFailedShortTtl. const int64 RewriteContextTestBase::kLowOriginTtlMs; TrimWhitespaceRewriter::~TrimWhitespaceRewriter() { } bool TrimWhitespaceRewriter::RewriteText(const StringPiece& url, const StringPiece& in, GoogleString* out, ServerContext* server_context) { LOG(INFO) << "Trimming whitespace."; ++num_rewrites_; TrimWhitespace(in, out); return in != *out; } HtmlElement::Attribute* TrimWhitespaceRewriter::FindResourceAttribute( HtmlElement* element) { if (element->keyword() == HtmlName::kLink) { return element->FindAttribute(HtmlName::kHref); } return NULL; } TrimWhitespaceSyncFilter::~TrimWhitespaceSyncFilter() { } void TrimWhitespaceSyncFilter::StartElementImpl(HtmlElement* element) { if (element->keyword() == HtmlName::kLink) { HtmlElement::Attribute* href = element->FindAttribute(HtmlName::kHref); if (href != NULL) { GoogleUrl gurl(driver()->google_url(), href->DecodedValueOrNull()); href->SetValue(StrCat(gurl.Spec(), ".pagespeed.ts.0.css")); } } } UpperCaseRewriter::~UpperCaseRewriter() { } NestedFilter::~NestedFilter() { } NestedFilter::Context::~Context() { STLDeleteElements(&strings_); } void NestedFilter::Context::RewriteSingle( const ResourcePtr& input, const OutputResourcePtr& output) { ++filter_->num_top_rewrites_; // Assume that this file just has nested CSS URLs one per line, // which we will rewrite. 
StringPieceVector pieces; SplitStringPieceToVector(input->ExtractUncompressedContents(), "\n", &pieces, true); GoogleUrl base(input->url()); if (base.IsWebValid()) { // Add a new nested multi-slot context. for (int i = 0, n = pieces.size(); i < n; ++i) { GoogleUrl url(base, pieces[i]); if (url.IsWebValid()) { bool unused; ResourcePtr resource(Driver()->CreateInputResource( url, RewriteDriver::InputRole::kUnknown, &unused)); if (resource.get() != NULL) { ResourceSlotPtr slot(new NestedSlot(resource)); RewriteContext* nested_context = filter_->upper_filter()->MakeNestedRewriteContext(this, slot); AddNestedContext(nested_context); nested_slots_.push_back(slot); // Test chaining of a 2nd rewrite on the same slot, if asked. if (chain_) { RewriteContext* nested_context2 = filter_->upper_filter()->MakeNestedRewriteContext(this, slot); AddNestedContext(nested_context2); } } } } // TODO(jmarantz): start this automatically. This will be easier // to do once the states are kept more explicitly via a refactor. StartNestedTasks(); } } void NestedFilter::Context::Harvest() { RewriteResult result = kRewriteFailed; GoogleString new_content; if (filter_->check_nested_rewrite_result_) { for (int i = 0, n = nested_slots_.size(); i < n; ++i) { EXPECT_EQ(filter_->expected_nested_rewrite_result(), nested_slots_[i]->was_optimized()); } } CHECK_EQ(1, num_slots()); for (int i = 0, n = num_nested(); i < n; ++i) { CHECK_EQ(1, nested(i)->num_slots()); ResourceSlotPtr slot(nested(i)->slot(0)); ResourcePtr resource(slot->resource()); StrAppend(&new_content, resource->url(), "\n"); } // Warning: this uses input's content-type for simplicity, but real // filters should not do that --- see comments in // CacheExtender::RewriteLoadedResource as to why. if (Driver()->Write(ResourceVector(1, slot(0)->resource()), new_content, slot(0)->resource()->type(), slot(0)->resource()->charset(), output(0).get())) { result = kRewriteOk; } RewriteDone(result, 0); } void NestedFilter::StartElementImpl(HtmlElement* element) { HtmlElement::Attribute* attr = element->FindAttribute(HtmlName::kHref); if (attr != NULL) { bool unused; ResourcePtr resource = CreateInputResource( attr->DecodedValueOrNull(), RewriteDriver::InputRole::kUnknown, &unused); if (resource.get() != NULL) { ResourceSlotPtr slot(driver()->GetSlot(resource, element, attr)); // This 'new' is paired with a delete in RewriteContext::FinishFetch() Context* context = new Context(driver(), this, chain_); context->AddSlot(slot); driver()->InitiateRewrite(context); } } } CombiningFilter::CombiningFilter(RewriteDriver* driver, MockScheduler* scheduler, int64 rewrite_delay_ms) : RewriteFilter(driver), scheduler_(scheduler), num_rewrites_(0), num_render_(0), num_will_not_render_(0), num_cancel_(0), rewrite_delay_ms_(rewrite_delay_ms), rewrite_block_on_(NULL), rewrite_signal_on_(NULL), on_the_fly_(false), optimization_only_(true), disable_successors_(false) { ClearStats(); } CombiningFilter::~CombiningFilter() { } CombiningFilter::Context::Context(RewriteDriver* driver, CombiningFilter* filter, MockScheduler* scheduler) : RewriteContext(driver, NULL, NULL), combiner_(driver, filter), scheduler_(scheduler), time_at_start_of_rewrite_us_(scheduler_->timer()->NowUs()), filter_(filter) { combiner_.set_prefix(filter_->prefix_); } bool CombiningFilter::Context::Partition(OutputPartitions* partitions, OutputResourceVector* outputs) { MessageHandler* handler = Driver()->message_handler(); CachedResult* partition = partitions->add_partition(); for (int i = 0, n = num_slots(); i < n; ++i) { if 
(!slot(i)->resource()->IsSafeToRewrite(rewrite_uncacheable()) ||
        !combiner_.AddResourceNoFetch(slot(i)->resource(), handler).value) {
      return false;
    }
    // This should be called after checking IsSafeToRewrite, since
    // AddInputInfoToPartition requires the resource to be loaded()
    slot(i)->resource()->AddInputInfoToPartition(
        Resource::kIncludeInputHash, i, partition);
  }
  OutputResourcePtr combination(combiner_.MakeOutput());

  // MakeOutput can fail if for example there is only one input resource.
  if (combination.get() == NULL) {
    return false;
  }

  // ResourceCombiner provides us with a pre-populated CachedResult,
  // so we need to copy it over to our CachedResult.  This is
  // less efficient than having ResourceCombiner work with our
  // cached_result directly but this allows code-sharing as we
  // transition to the async flow.
  combination->UpdateCachedResultPreservingInputInfo(partition);
  DisableRemovedSlots(partition);
  outputs->push_back(combination);
  return true;
}

void CombiningFilter::Context::Rewrite(int partition_index,
                                       CachedResult* partition,
                                       const OutputResourcePtr& output) {
  if (filter_->rewrite_signal_on_ != NULL) {
    filter_->rewrite_signal_on_->Notify();
  }
  if (filter_->rewrite_block_on_ != NULL) {
    filter_->rewrite_block_on_->Wait();
  }
  if (filter_->rewrite_delay_ms() == 0) {
    DoRewrite(partition_index, partition, output);
  } else {
    int64 wakeup_us = time_at_start_of_rewrite_us_ +
                      1000 * filter_->rewrite_delay_ms();
    Function* closure = MakeFunction(
        this, &Context::DoRewrite, partition_index, partition, output);
    scheduler_->AddAlarmAtUs(wakeup_us, closure);
  }
}

void CombiningFilter::Context::DoRewrite(int partition_index,
                                         CachedResult* partition,
                                         OutputResourcePtr output) {
  ++filter_->num_rewrites_;
  // resource_combiner.cc calls WriteCombination as part
  // of Combine.  But if we are being called on behalf of a
  // fetch then the resource still needs to be written.
  RewriteResult result = kRewriteOk;
  if (!output->IsWritten()) {
    ResourceVector resources;
    for (int i = 0, n = num_slots(); i < n; ++i) {
      ResourcePtr resource(slot(i)->resource());
      resources.push_back(resource);
    }
    if (!combiner_.Write(resources, output)) {
      result = kRewriteFailed;
    }
  }
  RewriteDone(result, partition_index);
}

void CombiningFilter::Context::Render() {
  ++filter_->num_render_;
  // Slot 0 will be replaced by the combined resource as part of
  // rewrite_context.cc.  But we still need to delete slots 1-N.
for (int p = 0, np = num_output_partitions(); p < np; ++p) { DisableRemovedSlots(output_partition(p)); } } void CombiningFilter::Context::WillNotRender() { ++filter_->num_will_not_render_; } void CombiningFilter::Context::Cancel() { ++filter_->num_cancel_; } void CombiningFilter::Context::DisableRemovedSlots( const CachedResult* partition) { if (filter_->disable_successors_) { slot(0)->set_disable_further_processing(true); } for (int i = 1; i < partition->input_size(); ++i) { int slot_index = partition->input(i).index(); slot(slot_index)->RequestDeleteElement(); } } void CombiningFilter::StartElementImpl(HtmlElement* element) { if (element->keyword() == HtmlName::kLink) { HtmlElement::Attribute* href = element->FindAttribute(HtmlName::kHref); if (href != NULL) { bool unused; ResourcePtr resource(CreateInputResource( href->DecodedValueOrNull(), RewriteDriver::InputRole::kUnknown, &unused)); if (resource.get() != NULL) { if (context_.get() == NULL) { context_.reset(new Context(driver(), this, scheduler_)); } context_->AddElement(element, href, resource); } } } } const int64 RewriteContextTestBase::kRewriteDeadlineMs; RewriteContextTestBase::~RewriteContextTestBase() { } void RewriteContextTestBase::SetUp() { trim_filter_ = NULL; other_trim_filter_ = NULL; combining_filter_ = NULL; nested_filter_ = NULL; // The default deadline set in RewriteDriver is dependent on whether // the system was compiled for debug, or is being run under valgrind. // However, the unit-tests here use mock-time so we want to set the // deadline explicitly. options()->set_rewrite_deadline_ms(kRewriteDeadlineMs); other_options()->set_rewrite_deadline_ms(kRewriteDeadlineMs); RewriteTestBase::SetUp(); EXPECT_EQ(kRewriteDeadlineMs, rewrite_driver()->rewrite_deadline_ms()); EXPECT_EQ(kRewriteDeadlineMs, other_rewrite_driver()->rewrite_deadline_ms()); } void RewriteContextTestBase::TearDown() { rewrite_driver()->WaitForShutDown(); RewriteTestBase::TearDown(); } void RewriteContextTestBase::InitResourcesToDomain(const char* domain) { ResponseHeaders default_css_header; SetDefaultLongCacheHeaders(&kContentTypeCss, &default_css_header); int64 now_ms = http_cache()->timer()->NowMs(); default_css_header.SetDateAndCaching(now_ms, kOriginTtlMs); default_css_header.ComputeCaching(); // trimmable SetFetchResponse(StrCat(domain, "a.css"), default_css_header, " a "); // not trimmable SetFetchResponse(StrCat(domain, "b.css"), default_css_header, "b"); SetFetchResponse(StrCat(domain, "c.css"), default_css_header, "a.css\nb.css\n"); // not trimmable, low ttl. ResponseHeaders low_ttl_css_header; SetDefaultLongCacheHeaders(&kContentTypeCss, &low_ttl_css_header); low_ttl_css_header.SetDateAndCaching(now_ms, kLowOriginTtlMs); low_ttl_css_header.ComputeCaching(); low_ttl_css_header.Add(HttpAttributes::kContentType, "text/css"); SetFetchResponse(StrCat(domain, "d.css"), low_ttl_css_header, "d"); // trimmable, low ttl. SetFetchResponse(StrCat(domain, "e.css"), low_ttl_css_header, " e "); // trimmable, with charset. 
ResponseHeaders encoded_css_header; server_context()->SetDefaultLongCacheHeaders( &kContentTypeCss, "koi8-r", StringPiece(), &encoded_css_header); SetFetchResponse(StrCat(domain, "a_ru.css"), encoded_css_header, " a = \xc1 "); // trimmable, private ResponseHeaders private_css_header; private_css_header.set_major_version(1); private_css_header.set_minor_version(1); private_css_header.SetStatusAndReason(HttpStatus::kOK); private_css_header.SetDateAndCaching(now_ms, kOriginTtlMs, ",private"); private_css_header.Add(HttpAttributes::kContentType, "text/css"); private_css_header.ComputeCaching(); SetFetchResponse(StrCat(domain, "a_private.css"), private_css_header, " a "); // trimmable, no-cache ResponseHeaders no_cache_css_header; no_cache_css_header.set_major_version(1); no_cache_css_header.set_minor_version(1); no_cache_css_header.SetStatusAndReason(HttpStatus::kOK); no_cache_css_header.SetDateAndCaching(now_ms, 0, ",no-cache"); no_cache_css_header.Add(HttpAttributes::kContentType, "text/css"); no_cache_css_header.ComputeCaching(); SetFetchResponse(StrCat(domain, "a_no_cache.css"), no_cache_css_header, " a "); // trimmable, no-transform ResponseHeaders no_transform_css_header; no_transform_css_header.set_major_version(1); no_transform_css_header.set_minor_version(1); no_transform_css_header.SetStatusAndReason(HttpStatus::kOK); no_transform_css_header.SetDateAndCaching(now_ms, kOriginTtlMs, ",no-transform"); no_transform_css_header.Add(HttpAttributes::kContentType, "text/css"); no_transform_css_header.ComputeCaching(); SetFetchResponse(StrCat(domain, "a_no_transform.css"), no_transform_css_header, " a "); // trimmable, no-cache, no-store ResponseHeaders no_store_css_header; no_store_css_header.set_major_version(1); no_store_css_header.set_minor_version(1); no_store_css_header.SetStatusAndReason(HttpStatus::kOK); no_store_css_header.SetDateAndCaching(now_ms, 0, ",no-cache,no-store"); no_store_css_header.Add(HttpAttributes::kContentType, "text/css"); no_store_css_header.ComputeCaching(); SetFetchResponse(StrCat(domain, "a_no_store.css"), no_store_css_header, " a "); } void RewriteContextTestBase::InitUpperFilter(OutputResourceKind kind, RewriteDriver* rewrite_driver) { UpperCaseRewriter* rewriter; rewrite_driver->AppendRewriteFilter( UpperCaseRewriter::MakeFilter(kind, rewrite_driver, &rewriter)); } void RewriteContextTestBase::InitCombiningFilter(int64 rewrite_delay_ms) { RewriteDriver* driver = rewrite_driver(); combining_filter_ = new CombiningFilter(driver, mock_scheduler(), rewrite_delay_ms); driver->AppendRewriteFilter(combining_filter_); driver->AddFilters(); } void RewriteContextTestBase::InitNestedFilter( bool expected_nested_rewrite_result) { RewriteDriver* driver = rewrite_driver(); // Note that we only register this instance for rewrites, not HTML // handling, so that uppercasing doesn't end up messing things up before // NestedFilter gets to them. 
UpperCaseRewriter* upper_rewriter; SimpleTextFilter* upper_filter = UpperCaseRewriter::MakeFilter(kOnTheFlyResource, driver, &upper_rewriter); AddFetchOnlyRewriteFilter(upper_filter); nested_filter_ = new NestedFilter(driver, upper_filter, upper_rewriter, expected_nested_rewrite_result); driver->AppendRewriteFilter(nested_filter_); driver->AddFilters(); } void RewriteContextTestBase::InitTrimFilters(OutputResourceKind kind) { trim_filter_ = new TrimWhitespaceRewriter(kind); rewrite_driver()->AppendRewriteFilter( new SimpleTextFilter(trim_filter_, rewrite_driver())); rewrite_driver()->AddFilters(); other_trim_filter_ = new TrimWhitespaceRewriter(kind); other_rewrite_driver()->AppendRewriteFilter( new SimpleTextFilter(other_trim_filter_, other_rewrite_driver())); other_rewrite_driver()->AddFilters(); } void RewriteContextTestBase::ClearStats() { RewriteTestBase::ClearStats(); if (trim_filter_ != NULL) { trim_filter_->ClearStats(); } if (other_trim_filter_ != NULL) { other_trim_filter_->ClearStats(); } if (combining_filter_ != NULL) { combining_filter_->ClearStats(); } if (nested_filter_ != NULL) { nested_filter_->ClearStats(); } } } // namespace net_instaweb
import java.util.Arrays;

import com.google.common.base.Preconditions;

// Note: project-local types used below (Prediction, NumericPrediction, Example,
// FeatureType, CategoricalFeature) are assumed to be on the classpath; their
// imports are omitted in this excerpt.

/**
 * Represents a prediction of the value of a categorical target. The prediction is not just a single category
 * value, but a probability distribution over all known category values.
 *
 * @see NumericPrediction
 */
public final class CategoricalPrediction extends Prediction {

  /** "counts" can be fractional, or less than 1 */
  private final double[] categoryCounts;
  /** Normalized categoryCounts */
  private volatile double[] categoryProbabilities;
  private volatile int maxCategory;

  public CategoricalPrediction(int[] categoryCounts) {
    this(toDoubles(categoryCounts));
  }

  private static double[] toDoubles(int[] values) {
    double[] result = new double[values.length];
    for (int i = 0; i < result.length; i++) {
      result[i] = values[i];
    }
    return result;
  }

  /**
   * @param categoryCounts "counts" for each category, which may be fractional
   */
  public CategoricalPrediction(double[] categoryCounts) {
    super((int) Math.round(sum(categoryCounts)));
    this.categoryCounts = categoryCounts;
    recompute();
  }

  private static double sum(double[] categoryCounts) {
    return Arrays.stream(categoryCounts).sum();
  }

  private synchronized void recompute() {
    double total = sum(categoryCounts);
    double maxCount = Double.NEGATIVE_INFINITY;
    int theMaxCategory = -1;
    double[] newCategoryProbabilities = new double[categoryCounts.length];
    for (int i = 0; i < newCategoryProbabilities.length; i++) {
      double count = categoryCounts[i];
      if (count > maxCount) {
        maxCount = count;
        theMaxCategory = i;
      }
      newCategoryProbabilities[i] = count / total;
    }
    Preconditions.checkArgument(theMaxCategory >= 0);
    categoryProbabilities = newCategoryProbabilities;
    maxCategory = theMaxCategory;
  }

  public double[] getCategoryCounts() {
    return categoryCounts;
  }

  public double[] getCategoryProbabilities() {
    return categoryProbabilities;
  }

  public int getMostProbableCategoryEncoding() {
    return maxCategory;
  }

  /**
   * @return {@link FeatureType#CATEGORICAL}
   */
  @Override
  public FeatureType getFeatureType() {
    return FeatureType.CATEGORICAL;
  }

  @Override
  public void update(Example train) {
    CategoricalFeature target = (CategoricalFeature) train.getTarget();
    update(target.getEncoding(), 1);
  }

  public synchronized void update(int encoding, int count) {
    categoryCounts[encoding] += count;
    setCount(getCount() + count);
    recompute();
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof CategoricalPrediction)) {
      return false;
    }
    CategoricalPrediction other = (CategoricalPrediction) o;
    return Arrays.equals(categoryCounts, other.categoryCounts);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(categoryCounts);
  }

  @Override
  public String toString() {
    return ':' + Arrays.toString(categoryProbabilities);
  }

}
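A hedged usage sketch of the class above (the demo class name is hypothetical): three category counts are normalized into probabilities, and the most probable encoding is the argmax of the counts.

public class CategoricalPredictionDemo { // hypothetical demo class
  public static void main(String[] args) {
    CategoricalPrediction prediction = new CategoricalPrediction(new int[] {2, 5, 3});
    // Prints [0.2, 0.5, 0.3]: counts normalized by their total of 10.
    System.out.println(java.util.Arrays.toString(prediction.getCategoryProbabilities()));
    // Prints 1: the index of the largest count.
    System.out.println(prediction.getMostProbableCategoryEncoding());
  }
}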
import { ValidateNested } from 'class-validator'; import { Output } from 'src/common/dtos/output.dto'; import { Category } from '../category.entity'; export class AllCategoriesOutput extends Output { @ValidateNested() results?: Category[]; }
// Balances the AVL search tree and its nodes passed into the function,
// rebalancing from the root and updating the root pointer if it changed.
void balanceAVLTree(avlTreeInit *tree)
{
    avlNodeInit *tempRoot = NULL;

    tempRoot = balanceAVLNode(tree->avlRoot);

    if(tempRoot != tree->avlRoot)
    {
        tree->avlRoot = tempRoot;
    }
}
/**
 * Returns the text string with all characters equal to letter replaced with
 * '#'.
 *
 * @param letter character to replace
 * @return text string with all characters equal to letter replaced with '#'
 */
public String mark(char letter) {
    StringBuilder s3 = new StringBuilder();
    for (int i = 0; i < text.length(); i++) {
        if (text.charAt(i) == letter)
            s3.append('#');
        else
            s3.append(text.charAt(i));
    }
    return s3.toString();
}
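A hedged illustration of the method's behavior, assuming the enclosing class (called Text here, a hypothetical name) stores the string "banana" in its text field:

Text t = new Text("banana");     // hypothetical enclosing class
String marked = t.mark('a');     // "b#n#n#"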
Role of interactions in a dissipative many-body localized system

Recent experimental and theoretical efforts have focused on the effect of dissipation on quantum many-body systems in their many-body localized (MBL) phase. While in the presence of dephasing noise such systems reach a unique ergodic state, their dynamics is characterized by slow relaxation manifested in non-exponential decay of self-correlations. Here we shed light on a currently much debated issue, namely the role of interactions for this relaxation dynamics. We focus on the experimentally relevant situation of the evolution from an initial charge density wave in the presence of strong dephasing noise. We find a crossover from a regime dominated by disorder to a regime dominated by interactions, with a concomitant change of time correlators from stretched exponential to compressed exponential form. The strongly interacting regime can be explained in terms of nucleation and growth dynamics of relaxing regions - reminiscent of the kinetics of crystallization in soft matter systems - and should be observable experimentally. This interaction-driven crossover suggests that the competition between interactions and noise gives rise to a much richer structure of the MBL phase than anticipated so far.

I. INTRODUCTION

Many-body quantum systems in the presence of quenched disorder undergo a transition between an ergodic phase and a many-body localized (MBL) phase. While the transport properties of the MBL phase are still debated [6], it is generally accepted that it is characterized by a slow growth of entanglement entropy [7-10], and ergodicity breaking, which has been observed in numerical studies [5,11,12] and experiments. Some nonergodic aspects of the MBL phase have also been argued to be present with translation invariance [17,18].

While most literature has focused on closed quantum systems, the imperfect isolation of the cold atomic ensembles used in recent experimental observations of MBL calls for an understanding of the effect of dissipation on the MBL phase [19,20]. In Ref. [21] a chain of interacting fermions in contact with an infinite temperature dephasing bath was studied numerically. At conditions where the closed system would be in the MBL phase, a slow approach to the infinite temperature state was observed in the open system, characterized by a stretched exponential decay of self-correlations. Stretched exponential behavior was confirmed analytically in Ref. [22] in terms of a non-interacting (Anderson) system valid for large disorder. Similarly, in Ref. [23] the scaling properties of the same system were studied in the large disorder limit, finding independence of the dynamics from interactions. A central question is therefore whether interactions play any role in the relaxation to the ergodic state due to dephasing in an otherwise MBL system.

Here we address this question by studying the dissipative dynamics of a disordered XXZ chain in its MBL phase [21]. Our main result is that depending on the interaction strength the system explores two different regimes within the MBL phase. We show that in the dynamics of an initial spin-density wave, depending on interaction strength, there is a crossover from a disorder-dominated regime to an interaction-dominated regime, whose observable signature is a change of behaviour of self-correlators from a stretched exponential to a compressed exponential dependence with time. This latter behaviour is due to nucleation and growth of relaxing regions. A crossover of this sort is often a manifestation of non-equilibrium and aging behaviour in soft matter and glassy systems.

II. MODEL

We consider a paradigmatic MBL system, the disordered XXZ chain in a spinless fermion description,

$$\hat{H} = J \sum_k \left( \hat{c}^{\dagger}_{k} \hat{c}_{k+1} + \hat{c}^{\dagger}_{k+1} \hat{c}_{k} \right) + V \sum_k \hat{n}_k \hat{n}_{k+1} + \sum_k h_k \hat{n}_k, \quad (1)$$

where we denote with $\hat{c}^{\dagger}_k$ the fermion creation operator, with $\hat{n}_k = \hat{c}^{\dagger}_k \hat{c}_k$ the number operator, and the random field $h_k \in [-h, h]$ is independently drawn for each site from a uniform distribution. This model exhibits an MBL transition for $h_c/J \simeq 7.2$ [5,10,28]. Following [21] we couple the system to an infinite temperature Markovian dephasing bath. At weak coupling between system and bath, the dynamics can be described by a Lindblad quantum Master equation [29,30],

$$\partial_t \rho = -i \left[ \hat{H}, \rho \right] + \gamma \sum_k \left( \hat{n}_k \rho \, \hat{n}_k - \frac{1}{2} \left\{ \hat{n}_k, \rho \right\} \right), \quad (2)$$

where $\rho$ is the system's density matrix and $\gamma \geq 0$ sets the coupling to the bath. Eq. (2) has the advantage of being experimentally relevant, as it can be derived from microscopic principles for experiments on both cold fermionic [31] and bosonic [32] gases in the lowest band of an optical lattice. The decoherence is caused by off-resonant scattering of photons forming the lattice potential, and the dissipation rate $\gamma$ is controlled by the detuning and intensity of the trapping laser. Eq. (2) conserves fermion number and in what follows we focus on the half-filling sector.

III. RATE EQUATION DESCRIPTION

For times $t \gg 1/\gamma$, there are two situations described by the Master equation (2) which may be reduced to a classical rate equation. The first is the limit of large dephasing, $\gamma \gg J$. The second is that of large interactions and/or large longitudinal fields, $V, h \gg J$ [23]. This effective dynamics describes the evolution of the diagonal elements of the density matrix $p_\alpha$. If we express these in terms of the probability vector $|p\rangle = \sum_\alpha p_\alpha |\alpha\rangle$, where $|\alpha\rangle$ are the $N!/[(N/2)!]^2$ Fock states in the half-filling sector (see also [23]), Eq. (2) reduces to

$$\partial_\tau |p\rangle = \sum_k \Gamma_k \left( \hat{c}^{\dagger}_{k} \hat{c}_{k+1} + \hat{c}^{\dagger}_{k+1} \hat{c}_{k} - \hat{P}_k \right) |p\rangle, \quad (3)$$

where $\hat{P}_k = \hat{n}_k + \hat{n}_{k+1} - 2 \hat{n}_k \hat{n}_{k+1}$. Eq. (3) describes classical hopping of particles on the lattice, with a rescaled time $\tau = J^2 \gamma t / h^2$, cf. [22,23]. The rate for hopping between site $k$ and $k+1$ is given by

$$\Gamma_k = \frac{h^2}{1 + \left[ \Delta h_k + V \left( \hat{n}_{k+2} - \hat{n}_{k-1} \right) \right]^2}, \quad (4)$$

where $\Delta h_k = h_{k+1} - h_k$ [37] and $n_k = \operatorname{Tr}(\hat{n}_k |p\rangle)$ is the total probability of having an excitation on site $k$. In the following we will fix the energy scale to $\gamma = 1$.

The rates $\Gamma_k$ are configuration-dependent, see Fig. 1(a). In the dynamics described by Eq. (3), the rates $\Gamma_k$ act as a kinetic constraint [33], as often encountered in systems with complex relaxation like glasses [38]: the form of the rates $\Gamma_k$ does not determine the properties of the stationary state, but rather the relaxation pathways. The rates $\Gamma_k$ are random through the field $h_k$. Their distribution $P(\Gamma)$ depends on the strength of the interactions and on the specific configuration under consideration. The analytical expression of $P(\Gamma)$ is given in Appendix A and plotted in Fig. 1(b) for various values of $V$ for the configurations of the left column of Fig. 1(a). When $V < 2h$, the distribution is bimodal, with a peak at $\Gamma/h^2 = 1$ from values of the field such that $\Delta h_k = \pm V$, and another peak at values $\Gamma/h^2 \sim 4/(3h)^2$ (for $V \ll 2h$). The form of $P(\Gamma)$ changes qualitatively when $V > 2h$, when $\Delta h_k = \pm V$ is not accessible, and the distribution $P(\Gamma)$ becomes unimodal, retaining only the slower peak, which for $V \gg 2h$ is centered at $\Gamma \sim V^{-2}$. The qualitative change of $P(\Gamma)$ already hints at the different dynamical regimes depending on the strength of the interactions.

IV. DISTINCT DYNAMICAL REGIMES WITHIN THE MBL PHASE

To explore the relaxation dynamics we focus on the case in which the initial state is the charge density wave (CDW) state, where the corresponding probability vector is $|p_{\mathrm{CDW}}\rangle = |\bullet\circ\bullet\circ\cdots\rangle$, with $\circ$ and $\bullet$ denoting empty and occupied sites, respectively. This is relevant for recent experiments, where ergodicity properties of the system were studied via the evolution of an initial CDW, quantified by the imbalance

$$\mathcal{I} = \frac{N_e - N_o}{N_e + N_o}, \quad (5)$$

where $N_e$ ($N_o$) is the population of the initially occupied (empty) sites, which gives a direct readout of the self-correlations and thus accounts for the ergodicity properties of the system.

In Fig. 2 we report our results on the imbalance, obtained by realising Eq. (3) via kinetic Monte Carlo, averaged over disorder, $\bar{\mathcal{I}}$. The decay of $\bar{\mathcal{I}}$ becomes slower for increasing interactions. We quantify this slowing down by defining the saturation time $T$ such that $\bar{\mathcal{I}}(T) = e^{-2}$. As shown in Fig. 2(a) we observe two different regimes: for $V < 2h$ the saturation time shows little dependence on $V$, while for $V > 2h$ it increases with increasing interaction, signalling a slowdown of the dynamics. The inset shows that in the region $V < 2h$, while $T$ is approximately independent of $V$, the shape of the relaxation function depends on the strength of the interaction. Our data is well fitted by the function $\bar{\mathcal{I}}(\tau) \sim \exp\left[ -(\tau/T)^\beta \right]$. This form is motivated by the analytical arguments below. The results on the exponent $\beta$ and the time-scale $T$ are reported in Fig. 2(b)-(c). We find that at $V \approx 2h$ the relaxation of the imbalance switches from a stretched exponential behavior ($\beta < 1$) to a compressed exponential behavior ($\beta > 1$) (see Fig. 2(b)). Despite the rapid increase in $\beta$ at $V \approx 2h$, which suggests a sharp acceleration of the dynamics, the increase in the timescale $T$ combines to give the slowing down observed in Fig. 2(a). The minimum in $T$ at $V \sim 0.3h$, and the large-$V$ behavior, are compatible with the results in [22]. A finite size study for the exponent $\beta$ is shown in the inset of Fig. 2(b). Although in the stretched exponential regime ($V < 2h$) finite size effects have a marginal impact, in the compressed exponential regime ($V > 2h$) they cause a saturation of the exponent to lower values. The origin of this behavior will become clear below.

The two regimes of stretched and compressed exponential decay, for $V \ll 2h$ and $V \gg 2h$, respectively, can be argued as follows. When $V \ll 2h$ the dynamics is dominated by disorder, and we can set $V = 0$. In this case the long time dynamics is characterized by large portions of the chain in which the system has relaxed (giving null contributions to the imbalance), with isolated non-relaxed pairs of neighboring sites corresponding to the largest $\Delta h_k$. The approach of $\mathcal{I}(\tau)$ to equilibrium is then determined by those sites. Their dynamics can be studied by focusing on, say, sites $k$ and $k+1$ with a single excitation between them, with relaxed neighbors serving as a bath. That is, we set $n_{k'}$ to the stationary average $1/2$ for $k' > k+1$ or $k' < k$. This setting is sketched in Fig. 3(a). We calculate the population as $n_k \equiv \operatorname{Tr}(\hat{n}_k |p\rangle)$, where the $\operatorname{Tr}$ operator is the trace, or sum, of the probabilities of the state, i.e. $\operatorname{Tr} |p\rangle = \sum_\alpha p_\alpha$. We consider the two sites in the initial configuration $(n_k, n_{k+1}) = (1, 0)$, and since the dynamics conserves the number of excitations we can set $p_{\uparrow\uparrow} = p_{\downarrow\downarrow} = 0$. The evolution of $n_k$ is given as

$$\dot{n}_k = \operatorname{Tr}\left( \hat{n}_k \, \partial_\tau |p\rangle \right), \quad (6)$$

such that, using Eq. (3), we obtain

$$\dot{n}_k = \Gamma_{k-1} (n_{k-1} - n_k) + \Gamma_k (n_{k+1} - n_k). \quad (7)$$

As anticipated, we set $n_{k+2}$ and $n_{k-1}$ to their relaxed value $n_{k-1} = n_{k+2} = 1/2$, such that we can write the equations for the sites under consideration as

$$\dot{n}_k = \Gamma_{k-1} \left( \tfrac{1}{2} - n_k \right) + \Gamma_k (n_{k+1} - n_k), \qquad \dot{n}_{k+1} = \Gamma_{k+1} \left( \tfrac{1}{2} - n_{k+1} \right) + \Gamma_k (n_k - n_{k+1}). \quad (8)$$

We are interested in the local imbalance $I_k = n_{k+1} - n_k$, which, using $n_k + n_{k+1} = 1$, can be obtained by integrating

$$\dot{I}_k = -\left( 2\Gamma_k + \frac{\Gamma_{k-1} + \Gamma_{k+1}}{2} \right) I_k. \quad (9)$$

In this case, the rates in Eq. (4) depend only on the difference of the random fields on the sites they are connecting. The rates associated to two contiguous links (e.g., $\Gamma_k$ and $\Gamma_{k+1}$) are therefore not statistically independent, since they both depend on the field on the site they share, but those of links further apart are. When solving Eq. (9) we can treat $\Gamma_{k-1}$ and $\Gamma_{k+1}$ as independent. As an approximation we set them equal when averaging over the disorder, $\Gamma_{k-1} = \Gamma_{k+1} = \Gamma'$, leading to

$$\bar{I}(\tau) = \int d\Gamma_k \, d\Gamma' \, P(\Gamma_k, \Gamma') \, e^{-(2\Gamma_k + \Gamma')\tau}, \quad (10)$$

where $P(\Gamma_k, \Gamma')$ is the joint probability reported in Appendix A. A numerical integration of Eq. (10) gives a stretched exponential behavior. In Fig. 2(b) the results on $\beta$ obtained by fitting are compared with the numerical data in the weak interaction regime, showing good agreement. In fact the local imbalance is a good approximation of the imbalance in the regime we are considering. This is because non-relaxed links are far apart enough to be considered independent, and they give the same average contribution.

In the opposite limit $V \gg 2h$, in contrast, the first step costs $V + \Delta h_k$ when starting from the CDW state. This sets the time-scale $\tau \sim V^2$ to observe transitions of the kind $|\cdots\bullet\circ\bullet\circ\cdots\rangle \to |\cdots\bullet\bullet\circ\circ\cdots\rangle$. We refer to these as nucleation events, happening at homogeneous rate $\Gamma_n \simeq 2(h/V)^2$. After a nucleation event has occurred, relaxation can proceed via transitions adjacent to the nucleated region, e.g. $|\cdots\bullet\bullet\circ\circ\bullet\circ\cdots\rangle \to |\cdots\bullet\bullet\circ\bullet\circ\circ\cdots\rangle$, whose rates are independent of $V$. This dynamics is conveniently described by the following coarse-grained approximation. Since the imbalance is a quantity with a period of two sites, it is natural to divide the chain in the following way: $(1,2)|(3,4)|\cdots|(N-1,N)$. We focus then on a set of new degrees of freedom labeled by the contribution that the pairs of original sites bring to the imbalance: $|\bullet\circ\rangle \to 1$, $|\bullet\bullet\rangle, |\circ\circ\rangle \to 0$, $|\circ\bullet\rangle \to -1$. The CDW configuration corresponds to the $|1, 1, 1, 1, \ldots, 1, 1\rangle$ state, and a nucleation event creates either two 0 sites or a $-1$ site, as shown in Fig. 3(b). In what follows we will focus on the case in which two 0 sites are created. Once a nucleation has happened, the events $|0, 1\rangle \leftrightarrow |-1, 0\rangle$ and $|1, 0\rangle \leftrightarrow |0, -1\rangle$ are possible with the non-interacting rate $\Gamma_k = h^2/(1 + \Delta h_k^2)$. This pair of reversible processes implies that the 0 sites can be treated as random walkers, which when moving away from each other create a growing region of $-1$ sites. The site-dependent differences between these rates are small for the time-scales we are considering ($V \gg 2h$), and we will assume a constant rate $\Gamma_e$. When averaged over random realizations, the extension of the region between the 0 sites expands following the law $\bar{G}_e(\tau) \sim \sqrt{\Gamma_e \tau}$, contributing with net zero imbalance since the sites falling in this region are now equally likely to be a 1 or $-1$.

This growth dynamics, together with the initial nucleation events - reminiscent for example of a crystallization process - is well described by the so-called Avrami law [39-43]. Here we give a sketch of the derivation in our case. The average number of nucleation events up to a given time, $\bar{\nu}(\tau)$, can be found by integrating $\dot{\bar{\nu}} = N \Gamma_n / 2$. Not accounting for overlap of the expanded regions, the total number of transformed sites can be expressed as

$$N(\tau) = \int_0^\tau d\tau' \, \dot{\bar{\nu}}(\tau') \, \bar{G}_e(\tau - \tau') \sim N \Gamma_n \sqrt{\Gamma_e} \, \tau^{3/2}. \quad (11)$$

This dynamics is sketched in Fig. 3(c) for a single expanding region of transformed sites. Overlaps can be excluded by assuming the increment in transformed sites $dN_{tr}$ is proportional to $dN$ multiplied by the probability of not having an already transformed site, $(1 - 2N_{tr}/N)$, giving

$$dN_{tr} = \left( 1 - \frac{2 N_{tr}}{N} \right) dN. \quad (12)$$

Initializing our dynamics in the untransformed state, the imbalance at a given time is given by $\bar{\mathcal{I}}(\tau) = 1 - 2N_{tr}(\tau)/N$, leading to the compressed exponential behavior with exponent $\beta = 3/2$ observed in Fig. 2(b). Equation (12) also yields the functional dependence of the time-scale $T \sim (V/h)^{4/3}$ for large $V/h$, which is confirmed by our numerical results in Fig. 2(c).

This picture breaks when the distance between nucleation events becomes comparable to the system length. In this case we can consider the expansion of a nucleated region as instantaneous and the imbalance as fully relaxed after a single nucleation event. In a single realization we can then model the imbalance as $I(\tau|\tau') = 1 - \theta(\tau - \tau')$, where $\tau'$ is the time at which the first nucleation event happens. The probability of nucleation at this time is given as $\pi(\tau') = (N/V^2) \exp(-N \tau'/V^2)$, such that the imbalance averaged over realizations is

$$\bar{\mathcal{I}}(\tau) = \int_0^\infty d\tau' \, \pi(\tau') \, I(\tau|\tau') = e^{-N\tau/V^2}. \quad (13)$$

This is the origin of the strong size dependence of the dynamics for large $V$, such as the saturation of the exponent $\beta$ to 1 in the inset of Fig. 2(b) for e.g. a system of length $N = 10$.

V. PARTICLE LOSS

We finish by considering another situation that can occur experimentally, that of particle loss. This corresponds to the processes $|\bullet\rangle \to |\circ\rangle$, which can be modelled in our effective description by adding to the r.h.s. of Eq. (3) a single-site decay term, where $\kappa$ is the loss rate. Loss acts to relax the local imbalance to 0 in a non-collective manner. For $\kappa \gg \gamma J^2 h^{-2}$ the imbalance decays as $\mathcal{I} \approx e^{-\kappa t}$, and none of the above features survive. In contrast, for $\kappa \ll \gamma J^2 h^{-2}$ only the nucleation-expansion dynamics is significantly modified: decay can act as a nucleation event, and is dominant when $\kappa \gg \gamma J^2 / V^2$. This affects marginally the value of the compressed exponent $\beta$, but results in a saturation of the time scales for large enough $V$, see insets of Fig. 2(b)-(c).

VI. CONCLUSION

We have considered the effect of interactions on the dynamics of an MBL system subject to dephasing noise. We found two relaxation regimes, one dominated by disorder, and one dominated by interactions. The physical manifestation is a crossover in the decay of time correlators, from stretched to compressed exponential in time. While the stretched exponential regime was expected for weak interactions [21,22], the crossover to compressed exponential is novel. Our effective classical approximation suggests that this regime would also exist with particle loss. This latter behaviour is due to nucleation and growth dynamics dominating relaxation, a regime which should display strong finite size effects. The above described dynamics should be observable in current experiments on MBL under controlled noise.

Appendix A

Here we obtain analytically the distributions of rates $P(\Gamma)$ depicted in Fig. 1(b). We consider the rate associated with a fermion hopping in an "interacting" configuration, namely the ones depicted in the left column of Fig. 1(a). In particular we will focus on the bottom case in the left column of Fig. 1(a), where the interaction $V$ comes with a plus sign in the rate, the other case being trivially deducible from this case. The "non-interacting" cases can be extracted easily from the results below by setting the interactions $V = 0$.

The quantity of interest is the rate for a hopping event involving sites $k$ and $k+1$ (which we normalize by $h^2$ for simplicity), corresponding to

$$\frac{\Gamma_k}{h^2} = g_V(\Delta h_k) = \frac{1}{1 + (\Delta h_k + V)^2}, \qquad g_V^{-1}(\Gamma_k)_{\pm} = -V \pm \sqrt{\frac{1}{\Gamma_k} - 1}, \quad (A1)$$

where $\Delta h_k = h_{k+1} - h_k$. In Eq. (A1) we made the inverse function explicit since it will be used below. Since we focus on a single rate we will drop the site dependency for all the quantities at hand. The rates $\Gamma$ are random variables, since they depend on the difference of the random field on two contiguous sites. The distribution of the difference $\Delta h$ can be easily extracted from the random fields' one, since both $h_k$ and $h_{k+1}$ are identically distributed with the same probability between $-h$ and $h$:

$$P_{\Delta h}(\Delta h) = \frac{2h - |\Delta h|}{4h^2}, \qquad |\Delta h| \leq 2h. \quad (A2)$$

The distribution of the rates then follows from a change of variables,

$$P(\Gamma) = \sum_s P_{\Delta h}\!\left( g_V^{-1}(\Gamma)_s \right) \left| \frac{d\, g_V^{-1}(\Gamma)_s}{d\Gamma} \right|. \quad (A3)$$

The index $s$ in Eq. (A3) is summed over the inverse functions in the region under consideration. Considering the case where $V < 2h$, as depicted in Fig. 4, the inverse function is multivalued in the region around $\Delta h = -V$, where both branches fall within $[-2h, 2h]$; outside this region, on the other hand, the inverse function is single-valued, having as single contribution the branch $g_V^{-1}(\Gamma_k)_+$. The probability density for the rates then follows by inserting the appropriate branches into Eq. (A3), giving a piecewise expression, Eq. (A5). In the case $V > 2h$ the function $g_V^{-1}(\Gamma_k)$ is never multivalued in the region of parameters considered. This results in a probability density function which is defined by the first line of Eq. (A5).

The joint probability $P(\Gamma_k, \Gamma')$ used in Eq. (10) is more involved and cumbersome to write in a closed form. We can though express it in a form that allows for direct numerical integration. Calling $h_k$, $h_{k+1}$, $h'$ the random fields on respectively the $k$, $k+1$, and relaxed sites (see main text for an explanation), we can define the joint density as an integral over these three fields with the two rates fixed by delta functions. This expression can be readily plugged into Eq. (10), giving the results presented in Fig. 2(b) in the main text.
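To make the rate-equation picture concrete, below is a minimal kinetic Monte Carlo sketch (not the authors' code; all parameter values are illustrative) of the classical hopping dynamics of Eq. (3) in the non-interacting limit V = 0, where the link rates reduce to Gamma_k = h^2 / (1 + Delta h_k^2). It starts from the charge density wave and tracks the overlap of the occupation pattern with its initial profile, a simple proxy for the imbalance; the interacting case would add V(n_{k+2} - n_{k-1}) inside the squared energy mismatch.

import numpy as np

rng = np.random.default_rng(0)

def imbalance(n, n0):
    # Overlap of the current occupation pattern with the initial CDW profile;
    # equals 1 at tau = 0 and decays towards 0 as the system relaxes.
    return np.mean((2 * n - 1) * (2 * n0 - 1))

N, h, tau_max = 20, 10.0, 5.0       # illustrative size, disorder, and horizon
fields = rng.uniform(-h, h, N)      # random fields h_k drawn from [-h, h]
n = np.arange(N) % 2                # CDW initial state ...0101...
n0 = n.copy()

tau = 0.0
while tau < tau_max:
    # Active links carry exactly one particle on the pair (k, k+1).
    active = np.flatnonzero(n[:-1] != n[1:])
    if active.size == 0:
        break
    # Non-interacting rates Gamma_k = h^2 / (1 + (h_{k+1} - h_k)^2).
    rates = h**2 / (1.0 + (fields[active + 1] - fields[active]) ** 2)
    total = rates.sum()
    tau += rng.exponential(1.0 / total)       # Gillespie waiting time
    k = rng.choice(active, p=rates / total)   # pick a link, swap occupations
    n[k], n[k + 1] = n[k + 1], n[k]

print("imbalance at tau_max:", imbalance(n, n0))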
/** * @param args the command line arguments */ public static void main(String[] args) { Calendar calendar = new GregorianCalendar(); System.out.println("Current time: " + new Date()); System.out.println("Year: " + calendar.get(Calendar.YEAR) + ", Month: " + calendar.get(Calendar.MONTH) + ", Date: " + calendar.get(Calendar.DATE) + ", Hour: " + calendar.get(Calendar.HOUR) + ", Hour of day: " + calendar.get(Calendar.HOUR_OF_DAY) ); Calendar calendar1 = new GregorianCalendar(2018, 10, 19); System.out.println("11/19/2018: " + dayNameOfWeek(calendar1.get(Calendar.DAY_OF_WEEK))); }
import {Tool} from "../interfaces/Tool";
import {connect} from '../database.utils';
import {RowDataPacket} from "mysql2";

export async function selectToolByToolId(toolId: string) : Promise<Tool|null> {
    try {
        const mySqlConnection = await connect();
        const mySqlQuery = " SELECT BIN_TO_UUID(toolId) AS toolId, BIN_TO_UUID (toolProfileId) AS toolProfileId, BIN_TO_UUID(toolCategoryId) as toolCategoryId, toolDescription, toolImage, toolPostDate from tool where toolId = UUID_TO_BIN(:toolId)"
        const result = await mySqlConnection.execute(mySqlQuery, {toolId}) as RowDataPacket[]
        const tools : Array<Tool> = result [0] as Array<Tool>
        await mySqlConnection.release()
        return tools.length === 1 ? {...tools[0]} : null;
    } catch (error) {
        console.log(error)
        throw error
    }
}
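A hedged usage sketch of the function above (the UUID is a made-up placeholder): resolve a tool by its id, for example inside an Express route handler.

async function demo() {
    const tool = await selectToolByToolId('6ba7b810-9dad-11d1-80b4-00c04fd430c8'); // placeholder UUID
    if (tool === null) {
        // no matching row; a real route handler would respond with a 404 here
    } else {
        console.log(tool.toolDescription);
    }
}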
import { BinaryView, Frag, Read, Write } from "@ot-builder/bin-util"; import { Assert } from "@ot-builder/errors"; import { GsubGpos } from "@ot-builder/ot-layout"; import { Data, Sigma } from "@ot-builder/prelude"; import { Tag, UInt24 } from "@ot-builder/primitive"; export const FeatureParams = { read(view: BinaryView, tag: Tag) { for (const handler of FeatureParamHandlers) { const result = handler.tryRead(tag, view); if (result) return result; } return undefined; }, writeOpt(fp: Data.Maybe<Sigma.DependentPair>, tag: Tag) { if (!fp) return null; const tagDrivenTypeID = GsubGpos.FeatureParams.tagToTypeIDMap[tag]; if (!tagDrivenTypeID) return null; const fpTag = fp.cast(tagDrivenTypeID); if (!fpTag) return null; for (const handler of FeatureParamHandlers) { const result = handler.tryWrite(fp); if (result) return result; } return null; } }; type Handler<T> = { tryRead(tag: Tag, view: BinaryView): undefined | Sigma.DependentPair; tryWrite(fp: Sigma.DependentPair): undefined | Frag; }; function CreateHandler<T>(tid: Sigma.TypeID<T>, io: Read<T> & Write<T>): Handler<T> { return { tryRead(tag, view) { if (GsubGpos.FeatureParams.tagToTypeIDMap[tag] === tid) { return Sigma.DependentPair.create(tid, view.next(io)); } else { return undefined; } }, tryWrite(fpRaw) { const fp = fpRaw.cast(tid); if (fp) return Frag.from(io, fp); else return undefined; } }; } const FeatureParamStylisticSet = CreateHandler(GsubGpos.FeatureParams.TID_StylisticSet, { read(view: BinaryView): GsubGpos.FeatureParams.StylisticSet { const version = view.uint16(); Assert.VersionSupported("FeatureParams::StylisticSet", version, 0); const uiNameID = view.uint16(); return { uiNameID }; }, write(frag: Frag, fp: GsubGpos.FeatureParams.StylisticSet) { frag.uint16(0); frag.uint16(fp.uiNameID); } }); const FeatureParamCharacterVariant = CreateHandler(GsubGpos.FeatureParams.TID_CharacterVariant, { read(view: BinaryView): GsubGpos.FeatureParams.CharacterVariant { const format = view.uint16(); Assert.FormatSupported("FeatureParam::CharacterVariant", format, 0); const featUiLabelNameId = view.uint16(); const featUiTooltipTextNameId = view.uint16(); const sampleTextNameId = view.uint16(); const numNamedParameters = view.uint16(); const firstParamUiLabelNameId = view.uint16(); const charCount = view.uint16(); const chars = view.array(charCount, UInt24); return { featUiLabelNameId, featUiTooltipTextNameId, sampleTextNameId, numNamedParameters, firstParamUiLabelNameId, characters: chars }; }, write(frag: Frag, fp: GsubGpos.FeatureParams.CharacterVariant) { frag.uint16(0) .uint16(fp.featUiLabelNameId) .uint16(fp.featUiTooltipTextNameId) .uint16(fp.sampleTextNameId) .uint16(fp.numNamedParameters) .uint16(fp.firstParamUiLabelNameId) .uint16(fp.characters.length) .array(UInt24, fp.characters); } }); const FeatureParamHandlers = [FeatureParamStylisticSet, FeatureParamCharacterVariant];
def discover(cls): self = cls() globus_alias = 'raw-ephys' ra, rep, rep_sub = (GlobusStorageLocation() & {'globus_alias': globus_alias}).fetch1().values() smap = {'{}/{}'.format(s['water_restriction_number'], s['session_date']).replace('-', ''): s for s in (experiment.Session() * (lab.WaterRestriction() * lab.Subject.proj()))} ftmap = {t['file_type']: t for t in (FileType() & "file_type like 'ephys%%'")} skey = None sskip = set() sfiles = [] def commit(skey, sfiles): log.info('commit. skey: {}, sfiles: {}'.format(skey, sfiles)) if not sfiles: log.info('skipping. no files in set') return h2o, sdate, ftypes = set(), set(), set() ptmap = defaultdict(lambda: defaultdict(list)) for s in sfiles: ptmap[s['probe']][s['trial']].append(s) h2o.add(s['water_restriction_number']) sdate.add(s['session_date']) ftypes.add(s['file_type']) if len(h2o) != 1 or len(sdate) != 1: log.info('skipping. bad h2o {} or session date {}'.format( h2o, sdate)) return h2o, sdate = next(iter(h2o)), next(iter(sdate)) {k: {kk: vv for kk, vv in v.items()} for k, v in ptmap.items()} if all('trial' in f for f in ftypes): ds_type = 'ephys-raw-trialized' ds_name = '{}_{}_{}'.format(h2o, sdate, ds_type) ds_key = {'dataset_name': ds_name, 'globus_alias': globus_alias} if (DataSet & ds_key): log.info('DataSet: {} already exists. Skipping.'.format( ds_key)) return DataSet.insert1({**ds_key, 'dataset_type': ds_type}, allow_direct_insert=True) as_key = {k: v for k, v in smap[skey].items() if k in ArchivedSession.primary_key} ArchivedSession.insert1( {**as_key, 'globus_alias': globus_alias}, allow_direct_insert=True, skip_duplicates=True) for p in ptmap: ep_key = {**as_key, **ds_key, 'probe_folder': p} ArchivedRawEphys.insert1(ep_key, allow_direct_insert=True) for t in ptmap[p]: for f in ptmap[p][t]: DataSet.PhysicalFile.insert1( {**ds_key, **f}, allow_direct_insert=True, ignore_extra_fields=True) ArchivedRawEphys.RawEphysTrial.insert1( {**ep_key, **ds_key, 'trial': t, 'file_subpath': f['file_subpath']}, allow_direct_insert=True) elif all('concat' in f for f in ftypes): raise NotImplementedError('concatenated not yet implemented') else: log.info('skipping. mixed filetypes detected') return gsm = self.get_gsm() gsm.activate_endpoint(rep) for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): log.debug('checking: {}:{}/{}'.format( ep, dirname, node.get('name', ''))) edir = re.match('([a-z]+[0-9]+)/([0-9]{8})/([0-9]+)', dirname) if not edir or node['DATA_TYPE'] != 'file': continue log.debug('dir match: {}'.format(dirname)) h2o, sdate, probe = edir[1], edir[2], edir[3] skey_i = '{}/{}'.format(h2o, sdate) if skey_i != skey: if skey and skey in smap: with dj.conn().transaction: try: commit(skey, sfiles) except Exception as e: log.error( 'Exception {} committing {}. files: {}'.format( repr(e), skey, sfiles)) skey, sfiles = skey_i, [] if skey not in smap: if skey not in sskip: log.debug('session {} not known. skipping.'.format(skey)) sskip.add(skey) continue fname = node['name'] log.debug('found file {}'.format(fname)) if '.' 
not in fname: log.debug('skipping {} - no dot in fname'.format(fname)) continue froot, fext = fname.split('.', 1) ftype = {g['file_type']: g for g in ftmap.values() if fnmatch(fname, g['file_glob'])} if len(ftype) != 1: log.debug('skipping {} - incorrect type matches: {}'.format( fname, ftype)) continue ftype = next(iter(ftype.values()))['file_type'] trial = None if 'trial' in ftype: trial = int(froot.split('_t')[1]) file_subpath = '{}/{}'.format(dirname, fname) sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'probe': int(probe), 'trial': int(trial), 'file_subpath': file_subpath, 'file_type': ftype}) if skey: with dj.conn().transaction: commit(skey, sfiles)
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_SSL_HTTPS_ONLY_MODE_UPGRADE_INTERCEPTOR_H_
#define CHROME_BROWSER_SSL_HTTPS_ONLY_MODE_UPGRADE_INTERCEPTOR_H_

#include <memory>

#include "base/sequence_checker.h"
#include "chrome/browser/ssl/https_only_mode_upgrade_url_loader.h"
#include "content/public/browser/url_loader_request_interceptor.h"
#include "services/network/public/cpp/resource_request.h"
#include "url/gurl.h"

namespace content {
class BrowserContext;
}  // namespace content

// A class that attempts to intercept HTTP navigation requests and redirect
// them to HTTPS. Its lifetime matches that of the content/ navigation loader
// code.
class HttpsOnlyModeUpgradeInterceptor
    : public content::URLLoaderRequestInterceptor {
 public:
  explicit HttpsOnlyModeUpgradeInterceptor(int frame_tree_node_id);
  ~HttpsOnlyModeUpgradeInterceptor() override;

  HttpsOnlyModeUpgradeInterceptor(const HttpsOnlyModeUpgradeInterceptor&) =
      delete;
  HttpsOnlyModeUpgradeInterceptor& operator=(
      const HttpsOnlyModeUpgradeInterceptor&) = delete;

  // content::URLLoaderRequestInterceptor:
  void MaybeCreateLoader(
      const network::ResourceRequest& tentative_resource_request,
      content::BrowserContext* browser_context,
      content::URLLoaderRequestInterceptor::LoaderCallback callback) override;

  // Sets the ports used by the EmbeddedTestServer (which uses random ports) to
  // determine the correct port to upgrade/fallback to in tests.
  static void SetHttpsPortForTesting(int port);
  static void SetHttpPortForTesting(int port);
  static int GetHttpsPortForTesting();
  static int GetHttpPortForTesting();

 private:
  // Creates a URL loader that immediately serves a redirect to the HTTPS
  // version of the URL.
  void CreateHttpsRedirectLoader(
      const network::ResourceRequest& tentative_resource_request,
      content::URLLoaderRequestInterceptor::LoaderCallback callback);

  // Runs `callback` with `handler`.
  void HandleRedirectLoader(
      content::URLLoaderRequestInterceptor::LoaderCallback callback,
      RequestHandler handler);

  // URLLoader that serves redirects.
  std::unique_ptr<HttpsOnlyModeUpgradeURLLoader> redirect_url_loader_;

  // Used to access the WebContents for the navigation.
  int frame_tree_node_id_;

  SEQUENCE_CHECKER(sequence_checker_);
};

#endif  // CHROME_BROWSER_SSL_HTTPS_ONLY_MODE_UPGRADE_INTERCEPTOR_H_
/** * Return true if should traverse given url. * * @throws MalformedURLException */ public boolean traverseIt(FaeUtil faeUtil, String urlFrom, int depth, int urlNum, int cnt, String url) throws MalformedURLException { boolean traverseIt = true; String msg = null; if (depth > faeUtil.DEPTH) { traverseIt = false; } if (faeUtil.m_urlsRead.contains(url)) { traverseIt = false; msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + " ALREADY READ, NOT PROCESSING"; } for (String doNotRead : faeUtil.m_urlsToNotTraverse) { if (url.startsWith(doNotRead)) { traverseIt = false; msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + " MATCHES DO NOT READ LIST, NOT PROCESSING"; } } if (traverseIt) { if (faeUtil.m_ctrl.SPAN_DOMAINS != null) { boolean match = false; StringTokenizer st = new StringTokenizer( faeUtil.m_ctrl.SPAN_DOMAINS, ","); while (st.hasMoreTokens()) { String tok = st.nextToken(); if (tok.equals("*")) { match = true; break; } if (url.contains("http") && url.indexOf(tok + "/") != -1) { URL t = new URL(url); if (t.getHost().indexOf(tok) != -1){ String prefix = t.getHost().substring(0, t.getHost().indexOf(tok)); int num = countOccurrences(prefix, '.'); if (num == 0 || num == 1) { match = true; break; } } else { match = false; break; } } } if (!match) { traverseIt = false; faeUtil.m_filteredURLs.add(url); String timing = "\"" + url + "\",\"" + urlFrom + "\""; faeUtil.m_filteredURLsCSV.add(timing); msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + " DOES NOT MATCH ANY SPAN DOMAIN, NOT PROCESSING"; } } else { traverseIt = false; faeUtil.m_filteredURLs.add(url); String timing = "\"" + url + "\",\"" + urlFrom + "\""; faeUtil.m_filteredURLsCSV.add(timing); msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + ": NO SPAN DOMAINS GIVEN, NOT PROCESSING"; } if (faeUtil.m_ctrl.INCLUDE_DOMAINS != null) { if(count(url,"http") == 1) { StringTokenizer st = new StringTokenizer( faeUtil.m_ctrl.INCLUDE_DOMAINS, ","); while (st.hasMoreTokens()) { String tok = st.nextToken(); if (url.indexOf("/" + tok + "/") != -1) { traverseIt = true; faeUtil.m_filteredURLs.remove(url); String timing = "\"" + url + "\",\"" + urlFrom + "\""; faeUtil.m_filteredURLsCSV.remove(timing); msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + " MATCHES INCLUDE DOMAIN " + tok + ", PROCESSING"; } } } } if (faeUtil.m_ctrl.EXCLUDE_DOMAINS != null) { StringTokenizer st = new StringTokenizer( faeUtil.m_ctrl.EXCLUDE_DOMAINS, ","); while (st.hasMoreTokens()) { String tok = st.nextToken(); if (url.indexOf("/" + tok + "/") != -1) { traverseIt = false; faeUtil.m_filteredURLs.add(url); String timing = "\"" + url + "\",\"" + urlFrom + "\""; faeUtil.m_filteredURLsCSV.add(timing); msg = urlNum + ":" + cnt + ": depth:" + depth + ": " + url + " MATCHES EXCLUDE DOMAIN " + tok + ", NOT PROCESSING"; } } } } if (msg != null) faeUtil.verbose(msg); return traverseIt; }
import { getRepository } from 'typeorm'; import { Resolvers } from '../resolvers-types.generated'; import { Context } from '../../context'; import { User } from '../../database/entity/User'; import { Department } from '../../database/entity/Department'; import errors from '../../utils/errors'; import { UserType } from '../../interfaces'; const resolvers: Resolvers<Context> = { Query: { department: async (_, { input: { id: departmentID } }, { jwt: { id, type }, departmentLoader }) => { const dep = await departmentLoader.load(departmentID); const [usr] = dep.users.filter((u) => u.id === id); if (!usr && type !== UserType.ROOT) throw errors.unauthorized; return { id: dep.id, }; }, departments: async (_, __, { jwt: { id } }) => { const departmentRepo = getRepository(Department); const userRepo = getRepository(User); const usr = await userRepo.findOne({ relations: ['departments'], where: { id } }); if (!usr) throw errors.internalServerError; if (usr.type === UserType.CENTRE) throw errors.unauthorized; let deps: Department[]; if (usr.type === UserType.ROOT) { deps = await departmentRepo.find(); } else { deps = usr.departments; } return deps.map((d) => ({ id: d.id })); }, }, Mutation: { createDepartment: async (_, { input: { name } }) => { if (!name) throw errors.fieldsRequired; const departmentRepo = getRepository(Department); const dep = departmentRepo.create({ name }); await departmentRepo.save(dep); return { code: '200', message: 'Department created successfully', }; }, }, Department: { name: async ({ id }, __, { departmentLoader }) => { const { name } = await departmentLoader.load(id); return name; }, users: async ({ id }, __, { departmentLoader }) => { const { users } = await departmentLoader.load(id); return users.map((u) => ({ id: u.id })); }, channels: async ({ id }, __, { departmentLoader }) => { const { channels } = await departmentLoader.load(id); return channels.map((c) => ({ id: c.id })); }, createdAt: async ({ id }, __, { departmentLoader }) => { const { createdAt } = await departmentLoader.load(id); return createdAt.toISOString(); }, }, }; export default resolvers;
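A hedged sketch of client operations these resolvers would serve (field and input shapes follow the resolver map above; schema wiring, auth context, and ids are assumed):

query {
  departments {
    id
    name
    users { id }
  }
}

mutation {
  createDepartment(input: { name: "Engineering" }) {
    code
    message
  }
}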
Israeli government enjoined from disclosing personal account information to the IRS

One disadvantage of United States citizenship is that all income, from whatever source worldwide, is subject to taxation. This is true regardless of how one obtains American citizenship; indeed, there are many "accidental Americans" who, by dint of having been born to a person with American citizenship, are technically American citizens, many of whom are now being hit with significant tax bills from the Internal Revenue Service. Renunciation of American citizenship is possible, and is being done in record numbers, but it comes at a price in paperwork if not in dollars – the Expatriation Tax.

Complicating matters is the Foreign Account Tax Compliance Act (FATCA), which requires American taxpayers to report their foreign accounts to the United States Treasury. The United States has entered into various Tax Information Exchange Agreements (TIEAs) with various countries to ensure that the foreign banks give the U.S. Treasury the information required under FATCA. One such agreement is with Israel.
const ConfigFilePaths = {
    // sizes:
}