content
stringlengths
10
4.9M
import { Clipboard } from '@angular/cdk/clipboard'; import { Component, EventEmitter, Input, OnInit, Output, OnDestroy, TemplateRef } from '@angular/core'; import { ActivatedRoute } from '@angular/router'; import { NbGlobalPhysicalPosition, NbToastrService, NbDialogRef, NbDialogService } from '@nebular/theme'; import { AccountService, GlobalService } from '@services'; import { AccountVM, RoleVM } from '@view-models'; import { NgxSpinnerService } from 'ngx-spinner'; import { State } from '@store/states'; import { Store } from '@ngrx/store'; import { authSelector } from '@store/selectors'; import { tap, catchError, finalize } from 'rxjs/operators'; import { Subscription, of } from 'rxjs'; @Component({ selector: 'app-role-account', templateUrl: './role-account.component.html', styleUrls: ['./role-account.component.scss'] }) export class RoleAccountComponent implements OnInit, OnDestroy { @Output() useToggle: EventEmitter<boolean> = new EventEmitter<boolean>(); @Input() selectedRole: RoleVM; @Input() account: AccountVM; you: AccountVM; canUpdate = false; subscriptions: Subscription[] = []; constructor( protected readonly service: AccountService, protected readonly globalService: GlobalService, protected readonly dialogService: NbDialogService, protected readonly toastrService: NbToastrService, protected readonly spinner: NgxSpinnerService, protected readonly store: Store<State> ) { } ngOnInit() { } useEdit = () => { this.globalService.triggerView$.next({ type: 'employee', payload: { employee: this.account } }); } useView = () => { this.globalService.triggerView$.next({ type: 'employee', payload: { employee: this.account, isProfile: true } }); } usePhone = (phone: string) => { window.open('tel:' + phone, '_self'); } useMail = (email: string) => { this.globalService.triggerView$.next({ type: 'mail', payload: { email } }); } useDialog = (template: TemplateRef<any>) => { this.dialogService.open(template, { closeOnBackdropClick: true }); } useCheckRole = () => { return 
this.account.roles.find((role) => role.id === this.selectedRole?.id); } useShowSpinner = () => { this.spinner.show('role-main'); } useHideSpinner = () => { setTimeout(() => { this.spinner.hide('role-main'); }, 1000); } useToggleRole = () => { this.useShowSpinner(); const role = this.useCheckRole(); let roles = this.account.roles; if (role) { roles = roles.filter((r) => r.id !== this.selectedRole.id); } else { roles = roles.concat([this.selectedRole]); } this.service.update({...this.account, roles}) .pipe( tap((data) => { this.toastrService.success('', !role ? 'Set role for employee successful' : 'Unset role for employee successful', { duration: 3000 }); }), catchError((err) => { this.toastrService.danger('', (!role ? 'Set role employee fail! ' : 'Unset role employee fail! ') + err.message, { duration: 3000 }); return of(undefined); }), finalize(() => { this.useHideSpinner(); }) ) .subscribe(); } useToggleState = () => { this.useShowSpinner(); const subscription = this.service.update({ id: this.account.id, isDelete: !this.account.isDelete } as any) .pipe( tap((data) => { this.toastrService.success('', !this.account.isDelete ? 'Disabled employee successful' : 'Active employee successful', { duration: 3000 }); }), catchError((err) => { (err); this.toastrService.danger('', (!this.account.isDelete ? 'Disabled employee fail! ' : 'Active employee fail! ') + err.message, { duration: 3000 }); return of(undefined); }), finalize(() => { this.useHideSpinner(); }) ) .subscribe() this.subscriptions.push(subscription); } useRemove = (ref: NbDialogRef<any>) => { ref.close(); this.useShowSpinner(); const subscription = this.service.remove(this.account.id) .pipe( tap((data) => { this.toastrService.success('', 'Remove employee successful', { duration: 3000 }); }), catchError((err) => { (err); this.toastrService.danger('', 'Remove employee fail! 
' + err.message, { duration: 3000 }); return of(undefined); }), finalize(() => { this.useHideSpinner(); }) ) .subscribe() this.subscriptions.push(subscription); } ngOnDestroy() { this.subscriptions.forEach((subscription$) => subscription$.unsubscribe()); } }
#ifndef WORK_QUEUE_HPP_ #define WORK_QUEUE_HPP_ #include "spmc.hpp" #include <thread> #include <vector> //Uses a single queue per thread //but upon failure to get work //a thread goes and looks for other threads //while possible to have a single wait free queue //will pay the costs of contention. Or at least we will see... //now //in the real world //that's a-ok since work done by threads is going to be longer than //the cost of a memory read. But it's more interesting to make these //things highly optimized //avoiding virtual calls for now, is it worth it? //would make simpler, indirect call prediction would help //will benchmark this version first, and unless this is < 15-20 //ns/push-pop, will try virtualizing it template<class Callable> class sp_work_queue { struct work_queue{ spmc_queue<Callable> queue; alignas(64) uint64_t id; uint64_t _counter; //used to make work stealing more 'random' std::atomic<bool> alignas(64) needs_wakeup; void needs_notify(); void un_notify(); }; //this could point contention at a single source... 
//use as a last resort std::atomic<uint64_t> recent_work_stole; std::atomic<uint64_t> current_tasks; size_t current_queue; std::vector<work_queue> queues; void run_worker(work_queue& worker); bool steal_some_work(work_queue& worker, Callable& cl); bool steal_from_last_stolen(work_queue& worker, Callable& cl); bool steal_from_all(work_queue& worker, Callable& cl) //!Returns true if the queue is not accepting any more jobs bool is_dead(); public: bool add_job(const Callable& jb); }; template<class Callable> void sp_work_queue<Callable>::work_queue::needs_notify() { needs_wakeup.store(true, std::memory_order_release); } template<class Callable> void sp_work_queue<Callable>::work_queue::un_notify() { needs_wakeup.store(false, std::memory_order_release); } template<class Callable> bool sp_work_queue<Callable>::add_job(const Callable& jb) { for (size_t tries = 0; tries < n_queues * 2; tries++) { auto& curq = queues[current_queue]; current_queue = (current_queue + 1) % n_queues; if (curq.try_push(jb)) return true; } return false; } template<class Callable> bool sp_work_queue<Callable>::run_worker(work_queue& worker) { while !(is_dead()) { Callable mycl; //I use continue here to avoid deeply nested ifs //it is purely a stylistic choic. //Work was available - perform it and go onto the next iteration if (worker.queue.try_pop(mycl)) { mycl(); continue; } //No work in the queue - check out some other queues //using 'advanced' scheduling if (steal_some_work(worker, cl)) { mycl(); continue; } //Take a look at a queue which recently had work stolen if (steal_from_last_stolen(worker, cl)) { mycl(); continue; } //claim notification comes before final check of queue //to avoid race condition where //worker needs notification //but producer has pushed, and found a false check on notification worker.needs_notify(); if (worker.queue.try_pop(mycl)) { worker.un_notify(); mycl(); continue; } //later //wait_on(worker) } } //just get some implementations out there... 
//!This can be much smarter template<class Callable> bool sp_work_queue<Callable>::steal_some_work(worker& w, Callable& cl) { auto id = w.id + w.counter++; auto qs = queues.size(); for (size_t i = id; i < qs; i++) { if (queues[i % qs].try_pop(cl)) return true; } return false; } template<class Callable> bool sp_work_queue<Callable>::steal_from_last_stolen(worker& w, Callable& cl) { auto recst = recent_work_stole.load(std::memory_order_acquire); auto& rec = queues[recst]; return rec.try_pop(cl); } #endif
import {
  CanActivate,
  ExecutionContext,
  InternalServerErrorException,
} from '@nestjs/common';
import { StrategyRegistry } from './strategy.registry';

/**
 * Mixin factory: builds a CanActivate guard class bound to the strategy
 * registered under `name`. On success the validated user is attached to
 * `request.user` and the request is allowed through.
 */
export const AuthGuard = (name: string) => {
  return class AuthGuard implements CanActivate {
    public async canActivate(context: ExecutionContext): Promise<boolean> {
      const strategy = StrategyRegistry.getStrategy(name);

      // A missing strategy is a server wiring mistake, not a client error.
      if (!strategy) {
        console.warn(`strategy with name: ${name} not found`);
        throw new InternalServerErrorException();
      }

      // Promise.resolve tolerates validate() being either sync or async.
      const validatedUser = await Promise.resolve(strategy.validate(context));

      const request = context.switchToHttp().getRequest();
      request.user = validatedUser;
      return true;
    }
  };
};
<filename>src/module.ts export * from "./BigMap"; export * from "./BigSet"; export * from "./BigWeakMap"; export * from "./BigWeakSet";
def updateConfigPanel(self): self.configPanel.delete('1.0', END) self.writeToLog("Writing Install Configuration to info panel...\n") if self.install_config is not None: self.writeToConfigPanel("Currently Loaded Install Configuration:\n\n") self.writeToConfigPanel("Install Location: {}\n\n".format(self.install_config.install_location)) self.writeToConfigPanel("Modules to auto-build:\n-------------------------------\n") for module in self.install_config.get_module_list(): if module.build == "YES": self.writeToConfigPanel("Name: {},\t\t\tVersion: {}\n".format(module.name, module.version)) self.writeToConfigPanel("\nModules with detected custom build scripts:\n----------------------------\n") for module in self.install_config.get_module_list(): if module.custom_build_script_path is not None: self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version)) self.writeToConfigPanel("\nModules to clone but not build:\n----------------------------\n") for module in self.install_config.get_module_list(): if module.build == "NO" and module.clone == "YES": self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version)) self.writeToConfigPanel("\nModules to package:\n-----------------------------\n") for module in self.install_config.get_module_list(): if module.package == "YES": self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version)) self.writeToLog("Done.\n\n") else: self.showErrorMessage("Config Error", "ERROR - Could not display Install Configuration: not loaded correctly")
<filename>src/acme/mod.rs
/// Certificate Authority functionality
pub mod ca;
/// Challenge management, including supervisory handlers.
pub mod challenge;
/// Types for managing DNS records
pub mod dns;
/// ACME HTTP handlers
pub mod handlers;
/// ACME JOSE implementation
pub mod jose;

use std::{collections::HashSet, convert::TryFrom, sync::Arc};

use hyper::Body;
use tokio::sync::Mutex;

use async_trait::async_trait;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};

use crate::{
    errors::{
        db::{LoadError, SaveError},
        ACMEValidationError,
    },
    models::{nonce::Nonce, Postgres, Record},
    util::make_nonce,
};

use self::dns::DNSName;

lazy_static! {
    /// List of supported algorithms, with the ACME preferred one first; in our case this is
    /// "ES256".
    pub static ref ACME_EXPECTED_ALGS: [String; 2] = ["ES256".to_string(), "RS256".to_string()];
}

/// A Result<> that calls can return to trampoline through ratpack handlers swiftly by triggering HTTP
/// "problem documents" (RFC7807) to be returned immediately from ratpack's routing framework.
#[must_use]
pub enum ACMEResult {
    Ok(hyper::Response<Body>),
    Err(crate::errors::Error),
}

// Converts an ACMEResult into a handler-level Result: the Ok branch passes
// the response through; the Err branch serializes the error to JSON and
// wraps it in an HTTP 500 response. Serialization failure is the only way
// this returns Err.
// NOTE(review): implementing `From<ACMEResult> for Result<...>` would be the
// more idiomatic direction (clippy::from_over_into) — confirm no callers
// rely on the explicit `Into` bound before changing.
impl Into<Result<hyper::Response<Body>, serde_json::Error>> for ACMEResult {
    fn into(self) -> Result<hyper::Response<Body>, serde_json::Error> {
        match self {
            ACMEResult::Ok(res) => Ok(res),
            ACMEResult::Err(e) => {
                return Ok(hyper::Response::builder()
                    .status(500)
                    .header("content-type", "application/json")
                    .body(Body::from(serde_json::to_string(&e)?))
                    .unwrap())
            }
        }
    }
}

// Lets fallible handler code use `?`-style conversion of internal errors
// straight into an ACMEResult.
impl From<crate::errors::Error> for ACMEResult {
    fn from(e: crate::errors::Error) -> Self {
        return ACMEResult::Err(e);
    }
}

/// Defines the notion of an "identifier" from the ACME specification.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
// NOTE: other identifier types as they are added may break this
#[serde(tag = "type", content = "value")]
pub enum ACMEIdentifier {
    DNS(dns::DNSName), // NOTE: DNS names cannot be wildcards.
}

// Parses an identifier from its string form; any DNS parse failure is
// surfaced as a generic LoadError carrying the parser's message.
impl TryFrom<String> for ACMEIdentifier {
    type Error = LoadError;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        match DNSName::from_str(&value) {
            Ok(x) => Ok(ACMEIdentifier::DNS(x)),
            Err(e) => Err(LoadError::Generic(e.to_string())),
        }
    }
}

impl ACMEIdentifier {
    // Consumes the identifier and returns its DNS name as a string.
    // NOTE(review): this shadows the usual `Display`-backed to_string and
    // takes `self` by value — callers must clone to keep the identifier.
    pub fn to_string(self) -> String {
        match self {
            ACMEIdentifier::DNS(name) => name.to_string(),
        }
    }
}

#[async_trait]
/// NonceValidator is a storage trait that controls the generation and validation of nonces, used
/// heavily in ACME and especially in the `Replay-Nonce` HTTP header present in all calls, and the
/// `nonce` field in ACME protected headers.
pub trait NonceValidator {
    /// This function must mutate the underlying storage to prune the nonce it's validating after a
    /// successful fetch. One may use ACMEValidationError::NonceFetchError to specify errors with
    /// fetching the Nonce. Likewise, ACMEValidationError::NonceNotFound is expected to be returned
    /// when the nonce cannot be located (validation error).
    async fn validate(&self, nonce: &str) -> Result<(), ACMEValidationError>;

    /// This function is expected to always make & store a new nonce; if it fails to add because it already
    /// exists, it should return error.
    async fn make(&self) -> Result<String, SaveError>;
}

/// Defines a basic (very basic) Nonce validation system
// In-memory implementation: nonces live in a HashSet behind an async Mutex,
// shared across clones via Arc. Not persistent across restarts.
#[derive(Debug, Clone)]
pub struct SetValidator(Arc<Mutex<HashSet<String>>>);

impl Default for SetValidator {
    fn default() -> Self {
        SetValidator(Arc::new(Mutex::new(HashSet::new())))
    }
}

#[async_trait]
impl NonceValidator for SetValidator {
    // `remove` both checks membership and prunes the nonce in one step,
    // satisfying the trait's single-use requirement.
    async fn validate(&self, nonce: &str) -> Result<(), ACMEValidationError> {
        if self.0.lock().await.remove(nonce) {
            Ok(())
        } else {
            Err(ACMEValidationError::NonceNotFound)
        }
    }

    // Generates a fresh nonce; `insert` returning false means a collision
    // with an existing nonce, which is reported as a save failure per the
    // trait contract.
    async fn make(&self) -> Result<String, SaveError> {
        let nonce = make_nonce(None);
        if !self.0.lock().await.insert(nonce.clone()) {
            return Err(SaveError::Generic("could not persist nonce".to_string()));
        }
        Ok(nonce)
    }
}

#[derive(Clone)]
/// Defines a PostgreSQL-backed nonce validator
pub struct PostgresNonceValidator(crate::models::Postgres);

impl PostgresNonceValidator {
    pub fn new(pg: Postgres) -> Self {
        Self(pg)
    }
}

#[async_trait]
impl NonceValidator for PostgresNonceValidator {
    // Find-then-delete against the nonces table; both a missing row and a
    // failed delete collapse into NonceNotFound for the caller.
    async fn validate(&self, nonce: &str) -> Result<(), ACMEValidationError> {
        let nonce = match Nonce::find(nonce.to_string(), self.0.clone()).await {
            Ok(nonce) => nonce,
            Err(_) => return Err(ACMEValidationError::NonceNotFound),
        };

        if let Err(_) = nonce.delete(self.0.clone()).await {
            return Err(ACMEValidationError::NonceNotFound);
        }

        Ok(())
    }

    // Persists a new Nonce row and returns its id.
    // NOTE(review): the double unwrap suggests `id()` returns a nested
    // Option and assumes `create` always populates it — confirm against the
    // models::nonce implementation; a failure here panics.
    async fn make(&self) -> Result<String, SaveError> {
        let mut nonce = Nonce::new();
        nonce.create(self.0.clone()).await?;
        Ok(nonce.id().unwrap().unwrap())
    }
}
// cmpSlices compares two slices of strings and returns true if they contain
// the same elements, returns false otherwise. The elements in the slices
// don't have to be in the same order for them to be equal.
//
// Fix: the original containment scan treated the slices as sets, so inputs
// with duplicates such as ["a","a"] vs ["a","b"] compared equal despite
// differing contents. Counting occurrences compares them as multisets and
// also drops the O(n^2) nested scan.
func cmpSlices(s1, s2 []string) bool {
	if len(s1) != len(s2) {
		return false
	}
	counts := make(map[string]int, len(s1))
	for _, s := range s1 {
		counts[s]++
	}
	for _, s := range s2 {
		counts[s]--
		if counts[s] < 0 {
			return false
		}
	}
	// Lengths match and no count went negative, so every count is zero.
	return true
}
This product defies corporate ideas about what a RPG should be, and in doing so, became very important, not just to the users who love this setting , but it pushed RPG design into a new direction. Dark Sun breaks many of the traditional rules and ideas that governed RPG design at the time : it ignored the formula for achieving game balance, it perfected and applied the meta-game in a functional way that favored the user over the company, it . . . well, it broke a lot of new ground. This game is important on so many levels that it makes talking about it in a cohesive way very difficult. The exact product that we’ll be looking at today is 2400. It is the original boxed set, and it contained so many rules changes, that it worried TSR Executives so much that they had to go back and increase the influence of AD&D; which they ended up doing, but twisting the traditional concepts in very creative ways that still made Dark Sun unique. Most of TSR's titles are full of things that, honestly, you can do yourself. You are paying to have lots of work done for you, but the tropes and elements within the setting are all things that follow a default setting. If we create a map, and just apply the principles of the Core Rules, we still end up with Forgotten Realms. Dark Sun is not Forgotten Realms. The designers actually earned their money with this product, creating new design and achieving something that we folks at home couldn't really come up with by ourselves; not on this level anyway! They got paid for their efforts, and thought deeply about ways to take a dumb concept like “War World” and make into a professional quality game with its own identity and design. The concepts introduced are elegant and unique, they not only allow the game to function differently, but they push the users in a direction that, up to that point, TSR had been pushing them away from. This is not a game for new users. 
Dark Sun was designed for very advanced tables that have almost become jaded with traditional fantasy tropes. It has been well documented on the Web that this is a very difficult game, but what hasn’t been discussed is what makes it so hard to play. It isn’t just the mechanics, it isn’t just the combat, nor that PCs will die, it isn’t even the imbalanced nature of the game; what makes Dark Sun so challenging is that it attacks you on a psychological level. Unlike most games where the players are limited in what they can do by their characters, in this game, the characters are limited and held back by us. All that stuff that players are used to doing: saving little villages against bullies, defending lawful kingdoms from evil enemies. All of that clear cut Good vs. Evil stuff goes right out the window. By the time that you start playing Dark Sun, those wars and struggles had ended centuries ago, and the good guys lost. Dark Sun isn’t a war game, it is a survival game. The players who are used to being in control of their own destinies, and like to feel important to the story, aren’t. The characters that they will be playing are strong enough to take over typical AD&D settings, but here, they aren’t worth the salt in their own tears. This world is brutal, and it affects the people playing in it. Many clubs who start playing Dark Sun quit, they make up excuses that the setting is too preachy, or complain because they don’t seem to be getting anywhere; these are cop-outs. Dark Sun is difficult because it forces us to think and play differently than we had to before. All of the weak and helpless are gone, going murder hoboing isn’t just a PC strategy that they share with the villain, this is a way of life. Characters in Dark Sun aren’t nice, there is a moral ambiguity to everything, and always a feeling of isolated repression that most players of RPGs just aren’t ready for. 
The fact that whatever you think you earned yesterday, you have to defend today, makes any victory or sense of gain few and far between. We are talking about a world where finding a weapon made out of copper is a huge deal! There just aren’t enough resources to go around, and that leads to some very very dark sessions. Even the DM is not exempt from the cruelty of Dark Sun, as we’ve got to enforce the rules and amp up our capacity to concoct evil acts that go way beyond the standards of the typical game. This isn’t a horror game (the ideas behind horror are almost romantic in nature and execution), Dark Sun is a metaphor for much darker lines of thinking that cause emotional and psychological discomfort because, unlike other settings, we really wouldn't want to go to this place, but we fear that one day we might just be forced to. The Rules book within the box will be used by both Players and the Dungeon Master regularly. All of the classic races have been twisted to fit the setting, and new, more powerful playable races have been added. Players are able to exceed the standard ability scores, and are encouraged to create super characters, as they will be very hard to keep alive. At all times the players must have at least 3 characters ready to go; they can choose one of them to play for that session. Characters are also started at a higher level than normal; because of the conditions on Athas, there are no low level characters. The world is more dangerous than normal, but honestly, a really good player can keep a character alive, especially if you’ve been playing 2nd Edition for a long time, but it does let the DM be more aggressive than he typically would be. The level of risk that one must take to get by here is high. The enemy is typically desperate, so fights to the death are typically the rule, not the exception. 
Classes have also been introduced which would normally only be associated with villains, if you want to play a Templar, for instance, and serve under a Sorcerer King, you can, and you’ll get the same benefits as the NPCs do. How this translates into a cooperative game is left to the party to figure out. I’m sure that everyone who is reading this knows all of these changes already, so I won’t go into it to much. This is a basic introduction to a very large world which is supported by modules and novels, however since everything has been completed, the DM has even more options than he did at the time that this product was currently being circulated. If the DM wants to run this according to core, or just build upon the basic concepts Dark Sun supports either/or. The second booklet is the Wanderer’s Journal, which presents the setting to the Dungeon Master. It helps the user understand the culture and gives adventure ideas. They also added a short story called “A Little Knowledge”, as this is meant to be a literary style game. The module included with the Setting is a bit odd, it included two flip books, one for the Dungeon Master, and one for the Player Characters. Art is very important to this setting, not just for the users, the dedicated artist of Dark Sun, Gerald Brom, was instrumental in the games overall design. He would draw characters, places, and items, and the writers would come up with ways to introduce them into the game. Historically, this was the first TSR product that incorporated a dedicated professional artist which was responsible for capturing the unique look and feel of a setting. It is up to the DM if they want to use the flip books or not,there is enough potential material here to play the game without the published adventure. A SUCCESSFUL META-GAME Typically the meta-gaming concept always benefits the company as it allows a product to be re-marketed and repackaged again and again and again. 
It involves reworking the map, dramatically altering the setting, and enforcing DM PCs disguised as NPCs. Typically the DM’s first task of prepping a new setting is finding these elements, and minimizing or eliminating their impact upon the game itself. Forgotten Realms didn’t need the Spell Plague, it didn’t benefit the users at all. The Lords of Ravenloft are simplified and pointless characters that anybody could create themselves and a successful game is achieved by completely ignoring them during play. Dark Sun is a meta-game, but the meta-game has been properly incorporated into the system, and allowed to function where it belongs, in the background. There are major NPCs; however they are actually functional, it is up to the players to identify them and decide if the character should be eliminated or not. How a character being removed from the board will impact the game according to the needs and the creative whims of the DM. What these major players are doing impacts the game, but it does so in a way which frees up the DM to run the game better. The challenge in running these scenarios comes in micromanaging the party; the primary and daily goals for the PCs is always satisfying basic needs first, gathering resources enough to risk practicing higher ideals is where the true heroics of this game come through. To have predictable characters in the background is a blessing for the DM, not a curse. THE DARK SUN SOAPBOX This game is very different than any other setting published by TSR, this rises above just a game where people sit around the table and play pretend: the design, the function, the story, all of it combined with the imaginations and development through individual play results in something that is art. Dark Sun is social commentary, it uses metaphors and deals with very adult and complex issues which are directly mirrored by real life. That results in something that, while uncomfortable to play, provides a fascinating experience that is unique to it. 
All of the people who started playing Dark Sun and quit because it was disturbing never got to the true heart of the setting itself; it is easy to be blinded by the violence, the wickedness, the unfathomable odds stacked against you, but you also get to experience an element here that isn’t as pure in other settings: Hope. This game is amazing and fun, but very different from anything that you will play. It will be a very trying and grueling experience, but a very rewarding one as well. It has the potential to take a great player and make them even better. If you want to escape from the Tolkien influence and discover brand new challenges, this is the product for you. I think that many of us want to play evil characters to exorcise demons or just to cut loose. This game allows users to do that, and continue to learn from the experience. Evil consumes itself, and that is exactly what is going on in Dark Sun. While in the short term it feels like you aren’t getting anywhere; that there are no real rewards to what you are doing, in the long term game Dark Sun, unlike standard D&D, allows you to actually feel a huge sense of accomplishment, especially if you are able to answer the challenge of this miserable dying world in a meaningful way. I give this product an A+. This is perhaps the greatest thing that TSR accidentally released. It wasn’t directed at their target audience of novice consumers, but at experienced users who desperately needed a challenge, and were hungry for new ways to play without sacrificing design. For a hard to hit demographic, it exceeded my expectations. As far as the relevance to modern gaming; the days of living under the threat of The Bomb have returned, so yes. The ideas and fears which inspired this game are not out of date. Now, this specific product is the introduction to Dark Sun; it will get you started. 
Two other boxsets fleshed out the system, and in 1995 Dark Sun was revised, and while that set is more complete and better written than this one, I feel that in order to have the greatest potential at the table, it is this original box set that you should get. This is Athas in its purest form, warts and all, and not to sound like an elitist, but if the DM can’t tailor the original to fit his club’s needs, then they probably aren’t ready to run it yet. In the last 10 years the desire for this product has increased the price. While I always prefer to have hard-copies, the PDF is definitely an option. The ideas behind Dark Sun is what matters, one can print off the material that is required to run the game, and leave the rest on your PC and do just fine. The legacy of Dark Sun was as epic as the game itself, users updated it to 3e, unfortunately it was softened in the process. 4e also took a crack at it, but the 2nd Edition Setting is the one that offers the greatest amount of potential and challenge. It is this specific timeline that will shape the game into a unique experience, and it really is a shame when people skip it.
The UK will spend £180 million of taxpayers’ money over five years in Africa in an effort to convince migrants to stay where they are, reintegrate, or “return home”. The International Development Secretary Penny Mordaunt made the announcement on what has been called International Migrants Day, claiming the “UK aid will help tackle root causes driving people to migrate”. The package, targeting Libya, Sudan, and Tanzania, aims to help create jobs, improve education, and tackle poverty and modern-day slavery. Around £121 million will be spent over five years in Sudan “supporting at least 450,000 refugees, migrants and community members” by providing them with housing and food. In Tanzania, £55 million will be spent over four years to help keep 460,000 migrants and refugees in the nation, including “identif[ying] ways for people to find work, so that they can stay where they are and either return home or not be forced to migrate elsewhere.” An extra £5 million will go to Libya to protect migrants affected by slavery and abuse, after many were drawn to the nation by the European Union’s lax borders and aid agency’s promises of safety. News – International Development Secretary @PennyMordaunt sets out how UK aid will help tackle the root causes driving people to migrate: https://t.co/vV1pkKP6bj #InternationalMigrantsDay pic.twitter.com/DJjYfQEEfL — DFID (@DFID_UK) December 18, 2017 Announcing the package, Ms. Mordaunt said: “The sheer scale of the migration crisis in the Mediterranean makes it one of the most pressing global challenges we face, and behind the numbers are millions of individual tales of both hope and tragedy. “That’s why UK aid is working to help address the root causes of mass migration by creating jobs and providing education, whilst also supporting vulnerable migrants whose lives are at risk due to a lack of food and medicine, or whose freedom is at risk from traffickers and criminal thugs. “The UK will be directly affected unless we take action now. 
“There is no silver bullet and this approach will take time, but as we continue to create jobs, educate thousands of children and save lives, the benefit of our support for the poorest people and the UK will become increasingly obvious.”
/**
 * Helper for testing with an operation processor
 *
 * @author bratseth
 */
public class OperationProcessorTester {

    private final Endpoint endpoint;
    private final int clusterId = 0;
    private final ManualClock clock;
    private final TestResultCallback resultCallback;
    private final OperationProcessor operationProcessor;

    public OperationProcessorTester() {
        endpoint = Endpoint.create("test-endpoint");

        // One cluster with a single test endpoint.
        SessionParams.Builder params = new SessionParams.Builder();
        Cluster.Builder clusterParams = new Cluster.Builder();
        clusterParams.addEndpoint(endpoint);
        params.addCluster(clusterParams.build());

        // Dry-run and no background threads: progress happens only when the
        // test calls tick(), keeping the processor fully deterministic.
        ConnectionParams.Builder connectionParams = new ConnectionParams.Builder();
        connectionParams.setDryRun(true);
        connectionParams.setRunThreads(false);
        params.setConnectionParams(connectionParams.build());

        // Manual clock starting at epoch so tests control time explicitly.
        clock = new ManualClock(Instant.ofEpochMilli(0));
        resultCallback = new TestResultCallback();
        operationProcessor = new OperationProcessor(new IncompleteResultsThrottler(1, 100, clock, new ThrottlePolicy()),
                                                    resultCallback,
                                                    params.build(),
                                                    new ScheduledThreadPoolExecutor(1),
                                                    clock);
    }

    public ManualClock clock() { return clock; }

    /** Asserts that this has but a single IOThread and returns it */
    public IOThread getSingleIOThread() {
        assertEquals(1, clusterConnections().size());
        assertEquals(1, clusterConnections().get(0).ioThreads().size());
        return clusterConnections().get(0).ioThreads().get(0);
    }

    /** Do n iteration of work in all io threads of this */
    public void tick(int n) {
        for (int i = 0; i < n; i++)
            for (ClusterConnection cluster : operationProcessor.clusters())
                for (IOThread thread : cluster.ioThreads())
                    thread.tick();
    }

    /** Feeds a document whose id, operation id and data are all derived from the given id. */
    public void send(String documentId) {
        operationProcessor.sendDocument(new Document(documentId, documentId, "data of " + documentId,
                                                     null, clock.instant()));
    }

    /** Returns the number of operations sent but not yet completed. */
    public int incomplete() {
        return operationProcessor.getIncompleteResultQueueSize();
    }

    /** Returns the number of operations completed successfully so far. */
    public int success() {
        return resultCallback.successes;
    }

    public List<ClusterConnection> clusterConnections() {
        return operationProcessor.clusters();
    }

    /** Returns the number of operations completed with failure so far. */
    public int failures() {
        return resultCallback.failures;
    }

    /** Returns the number of endpoint-level exceptions observed so far. */
    public int endpointExceptions() {
        return resultCallback.endpointExceptions;
    }

    /** Returns the most recently completed result, or null if none completed yet. */
    public Result lastResult() {
        return resultCallback.lastResult;
    }

    /** Records completions so tests can assert on counts and the last result. */
    private static class TestResultCallback implements FeedClient.ResultCallback {

        private int successes = 0;
        private int failures = 0;
        private int endpointExceptions = 0;
        private Result lastResult;

        @Override
        public void onCompletion(String docId, Result documentResult) {
            this.lastResult = documentResult;
            if (documentResult.isSuccess())
                successes++;
            else
                failures++;
        }

        @Override
        public void onEndpointException(FeedEndpointException exception) {
            endpointExceptions++;
        }

    }

}
// Main lexer. Lexes a string from a given point. int lex(struct Token *reading, char string[], size_t *c) { while (1) { if (string[*c] == '`') { (*c)++; while (string[*c] != '`') { putchar(string[*c]); (*c)++; } putchar('\n'); (*c)++; } else if (string[*c] == '/' && string[*c + 1] == '/') { (*c) += 2; while (string[*c] != '\n') { (*c)++; } (*c)++; } else if (skipChar(string[*c])) { while (skipChar(string[*c])) { (*c)++; } } else { break; } } reading->type = 0; reading->value = 0; switch (string[(*c)++]) { case '\0': reading->type = FILE_END; return 0; case '{': reading->type = BRACKET_LEFT; return 0; case '}': reading->type = BRACKET_RIGHT; return 0; case '+': reading->type = ADD; return 0; case '*': reading->type = MULT; return 0; case '(': reading->type = PAREN_LEFT; return 0; case ')': reading->type = PAREN_RIGHT; return 0; case ';': reading->type = SEMICOLON; return 0; case '=': reading->type = EQUAL; return 0; } (*c)--; while (isAlpha(string[*c])) { reading->type = TEXT; reading->string[reading->value] = string[*c]; reading->value++; (*c)++; } if (string[*c] == '\"') { (*c)++; while (string[*c] != '\"') { reading->type = TEXT; reading->string[reading->value] = string[*c]; reading->value++; (*c)++; } } reading->string[reading->value] = '\0'; if (string[*c] == '\'') { (*c)++; reading->type = INTEGER; reading->value = string[*c]; (*c) += 2; return 0; } while (string[*c] >= '0' && string[*c] <= '9') { reading->type = INTEGER; reading->value *= 10; reading->value += string[*c] - '0'; (*c)++; } return 0; }
<reponame>PlutoDAO/simple-stellar-signer import { xBullWalletConnect } from '@creit-tech/xbull-wallet-connect'; import type { Transaction } from 'stellar-sdk'; import { xBull } from '../../../assets'; import { CURRENT_STELLAR_NETWORK, StellarNetwork } from '../../stellar/StellarNetwork'; import type IStorage from '../../storage/IStorage'; import AbstractWallet from '../AbstractWallet'; import type IWallet from '../IWallet'; type XBullNetwork = 'public' | 'testnet'; export default class XBull extends AbstractWallet implements IWallet { public static NAME = 'xbull'; public static FRIENDLY_NAME = 'xBull'; public static XBullExtension = 'https://wallet.xbull.app'; public xBullBridge: xBullWalletConnect; public XBullNetwork: XBullNetwork; constructor(storage: IStorage) { super(storage); this.xBullBridge = new xBullWalletConnect(); if (CURRENT_STELLAR_NETWORK === StellarNetwork.PUBLIC) { this.XBullNetwork = StellarNetwork.PUBLIC as XBullNetwork; } else { this.XBullNetwork = StellarNetwork.TESTNET as XBullNetwork; } } public override async getPublicKey(): Promise<string> { const publicKey = await this.xBullBridge.connect(); super.persistWallet(); return publicKey; } public override async sign(tx: Transaction): Promise<string> { const signedXdr = await this.xBullBridge.sign({ xdr: tx.toXDR() }); this.xBullBridge.closeConnections(); return signedXdr; } public override getFriendlyName(): string { return XBull.FRIENDLY_NAME; } public override getName(): string { return XBull.NAME; } public override getImage(): string { return xBull; } public override getExtension(): string { return XBull.XBullExtension; } public override isInstalled(): Promise<boolean> { const xBullPromise: Promise<boolean> = new Promise((resolve) => { resolve(true); }); return xBullPromise; } }
def observable_product(*observables): res_obs = {} for obs in observables: for k in obs: if k in res_obs: if obs[k] != res_obs[k]: return None else: res_obs[k] = obs[k] return res_obs
EU Referendum: the myth of preventing war 09/05/2016 Follow @eureferendum With Mr Cameron claiming on this of all days, "Europe Day", that leaving the EU would bring us closer to war, all he is actually doing is repeating the most insidious myth of them all in relation to the European Union - that this monstrous construct has helped keep the peace. With Mr Cameron claiming on this of all days, "Europe Day", that leaving the EU would bring us closer to war, all he is actually doing is repeating the most insidious myth of them all in relation to the European Union - that this monstrous construct has helped keep the peace. The myth finds its purest form in the EU hagiography, where the European Commission asserts that "probably very few people in Europe know that on 9 May 1950 the first move was made towards the creation of what is now known as the European Union". Crucially, it would have us believe that this was an attempt at preventing a Third World War, but completely omits to tell us that the intellectual genesis lies not in the post-WWII period but in the 1920s, devised in response to the problems arising out of the First World War. For the myth-makers, though, the origin of the EU was Paris on 9 May 1950. Against the background of the threat of a Third World War engulfing the whole of Europe, the then French Foreign Minister Robert Schuman (pictured above, right, with Jean Monnet) read to the international press a declaration calling on France, Germany and other European countries to pool their coal and steel production as "the first concrete foundation of a European federation". Needless to say, the reality is very different. Far from the heroic Schuman standing at the centre of the "project", he turns out to be little more than an unwitting stooge, manipulated by one man who had made it his life's work to set up a "government for Europe". That man was Jean Monnet. 
At the time, he was nearing the end of implementing his four-year plan for the "modernisation" of France. Already, he had seen two post-war attempts at creating his "government" fail. These had been the OEEC, created on the back of the …ever give concrete expression to European unity. Amid these vast groupings of countries, the common interest was too indistinct, and common disciplines were too lax. A start would have to be made by doing something more practical and more ambitious. National sovereignty would have to be tackled more boldly and on a narrower front. However, if Monnet was sure that something much "more practical and ambitious" was needed to achieve the desired goal, then events in the late spring of 1950 conspired to create precisely the opportunity he was looking for. During 1949, West Germany had finally emerged to self-government under the Chancellorship of Konrad Adenauer. Under its Basic Law, passed on 8 May 1949, the new Federal Democratic Republic, or FDR, was based on a federation of the eleven highly decentralised Land governments which, on British insistence, retained considerable power, guaranteed by a constitutional court. In crucial respects the federal government, centred in Bonn, could not act without the consent of the Länder. In particular, all international treaties had to be ratified by the Länder through their legislative assembly, the Bundesrat. At the time, the new Germany, under the guidance of Ludwig Erhard, was already showing signs of a remarkable economic recovery. This raised the question of how the new nation should be assimilated into the western European community. This was what was on offer was the creation of a supranational European Institution, charged with the management of the coal and steel industry, the very sector which was, at that time, the basis of all military power. 
The countries which Schuman called upon had almost destroyed each other "in a dreadful conflict which had left after it a sense of material and moral desolation". Thus, concludes the European Commission in its own history, "Everything... began that day".Needless to say, the reality is very different. Far from the heroic Schuman standing at the centre of the "project", he turns out to be little more than an unwitting stooge, manipulated by one man who had made its his life's work to set up a "government for Europe".That man was Jean Monnet. At the time, he was nearing the end of implementing his four-year plan for the "modernisation" of France. Already, he had seen two post-war attempts at creating his "government" fail. These had been the OEEC, created on the back of the Marshall Plan , and the Council of Europe. With a sense of resigned detachment, he had decided that neither of them could:However, if Monnet was sure that something much "more practical and ambitious" was needed to achieve the desired goal, then events in the late spring of 1950 conspired to create precisely the opportunity he was looking for.During 1949, West Germany had finally emerged to self-government under the Chancellorship of Konrad Adenauer. Under its Basic Law, passed on 8 May 1949, the new Federal Democratic Republic, or FDR, was based on a federation of the eleven highly decentralised Land governments which, on British insistence, retained considerable power, guaranteed by a constitutional court. In crucial respects the federal government, centred in Bonn, could not act without the consent of the Länder. In particular, all international treaties had to be ratified by the Länder through their legislative assembly, the Bundesrat.At the time, the new Germany, under the guidance of Ludwig Erhard, was already showing signs of a remarkable economic recovery. This raised the question of how the new nation should be assimilated into the western European community. 
The argument centred on that old bone of contention, the coal and steel industries of the Ruhr, heartland of Germany's economy and formerly the arsenal of her war machine. In 1948, France had demanded the setting up of an International Ruhr Authority, which would enable French officials to control Germany's coal and steel production and ensure that a substantial part of that production was diverted to aid French reconstruction. It was a curious echo of France's disastrous policy after the First World War. Naturally the new West Germany was bitterly opposed to such an authority. Equally so were the other two occupying powers, America and Britain. For over two years this dispute had festered, without resolution. But in the spring of 1950 the US Secretary of State Dean Acheson finally lost patience. He issued France with what amounted to an ultimatum. On 11 May there would be a foreign ministers' meeting in London; and unless the French could offer a satisfactory compromise proposal, the USA would impose a solution on all parties. What Monnet had in mind was that the coal and steel industries, not just of France and Germany but of other western European countries, should be placed under the direction of a supranational authority: just as over dinner in Paris in 1917 he and Arthur Salter had come up with a similar plan for the control of allied shipping. When Monnet came to commit his plan to paper, he was obviously troubled by how much he dare reveal of its real underlying purpose. Before getting to its final stage, it went through nine separate drafts. In the first, the pooling of coal and steel was regarded as "the first step of a Franco-German Union". The second opened it up to the "first step of a Franco-German Union and a European federation". By the fifth draft, this had been changed to "Europe must be organised on a federal basis. A Franco-German Union is an essential element is this". The seventh demanded that "Europe must be organised on a Federal basis". 
But, by the final draft, almost all this was missing. All he would allow himself was a reference to the pool being "the first step of a European federation", a vague term which could mean different things to different people. Although what Monnet really had in mind was the creation of a European entity with all the attributes of a state, the anodyne phrasing was deliberately chosen with a view to making it difficult to dilute by converting it into just another intergovernmental body. It was also couched in this fashion so that it would not scare off national governments by emphasising that its purpose was to override their sovereignty. At the Council of Europe in August 1949 Churchill had shocked many delegates by proposing that she should be given the warmest of welcomes. Two of the western occupying powers, the USA and Britain, wanted to see her continue on the road towards full economic recovery and nationhood as soon as possible. But this had provoked a deep rift with France, which wanted to continue exercising control over the German economy, for fear that she might once again become too strong a political and economic rival.The argument centred on that old bone of contention, the coal and steel industries of the Ruhr, heartland of Germany's economy and formerly the arsenal of her war machine. In 1948, France had demanded the setting up of an International Ruhr Authority, which would enable French officials to control Germany's coal and steel production and ensure that a substantial part of that production was diverted to aid French reconstruction. It was a curious echo of France's disastrous policy after the First World War. Naturally the new West Germany was bitterly opposed to such an authority. Equally so were the other two occupying powers, America and Britain.For over two years this dispute had festered, without resolution. But in the spring of 1950 the US Secretary of State Dean Acheson finally lost patience. 
He issued France with what amounted to an ultimatum. On 11 May there would be a foreign ministers' meeting in London; and unless the French could offer a satisfactory compromise proposal, the USA would impose a solution on all parties. This gave Monnet the opportunity for which he had been waiting. For years he had dreamed of building a "United States of Europe", beginning by integrating the coal and steel industries, and setting up a supranational authority to run them. This was the idea first put forward in the 1920s, by Coudenhove and Louis Loucheur , and partly implemented by Luxembourg steel magnate, Emil Mayrisch in 1926. It was the idea Monnet himself had outlined to Paul-Henri Spaak in 1941 and in a memorandum he had written in Algiers in 1943.What Monnet had in mind was that the coal and steel industries, not just of France and Germany but of other western European countries, should be placed under the direction of a supranational authority: just as over dinner in Paris in 1917 he and Arthur Salter had come up with a similar plan for the control of allied shipping.When Monnet came to commit his plan to paper, he was obviously troubled by how much he dare reveal of its real underlying purpose. Before getting to its final stage, it went through nine separate drafts. In the first, the pooling of coal and steel was regarded as "the first step of a Franco-German Union".The second opened it up to the "first step of a Franco-German Union and a European federation". By the fifth draft, this had been changed to "Europe must be organised on a federal basis. A Franco-German Union is an essential element is this". The seventh demanded that "Europe must be organised on a Federal basis". But, by the final draft, almost all this was missing. 
All he would allow himself was a reference to the pool being "the first step of a European federation", a vague term which could mean different things to different people.Although what Monnet really had in mind was the creation of a European entity with all the attributes of a state, the anodyne phrasing was deliberately chosen with a view to making it difficult to dilute by converting it into just another intergovernmental body. It was also couched in this fashion so that it would not scare off national governments by emphasising that its purpose was to override their sovereignty. Once his memorandum was complete, Monnet's next problem was how to get it adopted. He could not act as the champion of his own plan. As a natural behind-the-scenes operator, his style was always to act indirectly. He needed to win over very senior support in the French government. Furthermore, as a potential advocate, Schuman had other advantages. Born in 1886 in Luxembourg to a German mother, he was fluent in both German and French, having read law at the universities of Berlin, Munich and Bonn. He had then moved to Alsace Lorraine when it was under German rule, which meant that in 1914 he had been recruited into the German army. Yet in the Second World War, when Alsace Lorraine had again become part of Germany, he had, as a French citizen, been arrested by the Gestapo. He was thus a perfect witness to the need to resolve the Franco-German conflict. To get to Schuman, Monnet approached his chef de cabinet Bernard Clappier, telling him to advise his boss that he had some ideas for the London conference. He had expected Clappier to call him back but, by Friday 28 April, Monnet had heard nothing. Fearing that Schuman was not interested, Monnet sent a copy of his memorandum to prime minister Georges Bidault, via his closest aide, again asking for it to passed on. In the memorandum, Monnet wrote of the "German situation" becoming a cancer that would be dangerous to peace. 
For future peace, he wrote, the creation of a dynamic Europe is indispensable: We must therefore abandon the forms of the past and enter the path of transformation, both by creating common basic economic conditions and by setting up new authorities accepted by the sovereign nations. Europe has never existed. It is not the addition of sovereign nations met together in councils that makes an entity of them. We must genuinely create Europe; it must become manifest to itself… Alas for Bidault, who thereby missed his chance of immortality, the memorandum did not reach him. Meanwhile, Clappier had re-appeared full of apologies. Monnet gave him a copy of the memorandum and immediately decided that Schuman should see it. He caught him at Gare de l'Est, as he was sitting in a train, waiting to go to Metz for the weekend. When Schuman returned to Paris, after studying the document, he had adopted the plan wholeheartedly. It had now become the "Schuman Plan", although in reality it was not his at all. In the final analysis, he was not even committed to it, except as a device to get him off a hook. Once Schuman had agreed, the contents of the Plan were passed by his office in great secrecy to the German chancellor, Konrad Adenauer, in the hope of securing his provisional agreement. Other governments, especially the British, were not told. According to Professor Bernard Lavergne, a prominent political commentator of the time, who was to publish a highly critical study of the plan: The curious thing was that M. Bidault, the Premier, was – at least, at first – not at all favourable to the Plan which, in early May, was suddenly sprung on him by his Foreign Minister, M. Schuman. And oddly enough – though this was typical of M. Schuman's furtive statesmanship and diplomacy – neither was M. 
François-Poncet, the French High Commissioner, nor the Quay d'Orsay, or even the French Government, properly informed of what was going on during the days that preceded the "Schuman bombshell" of 9 May. However, "as a result of a curious coincidence", Dean Acheson was already on his way to the summit in London, and had decided to go via Paris to confer informally with Schuman. By another "coincidence", Monnet was present at their meeting. As Monnet disingenuously put it, "courtesy and honesty obliged us to take Acheson into our confidence". The Plan was also presented to the French Cabinet, but only in a most perfunctory way: Only three or four ministers were informed about it (the Plan), and when, finally, on 8 May, the Council of Ministers met, no serious discussion took place at all. Schuman gave them a rough sketch of the Plan, and, without really knowing what it was all about, they gave it their blessing. Schuman then took an audacious step. He would announce "his" plan by appealing directly to the peoples of Europe, through the media. In a radio broadcast on 9 May 1950 – today officially commemorated as "Europe Day" – he revealed Monnet's plan to the world. "World peace", he began: …cannot be safeguarded without the making of creative efforts proportionate to the dangers which threaten it. The contribution which an organised and living Europe can bring to civilisation is indispensable to the maintenance of peaceful relations. In taking upon herself for more than 20 years the role of champion of a united Europe, France has always had as her essential aim the service of peace. A united Europe was not achieved and we had war. Europe will not be made all at once, or according to a single plan. It will be built through concrete achievements which first create a de facto solidarity. 
The coming together of the nations of Europe requires the elimination of the age-old opposition of France and Germany… With this aim in view the French Government proposes that action be taken immediately on one limited but decisive point…it proposes that Franco-German production of coal and steel as a whole be placed under a common High Authority, within the framework of an organisation open to the participation of the other countries of Europe. The pooling of coal and steel production should immediately provide for the setting up of common foundations for economic development as a first step in the federation of Europe. After describing how "the solidarity in production thus established will make it plain that any war between France and Germany becomes not merely unthinkable, but materially impossible", he went on to say that this would help simply and speedily to achieve "that fusion of interest which is indispensable to the establishment of a common economic system". The whole thing was an elaborate charade, a meticulous coup by the puppet-master extraordinaire, Jean Monnet, to a plan he and his colleague Arthur Salter had devised all those years ago, to deal with a different war and an entirely different geopolitical situations. But, with the backing of the commission, the myth endures. Hilariously, though, when I addressed a group of 150 sixth-formers recently, on the history of the European Union, not one of them knew that 9 May was "Europe Day". For all the effort the EU has expended on cultivating its core myth, it has failed to have any decisive impact. Whether Mr Cameron likes it or not, it is truly an idea The core of this narrative is drawn from the recently republished book, The Great Deception, by Christopher Booker and Richard North. In this, Monnet had constant contact with senior members of the French government. His obvious choice was foreign minister, Robert Schuman. 
It would be he that would have to face US Secretary of State Dean Acheson in a few days time. Monnet knew that his officials had few ideas to offer. He would most likely, therefore, be receptive to some new ideas.Furthermore, as a potential advocate, Schuman had other advantages. Born in 1886 in Luxembourg to a German mother, he was fluent in both German and French, having read law at the universities of Berlin, Munich and Bonn. He had then moved to Alsace Lorraine when it was under German rule, which meant that in 1914 he had been recruited into the German army. Yet in the Second World War, when Alsace Lorraine had again become part of Germany, he had, as a French citizen, been arrested by the Gestapo. He was thus a perfect witness to the need to resolve the Franco-German conflict.To get to Schuman, Monnet approached his chef de cabinet Bernard Clappier, telling him to advise his boss that he had some ideas for the London conference. He had expected Clappier to call him back but, by Friday 28 April, Monnet had heard nothing. Fearing that Schuman was not interested, Monnet sent a copy of his memorandum to prime minister Georges Bidault, via his closest aide, again asking for it to passed on. In the memorandum, Monnet wrote of the "German situation" becoming a cancer that would be dangerous to peace. For future peace, he wrote, the creation of a dynamic Europe is indispensable:Alas for Bidault, who thereby missed his chance of immortality, the memorandum did not reach him. Meanwhile, Clappier had re-appeared full of apologies. Monnet gave him a copy of the memorandum and immediately decided that Schuman should see it. He caught him at Gare de l'Est, as he was sitting in a train, waiting to go to Metz for the weekend. When Schuman returned to Paris, after studying the document, he had adopted the plan wholeheartedly. It had now become the "Schuman Plan", although in reality it was not his at all. 
In the final analysis, he was not even committed to it, except as a device to get him off a hook.Once Schuman had agreed, the contents of the Plan were passed by his office in great secrecy to the German chancellor, Konrad Adenauer, in the hope of securing his provisional agreement. Other governments, especially the British, were not told. According to Professor Bernard Lavergne, a prominent political commentator of the time, who was to publish a highly critical study of the plan:However, "as a result of a curious coincidence", Dean Acheson was already on his way to the summit in London, and had decided to go via Paris to confer informally with Schuman. By another "coincidence", Monnet was present at their meeting. As Monnet disingenuously put it, "courtesy and honesty obliged us to take Acheson into our confidence". The Plan was also presented to the French Cabinet, but only in a most perfunctory way:Schuman then took an audacious step. He would announce "his" plan by appealing directly to the peoples of Europe, through the media. In a radio broadcast on 9 May 1950 – today officially commemorated as "Europe Day" – he revealed Monnet's plan to the world. "World peace", he began:After describing how "the solidarity in production thus established will make it plain that any war between France and Germany becomes not merely unthinkable, but materially impossible", he went on to say that this would help simply and speedily to achieve "that fusion of interest which is indispensable to the establishment of a common economic system". This was the "Schuman Declaration" which now occupies pride of place on the EU's Europa website as the document "which led to the creation of what is now the European Union". 
Yet, according to one historian, although the plan was immediately greeted with great excitement by the press, the curious thing was that literally nobody knew exactly what it was about, not even Schuman. The whole thing was an elaborate charade, a meticulous coup by the puppet-master, Jean Monnet, executed according to a plan he and his colleague Arthur Salter had devised all those years ago, to deal with a different war and an entirely different geopolitical situation. But, with the backing of the commission, the myth endures. Hilariously, though, when I addressed a group of 150 sixth-formers recently, on the history of the European Union, not one of them knew that 9 May was "Europe Day". For all the effort the EU has expended on cultivating its core myth, it has failed to have any decisive impact. Whether Mr Cameron likes it or not, it is truly an idea whose time has gone
<reponame>oleg-nazarov/cpp-yandex-praktikum<filename>transport_catalogue/svg/__tests__/svg__tests.cpp
#define _USE_MATH_DEFINES
#include <cmath>
#include <iterator>
#include <sstream>
#include <string>

#include "../../../helpers/run_test.h"
#include "../svg.h"

using namespace std::literals;
using namespace svg;

// Builds a star-shaped closed polyline centred at `center`: one outer-radius
// point per ray, each followed by an inner-radius point half a ray further
// around; the final iteration repeats the first outer point to close the shape.
Polyline CreateStar(Point center, double outer_rad, double inner_rad, int num_rays) {
    Polyline polyline;
    for (int i = 0; i <= num_rays; ++i) {
        double angle = 2 * M_PI * (i % num_rays) / num_rays;
        polyline.AddPoint({center.x + outer_rad * sin(angle), center.y - outer_rad * cos(angle)});
        if (i == num_rays) {
            break;  // shape closed; do not emit another inner point
        }
        angle += M_PI / num_rays;
        polyline.AddPoint({center.x + inner_rad * sin(angle), center.y - inner_rad * cos(angle)});
    }
    return polyline;
}

namespace shapes {

// Drawable triangle: emits one closed polyline through its three vertices.
class Triangle : public Drawable {
   public:
    Triangle(Point p1, Point p2, Point p3)
        : p1_(std::move(p1)), p2_(std::move(p2)), p3_(std::move(p3)) {}

    void Draw(ObjectContainer& container) const override {
        container.Add(Polyline().AddPoint(p1_).AddPoint(p2_).AddPoint(p3_).AddPoint(p1_));
    }

   private:
    Point p1_, p2_, p3_;
};

// Drawable star: delegates geometry to CreateStar with fixed red/black colors.
class Star : public Drawable {
   public:
    Star(Point center, double outer_rad, double inner_rad, int num_rays)
        : center_(std::move(center)), outer_rad_(outer_rad), inner_rad_(inner_rad), num_rays_(num_rays) {}

    void Draw(ObjectContainer& container) const override {
        container.Add(CreateStar(center_, outer_rad_, inner_rad_, num_rays_)
                          .SetFillColor("red"s)
                          .SetStrokeColor("black"s));
    }

   private:
    Point center_;
    double outer_rad_;
    double inner_rad_;
    int num_rays_;
};

// Drawable snowman: three stacked circles, sized and positioned relative to
// the head's centre and radius. Circles are added bottom-to-top so that the
// later (upper) circles are rendered on top in the SVG document order.
class Snowman : public Drawable {
   public:
    Snowman(Point head_center, double head_radius)
        : head_center_(std::move(head_center)), head_radius_(head_radius) {}

    void Draw(ObjectContainer& container) const override {
        using namespace std::literals;

        // bottom circle
        container.Add(Circle()
                          .SetCenter({head_center_.x, head_center_.y + head_radius_ * 5})
                          .SetRadius(head_radius_ * 2)
                          .SetFillColor("rgb(240,240,240)"s)
                          .SetStrokeColor("black"s));
        // middle circle
        container.Add(Circle()
                          .SetCenter({head_center_.x, head_center_.y + head_radius_ * 2})
                          .SetRadius(head_radius_ * 1.5)
                          .SetFillColor("rgb(240,240,240)"s)
                          .SetStrokeColor("black"s));
        // top circle
        container.Add(Circle()
                          .SetCenter({head_center_.x, head_center_.y})
                          .SetRadius(head_radius_)
                          .SetFillColor("rgb(240,240,240)"s)
                          .SetStrokeColor("black"s));
    }

   private:
    Point head_center_;
    double head_radius_;
};

}  // namespace shapes

// Draws every Drawable in [begin, end) into the target container, in order.
template <typename DrawableIterator>
void DrawPicture(DrawableIterator begin, DrawableIterator end, svg::ObjectContainer& target) {
    for (auto it = begin; it != end; ++it) {
        (*it)->Draw(target);
    }
}

// Convenience overload: draws every element of a container of Drawables.
template <typename Container>
void DrawPicture(const Container& container, svg::ObjectContainer& target) {
    using namespace std;
    DrawPicture(begin(container), end(container), target);
}

// TODO: make order of attributes not important
// void TestMockedSnowmanStarTriangle() {
//     using namespace svg;
//     using namespace shapes;
//     using namespace std;
//     std::ostringstream output;
//     std::string mocked_output(
//         "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
//         "<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\">\n"
//         "  <polyline points=\"100,20 120,50 80,40 100,20\"/>\n"
//         "  <polyline points=\"50,10 52.3511,16.7639 59.5106,16.9098 53.8042,21.2361 55.8779,28.0902 50,24 44.1221,28.0902 46.1958,21.2361 40.4894,16.9098 47.6489,16.7639 50,10\" fill=\"red\" stroke=\"black\"/>\n"
//         "  <circle cx=\"30\" cy=\"70\" r=\"20\" fill=\"rgb(240,240,240)\" stroke=\"black\"/>\n"
//         "  <circle cx=\"30\" cy=\"40\" r=\"15\" fill=\"rgb(240,240,240)\" stroke=\"black\"/>\n"
//         "  <circle cx=\"30\" cy=\"20\" r=\"10\" fill=\"rgb(240,240,240)\" stroke=\"black\"/>\n"
//         "  <text x=\"10\" y=\"100\" dx=\"0\" dy=\"0\" font-size=\"12\" font-family=\"Verdana\" fill=\"yellow\" stroke=\"yellow\" stroke-width=\"3\" stroke-linecap=\"round\" stroke-linejoin=\"round\">Happy New Year!</text>\n"
//         "  <text x=\"10\" y=\"100\" dx=\"0\" dy=\"0\" font-size=\"12\" font-family=\"Verdana\" fill=\"red\">Happy New Year!</text>\n"
//         "</svg>\n");
//     vector<unique_ptr<svg::Drawable>> picture;
//     picture.emplace_back(make_unique<Triangle>(Point{100, 20}, Point{120, 50}, Point{80, 40}));
//     picture.emplace_back(make_unique<Star>(Point{50.0, 20.0}, 10.0, 4.0, 5));
//     picture.emplace_back(make_unique<Snowman>(Point{30, 20}, 10.0));
//     svg::Document doc;
//     DrawPicture(picture, doc);
//     const Text base_text = Text()
//                                .SetFontFamily("Verdana"s)
//                                .SetFontSize(12)
//                                .SetPosition({10, 100})
//                                .SetData("Happy New Year!"s);
//     doc.Add(Text{base_text}
//                 .SetStrokeColor("yellow"s)
//                 .SetFillColor("yellow"s)
//                 .SetStrokeLineJoin(StrokeLineJoin::ROUND)
//                 .SetStrokeLineCap(StrokeLineCap::ROUND)
//                 .SetStrokeWidth(3));
//     doc.Add(Text{base_text}.SetFillColor("red"s));
//     doc.Render(output);
//     ASSERT_EQUAL(output.str(), mocked_output);
// }

// Verifies Rgb aggregate init and that a default Rgb is black (0,0,0).
void TestRgb() {
    svg::Rgb rgb{255, 0, 100};
    ASSERT(rgb.red == 255);
    ASSERT(rgb.green == 0);
    ASSERT(rgb.blue == 100);

    svg::Rgb color;
    ASSERT(color.red == 0 && color.green == 0 && color.blue == 0);
}

// Verifies Rgba aggregate init and that a default Rgba is opaque black.
void TestRgba() {
    svg::Rgba rgba{100, 20, 50, 0.3};
    ASSERT(rgba.red == 100);
    ASSERT(rgba.green == 20);
    ASSERT(rgba.blue == 50);
    ASSERT(rgba.opacity == 0.3);

    svg::Rgba color;
    ASSERT(color.red == 0 && color.green == 0 && color.blue == 0 && color.opacity == 1.0);
}

int main() {
    // RUN_TEST(TestMockedSnowmanStarTriangle);
    RUN_TEST(TestRgb);
    RUN_TEST(TestRgba);

    return 0;
}
<filename>src/qbl_xml.rs use aes::cipher::{BlockDecryptMut, BlockEncryptMut, KeyIvInit}; use hex_literal::hex; pub fn qbl_to_xml(qbl: &[u8]) -> Vec<u8> { let crypt = qbl_to_crypt(qbl); let lenxml = crypt_to_lenxml(&crypt); lenxml_to_xml(&lenxml) } pub fn xml_to_qbl(xml: &[u8]) -> Vec<u8> { let lenxml = xml_to_lenxml(xml); let crypt = lenxml_to_crypt(&lenxml); crypt_to_qbl(&crypt) } // conversion between .qbl format and plain encrypted data // qbl format (is actually a more general format, but is only used with specific bytes) // 7e 00 {len+7 as u32} 7f 01 {len as u32} {crypt as [u8;len]} 7b fn qbl_to_crypt(qbl: &[u8]) -> &[u8] { // TODO validate first 12 and final byte against magic // eg qbl[8..12] as u32 == qbl.len()-13 &qbl[12..qbl.len() - 1] } fn crypt_to_qbl(crypt: &[u8]) -> Vec<u8> { let mut qbl = Vec::with_capacity(13 + crypt.len()); qbl.push(0x7e); qbl.push(0); // TODO verify crypt.len() + 7 <= U32::MAX push_u32(&mut qbl, (crypt.len() + 7) as u32); qbl.push(0x7f); qbl.push(1); push_u32(&mut qbl, crypt.len() as u32); qbl.extend_from_slice(crypt); qbl.push(0x7b); qbl } // conversion between plain enctrypted data and len tagged xml // I dont realy know why Liquid Flower is using encryption (and not compression), but whatever... 
// AES-128-CBC key/IV pair hard-coded by the original application.
// NOTE(review): a fixed IV means identical plaintexts always produce
// identical ciphertexts — acceptable for this file format, but not
// general-purpose cryptography.
const KEY: [u8; 16] = hex!("30 85 c1 24 9a 56 b6 30 79 67 5c 88 c8 8a dc ba");
const IV: [u8; 16] = hex!("df 86 4a 53 c4 68 c9 8f b4 a5 61 dc 14 ff 53 57");

// Decrypt ciphertext into the length-tagged XML buffer.
// Panics (unwrap) if the input is not a whole number of AES blocks or the
// PKCS#7 padding is invalid.
fn crypt_to_lenxml(crypt: &[u8]) -> Vec<u8> {
    cbc::Decryptor::<aes::Aes128Dec>::new(&KEY.into(), &IV.into())
        .decrypt_padded_vec_mut::<aes::cipher::block_padding::Pkcs7>(crypt)
        .unwrap()
}

// Encrypt the length-tagged XML buffer (PKCS#7 padded) into ciphertext.
fn lenxml_to_crypt(lenxml: &[u8]) -> Vec<u8> {
    cbc::Encryptor::<aes::Aes128Enc>::new(&KEY.into(), &IV.into())
        .encrypt_padded_vec_mut::<aes::cipher::block_padding::Pkcs7>(lenxml)
}

// conversion between len tagged xml and plain xml

// Strip the varint length prefix: bytes with the high bit set continue the
// length; the first byte with a clear high bit is its last byte.
fn lenxml_to_xml(lenxml: &[u8]) -> Vec<u8> {
    // first byte with a 0 8th bit is the last byte of the length
    // TODO check the length is correct
    let mut i = 0;
    while lenxml[i] & 0x80 == 0x80 {
        i += 1;
    }
    lenxml[i + 1..].to_vec()
}

// Prepend the varint-encoded payload length to the XML bytes.
// 5 bytes of headroom covers any usize length that fits the format.
fn xml_to_lenxml(xml: &[u8]) -> Vec<u8> {
    let mut lenxml = Vec::with_capacity(5 + xml.len());
    push_usize(&mut lenxml, xml.len());
    lenxml.extend_from_slice(xml);
    lenxml
}

// Append `v` in little-endian byte order.
fn push_u32(vec: &mut Vec<u8>, v: u32) {
    vec.extend_from_slice(&v.to_le_bytes());
}

// Append `len` as a little-endian base-128 varint: 7 data bits per byte,
// high bit set on every byte except the last.
fn push_usize(vec: &mut Vec<u8>, mut len: usize) {
    while len > 0x7F {
        vec.push((0x80 | (len & 0x7F)) as u8);
        len >>= 7;
    }
    vec.push(len as u8);
}
/* Copyright © 2021 Compose Generator Contributors All rights reserved. */ package test
/* * Licensed to Elasticsearch B.V. under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch B.V. licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ // Inspired by the inspector CSV exporter import { FormatFactory } from 'src/plugins/data/common/field_formats/utils'; import { Datatable } from 'src/plugins/expressions'; const LINE_FEED_CHARACTER = '\r\n'; const nonAlphaNumRE = /[^a-zA-Z0-9]/; const allDoubleQuoteRE = /"/g; export const CSV_MIME_TYPE = 'text/plain;charset=utf-8'; // TODO: enhance this later on function escape(val: object | string, quoteValues: boolean) { if (val != null && typeof val === 'object') { val = val.valueOf(); } val = String(val); if (quoteValues && nonAlphaNumRE.test(val)) { val = `"${val.replace(allDoubleQuoteRE, '""')}"`; } return val; } interface CSVOptions { csvSeparator: string; quoteValues: boolean; formatFactory: FormatFactory; raw?: boolean; } export function datatableToCSV( { columns, rows }: Datatable, { csvSeparator, quoteValues, formatFactory, raw }: CSVOptions ) { // Build the header row by its names const header = columns.map((col) => escape(col.name, quoteValues)); const formatters = columns.reduce<Record<string, ReturnType<FormatFactory>>>( (memo, { id, meta }) => { memo[id] = formatFactory(meta?.params); return memo; }, {} ); // Convert the array of row objects to an array of row arrays const csvRows = 
rows.map((row) => { return columns.map((column) => escape(raw ? row[column.id] : formatters[column.id].convert(row[column.id]), quoteValues) ); }); if (header.length === 0) { return ''; } return ( [header, ...csvRows].map((row) => row.join(csvSeparator)).join(LINE_FEED_CHARACTER) + LINE_FEED_CHARACTER ); // Add \r\n after last line }
// LossFunctions.h // Here we define the different loss functions that can be used // with the BDT system. #ifndef L1Trigger_L1TMuonEndCap_emtf_LossFunctions #define L1Trigger_L1TMuonEndCap_emtf_LossFunctions #include "Event.h" #include <string> #include <algorithm> #include <cmath> // ======================================================== // ================ Define the Interface ================== //========================================================= namespace emtf { // Define the Interface class LossFunction { public: // The gradient of the loss function. // Each tree is a step in the direction of the gradient // towards the minimum of the Loss Function. virtual double target(Event* e) = 0; // The fit should minimize the loss function in each // terminal node at each iteration. virtual double fit(std::vector<Event*>& v) = 0; virtual std::string name() = 0; virtual int id() = 0; virtual ~LossFunction() = default; }; // ======================================================== // ================ Least Squares ========================= // ======================================================== class LeastSquares : public LossFunction { public: LeastSquares() {} ~LeastSquares() override {} double target(Event* e) override { // Each tree fits the residuals when using LeastSquares. return e->trueValue - e->predictedValue; } double fit(std::vector<Event*>& v) override { // The average of the residuals minmizes the Loss Function for LS. 
double SUM = 0; for (unsigned int i = 0; i < v.size(); i++) { Event* e = v[i]; SUM += e->trueValue - e->predictedValue; } return SUM / v.size(); } std::string name() override { return "Least_Squares"; } int id() override { return 1; } }; // ======================================================== // ============== Absolute Deviation =================== // ======================================================== class AbsoluteDeviation : public LossFunction { public: AbsoluteDeviation() {} ~AbsoluteDeviation() override {} double target(Event* e) override { // The gradient. if ((e->trueValue - e->predictedValue) >= 0) return 1; else return -1; } double fit(std::vector<Event*>& v) override { // The median of the residuals minimizes absolute deviation. if (v.empty()) return 0; std::vector<double> residuals(v.size()); // Load the residuals into a vector. for (unsigned int i = 0; i < v.size(); i++) { Event* e = v[i]; residuals[i] = (e->trueValue - e->predictedValue); } // Get the median and return it. int median_loc = (residuals.size() - 1) / 2; // Odd. if (residuals.size() % 2 != 0) { std::nth_element(residuals.begin(), residuals.begin() + median_loc, residuals.end()); return residuals[median_loc]; } // Even. else { std::nth_element(residuals.begin(), residuals.begin() + median_loc, residuals.end()); double low = residuals[median_loc]; std::nth_element(residuals.begin() + median_loc + 1, residuals.begin() + median_loc + 1, residuals.end()); double high = residuals[median_loc + 1]; return (high + low) / 2; } } std::string name() override { return "Absolute_Deviation"; } int id() override { return 2; } }; // ======================================================== // ============== Huber ================================ // ======================================================== class Huber : public LossFunction { public: Huber() {} ~Huber() override {} double quantile; double residual_median; double target(Event* e) override { // The gradient of the loss function. 
if (std::abs(e->trueValue - e->predictedValue) <= quantile) return (e->trueValue - e->predictedValue); else return quantile * (((e->trueValue - e->predictedValue) > 0) ? 1.0 : -1.0); } double fit(std::vector<Event*>& v) override { // The constant fit that minimizes Huber in a region. quantile = calculateQuantile(v, 0.7); residual_median = calculateQuantile(v, 0.5); double x = 0; for (unsigned int i = 0; i < v.size(); i++) { Event* e = v[i]; double residual = e->trueValue - e->predictedValue; double diff = residual - residual_median; x += ((diff > 0) ? 1.0 : -1.0) * std::min(quantile, std::abs(diff)); } return (residual_median + x / v.size()); } std::string name() override { return "Huber"; } int id() override { return 3; } double calculateQuantile(std::vector<Event*>& v, double whichQuantile) { // Container for the residuals. std::vector<double> residuals(v.size()); // Load the residuals into a vector. for (unsigned int i = 0; i < v.size(); i++) { Event* e = v[i]; residuals[i] = std::abs(e->trueValue - e->predictedValue); } std::sort(residuals.begin(), residuals.end()); unsigned int quantile_location = whichQuantile * (residuals.size() - 1); return residuals[quantile_location]; } }; // ======================================================== // ============== Percent Error =========================== // ======================================================== class PercentErrorSquared : public LossFunction { public: PercentErrorSquared() {} ~PercentErrorSquared() override {} double target(Event* e) override { // The gradient of the squared percent error. return (e->trueValue - e->predictedValue) / (e->trueValue * e->trueValue); } double fit(std::vector<Event*>& v) override { // The average of the weighted residuals minimizes the squared percent error. // Weight(i) = 1/true(i)^2. 
double SUMtop = 0; double SUMbottom = 0; for (unsigned int i = 0; i < v.size(); i++) { Event* e = v[i]; SUMtop += (e->trueValue - e->predictedValue) / (e->trueValue * e->trueValue); SUMbottom += 1 / (e->trueValue * e->trueValue); } return SUMtop / SUMbottom; } std::string name() override { return "Percent_Error"; } int id() override { return 4; } }; } // namespace emtf #endif
def compute(self, **kwargs):
    """Compute an average score.

    Exactly one of the keyword arguments is used, checked in this order:

    - ``list``: a sequence of values; ``self.result`` becomes the mean of
      the non-None entries (left untouched if all entries are None).
    - ``district``: score that single district.
    - ``plan``: score every district of the plan at ``version`` (defaults
      to ``plan.version``), excluding district_id 0; ``self.result``
      becomes the mean of the per-district averages of the ``value1..N``
      arguments found in ``self.arg_dict``, or None if no district counted.

    With no recognized argument the method returns without setting a result.
    """
    districts = []

    if 'list' in kwargs:
        arg_list = kwargs['list']

        # List comprehension instead of filter()/reduce(): len(filter(...))
        # raises on Python 3, and sum() needs no functools import.
        # NOTE(review): assumes the list entries are numeric — sum() will not
        # concatenate strings the way the old reduce(add) did; confirm callers.
        filtered = [x for x in arg_list if x is not None]
        if len(filtered) == 0:
            return

        self.result = {'value': sum(filtered) / len(filtered)}

    elif 'district' in kwargs:
        districts.append(kwargs['district'])

    elif 'plan' in kwargs:
        plan = kwargs['plan']
        version = kwargs[
            'version'] if 'version' in kwargs else plan.version
        districts = plan.get_districts_at_version(
            version, include_geom=False)

    else:
        return

    if 'list' not in kwargs:
        total = 0.0
        count = 0.0
        for district in districts:
            # district_id 0 is skipped (presumably the unassigned/whole-state
            # pseudo-district — TODO confirm against the model).
            if district.district_id == 0:
                continue

            count += 1
            argnum = 0
            argsum = 0.0
            # Consume value1, value2, ... as long as they exist in arg_dict.
            while ('value%d' % (argnum + 1, )) in self.arg_dict:
                argnum += 1
                number = self.get_value('value%d' % argnum, district)
                if number is not None:
                    argsum += float(number)

            # NOTE(review): raises ZeroDivisionError if arg_dict defines no
            # 'value1' — preserved from the original; confirm configuration
            # always supplies at least one value argument.
            total += argsum / argnum

        if count == 0:
            self.result = None
            return

        self.result = {'value': total / count}
Insulin sensitivity and obstructive sleep apnea in adolescents with polycystic ovary syndrome. BACKGROUND Polycystic ovary syndrome (PCOS) in adults is linked with insulin resistance (IR) and obstructive sleep apnea (OSA). However, less is known about these associations in adolescents. METHODS We studied 3 groups of adolescents: 27 obese PCOS (OPCOS) (ages 13-21 years), 11 normal-weight PCOS (NPCOS) (ages 13-21 years), and 8 healthy controls (ages 18-21 years). A hyperinsulinemic euglycemic clamp study was performed in all groups to determine IR by insulin sensitivity (M/I). Polysomnography was performed to assess for OSA in OPCOS and NPCOS groups. We compared indices of IR among all groups and OSA among OPCOS and NPCOS. RESULTS We noted that OPCOS, NPCOS, and controls differed significantly in their IR. M/I was significantly lower in OPCOS vs. controls (p=0.0061), and also lower for NPCOS vs. controls, but this approached but did not reach statistical significance (p=0.084). In addition, none of the NPCOS subjects had OSA compared to 42% of OPCOS (p=0.03). CONCLUSIONS Our study suggests OPCOS adolescents have increased IR compared to controls and NPCOS subjects. Higher IR for NPCOS vs. controls approached but did not reach statistical significance. Larger studies are needed. In addition, adolescents with OPCOS are at a high risk for OSA.
Antibody and cytotoxic T lymphocyte responses of humans to live and inactivated influenza vaccines. The antibody and HLA-restricted virus-specific cytotoxic T lymphocyte (CTL) responses to influenza vaccine of 36 volunteers were analysed. Three vaccines were used: a live attenuated, and two types of inactivated, a whole virus and a purified surface antigen vaccine. Antibody to haemagglutinin (HA) was assayed using plaque neutralization, single-radial-haemolysis and haemagglutination-inhibition (HI) techniques. Antibodies to nucleoprotein and matrix antigens were also measured. Most of the volunteers had antibody responses to the HA in the inactivated vaccines which were detected by all three techniques. Nine of the twelve recipients of the live virus vaccine did not have an antibody response detected by the HI test, but four of these did have antibody responses when the plaque neutralization test was used. Single-radial-haemolysis was more sensitive than the HI test for detecting low levels of antibody, but the plaque neutralization test was the most sensitive for detecting low levels of antibody. Most volunteers had a rise in their HLA-restricted influenza-specific memory CTL response, but three recipients of live vaccine who did not have an antibody response by any technique also did not have an increase in their cytotoxic T cell activity. Three volunteers, two of whom had received live vaccine, had a positive CTL response without antibody response.
ASL2 Ro8 Preview Pt1- Bisu to BeSt Sea and hero? Text by BigFan Graphics by v1 All of TBLS have made it safely into the Ro8 with most taking their respective groups in 2-0 fashion even when facing tough opponents such as hero, last and Movie. Now that we are at the Ro8, this is where the real fun begins. We have a pretty decent spread across the board: 2T, 2Z and 4P. The map pool will consist of five maps with players able to veto out one map each. Each series is a bo5 as well so we will be treated to at least three games at the minimum. The first series is between Sea and Bisu, no doubt an uphill battle for the terran against the revolutionist while the second series between hero and BeSt promises to be entertaining as BeSt looks to upset a ZvP specialist. Will these matches go as predicted or will we see upsets all over the place? Check out the Ro8 thread to see the remaining two groups. Group A is set to start soon, so let's get ready to rumble!!!!! The Ro8 is finally upon us and I'm personally ecstatic as hell! We've had a lot of great BW tournaments in the last couple of years such as SSL. However, the game quality and hype surrounding this tournament has been in a league of its own at times. The viewership peaked at 107k during the flash and movie game in group D just several days ago. Let that sink in for a moment and you'll see the significance. All of TBLS have made it safely into the Ro8 with most taking their respective groups in 2-0 fashion even when facing tough opponents such as hero, last and Movie. Now that we are at the Ro8, this is where the real fun begins. We have a pretty decent spread across the board: 2T, 2Z and 4P. The map pool will consist of five maps with players able to veto out one map each.
Each series is a bo5 as well so we will be treated to at least three games at the minimum.The first series is between Sea and Bisu, no doubt an uphill battle for the terran against the revolutionist while the second series between hero and BeSt promises to be entertaining as BeSt looks to upset a ZvP specialist. Will these matches go as predictable or will we see upsets all over the place? Check out the Ro8 thread to see the remaining two groups. Then continue reading for a recap of the last two Ro16 groups by c3rberUs, a preview of the first two Ro8 groups by FlaShFTW and BLinD-RawR and then staff predictions.Group A is set to start inso let's get ready to rumble!!!!! Introduction RO16 Recaps by c3rberUs RO8 Preview by BLinD-RawR & FlaShFTW Recommended Games by Bigfan Staff Predictions Liquipedia Everything about ASL2 Group C: Last <Demian> Rush Bisu<Demian> hero Bisu <Benzene> Last hero <Benzene> Rush hero <Circuit Breaker> Last and advance to the Ro8! Bisu and hero advance to the Ro8! Last vs Rush: The first game of this very Korean Christmas was a TvT on Demian. It was a TvT that was as standard as you can get with not much in the way of thrills. Starting from a mirror factory-expansion opening, Last went for his preferred vulture game while Rush tried to set the tempo with a hidden starport play. The wraiths only did minor damage before being chased away by goliaths but they were crucial when it finally came time to set siege lines at the center of the map. Now, if the game was purely about making your opponent's tanks go kaboom, Rush would be way ahead. However, Last persevered through the early losses and patiently developed his pieces; splitting the map and capturing bases earlier. He eventually turned the tide and Rush was reduced to picking off stragglers before getting overrun with a final push from all sides. Hero vs Bisu: Again on Demian, Hero probably knew what will come from Bisu after playing each other online a lot. 
Hero made handfuls of zerglings to counter Bisu's drone-hungry zealot aggression. He dealt with it perfectly and then he used the same zerglings to run past a single cannon in Bisu's natural. They picked off probes and generally annoyed Bisu before they all got wiped out. Back in the Zerg base, it became apparent that Hero was assembling hydras for a surprise bust. The surprise play was almost revealed when zealots made a suicide-run to Hero's third but fortunately, Bisu did not notice the hydra rallied there. Against a single cannon and only a bunch of zealots, Hero moved in and it was almost surely the game right there as we've seen in so many PvZs. But instead of dying to the barrage of spines, Bisu's zealot speed kicked in and somehow pushed the hydras back, giving him enough time to reinforce his single cannon. Hero tried a second time but couldn't get close with a DT prowling the entrance. Then in a counterattack, zealots and storm decimated everything in Hero's third and Bisu got the win. Hydras vs speedlot Bisu vs Last: In the Winners' matchup, Bisu made his plan to do a proxy gate no secret with the gateway and pylon blocking about half of Last's natural entrance. Despite the close distance, Last managed to keep his base protected with a quick wall-in and proceeded with his FD push. The push was sent out to eliminate the obvious threat of the proxy gate. However, Bisu was able to turn his defense into offense after sniping Last's tank. Another battle in Last's natural ended horrendously wrong for Last but was saved by his cute factory-float into hidden mines. What was a fake double build was actually a real two-factory push and a questionable starport. Having lost too much already, Last poured everything into a final push but was left totally unprepared for DTs and typed out at the first sound of the warpblade slash. dt drop! Hero vs Rush: The Losers' matchup was one of those games on par with the famous 14CC versus 4-pool game. 
Rush went ahead and played the metagame surrounding Benzene. With it's long rush distance, it was a natural haven for greedy builds. Countering this, he proceeded to get two proxy barracks and surprise hero. hero on the other hand, went ahead and played probably the greediest zerg build - 3-hatchery before spawning pool. This meant he had nothing but drones to combat the oncoming marine onslaught. hero lost his natural quite quickly. The marines then went up to his main base where it should have been game. But bad decisions from Rush and the third hatchery's zerglings kept hero in the game. What was incredible is that hero's build allowed him to counter nicely; sacrificing a hatchery while still keeping two for pumping lings. When hero upgraded speed, Rush was be unable to run back to his base before the lings and that was GG. Hero vs Last: For the final match, we had an STX teamkill match on Circuit Breaker. It was a sloppy game on the micromanagement side but the play from both players was pretty solid; especially the victor, hero. In contrast to this season's 3-hatch lurker movement thanks to the Tyrant, hero went for the usual muta route. Despite muta-ling pretty much decimating Last's bio forces, I was convinced back then that it was futile with how broken the mech-switch is. But hero countered every punch and jab from Last. Then on four-bases, hero went and dropped lurkers on three places simultaneously. The effect was devastating on Last's economy, allowing hero to grab the initiative and grip it firmly. Another ground-based and drop attack into a cross map push with ultras sealed the win. Group D: Flash <Demian> Sea Movie <Demian> Rain Flash <Benzene> Movie Rain <Benzene> Sea Movie <Circuit Breaker> Sea and advance to the Ro8! Flash and Sea advance to the Ro8! FlaSh vs Sea: These two drinking buddies ran into each other again in the group stage like they did last season. 
Set on Demian, Sea wanted to get a quick win here over the rampaging juggernaut with a hidden 2-factory opening. Meanwhile, FlaSh did not go 14CC, instead he went with a less-greedy barracks expansion. Going starport after factory, it put Sea into a race against time to do at minimal, a crippling blow. Sea's push initially looked scary as he forced a lift of the natural CC and then camped at the bottom of the ramp. But that was as far as he went before FlaSh was able to clean up and reclaim with wraiths. After that, Sea tried some valkyrie-wraiths and finally a last ditch drop that was doomed from the start thanks to the mass of mech guarding only two bases. Powered by an earlier expansion, FlaSh simply outnumbered Sea which led to a rather quick game. You think you control the sky? cute. -Flash Movie vs Rain: On the same map, Movie opened with 3-gateway robotics versus Rain's core-expansion. You know it's really dangerous when you have nothing but probes to defend Movie's three dragoons at your natural. Home turf reinforcements allowed Rain to live but the rout allowed Movie to expand comfortably himself. After some macroing, Movie decided to push the initiative and attacked Rain with full force. They started to face off in the traditional shuttle-reaver dance. But what happened after was a game-ending miscue from Rain; a) engaging through his own chokepoint and b) not bringing his four anti-reaver-drop dragoons to his natural despite the fact that he obviously knew it was on the natural. FlaSh vs Movie: Movie wanted an early advantage using a gas steal elevator rush on FlaSh on Benzene. Although it was unknown to FlaSh up to the last second, it was very ineffective with the first drop consisting of one dragoon and two zeals were helplessly outmatched by a tank, marines, vultures and fat SCVs. The second drop with the reaver was even more fruitless, being repelled immediately by a goliath. 
FlaSh then expanded to get the mineral-only and made his push against Movie. Movie was playing catch up now and succeeded initially in breaking the push to defend his mineral-only third. But then, FlaSh's reinforcements that were on route to the scene made a detour and took to Movie's undefended bottom expansions. By the time Movie was ready to meet the main mech force, FlaSh's eco had already kicked in and no recall was going to save him. When the dust settled and the GG's thrown, FlaSh became the first Terran to enter this ASL's round of eight. Rain vs Sea: Rain was put into another test of young talent versus experience in the form of veteran Sea. In this elimination game, Sea was the first to press the action, sending out a force from four factories off of two bases. Rain meanwhile was teching to arbiters and only had dragoons to fend off the rolling mass of Terran metal - which he did rather handsomely. He then tried to catch up on base count and only tried an ineffective zealot drop. All of this played into Sea's plan as he remained undisturbed pumping units and upgrading on double armories. Of course, Rain was not going to remain passive for long and pulled a Nal_rA with hallucination recall (gif). While the recall was relatively solid, Sea cleaned it up, marched across the map and broke the protoss army. Rain simply wasn't able to keep up as Sea cut him off from reinforcing the top side expansions and got trounced from the league. Hallucination+Recall = Success! Sea vs Movie: The battle of the two Afreeca veterans. Things got off to a hot start with Movie proxying at the center of Circuit Breaker. Sea's sim city and the game's border offered really good protection so Movie wasn't able to do much damage. Going with the previous game's pattern, Sea went aggressive on two bases but this time, it was a five tank push. Sea then calmly set his lines on the center of the map where he butted heads with Movie's troops. 
The battle ended with Sea wiping out all but one dragoon and an arbiter and that meant that the way was open to Movie's fledgeling third. The protoss countered through the bottom passageway forcing a CC lift at the mineral-only. Sea was able to prevent further incursion and suffered a setback when his main assault force got destroyed at the front of Movie's natural. But he remained calm, bringing in more troops seemingly for attrition. Movie sensed this and when he finally scouted Sea's fourth, he knew it was only a matter of time and promptly typed out. The first game of this very Korean Christmas was a TvT on Demian. It was a TvT that was as standard as you can get with not much in the way of thrills. Starting from a mirror factory-expansion opening, Last went for his preferred vulture game while Rush tried to set the tempo with a hidden starport play. The wraiths only did minor damage before being chased away by goliaths but they were crucial when it finally came time to set siege lines at the center of the map. Now, if the game was purely about making your opponent's tanks go kaboom, Rush would be way ahead. However, Last persevered through the early losses and patiently developed his pieces; splitting the map and capturing bases earlier. He eventually turned the tide and Rush was reduced to picking off stragglers before getting overrun with a final push from all sides.Again on Demian, Hero probably knew what will come from Bisu after playing each other online a lot. Hero made handfuls of zerglings to counter Bisu's drone-hungry zealot aggression. He dealt with it perfectly and then he used the same zerglings to run past a single cannon in Bisu's natural. They picked off probes and generally annoyed Bisu before they all got wiped out. Back in the Zerg base, it became apparent that Hero was assembling hydras for a surprise bust. 
The surprise play was almost revealed when zealots made a suicide-run to Hero's third but fortunately, Bisu did not notice the hydra rallied there. Against a single cannon and only a bunch of zealots, Hero moved in and it was almost surely the game right there as we've seen in so many PvZs. But instead of dying to the barrage of spines, Bisu's zealot speed kicked in and somehow pushed the hydras back, giving him enough time to reinforce his single cannon. Hero tried a second time but couldn't get close with a DT prowling the entrance. Then in a counterattack, zealots and storm decimated everything in Hero's third and Bisu got the win.In the Winners' matchup, Bisu made his plan to do a proxy gate no secret with the gateway and pylon blocking about half of Last's natural entrance. Despite the close distance, Last managed to keep his base protected with a quick wall-in and proceeded with his FD push. The push was sent out to eliminate the obvious threat of the proxy gate. However, Bisu was able to turn his defense into offense after sniping Last's tank. Another battle in Last's natural ended horrendously wrong for Last but was saved by his cute factory-float into hidden mines. What was a fake double build was actually a real two-factory push and a questionable starport. Having lost too much already, Last poured everything into a final push but was left totally unprepared for DTs and typed out at the first sound of the warpblade slash.The Losers' matchup was one of those games on par with the famous 14CC versus 4-pool game. Rush went ahead and played the metagame surrounding Benzene. With it's long rush distance, it was a natural haven for greedy builds. Countering this, he proceeded to get two proxy barracks and surprise hero. hero on the other hand, went ahead and played probably the greediest zerg build - 3-hatchery before spawning pool. This meant he had nothing but drones to combat the oncoming marine onslaught. hero lost his natural quite quickly. 
The marines then went up to his main base where it should have been game. But bad decisions from Rush and the third hatchery's zerglings kept hero in the game. What was incredible is that hero's build allowed him to counter nicely; sacrificing a hatchery while still keeping two for pumping lings. When hero upgraded speed, Rush was be unable to run back to his base before the lings and that was GG.For the final match, we had an STX teamkill match on Circuit Breaker. It was a sloppy game on the micromanagement side but the play from both players was pretty solid; especially the victor, hero. In contrast to this season's 3-hatch lurker movement thanks to the Tyrant, hero went for the usual muta route. Despite muta-ling pretty much decimating Last's bio forces, I was convinced back then that it was futile with how broken the mech-switch is. But hero countered every punch and jab from Last. Then on four-bases, hero went and dropped lurkers on three places simultaneously. The effect was devastating on Last's economy, allowing hero to grab the initiative and grip it firmly. Another ground-based and drop attack into a cross map push with ultras sealed the win.These two drinking buddies ran into each other again in the group stage like they did last season. Set on Demian, Sea wanted to get a quick win here over the rampaging juggernaut with a hidden 2-factory opening. Meanwhile, FlaSh did not go 14CC, instead he went with a less-greedy barracks expansion. Going starport after factory, it put Sea into a race against time to do at minimal, a crippling blow. Sea's push initially looked scary as he forced a lift of the natural CC and then camped at the bottom of the ramp. But that was as far as he went before FlaSh was able to clean up and reclaim with wraiths. After that, Sea tried some valkyrie-wraiths and finally a last ditch drop that was doomed from the start thanks to the mass of mech guarding only two bases. 
Powered by an earlier expansion, FlaSh simply outnumbered Sea which led to a rather quick game.On the same map, Movie opened with 3-gateway robotics versus Rain's core-expansion. You know it's really dangerous when you have nothing but probes to defend Movie's three dragoons at your natural. Home turf reinforcements allowed Rain to live but the rout allowed Movie to expand comfortably himself. After some macroing, Movie decided to push the initiative and attacked Rain with full force. They started to face off in the traditional shuttle-reaver dance. But what happened after was a game-ending miscue from Rain; a) engaging through his own chokepoint and b) not bringing his four anti-reaver-drop dragoons to his natural despite the fact that he obviously knew it was on the natural.Movie wanted an early advantage using a gas steal elevator rush on FlaSh on Benzene. Although it was unknown to FlaSh up to the last second, it was very ineffective with the first drop consisting of one dragoon and two zeals were helplessly outmatched by a tank, marines, vultures and fat SCVs. The second drop with the reaver was even more fruitless, being repelled immediately by a goliath. FlaSh then expanded to get the mineral-only and made his push against Movie. Movie was playing catch up now and succeeded initially in breaking the push to defend his mineral-only third. But then, FlaSh's reinforcements that were on route to the scene made a detour and took to Movie's undefended bottom expansions. By the time Movie was ready to meet the main mech force, FlaSh's eco had already kicked in and no recall was going to save him. When the dust settled and the GG's thrown, FlaSh became the first Terran to enter this ASL's round of eight.Rain was put into another test of young talent versus experience in the form of veteran Sea. In this elimination game, Sea was the first to press the action, sending out a force from four factories off of two bases. 
Rain meanwhile was teching to arbiters and only had dragoons to fend off the rolling mass of Terran metal - which he did rather handsomely. He then tried to catch up on base count and only tried an ineffective zealot drop. All of this played into Sea's plan as he remained undisturbed pumping units and upgrading on double armories. Of course, Rain was not going to remain passive for long and pulled a Nal_rA with hallucination recall (gif). While the recall was relatively solid, Sea cleaned it up, marched across the map and broke the protoss army. Rain simply wasn't able to keep up as Sea cut him off from reinforcing the top side expansions and got trounced from the league.The battle of the two Afreeca veterans. Things got off to a hot start with Movie proxying at the center of Circuit Breaker. Sea's sim city and the game's border offered really good protection so Movie wasn't able to do much damage. Going with the previous game's pattern, Sea went aggressive on two bases but this time, it was a five tank push. Sea then calmly set his lines on the center of the map where he butted heads with Movie's troops. The battle ended with Sea wiping out all but one dragoon and an arbiter and that meant that the way was open to Movie's fledgeling third. The protoss countered through the bottom passageway forcing a CC lift at the mineral-only. Sea was able to prevent further incursion and suffered a setback when his main assault force got destroyed at the front of Movie's natural. But he remained calm, bringing in more troops seemingly for attrition. Movie sensed this and when he finally scouted Sea's fourth, he knew it was only a matter of time and promptly typed out. Group A: Bisu vs Sea See you later Head to Head: Bisu 7-1 Sea It has been 11 years since Bisu and Sea have begun their progamer careers, starting off in the same place too. Yet their careers went off in such different directions, but that's a story for another time. 
For now, their paths have crossed once more and maybe 2017 is the year Bisu actually wins a final or Offline Sea becomes something more than a meme. The good old days Bisu got faced with what could be his best possible opponent in Sea as he has always had the upper hand going against him going 7-1 in official tournament games and administering tons of beatdowns in sponmatches and ladder. This has pretty much inflated his already stellar PvT record in the Post-KeSPA Era. This is the first Bo5 that Bisu is playing against Sea since the Terror TEMPTS Starleague all the way back in April of 2015 and compared to then, Bisu seems a lot more ready to take on Sea as their rivalry was a bit more back and forth at that time and now it's just Bisu all over him at 70%. When it comes to Sea, it's 2 things that are keeping him from going into the Ro4 right now. One is obviously the fact that he has to play Bisu, but Bisu aside, he has been playing some really good TvP so far in the tournament against Movie and Rain. Having been seeded into the Ro16 from his performance in ASL1 (Top 4) and bringing the A-game aggression that he has always been good at and pulling it off is just going to give him the mental boost that he needs to go up against Bisu. The other issue that keeps Sea from achieving is the offline match status. It has been touched before, by me even, but it is always going to be brought up with Sea. He needs to be able to get over his mental block in offline tournaments. This affected him even during the heyday of his rivalry with Killer when he could never make it far in an SSL. If 2017 is going to be his year, then it's this series that's really going to tell us all about it. Map breakdown: Set 1 & 5- Eye Of The Storm Set 1 & 5- Eye Of The Storm It's a big map. Expansions are pretty far apart with same ground mains and natural, with a lot of space in front of the natural to setup for either player and hit both bases at once. 
For Bisu, it would be early gateway aggression or quick shuttle/reaver into wall-hopping (it's not an elevator if it doesn't go up/down) into the main with his forces to hit Sea. Alternatively, Sea can go for a build similar to what he did against Rain on Benzene with 4 factories off 2 bases and get that early tank army onto Bisu's natural ramp area for a solid contain and push. Set 2- Demian Being a 3 player map, there are an uneven number of bases for two players to take and usually the third main and natural are the biggest points of contention. Since both players know this, their strategies are going to reflect it. Bisu can't play defensively and take those bases. He will have to come up with a mid game push to make sure he can secure them as Sea is also going to anticipate this. Sea will focus on a tighter defensive opening with some good turret rings to keep recalls while slowly expanding to keep the base count as equal as possible. It's likely going to be a drawn out game but the clock is going to be on Sea's side, not Bisu's. Set 3- Circuit Breaker Now, I could say like every other time that the strategy of Sea and Bisu is going to be Sea turtling off of 3 base/3 gas (4 base with 1 mineral only) and ready his 2/1 timing while Bisu takes the rest of the map and delays the push as much as he can. I think Sea is going to throw caution to the wind and proxy rax bunker push on cross spawn and most likely fail in marine control leading him to gg out of the game causing a collective sigh on every viewer. Set 4- Benzene I have no clue why this map is supposed to be bad for carriers. It's got some dead areas and some ridges. There's that high ground behind the natural that can be reached but what about the opposite side? Easy to park carriers there without much trouble of getting hit. That said, Bisu won't go carriers and will instead stick with a gateway arbiter army while Sea is probably going to go for a similar build as he has against Rain in the Ro16. 
Bisu <Eye of the Storm> Sea Bisu <Demian> Sea Bisu <Circuit Breaker> Sea Bisu <Benzene> Sea Bisu <Eye of the Storm> Sea advances 3-1! Group B: Hero vs Best The Hero We Need Head to Head: Hero 6-4 Best There’s no question that the lack of Zergs in this tournament, or at least notably that advanced to the Round of 16, has had the community’s eyes locked on our two Zerg players. Hero is one such player and he was selected to play against Best in the Round of 8. Will one of our remaining Zergs make it to the next stage? Or will he be stopped cold in his tracks by one of the Dragons? Best got matched with the worst possible player out of the 2nd place finishers in their groups. Sea and GuemChi were opponents which I felt Best could defeat. Instead, he got placed against his worst nightmare, a Zerg who knows how to ZvP. And yes, Jaedong is by no means a weak ZvPer but let’s face it, he’s extremely rusty and hasn’t been playing up to form so Best beating Jaedong isn’t really a secret. A Bo5 series is really going to push Best to the brink against a guy whose ZvP is his saving grace. However, Best has shown a decent ability to macro in his games against Jaedong as well as defend quite well against the hydra busts if hero decides to go for them. However, his high templar control really needs work as shown through his two games vs the dong. Luckily, it didn’t matter enough as he still won both those games, but against hero, he’ll need to be on top of his game. Mfw I have to play my worst matchup Best’s PvZ winrate in the KeSPA era was a measly 47.4% and while it has slightly improved in the post-KeSPA era to 53.9%, I’m not convinced Best is geared up to take this matchup on. Granted, maybe he’s not in too bad of shape considering he defeated Hero, 2-1, in the Kim Carry Myeongin so there’s that. But in sponmatches in the month of November, Best has not impressed. He was 3-6 against his opponent, hero, and even scarier was that he was 0-10 against a slumping EffOrt. Sure, he was 6-3 against Larva but this is also Larva who we’re talking about. Like I’ve said, Best has to go against his worst nightmare matchup and it’s not going to look pretty. I think he can take a game off hero but that’s about it. 
We’ll see how his preparation is against hero’s aggressive hydra style and maybe, just maybe, he can turn this series around. hero is probably thanking the stars he didn’t have to play Flash in the Round of 8, but getting put up against a Protoss definitely made his chances to get to the finals that much higher. The ZvP sniper is rejoicing from this selection (and also probably happy he can’t play Bisu due to the randomization rules). After a disappointing knockout in the Round of 8 last season to Sea, hero is looking to rebound in this ASL. Also to note, Hero is looking to rebound in the year overall, having not won a single tournament in 2016 compared to his big win in 2015 at the SSL11. His aggressive hydra builds will look to put Best at the edge of his seat and he’s also shown great usage of the early lings against the gate-first builds which Best has been favoring as of late. His game against Bisu on Demian showed he also has a killer instinct, getting a 5 ling run-by and dealing considerable economic damage to Bisu’s probe line. Best gave Jaedong many windows of opportunities for ling runbys but was never punished. Hero will definitely make use of those opportunities and try to keep Best off of his strong macro game. My Best impersonation after I’m through with him Hero back in the KeSPA era had a mediocre 53% ZvP winrate but since the post-KeSPA era started, he’s been rocking an amazing 70.6%. In his sponmatches in November, he was also at 70% with amazing records against top Protoss like Bisu 7-3 and Movie 8-1. Just another testament to his ZvP ability, hero took Bisu to 5 games in the SSL9 and also dismantled him in the SSL11 3-0 so hero’s Bo5 ZvP is definitely one of the strongest in the scene. Hero will go into this Round of 8 with a lot of confidence and will closely review Best’s games with Jaedong to find all the weak points in the armor to abuse. The tipping point for hero will be his late game management against arguably one of the strongest macro players in the game. 
Hero may instead decide to keep the games low-economy to push Best off his comfort game and look for him to mix in lurker drops to harass Best’s economy. Map Breakdown: Circuit Breaker will be our repeat map and this map has shown a ton of promise for hero and his aggressive hydra style with the close mineral-only base playing to his advantage. Probably the best map for hero to play on. If it gets to a game 5, I expect him to be able to take that game too so some insurance for hero if he somehow drops 2 games. Best's best opportunity (hehe) will be to get this to a late game split map scenario and abuse the OP Protoss gimmicks like Reavers, Archons, and Storms to push him to a victory. Basically, see Rain vs EffOrt on CB for an example game. Eye of the Storm will definitely challenge hero’s creativity when it comes to his style since depending on spawn (vertical), the 3rd is going to be extremely far away for him and he’ll need to play a much more turtle 4 base style. Hero isn’t the worst at the style but I’m sure he’d much prefer to play his bread and butter 4 hatch hydra aggression. Still, in the KeSPA era, EotS had a 59% ZvP winrate so it can’t be that bad for hero. Best will most likely be forced to opt for some kind of 8 gate goon all-in on this map since it sucks for longer games as the expansions are pretty difficult to take. Otherwise, cheese and forward gates are his best option. Benzene is the one map I think hero will struggle on, which is ironic given the forward close mineral-only that probably caters to his style. However, the mineral-only base is super wide open and not good for simcities later on in the game. So he’ll probably have to opt for that far-away 3rd gas base and we’ve seen how Protoss have been able to abuse it with nice zealot timings followed by a fast 3rd of their own. Best will just look to see which 3rd hero takes and will punish him accordingly and the mineral-only will help him jump start his economy past the mid-game so we can see his macro game shine. 
Demian is another solid map for hero, given that the 3rd is reasonably close and if the game extends, the low ground 4th is pretty close as well. Hero also showed he could go toe-to-toe against Bisu on this map though Bisu eventually overpowered him through brute strength. Still, I think this map is also favored for Zerg. Best did defeat Jaedong on this map so we'll see if he can utilize the same winning formula on hero. Strong map control with a deathball while expanding behind will be the name of the game but he must defend against hero's inevitable lurker drops on this map. hero <Circuit Breaker> Best hero <Eye of the Storm> Best hero <Benzene> Best hero <Demian> Best hero <Circuit Breaker> Best advances 3-1! It has been 11 years since Bisu and Sea have begun their progamer careers, starting off in the same place too. Yet their careers went off in such different directions, but that's a story for another time. For now, their paths have crossed once more and maybe 2017 is the year Bisu actually wins a final or Offline Sea becomes something more than a meme.Bisu got faced with what could be his best possible opponent in Sea as he has always had the upper hand going against him going 7-1 in official tournament games and administering tons of beatdowns in sponmatches and ladder. This has pretty much inflated his already stellar PvT record in the Post-KeSPA Era. This is the first Bo5 that Bisu is playing against Sea since the Terror TEMPTS Starleague all the way back in April of 2015 and compared to then, Bisu seems a lot more ready to take on Sea as their rivalry was a bit more back and forth at that time and now it's just Bisu all over him at 70%.When it comes to Sea, it's 2 things that are keeping him from going into the Ro4 right now. One is obviously the fact that he has to play Bisu, but Bisu aside, he has been playing some really good TvP so far in the tournament against Movie and Rain. 
Having been seeded into the Ro16 from his performance in ASL1 (Top 4) and bringing the A-game aggression that he has always been good at and pulling it off is just going to give him the mental boost that he needs to go up against Bisu. The other issue that keeps Sea from achieving is the offline match status. It has been touched before, by me even, but it is always going to be brought up with Sea. He needs to be able to get over his mental block in offline tournaments. This affected him even during the heyday of his rivalry with Killer when he could never make it far in an SSL. If 2017 is going to be his year, then it's this series that's really going to tell us all about it.It's a big map. Expansions are pretty far apart with same ground mains and natural, with a lot of space in front of the natural to setup for either player and hit both bases at once. For Bisu, it would be early gateway aggression or quick shuttle/reaver into wall-hopping (it's not an elevator if it doesn't go up/down) into the main with his forces to hit Sea. Alternatively, Sea can go for a build similar to what he did against Rain on Benzene with 4 factories off 2 bases and get that early tank army onto Bisu's natural ramp area for a solid contain and push.Being a 3 player map, there are an uneven number of bases for two players to take and usually the third main and natural are the biggest points of contention. Since both players know this, their strategies are going to reflect it. Bisu can't play defensively and take those bases. He will have to come up with a mid game push to make sure he can secure them as Sea is also going to anticipate this. Sea will focus on a tighter defensive opening with some good turret rings to keep recalls while slowly expanding to keep the base count as equal as possible. 
It's likely going to be a drawn out game but the clock is going to be on Sea's side, not Bisu's.Now, I could say like every other time that the strategy of Sea and Bisu is going to be Sea turtling off of 3 base/3 gas (4 base with 1 mineral only) and ready his 2/1 timing while Bisu takes the rest of the map and delays the push as much as he can. I think Sea is going to throw caution to the wind and proxy rax bunker push on cross spawn and most likely fail in marine control leading him to gg out of the game causing a collective sigh on every viewer.I have no clue why this map is supposed to be bad for carriers. It's got some dead areas and some ridges. There's that high ground behind the natural that can be reached but what about the opposite side? Easy to park carriers there without much trouble of getting hit. That said, Bisu won't go carriers and will instead stick with a gateway arbiter army while Sea is probably going to go for a similar build as he has against Rain in the Ro16. Bisu advances 3-1!There’s no question that the lack of Zergs in this tournament, or at least notably that advanced to the Round of 16, has had the community’s eyes locked on our two Zerg players. Hero is one such player and he was selected to play against Best in the Round of 8. Will one of our remaining Zergs make it to the next stage? Or will he be stopped cold in his tracks by one of the Dragons? Best got matched with the worst possible player out of the 2nd place finishers in their groups. Sea and GuemChi were opponents which I felt Best could defeat. Instead, he got placed against his worst nightmare, a Zerg who knows how to ZvP. And yes, Jaedong is by no means a weak ZvPer but let’s face it, he’s extremely rusty and hasn’t been playing up to form so Best beating Jaedong isn’t really a secret. A Bo5 series is really going to push Best to the brink against a guy who’s ZvP is his saving grace. 
However, Best has shown a decent ability to macro in his games against Jaedong as well as defend quite well against the hydra busts if hero decides to go for them. However, his high templar control really needs work as shown through his two games vs the dong. Luckily, it didn’t matter enough as he still won both those games, but against hero, he’ll need to be on top of his game.Best’s PvZ winrate in the KeSPA era was a measly 47.4% and while it has slightly improved in the post-KeSPA era to 53.9%, I’m not convinced Best is geared up to take this matchup on. Granted, maybe he’s not in too bad of shape considering he defeated Hero, 2-1, in the Kim Carry Myeongin so there’s that. But in sponmatches in the month of November, Best has not impressed. He was 3-6 against his opponent, hero, and even scarier was that he was 0-10 against a slumping EffOrt . Sure, he was 6-3 against Larva but this is also Larva who we’re talking about. Like I’ve said, Best has to go against his worst nightmare matchup and it’s not going to look pretty. I think he can take a game off hero but that’s about it. We’ll see how his preparation is against hero’s aggressive hydra style and maybe, just maybe, he can turn this series around. hero is probably thanking the stars he didn’t have to play Flash in the Round of 8, but getting put up against a Protoss definitely made his chances to get to the finals that much higher. The ZvP sniper is rejoicing from this selection (and also probably happy he can’t play Bisu due to the randomization rules). After a disappointing knockout in the Round of 8 last season to Sea, hero is looking to rebound in this ASL. Also to note, Hero is looking to rebound in the year overall, having not won a single tournament in 2016 compared to his big win in 2015 at the SSL11. His aggressive hydra builds will look to put Best at the edge of his seat and he’s also shown great usage of the early lings against the gate-first builds which Best has been favoring as of late. 
His game against Bisu on Demian showed he also has a killer instinct, getting a 5 ling run-by and dealing considerable economic damage to Bisu’s probe line. Best gave Jaedong many windows of opportunities for ling runbys but was never punished. Hero will definitely make use of those opportunities and try to keep Best off of his strong macro game.Hero back in the KeSPA era had a mediocre 53% ZvP winrate but since the post-KeSPA era started, he’s been rocking an amazing 70.6%. In his sponmatches in November, he was also at 70% with amazing records against top Protoss like Bisu 7-3 and Movie 8-1. Just another testament to his ZvP ability, hero took Bisu to 5 games in the SSL9 and also dismantled him in the SSL11 3-0 so hero’s Bo5 ZvP is definitely one of the strongest in the scene. Hero will go into this Round of 8 with a lot of confidence and will closely review Best’s games with Jaedong to find all the weak points in the armor to abuse. The tipping point for hero will be his late game management against arguably one of the strongest macro players in the game. Hero may instead decide to keep the games low-economy to push Best off his comfort game and look for him to mix in lurker drops to harass Best’s economy. Circuit Breaker will be our repeat map and this map has shown a ton of promise for hero and his aggressive hydra style with the close mineral-only base playing to his advantage. Probably the best map for hero to play on. If it gets to a game 5, I expect him to be able to take that game too so some insurance for hero if he somehow drops 2 games. Best's best opportunity (hehe) will be to get this to a late game split map scenario and abuse the OP Protoss gimmicks like Reavers, Archons, and Storms to push him to a victory. Basically, see Rain vs EffOrt on CB for an example game. 
Eye of the Storm will definitely challenge hero’s creativity when it comes to his style since depending on spawn (vertical), the 3rd is going to be extremely far away for him and he’ll need to play a much more turtle 4 base style. Hero isn’t the worst at the style but I’m sure he’d much prefer to play his bread and butter 4 hatch hydra aggression. Still, in the KeSPA era, EotS had a 59% ZvP winrate so it can’t be that bad for hero. Best will most likely be forced to opt for some kind of 8 gate goon all-in on this map since it sucks for longer games as the expansions are pretty difficult to take. Otherwise, cheese and forward gates are his best option. Benzene is the one map I think hero will struggle on, which is ironic given the forward close mineral-only that probably caters to his style. However, the mineral-only base is super wide open and not good for simcities later on in the game. So he’ll probably have to opt for that far-away 3rd gas base and we’ve seen how Protoss have been able to abuse it with nice zealot timings followed by a fast 3rd of their own. Best will just look to see which 3rd hero takes and will punish him accordingly and the mineral-only will help him jump start his economy past the mid-game so we can see his macro game shine. Demian is another solid map for hero, given that the 3rd is reasonably close and if the game extends, the low ground 4th is pretty close as well. Hero also showed he could go toe-to-toe against Bisu on this map though Bisu eventually overpowered him through brute strength. Still, I think this map is also favored for Zerg. Best did defeat Jaedong on this map so we'll see if he can utilize the same winning formula on hero. Strong map control with a deathball while expanding behind will be the name of the game but he must defend against hero's inevitable lurker drops on this map. hero advances 3-1! 
Bisu vs hero Demian Flash vs Sea Demian Jaedong vs Sharp Benzene Shuttle vs Light Benzene hero vs Rush Benzene There are some games that just wow us whether its due to one player being a macro monster, a micro master, a great decision-maker or for a bunch of other reasons. This was a game that emanated that feeling. Anyone who remembers past Bisu games will recognize what I mean. After losing many probes to a zergling runby into his main, Bisu claws his way back into the match with some stellar micro and decision making, up against the worst unit imaginable for him, hydras and one of his worst opponents, hero. There are some games that just wow us whether its due to one player being a macro monster, a micro master, a great decision-maker or for a bunch of other reasons. This was a game that emanated that feeling. Anyone who remembers past Bisu games will recognize what I mean. After losing many probes to a zergling runby into his main, Bisu claws his way back into the match with some stellar micro and decision making, up against the worst unit imaginable for him, hydras and one of his worst opponents, hero. Patience is a virtue. Flash faced one of the strongest oppositions from close comrade Sea. Up against a strong two fac attack from Sea that caused him to lift his expansion, Flash made some great choices to deflect what could've been a game ending push for any other low level terran player. Decision making at its finest from one of the best terran players in the history of the game! Patience is a virtue. Flash faced one of the strongest oppositions from close comrade Sea. Up against a strong two fac attack from Sea that caused him to lift his expansion, Flash made some great choices to deflect what could've been a game ending push for any other low level terran player. Decision making at its finest from one of the best terran players in the history of the game! As the saying goes, context is important. You won't find any amazing macro or micro in this game. 
However, this game is the epitome of Mind games. On the brink of elimination and having to play two ZvTs in a row, Jaedong chose to go for pool first, instead of the typical hatch opening that most Zergs would go for in an elimination situation. Jaedong's winning record is pretty low when it comes to pool first builds on his stream. His opponent, Sharp, chose to go for a proxy rax, anticipating the hatch opening and was caught completely offguard. The most amazing part is that Jaedong also decided to go for the same build the next game against Mind who also opened with a forward rax and lost the game. Jaedong can't possibly repeat the same build twice, right? As the saying goes, context is important. You won't find any amazing macro or micro in this game. However, this game is the epitome of Mind games. On the brink of elimination and having to play two ZvTs in a row, Jaedong chose to go for pool first, instead of the typical hatch opening that most Zergs would go for in an elimination situation. Jaedong's winning record is pretty low when it comes to pool first builds on his stream. His opponent, Sharp, chose to go for a proxy rax, anticipating the hatch opening and was caught completely offguard. The most amazing part is that Jaedong also decided to go for the same build the next game against Mind who also opened with a forward rax and lost the game. Jaedong can't possibly repeat the same build twice, right? We have come to expect perfection from our BW heroes, however, there are times where sloppiness can be tolerated. Even more so when the winner is none other than Light, a player who can't win TvP to save his life. His opponent? The ASL season 1 champion, Shuttle, who has been nothing short of solid in the PvT matchup. The result of their engagement was a sloppy but fun to watch back and forth scuffle as both players tried their best to starve the other and stave off elimination. This is one game that will keep you on the edge of your seat! 
We have come to expect perfection from our BW heroes, however, there are times where sloppiness can be tolerated. Even more so when the winner is none other than Light, a player who can't win TvP to save his life. His opponent? The ASL season 1 champion, Shuttle, who has been nothing short of solid in the PvT matchup. The result of their engagement was a sloppy but fun to watch back and forth scuffle as both players tried their best to starve the other and stave off elimination. This is one game that will keep you on the edge of your seat! https://www.youtube.com/watch?v=jWbi-GHatw8&t=1h33m17s Another entertaining and bizarre game played by none other than hero and Rush this time. Realizing that BBS is a reasonable option on Benzene, Rush decided to proxy near the 6 o'clock position. What does hero do? Scouts around for a proxy, misses it by mere pixels and goes for 3 hatch before pool. It looked like hero's elimination from ASL was basically confirmed as his natural went down without much of a fight, however, it seemed that several factors played into his favour. Is 3 hatch before pool really the counter to BBS? Another entertaining and bizarre game played by none other than hero and Rush this time. Realizing that BBS is a reasonable option on Benzene, Rush decided to proxy near the 6 o'clock position. What does hero do? Scouts around for a proxy, misses it by mere pixels and goes for 3 hatch before pool. It looked like hero's elimination from ASL was basically confirmed as his natural went down without much of a fight, however, it seemed that several factors played into his favour. Is 3 hatch before pool really the counter to BBS? 
Bisu vs Sea BisuDagger (Bisu 3-2) BigFan (Bisu 3-1) 2Pacalypse- (Bisu 3-1) FlaShFTW (Bisu 3-2) c3rberUs (Bisu 3-1) v1 (Bisu 3-1) BLinD-RawR (Bisu 3-2) mustaju (Bisu 3-0) Best vs hero BisuDagger (Best 3-1) BigFan (hero 3-0) 2Pacalypse- (hero 3-1) FlaShFTW (hero 3-1) c3rberUs (hero 3-1) v1 (hero 3-2) BLinD-RawR (hero 3-1) mustaju (hero 3-2) Writers: c3rberUs, BLinD-RawR, FlaShFTW, BigFan Graphics: v1 Editors: BigFan Photo Credits: Liquipedia, DailyEsports, Afreeca and Sinbal Farm BW Editor-In-Chief "Watch Bakemonogatari or I will kill you." -Toad, April 18th, 2017
/* $NetBSD: t_nanosleep.c,v 1.3 2013/03/31 16:47:16 christos Exp $ */ /*- * Copyright (c) 2011 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by <NAME>. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __RCSID("$NetBSD: t_nanosleep.c,v 1.3 2013/03/31 16:47:16 christos Exp $"); #include <sys/time.h> #include <sys/wait.h> #include <atf-c.h> #include <errno.h> #include <time.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sysexits.h> #include <unistd.h> static void handler(int signo) { /* Nothing. 
*/ } ATF_TC(nanosleep_basic); ATF_TC_HEAD(nanosleep_basic, tc) { atf_tc_set_md_var(tc, "descr", "Test that nanosleep(2) works"); } ATF_TC_BODY(nanosleep_basic, tc) { static const size_t maxiter = 10; struct timespec ts1, ts2, tsn; size_t i; for (i = 1; i < maxiter; i++) { tsn.tv_sec = 0; tsn.tv_nsec = i; (void)memset(&ts1, 0, sizeof(struct timespec)); (void)memset(&ts2, 0, sizeof(struct timespec)); ATF_REQUIRE(clock_gettime(CLOCK_MONOTONIC, &ts1) == 0); ATF_REQUIRE(nanosleep(&tsn, NULL) == 0); ATF_REQUIRE(clock_gettime(CLOCK_MONOTONIC, &ts2) == 0); /* * Verify that we slept at least one nanosecond. */ if (timespeccmp(&ts2, &ts1, <=) != 0) { (void)fprintf(stderr, "sleep time:: sec %llu, nsec %lu\n\t\t" "ts1: sec %llu, nsec %lu\n\t\t" "ts2: sec %llu, nsec %lu\n", (unsigned long long)tsn.tv_sec, tsn.tv_nsec, (unsigned long long)ts1.tv_sec, ts1.tv_nsec, (unsigned long long)ts2.tv_sec, ts2.tv_nsec); atf_tc_fail_nonfatal("inaccuracies in sleep time " "(resolution = %lu nsec)", tsn.tv_nsec); } } } ATF_TC(nanosleep_err); ATF_TC_HEAD(nanosleep_err, tc) { atf_tc_set_md_var(tc, "descr", "Test errors from nanosleep(2) (PR bin/14558)"); } ATF_TC_BODY(nanosleep_err, tc) { struct timespec ts; ts.tv_sec = 1; ts.tv_nsec = -1; errno = 0; ATF_REQUIRE_ERRNO(EINVAL, nanosleep(&ts, NULL) == -1); ts.tv_sec = 1; ts.tv_nsec = 1000000000; errno = 0; ATF_REQUIRE_ERRNO(EINVAL, nanosleep(&ts, NULL) == -1); ts.tv_sec = -1; ts.tv_nsec = 0; errno = 0; ATF_REQUIRE_ERRNO(0, nanosleep(&ts, NULL) == 0); errno = 0; ATF_REQUIRE_ERRNO(EFAULT, nanosleep((void *)-1, NULL) == -1); } ATF_TC(nanosleep_sig); ATF_TC_HEAD(nanosleep_sig, tc) { atf_tc_set_md_var(tc, "descr", "Test signal for nanosleep(2)"); } ATF_TC_BODY(nanosleep_sig, tc) { struct timespec tsn, tsr; pid_t pid; int sta; /* * Test that a signal interrupts nanosleep(2). * * (In which case the return value should be -1 and the * second parameter should contain the unslept time.) 
*/ pid = fork(); ATF_REQUIRE(pid >= 0); ATF_REQUIRE(signal(SIGINT, handler) == 0); if (pid == 0) { tsn.tv_sec = 10; tsn.tv_nsec = 0; tsr.tv_sec = 0; tsr.tv_nsec = 0; errno = 0; if (nanosleep(&tsn, &tsr) != -1) _exit(EXIT_FAILURE); if (errno != EINTR) _exit(EXIT_FAILURE); if (tsr.tv_sec == 0 && tsr.tv_nsec == 0) _exit(EXIT_FAILURE); _exit(EXIT_SUCCESS); } (void)sleep(1); (void)kill(pid, SIGINT); (void)wait(&sta); if (WIFEXITED(sta) == 0 || WEXITSTATUS(sta) != EXIT_SUCCESS) atf_tc_fail("signal did not interrupt nanosleep(2)"); } ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, nanosleep_basic); ATF_TP_ADD_TC(tp, nanosleep_err); ATF_TP_ADD_TC(tp, nanosleep_sig); return atf_no_error(); }
def _get_fetch_all_method(cls): return cls.fetch_all_api_method or f"{cls._get_name_plural()}.list"
Inside of every critic there is a regretful fan struggling against the permanence of publicly stated professional opinions. For all the differences that supposedly exist between critics and “regular people,” the only one that really matters is that critics are required by paycheck to take a snapshot of their feelings about a particular piece of art at a particular moment in time, and then pretend that this is how they will feel forever. In reality, there’s always that one review (only one if you’re lucky) you’d write differently with the benefit of hindsight. Personally speaking, I’m grateful that most of the publications I wrote for before The A.V. Club are either defunct or have bad search engines. I have opinions from my past that I’d cross the street to avoid being associated with. One review I’m fine standing behind is my take on Radiohead’s The King Of Limbs, which was turned in three days after the record was released on Friday, Feb. 18, and posted on The A.V. Club just eight days after the album’s existence was announced to the world via the band’s website. Even with the quick turnaround, my Kings Of Limbs review lagged behind the discussion that raged online in social and professional media seemingly from the very moment download codes starting appearing in the inboxes of Radiohead fans. Coverage of The King Of Limbs played out like breaking news—by the end of the day Friday, after tens of thousands of people had already given their yays or nays on the record on Facebook and Twitter, reviews started appearing in major publications like Esquire and NME. Regardless of whether the evaluations were positive or negative, it was incredible to me that so many people had already formulated opinions they felt comfortable putting out there for public consumption. Had they really given enough time to The King Of Limbs to truly “get” the album? Advertisement As both a music fan and a critic, I’m naturally of two minds on this question. 
As a fan, I have gut reactions like anybody else, and I’m just as liable to shoot my mouth off on an artist I might have only given a moment’s notice. The proliferation of social media didn’t invent this kind of casual acceptance or dismissal—it merely allowed people to broadcast it to whoever might be paying attention. So, while it might annoy me as a Radiohead fan when somebody “mehs” The King Of Limbs after only playing it once, I recognize that I do this all the time with artists plenty of other people take seriously. Likewise, I’ve enthused about an album on Twitter after only hearing a couple of songs, only to demur a week or so later after several more listens. Your opinions as a music fan tend to be instinctual and emotional—in contrast to the self-conscious, intellectual aesthetics of the critic—and you’re under no obligations to justify them beyond your own whims. Besides, there’s a lot of music out there; it can seem like a chore to spend extra time with something that seems unappealing at first contact when there are so many other choices. But one of the many great things about being a music fan is that you have an open invitation to revisit any artist whenever you feel like it; somebody that didn’t strike your fancy today might end up being a new obsession a year from now. Gut reactions only become a problem when people convince themselves that a cursory listen renders any further investigation moot. This is especially fatal for an album like The King Of Limbs, a purposefully difficult listen that takes time to ingratiate itself. The rewards are considerable for patient listeners, but there are more obstacles than ever preventing listeners from engaging with the sorts of “grower” records that Radiohead has banked its career on. 
This includes (I fear) the echo-chamber of social media, a forum better suited for glibness than thoughtfulness, where directing a tossed-off zinger at a popular institution like Radiohead is considered fresher and funnier than singing the same old praises. Advertisement Don’t worry—I’m not about to launch into another tired screed about “the death of the album” or the short attention spans of the iTunes generation. If anybody has an audience that’s still willing to put in the time to make sense of a curveball like The King Of Limbs, it’s the band that actually put out The King Of Limbs. As New York magazine pop music critic Nitsuh Abebe wrote of Radiohead last week: No other band makes so many fans turn quite so studiously patient and open-minded. It’s as if the world has agreed that this is the one flagship group everyone will turn to for that experience—the band people will enjoy taking seriously, approaching slowly, and pondering as art rather than entertainment. The whole concept of “serious listening” has somehow become this one act’s brand. How improbable is that? What if “serious listening” is supposed to be part of your job description? How can you do your job as a critic without coming late to the party on records like The King Of Limbs or Kanye West’s My Beautiful Dark Twisted Fantasy, another blue-chip album that was made available to critics and the public simultaneously? In the rush to stay relevant in the discussion about “important” records, do critics sacrifice some of the perspective that they’re supposed to provide for readers? Advertisement Those were some of the questions I posed to one of America’s best-known music critics, Greg Kot of the Chicago Tribune and the public radio show Sound Opinions, in a recent e-mail. 
Kot’s two-and-a-half-star review of The King Of Limbs appeared on the Tribune’s website Saturday night, and though I thought it was thorough and well thought-out (while disagreeing with the final verdict), I was curious about whether Kot had any misgivings about commenting on the record so quickly. Turns out he did, and not just for the Radiohead album. “I always wish I had more time to listen to a piece of music before reviewing it,” he wrote. “As one editor once told me, it’s like a jazz solo, no overdubs. Sometimes I want the overdub.” For Kot, “A good piece of criticism should both educate and illuminate, in addition to entertain, and that’s something that can’t be done in a day, let alone a few hours. It’s exciting to see people weighing in on the Radiohead album immediately after hearing it for the first time, but that’s not really criticism—it’s more like a first impression.” Like Kot, I come from a newspaper background, so I’m accustomed to the tight deadlines that require banging out a concert review within minutes of the final encore. At least with album reviews, writers typically have weeks or even months to ruminate before writing anything. (Promo copies for Baltimore indie-rock duo Wye Oak’s forthcoming Civilian went out in December, which I predict will help that very fine record do well with critics.) But the tricky thing about music writing—and part of what makes it the trickiest form of arts writing, in my opinion—is that a good piece of music should elicit varying responses over spans of time and in all sorts of environments. Unlike books, movies, or TV shows, songs are supposed to be experienced many, many times. It’s unlikely you’ve read any book outside of a small handful of favorites more than once, and you probably haven’t read those favorites dozens of times. It’s possible you’ve given your favorite movie 100 viewings, but that still pales in comparison to how often you’re exposed to a hit pop song or a deathless golden oldie on the radio. 
Music by nature is a slow burn, parsing out its charms in small increments over the course of weeks, years, even decades. Music can be the focus of your attention, but it often fades into the background, only to re-emerge when you least expect it and reveal a whole other dimension. No other art form weaves its way into the fabric of your life like music, and this inevitably shapes our feelings about it. Advertisement Along with time, context and environment play big parts in how we appreciate music, and as a critic you have to take that into account. An album might be best experienced on headphones, or it might demand to be played at 11 on stereo speakers. Discussing music critically means understanding that music can be good in different ways. It’s like food—you might love Thai cuisine, but that doesn’t make Polish cuisine bad because it doesn’t have the same flavors. It’s up to you to figure out how the artist is attempting to be “good” in order to measure whether the music succeeds by that standard, and that takes time and consideration. I think about this a lot when writing the monthly This Was Pop column with my colleague Genevieve Koski. When regarding something like Far East Movement’s “Like A G6,” one of the biggest dance-pop hits of 2010, it’s foolhardy to dissect the lyrics or gauge how “innovative” the music is. This is a song that’s intended to be blasted in bars and dance clubs for large groups of drunken singles, and in that context, “Like A G6” works nearly as well as The King Of Limbs does for stationary, contemplative Radiohead fans. Kot was so articulate on this point I’ll just quote him at length: For me, writing about a piece of recorded music is about listening, thinking, and listening some more, over a span of days in different contexts: over PC or laptop speakers, in the car driving around, on headphones, over a good stereo, on the kitchen boombox or iPod while washing dishes. 
As the context changes, you get new information, new angles into the music, and you eventually get a perspective on the album’s language. It’s a question of decoding that language for yourself, and then informing the reader about your findings. The job isn’t to have a definitive, be-all-end-all take on what a particular piece of music sounds like or what it means, but about offering an informed perspective on it. There is no one “right” opinion. But every great piece of music needs a great listener to “get it” and the better a piece of music is, the more perspectives it invites. You hope to be one of those “great” listeners, and raise the level of discussion about art/music/culture among your readers. I feel like I’m learning how to do that job—being a better listener—every time I try to distill thoughts, feelings, impressions of something as abstract as music into something as concrete as a review. Advertisement I ended up listening to The King Of Limbs about a dozen times on Friday and Saturday before I started writing my review on Sunday. That Friday morning I listened to it three times, including two listens in my car on a drive from Milwaukee to Chicago. The first listen was on my work computer at home at 8 a.m. It left me cold; The King Of Limbs played at medium volume on Mac speakers sounds like an Ethernet cable making sweet love to a modem. My opinion of the record improved considerably when I burned the files on a blank CD and listened to them in my car. Not only is my car (or any car, really) my favorite place to hear music (because you’re surrounded by speakers and you can play it louder than any place else), but taking in The King Of Limbs while hurtling forward at rapid speeds started to shift my perspective on the record. By the time I got to Chicago about an hour and a half later, I decided that I really liked The Kings Of Limbs, and subsequent listens would be about investigating why, exactly. 
In my review, I likened hearing The King Of Limbs to the sensations of “fumbling into motion” and “being in a car crash.” Would I have written that if I hadn’t been driving when I formulated my opinion of the record? Hard to say, but that’s what The King Of Limbs evoked for me during the time that I was writing about the record. Ask me how I feel about it in six months, and I might write an entirely different review.
Brachioradial pruritus in a patient with metastatic breast cancer to her cervical spine BRP: brachioradial pruritus MRI: magnetic resonance imaging UV: ultraviolet INTRODUCTION Brachioradial pruritus (BRP) is a localized neuropathic condition of aberrant sensation that occurs on the dorsolateral upper extremities. BRP most commonly occurs in women, particularly those who are fair skinned, with a mean age of 59 years at diagnosis. The diagnosis is clinical, with patients reporting symptoms in C5-C6 dermatomes of the dorsolateral arms, which sometimes radiate to the shoulders, neck, or upper trunk. Classically, patients report improvement of the pruritus with application of ice packs and return of symptoms after removal. The pathogenesis of BRP is unknown, although the condition has been linked to cervical spinal disease and ultraviolet (UV) radiation. Here we present a 46-year-old woman with UV-induced pruritus of her bilateral shoulders, anterior upper arms, and dorsal forearms. BRP was diagnosed subsequently in the setting of breast cancer metastases of the cervical spine, a novel association to our knowledge. INTRODUCTION Brachioradial pruritus (BRP) is a localized neuropathic condition of aberrant sensation that occurs on the dorsolateral upper extremities. BRP most commonly occurs in women, particularly those who are fair skinned, with a mean age of 59 years at diagnosis. 1 The diagnosis is clinical, with patients reporting symptoms in C5-C6 dermatomes of the dorsolateral arms, which sometimes radiate to the shoulders, neck, or upper trunk. 2 Classically, patients report improvement of the pruritus with application of ice packs and return of symptoms after removal. 3 The pathogenesis of BRP is unknown, although the condition has been linked to cervical spinal disease and ultraviolet (UV) radiation. Here we present a 46-year-old woman with UV-induced pruritus of her bilateral shoulders, anterior upper arms, and dorsal forearms. 
BRP was diagnosed subsequently in the setting of breast cancer metastases of the cervical spine, a novel association to our knowledge. CASE PRESENTATION A 46-year-old woman with a medical history of metastatic breast cancer presented to our clinic for slowly worsening pruritus of her bilateral shoulders, anterior upper arms, and dorsal forearms present for 1 year. Sunlight worsened her symptoms and application of ice relieved them, although they always returned upon removal of the ice. The patient's medications included fulvestrant, pertuzumab, trastuzumab, anastrozole, denosumab, and venlafaxine. Basic laboratory evaluation including liver and kidney function was unremarkable. Physical examination found excoriations on her bilateral upper extremities without appreciable primary lesions. There was mild lichenification of the bilateral arms concentrated at skin folds. She had breast cancer diagnosed 9 years prior at stage T2N1M0, for which she was treated with a combination of chemotherapy, surgery, and radiation. She remained in remission for the next 8 years, but unfortunately at about the time that her pruritus began, she was found to have widely metastatic recurrent breast cancer to the brain, liver, and axial skeletal system. A cervical spine magnetic resonance imaging (MRI) at the time of presentation showed diffuse osseous metastatic disease throughout the vertebral bodies and posterior elements (Fig 1). Alignment and cervical cord were intact with no overt prevertebral soft tissue swelling. In light of this history, BRP was diagnosed likely secondary to her cervical spine metastases. As she was already being treated with a selective serotonin and norepinephrine reuptake inhibitor, additional treatment recommendations included strict photoprotection and low-dose gabapentin. She initially reported drowsiness with this medication, although slow titration to 500 mg daily divided into 3 afternoon doses eventually improved both her pruritus and drowsiness. 
Although referral to the neurology department for electromyography was considered, this was deemed unnecessary given the patient's classic presentation, MRI findings, and improvement with gabapentin. Unfortunately, her DISCUSSION The etiology of BRP is not well understood, although the prevailing theory is that symptoms result from the combination of 2 factors: (1) abnormalities of the cervical spine may compress the spinal cord or cervical nerves as they exit the spinal column and (2) increased UV exposure further damages peripheral sensory nerves. The interplay of these factors is thought to result in the inappropriate interpretation of normal stimuli as pruritus in the distribution of the affected nerves. 2 Although most BRP is seen in the setting of cervical spine degenerative changes, a few cases have been reported secondary to primary spinal cord tumors. To our knowledge, this is the first case of BRP reported in the setting of metastatic malignancy to the cervical spine. Neural pathways for pruritus involve the nonmedullated, mechanoinsensitive C fibers, and finely medullated polymodal C fibers. These peripheral nerve fibers ascend through the dorsal horn of the spinal cord, synapsing with second-order neurons and crossing the midline until they eventually course through the spinothalamic tract to the thalamus. Thus, lesions that occur anywhere along these tracts are capable of causing neuropathic itch. Localized cutaneous pruritus may be related to damage of the peripheral nerve, nerve root, spinal cord, or central nervous system. 5 Our patient's dermatomal distribution of symptoms correlated well with the locations of her spinal metastases, supporting this theoretical mechanism. Imaging with computed tomography or MRI of patients with BRP typically finds abnormalities such as degenerative joint disease, cervical nerve impingement, osteoarthritis, foraminal stenosis, or other evidence of nerve compression. 
2 In this patient, although cervical spine MRI showed no macroscopic evidence of spinal cord compression by the tumor islands, it is possible that subtle compression or nerve irritation could have resulted from inflammation and edema surrounding the metastases. Metastatic cancer often induces inflammation that may cause fluid buildup, leading to pressure that can compress nervous tissue. Furthermore, almost all patients with BRP related to degenerative spinal change also do not meet radiologic diagnostic criteria for cervical radiculopathy. 2 This finding suggests that our current imaging resolution or visual diagnostic capabilities may not be sensitive enough to identify subtle changes that result in BRP. Although this could explain the compressive aspect of BRP development in our patient, the role of UV radiation has yet to be addressed. The impact of UV radiation on BRP symptoms has been repeatedly described, yet only theoretically explained. The most widely accepted theory is that excessive UV radiation may damage and reduce the C fibers that transmit itch leading to an increased pruritic response, a process known as alloknesis. 2 This epidemiologically explains why most BRP patients are fair-skinned individuals who live in sunny climates and participate in outdoor activities. Moreover, the pruritus of BRP frequently worsens during the summer months, when temperatures are warmer. In our patient, sunlight worsened her symptoms and application of ice relieved them, indicating temperature and UV sensitivity of her condition. Therefore, we hypothesize that our patient's BRP resulted from her metastatic cervical spinal disease acting as the predisposing factor, and exposure to UV radiation acting as the inciting factor. CONCLUSION Our case suggests that metastatic cancer to the cervical spine may cause compression of cervical nerves and predisposes these patients to BRP. 
This case also shows that future studies are needed to explore the complex neuropathic signaling involved in this condition and expand our current knowledge of associated risk factors beyond degenerative spinal disease.
// Copyright 2018 by <NAME> // // https://github.com/martinmoene/kalman-estimator // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef DSP_FILTER_HPP_INCLUDED #define DSP_FILTER_HPP_INCLUDED #include "dsp/biquad-design.hpp" namespace dsp { /// Low-pass filter template< typename T > class Lpf { public: using value_type = T; using BiQuad = dsp::BiQuadT<T>; // dbGain not available for lpf, hpf: const value_type dbGain = 0; Lpf( value_type f3db, value_type fs, value_type Q ) : bq( make_biquad( dsp::biquad_design< dsp::FilterResponse::low_pass >( dbGain, f3db, fs, Q ) ) ) {} auto operator()( value_type sample ) { return bq.step( sample ); } private: BiQuad bq; }; /// High-pass filter template< typename T > class Hpf { public: using value_type = T; using BiQuad = dsp::BiQuadT<T>; // dbGain not available for lpf, hpf: const value_type dbGain = 0; Hpf( value_type f3db, value_type fs, value_type Q ) : bq( make_biquad( dsp::biquad_design< dsp::FilterResponse::high_pass >( dbGain, f3db, fs, Q ) ) ) {} auto operator()( value_type sample ) { return bq.step( sample ); } private: BiQuad bq; }; } // namespace dsp #endif // DSP_FILTER_HPP_INCLUDED
<gh_stars>0 export interface SavePlayerResponse { id: string; error: string; }
A Charge Domain P-8T SRAM Compute-In-Memory with Low-Cost DAC/ADC Operation for 4-bit Input Processing This paper presents a low cost PMOS-based 8T (P-8T) SRAM Compute-In-Memory (CIM) architecture that efficiently performs the multiply-accumulate (MAC) operations between 4-bit input activations and 8-bit weights. First, bit-line (BL) charge-sharing technique is employed to design the low-cost and reliable digital-to-analog conversion of 4-bit input activations in the proposed SRAM CIM, where the charge domain analog computing provides variation tolerant and linear MAC outputs. The 16 local arrays are also effectively exploited to implement the analog multiplication unit (AMU) that simultaneously produces 16 multiplication results between 4-bit input activations and 1-bit weights. For the hardware cost reduction of analog-to-digital converter (ADC) without sacrificing DNN accuracy, hardware aware system simulations are performed to decide the ADC bit-resolutions and the number of activated rows in the proposed CIM macro. In addition, for the ADC operation, the AMU-based reference columns are utilized for generating ADC reference voltages, with which low-cost 4-bit coarse-fine flash ADC has been designed. The 256X80 P-8T SRAM CIM macro implementation using 28nm CMOS process shows that the proposed CIM shows the accuracies of 91.46% and 66.67% with CIFAR-10 and CIFAR-100 dataset, respectively, with the energy efficiency of 50.07-TOPS/W. INTRODUCTION Recently, Compute-In-Memory (CIM) techniques have become one of the most promising solutions to get over the memory wall problems when designing deep neural network (DNN) accelerators. The main idea of CIM is to perform a part of or all the computations of DNN inside memories to reduce the data transfer between memories and processors. In the static random-access memory (SRAM) based CIMs - , input activations are applied on word-lines (WLs) while the weight values are stored in SRAM bit-cells. 
Then, MAC operations are performed inside SRAM and the results are represented as analog voltages on bitlines (BLs). In the typical SRAM CIM, since WLs and weight storage nodes usually represent 1-bit data, the computations between binarized inputs and weights (such as BNN) are commonly performed - , where the DNN accuracy degradation is one of the main design bottlenecks. To improve DNN accuracies, SRAM CIMs need to process multi-bit input-activations and weight precisions. Several previous works have proposed multi-bit SRAM CIM by utilizing high resolution DAC/ADCs - . However, the multi-bit SRAM CIMs - incur significant hardware and energy overhead since they typically need expensive digital-to-analog converter (DAC) for input activations and analog-to-digital converter (ADC) to read the analog partial MAC results. In addition, when designing SRAM CIM using multi-bit precisions, the energy efficiency and inference accuracies are mainly decided by the multiple design variables that are the bit-precision of input activations, the number of activated rows in the CIM macros, the partial sum quantization scheme, ADC bit-resolution, etc. So, for the energy efficient design of CIMs with good enough DNN accuracies, the comprehensive analysis of the multiple design variables is highly required. In this paper, we propose the PMOS-based 8T (P-8T) SRAM Compute-In-Memory (CIM) architecture that can process 4-bit input activations and 8-bit weights with very low hardware cost. In the proposed SRAM CIM, the analog multiplication unit (AMU) that consists of the 16 local arrays, utilizes BL chargesharing technique for low-cost digital-to-analog conversion. Using the charge domain in-SRAM operations, the AMU enables cost effective digital-to-analog conversion without employing conventional DAC circuits. The charge domain operations in AMU also improves variation tolerance, thus generating linear MAC outputs. 
In addition, to improve energy efficiencies without sacrificing DNN accuracy, hardware (design parameters, analog MAC variation, ADC offset) considered system simulations are per-formed to decide the number of active rows and ADC bitresolutions in the proposed CIM. The 4-bit coarse-fine flash ADC with in-SRAM reference voltage generation is also proposed to reduce the hardware cost of analog-to-digital conversion in the proposed CIM. The hardware implementation results of the P-8T SRAM CIM using 28nm CMOS process show reliable MAC operations between 4-bit input activations and 8-bit weights with very low energy and hardware cost. The rest of this paper is organized as follows. Section II introduces the preliminary of multi-bit CIM. The details of the proposed P-8T SRAM CIM architecture are described in Section III. Section IV presents the hardware aware system analysis. Experimental results are shown in Section V. Finally, Section V concludes this work. Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from [email protected]. 1.(a) shows the typical multi-bit SRAM CIM and its design considerations to improve DNN accuracies as well as energy efficiency. As shown in the figure, multi-bit input activations are first converted into multiple WL analog levels (WL voltages, pulse width) using DACs. In the SRAM array, the multi-bit weights are bit-sliced by 1-bit data and stored in the bitcells. 
In order to simultaneously perform the MAC operations of different channels, the bit-sliced weight data having the same bit position is shared on the same BL. When the analog input values are applied on WLs, the MAC operations between multi-bit inputs and 1-bit weights (pMAC: partial MAC) are performed, and the results are expressed as BL analog voltage. Then, the analog voltages are converted to the digital values using ADCs. Finally, the partial MAC values are processed by the shift-adders to complete the MAC operations. ISLPED When designing the SRAM CIM that processes the high bitprecisions of inputs and weights, there exists the trade-offs between the design variables that are the bit-precision of input activations, the number of the activated rows in CIM, the partial sum quantization scheme, the ADC bit-resolution, and the hardware cost of analog circuits. As shown in the right-upper figure of Fig. 1(b), when the precision of the input activations is getting higher, the accuracy should be improved, but the area and energy cost of DAC increase as well. For example, as shown in the figure, in the case of 8-bit weight, the accuracy is similar to that of full-precision when using larger than 4-bit inputs. On the other hands, as presented in the lower figure of Fig. 1(b), the throughput of SRAM CIM is mainly decided by the number of activated rows, while the ADC cost becomes larger with increasing number of activated rows due to increasing sensing levels. In terms of the ADC bit resolution, ADC with small bit resolution saves the energy consumption of CIM at the cost of accuracy degradation. Therefore, to improve the energy efficiency and the throughput of SRAM CIM without accuracy degradation, the hardware cost of DAC and ADC should be carefully designed considering the design variables of CIM. In the proposed SRAM CIM design, the BL charge sharing in-SRAM based digital-to-analog conversion is proposed for the low cost implementation of 4-bit DAC. 
In addition, with the hardware considered system simulations, the partial MAC outputs of 241 levels can be read using 4-bit ADC with negligible accuracy degradation, thus leading to dramatic ADC hardware cost reduction. Fig. 2(b). The proposed P-8T SRAM bit-cell includes a conventional 6T SRAM for read/write operation and two PMOS transistors (P0 and P1) for multiplication. In the normal SRAM mode, it operates the same as the conventional 6T SRAM array. The 4-bit DAC and its MAC Operation using BL Charge-Sharing First, each CBL is connected with an iBL, and both BLs (all the CBL's and IBL's in Fig. 3 (a)) are initially precharged to VDD. Then, the input data (X of Fig. 3(a)) is applied to the NMOS transistor of the local peripheral circuit (N0 of Fig. 2(b)) to perform the digital-to-analog conversion of 4-bit input. Depending on the inputs X , each NMOS transistor in local peripheral circuits (N0~N15) shown in Fig. 3 Fig. 2 (b). In other words, the local peripheral circuit type B disconnects between two different iBLs. For example, as shown in Fig. 2(a), the LA (between iBL and iBL ) has a type B peripheral circuit, which is same as LA , LA , and LA , while the rests have a type A peripheral circuit. After the charged or discharged capacitances are decided depending on the input activation value, when the eDAC signal is 'High', the switch in the type B peripheral circuit connects different iBLs. Then, as shown in Fig. 3(b), the charge sharing is performed between all CBL capacitances, and every iBLs and CBLs in the AMU have the same voltage level representing the analog level of 4-bit input data. If the 4-bit input data is '1000(2)', the CBL voltage level is decided as half-VDD. Using BL charge sharing in the proposed AMU, the analog voltage levels of 4-bit input can be expressed as: In the proposed AMU, the multiplication between the 4-bit input activation and 1-bit weight is performed through P-8T bitcell, and the result is represented as the CBL analog voltage. 
The detailed process of the multiplication is illustrated in Fig. 3(c). When the multiplication process starts, eMULTb signal becomes 'Low' to disconnect iBL with CBL. Then, the CWL signal becomes 'Low' to activate P1 of Fig. 3(c). When the weight value is '1', the previous CBL voltage (4-bit input value) is preserved since the P0 transistor is turned off. When the weight value is '0', the P0 transistor is turned on, and CBL capacitance is filled with VDD representing zero multiplication result. So, the multiplication result between the 4-bit input activation and 1-bit weight is represented by 16 different analog voltages of the CBL. As shown in Fig. 2 (b), the proposed AMU simultaneously performs 16 multiplications. The operating waveform and the truth table of the overall multiplication operation is summarized in Fig. 4. As shown in the figure, the multiplication using the AMU can be divided into the precharge phase (Pch.), digital-to-analog conversion phase (DA conversion), and multiplication phase (Mult.). Based on BL charge-sharing operation, the AMU performs the digital-to-analog conversion of a 4-bit input data without employing the conventional DAC circuits. Fig. 5 presents the overall architecture of the 256x80 P-8T SRAM CIM macro, the circuit model, and the equation of the accumulation operation. As shown in Fig. 5 (a), the proposed SRAM array that consists of multiple AMUs can be divided into AMU arrays for analog computation (indicated in bold) and AMUREF arrays for ADC reference generation (indicated in dot). The ADC reference generation using AMUREF arrays will be discussed in section III. B. In the proposed SRAM CIM macro, 16 different local arrays are connected to an accumulation bit-line (ABL), so the column-wise summation is performed to complete the accumulation operation. For example, LA0,0, LA1,0, … , LA15,0 are connected to ABL0, as shown in Fig. 5 (a). 
During the CIM operation, 16 4-bit input data of different channels in a layer are applied to the AMU. After each local array in AMU completes the multiplications that are stored in CBL capacitances (CCBL's) as depicted in Fig. 3(a), all the CBL charges in the same ABL are shared using the eACC signal. As a result, the summation of 16 4-bit multiplication results is represented as the ABL analog voltage. In the proposed SRAM CIM macro, since the partial MAC (pMAC) output has 241 levels, the 8-bit resolution ADC is needed for accurate analog readout. However, considering hardware considered system simulations, 4-bit flash ADC instead of the 8-bit ADC is adopted with ignorable accuracy loss in our design, which will be addressed in section IV. Finally, after the ADC operation and add-shift operation, the proposed SRAM CIM macro produces 8 partial sum (128 MAC operation) results between 4-bit inputs and 8-bit weights. Fig. 5 (b), when the eACC signal connects CBL and ABL, all the charges in each CBL capacitance are summed onto ABL to generate the pMAC results. Based on the BL charge sharing operation, the total charge is equal to the charge-sum of 16 multiplication results stored in each CBL capacitance (CCBL) and the charge of the CABL that is precharged by VDD. The total capacitance is equal to the sum of 16 CBL capacitances and the ABL capacitance (CABL). Thus, VABL, which represents the partial MAC value, can be expressed as the equation shown in left-bottom figure of Fig. 5 (b). In order to evaluate the circuit operations of the proposed SRAM CIM macro, the comparison between 2 different voltage curves of pMAC value is shown in the right figure of Fig. 5 (b). The voltage curves are obtained from the ideal equation (in dot line) and the actual circuit simulations using 10K-samples Monte-Carlo simulation (in bold line), respectively. 
As shown in the figure, the voltage curve from the Monte-Carlo simulations shows almost the same results as the ideal equation curve. By utilizing BL charge sharing for accumulation together with digital-to-analog conversion and multiplication, the proposed SRAM CIM macro provides variation-tolerant and linear pMAC outputs. Bit-Line Charge-Sharing based ADC operation In the proposed SRAM CIM macro, for a low-cost implementation of analog-to-digital conversion, we propose a 4-bit coarse-fine flash ADC based on an in-SRAM reference voltage generation scheme. For the 4-bit ADC operation, the 16 AMUREF arrays presented in Fig. 4(a) are utilized to generate 16 different ADC reference voltages. While the 64 AMU arrays process the MAC operations, the AMUREF arrays also perform almost the same operations, but with different input patterns applied through the NMOS transistors (N0~N15 depicted in Fig. 3(a)), to generate the ADC reference voltages. Fig. 6(a) illustrates the operation principle of the AMUREF arrays for ADC reference voltage generation. As shown in the figure, during the digital-to-analog conversion phase, the input pattern of '1000(2)' is applied to all the AMUREF arrays, and the half-VDD state is formed on all RBLs (RBL$_0$ ~ RBL$_{15}$). In the multiplication phase, the stored data pattern of the local arrays in the same RBL determines whether the CBL voltage is 'half-VDD' or 'VDD'. After BL charge sharing is performed, the ADC reference voltage is generated on the CBL depending on the stored data pattern. Based on the charge-sharing operation, the reference voltage can be expressed as: $V_{REF,k} = \left( \tfrac{1}{2}\,k + (16 - k) \right) \times \frac{V_{DD}}{16}$, where $k$ is the number of cells storing '1' data. As shown in the table in Phase 3, since the number of cells storing '1' data differs for each ABLREF, 16 different voltage levels ($V_{DD}$, $\tfrac{31}{32}V_{DD}$, …, $\tfrac{17}{32}V_{DD}$) are formed and used for the 4-bit ADC operation. Fig. 6(b) also shows the proposed 4-bit ADC structure that performs 1-bit coarse and 3-bit fine analog readout.
Although the partial MAC (pMAC) output of the proposed CIM macro has 241 levels, most of the pMAC results are places within the range from 0 to 128 (129 levels), as shown in the left-upper figure of Fig. 6(b). Here, the pMAC distributions are obtained from 2nd layer of ResNet-20 with CIFAR-10 dataset when using the proposed SRAM CIM. To reduce the ADC bit resolution, the pMAC values that are larger than the pre-decided threshold (rarely observed) are quantized as the threshold value. The accuracy results with different threshold values are discussed in section IV. For the ADC operation, first of all, the 1-bit coarse analog readout is performed by comparing the ABL voltage having a pMAC value with ABLREF having 48 /64 VDD to decide the MSB of pMAC. Then, based on the MSB data, we can determine whether the next 3-bit flash ADC sampling applies to the lower pMAC range (from 0 to 63) or the upper pMAC range (from 64 to 127). When the MSB is '0', the next 3-bit flash ADC sampling occurs on the lower pMAC range (from 0 to 63) using ABLREF ~ ABLREF . Otherwise, it is on the upper pMAC range (from 64 to 127) using ABLREF ~ ABLREF . In the proposed 4-bit ADC, only 8 comparators are needed to complete the operation. HARDWARE AWARE SYSTEM ANALY-SIS As mentioned in Section II, the energy efficiency and inference accuracies of SRAM CIM are mainly affected by ADC bit-resolution and its hardware cost. In the proposed SRAM CIM, the partial sum quantization method and the ADC bit-resolution are investigated based on the hardware considered system simulations that take into account the number of activated rows in CIM macro and the hardware errors (PVT error and comparator offset). Here, the hardware errors are obtained from the 10K Monte-Carlo simulation shown in Fig. 5(b). Fig. 7 presents the results of the hardware considered PyTorch simulations using ResNet-20 with CIFAR-10 dataset. As shown in the upper-left figure of Fig. 
7(a), since most of the pMAC values have small amplitude, as mentioned in Section III.B, the pMAC values that are larger than the pre-decided threshold, are quantized to the threshold value. Here, we define = 1 − ℎ ℎ /2 , where the q means the ADC resolution for precise pMAC readout. For example, if the cutoff value is 0.375 in the case of 4 activated rows (Th. = 24), pMAC values larger than 24 are clipped and represented as 23, as shown in left figure of Fig. 7(a). To find the proper cutoff value, the design variables such as the number of activated rows and hardware errors (PVT error and comparator offset) in CIM, are also considered when analyzing accuracies. In the center figure of 7(a), the accuracy analysis is performed only considering the number of activated rows and varying cutoff values. When the cutoff value is 0.625, the accuracy decreases smaller than 1% regardless of the number of the activated rows. However, when hardware errors are considered (the right figure of Fig. 7 (a)), at the cutoff value of 0.50, the accuracy degrades around 1%. So, in our design, the partial sum quantization with the cutoff value of 0.50 is used. Fig. 7(b) shows the energy efficiency (left figure) and the accuracy (center and right figures) results for different ADC bitresolutions and the different number of activated rows at the cutoff value of 0.50. As shown in the figures, without considering the hardware errors (the center figure of Fig. 7(b)), regardless of the number of the activated rows, as ADC bit resolution becomes larger, the accuracies approach to the baseline (full precision) while degrading the energy efficiencies. However, when the hardware errors are considered, even if the ADC resolutions increases, accuracy drop is observed with increasing number of activated rows. In addition, as shown in the right figure of Fig. 7(b), when the number of the activated rows is 4, the accuracy using 4-bit ADC is similar to the accuracies obtained using 5-bit or 6bit ADCs. 
This is because a higher ADC bit resolution makes the sensing margin smaller, making it more susceptible to hardware errors. So, in the cases where the number of activated rows is 8 and 16, the best accuracy is observed when the 4-bit ADC is used. Based on the hardware-considered system simulations, the proposed SRAM CIM uses the cutoff value of 0.5 with 4-bit ADC resolution. EXPERIMENTAL RESULTS The 256×80 P-8T SRAM CIM macro has been implemented using a 28nm CMOS process. Fig. 8 shows the layout and the summary of the implementation. The proposed SRAM CIM utilizes charge-domain operations for the 4-bit input digital-to-analog conversions and for the MAC operations between the inputs and 8-bit weights. The inference accuracies of the proposed SRAM CIM using CIFAR-10 and CIFAR-100 data with ResNet-20 are shown in Table I. In the case of activating 8 rows, the accuracies on CIFAR-10 and CIFAR-100 are 91.46% and 66.67%, respectively. When activating 16 rows for high throughput, the accuracies on CIFAR-10 and CIFAR-100 are 90.47% and 64.26%, respectively. Fig. 9(a) presents the reliability analysis of the digital-to-analog conversion using the AMU when performing 10K-sample Monte-Carlo simulations. Thanks to the BL charge-sharing operation, the proposed digital-to-analog conversion is seamlessly performed at different supply voltages. The worst standard deviation of 1.8mV is observed when the DAC code is 8 at a 0.6V supply, leading to negligible accuracy degradation. Fig. 9(b) also shows the comparison of the energy consumption between different 4-bit flash ADCs. The proposed ADC effectively reduces the energy consumption of the reference voltage generation by using in-SRAM reference generation. It also reduces the number of comparators by performing the 1-bit coarse sampling and 3-bit fine sampling. Compared with the conventional 4-bit flash ADC using an R-ladder, the proposed coarse-fine (C-F) ADC saves 43.9% of the normalized energy consumption.
The energy efficiencies and the operating frequencies of the proposed SRAM CIM when running with different supply voltages are presented in Fig. 10(a). In the case of 16 activated rows, energy efficiencies of 9.77-TOPS/W and 50.07-TOPS/W are achieved with 1.2V and 0.6V supply voltages, respectively. The operating frequency ranges from 76.9MHz at 0.6V to 435MHz at 1.2V. As shown in Fig. 10(b), the AMU with a 256×80 array, which performs the digital-to-analog conversions and MAC operations, consumes only 11.4% of the total energy. The ADC delay accounts for only 31.8% of the total delay. The comparisons to prior state-of-the-art multi-bit SRAM CIMs are also shown in Table II. CONCLUSIONS In this paper, we present a PMOS-based 8T SRAM CIM architecture that efficiently performs the multiply-accumulate (MAC) operations between 4-bit input activations and 8-bit weights. In the proposed CIM macro, based on the BL charge-sharing operation, the analog multiplication unit (AMU) performs the MAC operations, and it also generates the ADC reference voltages. The BL charge-sharing based digital-to-analog conversion can represent 4-bit inputs as BL analog voltages, alleviating the DAC implementation cost. In addition, the 4-bit coarse-fine flash ADC using in-SRAM ADC reference voltages can convert partial MAC outputs of 241 levels into 4-bit digital outputs, which reduces the ADC cost while maintaining inference accuracies. Hardware-considered system simulations are also employed to decide the CIM design parameters, leading to a highly energy-efficient CIM design without sacrificing DNN accuracies.
<reponame>inawrath/walmart-frontend import React from "react" import { render } from "@testing-library/react" import Layout from "../layout" test("Layout with text", () => { const { getByText } = render(<Layout>Testing</Layout>) expect(getByText("Testing")).toContainHTML("<main>Testing</main>") })
//View Holder Class public class BusListAdapterViewHolder extends RecyclerView.ViewHolder implements View.OnClickListener { public final TextView mBusListTextView; public final TextView mBusDirTextView; CircularProgressButton follow_cbp; public BusListAdapterViewHolder(View view) { super(view); mBusListTextView = (TextView) view.findViewById(R.id.list_item_sticky_header_social_text); mBusDirTextView = (TextView) view.findViewById(R.id.list_item_sticky_header_social_pdate); //Set onclick listener to the elements you want to make clickable view.setOnClickListener(this); } @Override public void onClick(View v) { //Opening the Profile of the user by passing user's id /*int adapterPosition = getAdapterPosition(); String BusListid = mBusListData[adapterPosition][0]; String BusListName=mBusListData[adapterPosition][1]; mClickHandler.onClick(BusListid, BusListName);*/ } }
/**
 * Releases the native memory held by every tensor in the collection, then
 * empties the collection. Intended for cleanup paths where a
 * try-with-resources statement cannot be used; a failure while closing one
 * tensor is reported and does not stop the remaining closes.
 */
private static void closeTensors(final Collection<Tensor<?>> tensors) {
    for (final Tensor<?> tensor : tensors) {
        try {
            tensor.close();
        } catch (final Exception closeFailure) {
            // Keep going: one failed close must not leak the other tensors.
            System.err.println("Error closing Tensor.");
            closeFailure.printStackTrace();
        }
    }
    tensors.clear();
}
<filename>JZ_Offer/ms_60/m50_test.go package ms_60 import ( "github.com/stretchr/testify/assert" "testing" ) // 面试题50. 第一个只出现一次的字符 func firstUniqChar(s string) byte { temp := make(map[byte]int) for i := range s { temp[s[i]]++ } // map 无序,所以又遍历一遍字符串 for i := range s { if temp[s[i]] == 1 { return s[i] } } return ' ' } func TestFirstUniqChar(t *testing.T) { assertions := assert.New(t) assertions.Equal(firstUniqChar(""), byte(' ')) assertions.Equal(firstUniqChar("abaccdeffb"), byte('d')) assertions.Equal(firstUniqChar("abaccdeff"), byte('b')) }
def checklinkfunc(link):
    """Check whether the first URL found in *link* is flagged by Google Safe Browsing.

    Returns a human-readable status string. If no URL is present in the
    command text (or the lookup fails), a usage-example message is returned
    instead of raising.
    """
    key = Settings().SECRETS['GOOGLE_SAFE_BROWSING_API_KEY']
    # The old SafeBrowsing(f"{key}") wrapper was a no-op for string secrets.
    checker = SafeBrowsing(key)
    # Likewise, r"{regex}".format(regex=...) just round-tripped the secret
    # through str.format; use the configured pattern directly.
    regex = Settings().SECRETS['VALID_LINK_REGEX']
    url = findall(regex, link)
    try:
        # findall with groups yields tuples; the full match is the first group.
        link = url[0][0]
        response = checker.lookup_urls([link])
        # Equality (not `is`) kept deliberately: the API value may not be a
        # plain bool, and non-bool values should fall through to the
        # "Something's wrong" branch — TODO confirm against the API.
        if response[link]["malicious"] == False:
            return "{link} **is safe!**".format(link=link)
        elif response[link]["malicious"] == True:
            return "{link} **is malicious!!!**".format(link=link)
        else:
            return "Something's wrong"
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort fallback, but only for
        # ordinary errors (no URL matched, lookup/network failure, ...).
        message = "There was no link in your command\n"
        message += "Example command: ``checklink <pastethelinkhere>``"
        return message
/**
 * @brief Token credential.
 *
 * Abstract base for credential types that can produce an access token for a
 * requested set of scopes. Derived classes implement GetToken(); instances
 * are non-copyable and constructible only by derived classes.
 */
class TokenCredential {
public:
  /**
   * @brief Gets an authentication token.
   * @param context A context to control the request lifetime.
   * @param scopes The scopes the returned token must grant access to.
   * @return The access token for the requested scopes.
   */
  virtual AccessToken GetToken(Context const& context, std::vector<std::string> const& scopes) const = 0;

  /// Virtual so derived credentials are destroyed correctly via base pointers.
  virtual ~TokenCredential() = default;

protected:
  /// Only derived classes may construct a TokenCredential.
  TokenCredential() {}

private:
  // Non-copyable: copy construction and copy assignment are deleted
  // (kept private, matching this file's existing convention).
  TokenCredential(TokenCredential const&) = delete;
  void operator=(TokenCredential const&) = delete;
}
<reponame>friendlyanon/jluna // // This file contains the code of the section on manual implementation of usertypes in docs/manual.md // #include <jluna.hpp> /// cpp-side implementation of frogs and tadpoles class Frog { public: struct Tadpole { std::string _name; Tadpole() : _name("") {} Frog evolve() const { if (_name == "") throw std::invalid_argument("tadpole needs to be named before evolving"); return Frog(_name); } }; public: std::vector<Frog::Tadpole> spawn(size_t n) const { std::vector<Frog::Tadpole> out; for (size_t i = 0; i < n; ++i) out.push_back(Frog::Tadpole()); return out; } std::string get_name() { return _name; } private: Frog(std::string name) : _name(name) {} std::string _name; }; /// julia-side implementation of frogs and tadpoles const char* frogs_dot_jl = R"( mutable struct Tadpole _name::String evolve::Function Tadpole() = new( "", (this::Tadpole) -> Frog(this) ) end struct Frog _name::String spawn::Function Frog(as_tadpole::Tadpole) = new( as_tadpole._name, (n::Integer) -> [Tadpole() for _ in 1:n] ) end function generate_frog(name::String) ::Frog tadpole = Tadpole() tadpole._name = name return Frog(tadpole) end )"; // box cpp Frog::Tadpole to julia Tadpole template<Is<Frog::Tadpole> T> Any* box(T in) { auto sentinel = GCSentinel(); static auto* tadpole_ctor = jl_find_function("Main", "Tadpole"); auto* out = jluna::safe_call(tadpole_ctor); static auto* setfield = jl_find_function("Base", "setfield!"); static auto field_symbol = Symbol("_name"); jluna::safe_call(setfield, out, (Any*) field_symbol, box<std::string>(in._name)); return out; } // box cpp Frog to julia Frog template<Is<Frog> T> Any* box(T in) { auto sentinel = GCSentinel(); static auto* frog_ctor = jl_find_function("Main", "generate_frog"); auto* out = jluna::safe_call(frog_ctor, box<std::string>(in._name)); return out; } // unbox julia Tadpole to cpp Frog::Tadpole template<Is<Frog::Tadpole> T> T unbox(Any* in) { auto sentinel = GCSentinel(); static auto* getfield = 
jl_find_function("Base", "getfield"); static auto field_symbol = Symbol("_name"); Any* julia_side_name = jluna::safe_call(getfield, in, (Any*) field_symbol); auto out = Frog::Tadpole(); out._name = unbox<std::string>(julia_side_name); return out; } // unbox julia Frog to cpp Frog template<Is<Frog> T> T unbox(Any* in) { auto sentinel = GCSentinel(); static auto* getfield = jl_find_function("Base", "getfield"); static auto field_symbol = Symbol("_name"); Any* julia_side_name = jluna::safe_call(getfield, in, (Any*) field_symbol); auto tadpole = Frog::Tadpole(); tadpole._name = unbox<std::string>(julia_side_name); return tadpole.evolve(); } int main() { State::initialize(); State::safe_eval(frogs_dot_jl); auto cpp_tadpole = Frog::Tadpole(); cpp_tadpole._name = "Ted"; State::new_named_undef("jl_tadpole") = box<Frog::Tadpole>(cpp_tadpole); State::safe_eval(R"( println(jl_tadpole) jl_frog = jl_tadpole.evolve(jl_tadpole); println(jl_frog) )"); Frog cpp_frog = Main["jl_frog"]; std::cout << cpp_frog.get_name(); return 0; }
/**
 * Construct boost ptime values from clocks and from an ISO string,
 * printing the date and time-of-day of each.
 */
TEST(datetime, time_point_with_clock_string){
    // Small helper so each time source is printed identically.
    auto printParts = [](const ptime& value) {
        cout << value.date() << endl;
        cout << value.time_of_day() << endl;
    };

    ptime timePoint{second_clock::universal_time()};
    printParts(timePoint);  // current UTC time

    timePoint = from_iso_string("20220103T213440");
    printParts(timePoint);  // fixed time parsed from an ISO-8601 string

    timePoint = second_clock::local_time();
    printParts(timePoint);  // current local time
}
package org.motechproject.openmrs.ws;

import java.net.URI;
import java.net.URISyntaxException;

import org.motechproject.MotechException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.util.UriTemplate;

/**
 * Represents a single OpenMRS Web Application instance
 */
@Component
public class OpenMrsInstance {

    private static final String OPENMRS_WEB_SERVICE_PATH = "/ws/rest/v1";

    private final String openmrsUrl;
    private final String motechPatientIdentifierTypeName;
    private final ApiVersion apiVersion;

    public static enum ApiVersion {
        API_1_8, API_1_9;
    }

    @Autowired
    public OpenMrsInstance(@Value("${openmrs.url}") String openmrsUrl,
            @Value("${openmrs.motechIdName}") String motechPatientIdentifierTypeName,
            @Value("${openmrs.apiVersion}") String apiVersionUsed) {
        this.openmrsUrl = openmrsUrl + OPENMRS_WEB_SERVICE_PATH;
        this.motechPatientIdentifierTypeName = motechPatientIdentifierTypeName;
        this.apiVersion = parseApiVersion(apiVersionUsed);
    }

    /**
     * Maps the configured version string onto the enum. The null-safe
     * {@code "literal".equals(value)} order is kept deliberately; anything
     * other than "1.8" or "1.9" is rejected.
     */
    private static ApiVersion parseApiVersion(String apiVersionUsed) {
        if ("1.8".equals(apiVersionUsed)) {
            return ApiVersion.API_1_8;
        }
        if ("1.9".equals(apiVersionUsed)) {
            return ApiVersion.API_1_9;
        }
        throw new MotechException("OpenMRS API Version must be set to either: 1.8 or 1.9");
    }

    public String getOpenmrsUrl() {
        return openmrsUrl;
    }

    public String getMotechPatientIdentifierTypeName() {
        return motechPatientIdentifierTypeName;
    }

    public ApiVersion getApiVersion() {
        return apiVersion;
    }

    /** Resolves a relative web-service path against this instance's base URL. */
    public URI toInstancePath(String path) {
        try {
            return new URI(openmrsUrl + path);
        } catch (URISyntaxException e) {
            throw new MotechException("Bad URI");
        }
    }

    /** Same as {@link #toInstancePath}, expanding {placeholder} segments with the given values. */
    public URI toInstancePathWithParams(String path, Object... params) {
        return new UriTemplate(openmrsUrl + path).expand(params);
    }
}
/**
 * Record one JSP execution whose wall-clock time is already known, updating
 * the shared min/max/total statistics under the stats lock.
 *
 * @param dTime execution time of the JSP, in the unit used by the stats fields
 */
public static void addJSPMeasurement(final double dTime) {
    synchronized (oStatsLock) {
        // First sample overall (no requests and no JSP measurements yet):
        // seed min and max instead of comparing against unset values.
        final boolean firstSample = iRequests + jspMeasurements == 0;
        if (firstSample) {
            dMinTime = dTime;
            dMaxTime = dTime;
        } else {
            dMinTime = Math.min(dMinTime, dTime);
            dMaxTime = Math.max(dMaxTime, dTime);
        }
        jspMeasurements++;
        totalJSPExecutionTime += dTime;
    }
}
/*
 * Copyright (c) 2019 - 2021 Geode-solutions
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <geode/basic/assert.h>
#include <geode/basic/logger.h>
#include <geode/basic/range.h>
#include <geode/basic/uuid.h>
#include <geode/geometry/point.h>
#include <geode/mesh/builder/edged_curve_builder.h>
#include <geode/mesh/builder/point_set_builder.h>
#include <geode/mesh/builder/surface_mesh_builder.h>
#include <geode/mesh/builder/triangulated_surface_builder.h>
#include <geode/mesh/core/geode_edged_curve.h>
#include <geode/mesh/core/geode_point_set.h>
#include <geode/mesh/core/geode_polygonal_surface.h>
#include <geode/mesh/core/geode_polyhedral_solid.h>
#include <geode/mesh/core/geode_triangulated_surface.h>
#include <geode/mesh/core/point_set.h>
#include <geode/model/mixin/core/block.h>
#include <geode/model/mixin/core/corner.h>
#include <geode/model/mixin/core/detail/count_relationships.h>
#include <geode/model/mixin/core/line.h>
#include <geode/model/mixin/core/model_boundary.h>
#include <geode/model/mixin/core/surface.h>
#include <geode/model/representation/builder/brep_builder.h>
#include <geode/model/representation/builder/detail/copy.h>
#include <geode/model/representation/core/brep.h>
#include <geode/model/representation/io/brep_input.h>
#include <geode/model/representation/io/brep_output.h>
#include <geode/tests/common.h>

// Adds 6 named corners; also adds and immediately removes a temporary corner
// to exercise removal, then checks counts and one name.
std::array< geode::uuid, 6 > add_corners(
    const geode::BRep& model, geode::BRepBuilder& builder )
{
    std::array< geode::uuid, 6 > uuids;
    for( const auto c : geode::Range{ 6 } )
    {
        uuids[c] = builder.add_corner();
        builder.set_corner_name( uuids[c], absl::StrCat( "corner", c + 1 ) );
    }
    const auto& temp_corner = model.corner(
        builder.add_corner( geode::OpenGeodePointSet3D::impl_name_static() ) );
    builder.remove_corner( temp_corner );
    const auto message =
        absl::StrCat( "[Test] BRep should have ", 6, " corners" );
    OPENGEODE_EXCEPTION( model.nb_corners() == 6, message );
    OPENGEODE_EXCEPTION(
        geode::detail::count_relationships( model.corners() ) == 6, message );
    OPENGEODE_EXCEPTION( model.corner( uuids[3] ).name() == "corner4",
        "[Test] Wrong Corner name" );
    return uuids;
}

// Adds 9 named lines (plus a removed temporary), then checks counts and one name.
std::array< geode::uuid, 9 > add_lines(
    const geode::BRep& model, geode::BRepBuilder& builder )
{
    std::array< geode::uuid, 9 > uuids;
    for( const auto l : geode::Range{ 9 } )
    {
        uuids[l] = builder.add_line();
        builder.set_line_name( uuids[l], absl::StrCat( "line", l + 1 ) );
    }
    const auto& temp_line = model.line(
        builder.add_line( geode::OpenGeodeEdgedCurve3D::impl_name_static() ) );
    builder.remove_line( temp_line );
    const auto message =
        absl::StrCat( "[Test] BRep should have ", 9, " lines" );
    OPENGEODE_EXCEPTION( model.nb_lines() == 9, message );
    OPENGEODE_EXCEPTION(
        geode::detail::count_relationships( model.lines() ) == 9, message );
    OPENGEODE_EXCEPTION(
        model.line( uuids[3] ).name() == "line4", "[Test] Wrong Line name" );
    return uuids;
}

// Adds 5 surfaces — 2 triangulated (named), 3 polygonal — plus a removed
// temporary, then checks counts and one name.
std::array< geode::uuid, 5 > add_surfaces(
    const geode::BRep& model, geode::BRepBuilder& builder )
{
    std::array< geode::uuid, 5 > uuids;
    for( const auto s : geode::Range{ 2 } )
    {
        uuids[s] = builder.add_surface(
            geode::OpenGeodeTriangulatedSurface3D::impl_name_static() );
        builder.set_surface_name( uuids[s], absl::StrCat( "surface", s + 1 ) );
    }
    for( const auto s : geode::Range{ 2, 5 } )
    {
        uuids[s] = builder.add_surface(
            geode::OpenGeodePolygonalSurface3D::impl_name_static() );
    }
    const auto& temp_surface = model.surface( builder.add_surface() );
    builder.remove_surface( temp_surface );
    const auto message =
        absl::StrCat( "[Test] BRep should have ", 5, " surfaces" );
    OPENGEODE_EXCEPTION( model.nb_surfaces() == 5, message );
    OPENGEODE_EXCEPTION(
        geode::detail::count_relationships( model.surfaces() ) == 5, message );
    OPENGEODE_EXCEPTION( model.surface( uuids[1] ).name() == "surface2",
        "[Test] Wrong Surface name" );
    return uuids;
}

// Adds a single named block (plus a removed temporary), then checks counts.
std::array< geode::uuid, 1 > add_blocks(
    const geode::BRep& model, geode::BRepBuilder& builder )
{
    std::array< geode::uuid, 1 > uuids;
    for( const auto b : geode::Range{ 1 } )
    {
        uuids[b] = builder.add_block();
        builder.set_block_name( uuids[b], absl::StrCat( "block", b + 1 ) );
    }
    const auto& temp_block = model.block( builder.add_block(
        geode::OpenGeodePolyhedralSolid3D::impl_name_static() ) );
    builder.remove_block( temp_block );
    const auto message =
        absl::StrCat( "[Test] BRep should have ", 1, " block" );
    OPENGEODE_EXCEPTION( model.nb_blocks() == 1, message );
    OPENGEODE_EXCEPTION(
        geode::detail::count_relationships( model.blocks() ) == 1, message );
    OPENGEODE_EXCEPTION(
        model.block( uuids[0] ).name() == "block1", "[Test] Wrong Block name" );
    return uuids;
}

// Adds 3 named model boundaries (plus a removed temporary), then checks counts.
std::array< geode::uuid, 3 > add_model_boundaries(
    const geode::BRep& model, geode::BRepBuilder& builder )
{
    std::array< geode::uuid, 3 > uuids;
    for( const auto mb : geode::Range{ 3 } )
    {
        uuids[mb] = builder.add_model_boundary();
        builder.set_model_boundary_name(
            uuids[mb], absl::StrCat( "boundary", mb + 1 ) );
    }
    const auto& temp_boundary =
        model.model_boundary( builder.add_model_boundary() );
    builder.remove_model_boundary( temp_boundary );
    const auto message =
        absl::StrCat( "[Test] BRep should have ", 3, " model boundaries" );
    OPENGEODE_EXCEPTION( model.nb_model_boundaries() == 3, message );
    OPENGEODE_EXCEPTION(
        geode::detail::count_relationships( model.model_boundaries() ) == 3,
        message );
    OPENGEODE_EXCEPTION( model.model_boundary( uuids[0] ).name() == "boundary1",
        "[Test] Wrong ModelBoundary name" );
    return uuids;
}

// Wires each corner as boundary of 3 lines (and each line bounded by 2
// corners), then verifies both directions of the relationship.
void add_corner_line_boundary_relation( const geode::BRep& model,
    geode::BRepBuilder& builder,
    absl::Span< const geode::uuid > corner_uuids,
    absl::Span< const geode::uuid > line_uuids )
{
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[0] ), model.line( line_uuids[0] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[0] ), model.line( line_uuids[5] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[0] ), model.line( line_uuids[2] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[1] ), model.line( line_uuids[0] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[1] ), model.line( line_uuids[1] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[1] ), model.line( line_uuids[3] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[2] ), model.line( line_uuids[1] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[2] ), model.line( line_uuids[2] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[2] ), model.line( line_uuids[4] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[3] ), model.line( line_uuids[5] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[3] ), model.line( line_uuids[6] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[3] ), model.line( line_uuids[8] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[4] ), model.line( line_uuids[3] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[4] ), model.line( line_uuids[6] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[4] ), model.line( line_uuids[7] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[5] ), model.line( line_uuids[4] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[5] ), model.line( line_uuids[7] ) );
    builder.add_corner_line_boundary_relationship(
        model.corner( corner_uuids[5] ), model.line( line_uuids[8] ) );
    for( const auto& corner_id : corner_uuids )
    {
        for( const auto& incidence :
            model.incidences( model.corner( corner_id ) ) )
        {
            OPENGEODE_EXCEPTION(
                absl::c_find( line_uuids, incidence.id() ) != line_uuids.end(),
                "[Test] All Corners incidences should be Lines" );
        }
        OPENGEODE_EXCEPTION( model.nb_incidences( corner_id ) == 3,
            "[Test] All Corners should be connected to 3 Lines" );
    }
    for( const auto& line_id : line_uuids )
    {
        for( const auto& boundary :
            model.boundaries( model.line( line_id ) ) )
        {
            OPENGEODE_EXCEPTION( absl::c_find( corner_uuids, boundary.id() )
                                     != corner_uuids.end(),
                "[Test] All Lines incidences should be Corners" );
        }
        OPENGEODE_EXCEPTION( model.nb_boundaries( line_id ) == 2,
            "[Test] All Lines should be connected to 2 Corners" );
    }
}

// Wires each line as boundary of exactly 2 surfaces, then verifies the
// line-side incidences.
void add_line_surface_boundary_relation( const geode::BRep& model,
    geode::BRepBuilder& builder,
    absl::Span< const geode::uuid > line_uuids,
    absl::Span< const geode::uuid > surface_uuids )
{
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[0] ), model.surface( surface_uuids[0] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[0] ), model.surface( surface_uuids[1] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[1] ), model.surface( surface_uuids[0] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[1] ), model.surface( surface_uuids[2] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[2] ), model.surface( surface_uuids[0] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[2] ), model.surface( surface_uuids[3] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[3] ), model.surface( surface_uuids[1] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[3] ), model.surface( surface_uuids[2] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[4] ), model.surface( surface_uuids[2] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[4] ), model.surface( surface_uuids[3] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[5] ), model.surface( surface_uuids[1] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[5] ), model.surface( surface_uuids[3] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[6] ), model.surface( surface_uuids[1] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[6] ), model.surface( surface_uuids[4] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[7] ), model.surface( surface_uuids[2] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[7] ), model.surface( surface_uuids[4] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[8] ), model.surface( surface_uuids[3] ) );
    builder.add_line_surface_boundary_relationship(
        model.line( line_uuids[8] ), model.surface( surface_uuids[4] ) );
    for( const auto& line_id : line_uuids )
    {
        for( const auto& incidence :
            model.incidences( model.line( line_id ) ) )
        {
            OPENGEODE_EXCEPTION( absl::c_find( surface_uuids, incidence.id() )
                                     != surface_uuids.end(),
                "[Test] All Lines incidences should be Surfaces" );
        }
        OPENGEODE_EXCEPTION( model.nb_incidences( line_id ) == 2,
            "[Test] All Lines should be connected to 2 Surfaces" );
    }
}

// Makes every surface a boundary of the single block, then verifies both sides.
void add_surface_block_relation( const geode::BRep& model,
    geode::BRepBuilder& builder,
    absl::Span< const geode::uuid > surface_uuids,
    absl::Span< const geode::uuid > block_uuids )
{
    for( const auto& surface_id : surface_uuids )
    {
        builder.add_surface_block_boundary_relationship(
            model.surface( surface_id ), model.block( block_uuids.front() ) );
    }
    for( const auto& surface_id : surface_uuids )
    {
        for( const auto& incidence :
            model.incidences( model.surface( surface_id ) ) )
        {
            OPENGEODE_EXCEPTION( block_uuids.front() == incidence.id(),
                "[Test] All Surfaces incidences should be Blocks" );
        }
        OPENGEODE_EXCEPTION( model.nb_incidences( surface_id ) == 1,
            "[Test] All Surfaces should be connected to 1 Block" );
    }
    OPENGEODE_EXCEPTION(
        model.nb_boundaries( block_uuids.front() ) == surface_uuids.size(),
        "[Test] The Block should be connected to all Surfaces" );
}

// Distributes the 5 surfaces over the 3 model boundaries (1 / 3 / 1) and
// verifies each surface belongs to exactly one collection.
void add_surfaces_in_model_boundaries( const geode::BRep& model,
    geode::BRepBuilder& builder,
    absl::Span< const geode::uuid > surface_uuids,
    absl::Span< const geode::uuid > boundary_uuids )
{
    builder.add_surface_in_model_boundary( model.surface( surface_uuids[0] ),
        model.model_boundary( boundary_uuids[0] ) );
    for( const auto i : geode::Range{ 1, 4 } )
    {
        builder.add_surface_in_model_boundary( model.surface( surface_uuids[i] ),
            model.model_boundary( boundary_uuids[1] ) );
    }
    builder.add_surface_in_model_boundary( model.surface( surface_uuids[4] ),
        model.model_boundary( boundary_uuids[2] ) );
    for( const auto& surface_id : surface_uuids )
    {
        OPENGEODE_EXCEPTION( model.nb_collections( surface_id ) == 1,
            "[Test] All Surfaces should be in 1 collection (of type "
            "Boundary)" );
    }
}

// NOTE(review): this function continues beyond the end of the visible chunk;
// its text is kept exactly as-is.
void add_internal_corner_relations( const geode::BRep& model,
    geode::BRepBuilder& builder,
    absl::Span< const geode::uuid > corner_uuids,
    absl::Span< const geode::uuid > surface_uuids,
    absl::Span< const geode::uuid > block_uuids )
{
    for( const auto& corner_id : corner_uuids )
    {
        builder.add_corner_surface_internal_relationship(
            model.corner( corner_id ), model.surface( surface_uuids.front() ) );
        builder.add_corner_block_internal_relationship(
            model.corner( corner_id ), model.block( block_uuids.front() ) );
    }
    for( const auto& corner_id : corner_uuids )
    {
        for( const auto& embedding :
            model.embedding_surfaces( model.corner( corner_id ) ) )
        {
            OPENGEODE_EXCEPTION( surface_uuids.front() == embedding.id(),
                "[Test] All Corners embedded surfaces should be Surfaces" );
            OPENGEODE_EXCEPTION(
                model.nb_internal_corners( embedding ) == corner_uuids.size(),
                "[Test] Surface should embed all Lines" );
        }
        for( const auto& embedding :
            model.embedding_blocks( model.corner( corner_id ) ) )
        {
            OPENGEODE_EXCEPTION( block_uuids.front() == embedding.id(),
                "[Test] All Corners embedded blocks should be Blocks" );
            OPENGEODE_EXCEPTION(
                model.nb_internal_corners( embedding ) == corner_uuids.size(),
                "[Test] Block should embed all Lines" );
        }
        OPENGEODE_EXCEPTION( model.nb_embeddings( corner_id ) == 2,
            "[Test] All Corners should be embedded to 1 Block and 1 Surface" );
        OPENGEODE_EXCEPTION( model.nb_embedding_surfaces(
model.corner( corner_id ) ) == 1, "[Test] All Corners should be embedded to 1 Surface" ); OPENGEODE_EXCEPTION( model.nb_embedding_blocks( model.corner( corner_id ) ) == 1, "[Test] All Corners should be embedded to 1 Block" ); } } void add_internal_line_relations( const geode::BRep& model, geode::BRepBuilder& builder, absl::Span< const geode::uuid > line_uuids, absl::Span< const geode::uuid > surface_uuids, absl::Span< const geode::uuid > block_uuids ) { for( const auto& line_id : line_uuids ) { builder.add_line_surface_internal_relationship( model.line( line_id ), model.surface( surface_uuids.front() ) ); builder.add_line_block_internal_relationship( model.line( line_id ), model.block( block_uuids.front() ) ); } for( const auto& line_id : line_uuids ) { for( const auto& embedding : model.embedding_surfaces( model.line( line_id ) ) ) { OPENGEODE_EXCEPTION( surface_uuids.front() == embedding.id(), "[Test] All Line embedded surfaces should be Surfaces" ); OPENGEODE_EXCEPTION( model.nb_internal_lines( embedding ) == line_uuids.size(), "[Test] Surface should embed all Lines" ); } for( const auto& embedding : model.embedding_blocks( model.line( line_id ) ) ) { OPENGEODE_EXCEPTION( block_uuids.front() == embedding.id(), "[Test] All Lines embedded blocks should be Blocks" ); OPENGEODE_EXCEPTION( model.nb_internal_lines( embedding ) == line_uuids.size(), "[Test] Block should embed all Lines" ); } OPENGEODE_EXCEPTION( model.nb_embeddings( line_id ) == 2, "[Test] All Surfaces should be embedded to 1 Block and 1 Surface" ); OPENGEODE_EXCEPTION( model.nb_embedding_surfaces( model.line( line_id ) ) == 1, "[Test] All Surfaces should be embedded to 1 Surface" ); OPENGEODE_EXCEPTION( model.nb_embedding_blocks( model.line( line_id ) ) == 1, "[Test] All Surfaces should be embedded to 1 Block" ); } } void add_internal_surface_relations( const geode::BRep& model, geode::BRepBuilder& builder, absl::Span< const geode::uuid > surface_uuids, absl::Span< const geode::uuid > block_uuids ) { 
for( const auto& surface_id : surface_uuids ) { builder.add_surface_block_internal_relationship( model.surface( surface_id ), model.block( block_uuids.front() ) ); } for( const auto& surface_id : surface_uuids ) { for( const auto& embedding : model.embedding_blocks( model.surface( surface_id ) ) ) { OPENGEODE_EXCEPTION( model.nb_internal_surfaces( embedding ) == surface_uuids.size(), "[Test] Block should embed all Surfaces" ); OPENGEODE_EXCEPTION( block_uuids.front() == embedding.id(), "[Test] All Surfaces embeddings should be Blocks" ); } OPENGEODE_EXCEPTION( model.nb_embeddings( surface_id ) == 1, "[Test] All Surfaces should be embedded to 1 Block" ); OPENGEODE_EXCEPTION( model.nb_embedding_blocks( model.surface( surface_id ) ) == 1, "[Test] All Surfaces should be embedded to 1 Block" ); } } void set_geometry( geode::BRepBuilder& builder, absl::Span< const geode::uuid > corner_uuids, absl::Span< const geode::uuid > line_uuids, absl::Span< const geode::uuid > surface_uuids ) { std::array< geode::Point3D, 6 > points; points[0] = geode::Point3D{ { 0., 0., 0. } }; points[1] = geode::Point3D{ { 0., 1., 0. } }; points[2] = geode::Point3D{ { 1., 1., 0. } }; points[3] = geode::Point3D{ { 1., 1., 2. } }; points[4] = geode::Point3D{ { 1., 2., 2. } }; points[5] = geode::Point3D{ { 2., 2., 2. 
} }; for( const auto i : geode::Range{ 6 } ) { builder.corner_mesh_builder( corner_uuids[i] ) ->create_point( points[i] ); } builder.line_mesh_builder( line_uuids[0] )->create_point( points[0] ); builder.line_mesh_builder( line_uuids[0] )->create_point( points[1] ); builder.line_mesh_builder( line_uuids[1] )->create_point( points[1] ); builder.line_mesh_builder( line_uuids[1] )->create_point( points[2] ); builder.line_mesh_builder( line_uuids[2] )->create_point( points[0] ); builder.line_mesh_builder( line_uuids[2] )->create_point( points[2] ); builder.line_mesh_builder( line_uuids[3] )->create_point( points[1] ); builder.line_mesh_builder( line_uuids[3] )->create_point( points[4] ); builder.line_mesh_builder( line_uuids[4] )->create_point( points[2] ); builder.line_mesh_builder( line_uuids[4] )->create_point( points[5] ); builder.line_mesh_builder( line_uuids[5] )->create_point( points[0] ); builder.line_mesh_builder( line_uuids[5] )->create_point( points[3] ); builder.line_mesh_builder( line_uuids[6] )->create_point( points[3] ); builder.line_mesh_builder( line_uuids[6] )->create_point( points[4] ); builder.line_mesh_builder( line_uuids[7] )->create_point( points[4] ); builder.line_mesh_builder( line_uuids[7] )->create_point( points[5] ); builder.line_mesh_builder( line_uuids[8] )->create_point( points[3] ); builder.line_mesh_builder( line_uuids[8] )->create_point( points[5] ); for( const auto i : geode::Range{ 9 } ) { builder.line_mesh_builder( line_uuids[i] )->create_edge( 0, 1 ); } builder .surface_mesh_builder< geode::TriangulatedSurface3D >( surface_uuids[0] ) ->create_point( points[0] ); builder .surface_mesh_builder< geode::TriangulatedSurface3D >( surface_uuids[0] ) ->create_point( points[1] ); builder .surface_mesh_builder< geode::TriangulatedSurface3D >( surface_uuids[0] ) ->create_point( points[2] ); builder .surface_mesh_builder< geode::TriangulatedSurface3D >( surface_uuids[0] ) ->create_polygon( { 0, 1, 2 } ); builder.surface_mesh_builder( 
surface_uuids[1] )->create_point( points[0] ); builder.surface_mesh_builder( surface_uuids[1] )->create_point( points[1] ); builder.surface_mesh_builder( surface_uuids[1] )->create_point( points[4] ); builder.surface_mesh_builder( surface_uuids[1] )->create_point( points[3] ); builder.surface_mesh_builder( surface_uuids[1] ) ->create_polygon( { 0, 1, 2 } ); builder.surface_mesh_builder( surface_uuids[1] ) ->create_polygon( { 0, 2, 3 } ); builder.surface_mesh_builder( surface_uuids[2] )->create_point( points[4] ); builder.surface_mesh_builder( surface_uuids[2] )->create_point( points[1] ); builder.surface_mesh_builder( surface_uuids[2] )->create_point( points[2] ); builder.surface_mesh_builder( surface_uuids[2] )->create_point( points[5] ); builder.surface_mesh_builder( surface_uuids[2] ) ->create_polygon( { 0, 1, 2 } ); builder.surface_mesh_builder( surface_uuids[2] ) ->create_polygon( { 0, 2, 3 } ); builder.surface_mesh_builder( surface_uuids[3] )->create_point( points[3] ); builder.surface_mesh_builder( surface_uuids[3] )->create_point( points[0] ); builder.surface_mesh_builder( surface_uuids[3] )->create_point( points[2] ); builder.surface_mesh_builder( surface_uuids[3] )->create_point( points[5] ); builder.surface_mesh_builder( surface_uuids[3] ) ->create_polygon( { 0, 1, 2 } ); builder.surface_mesh_builder( surface_uuids[3] ) ->create_polygon( { 0, 2, 3 } ); builder.surface_mesh_builder( surface_uuids[4] )->create_point( points[3] ); builder.surface_mesh_builder( surface_uuids[4] )->create_point( points[4] ); builder.surface_mesh_builder( surface_uuids[4] )->create_point( points[5] ); builder.surface_mesh_builder( surface_uuids[4] ) ->create_polygon( { 0, 1, 2 } ); } void test_boundary_ranges( const geode::BRep& model, absl::Span< const geode::uuid > corner_uuids, absl::Span< const geode::uuid > line_uuids, absl::Span< const geode::uuid > surface_uuids, absl::Span< const geode::uuid > block_uuids ) { geode::index_t line_boundary_count{ 0 }; for( const auto& 
line_boundary : model.boundaries( model.line( line_uuids[0] ) ) ) { line_boundary_count++; OPENGEODE_EXCEPTION( line_boundary.id() == corner_uuids[0] || line_boundary.id() == corner_uuids[1], "[Test] BoundaryCornerRange iteration result is not correct" ); OPENGEODE_EXCEPTION( model.is_boundary( line_boundary, model.line( line_uuids[0] ) ), "[Test] Corner should be boundary of Line" ); } OPENGEODE_EXCEPTION( line_boundary_count == 2, "[Test] BoundaryCornerRange should iterates on 2 Corners" ); geode::index_t surface_boundary_count{ 0 }; for( const auto& surface_boundary : model.boundaries( model.surface( surface_uuids[0] ) ) ) { surface_boundary_count++; OPENGEODE_EXCEPTION( surface_boundary.id() == line_uuids[0] || surface_boundary.id() == line_uuids[1] || surface_boundary.id() == line_uuids[2], "[Test] BoundaryLineRange iteration result is not correct" ); OPENGEODE_EXCEPTION( model.is_boundary( surface_boundary, model.surface( surface_uuids[0] ) ), "[Test] Line should be boundary of Surface" ); } OPENGEODE_EXCEPTION( surface_boundary_count == 3, "[Test] BoundaryLineRange should iterates on 3 Lines" ); geode::index_t block_boundary_count{ 0 }; for( const auto& block_boundary : model.boundaries( model.block( block_uuids[0] ) ) ) { block_boundary_count++; OPENGEODE_EXCEPTION( block_boundary.id() == surface_uuids[0] || block_boundary.id() == surface_uuids[1] || block_boundary.id() == surface_uuids[2] || block_boundary.id() == surface_uuids[3] || block_boundary.id() == surface_uuids[4], "[Test] BoundarySurfaceRange iteration result is not correct" ); OPENGEODE_EXCEPTION( model.is_boundary( block_boundary, model.block( block_uuids[0] ) ), "[Test] Surface should be boundary of Block" ); } OPENGEODE_EXCEPTION( block_boundary_count == 5, "[Test] BoundarySurfaceRange should iterates on 5 Surfaces" ); } void test_incidence_ranges( const geode::BRep& model, absl::Span< const geode::uuid > corner_uuids, absl::Span< const geode::uuid > line_uuids, absl::Span< const geode::uuid 
> surface_uuids, absl::Span< const geode::uuid > block_uuids ) { geode::index_t corner_incidence_count{ 0 }; for( const auto& corner_incidence : model.incidences( model.corner( corner_uuids[0] ) ) ) { corner_incidence_count++; OPENGEODE_EXCEPTION( corner_incidence.id() == line_uuids[0] || corner_incidence.id() == line_uuids[2] || corner_incidence.id() == line_uuids[5], "[Test] IncidentLineRange iteration result is not correct" ); } OPENGEODE_EXCEPTION( corner_incidence_count == 3, "[Test] IncidentLineRange should iterates on 3 Lines" ); geode::index_t line_incidence_count{ 0 }; for( const auto& line_incidence : model.incidences( model.line( line_uuids[0] ) ) ) { line_incidence_count++; OPENGEODE_EXCEPTION( line_incidence.id() == surface_uuids[0] || line_incidence.id() == surface_uuids[1], "[Test] IncidentSurfaceRange iteration result is not correct" ); } OPENGEODE_EXCEPTION( line_incidence_count == 2, "[Test] IncidentSurfaceRange should iterates on 2 Surfaces" ); const auto& surface_incidences = model.incidences( model.surface( surface_uuids[0] ) ); geode::index_t surface_incidence_count{ 0 }; for( const auto& surface_incidence : surface_incidences ) { surface_incidence_count++; OPENGEODE_EXCEPTION( surface_incidence.id() == block_uuids[0], "[Test] IncidentBlockRange iteration result is not correct" ); } OPENGEODE_EXCEPTION( surface_incidence_count == 1, "[Test] IncidentBlockRange should iterates on 1 Block" ); } void test_item_ranges( const geode::BRep& model, absl::Span< const geode::uuid > surface_uuids, absl::Span< const geode::uuid > boundary_uuids ) { const auto& boundary_items = model.model_boundary_items( model.model_boundary( boundary_uuids[1] ) ); geode::index_t boundary_item_count{ 0 }; for( const auto& boundary_item : boundary_items ) { boundary_item_count++; OPENGEODE_EXCEPTION( boundary_item.id() == surface_uuids[1] || boundary_item.id() == surface_uuids[2] || boundary_item.id() == surface_uuids[3], "[Test] ItemSurfaceRange iteration result is not 
correct" ); OPENGEODE_EXCEPTION( model.is_model_boundary_item( boundary_item, model.model_boundary( boundary_uuids[1] ) ), "[Test] Surface should be item of ModelBoundary" ); } OPENGEODE_EXCEPTION( boundary_item_count == 3, "[Test] IncidentLineRange should iterates " "on 3 Surfaces (Boundary 1)" ); } void test_reloaded_brep( const geode::BRep& model ) { OPENGEODE_EXCEPTION( model.nb_corners() == 6, "[Test] Number of Corners in reloaded BRep should be 6" ); OPENGEODE_EXCEPTION( model.nb_lines() == 9, "[Test] Number of Lines in reloaded BRep should be 9" ); OPENGEODE_EXCEPTION( model.nb_surfaces() == 5, "[Test] Number of Surfaces in reloaded BRep should be 5" ); OPENGEODE_EXCEPTION( model.nb_blocks() == 1, "[Test] Number of Blocks in reloaded BRep should be 1" ); OPENGEODE_EXCEPTION( model.nb_model_boundaries() == 3, "[Test] Number of Boundaries in reloaded BRep should be 3" ); } void test_moved_brep( const geode::BRep& model ) { OPENGEODE_EXCEPTION( model.nb_corners() == 6, "[Test] Number of Corners in moved BRep should be 6" ); OPENGEODE_EXCEPTION( model.nb_lines() == 9, "[Test] Number of Lines in moved BRep should be 9" ); OPENGEODE_EXCEPTION( model.nb_surfaces() == 5, "[Test] Number of Surfaces in moved BRep should be 5" ); OPENGEODE_EXCEPTION( model.nb_blocks() == 1, "[Test] Number of Blocks in moved BRep should be 1" ); OPENGEODE_EXCEPTION( model.nb_model_boundaries() == 3, "[Test] Number of Boundaries in moved BRep should be 3" ); } void test_clone( const geode::BRep& brep ) { geode::BRep brep2; geode::BRepBuilder builder{ brep2 }; builder.copy( brep ); OPENGEODE_EXCEPTION( brep2.nb_corners() == 6, "[Test] BRep should have 6 corners" ); OPENGEODE_EXCEPTION( brep2.nb_lines() == 9, "[Test] BRep should have 9 lines" ); OPENGEODE_EXCEPTION( brep2.nb_surfaces() == 5, "[Test] BRep should have 5 surfaces" ); OPENGEODE_EXCEPTION( brep2.nb_blocks() == 1, "[Test] BRep should have 1 block" ); OPENGEODE_EXCEPTION( brep2.nb_model_boundaries() == 3, "[Test] BRep should have 
3 model boundaries" ); const auto mappings = builder.copy_components( brep ); builder.copy_relationships( mappings, brep ); OPENGEODE_EXCEPTION( brep2.nb_corners() == 12, "[Test] BRep should have 12 corners" ); OPENGEODE_EXCEPTION( brep2.nb_lines() == 18, "[Test] BRep should have 18 lines" ); OPENGEODE_EXCEPTION( brep2.nb_surfaces() == 10, "[Test] BRep should have 10 surfaces" ); OPENGEODE_EXCEPTION( brep2.nb_blocks() == 2, "[Test] BRep should have 2 blocks" ); OPENGEODE_EXCEPTION( brep2.nb_model_boundaries() == 6, "[Test] BRep should have 6 model boundaries" ); for( const auto& corner : brep.corners() ) { const auto& new_corner = brep2.corner( mappings.at( geode::Corner3D::component_type_static() ) .in2out( corner.id() ) ); for( const auto& line : brep.incidences( corner ) ) { bool found{ false }; for( const auto& new_line : brep2.incidences( new_corner ) ) { if( mappings.at( geode::Line3D::component_type_static() ) .in2out( line.id() ) == new_line.id() ) { found = true; break; } } OPENGEODE_EXCEPTION( found, "[Test] All Corners incidences are not correct" ); } } for( const auto& line : brep.lines() ) { const auto& new_line = brep2.line( mappings.at( geode::Line3D::component_type_static() ) .in2out( line.id() ) ); for( const auto& surface : brep.incidences( line ) ) { bool found = { false }; for( const auto& new_surface : brep2.incidences( new_line ) ) { if( mappings.at( geode::Surface3D::component_type_static() ) .in2out( surface.id() ) == new_surface.id() ) { found = true; break; } } OPENGEODE_EXCEPTION( found, "[Test] All Lines incidences are not correct" ); } } for( const auto& surface : brep.surfaces() ) { const auto& new_surface = brep2.surface( mappings.at( geode::Surface3D::component_type_static() ) .in2out( surface.id() ) ); for( const auto& block : brep.incidences( surface ) ) { bool found = { false }; for( const auto& new_block : brep2.incidences( new_surface ) ) { if( mappings.at( geode::Block3D::component_type_static() ) .in2out( block.id() ) == 
new_block.id() ) { found = true; break; } } OPENGEODE_EXCEPTION( found, "[Test] All Surfaces incidences are not correct" ); } } for( const auto& model_boundary : brep.model_boundaries() ) { const auto& new_model_boundary = brep2.model_boundary( mappings.at( geode::ModelBoundary3D::component_type_static() ) .in2out( model_boundary.id() ) ); for( const auto& surface : brep.model_boundary_items( model_boundary ) ) { bool found = { false }; for( const auto& new_surface : brep2.model_boundary_items( new_model_boundary ) ) { if( mappings.at( geode::Surface3D::component_type_static() ) .in2out( surface.id() ) == new_surface.id() ) { found = true; break; } } OPENGEODE_EXCEPTION( found, "[Test] All ModelBoundaries incidences are not correct" ); } } } void test() { geode::BRep model; geode::BRepBuilder builder( model ); // This BRep represents a prism const auto corner_uuids = add_corners( model, builder ); const auto line_uuids = add_lines( model, builder ); const auto surface_uuids = add_surfaces( model, builder ); const auto block_uuids = add_blocks( model, builder ); const auto model_boundary_uuids = add_model_boundaries( model, builder ); set_geometry( builder, corner_uuids, line_uuids, surface_uuids ); add_corner_line_boundary_relation( model, builder, corner_uuids, line_uuids ); add_line_surface_boundary_relation( model, builder, line_uuids, surface_uuids ); add_surface_block_relation( model, builder, surface_uuids, block_uuids ); add_surfaces_in_model_boundaries( model, builder, surface_uuids, model_boundary_uuids ); add_internal_corner_relations( model, builder, corner_uuids, surface_uuids, block_uuids ); add_internal_line_relations( model, builder, line_uuids, surface_uuids, block_uuids ); add_internal_surface_relations( model, builder, surface_uuids, block_uuids ); OPENGEODE_EXCEPTION( model.nb_internals( block_uuids.front() ) == corner_uuids.size() + line_uuids.size() + surface_uuids.size(), "[Test] The Block should embed all Corners & Lines & Surfaces " "(that 
are internal to the " "Block)" ); test_boundary_ranges( model, corner_uuids, line_uuids, surface_uuids, block_uuids ); test_incidence_ranges( model, corner_uuids, line_uuids, surface_uuids, block_uuids ); test_item_ranges( model, surface_uuids, model_boundary_uuids ); test_clone( model ); const auto file_io = absl::StrCat( "test.", model.native_extension() ); geode::save_brep( model, file_io ); auto model2 = geode::load_brep( file_io ); test_reloaded_brep( model2 ); geode::BRep model3{ std::move( model2 ) }; test_moved_brep( model3 ); } OPENGEODE_TEST( "brep" )
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/micro/examples/micro_speech/micro_features/filterbank_util.h"

#include <assert.h>
#include <math.h>

#include "tensorflow/lite/experimental/micro/examples/micro_speech/micro_features/static_alloc.h"

// Alignment (in bytes) for channel start indices, and the granularity (in
// weights) that each channel's weight run is padded up to. Keeping both at 4
// lets the accumulation loop multiply fixed-size blocks.
#define kFilterbankIndexAlignment 4
#define kFilterbankChannelBlockSize 4

// Fills `config` with the default 32-channel, 125 Hz - 7.5 kHz setup.
void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) {
  config->num_channels = 32;
  config->lower_band_limit = 125.0f;
  config->upper_band_limit = 7500.0f;
  config->output_scale_shift = 7;
}

// Converts a frequency in Hz to the mel scale: m = 1127 * ln(1 + f/700).
static float FreqToMel(float freq) {
  return 1127.0 * log(1.0 + (freq / 700.0));
}

// Computes `num_channels` channel center frequencies (on the mel scale),
// spaced evenly between the lower and upper limits. `center_frequencies`
// must have room for `num_channels` entries. Note the first center sits one
// spacing above mel_low (the i + 1 below), so the lower limit itself is not
// a center.
static void CalculateCenterFrequencies(const int num_channels,
                                       const float lower_frequency_limit,
                                       const float upper_frequency_limit,
                                       float* center_frequencies) {
  assert(lower_frequency_limit >= 0.0f);
  assert(upper_frequency_limit > lower_frequency_limit);

  const float mel_low = FreqToMel(lower_frequency_limit);
  const float mel_hi = FreqToMel(upper_frequency_limit);
  const float mel_span = mel_hi - mel_low;
  const float mel_spacing = mel_span / (static_cast<float>(num_channels));
  int i;
  for (i = 0; i < num_channels; ++i) {
    center_frequencies[i] = mel_low + (mel_spacing * (i + 1));
  }
}

// Converts a [0, 1] float weight to fixed point with kFilterbankBits
// fractional bits (round-half-up via floor(x + 0.5)). The "unweight" is the
// complement (1 - weight), quantized the same way.
// NOTE(review): kFilterbankBits is presumably defined in filterbank_util.h —
// confirm its value fits int16_t.
static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
                                      int16_t* unweight) {
  *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
  *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
}

// Populates `state` with the per-channel frequency ranges and quantized
// triangular-filter weights for a mel filterbank over a spectrum of
// `spectrum_size` bins at `sample_rate` Hz. Returns 1 on success, 0 (after
// reporting via `error_reporter`) if the top channel would extend past the
// spectrum.
int FilterbankPopulateState(tflite::ErrorReporter* error_reporter,
                            const struct FilterbankConfig* config,
                            struct FilterbankState* state, int sample_rate,
                            int spectrum_size) {
  state->num_channels = config->num_channels;
  // One extra "channel" is used as the upper edge of the last real channel.
  const int num_channels_plus_1 = config->num_channels + 1;

  // How should we align things to index counts given the byte alignment?
  const int index_alignment =
      (kFilterbankIndexAlignment < sizeof(int16_t)
           ? 1
           : kFilterbankIndexAlignment / sizeof(int16_t));

  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->channel_frequency_starts,
      (num_channels_plus_1 * sizeof(*state->channel_frequency_starts)));
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->channel_weight_starts,
      (num_channels_plus_1 * sizeof(*state->channel_weight_starts)));
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->channel_widths,
      (num_channels_plus_1 * sizeof(*state->channel_widths)));
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->work, (num_channels_plus_1 * sizeof(*state->work)));

  // Scratch arrays are statically sized by kFeatureSliceSize; the macro
  // checks they are large enough for num_channels_plus_1 entries.
  float center_mel_freqs[kFeatureSliceSize + 1];
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      center_mel_freqs, (num_channels_plus_1 * sizeof(*center_mel_freqs)));

  int16_t actual_channel_starts[kFeatureSliceSize + 1];
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      actual_channel_starts,
      (num_channels_plus_1 * sizeof(*actual_channel_starts)));
  int16_t actual_channel_widths[kFeatureSliceSize + 1];
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      actual_channel_widths,
      (num_channels_plus_1 * sizeof(*actual_channel_widths)));

  CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit,
                             config->upper_band_limit, center_mel_freqs);

  // Always exclude DC.
  const float hz_per_sbin =
      0.5 * sample_rate / (static_cast<float>(spectrum_size) - 1);
  // Implicit float -> int truncation: start half a bin above the lower limit.
  state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
  state->end_index = 0;  // Initialized to zero here, but actually set below.

  // For each channel, we need to figure out what frequencies belong to it, and
  // how much padding we need to add so that we can efficiently multiply the
  // weights and unweights for accumulation. To simplify the multiplication
  // logic, all channels will have some multiplication to do (even if there are
  // no frequencies that accumulate to that channel) - they will be directed to
  // a set of zero weights.
  int chan_freq_index_start = state->start_index;
  int weight_index_start = 0;
  int needs_zeros = 0;

  int chan;
  for (chan = 0; chan < num_channels_plus_1; ++chan) {
    // Keep jumping frequencies until we overshoot the bound on this channel.
    int freq_index = chan_freq_index_start;
    while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) {
      ++freq_index;
    }

    const int width = freq_index - chan_freq_index_start;
    actual_channel_starts[chan] = chan_freq_index_start;
    actual_channel_widths[chan] = width;

    if (width == 0) {
      // This channel doesn't actually get anything from the frequencies, it's
      // always zero. We need then to insert some 'zero' weights into the
      // output, and just redirect this channel to do a single multiplication at
      // this point. For simplicity, the zeros are placed at the beginning of
      // the weights arrays, so we have to go and update all the other
      // weight_starts to reflect this shift (but only once).
      state->channel_frequency_starts[chan] = 0;
      state->channel_weight_starts[chan] = 0;
      state->channel_widths[chan] = kFilterbankChannelBlockSize;
      if (!needs_zeros) {
        needs_zeros = 1;
        int j;
        for (j = 0; j < chan; ++j) {
          state->channel_weight_starts[j] += kFilterbankChannelBlockSize;
        }
        weight_index_start += kFilterbankChannelBlockSize;
      }
    } else {
      // How far back do we need to go to ensure that we have the proper
      // alignment?
      const int aligned_start =
          (chan_freq_index_start / index_alignment) * index_alignment;
      const int aligned_width = (chan_freq_index_start - aligned_start + width);
      // Round the (alignment-extended) width up to a whole number of blocks.
      const int padded_width =
          (((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) *
          kFilterbankChannelBlockSize;

      state->channel_frequency_starts[chan] = aligned_start;
      state->channel_weight_starts[chan] = weight_index_start;
      state->channel_widths[chan] = padded_width;
      weight_index_start += padded_width;
    }
    chan_freq_index_start = freq_index;
  }

  // Allocate the two arrays to store the weights - weight_index_start contains
  // the index of what would be the next set of weights that we would need to
  // add, so that's how many weights we need to allocate.
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->weights, (weight_index_start * sizeof(*state->weights)));
  for (int i = 0; i < weight_index_start; ++i) {
    state->weights[i] = 0;
  }
  STATIC_ALLOC_ENSURE_ARRAY_SIZE(
      state->unweights, (weight_index_start * sizeof(*state->unweights)));
  for (int i = 0; i < weight_index_start; ++i) {
    state->unweights[i] = 0;
  }

  // Next pass, compute all the weights. Since everything has been memset to
  // zero, we only need to fill in the weights that correspond to some frequency
  // for a channel.
  const float mel_low = FreqToMel(config->lower_band_limit);
  for (chan = 0; chan < num_channels_plus_1; ++chan) {
    int frequency = actual_channel_starts[chan];
    const int num_frequencies = actual_channel_widths[chan];
    const int frequency_offset =
        frequency - state->channel_frequency_starts[chan];
    const int weight_start = state->channel_weight_starts[chan];
    // The lower edge of channel 0 is the band's lower limit; for every other
    // channel it is the previous channel's center.
    const float denom_val = (chan == 0) ? mel_low : center_mel_freqs[chan - 1];

    int j;
    for (j = 0; j < num_frequencies; ++j, ++frequency) {
      const float weight =
          (center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) /
          (center_mel_freqs[chan] - denom_val);

      // Make the float into an integer for the weights (and unweights).
      const int weight_index = weight_start + frequency_offset + j;
      QuantizeFilterbankWeights(weight, state->weights + weight_index,
                                state->unweights + weight_index);
    }
    if (frequency > state->end_index) {
      state->end_index = frequency;
    }
  }

  if (state->end_index >= spectrum_size) {
    error_reporter->Report("Filterbank end_index is above spectrum size.");
    return 0;
  }
  return 1;
}
// newTemplateConfig returns a base template for running a container. // // It uses a network strategy of just setting a loopback interface // and the default setup for devices. // // If p is nil, a default container is created. func newTemplateConfig(t *testing.T, p *tParam) *configs.Config { var allowedDevices []*devices.Rule for _, device := range specconv.AllowedDevices { allowedDevices = append(allowedDevices, &device.Rule) } if p == nil { p = &tParam{} } config := &configs.Config{ Rootfs: newRootfs(t), Capabilities: &configs.Capabilities{ Bounding: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, Permitted: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, Inheritable: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, Ambient: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, Effective: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, }, Namespaces: configs.Namespaces([]configs.Namespace{ {Type: configs.NEWNS}, {Type: configs.NEWUTS}, {Type: configs.NEWIPC}, {Type: configs.NEWPID}, {Type: configs.NEWNET}, }), Cgroups: &configs.Cgroup{ Resources: 
&configs.Resources{ MemorySwappiness: nil, Devices: allowedDevices, }, }, MaskPaths: []string{ "/proc/kcore", "/sys/firmware", }, ReadonlyPaths: []string{ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", }, Devices: specconv.AllowedDevices, Hostname: "integration", Mounts: []*configs.Mount{ { Source: "proc", Destination: "/proc", Device: "proc", Flags: defaultMountFlags, }, { Source: "tmpfs", Destination: "/dev", Device: "tmpfs", Flags: unix.MS_NOSUID | unix.MS_STRICTATIME, Data: "mode=755", }, { Source: "devpts", Destination: "/dev/pts", Device: "devpts", Flags: unix.MS_NOSUID | unix.MS_NOEXEC, Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", }, { Device: "tmpfs", Source: "shm", Destination: "/dev/shm", Data: "mode=1777,size=65536k", Flags: defaultMountFlags, }, { Source: "sysfs", Destination: "/sys", Device: "sysfs", Flags: defaultMountFlags | unix.MS_RDONLY, }, }, Networks: []*configs.Network{ { Type: "loopback", Address: "127.0.0.1/0", Gateway: "localhost", }, }, Rlimits: []configs.Rlimit{ { Type: unix.RLIMIT_NOFILE, Hard: uint64(1025), Soft: uint64(1025), }, }, } if p.userns { config.UidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}} config.GidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}} config.Namespaces = append(config.Namespaces, configs.Namespace{Type: configs.NEWUSER}) } else { config.Mounts = append(config.Mounts, &configs.Mount{ Destination: "/sys/fs/cgroup", Device: "cgroup", Flags: defaultMountFlags | unix.MS_RDONLY, }) } if p.systemd { id := strconv.FormatInt(-int64(time.Now().Nanosecond()), 36) config.Cgroups.Name = strings.ReplaceAll(t.Name(), "/", "_") + id config.Cgroups.Parent = "system.slice" config.Cgroups.ScopePrefix = "runc-test" } else { config.Cgroups.Path = "/test/integration" } return config }
North Korea has tried warnings of nuclear attack and racist diatribes to criticize U.S. President Barack Obama. Now it's turning to Abraham Lincoln. North Korea's state media have constructed an imaginary letter from the 16th U.S. president that attacks Obama's "deception" over Pyongyang's pursuit of nuclear weapons. It is the latest response from the North to rising animosity with Washington following Pyongyang's nuclear test and long-range rocket launch earlier this year. The letter, posted only in Korean on the DPRK Today website, is likely aimed at a domestic audience. DPRK Today is a relatively little known outlet compared with the North's main Korean Central News Agency and the Rodong Sinmun newspaper, which outsiders regularly check to find news from the authoritarian country. The letter is titled "Advice from Lincoln to Obama." "Hey, Obama," it begins. "I know you have a lot on your mind these days ... I've decided to give you a little advice after seeing you lost in thought before my portrait during a recent Easter Prayer Breakfast." In the letter, Lincoln derides Obama's Nobel Peace Prize-winning push to build a nuclear-free world by questioning why the United States has not taken the initiative to scale back its nuclear arsenal first, even as it asks countries such as North Korea to scrap their atomic programs. "If the United States, a country with the world's largest nuclear weapons stockpile, only pays lip service, like a parrot, and doesn't do anything actively, it will be a mockery to the entire world," the letter has Lincoln say. Although the fake Lincoln criticizes Obama, the North doesn't portray the late president as a good leader. "Hey, Obama, it's the 21st Century," the letter says. "The tactic by past American presidents, including me, who deceived the people ... is outdated. That doesn't work now. The world doesn't trust an America that doesn't take responsibility for what it says." 
North Korea's state media have often used harsh language against U.S. and South Korean leaders in times of tension.
def _propagate(self, dest, path, stamp, dirname=os.path.dirname): while path != "": oldval = dest.get(path, -1) if stamp < oldval: return dest[path] = stamp path = dirname(path)
<reponame>wuyuanqing527/FireHelper
package com.wyq.firehelper.ui.layout.pullextviewlayout;

import android.content.Context;
import android.util.AttributeSet;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.widget.LinearLayout;
import android.widget.Scroller;

import com.wyq.firehelper.base.utils.common.ScreenUtils;

import androidx.recyclerview.widget.RecyclerView;

/**
 * Vertical LinearLayout whose first child is a "head" view hidden above the
 * viewport via negative top padding. Pulling down reveals the head (with a
 * {@link LoadingDot} progress indicator inside it); releasing snaps the head
 * fully open or closed through a {@link Scroller} animation.
 */
public class ExtRecyclerViewLayout extends LinearLayout {

    // LoadingDot found inside the head view during initViewSize(); may stay
    // null if the head contains none — NOTE(review): several code paths
    // dereference it without a null check, confirm the layout guarantees it.
    private LoadingDot loadingDot;
    /**
     * RecyclerView inside the head. On pull-down it starts shifted up by the
     * loadingDot height and is eased back down as the pull progresses.
     */
    private RecyclerView headRecyclerView;
    // First child of this layout; hidden above the viewport when closed.
    private View headView;
    // Drives the smooth open/close animation; stepped in computeScroll().
    private Scroller scroller;
    /**
     * Measured height of the head view, in pixels.
     */
    private float headHeight = 0;
    /**
     * Total measured height of all children.
     */
    private float totalHeight = 0;
    /**
     * Scroll damping: finger movement is divided by this factor, so larger
     * values make the content track the finger more slowly.
     */
    private float dampingFactor = 1.0f;
    /**
     * Vertical distance of the most recent move event; positive is downward.
     */
    private float deltaY = 0;
    private float lastX = 0;
    private float lastY = 0;
    private float lastInterceptX = 0;
    private float lastIntercepty = 0;

    public ExtRecyclerViewLayout(Context context) {
        this(context, null);
    }

    public ExtRecyclerViewLayout(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public ExtRecyclerViewLayout(Context context, AttributeSet attrs, int defStyleAttr) {
        this(context, attrs, defStyleAttr, 0);
    }

    public ExtRecyclerViewLayout(Context context, AttributeSet attrs, int defStyleAttr, int defStyleRes) {
        super(context, attrs, defStyleAttr, defStyleRes);
        scroller = new Scroller(context);
    }

    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        // Logger.i("onSizeChanged " + h + " oldHeight:" + oldh);
        // Re-derive sizes whenever our dimensions change.
        initViewSize();
    }

    // @Override
    // public void onWindowFocusChanged(boolean hasWindowFocus) {
    //     super.onWindowFocusChanged(hasWindowFocus);
    //     if (hasWindowFocus) {
    //         Logger.i("hasWindowFocus");
    //         initViewSize();
    //     }
    // }

    /**
     * Measures all children, then sizes this layout: width follows the first
     * child under AT_MOST, height is the sum of all children under AT_MOST.
     */
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);
        int measuredWidth = 0;
        int measuredHeight = 0;
        int childCount = getChildCount();
        measureChildren(widthMeasureSpec, heightMeasureSpec);
        int widthSpaceSize = MeasureSpec.getSize(widthMeasureSpec);
        int widthSpecMode = MeasureSpec.getMode(widthMeasureSpec);
        int heightSpaceSize = MeasureSpec.getSize(heightMeasureSpec);
        int heightSpecMode = MeasureSpec.getMode(heightMeasureSpec);
        if (childCount == 0) {
            setMeasuredDimension(0, 0);
        } else if (widthSpecMode == MeasureSpec.AT_MOST && heightSpecMode == MeasureSpec.AT_MOST) {
            measuredWidth = getChildAt(0).getMeasuredWidth();
            for (int i = 0; i < childCount; i++) {
                measuredHeight += getChildAt(i).getMeasuredHeight();
            }
            setMeasuredDimension(measuredWidth, measuredHeight);
        } else if (widthSpecMode == MeasureSpec.AT_MOST) {
            measuredWidth = getChildAt(0).getMeasuredWidth();
            setMeasuredDimension(measuredWidth, heightSpaceSize);
        } else if (heightSpecMode == MeasureSpec.AT_MOST) {
            for (int i = 0; i < childCount; i++) {
                measuredHeight += getChildAt(i).getMeasuredHeight();
            }
            setMeasuredDimension(widthSpaceSize, measuredHeight);
        }
    }

    @Override
    protected void onLayout(boolean changed, int l, int t, int r, int b) {
        super.onLayout(changed, l, t, r, b);
        //do nothing
        // initViewSize();
    }

    /**
     * Initialises sizing state: total height, the head view and its height,
     * locates the LoadingDot and RecyclerView inside the head, and hides the
     * head by applying a negative top padding equal to its height.
     */
    private void initViewSize() {
        int childCount = getChildCount();
        if (childCount < 1) {
            return;
        }
        totalHeight = 0;
        for (int i = 0; i < childCount; i++) {
            totalHeight += getChildAt(i).getMeasuredHeight();
        }
        headView = getChildAt(0);
        if (headView instanceof ViewGroup) {
            int count = ((ViewGroup) headView).getChildCount();
            if (count > 1) {
                // Grab the first LoadingDot and first RecyclerView only.
                for (int i = 0; i < count; i++) {
                    View view = ((ViewGroup) headView).getChildAt(i);
                    if (view instanceof LoadingDot && loadingDot == null) {
                        loadingDot = (LoadingDot) view;
                    } else if (view instanceof RecyclerView && headRecyclerView == null) {
                        headRecyclerView = (RecyclerView) view;
                    }
                }
            }
        }
        headHeight = headView.getMeasuredHeight();
        // Hide the head above the viewport (closed state).
        if (getPaddingTop() != -(int) headHeight) {
            setPadding(0, -(int) headHeight, 0, 0);
        }
        // Logger.i("totalHeight:" + totalHeight + " headHeight:" + headHeight);
    }

    /**
     * Intercepts moves that are predominantly vertical so this layout (not a
     * child) handles the pull gesture.
     */
    @Override
    public boolean onInterceptTouchEvent(MotionEvent event) {
        boolean intercept = false;
        float x = event.getX();
        float y = event.getY();
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
                intercept = false;
                break;
            case MotionEvent.ACTION_MOVE:
                float dx = x - lastInterceptX;
                float dy = y - lastIntercepty;
                // Logger.i("intercept " + "lastIntercepty:" + lastIntercepty + " maxheight:" + maxHeadHeight + " scrolly:" + getScrollY() + " gety:" + getY() + " gettop:" + getTop());
                if (Math.abs(dy) > Math.abs(dx)) {
                    intercept = true;
                } else {
                    intercept = false;
                }
                break;
            case MotionEvent.ACTION_UP:
                intercept = false;
                break;
            default:
                break;
        }
        lastX = x;
        lastY = y;
        lastInterceptX = x;
        lastIntercepty = y;
        // Logger.i(intercept + "");
        return intercept;
    }

    /**
     * Tracks the pull while the finger moves; on release snaps the head
     * fully open or closed depending on how far it was dragged.
     */
    @Override
    public boolean onTouchEvent(MotionEvent event) {
        float x = event.getX();
        float y = event.getY();
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
                break;
            case MotionEvent.ACTION_MOVE:
                float dx = x - lastX;
                float dy = y - lastY;
                // Logger.i("intercept " + " maxheight:" + maxHeadHeight + " dy:" + dy + " scrolly:" + getScrollY() + " getTranslationY:" + getTranslationY() + " gety:" + getY() + " gettop:" + getTop());
                deltaY = dy;
                if (Math.abs(dy) > Math.abs(dx)) {
                    // if ((dy < 0 && getScrollY() <= 0) || (dy > 0 && getScrollY() >= -maxHeadHeight))
                    pullHeadView(dy);
                }
                break;
            case MotionEvent.ACTION_UP:
                int scrollY = getScrollY();
                float baseHeight = -headHeight / 2;
                if (scrollY <= baseHeight) { // open
                    // If released within the head height while the last move
                    // was upward, close the head directly.
                    if (scrollY > -headHeight && deltaY < 0) {
                        resetHeadView(0);
                    } else {
                        resetHeadView((int) -headHeight);
                    }
                } else { // close
                    // If over-scrolled past the bottom, settle back so the
                    // bottom is exactly visible.
                    if (scrollY > getResetHeight()) {
                        resetHeadView(getResetHeight());
                    } else if (scrollY < 0) {
                        // Head almost closed: finish closing it.
                        resetHeadView(0);
                    }
                }
                deltaY = 0;
                break;
            default:
                break;
        }
        lastX = x;
        lastY = y;
        return true;
    }

    /**
     * Scroll offset at which the layout's bottom edge is exactly visible;
     * 0 when the content already fits on screen.
     */
    private int getResetHeight() {
        int screenHeight = ScreenUtils.getHeightPX(getContext());
        int navigationBarHeight = ScreenUtils.getNavigationBarHeight(getContext()); // reported as a virtual value even on devices with physical keys
        // Logger.i(screenHeight + " nav:" + navigationBarHeight + " scrollY:" + getScrollY() + " getHeight:" + getHeight() + " getTop:" + getTop() + " getPaddingTop:" + getPaddingTop() + " getPaddingBottom:" + getPaddingBottom() + " getBottom:" + getBottom() + " headHeight:" + headHeight);
        float bottomHideHeight = navigationBarHeight + (totalHeight - headHeight + getTop() - screenHeight);
        // Logger.i("bottomHideHeight:" + bottomHideHeight);
        if (bottomHideHeight < 0) {
            return 0;
        }
        return (int) bottomHideHeight;
    }

    /**
     * Animates to scroll offset {@code y} and resets the translations applied
     * during the pull. y >= 0 means the head ends hidden; y < 0 fully shown.
     */
    private void resetHeadView(int y) {
        smoothScrollTo(y);
        if (y >= 0) { // head hidden
            loadingDot.setPercent(0);
        } else { // head fully shown
            loadingDot.setPercent(1);
        }
        headRecyclerView.setTranslationY(0);
        loadingDot.setTranslationY(0);
        if (getScrollY() > -headHeight) {
            headView.setTranslationY(0);
        } else {
            // Pulled beyond the head height: handled specially — the head
            // moves back up at half speed, see computeScroll().
        }
    }

    /**
     * Applies one move step of {@code dy} (damped), updating the LoadingDot
     * progress and the head/recycler translations along the way.
     */
    private void pullHeadView(float dy) {
        int scrollY = getScrollY();
        int absScrollY = Math.abs(scrollY);
        scrollBy(0, (int) (-dy / dampingFactor));
        if (dy < 0) { // swiping up
            // resetHeadView(0);
            loadingDot.setVisibility(View.GONE);
            if (scrollY > 0) {
                dampingFactor = 2;
            } else {
                dampingFactor = 1;
            }
            return;
        }
        if (absScrollY <= headHeight) {
            dampingFactor = 1;
            if (scrollY < 0) {
                float percent = absScrollY / headHeight;
                loadingDot.setPercent(percent);
                loadingDot.setVisibility(View.VISIBLE);
                if (percent < 0.5) {
                    headRecyclerView.setTranslationY(-loadingDot.getMeasuredHeight());
                } else if (percent > 0.5) {
                    // Past the halfway point, ease the recycler and dot down.
                    float transY = headHeight * (percent - 0.5f);
                    headRecyclerView.setTranslationY(transY - loadingDot.getMeasuredHeight());
                    loadingDot.setTranslationY(transY);
                }
            }
        } else {
            dampingFactor = 2; // increase damping past the head height
            loadingDot.setVisibility(View.GONE);
            float transY = (absScrollY - headHeight) / 2;
            headView.setTranslationY(-transY);
        }
    }

    /**
     * Starts a 500 ms scroller animation from the current offset to y.
     */
    private void smoothScrollTo(int y) {
        int scrollY = getScrollY();
        int dy = y - scrollY;
        scroller.startScroll(0, scrollY, 0, dy, 500);
        invalidate();
    }

    @Override
    public void computeScroll() {
        super.computeScroll();
        if (scroller.computeScrollOffset()) {
            int scrollY = scroller.getCurrY();
            scrollTo(scroller.getCurrX(), scrollY);
            if (scrollY < -headHeight) { // pulled beyond head height: head follows at half speed
                headView.setTranslationY((scrollY + headHeight) / 2);
            }
            postInvalidate();
        }
    }

    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
    }
}
package com.alipay.api.domain;

import com.alipay.api.AlipayObject;
import com.alipay.api.internal.mapping.ApiField;

/**
 * Request model for modifying warehouse information of Koubei retail
 * suppliers and merchants.
 *
 * @author <NAME>
 * @since 1.0, 2018-09-17 09:53:53
 */
public class KoubeiRetailWmsWarehouseModifyModel extends AlipayObject {

    private static final long serialVersionUID = 1556564512818548869L;

    /**
     * Extended info. Known keys: ADMIN_PHONE (administrator phone),
     * CITY_MANAGER_PHONE (city manager phone),
     * PURCHASE_MANAGER_PHONE (purchasing manager phone).
     */
    @ApiField("ext_info")
    private String extInfo;

    /**
     * Safety-stock switch: "ON" enables, "OFF" disables.
     */
    @ApiField("safety_inventory_switch")
    private String safetyInventorySwitch;

    /**
     * Warehouse code.
     */
    @ApiField("warehouse_code")
    private String warehouseCode;

    public String getExtInfo() {
        return this.extInfo;
    }

    public void setExtInfo(String extInfo) {
        this.extInfo = extInfo;
    }

    public String getSafetyInventorySwitch() {
        return this.safetyInventorySwitch;
    }

    public void setSafetyInventorySwitch(String safetyInventorySwitch) {
        this.safetyInventorySwitch = safetyInventorySwitch;
    }

    public String getWarehouseCode() {
        return this.warehouseCode;
    }

    public void setWarehouseCode(String warehouseCode) {
        this.warehouseCode = warehouseCode;
    }

}
from litex.soc.cores.cpu.neorv32.core import NEORV32
The total apprehensions of migrants along the U.S/Mexico border increased 33 percent over the previous year’s numbers, a new report from U.S. Customs and Border Protection revealed. While the apprehension numbers reported in January decreased slightly from December 2016, the comparison to the prior year’s numbers show Border Patrol agents are still dealing with massive numbers of illegal border crossers. Focusing on Unaccompanied Alien Children (UAC) and Family Unit Aliens (FMUA), the apprehensions over the prior year are even higher. In January 2017, 4,421 UAC’s were apprehended along the southern border with Mexico. This compares to the prior year’s number of UAC apprehensions of 3,113–a 30 percent increase. FMUA apprehensions in that same monthly comparison increased by nearly 300 percent. In January 2016, 3,145 FMUAs were apprehended after crossing the border between the ports of entry. This compares to 4,421 in January 2017–a 29 percent increase. Total apprehensions in January 2017 were 31,575, up from 23,767 the year before. While many outlets are reporting the January numbers are down, that is only true in a month-to-month comparison. CBP numbers for January in years past have traditionally been lower than the prior month’s numbers. During the first four months of the new fiscal year, 168,222 total immigrant apprehensions occurred along the southwest border from Texas to California. That number is up by 25 percent compared to the first four months of FY 2016 – 126,334. “We’re not a deterrent because they’re looking for us, so we can be standing there and [the smuggler will] still send them across,” Supervisory Border Patrol Agent Marlene Castro told Fox News Channel’s William La Jeunesse during a December interview. 
“It’s been a group, and then maybe five minutes later another group, and then half an hour later you’ll see another one.” CBP officials took advantage of the January lull, compared to the previous month, mothballing two temporary detention facilities that were opened at the end of 2016 to deal with the massive increases in illegal border crossers, Breitbart Texas reported. The Tornillo station, located about 40 miles east of El Paso, will be placed in a “warm standby” status in the event it is needed to return to an operational status in the future. The stated basic maintenance, oversight, and 24-hour security will continue at the facility to make sure it is ready if the migrant crossings surge again. It is anticipated CBP could bring this facility, and the recently closed Donna facility, back into fully operational status within a 24-hour period if needed. Bob Price serves as associate editor and senior political news contributor for Breitbart Texas. He is a founding member of the Breitbart Texas team. Follow him on Twitter @BobPriceBBTX. This article has been updated to reflect adjustments to apprehension rates of increase, overall figures in particular. The original story noted that total apprehensions were occurring at a rate of 132 percent.
<filename>app/src/main/java/com/google/firebase/udacity/friendlychat/messages/ActivityMessages.java
package com.google.firebase.udacity.friendlychat.messages;

import android.content.Context;
import android.content.Intent;
import android.support.annotation.NonNull;
import android.support.v4.app.FragmentTransaction;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.widget.Toast;

import com.firebase.ui.auth.AuthUI;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.auth.FirebaseUser;
import com.google.firebase.udacity.friendlychat.R;
import com.google.firebase.udacity.friendlychat.common.base.ActivityBase;

import java.util.Arrays;

/**
 * Hosts the chat screen. Watches Firebase auth state: when a user is signed
 * in, loads {@link FragmentMessages} with their display name; otherwise
 * launches the FirebaseUI sign-in flow (email + Google providers).
 */
public class ActivityMessages extends ActivityBase implements FragmentMessages.OnFragmentMessagesInteractionListener {

    // Request code identifying the FirebaseUI sign-in flow in onActivityResult.
    private static final int RC_SIGN_IN = 1000;
    private FirebaseAuth firebaseAuth;
    // Display name shown on messages; ANONYMOUS until auth completes.
    private String username;
    private static final String ANONYMOUS = "anonymous";

    /**
     * Convenience launcher for this activity.
     */
    public static void startActivity(Context context) {
        Intent i = new Intent(context, ActivityMessages.class);
        context.startActivity(i);
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_messages);
        username = ANONYMOUS;
        firebaseAuth = FirebaseAuth.getInstance();
        // if (savedInstanceState == null) {
        //     loadFragment();
        // }
        // Fragment loading is driven by the auth listener instead, so the
        // fragment always gets the resolved username.
        setListeners();
    }

    @Override
    protected void initializeViews() {
    }

    @Override
    protected void setListeners() {
        firebaseAuth.addAuthStateListener(authStateListener);
    }

    /**
     * Attaches the messages fragment. commitAllowingStateLoss is used because
     * the auth callback may fire after onSaveInstanceState.
     */
    @Override
    protected void loadFragment() {
        FragmentTransaction fragmentTransaction = getSupportFragmentManager().beginTransaction();
        fragmentTransaction.add(R.id.frmContainer, FragmentMessages.newInstance(username)).commitAllowingStateLoss();
    }

    @Override
    public void onAuthenticationListener() {
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuInflater inflater = getMenuInflater();
        inflater.inflate(R.menu.main_menu, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        switch (item.getItemId()) {
            case R.id.sign_out_menu:
                // The auth listener will observe the null user and relaunch
                // the sign-in flow.
                AuthUI.getInstance().signOut(this);
                return true;
            default:
                return super.onOptionsItemSelected(item);
        }
    }

    /**
     * Handles the result of the FirebaseUI sign-in flow: reload the fragment
     * on success, finish the activity if the user cancelled.
     */
    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        if (requestCode == RC_SIGN_IN) {
            if (resultCode == RESULT_OK) {
                Toast.makeText(this, "Signed in!", Toast.LENGTH_SHORT).show();
                loadFragment();
            } else if (resultCode == RESULT_CANCELED) {
                // Sign in was canceled by the user, finish the activity
                Toast.makeText(this, "Sign in canceled", Toast.LENGTH_SHORT).show();
                finish();
            }
        }
    }

    // Fires on attach and on every sign-in/sign-out transition.
    private FirebaseAuth.AuthStateListener authStateListener = new FirebaseAuth.AuthStateListener() {
        @Override
        public void onAuthStateChanged(@NonNull FirebaseAuth firebaseAuth) {
            FirebaseUser user = firebaseAuth.getCurrentUser();
            if (user != null) {
                username = user.getDisplayName();
                loadFragment();
            } else {
                // No user: launch FirebaseUI with email + Google providers.
                startActivityForResult(AuthUI.getInstance()
                        .createSignInIntentBuilder()
                        .setIsSmartLockEnabled(false)
                        .setProviders(
                                Arrays.asList(new AuthUI.IdpConfig.Builder(AuthUI.EMAIL_PROVIDER).build(),
                                        new AuthUI.IdpConfig.Builder(AuthUI.GOOGLE_PROVIDER).build())).build(),
                        RC_SIGN_IN);
            }
        }
    };
}
<filename>warn.go<gh_stars>100-1000 package main import ( "fmt" "log" "sync" ) type warningCollector struct { mu sync.Mutex warnings []string } func (w *warningCollector) warn(s string) { w.mu.Lock() w.warnings = append(w.warnings, s) w.mu.Unlock() log.Printf("WARNING: %s", s) } func (w *warningCollector) Warnf(format string, a ...interface{}) { w.warn(fmt.Sprintf(format, a...)) } func (w *warningCollector) Warn(a ...interface{}) { w.warn(fmt.Sprint(a...)) } func (w *warningCollector) Warns() []string { var l []string w.mu.Lock() l = append(l, w.warnings...) w.mu.Unlock() return l } // WarningCollector is the default warning collector var WarningCollector = &warningCollector{} // Warnf logs a warning func Warnf(format string, a ...interface{}) { WarningCollector.Warnf(format, a...) } // Warns returns the logged warnings func Warns() []string { return WarningCollector.Warns() }
Decidability of finding the fastest Turing machine to compute a function with a finite domain Input Output 1 1 2 4 3 9 4 16 Let's say we have some (total) function \(f\), defined on some finite domain \(A\): $$ f : A \mapsto B $$ For example, f might be a square function defined on \( \{1, 2, 3, 4\} \): The question now arises, is it possible to define an effective procedure that will determine the fastest Turing machine that computes the function \(f\). Without any limitations on the Turing machine, we can simply construct a Turing machine that computes f in one step - you basically encode f into the transition function of the Turing machine. Theorem: There is an effective procedure for finding the fastest Turing machine that computes a function f with finite domain. The fastest Turing machine takes just 1 step. Proof: To do this we construct a machine \(T_1\) that has a different tape symbol for each input value to the function - i.e. one for each element in the domain \( A \). The transition function of \(T_1\) then simply maps from the read element \(a\) to the output element \( f(a) \). The machine \(T_1\) writes \( f(a) \) to the tape, overwriting \( a \), then halts. Q.E.D. I don't find this a very interesting proof, since it doesn't really follow the ethos of Turing machines - which it violates by throwing more tape symbols at the problem until it can do it in one step. A more interesting problem is the case where the tape alphabet \( \Gamma \) is fixed - e.g. to the binary case, where it consists of just the set \( \{0, 1, b\} \) - zero, one, and blank. This corresponds a lot better to 'real' computers. So let's fix the tape alphabet and ask the question again: Given a fixed tape alphabet \( \Gamma \), is there an effective procedure for finding the fastest Turing machine that computes the given function \(f\), where \(f\) has a finite domain. Let's also say that we have some 'reference' Turing machine \(T_{ref}\) that computes this function. 
Such a reference Turing machine can always be constructed by simply handling each possible input value as a special case and returning the corresponding output value. In normal programming pseudocode it could look something like if input == 1 return 1 else if input == 2 return 4 else if input == 3 return 9 else if input == 4 return 16 First we need to define what it means to be fastest. I'll define the speed of a Turing machine that computes such a function (finite domain, total) to be the greatest number of steps that the machine takes over all of its inputs - so it's a worst case measure. Given that definition of speed, the fastest turing machine that computes the function f will be the machine that takes the least number of steps for its worst case input - e.g. the input that it takes the largest number of steps on. Theorem: Given a fixed tape alphabet \( \Gamma \), There is an effective procedure for finding the fastest Turing machine that computes the given function \(f\), where \(f\) has a finite domain. (e.g. the problem is decidable) Proof: To prove this, we will first show that there is a finite number of effectively-different Turing machines that need to be considered. Then the fastest can be found by testing each Turing machine in turn, and selecting the fastest. Suppose the worst case number of steps for the reference Turing machine \( T_{ref} \) is \(N_{ref}\). Lemma: The maximum number of states that may be visited after N steps is $$ |\Gamma| ^ N $$ where \( |\Gamma| \) is the number of different tape symbols. Proof: After zero steps the machine is in the initial state. From the initial state, it may transition to a different state based on each possible tape symbol, so after 1 step it may be in \( |\Gamma| \) states. Likewise, after two steps it may be in one of \( |\Gamma|^2 \) states. So after N steps it may be in one of up to \( |\Gamma|^N \) states. Q.E.D. Consider any Turing machine faster than \( T_{ref} \). 
Since its worst case number of steps will be less than \(N_{ref}\), when run on any input in A, the number of possible states it may be in is less than or equal to \( |\Gamma|^ {N_{ref}} \). Therefore we need only consider Turing machines with number of states less than or equal to \( M = |\Gamma|^ {N_{ref}} \). Any machine with more states than \(M\) will be effectively the same as one of the machines with number of states <= \(M\), since the extra states can only be reached after more than \(N_{ref}\) steps. The procedure to find the fastest Turing machine \(T_{f}\), then, is as follows: Consider each Turing machine \(T_{i}\) with \(M\) states, and a given transition function using the \(M\) states. The number of such Turing machines is finite, and bounded by an exponential function of \(M\) and \(|\Gamma|\). For each element \(a\) in the domain \(A\), run \(T_{i}\) with input \(a\), for up to \(N_{ref}\) steps. If it halts with incorrect output, reject the machine. If it does not halt after \(N_{ref}\) steps, we can reject the machine, as it will not be faster than \(T_{ref}\), and therefore cannot be the fastest Turing machine. If it computes \(f(a)\) correctly for all elements in \(A\), and it has the smallest worst-case running time of all Turing machines considered so far, remember it. Once all potential Turing machines have been considered, the one with the lowest worst-case running time that computes \(f\) correctly will be the fastest Turing machine that computes \(f\) that we are looking for. Q.E.D. Remarks There are a couple of interesting things about this proof. First, we avoid the issue you usually get when enumerating over Turing machines run on some input - that the machine may not halt. We sidestep this problem as we have an upper bound on the number of steps to run the machine for. 
Secondly, a lot of functions we deal with in computer programs are defined on finite domains, especially functions that take as input finite precision integers or floating point numbers. For example, the number of single precision floating point values is of course finite. So in theory, we can write a program to compute, for example, the fastest sqrt(float) function. There's a humorous theorem called the 'Full employment theorem', which says that writing a compiler that outputs the smallest program for some function is impossible, as the problem is uncomputable. However, this article has shown that finding the fastest program is computable, at least for a finite domain. A related question is if finding the fastest program for infinitely sized domains, e.g. the integers, is computable. I'm not sure about this one, let me know if you have a proof either way :) Edit, 7th May 2014: Reddit user wildeye pointed me at Manual Blum's work: A Machine-Independent Theory of the Complexity of Recursive Functions (PDF) which has some things to say about bounds on more general functions.
# Read the five antenna heights, then the communication threshold k.
heights = [int(input()) for _ in range(5)]
limit = int(input())

# Communication works iff no later antenna exceeds an earlier one by more
# than the threshold (same ordered-pair check as a nested index loop).
ok = all(
    heights[hi] - heights[lo] <= limit
    for lo in range(len(heights))
    for hi in range(lo + 1, len(heights))
)

print('Yay!' if ok else ':(')
<reponame>astrophysicist87/PHripser<gh_stars>0 /* Ripser: a lean C++ code for computation of Vietoris-Rips persistence barcodes MIT License Copyright (c) 2015–2021 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to make your Enhancements available either publicly, or directly to the author of this software, without imposing a separate written license agreement for such Enhancements, then you hereby grant the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare derivative works, incorporate into other computer software, distribute, and sublicense such enhancements or derivative works thereof, in binary and source code form. 
*/

//#define USE_COEFFICIENTS

//#define INDICATE_PROGRESS
#define PRINT_PERSISTENCE_PAIRS

//#define USE_ROBINHOOD_HASHMAP

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <fstream>
#include <iostream>
#include <numeric>
#include <queue>
#include <sstream>
#include <unordered_map>

#ifdef USE_ROBINHOOD_HASHMAP

#include "robin-hood-hashing/src/include/robin_hood.h"

// Aliases so the rest of the code is independent of the chosen hash map
// implementation (robin-hood vs. std::unordered_map).
template <class Key, class T, class H, class E>
using hash_map = robin_hood::unordered_map<Key, T, H, E>;
template <class Key> using hash = robin_hood::hash<Key>;

#else

template <class Key, class T, class H, class E>
using hash_map = std::unordered_map<Key, T, H, E>;
template <class Key> using hash = std::hash<Key>;

#endif

// value_t: filtration values (diameters); index_t: simplex indices;
// coefficient_t: field coefficients (only the low num_coefficient_bits
// are stored when USE_COEFFICIENTS is enabled).
typedef float value_t;
typedef int64_t index_t;
typedef uint16_t coefficient_t;

#ifdef INDICATE_PROGRESS
// Minimum interval between console progress updates.
static const std::chrono::milliseconds time_step(40);
#endif

// ANSI escape sequence: carriage return + erase to end of line.
static const std::string clear_line("\r\033[K");

static const size_t num_coefficient_bits = 8;

// Largest simplex index representable once the coefficient bits are packed
// into the same word as the index (see entry_t below).
static const index_t max_simplex_index =
    (1l << (8 * sizeof(index_t) - 1 - num_coefficient_bits)) - 1;

// Throws std::overflow_error when a simplex index no longer fits. With
// coefficients, indices are limited to max_simplex_index; without them the
// full signed range is usable, so only a negative (wrapped) value signals
// overflow.
void check_overflow(index_t i) {
	if
#ifdef USE_COEFFICIENTS
	    (i > max_simplex_index)
#else
	    (i < 0)
#endif
		throw std::overflow_error("simplex index " + std::to_string((uint64_t)i) +
		                          " in filtration is larger than maximum index " +
		                          std::to_string(max_simplex_index));
}

// Precomputed table of binomial coefficients B[n][k], filled with Pascal's
// rule. Each row's (largest) middle entry is overflow-checked as it is built.
class binomial_coeff_table {
	std::vector<std::vector<index_t>> B;

public:
	binomial_coeff_table(index_t n, index_t k) : B(n + 1) {
		for (index_t i = 0; i <= n; ++i) {
			B[i].resize(k + 1, 0);
			B[i][0] = 1;
			for (index_t j = 1; j < std::min(i, k + 1); ++j)
				B[i][j] = B[i - 1][j - 1] + B[i - 1][j];
			if (i <= k) B[i][i] = 1;
			// The middle column holds the row maximum, so checking it
			// covers the whole row.
			check_overflow(B[i][std::min(i >> 1, k)]);
		}
	}

	index_t operator()(index_t n, index_t k) const {
		assert(n < B.size() && k < B[n].size() && n >= k - 1);
		return B[n][k];
	}
};

// Odd-trial-division primality test (p <= n/p avoids overflow of p*p).
bool is_prime(const coefficient_t n) {
	if (!(n & 1) || n < 2) return n == 2;
	for (coefficient_t p = 3; p <= n / p; p += 2)
		if (!(n % p)) return false;
	return true;
}

// Table of multiplicative inverses modulo m (m assumed prime; entry 0 is
// unused and left zero-initialized).
std::vector<coefficient_t> multiplicative_inverse_vector(const coefficient_t m) {
	std::vector<coefficient_t> inverse(m);
	inverse[1] = 1;
	// m = a * (m / a) + m % a
	// Multiplying with inverse(a) * inverse(m % a):
	// 0 = inverse(m % a) * (m / a) + inverse(a)  (mod m)
	for (coefficient_t a = 2; a < m; ++a) inverse[a] = m - (inverse[m % a] * (m / a)) % m;
	return inverse;
}

#ifdef USE_COEFFICIENTS

// A simplex index packed together with its field coefficient into a single
// word via bit-fields; static_assert below guarantees no size overhead.
struct __attribute__((packed)) entry_t {
	index_t index : 8 * sizeof(index_t) - num_coefficient_bits;
	coefficient_t coefficient : num_coefficient_bits;
	entry_t(index_t _index, coefficient_t _coefficient)
	    : index(_index), coefficient(_coefficient) {}
	entry_t(index_t _index) : index(_index), coefficient(0) {}
	entry_t() : index(0), coefficient(0) {}
};

static_assert(sizeof(entry_t) == sizeof(index_t), "size of entry_t is not the same as index_t");

entry_t make_entry(index_t i, coefficient_t c) { return entry_t(i, c); }
index_t get_index(const entry_t& e) { return e.index; }
index_t get_coefficient(const entry_t& e) { return e.coefficient; }
void set_coefficient(entry_t& e, const coefficient_t c) { e.coefficient = c; }

std::ostream& operator<<(std::ostream& stream, const entry_t& e) {
	stream << get_index(e) << ":" << get_coefficient(e);
	return stream;
}

#else

// Without coefficients an entry is just the simplex index; these accessors
// keep the same API with the coefficient fixed at 1 and set_coefficient a
// no-op.
typedef index_t entry_t;
const index_t get_index(const entry_t& i) { return i; }
index_t get_coefficient(const entry_t& i) { return 1; }
entry_t make_entry(index_t _index, coefficient_t _value) { return entry_t(_index); }
void set_coefficient(entry_t& e, const coefficient_t c) {}

#endif

const entry_t& get_entry(const entry_t& e) { return e; }

// (diameter, index) pairs in both field orders; the uniform get_diameter /
// get_index accessors let templated code accept either layout.
typedef std::pair<value_t, index_t> diameter_index_t;
value_t get_diameter(const diameter_index_t& i) { return i.first; }
index_t get_index(const diameter_index_t& i) { return i.second; }

typedef std::pair<index_t, value_t> index_diameter_t;
index_t get_index(const index_diameter_t& i) { return i.first; }
value_t get_diameter(const index_diameter_t& i) { return i.second; }
// An entry tagged with its diameter: (diameter, (index[, coefficient])).
struct diameter_entry_t : std::pair<value_t, entry_t> {
	using std::pair<value_t, entry_t>::pair;
	diameter_entry_t(value_t _diameter, index_t _index, coefficient_t _coefficient)
	    : diameter_entry_t(_diameter, make_entry(_index, _coefficient)) {}
	diameter_entry_t(const diameter_index_t& _diameter_index, coefficient_t _coefficient)
	    : diameter_entry_t(get_diameter(_diameter_index),
	                       make_entry(get_index(_diameter_index), _coefficient)) {}
	diameter_entry_t(const diameter_index_t& _diameter_index)
	    : diameter_entry_t(get_diameter(_diameter_index),
	                       make_entry(get_index(_diameter_index), 0)) {}
	diameter_entry_t(const index_t& _index) : diameter_entry_t(0, _index, 0) {}
};

const entry_t& get_entry(const diameter_entry_t& p) { return p.second; }
entry_t& get_entry(diameter_entry_t& p) { return p.second; }
const index_t get_index(const diameter_entry_t& p) { return get_index(get_entry(p)); }
const coefficient_t get_coefficient(const diameter_entry_t& p) {
	return get_coefficient(get_entry(p));
}
const value_t& get_diameter(const diameter_entry_t& p) { return p.first; }
void set_coefficient(diameter_entry_t& p, const coefficient_t c) {
	set_coefficient(get_entry(p), c);
}

// Comparator object: larger diameter first, ties broken by smaller index.
template <typename Entry> struct greater_diameter_or_smaller_index_comp {
	bool operator()(const Entry& a, const Entry& b) {
		return greater_diameter_or_smaller_index(a, b);
	}
};

template <typename Entry>
bool greater_diameter_or_smaller_index(const Entry& a, const Entry& b) {
	return (get_diameter(a) > get_diameter(b)) ||
	       ((get_diameter(a) == get_diameter(b)) && (get_index(a) < get_index(b)));
}

enum compressed_matrix_layout { LOWER_TRIANGULAR, UPPER_TRIANGULAR };

// Symmetric distance matrix stored as the packed strict triangle
// (n*(n-1)/2 entries), with per-row pointers into the flat storage.
template <compressed_matrix_layout Layout> struct compressed_distance_matrix {
	std::vector<value_t> distances; // packed triangular entries
	std::vector<value_t*> rows;     // row base pointers into 'distances'

	compressed_distance_matrix(std::vector<value_t>&& _distances)
	    : distances(std::move(_distances)),
	      // recover n from n*(n-1)/2 stored entries via the quadratic formula
	      rows((1 + std::sqrt(1 + 8 * distances.size())) / 2) {
		assert(distances.size() == size() * (size() - 1) / 2);
		init_rows();
	}

	// Copy-construct from any matrix type providing mat(i, j) and mat.size().
	template <typename DistanceMatrix>
	compressed_distance_matrix(const DistanceMatrix& mat)
	    : distances(mat.size() * (mat.size() - 1) / 2), rows(mat.size()) {
		init_rows();
		for (size_t i = 1; i < size(); ++i)
			for (size_t j = 0; j < i; ++j) rows[i][j] = mat(i, j);
	}

	value_t operator()(const index_t i, const index_t j) const;
	size_t size() const { return rows.size(); }
	void init_rows();
};

typedef compressed_distance_matrix<LOWER_TRIANGULAR> compressed_lower_distance_matrix;
typedef compressed_distance_matrix<UPPER_TRIANGULAR> compressed_upper_distance_matrix;

// Lower-triangular layout: row i holds i entries (columns 0..i-1).
template <> void compressed_lower_distance_matrix::init_rows() {
	value_t* pointer = &distances[0];
	for (size_t i = 1; i < size(); ++i) {
		rows[i] = pointer;
		pointer += i;
	}
}

// Upper-triangular layout: rows[i] is offset so rows[i][j] is valid for j > i.
template <> void compressed_upper_distance_matrix::init_rows() {
	value_t* pointer = &distances[0] - 1;
	for (size_t i = 0; i < size() - 1; ++i) {
		rows[i] = pointer;
		pointer += size() - i - 2;
	}
}

template <>
value_t compressed_lower_distance_matrix::operator()(const index_t i,
                                                     const index_t j) const {
	return i == j ? 0 : i < j ? rows[j][i] : rows[i][j];
}

template <>
value_t compressed_upper_distance_matrix::operator()(const index_t i,
                                                     const index_t j) const {
	return i == j ? 0
	              : i > j ?
	                      rows[j][i]
	                      : rows[i][j];
}

// Distance matrix storing, per point, a sorted list of neighbors
// within the distance threshold.
struct sparse_distance_matrix {
	std::vector<std::vector<index_diameter_t>> neighbors;
	index_t num_edges;

	sparse_distance_matrix(std::vector<std::vector<index_diameter_t>>&& _neighbors,
	                       index_t _num_edges)
	    : neighbors(std::move(_neighbors)), num_edges(_num_edges) {}

	// Build from a dense matrix, keeping only entries <= threshold.
	template <typename DistanceMatrix>
	sparse_distance_matrix(const DistanceMatrix& mat, const value_t threshold)
	    : neighbors(mat.size()), num_edges(0) {
		for (size_t i = 0; i < size(); ++i)
			for (size_t j = 0; j < size(); ++j)
				if (i != j && mat(i, j) <= threshold) {
					++num_edges;
					neighbors[i].push_back({j, mat(i, j)});
				}
	}

	// Binary search the (index-sorted) neighbor list; infinity when absent.
	value_t operator()(const index_t i, const index_t j) const {
		auto neighbor = std::lower_bound(neighbors[i].begin(), neighbors[i].end(),
		                                 index_diameter_t{j, 0});
		return (neighbor != neighbors[i].end() && get_index(*neighbor) == j)
		           ? get_diameter(*neighbor)
		           : std::numeric_limits<value_t>::infinity();
	}

	size_t size() const { return neighbors.size(); }
};

// Point cloud with Euclidean distances computed on demand.
struct euclidean_distance_matrix {
	std::vector<std::vector<value_t>> points;

	euclidean_distance_matrix(std::vector<std::vector<value_t>>&& _points)
	    : points(std::move(_points)) {
		// All points must share one dimension.
		for (auto p : points) { assert(p.size() == points.front().size()); }
	}

	value_t operator()(const index_t i, const index_t j) const {
		assert(i < points.size());
		assert(j < points.size());
		return std::sqrt(std::inner_product(
		    points[i].begin(), points[i].end(), points[j].begin(), value_t(),
		    std::plus<value_t>(),
		    [](value_t u, value_t v) { return (u - v) * (u - v); }));
	}

	size_t size() const { return points.size(); }
};

// Union-find with path compression and union by rank; drives the
// dimension-0 persistence computation.
class union_find {
	std::vector<index_t> parent;
	std::vector<uint8_t> rank;

public:
	union_find(const index_t n) : parent(n), rank(n, 0) {
		for (index_t i = 0; i < n; ++i) parent[i] = i;
	}

	index_t find(index_t x) {
		index_t y = x, z;
		while ((z = parent[y]) != y) y = z;
		// Second pass: compress the path onto the root y.
		while ((z = parent[x]) != y) {
			parent[x] = y;
			x = z;
		}
		return z;
	}

	void link(index_t x, index_t y) {
		if ((x = find(x)) == (y = find(y))) return;
		if (rank[x] > rank[y])
			parent[y] = x;
		else {
			parent[x] = y;
			if (rank[x] == rank[y]) ++rank[y];
		}
	}
};

// Allow range-for iteration over an iterator pair.
template <typename T> T begin(std::pair<T, T>& p) { return p.first; }
template <typename T> T end(std::pair<T, T>& p) { return p.second; }

// Append-only column-compressed sparse matrix (stores the reduction matrix).
template <typename ValueType> class compressed_sparse_matrix {
	std::vector<size_t> bounds; // end offset of each column in 'entries'
	std::vector<ValueType> entries;

	typedef typename std::vector<ValueType>::iterator iterator;
	typedef std::pair<iterator, iterator> iterator_pair;

public:
	size_t size() const { return bounds.size(); }

	iterator_pair subrange(const index_t index) {
		return {entries.begin() + (index == 0 ? 0 : bounds[index - 1]),
		        entries.begin() + bounds[index]};
	}

	void append_column() { bounds.push_back(entries.size()); }

	void push_back(const ValueType e) {
		assert(0 < size());
		entries.push_back(e);
		++bounds.back();
	}
};

// Binary search for the largest value in (bottom, top] satisfying pred
// (pred is assumed monotone: true below some cutoff, false above).
template <class Predicate>
index_t get_max(index_t top, const index_t bottom, const Predicate pred) {
	if (!pred(top)) {
		index_t count = top - bottom;
		while (count > 0) {
			index_t step = count >> 1, mid = top - step;
			if (!pred(mid)) {
				top = mid - 1;
				count -= step + 1;
			} else
				count = step;
		}
	}
	return top;
}

// The main persistence engine, parameterized over the distance matrix type.
template <typename DistanceMatrix> class ripser {
	const DistanceMatrix dist;
	const index_t n, dim_max;       // number of points; top homology dimension
	const value_t threshold;        // maximal diameter to consider
	const float ratio;              // only report pairs with death/birth > ratio
	const coefficient_t modulus;    // coefficient field Z/modulus
	const binomial_coeff_table binomial_coeff;
	const std::vector<coefficient_t> multiplicative_inverse;
	mutable std::vector<diameter_entry_t> cofacet_entries; // scratch buffer
	mutable std::vector<index_t> vertices;                 // scratch buffer

	// Hash/equality on the simplex index only (coefficient ignored).
	struct entry_hash {
		std::size_t operator()(const entry_t& e) const {
			return hash<index_t>()(::get_index(e));
		}
	};

	struct equal_index {
		bool operator()(const entry_t& e, const entry_t& f) const {
			return ::get_index(e) == ::get_index(f);
		}
	};

	typedef hash_map<entry_t, size_t, entry_hash, equal_index> entry_hash_map;

public:
	ripser(DistanceMatrix&& _dist, index_t _dim_max, value_t _threshold, float _ratio,
	       coefficient_t _modulus)
	    : dist(std::move(_dist)), n(dist.size()),
	      dim_max(std::min(_dim_max,
	                       index_t(dist.size() - 2))),
	      threshold(_threshold), ratio(_ratio), modulus(_modulus),
	      binomial_coeff(n, dim_max + 2),
	      multiplicative_inverse(multiplicative_inverse_vector(_modulus)) {}

	// Largest vertex w <= n with binomial_coeff(w, k) <= idx
	// (decoding step of the combinatorial number system).
	index_t get_max_vertex(const index_t idx, const index_t k, const index_t n) const {
		return get_max(n, k - 1,
		               [&](index_t w) -> bool { return (binomial_coeff(w, k) <= idx); });
	}

	index_t get_edge_index(const index_t i, const index_t j) const {
		return binomial_coeff(i, 2) + j;
	}

	// Decode a simplex index into its vertices (written in descending order).
	template <typename OutputIterator>
	OutputIterator get_simplex_vertices(index_t idx, const index_t dim, index_t n,
	                                    OutputIterator out) const {
		--n;
		for (index_t k = dim + 1; k > 0; --k) {
			n = get_max_vertex(idx, k, n);
			*out++ = n;
			idx -= binomial_coeff(n, k);
		}
		return out;
	}

	// Diameter of a simplex = maximum pairwise distance of its vertices.
	value_t compute_diameter(const index_t index, const index_t dim) const {
		value_t diam = -std::numeric_limits<value_t>::infinity();

		vertices.resize(dim + 1);
		get_simplex_vertices(index, dim, dist.size(), vertices.rbegin());

		for (index_t i = 0; i <= dim; ++i)
			for (index_t j = 0; j < i; ++j) {
				diam = std::max(diam, dist(vertices[i], vertices[j]));
			}
		return diam;
	}

	class simplex_coboundary_enumerator;

	// Enumerates the codimension-1 faces (facets) of a simplex.
	class simplex_boundary_enumerator {
	private:
		index_t idx_below, idx_above, j, k;
		diameter_entry_t simplex;
		index_t dim;
		const coefficient_t modulus;
		const binomial_coeff_table& binomial_coeff;
		const ripser& parent;

	public:
		// NOTE(review): this ctor does not initialize 'dim'; callers appear to
		// always call set_simplex() before next() — confirm before reuse.
		simplex_boundary_enumerator(const diameter_entry_t _simplex, const index_t _dim,
		                            const ripser& _parent)
		    : idx_below(get_index(_simplex)), idx_above(0), j(_parent.n - 1), k(_dim),
		      simplex(_simplex), modulus(_parent.modulus),
		      binomial_coeff(_parent.binomial_coeff), parent(_parent) {}

		simplex_boundary_enumerator(const index_t _dim, const ripser& _parent)
		    : simplex_boundary_enumerator(-1, _dim, _parent) {}

		// Reset the enumerator to a new simplex of dimension _dim.
		void set_simplex(const diameter_entry_t _simplex, const index_t _dim) {
			idx_below = get_index(_simplex);
			idx_above = 0;
			j = parent.n - 1;
			k = _dim;
			simplex = _simplex;
			dim = _dim;
		}

		bool has_next() { return (k >= 0); }
		diameter_entry_t next() {
			j = parent.get_max_vertex(idx_below, k + 1, j);

			index_t face_index = idx_above - binomial_coeff(j, k + 1) + idx_below;

			value_t face_diameter = parent.compute_diameter(face_index, dim - 1);

			// Alternating boundary sign (-1)^k, carried in the coefficient.
			coefficient_t face_coefficient =
			    (k & 1 ? -1 + modulus : 1) * get_coefficient(simplex) % modulus;

			idx_below -= binomial_coeff(j, k + 1);
			idx_above += binomial_coeff(j, k);

			--k;

			return diameter_entry_t(face_diameter, face_index, face_coefficient);
		}
	};

	// First facet with the same diameter as the simplex, or index -1 if none.
	// NOTE(review): the 'static' enumerator is shared across calls (and bound
	// to the first *this) — single-instance, single-threaded use assumed.
	diameter_entry_t get_zero_pivot_facet(const diameter_entry_t simplex, const index_t dim) {
		static simplex_boundary_enumerator facets(0, *this);
		facets.set_simplex(simplex, dim);
		while (facets.has_next()) {
			diameter_entry_t facet = facets.next();
			if (get_diameter(facet) == get_diameter(simplex)) return facet;
		}
		return diameter_entry_t(-1);
	}

	// First cofacet with the same diameter as the simplex, or index -1 if none.
	diameter_entry_t get_zero_pivot_cofacet(const diameter_entry_t simplex, const index_t dim) {
		static simplex_coboundary_enumerator cofacets(*this);
		cofacets.set_simplex(simplex, dim);
		while (cofacets.has_next()) {
			diameter_entry_t cofacet = cofacets.next();
			if (get_diameter(cofacet) == get_diameter(simplex)) return cofacet;
		}
		return diameter_entry_t(-1);
	}

	// Apparent-pair shortcut: facet f of s with equal diameter such that s is
	// in turn the zero-pivot cofacet of f; -1 when no such facet exists.
	diameter_entry_t get_zero_apparent_facet(const diameter_entry_t simplex, const index_t dim) {
		diameter_entry_t facet = get_zero_pivot_facet(simplex, dim);
		return ((get_index(facet) != -1) &&
		        (get_index(get_zero_pivot_cofacet(facet, dim - 1)) == get_index(simplex)))
		           ? facet
		           : diameter_entry_t(-1);
	}

	// Dual of the above: matching cofacet, or -1.
	diameter_entry_t get_zero_apparent_cofacet(const diameter_entry_t simplex, const index_t dim) {
		diameter_entry_t cofacet = get_zero_pivot_cofacet(simplex, dim);
		return ((get_index(cofacet) != -1) &&
		        (get_index(get_zero_pivot_facet(cofacet, dim + 1)) == get_index(simplex)))
		           ?
		           cofacet
		           : diameter_entry_t(-1);
	}

	// Simplices in an apparent pair cancel and never yield persistence pairs.
	bool is_in_zero_apparent_pair(const diameter_entry_t simplex, const index_t dim) {
		return (get_index(get_zero_apparent_cofacet(simplex, dim)) != -1) ||
		       (get_index(get_zero_apparent_facet(simplex, dim)) != -1);
	}

	// Expand the (dim-1)-simplices to dim-simplices and collect the columns to
	// reduce, skipping apparent pairs and simplices already used as pivots.
	void assemble_columns_to_reduce(std::vector<diameter_index_t>& simplices,
	                                std::vector<diameter_index_t>& columns_to_reduce,
	                                entry_hash_map& pivot_column_index, index_t dim) {

#ifdef INDICATE_PROGRESS
		std::cerr << clear_line << "assembling columns" << std::flush;
		std::chrono::steady_clock::time_point next =
		    std::chrono::steady_clock::now() + time_step;
#endif

		columns_to_reduce.clear();
		std::vector<diameter_index_t> next_simplices;
		simplex_coboundary_enumerator cofacets(*this);

		for (diameter_index_t& simplex : simplices) {
			cofacets.set_simplex(diameter_entry_t(simplex, 1), dim - 1);

			// has_next(false): enumerate each cofacet exactly once overall.
			while (cofacets.has_next(false)) {
#ifdef INDICATE_PROGRESS
				if (std::chrono::steady_clock::now() > next) {
					std::cerr << clear_line << "assembling " << next_simplices.size()
					          << " columns (processing "
					          << std::distance(&simplices[0], &simplex) << "/"
					          << simplices.size() << " simplices)" << std::flush;
					next = std::chrono::steady_clock::now() + time_step;
				}
#endif
				auto cofacet = cofacets.next();
				if (get_diameter(cofacet) <= threshold) {

					next_simplices.push_back({get_diameter(cofacet), get_index(cofacet)});

					if (!is_in_zero_apparent_pair(cofacet, dim) &&
					    (pivot_column_index.find(get_entry(cofacet)) ==
					     pivot_column_index.end()))
						columns_to_reduce.push_back(
						    {get_diameter(cofacet), get_index(cofacet)});
				}
			}
		}

		simplices.swap(next_simplices);

#ifdef INDICATE_PROGRESS
		std::cerr << clear_line << "sorting " << columns_to_reduce.size() << " columns"
		          << std::flush;
#endif

		std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
		          greater_diameter_or_smaller_index<diameter_index_t>);
#ifdef INDICATE_PROGRESS
		std::cerr << clear_line << std::flush;
#endif
	}

	// Linear scan for the cluster currently containing point id i.
	index_t find_cluster(std::vector<std::vector<index_t> >& cluster_vector, index_t i) {
		for (index_t j =
		         0;
		     j < (index_t)cluster_vector.size(); j++)
			if (std::count(cluster_vector[j].begin(), cluster_vector[j].end(), i)) return j;
		// Should be unreachable: every point id lives in exactly one cluster.
		std::cerr << "Error: failed to find cluster " << i << " in cluster vector"
		          << " of size " << cluster_vector.size() << std::endl;
		return (-1);
	}

	// Append all members of cluster j to cluster i, then drop cluster j.
	void merge_clusters(std::vector<std::vector<index_t> >& cluster_vector, index_t i,
	                    index_t j) {
		cluster_vector[i].insert(cluster_vector[i].end(), cluster_vector[j].begin(),
		                         cluster_vector[j].end());
		cluster_vector.erase(cluster_vector.begin() + j);
		return;
	}

	// Print the size of every current cluster on one line.
	void print_cluster_multiplicities(std::vector<std::vector<index_t> >& cluster_vector) {
		std::cout << "Multiplicities:";
		for (auto cluster : cluster_vector) std::cout << " " << cluster.size();
		std::cout << std::endl;
		return;
	}

	// Normalized log-multinomial statistic: sum over clusters of
	// log(|c|!) / log(n!), computed via sums of logs to avoid overflow.
	double get_cluster_entropy(std::vector<std::vector<index_t> >& cluster_vector,
	                           index_t n) {
		double total = 0.0, den = 0.0;
		for (index_t i = 1; i <= n; i++) den += log((double)i); // log(n!)
		for (auto cluster : cluster_vector) {
			double num = 0.0;
			for (index_t i = 1; i <= (index_t)cluster.size(); i++) num += log((double)i);
			total += num / den;
		}
		return (total);
	}

	// Dimension-0 persistence via union-find over edges in filtration order,
	// additionally tracking explicit cluster membership for the
	// multiplicity/entropy output lines.
	void compute_dim_0_pairs(std::vector<diameter_index_t>& edges,
	                         std::vector<diameter_index_t>& columns_to_reduce) {
#ifdef PRINT_PERSISTENCE_PAIRS
		std::cout << "persistence intervals in dim 0:" << std::endl;
#endif

		union_find dset(n);

		edges = get_edges();
		std::sort(edges.rbegin(), edges.rend(),
		          greater_diameter_or_smaller_index<diameter_index_t>);
		std::vector<index_t> vertices_of_edge(2);

		// cluster_vector initially contains a list of n clusters, each with size 1
		// each cluster contains a list of the point IDs it contains
		std::vector<std::vector<index_t> > cluster_vector(n, std::vector<index_t>(1));
		for (index_t i = 0; i < n; ++i)
			cluster_vector[i][0] = i; // assign 1 point ID to each cluster
		print_cluster_multiplicities(cluster_vector);
		std::cout << "FORMAT: 0 0 0 " << get_cluster_entropy(cluster_vector, n) << std::endl;

		for (auto e : edges) {
			get_simplex_vertices(get_index(e), 1, n, vertices_of_edge.rbegin());
			index_t u = dset.find(vertices_of_edge[0]), v = dset.find(vertices_of_edge[1]);

			if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
				if (get_diameter(e) != 0) {
					// Mirror the union-find merge in the explicit cluster lists
					// (keyed on the component roots u, v).
					index_t uCluster = find_cluster(cluster_vector, u);
					index_t vCluster = find_cluster(cluster_vector, v);
					//index_t uCluster = find_cluster( cluster_vector, vertices_of_edge[0] );
					//index_t vCluster = find_cluster( cluster_vector, vertices_of_edge[1] );
					merge_clusters(cluster_vector, uCluster, vCluster);
					print_cluster_multiplicities(cluster_vector);
					std::cout << "FORMAT: 0 0 " << get_diameter(e) << " "
					          << get_cluster_entropy(cluster_vector, n) << std::endl;
					std::cout << " [0," << get_diameter(e) << ")" << std::endl;
				}
#endif
				dset.link(u, v);
			} else if (get_index(get_zero_apparent_cofacet(e, 1)) == -1)
				columns_to_reduce.push_back(e);
		}
		std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());

#ifdef PRINT_PERSISTENCE_PAIRS
		// Each surviving root is an essential class: interval [0, inf).
		for (index_t i = 0; i < n; ++i)
			if (dset.find(i) == i) std::cout << " [0, )" << std::endl;
#endif
	}

	// Pop the top pivot entry of a column, merging duplicate indices by
	// summing coefficients mod p; returns index -1 when the column is zero.
	template <typename Column> diameter_entry_t pop_pivot(Column& column) {
		diameter_entry_t pivot(-1);
#ifdef USE_COEFFICIENTS
		while (!column.empty()) {
			if (get_coefficient(pivot) == 0)
				pivot = column.top();
			else if (get_index(column.top()) != get_index(pivot))
				return pivot;
			else
				set_coefficient(pivot,
				                (get_coefficient(pivot) + get_coefficient(column.top())) %
				                    modulus);
			column.pop();
		}
		return (get_coefficient(pivot) == 0) ?
		                                      -1
		                                      : pivot;
#else
		// Mod 2: equal adjacent entries cancel in pairs.
		while (!column.empty()) {
			pivot = column.top();
			column.pop();
			if (column.empty() || get_index(column.top()) != get_index(pivot)) return pivot;
			column.pop();
		}
		return -1;
#endif
	}

	// Peek the pivot: pop it, then push it back if the column was nonzero.
	template <typename Column> diameter_entry_t get_pivot(Column& column) {
		diameter_entry_t result = pop_pivot(column);
		if (get_index(result) != -1) column.push(result);
		return result;
	}

	// Push the coboundary of 'simplex' into the working column and return its
	// pivot. The emergent-pair shortcut returns a cofacet of equal diameter
	// immediately, skipping the remaining column construction.
	template <typename Column>
	diameter_entry_t init_coboundary_and_get_pivot(const diameter_entry_t simplex,
	                                               Column& working_coboundary,
	                                               const index_t& dim,
	                                               entry_hash_map& pivot_column_index) {
		static simplex_coboundary_enumerator cofacets(*this);
		bool check_for_emergent_pair = true;
		cofacet_entries.clear();
		cofacets.set_simplex(simplex, dim);
		while (cofacets.has_next()) {
			diameter_entry_t cofacet = cofacets.next();
			if (get_diameter(cofacet) <= threshold) {
				cofacet_entries.push_back(cofacet);
				if (check_for_emergent_pair &&
				    (get_diameter(simplex) == get_diameter(cofacet))) {
					if ((pivot_column_index.find(get_entry(cofacet)) ==
					     pivot_column_index.end()) &&
					    (get_index(get_zero_apparent_facet(cofacet, dim + 1)) == -1))
						return cofacet;
					check_for_emergent_pair = false;
				}
			}
		}
		for (auto cofacet : cofacet_entries) working_coboundary.push(cofacet);
		return get_pivot(working_coboundary);
	}

	// Add the full coboundary of one simplex to the working column, and record
	// the simplex itself in the working reduction column.
	template <typename Column>
	void add_simplex_coboundary(const diameter_entry_t simplex, const index_t& dim,
	                            Column& working_reduction_column, Column& working_coboundary) {
		static simplex_coboundary_enumerator cofacets(*this);
		working_reduction_column.push(simplex);
		cofacets.set_simplex(simplex, dim);
		while (cofacets.has_next()) {
			diameter_entry_t cofacet = cofacets.next();
			if (get_diameter(cofacet) <= threshold) working_coboundary.push(cofacet);
		}
	}

	// Add 'factor' times a previously reduced column (and the recorded entries
	// of its reduction column) to the working column.
	template <typename Column>
	void add_coboundary(compressed_sparse_matrix<diameter_entry_t>& reduction_matrix,
	                    const std::vector<diameter_index_t>& columns_to_reduce,
	                    const size_t index_column_to_add, const coefficient_t factor,
	                    const size_t& dim, Column&
	                                           working_reduction_column,
	                    Column& working_coboundary) {
		diameter_entry_t column_to_add(columns_to_reduce[index_column_to_add], factor);
		add_simplex_coboundary(column_to_add, dim, working_reduction_column,
		                       working_coboundary);

		for (diameter_entry_t simplex : reduction_matrix.subrange(index_column_to_add)) {
			set_coefficient(simplex, get_coefficient(simplex) * factor % modulus);
			add_simplex_coboundary(simplex, dim, working_reduction_column,
			                       working_coboundary);
		}
	}

	// Matrix reduction for one dimension: reduce each column until its pivot
	// is new (a finite pair), cancels apparently, or the column is zero
	// (an essential class).
	void compute_pairs(const std::vector<diameter_index_t>& columns_to_reduce,
	                   entry_hash_map& pivot_column_index, const index_t dim) {

#ifdef PRINT_PERSISTENCE_PAIRS
		std::cout << "persistence intervals in dim " << dim << ":" << std::endl;
#endif

		compressed_sparse_matrix<diameter_entry_t> reduction_matrix;

#ifdef INDICATE_PROGRESS
		std::chrono::steady_clock::time_point next =
		    std::chrono::steady_clock::now() + time_step;
#endif
		for (size_t index_column_to_reduce = 0;
		     index_column_to_reduce < columns_to_reduce.size(); ++index_column_to_reduce) {

			diameter_entry_t column_to_reduce(columns_to_reduce[index_column_to_reduce], 1);
			value_t diameter = get_diameter(column_to_reduce);

			reduction_matrix.append_column();

			// Lazily evaluated working columns (coboundary and reduction).
			std::priority_queue<diameter_entry_t, std::vector<diameter_entry_t>,
			                    greater_diameter_or_smaller_index_comp<diameter_entry_t>>
			    working_reduction_column, working_coboundary;

			diameter_entry_t e, pivot = init_coboundary_and_get_pivot(
			                        column_to_reduce, working_coboundary, dim,
			                        pivot_column_index);

			while (true) {
#ifdef INDICATE_PROGRESS
				if (std::chrono::steady_clock::now() > next) {
					std::cerr << clear_line << "reducing column "
					          << index_column_to_reduce + 1 << "/"
					          << columns_to_reduce.size() << " (diameter " << diameter
					          << ")" << std::flush;
					next = std::chrono::steady_clock::now() + time_step;
				}
#endif
				if (get_index(pivot) != -1) {
					auto pair = pivot_column_index.find(get_entry(pivot));
					if (pair != pivot_column_index.end()) {
						entry_t other_pivot = pair->first;
						index_t index_column_to_add = pair->second;
						// Eliminate the pivot against the already-reduced column.
						coefficient_t factor =
						    modulus - get_coefficient(pivot) *
						                  multiplicative_inverse[get_coefficient(other_pivot)] %
						                  modulus;

						add_coboundary(reduction_matrix, columns_to_reduce,
						               index_column_to_add, factor, dim,
						               working_reduction_column, working_coboundary);

						pivot = get_pivot(working_coboundary);
					} else if (get_index(e = get_zero_apparent_facet(pivot, dim + 1)) != -1) {
						// Pivot sits in an apparent pair: cancel it directly.
						set_coefficient(e, modulus - get_coefficient(e));

						add_simplex_coboundary(e, dim, working_reduction_column,
						                       working_coboundary);

						pivot = get_pivot(working_coboundary);
					} else {
						// New pivot: a finite persistence pair is born.
#ifdef PRINT_PERSISTENCE_PAIRS
						value_t death = get_diameter(pivot);
						if (death > diameter * ratio) {
#ifdef INDICATE_PROGRESS
							std::cerr << clear_line << std::flush;
#endif
							std::cout << " [" << diameter << "," << death << ")" << std::endl;
							std::cout << "FORMAT: " << dim << " " << diameter << " " << death
							          << std::endl;
						}
#endif
						pivot_column_index.insert({get_entry(pivot), index_column_to_reduce});

						// Persist the reduction column for later additions.
						while (true) {
							diameter_entry_t e = pop_pivot(working_reduction_column);
							if (get_index(e) == -1) break;
							assert(get_coefficient(e) > 0);
							reduction_matrix.push_back(e);
						}
						break;
					}
				} else {
					// Column reduced to zero: essential class [diameter, inf).
#ifdef PRINT_PERSISTENCE_PAIRS
#ifdef INDICATE_PROGRESS
					std::cerr << clear_line << std::flush;
#endif
					std::cout << " [" << diameter << ", )" << std::endl;
					std::cout << "FORMAT: " << dim << " " << diameter << std::endl;
#endif
					break;
				}
			}
		}
#ifdef INDICATE_PROGRESS
		std::cerr << clear_line << std::flush;
#endif
	}

	std::vector<diameter_index_t> get_edges();

	// Driver: dimension 0 via union-find, then matrix reduction per dimension.
	void compute_barcodes() {
		std::vector<diameter_index_t> simplices, columns_to_reduce;

		compute_dim_0_pairs(simplices, columns_to_reduce);

		for (index_t dim = 1; dim <= dim_max; ++dim) {
			entry_hash_map pivot_column_index;
			pivot_column_index.reserve(columns_to_reduce.size());
			compute_pairs(columns_to_reduce, pivot_column_index, dim);
			if (dim < dim_max)
				assemble_columns_to_reduce(simplices, columns_to_reduce,
				                           pivot_column_index, dim + 1);
		}
	}
};

// Coboundary enumeration specialized for the dense (compressed) matrix.
template <> class
ripser<compressed_lower_distance_matrix>::simplex_coboundary_enumerator {
	index_t idx_below, idx_above, j, k;
	std::vector<index_t> vertices;
	diameter_entry_t simplex;
	const coefficient_t modulus;
	const compressed_lower_distance_matrix& dist;
	const binomial_coeff_table& binomial_coeff;
	const ripser& parent;

public:
	// NOTE(review): this ctor writes through vertices.rbegin() without
	// resizing 'vertices' first, and leaves idx_below/idx_above/j/k unset —
	// looks unsafe if it is ever used without a following set_simplex();
	// confirm against the call sites.
	simplex_coboundary_enumerator(const diameter_entry_t _simplex, const index_t _dim,
	                              const ripser& _parent)
	    : modulus(_parent.modulus), dist(_parent.dist),
	      binomial_coeff(_parent.binomial_coeff), parent(_parent) {
		if (get_index(_simplex) != -1)
			parent.get_simplex_vertices(get_index(_simplex), _dim, parent.n,
			                            vertices.rbegin());
	}

	simplex_coboundary_enumerator(const ripser& _parent)
	    : modulus(_parent.modulus), dist(_parent.dist),
	      binomial_coeff(_parent.binomial_coeff), parent(_parent) {}

	// Reset to a new simplex; decodes its vertex set (descending order).
	void set_simplex(const diameter_entry_t _simplex, const index_t _dim) {
		idx_below = get_index(_simplex);
		idx_above = 0;
		j = parent.n - 1;
		k = _dim + 1;
		simplex = _simplex;
		vertices.resize(_dim + 1);
		parent.get_simplex_vertices(get_index(_simplex), _dim, parent.n,
		                            vertices.rbegin());
	}

	bool has_next(bool all_cofacets = true) {
		return (j >= k && (all_cofacets || binomial_coeff(j, k) > idx_below));
	}

	diameter_entry_t next() {
		// Skip candidate vertices that already belong to the simplex.
		while ((binomial_coeff(j, k) <= idx_below)) {
			idx_below -= binomial_coeff(j, k);
			idx_above += binomial_coeff(j, k + 1);
			--j;
			--k;
			assert(k != -1);
		}
		value_t cofacet_diameter = get_diameter(simplex);
		for (index_t i : vertices)
			cofacet_diameter = std::max(cofacet_diameter, dist(j, i));
		index_t cofacet_index = idx_above + binomial_coeff(j--, k + 1) + idx_below;
		coefficient_t cofacet_coefficient =
		    (k & 1 ?
	           modulus - 1
	           : 1) *
		    get_coefficient(simplex) % modulus;
		return diameter_entry_t(cofacet_diameter, cofacet_index, cofacet_coefficient);
	}
};

// Coboundary enumeration specialized for the sparse matrix: candidate
// cofacet vertices are found by intersecting the neighbor lists of all
// simplex vertices.
template <> class ripser<sparse_distance_matrix>::simplex_coboundary_enumerator {
	index_t idx_below, idx_above, k;
	std::vector<index_t> vertices;
	diameter_entry_t simplex;
	const coefficient_t modulus;
	const sparse_distance_matrix& dist;
	const binomial_coeff_table& binomial_coeff;
	std::vector<std::vector<index_diameter_t>::const_reverse_iterator> neighbor_it;
	std::vector<std::vector<index_diameter_t>::const_reverse_iterator> neighbor_end;
	index_diameter_t neighbor; // current common-neighbor candidate
	const ripser& parent;

public:
	simplex_coboundary_enumerator(const diameter_entry_t _simplex, const index_t _dim,
	                              const ripser& _parent)
	    : modulus(_parent.modulus), dist(_parent.dist),
	      binomial_coeff(_parent.binomial_coeff), parent(_parent) {
		if (get_index(_simplex) != -1) set_simplex(_simplex, _dim);
	}

	simplex_coboundary_enumerator(const ripser& _parent)
	    : modulus(_parent.modulus), dist(_parent.dist),
	      binomial_coeff(_parent.binomial_coeff), parent(_parent) {}

	// Reset to a new simplex; position one reverse iterator per vertex at the
	// end (largest index) of its neighbor list.
	void set_simplex(const diameter_entry_t _simplex, const index_t _dim) {
		idx_below = get_index(_simplex);
		idx_above = 0;
		k = _dim + 1;
		simplex = _simplex;
		vertices.resize(_dim + 1);
		parent.get_simplex_vertices(idx_below, _dim, parent.n, vertices.rbegin());

		neighbor_it.resize(_dim + 1);
		neighbor_end.resize(_dim + 1);
		for (index_t i = 0; i <= _dim; ++i) {
			auto v = vertices[i];
			neighbor_it[i] = dist.neighbors[v].rbegin();
			neighbor_end[i] = dist.neighbors[v].rend();
		}
	}

	// Advance to the next vertex adjacent to every simplex vertex.
	bool has_next(bool all_cofacets = true) {
		for (auto &it0 = neighbor_it[0], &end0 = neighbor_end[0]; it0 != end0; ++it0) {
			neighbor = *it0;
			for (size_t idx = 1; idx < neighbor_it.size(); ++idx) {
				auto &it = neighbor_it[idx], end = neighbor_end[idx];
				while (get_index(*it) > get_index(neighbor))
					if (++it == end) return false;
				if (get_index(*it) != get_index(neighbor))
					goto continue_outer;
				else
					// max keeps the larger stored distance for the diameter.
					neighbor = std::max(neighbor, *it);
			}
			while (k > 0 &&
			       vertices[k - 1] > get_index(neighbor)) {
				// Shift the insertion position below smaller simplex vertices.
				if (!all_cofacets) return false;
				idx_below -= binomial_coeff(vertices[k - 1], k);
				idx_above += binomial_coeff(vertices[k - 1], k + 1);
				--k;
			}
			return true;
		continue_outer:;
		}
		return false;
	}

	diameter_entry_t next() {
		++neighbor_it[0];
		value_t cofacet_diameter = std::max(get_diameter(simplex), get_diameter(neighbor));
		index_t cofacet_index =
		    idx_above + binomial_coeff(get_index(neighbor), k + 1) + idx_below;
		coefficient_t cofacet_coefficient =
		    (k & 1 ? modulus - 1 : 1) * get_coefficient(simplex) % modulus;
		return diameter_entry_t(cofacet_diameter, cofacet_index, cofacet_coefficient);
	}
};

// All edges within the threshold as (length, edge index) pairs (dense case).
template <>
std::vector<diameter_index_t> ripser<compressed_lower_distance_matrix>::get_edges() {
	std::vector<diameter_index_t> edges;
	std::vector<index_t> vertices(2);
	for (index_t index = binomial_coeff(n, 2); index-- > 0;) {
		get_simplex_vertices(index, 1, dist.size(), vertices.rbegin());
		value_t length = dist(vertices[0], vertices[1]);
		if (length <= threshold) edges.push_back({length, index});
	}
	return edges;
}

// Sparse case: walk the stored neighbor lists, keeping each edge once (i > j).
// (The loop variable 'n' shadows the member ripser::n here.)
template <> std::vector<diameter_index_t> ripser<sparse_distance_matrix>::get_edges() {
	std::vector<diameter_index_t> edges;
	for (index_t i = 0; i < n; ++i)
		for (auto n : dist.neighbors[i]) {
			index_t j = get_index(n);
			if (i > j) edges.push_back({get_diameter(n), get_edge_index(i, j)});
		}
	return edges;
}

enum file_format {
	LOWER_DISTANCE_MATRIX,
	UPPER_DISTANCE_MATRIX,
	DISTANCE_MATRIX,
	POINT_CLOUD,
	DIPHA,
	SPARSE,
	BINARY
};

// Runtime endianness probe: first byte of 0xff00 is nonzero on big-endian.
static const uint16_t endian_check(0xff00);
static const bool is_big_endian = *reinterpret_cast<const uint8_t*>(&endian_check);

// Read one little-endian binary value of type T; returns T() on short read.
template <typename T> T read(std::istream& input_stream) {
	T result;
	char* p = reinterpret_cast<char*>(&result);
	if (input_stream.read(p, sizeof(T)).gcount() != sizeof(T)) return T();
	if (is_big_endian) std::reverse(p, p + sizeof(T));
	return result;
}

// Parse a point cloud (separated coordinates, one point per line) and return
// its packed lower-triangular Euclidean distance matrix.
compressed_lower_distance_matrix read_point_cloud(std::istream& input_stream) {
	std::vector<std::vector<value_t>> points;

	std::string line;
	value_t value;
	while (std::getline(input_stream, line)) {
		std::vector<value_t> point;
		std::istringstream s(line);
		while (s >> value) {
			point.push_back(value);
			s.ignore(); // skip one separator character
		}
		if (!point.empty()) points.push_back(point);
		assert(point.size() == points.front().size());
	}

	euclidean_distance_matrix eucl_dist(std::move(points));
	index_t n = eucl_dist.size();
	std::cout << "point cloud with " << n << " points in dimension "
	          << eucl_dist.points.front().size() << std::endl;

	std::vector<value_t> distances;
	for (int i = 0; i < n; ++i)
		for (int j = 0; j < i; ++j) distances.push_back(eucl_dist(i, j));

	return compressed_lower_distance_matrix(std::move(distances));
}

// Parse "i j d" triplets, one per line; rows are resized on demand and each
// undirected edge is stored in both neighbor lists.
sparse_distance_matrix read_sparse_distance_matrix(std::istream& input_stream) {
	std::vector<std::vector<index_diameter_t>> neighbors;
	index_t num_edges = 0;

	std::string line;
	while (std::getline(input_stream, line)) {
		std::istringstream s(line);
		size_t i, j;
		value_t value;
		s >> i;
		s >> j;
		s >> value;
		if (i != j) {
			neighbors.resize(std::max({neighbors.size(), i + 1, j + 1}));
			neighbors[i].push_back({j, value});
			neighbors[j].push_back({i, value});
			++num_edges;
		}
	}

	// Neighbor lists must be sorted by index for lookup and enumeration.
	for (size_t i = 0; i < neighbors.size(); ++i)
		std::sort(neighbors[i].begin(), neighbors[i].end());

	return sparse_distance_matrix(std::move(neighbors), num_edges);
}

// Separated values forming a packed lower triangle.
compressed_lower_distance_matrix read_lower_distance_matrix(std::istream& input_stream) {
	std::vector<value_t> distances;
	value_t value;
	while (input_stream >> value) {
		distances.push_back(value);
		input_stream.ignore();
	}

	return compressed_lower_distance_matrix(std::move(distances));
}

// Upper triangle; converted to the lower-triangular layout on return.
compressed_lower_distance_matrix read_upper_distance_matrix(std::istream& input_stream) {
	std::vector<value_t> distances;
	value_t value;
	while (input_stream >> value) {
		distances.push_back(value);
		input_stream.ignore();
	}

	return compressed_lower_distance_matrix(
	    compressed_upper_distance_matrix(std::move(distances)));
}

// Full square matrix; only the strict lower triangle is kept.
compressed_lower_distance_matrix read_distance_matrix(std::istream& input_stream) {
	std::vector<value_t> distances;

	std::string line;
	value_t value;
	for (int i = 0; std::getline(input_stream, line); ++i) {
		std::istringstream s(line);
		for (int j = 0; j < i && s >> value; ++j) {
			distances.push_back(value);
			s.ignore();
		}
	}

	return compressed_lower_distance_matrix(std::move(distances));
}

// DIPHA binary distance-matrix format (magic 8067171840, file type 7).
compressed_lower_distance_matrix read_dipha(std::istream& input_stream) {
	if (read<int64_t>(input_stream) != 8067171840) {
		std::cerr << "input is not a Dipha file (magic number: 8067171840)" << std::endl;
		exit(-1);
	}

	if (read<int64_t>(input_stream) != 7) {
		std::cerr << "input is not a Dipha distance matrix (file type: 7)" << std::endl;
		exit(-1);
	}

	index_t n = read<int64_t>(input_stream);

	std::vector<value_t> distances;

	for (int i = 0; i < n; ++i)
		for (int j = 0; j < n; ++j)
			if (i > j)
				distances.push_back(read<double>(input_stream));
			else
				read<double>(input_stream); // skip diagonal and upper triangle

	return compressed_lower_distance_matrix(std::move(distances));
}

// Raw binary lower triangle of value_t.
// NOTE(review): the eof()-controlled loop appends one extra T() value after
// the final read fails — confirm whether that trailing entry is intended.
compressed_lower_distance_matrix read_binary(std::istream& input_stream) {
	std::vector<value_t> distances;
	while (!input_stream.eof()) distances.push_back(read<value_t>(input_stream));
	return compressed_lower_distance_matrix(std::move(distances));
}

// Dispatch to the reader matching the selected input format.
compressed_lower_distance_matrix read_file(std::istream& input_stream,
                                           const file_format format) {
	switch (format) {
	case LOWER_DISTANCE_MATRIX:
		return read_lower_distance_matrix(input_stream);
	case UPPER_DISTANCE_MATRIX:
		return read_upper_distance_matrix(input_stream);
	case DISTANCE_MATRIX:
		return read_distance_matrix(input_stream);
	case POINT_CLOUD:
		return read_point_cloud(input_stream);
	case DIPHA:
		return read_dipha(input_stream);
	default:
		return read_binary(input_stream);
	}
}

void print_usage_and_exit(int exit_code) {
	std::cerr
	    << "Usage: "
	    << "ripser " << "[options] [filename]" << std::endl
	    << std::endl
	    << "Options:" << std::endl
	    << std::endl
	    << " --help print this screen" << std::endl
	    << " --format use the specified file format for the input. Options are:" << std::endl
	    << " lower-distance (lower triangular distance matrix; default)" << std::endl
	    << " upper-distance (upper triangular distance matrix)" << std::endl
	    << " distance (full distance matrix)" << std::endl
	    << " point-cloud (point cloud in Euclidean space)" << std::endl
	    << " dipha (distance matrix in DIPHA file format)" << std::endl
	    << " sparse (sparse distance matrix in sparse triplet format)" << std::endl
	    << " binary (lower triangular distance matrix in binary format)" << std::endl
	    << " --dim <k> compute persistent homology up to dimension k" << std::endl
	    << " --threshold <t> compute Rips complexes up to diameter t" << std::endl
#ifdef USE_COEFFICIENTS
	    << " --modulus <p> compute homology with coefficients in the prime field Z/pZ" << std::endl
#endif
	    << " --ratio <r> only show persistence pairs with death/birth ratio > r" << std::endl
	    << std::endl;
	exit(exit_code);
}

int main(int argc, char** argv) {
	const char* filename = nullptr;

	file_format format = DISTANCE_MATRIX;

	index_t dim_max = 1;
	value_t threshold = std::numeric_limits<value_t>::max();
	float ratio = 1;
	coefficient_t modulus = 2;

	// Command-line parsing; numeric options validate full consumption of the
	// parameter string via the std::sto* position out-argument.
	for (index_t i = 1; i < argc; ++i) {
		const std::string arg(argv[i]);
		if (arg == "--help") {
			print_usage_and_exit(0);
		} else if (arg == "--dim") {
			std::string parameter = std::string(argv[++i]);
			size_t next_pos;
			dim_max = std::stol(parameter, &next_pos);
			if (next_pos != parameter.size()) print_usage_and_exit(-1);
		} else if (arg == "--threshold") {
			std::string parameter = std::string(argv[++i]);
			size_t next_pos;
			threshold = std::stof(parameter, &next_pos);
			if (next_pos != parameter.size()) print_usage_and_exit(-1);
		} else if (arg == "--ratio") {
			std::string parameter = std::string(argv[++i]);
			size_t next_pos;
			ratio = std::stof(parameter, &next_pos);
			if (next_pos != parameter.size()) print_usage_and_exit(-1);
		} else if (arg == "--format") {
			std::string parameter = std::string(argv[++i]);
			if (parameter.rfind("lower", 0) == 0)
				format = LOWER_DISTANCE_MATRIX;
			else if
(parameter.rfind("upper", 0) == 0) format = UPPER_DISTANCE_MATRIX; else if (parameter.rfind("dist", 0) == 0) format = DISTANCE_MATRIX; else if (parameter.rfind("point", 0) == 0) format = POINT_CLOUD; else if (parameter == "dipha") format = DIPHA; else if (parameter == "sparse") format = SPARSE; else if (parameter == "binary") format = BINARY; else print_usage_and_exit(-1); #ifdef USE_COEFFICIENTS } else if (arg == "--modulus") { std::string parameter = std::string(argv[++i]); size_t next_pos; modulus = std::stol(parameter, &next_pos); if (next_pos != parameter.size() || !is_prime(modulus)) print_usage_and_exit(-1); #endif } else { if (filename) { print_usage_and_exit(-1); } filename = argv[i]; } } std::ifstream file_stream(filename); if (filename && file_stream.fail()) { std::cerr << "couldn't open file " << filename << std::endl; exit(-1); } if (format == SPARSE) { sparse_distance_matrix dist = read_sparse_distance_matrix(filename ? file_stream : std::cin); std::cout << "sparse distance matrix with " << dist.size() << " points and " << dist.num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries" << std::endl; ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio, modulus) .compute_barcodes(); } else { compressed_lower_distance_matrix dist = read_file(filename ? 
file_stream : std::cin, format); value_t min = std::numeric_limits<value_t>::infinity(), max = -std::numeric_limits<value_t>::infinity(), max_finite = max; int num_edges = 0; value_t enclosing_radius = std::numeric_limits<value_t>::infinity(); if (threshold == std::numeric_limits<value_t>::max()) { for (size_t i = 0; i < dist.size(); ++i) { value_t r_i = -std::numeric_limits<value_t>::infinity(); for (size_t j = 0; j < dist.size(); ++j) r_i = std::max(r_i, dist(i, j)); enclosing_radius = std::min(enclosing_radius, r_i); } } for (auto d : dist.distances) { min = std::min(min, d); max = std::max(max, d); if (d != std::numeric_limits<value_t>::infinity()) max_finite = std::max(max_finite, d); if (d <= threshold) ++num_edges; } std::cout << "value range: [" << min << "," << max_finite << "]" << std::endl; if (threshold == std::numeric_limits<value_t>::max()) { std::cout << "distance matrix with " << dist.size() << " points, using threshold at enclosing radius " << enclosing_radius << std::endl; ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, enclosing_radius, ratio, modulus) .compute_barcodes(); } else { std::cout << "sparse distance matrix with " << dist.size() << " points and " << num_edges << "/" << (dist.size() * dist.size() - 1) / 2 << " entries" << std::endl; ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold), dim_max, threshold, ratio, modulus) .compute_barcodes(); } exit(0); } }
<reponame>RollingArray/newsent-client-app import { PageInfoTitleModule } from '../page-info-title/page-info-title.component.module'; import { NgModule } from "@angular/core"; import { CommonModule } from "@angular/common"; import { SharedModule } from "src/app/shared/module/shared.module"; import { IonicModule } from "@ionic/angular"; import { FeedDetailsComponent } from './feed-details.component'; import { FeedThumbnailModule } from '../feed-thumbnail/feed-thumbnail.component.module'; @NgModule({ imports: [CommonModule, SharedModule, IonicModule, PageInfoTitleModule, FeedThumbnailModule], declarations: [FeedDetailsComponent], exports: [FeedDetailsComponent], entryComponents: [FeedDetailsComponent] }) export class FeedDetailsModule {}
#!/usr/bin/env python
# coding=utf-8
"""Median filtering demo (OpenCV + matplotlib).

Description: median blur; Version: V1_0; Author: LiWanglin;
Created / last edited: 2020.02.12.

Fixed: the original file carried a duplicated header block, and displayed the
BGR image returned by cv.imread directly through matplotlib (which expects
RGB), so both panes showed swapped red/blue channels.
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# OpenCV loads images in BGR channel order; convert to RGB so matplotlib
# renders true colors. medianBlur works per channel, so filtering the RGB
# image yields the same result as filtering BGR and converting afterwards.
img = cv.cvtColor(cv.imread('./test_image/opencv-logo.png'), cv.COLOR_BGR2RGB)

# 5x5 median filter: effective against salt-and-pepper noise while
# preserving edges better than a box/Gaussian blur.
median = cv.medianBlur(img, 5)

plt.subplot(121), plt.imshow(img), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(median), plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()
We’re outnumbered. Plain as day. And they’re not going away. The estimated ratio of insects to humans is 200 million to one, say Iowa State University entomologists Larry Pedigo and Marlin Rice in their newly published (sixth edition) textbook, Entomology and Pest Management. Rice is the 2009 president of the Entomological Society of America. There's an average of 400 million insects per acre of land, they say. 400 million! Per acre. “The fact is, today’s human population is adrift in a sea of insects,” they write in their introduction. Well, what about biomass? Surely we outweigh these critters? No, we don't. The United States “is home to some 400 pounds of insect biomass per acre, compared with our 14 pounds of flesh and bone,” they write. “Another amazing statistic is that in the Brazilian Amazon, ants alone outweigh the total biomass of all vertebrates by four to one. Based solely on numbers and biomass, insects are the most successful animals on earth!” There you go. The insects are the land owners; we are the tenants. “They are the chief consumers of plants; they are the major predators of plant eaters; they play a major role in decay of organic matter; and they serve as food for other kinds of animals,” Pedigo and Rice write. Insects represent the good, the bad and the ugly. The good: they give us honey and pollinate our crops. They spin our silk. They serve as natural enemies of pests. They provide food for wildlife (not to mention food for some of us humans). They are scavengers. They provide us with ideas for our art work. They are fodder for our horror movies. And what scientist hasn't benefitted from the inheritance studies of the fruit fly, Drosophila melanogaster? What ecologist hasn't studied water pollution by examining the mayfly population? Mayflies are the counterpart of canaries in the coal mine. The bad: they eat our food crops, forests and ornamental plants. They devour or spoil our stored grain. They chew holes in our clothing. They pester us.
They annoy our animals, too. The ugly: They can—and do—kill us. Think mosquitoes. Think malaria, West Nile virus, yellow fever, dengue, encephalitis and other vector-borne diseases. But wait, there's more! Many more. Scientists have described more than 900,000 species of insects but there could be seven times as many out there, the authors point out. Ironically, despite the huge numbers of insects, many people don't know the meaning of the word, entomology, the science of insects. They should. Insects outnumber us and always will. They've lived on the earth longer than us (400 million years) and adapt to changes better than we do. Most are tiny. Most can fly. And most reproduce like there's no tomorrow. "Based solely on numbers and biomass, insects are the most successful animals on earth," the authors claim. You can't argue with that.
/**
 * A dialog which uses fingerprint APIs to authenticate the user, and falls back to password
 * authentication if fingerprint is not available.
 *
 * State machine: {@link Stage#FINGERPRINT} (scanner active) -> {@link Stage#PASSWORD}
 * (manual fallback) or {@link Stage#NEW_FINGERPRINT_ENROLLED} (password re-entry required
 * after a new fingerprint was enrolled on the device).
 */
@SuppressWarnings("ResourceType")
public class FingerprintDialog extends DialogFragment
        implements TextView.OnEditorActionListener, DigitusCallback {

    /** Contract the hosting Activity must implement (enforced in {@link #onAttach}). */
    public interface Callback {
        void onFingerprintDialogAuthenticated();

        void onFingerprintDialogVerifyPassword(FingerprintDialog dialog, String password);
    }

    // How long an error message stays on screen before resetting to the hint.
    static final long ERROR_TIMEOUT_MILLIS = 1600;
    // Delay after a successful scan before dismissing, so the success icon is visible.
    static final long SUCCESS_DELAY_MILLIS = 1300;
    // Fragment-manager tag used to find/replace an already visible instance.
    static final String TAG = "[DIGITUS_FPDIALOG]";

    private View mFingerprintContent;
    private View mBackupContent;
    private EditText mPassword;
    private CheckBox mUseFingerprintFutureCheckBox;
    private TextView mPasswordDescriptionTextView;
    private TextView mNewFingerprintEnrolledTextView;
    private ImageView mFingerprintIcon;
    private TextView mFingerprintStatus;
    private Stage mStage = Stage.FINGERPRINT;
    private Digitus mDigitus;
    private Callback mCallback;

    public FingerprintDialog() {
    }

    /**
     * Shows the dialog, dismissing any instance already visible. The host must be a
     * FragmentActivity implementing {@link Callback}; arguments are passed via the
     * fragment Bundle so they survive recreation.
     */
    public static <T extends FragmentActivity & Callback> FingerprintDialog show(T context, String keyName, int requestCode) {
        FingerprintDialog dialog = getVisible(context);
        if (dialog != null) dialog.dismiss();
        dialog = new FingerprintDialog();
        Bundle args = new Bundle();
        args.putString("key_name", keyName);
        args.putInt("request_code", requestCode);
        // Remember whether the Activity itself owned the Digitus singleton so
        // redirectToActivity() can hand it back on dismiss.
        args.putBoolean("was_initialized", Digitus.get() != null && Digitus.get().mCallback == context);
        dialog.setArguments(args);
        dialog.show(context.getSupportFragmentManager(), TAG);
        return dialog;
    }

    /** Returns the currently visible instance attached to this activity, or null. */
    public static <T extends FragmentActivity> FingerprintDialog getVisible(T context) {
        Fragment frag = context.getSupportFragmentManager().findFragmentByTag(TAG);
        if (frag != null && frag instanceof FingerprintDialog)
            return (FingerprintDialog) frag;
        return null;
    }

    @Override
    public void onSaveInstanceState(Bundle outState) {
        super.onSaveInstanceState(outState);
        // Preserve the authentication stage across configuration changes.
        outState.putSerializable("stage", mStage);
    }

    @NonNull
    @Override
    public Dialog onCreateDialog(Bundle savedInstanceState) {
        if (getArguments() == null || !getArguments().containsKey("key_name"))
            throw new IllegalStateException("FingerprintDialog must be shown with show(Activity, String, int).");
        else if (savedInstanceState != null)
            mStage = (Stage) savedInstanceState.getSerializable("stage");
        // autoDismiss(false): button presses are handled manually below so the
        // dialog can stay open while switching stages / verifying a password.
        MaterialDialog dialog = new MaterialDialog.Builder(getActivity())
                .title(R.string.sign_in)
                .customView(R.layout.fingerprint_dialog_container, false)
                .positiveText(android.R.string.cancel)
                .negativeText(R.string.use_password)
                .autoDismiss(false)
                .onPositive(new MaterialDialog.SingleButtonCallback() {
                    @Override
                    public void onClick(@NonNull MaterialDialog materialDialog, @NonNull DialogAction dialogAction) {
                        materialDialog.dismiss();
                    }
                })
                .onNegative(new MaterialDialog.SingleButtonCallback() {
                    @Override
                    public void onClick(@NonNull MaterialDialog materialDialog, @NonNull DialogAction dialogAction) {
                        // In fingerprint stage the negative button means "use password";
                        // in password stage it means "OK" (submit).
                        if (mStage == Stage.FINGERPRINT) {
                            goToBackup(materialDialog);
                        } else {
                            verifyPassword();
                        }
                    }
                })
                .build();
        final View v = dialog.getCustomView();
        assert v != null;
        mFingerprintContent = v.findViewById(R.id.fingerprint_container);
        mBackupContent = v.findViewById(R.id.backup_container);
        mPassword = (EditText) v.findViewById(R.id.password);
        mPassword.setOnEditorActionListener(this);
        mPasswordDescriptionTextView = (TextView) v.findViewById(R.id.password_description);
        mUseFingerprintFutureCheckBox = (CheckBox) v.findViewById(R.id.use_fingerprint_in_future_check);
        mNewFingerprintEnrolledTextView = (TextView) v.findViewById(R.id.new_fingerprint_enrolled_description);
        mFingerprintIcon = (ImageView) v.findViewById(R.id.fingerprint_icon);
        mFingerprintStatus = (TextView) v.findViewById(R.id.fingerprint_status);
        mFingerprintStatus.setText(R.string.initializing);
        return dialog;
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        updateStage(null);
    }

    @Override
    public void onResume() {
        super.onResume();
        // (Re)bind the Digitus fingerprint helper to this dialog while visible.
        mDigitus = Digitus.init(getActivity(), getArguments().getString("key_name", ""),
                getArguments().getInt("request_code", -1), FingerprintDialog.this);
    }

    @Override
    public void onPause() {
        super.onPause();
        if (Digitus.get() != null)
            Digitus.get().stopListening();
    }

    @Override
    public void onCancel(DialogInterface dialog) {
        super.onCancel(dialog);
        redirectToActivity();
    }

    @Override
    public void onDismiss(DialogInterface dialog) {
        super.onDismiss(dialog);
        redirectToActivity();
    }

    /**
     * Tears down this dialog's Digitus binding and, if the host Activity owned the
     * singleton before the dialog was shown (see "was_initialized"), re-initializes
     * it with the Activity as the callback.
     */
    private void redirectToActivity() {
        Digitus.deinit();
        if (getActivity() != null && getActivity() instanceof DigitusCallback &&
                getArguments().getBoolean("was_initialized", false)) {
            Digitus.init(getActivity(), getArguments().getString("key_name", ""),
                    getArguments().getInt("request_code", -1), (DigitusCallback) getActivity());
        }
    }

    @Override
    public void onAttach(Activity activity) {
        super.onAttach(activity);
        if (!(activity instanceof Callback)) {
            Digitus.deinit();
            throw new IllegalStateException("Activities showing a FingerprintDialog must implement FingerprintDialog.Callback.");
        }
        mCallback = (Callback) activity;
    }

    /**
     * Switches to backup (password) screen. This either can happen when fingerprint is not
     * available or the user chooses to use the password authentication method by pressing the
     * button. This can also happen when the user had too many fingerprint attempts.
     */
    private void goToBackup(MaterialDialog dialog) {
        mStage = Stage.PASSWORD;
        updateStage(dialog);
        mPassword.requestFocus();
        // Show the keyboard.
        mPassword.postDelayed(mShowKeyboardRunnable, 500);
        // Fingerprint is not used anymore. Stop listening for it.
        mDigitus.stopListening();
    }

    private void toggleButtonsEnabled(boolean enabled) {
        MaterialDialog dialog = (MaterialDialog) getDialog();
        dialog.getActionButton(DialogAction.POSITIVE).setEnabled(enabled);
        dialog.getActionButton(DialogAction.NEGATIVE).setEnabled(enabled);
    }

    /** Hands the typed password to the host Activity; buttons stay disabled until it answers. */
    private void verifyPassword() {
        toggleButtonsEnabled(false);
        mCallback.onFingerprintDialogVerifyPassword(this, mPassword.getText().toString());
    }

    /**
     * Called back by the host Activity with the result of the password check started
     * in {@link #verifyPassword()}.
     */
    public void notifyPasswordValidation(boolean valid) {
        final MaterialDialog dialog = (MaterialDialog) getDialog();
        final View positive = dialog.getActionButton(DialogAction.POSITIVE);
        final View negative = dialog.getActionButton(DialogAction.NEGATIVE);
        toggleButtonsEnabled(true);
        if (valid) {
            if (mStage == Stage.NEW_FINGERPRINT_ENROLLED &&
                    mUseFingerprintFutureCheckBox.isChecked()) {
                // Re-create the key so that fingerprints including new ones are validated.
                Digitus.get().recreateKey();
                mStage = Stage.FINGERPRINT;
            }
            mPassword.setText("");
            mCallback.onFingerprintDialogAuthenticated();
            dismiss();
        } else {
            // Wrong password: show inline error and tint the widgets red.
            mPasswordDescriptionTextView.setText(R.string.password_not_recognized);
            final int red = ContextCompat.getColor(getActivity(), R.color.material_red_500);
            MDTintHelper.setTint(mPassword, red);
            ((TextView) positive).setTextColor(red);
            ((TextView) negative).setTextColor(red);
        }
    }

    private final Runnable mShowKeyboardRunnable = new Runnable() {
        @Override
        public void run() {
            if (mDigitus != null)
                mDigitus.mInputMethodManager.showSoftInput(mPassword, 0);
        }
    };

    /** Syncs button labels and which pane (scanner vs. password form) is visible with mStage. */
    private void updateStage(@Nullable MaterialDialog dialog) {
        if (dialog == null)
            dialog = (MaterialDialog) getDialog();
        if (dialog == null) return;
        switch (mStage) {
            case FINGERPRINT:
                dialog.setActionButton(DialogAction.POSITIVE, android.R.string.cancel);
                dialog.setActionButton(DialogAction.NEGATIVE, R.string.use_password);
                mFingerprintContent.setVisibility(View.VISIBLE);
                mBackupContent.setVisibility(View.GONE);
                break;
            case NEW_FINGERPRINT_ENROLLED:
                // Intentional fall through
            case PASSWORD:
                dialog.setActionButton(DialogAction.POSITIVE, android.R.string.cancel);
                dialog.setActionButton(DialogAction.NEGATIVE, android.R.string.ok);
                mFingerprintContent.setVisibility(View.GONE);
                mBackupContent.setVisibility(View.VISIBLE);
                if (mStage == Stage.NEW_FINGERPRINT_ENROLLED) {
                    mPasswordDescriptionTextView.setVisibility(View.GONE);
                    mNewFingerprintEnrolledTextView.setVisibility(View.VISIBLE);
                    mUseFingerprintFutureCheckBox.setVisibility(View.VISIBLE);
                }
                break;
        }
    }

    @Override
    public boolean onEditorAction(TextView v, int actionId, KeyEvent event) {
        // "Go" on the soft keyboard submits the password.
        if (actionId == EditorInfo.IME_ACTION_GO) {
            verifyPassword();
            return true;
        }
        return false;
    }

    /**
     * Enumeration to indicate which authentication method the user is trying to authenticate with.
     */
    public enum Stage {
        FINGERPRINT,
        NEW_FINGERPRINT_ENROLLED,
        PASSWORD
    }

    /** Shows an error on the fingerprint pane, auto-resetting after ERROR_TIMEOUT_MILLIS. */
    private void showError(CharSequence error) {
        if (getActivity() == null) return;
        mFingerprintIcon.setImageResource(R.drawable.ic_fingerprint_error);
        mFingerprintStatus.setText(error);
        mFingerprintStatus.setTextColor(ContextCompat.getColor(getActivity(), R.color.warning_color));
        mFingerprintStatus.removeCallbacks(mResetErrorTextRunnable);
        mFingerprintStatus.postDelayed(mResetErrorTextRunnable, ERROR_TIMEOUT_MILLIS);
    }

    Runnable mResetErrorTextRunnable = new Runnable() {
        @Override
        public void run() {
            if (getActivity() == null) return;
            mFingerprintStatus.setTextColor(ContextCompat.getColor(getActivity(), R.color.hint_color));
            mFingerprintStatus.setText(getResources().getString(R.string.fingerprint_hint));
            mFingerprintIcon.setImageResource(R.drawable.ic_fp_40px);
        }
    };

    // Digitus callbacks

    @Override
    public void onDigitusReady(Digitus digitus) {
        digitus.startListening();
    }

    @Override
    public void onDigitusListening(boolean newFingerprint) {
        mFingerprintStatus.setText(R.string.fingerprint_hint);
        if (newFingerprint)
            mStage = Stage.NEW_FINGERPRINT_ENROLLED;
        updateStage(null);
    }

    @Override
    public void onDigitusAuthenticated(Digitus digitus) {
        toggleButtonsEnabled(false);
        mFingerprintStatus.removeCallbacks(mResetErrorTextRunnable);
        mFingerprintIcon.setImageResource(R.drawable.ic_fingerprint_success);
        mFingerprintStatus.setTextColor(ContextCompat.getColor(getActivity(), R.color.success_color));
        mFingerprintStatus.setText(getResources().getString(R.string.fingerprint_success));
        // Keep the success state visible briefly before notifying and dismissing.
        mFingerprintIcon.postDelayed(new Runnable() {
            @Override
            public void run() {
                mCallback.onFingerprintDialogAuthenticated();
                dismiss();
            }
        }, SUCCESS_DELAY_MILLIS);
    }

    @Override
    public void onDigitusError(Digitus digitus, DigitusErrorType type, Exception e) {
        switch (type) {
            case FINGERPRINTS_UNSUPPORTED:
                goToBackup(null);
                break;
            case UNRECOVERABLE_ERROR:
            case PERMISSION_DENIED:
                // Show the error, then fall back to the password screen.
                showError(e.getMessage());
                mFingerprintIcon.postDelayed(new Runnable() {
                    @Override
                    public void run() {
                        goToBackup(null);
                    }
                }, ERROR_TIMEOUT_MILLIS);
                break;
            case REGISTRATION_NEEDED:
                mPasswordDescriptionTextView.setText(R.string.no_fingerprints_registered);
                goToBackup(null);
                break;
            case HELP_ERROR:
                showError(e.getMessage());
                break;
            case FINGERPRINT_NOT_RECOGNIZED:
                showError(getResources().getString(R.string.fingerprint_not_recognized));
                break;
        }
    }
}
// Library object that contains external method definitions. If the list of external calls // contains any null handles during execution, the library will attempt to load a // dynamic library file at specified path. // Symbols are loaded lazily at runtime unless specified otherwise. class library : public handle<class library_data, sizeof(size_t) * 16> { public: library(std::string_view path, bool preload_symbols, std::span<const external_call> calls); library(std::string_view path, bool preload_symbols, std::initializer_list<external_call> calls) : library(path, preload_symbols, init_span(calls)) {} ~library(); private: friend class environment; }
Attack-induced changes in response to decapitation of plasma catecholamines of victim mice. Male mice of 3 different strains (NIH, C57BR/cdJ and A/HeJ) were exposed individually (victims) to attack by trained fighter Swiss Webster mice for 10 minutes daily for various numbers of days. Immediately after the last attack the victim mice were decapitated along with unattacked control mice of the appropriate strain, and plasma norepinephrine (NE) and epinephrine (E) were measured. The concentration of NE was significantly lower in decapitated C57BR/cdJ mice than in the other 2 strains. The concentrations of E and NE in the plasma of decapitated C57BR/cdJ and A/HeJ mice were significantly greater after 4 days of attacks, whereas in NIH mice plasma levels of only NE were greater and this occurred only after 14 days of attacks. After 7 days of exposure to attack, C57BR/cdJ and A/HeJ victim mice were permitted to rest for various periods of time. In C57BR/cdJ animals plasma E returned to almost normal levels after 2 days, and NE after 4 days, while in A/HeJ mice plasma E and NE returned to control levels already after 1 day of rest.
/* ****************************************************** */
/* Main Entrypoint - Draw contents of Outliner editor */

void draw_outliner(const bContext *C)
{
	Main *mainvar = CTX_data_main(C);
	Scene *scene = CTX_data_scene(C);
	ARegion *ar = CTX_wm_region(C);
	View2D *v2d = &ar->v2d;
	SpaceOops *soops = CTX_wm_space_outliner(C);
	uiBlock *block;
	int sizey = 0, sizex = 0, sizex_rna = 0;
	TreeElement *te_edit = NULL;

	/* Rebuild the tree, then measure its height to size the scrollable view. */
	outliner_build_tree(mainvar, scene, soops);
	outliner_height(soops, &soops->tree, &sizey);

	if (ELEM(soops->outlinevis, SO_DATABLOCKS, SO_USERDEF)) {
		/* RNA display modes get an extra column of RNA buttons: compute its
		 * width first, clamped to a minimum, then add it to the total. */
		outliner_rna_width(soops, &soops->tree, &sizex_rna, 0);
		sizex_rna = max_ii(OL_RNA_COLX, sizex_rna + OL_RNA_COL_SPACEX);
		sizex = sizex_rna + OL_RNA_COL_SIZEX + 50;
	}
	else {
		outliner_rna_width(soops, &soops->tree, &sizex, 0);
		/* Reserve room for the three restriction-toggle columns when shown. */
		if ((soops->flag & SO_HIDE_RESTRICTCOLS) == 0) sizex += OL_TOGW * 3;
	}

	sizey += OL_Y_OFFSET;

	/* Publish the computed extents to the 2D view before drawing. */
	UI_view2d_totRect_set(v2d, sizex, sizey);

	/* Pixel-aligned offsets avoid blurry text/icons while scrolling. */
	v2d->flag |= (V2D_PIXELOFS_X | V2D_PIXELOFS_Y);
	UI_view2d_view_ortho(v2d);

	outliner_back(ar);
	block = UI_block_begin(C, ar, __func__, UI_EMBOSS);
	outliner_draw_tree((bContext *)C, block, scene, ar, soops, &te_edit);

	if (ELEM(soops->outlinevis, SO_DATABLOCKS, SO_USERDEF)) {
		/* RNA modes: column headers plus the RNA property buttons. */
		outliner_draw_rnacols(ar, sizex_rna);
		outliner_draw_rnabuts(block, ar, soops, sizex_rna, &soops->tree);
	}
	else if ((soops->outlinevis == SO_ID_ORPHANS) && !(soops->flag & SO_HIDE_RESTRICTCOLS)) {
		/* Orphan-data mode: user-count buttons instead of restriction toggles. */
		outliner_draw_restrictcols(ar);
		outliner_draw_userbuts(block, ar, soops, &soops->tree);
	}
	else if (!(soops->flag & SO_HIDE_RESTRICTCOLS)) {
		/* Default modes: visibility/selectability/renderability toggles. */
		outliner_draw_restrictcols(ar);
		outliner_draw_restrictbuts(block, scene, ar, soops, &soops->tree);
	}

	/* An element is being renamed: draw its in-place text button. */
	if (te_edit) {
		outliner_buttons(C, block, ar, te_edit);
	}

	UI_block_end(C, block);
	UI_block_draw(C, block);

	soops->storeflag &= ~SO_TREESTORE_REDRAW;
}
#ifndef SYSTHREAD_H
#define SYSTHREAD_H

#include <QThread>
#include <QApplication>
#include <QQmlApplicationEngine>
#include <QQmlContext>
#include <QWindow>
#include <QObject>
#include <QDebug>
#include <QX11Info>
#include <QQuickImageProvider>
#include <QList>
#include <QVariant>
#include <QMetaObject>
#include <QHash>
#include "context.h"
#include "systray.h"

// Background worker thread (QThread subclass); its work loop lives in run(),
// which is defined in the corresponding .cpp file.
class SysThread : public QThread
{
    Q_OBJECT
public:
    // QThread entry point; executed on the new thread when start() is called.
    void run();
    // NOTE(review): raw public pointers with no ownership semantics here --
    // presumably the application's main window and shared context, both owned
    // elsewhere; confirm at the call sites that assign them.
    QWindow *main;
    Context *ctx;
};

#endif // SYSTHREAD_H
// Make copy of frontend keys to prevent itterator invalidation. // Requires lock on c.mu. func (c *Cache) keys(frontend int) []Key { m := c.frontends[frontend] keys := make([]Key, 0, len(m)) for k := range m { keys = append(keys, k) } return keys }
def check(s):
    """Return True when ``s`` is made of pairwise-distinct letters whose
    character codes form one contiguous run (e.g. "bca" -> True, "abd" -> False).

    An empty string returns False (the original crashed on max() of an
    empty set here).
    """
    codes = set(map(ord, s))  # was map(ord, (i for i in s)) -- redundant generator
    if not codes:
        return False
    return len(codes) == len(s) and max(codes) - min(codes) == len(s) - 1


def main():
    """Read a count, then that many strings, printing Yes/No per string."""
    n = int(input())
    for _ in range(n):
        print("Yes" if check(input()) else "No")


if __name__ == "__main__":
    main()
// appendString appends to slice all strings from v and returns it. func appendString(slice []string, v reflect.Value) []string { k := v.Kind() if k == reflect.String { return append(slice, v.String()) } if (k == reflect.Slice || k == reflect.Array) && v.Type().Elem().Kind() == reflect.String { for i := 0; i < v.Len(); i++ { slice = append(slice, v.Index(i).String()) } } return slice }
def __checkCoords(self, ri_coord, coord):
    """Return True when ``coord`` lies strictly within ``self.tile_scope`` of
    every point in ``ri_coord`` (componentwise) and is not already one of
    those points; otherwise False.

    # NOTE(review): assumes ri_coord is reshapeable to (-1, 2) and coord to
    # (2,), i.e. 2-D point data -- confirm against callers.
    """
    diff = ri_coord.reshape(-1, 2) - coord.reshape(2)
    # Bounding the two extremes bounds everything: if |min(diff)| and
    # |max(diff)| are both < tile_scope, every difference lies in
    # (-tile_scope, tile_scope).
    if (np.abs(np.min(diff)) < self.tile_scope) & (np.abs(np.max(diff)) < self.tile_scope):
        coord_ls1 = ri_coord.reshape(-1, 2).tolist()
        coord_ls2 = coord.reshape(2).tolist()
        # Reject exact duplicates of an existing coordinate.
        if coord_ls2 in coord_ls1:
            return False
        return True
    else:
        return False
def _CleanupModules(self):
    """Restore sys.modules to the snapshot in self._saved_sys_modules.

    Modules present in the snapshot are restored to their saved objects;
    modules living under _PYTHON_LIB_PREFIX (the standard library install)
    are left alone; everything else imported since the snapshot is removed
    from sys.modules and detached from its parent package.

    # NOTE(review): Python 2 code (dict.iteritems) -- keep as-is unless the
    # whole file is ported.
    """
    dirty = set()  # fully-qualified names of modules to evict
    for full_name, module in sys.modules.iteritems():
        if full_name in self._saved_sys_modules:
            # Known from the snapshot: put the saved module object back.
            sys.modules[full_name] = self._saved_sys_modules[full_name]
            continue
        if (hasattr(module, '__file__') and
            module.__file__.startswith(_PYTHON_LIB_PREFIX)):
            # Standard-library module: safe to keep.
            continue
        dirty.add(full_name)
    for full_name in dirty:
        module = sys.modules.pop(full_name)
        if '.' in full_name:
            # Also remove the module attribute from its parent package, but
            # only when the parent itself survives the cleanup.
            package_name, module_name = full_name.rsplit('.', 1)
            if package_name not in dirty:
                package = sys.modules.get(package_name)
                if package is None:
                    continue
                # NOTE(review): redundant re-lookup -- `package` was just
                # fetched (and checked non-None) above.
                package = sys.modules[package_name]
                try:
                    if getattr(package, module_name) != module:
                        logging.warning('cleanup of inconsistent module %s', full_name)
                    delattr(package, module_name)
                except AttributeError:
                    pass
package com.prowidesoftware.swift.model.mx.dic;

import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlType;


/**
 * <p>Java class for AgreementFramework1Code.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 * <p>
 * <pre>
 * &lt;simpleType name="AgreementFramework1Code"&gt;
 *   &lt;restriction base="{http://www.w3.org/2001/XMLSchema}string"&gt;
 *     &lt;enumeration value="FBAA"/&gt;
 *     &lt;enumeration value="BBAA"/&gt;
 *     &lt;enumeration value="DERV"/&gt;
 *     &lt;enumeration value="ISDA"/&gt;
 *     &lt;enumeration value="NONR"/&gt;
 *   &lt;/restriction&gt;
 * &lt;/simpleType&gt;
 * </pre>
 *
 */
@XmlType(name = "AgreementFramework1Code")
@XmlEnum
public enum AgreementFramework1Code {


    /**
     * French Banker's Association Agreement.
     *
     */
    FBAA,

    /**
     * British Banker's Association Agreement.
     *
     */
    BBAA,

    /**
     * German Rahmenvertrag Agreement.
     *
     */
    DERV,

    /**
     * International Swaps and Derivatives Association Agreement.
     *
     */
    ISDA,

    /**
     * No information about the master agreement is available.
     *
     */
    NONR;

    /**
     * Returns the XML/schema value of this constant (identical to its name).
     */
    public String value() {
        return name();
    }

    /**
     * Returns the constant for the given schema value.
     * Throws IllegalArgumentException for an unknown value (valueOf contract).
     */
    public static AgreementFramework1Code fromValue(String v) {
        return valueOf(v);
    }

}
#include <bits/stdc++.h> using namespace std; #define sz(x) ((int) (x).size()) #define forn(i,n) for (int i = 0; i < int(n); ++i) typedef long long ll; typedef long long i64; typedef long double ld; const int inf = int(1e9) + int(1e5); const ll infl = ll(2e18) + ll(1e10); const int maxn = 1030; int cnt[maxn]; int cnt2[maxn]; int main() { #ifdef LOCAL assert(freopen("c.in", "r", stdin)); #else #endif int n, k, x; cin >> n >> k >> x; forn (i, n) { int a; scanf("%d", &a); cnt[a]++; } forn (iter, k) { int carry = 1; forn (i, maxn) { int m = (carry + cnt[i]) >> 1; cnt2[i] += cnt[i] - m; cnt2[i ^ x] += m; carry ^= (cnt[i] & 1); } memcpy(cnt, cnt2, sizeof(cnt)); memset(cnt2, 0, sizeof(cnt2)); } int nn = 0; forn (i, maxn) forn (j, cnt[i]) { ++nn; //cerr << i << ' '; } //cerr << '\n'; assert(nn == n); int mx = maxn - 1; while (cnt[mx] == 0) --mx; int mn = 0; while (cnt[mn] == 0) ++mn; cout << mx << ' ' << mn << '\n'; }
<reponame>vnherdeiro/electron-hearbeat-monitor import { Component, ViewChild, ElementRef } from '@angular/core'; import { Subscription, fromEvent, Observable } from 'rxjs'; import { filter, map, pairwise, scan, bufferCount, share } from 'rxjs/operators'; import * as _ from 'lodash'; @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.css'] }) export class AppComponent { title = 'hearbeat-monitor'; delays$:Observable<number>; heartrate$:Observable<number>; moving_avg$:Observable<number>; // max$:Observable<number>; // min$:Observable<number>; avg_window = 10; avg_exp_decay = 3.; weights:number[] = []; @ViewChild('video') video_ref:ElementRef; //coefficient for a sigmoid mapping x -> a + b/(1+exp(x/100)) of the heartrate to a playback speed a = 2.1766 b = -3.7245 constructor(){ for( let i = -this.avg_window+1; i <= 0; i++){ this.weights.push( Math.exp(i/this.avg_exp_decay)); } this.weights.reverse(); } ngOnInit() { this.delays$ = fromEvent(document, 'keypress') .pipe( filter( x => x['code'] === 'Space'), //collects spacebar keydowns map( x => Date.now()), //maps keystrokes to the event time pairwise(), //groups them by following pairs map( x => (x[1]-x[0])/1000), //computes time gaps in seconds ); this.heartrate$ = this.delays$.pipe( map( x => 60. 
/ +x) ); this.moving_avg$ = this.heartrate$.pipe( bufferCount( this.avg_window, 1), //stores the previous avg_window values with forward steps of 1 map( x => this.weightedAvg(x, this.weights)), share() ); this.moving_avg$.subscribe( val => this.setVideoPlaybackRate( val)); // this.max$ = this.moving_avg$.pipe( scan( Math.max, 0)); // this.min$ = this.moving_avg$.pipe( scan( Math.min, 666)); } weightedAvg( arr:number[], weights:number[]){ arr.reverse(); let sum_values:number = 0; let sum_weights:number = 0; for( let index in arr){ sum_values += arr[index]*weights[index]; sum_weights += weights[index] } let ret = sum_values/sum_weights; return ret; } setVideoPlaybackRate( heartrate:number){ if( this.video_ref) { this.video_ref.nativeElement.playbackRate = this.heartrate2playback( heartrate); } } heartrate2playback( heartrate:number){ return this.a + this.b/( 1+Math.exp(heartrate/100)) } }
#include<bits/stdc++.h> using namespace std; int main() { string sub[300],s; int n,sn=-1; cin>>n>>s; for(int i=0;i<300;i++)sub[i] = ""; for(int i=0,j=0;i<n;i++){ if(s[i]>='A' && s[i]<='Z' && i!=0){ if(s[i-1]>='A' && s[i-1]<='Z'){ continue; } j++; sn = j+1; continue; } if(s[i]>='A' && s[i]<='Z')continue; sub[j] = sub[j]+s[i]; sn = j+1; } if(sn==-1){ cout<<"0"<<endl; return 0; } //cout<<sn<<endl; int ans=0; for(int i=0;i<sn;i++){ string tmp = sub[i]; //cout<<tmp<<endl; int tans=0; bool used[26]; memset(used,0,sizeof(used)); for(int j=0;j<tmp.size();j++){ if(!used[tmp[j]-'a']){ tans++; used[tmp[j]-'a']=true; } } if(tans>ans){ ans=tans; } } cout<<ans<<endl; return 0; }
import numpy as np
from scipy.sparse import csr_matrix
from . subdivide_quad import subdivide_quad

# Fixed: removed stray "<reponame>..." scaffolding text that preceded the
# imports and made the file syntactically invalid.


def initialize_quadtree(P,max_depth=8,min_depth=1,graded=False,vmin=None,vmax=None):
    """Build an adaptively refined (optionally graded) quadtree over P.

    The tree is more subdivided in regions containing points of P, and keeps
    full parenthood and adjacency information so that traversals and
    differential quantities are easy to compute. This code is *purposefully*
    not optimized beyond asymptotics, for simplicity in understanding its
    functionality and translating it to other languages beyond prototyping.

    Parameters
    ----------
    P : (#P, 2) array of points; the tree refines where points are.
    max_depth : int, maximum tree depth (min edge length will be
        bounding_box_length * 2**(-max_depth)).
    min_depth : int, minimum tree depth (depth one is a single box).
    graded : bool, whether to ensure adjacent quads differ by at most one
        level of depth (useful for numerical applications, not so much for
        others like position queries).
    vmin, vmax : optional bounding-box corners; computed from P when None.

    Returns
    -------
    C : (#nodes, 2) cell centers.
    W : (#nodes,) cell widths (**not** half widths).
    CH : (#nodes, 4) child indices (-1 if leaf node).
    PAR : (#nodes,) immediate parent indices (to traverse upwards).
    D : (#nodes,) tree depths.
    A : (#nodes, #nodes) sparse adjacency matrix, where a value of a in the
        (i, j) entry means node j is to the a-th direction of i
        (a=1: left; a=2: right; a=3: bottom; a=4: top).
    """
    # We start with a bounding box
    if (vmin is None):
        vmin = np.amin(P,axis=0)
    if (vmax is None):
        vmax = np.amax(P,axis=0)
    C = (vmin + vmax)/2.0
    C = C[None,:]
    #print(C)
    W = np.array([np.amax(vmax-vmin)])
    CH = np.array([[-1,-1,-1,-1]],dtype=int) # for now it's leaf node
    D = np.array([1],dtype=int)
    A = csr_matrix((1,1))
    PAR = np.array([-1],dtype=int) # supreme Neanderthal ancestral node
    # Breadth-like loop: process nodes in creation order; subdivisions
    # append to C, so the loop naturally visits newly created children too.
    quad_ind = -1
    while True:
        quad_ind = quad_ind + 1
        if quad_ind>=C.shape[0]:
            break
        is_child = (CH[quad_ind,1]==-1)
        # Does this quad contain any point? (Or is it below our min depth)
        if ((D[quad_ind]<min_depth or np.any(is_in_quad(P,C[quad_ind,:],W[quad_ind]))) and D[quad_ind]<max_depth and is_child):
            # If it does, subdivide it
            C,W,CH,PAR,D,A = subdivide_quad(quad_ind,C,W,CH,PAR,D,A,graded)
    return C,W,CH,PAR,D,A


def is_in_quad(queries,center,width):
    """Return a boolean mask: which rows of `queries` lie inside the closed
    axis-aligned square with the given center and (full) width."""
    max_corner = center + width*np.array([0.5,0.5])
    min_corner = center - width*np.array([0.5,0.5])
    return ( (queries[:,0]>=min_corner[0]) & (queries[:,1]>=min_corner[1]) & (queries[:,0]<=max_corner[0]) & (queries[:,1]<=max_corner[1]) )
class ReversiView:
    '''
    Creates a window with the reversi board and controls the game using the gui.
    '''
    def __init__(self, boardSize=8, w=850, h=410):
        '''
        :param boardSize: number of squares per board side
        :param w: width of the window
        :param h: height of the window
        '''
        self.root = Tk()
        self.boardSize = boardSize
        # stone_board[x][y] holds the canvas item id of the stone drawn at
        # [x, y], or -1 when the square is empty.
        self.stone_board = [-1] * self.boardSize
        for row in range(self.boardSize):
            self.stone_board[row] = [-1] * self.boardSize
        self.w = w
        self.h = h
        self.offx = 5
        self.offy = 5
        self.gridw = 410
        self.gridh = 410
        self.gridspacing = 50
        # fraction of a grid square covered by a stone's oval
        self.ovalDiamPart = 0.8
        # index 0 -> player 1 color, index 1 -> player 2 color
        self.colors = ["blue", "red"]
        self.root.title("Reversi")
        self.interactive_player_ids = []
        self.interactivePlayers = []
        self.interractivePlayerName = 'Interactive'
        # maps dropdown entry name -> player class (-1 for the human player)
        self.possiblePlayers = {
            self.interractivePlayerName: -1,
        }
        self.wrong_move = False
        # center the window on the screen
        ws = self.root.winfo_screenwidth()
        hs = self.root.winfo_screenheight()
        x = (ws / 2) - (self.w / 2)
        y = (hs / 2) - (self.h / 2)
        self.root.geometry('%dx%d+%d+%d' % (self.w, self.h, x, y))
        self.draw_game_grid()
        self.draw_game_info_grid()
        self.game_state = GameState.STOPPED

    def set_game(self, game):
        '''
        Sets the game to the GUI.
        '''
        self.game = game

    def set_board(self, board):
        '''
        Sets the game board to the GUI.
        '''
        self.board = board

    def draw_stone(self, x, y, color):
        '''
        Draw stone on position [x,y] in gui
        :param x: x coordinate of the stone
        :param y: y coordinate of the stone
        :param color: 0 for blue, 1 for red
        '''
        x_coord = (self.gridspacing * x) + (1.0 - self.ovalDiamPart) * self.gridspacing
        y_coord = (self.gridspacing * y) + (1.0 - self.ovalDiamPart) * self.gridspacing
        diameter = self.ovalDiamPart * self.gridspacing
        # remove any previous stone on this square before drawing the new one
        self.clear_stone(x, y)
        self.stone_board[x][y] = self.grid.create_oval(x_coord, y_coord, x_coord + diameter, y_coord + diameter, fill=self.colors[color])

    def clear_stone(self, x, y):
        '''
        Delete stone on position [x,y] from the gui
        :param x: x coordinate of the stone
        :param y: y coordinate of the stone
        '''
        if self.stone_board[x][y] != -1:
            self.grid.delete(self.stone_board[x][y])
            self.stone_board[x][y] = -1

    def draw_game_info_grid(self):
        '''
        Draw control and inform part of game to right side of the window.
        '''
        self.info = Canvas(self.root, height=self.h - self.gridh, width=self.w - self.gridw)
        self.info.pack(side="left")
        label_stones = Label(self.info, text="Current stones:", font=("Helvetica", 10))
        label_stones.grid(row=1, column=0)
        label_max_time = Label(self.info, text="Max time:", font=("Helvetica", 10))
        label_max_time.grid(row=2, column=0)
        label_scale = Label(self.info, text='Game speed [ms]:', font=("Helvetica", 10), foreground='black')
        label_scale.grid(row=5, column=0)
        helv36 = font.Font(family="helvetica", size=16, weight='bold')
        # slider controlling the delay between moves
        self.scale_var = IntVar()
        scale = Scale(self.info, variable=self.scale_var, command=self.sleep_time_change_handler, from_=0, to=1000, resolution=10, width="15", orient=HORIZONTAL, length="225")
        scale.set(200)
        scale.grid(row=5, column=1, columnspan=3)
        self.button = Button(self.info, text="Play", width="20", height="2", command=self.play_button_click_handler)
        self.button['font'] = helv36
        self.button.grid(row=6, column=0, columnspan=4)
        # labels for num stones, max time of move, etc
        self.label_player_stones = [-1, -1]
        self.label_player_max_time = [-1, -1]
        self.labels_inform = [-1, -1]
        self.labels_player_name = [-1, -1]
        self.option_menus = [-1, -1]
        self.option_menus_vars = [-1, -1]
        # one row of widgets per player, colored with that player's color
        for i in range(2):
            self.label_player_stones[i] = Label(self.info, text='2', font=("Helvetica", 10), foreground=self.colors[i])
            self.label_player_stones[i].grid(row=1, column=2 * (i + 1) - 1, columnspan=2)
            self.label_player_max_time[i] = Label(self.info, text="%.2f [ms]" % 0.0, font=("Helvetica", 10), foreground=self.colors[i])
            self.label_player_max_time[i].grid(row=2, column=2 * (i + 1) - 1, columnspan=2)
            self.labels_inform[i] = Label(self.info, text='', font=("Helvetica", 10), foreground='black')
            self.labels_inform[i].grid(row=i + 3, column=0, columnspan=4)
            self.labels_player_name[i] = Label(self.info, text="Player%d:" % (i), font=("Helvetica", 12), foreground=self.colors[i])
            self.labels_player_name[i].grid(row=0, column=2 * i)
            self.option_menus_vars[i] = StringVar(self.root)
            self.option_menus_vars[i].set(self.interractivePlayerName)
            self.option_menus[i] = OptionMenu(self.info, self.option_menus_vars[i], *self.possiblePlayers)
            self.option_menus[i].grid(row=0, column=2 * i + 1)

    def draw_game_grid(self):
        '''
        Draw empty 8x8 grid on the left side of the window.
        '''
        self.grid = Canvas(self.root, bg="white", height=self.gridh, width=self.gridw)
        self.grid.bind("<Button 1>", self.place_stone_click_handler)
        gridsize = self.boardSize
        offy = self.offy
        offx = self.offx
        w = self.gridw
        h = self.gridh
        spacing = self.gridspacing
        # line around
        # NOTE(review): the final point is (offx, offx); (offx, offy) was
        # probably intended to close the border — verify the rendering.
        self.grid.create_line(offx, offy, offx, h - offy, w - offx, h - offy, w - offx, offy, offx, offx)
        # coordinate labels drawn in each square as [row, column]
        for x in range(0, gridsize):
            for y in range(0, gridsize):
                arrayText = '[' + str(y) + ',' + str(x) + ']'
                self.grid.create_text(offx + (spacing * x) + spacing / 2, offy + (spacing * y) + spacing / 2, text=arrayText)
        # line rows
        for rowy in range(offy + spacing, h - offy, spacing):
            self.grid.create_line(offx, rowy, w - offx, rowy)
        # line columns
        for colx in range(offx + spacing, w - offx, spacing):
            self.grid.create_line(colx, offy, colx, h - offy)
        self.grid.pack(side="left")

    def sleep_time_change_handler(self, event):
        '''
        Called after scale value change, updates the wait time between moves.
        :param event: slider change event
        '''
        self.game.sleep_time_ms = self.scale_var.get()

    def play_button_click_handler(self):
        '''
        Button listener for Play/Pause/RePlay etc. On button click prints slider value and starts the game.
        '''
        # set the players from dropdown menu if game is stopped
        if self.game_state == GameState.STOPPED:
            print("game_state " + str(self.game_state))
            self.interactive_player_ids = []
            for i in range(2):
                print(self.option_menus_vars[i].get())
                if self.option_menus_vars[i].get() == self.interractivePlayerName:
                    # human player: remember the id, keep the existing object
                    self.interactive_player_ids.append(i)
                    if i == 0:
                        self.game.player1.name = self.interractivePlayerName
                    else:
                        self.game.player2.name = self.interractivePlayerName
                else:
                    # AI player: instantiate the selected player class
                    if i == 0:
                        player_class = self.possiblePlayers[self.option_menus_vars[i].get()]
                        self.game.player1 = player_class(self.game.player1_color, self.game.player2_color)
                    else:
                        player_class = self.possiblePlayers[self.option_menus_vars[i].get()]
                        self.game.player2 = player_class(self.game.player2_color, self.game.player1_color)
            self.game.clear_game()
            self.game.current_player = self.game.player1
            self.game.current_player_color = self.game.player1_color
            print('player1 ' + str(self.game.player1_color))
            print('player2 ' + str(self.game.player2_color))
        #play game or start game if interactive
        if len(self.interactive_player_ids) != 0:
            if self.game_state == GameState.STOPPED:
                if not self.board.can_play(self.game.current_player, self.game.current_player_color):
                    self.game.clear_game()
                    self.button['text'] = 'Play'
                else:
                    self.game_state = GameState.RUNNING
                    self.button['text'] = 'RePlay'
                    print('can play ', self.interactive_player_ids)
                    inform_str = 'Player%d plays' % (self.interactive_player_ids[0]);
                    self.inform(inform_str, 'green')
                    # if only player 2 is human, let the AI make the first move
                    if len(self.interactive_player_ids) == 1 and self.interactive_player_ids[0] == 1:
                        self.game.play_game(self.interactive_player_ids[0])
            else:
                # clicking while an interactive game runs stops and resets it
                self.game_state = GameState.STOPPED
                self.button['text'] = 'Play'
                self.game.clear_game()
                # self.game.play_game(self.interactivePlayerId)
        else:
            if self.game_state == GameState.STOPPED or self.game_state == GameState.PAUSED:
                print('start')
                print('player1 ' + str(self.game.player1_color))
                print('player2 ' + str(self.game.player2_color))
                self.button['text'] = 'Pause'
                # NOTE(review): attribute name differs from sleep_time_ms
                # used in sleep_time_change_handler — confirm which one the
                # game object actually reads.
                self.game.sleepTimeMS = self.scale_var.get()
                if self.game_state == GameState.STOPPED:
                    self.game.clear_game()
                self.game.pause(False)
                self.game_state = GameState.RUNNING
                print('player1 ' + str(self.game.player1_color))
                print('player2 ' + str(self.game.player2_color))
                self.game.play_game()
                print('game exited')
                # decide whether play_game returned because of a pause or a
                # finished/aborted game
                if self.board.can_play(self.game.current_player, self.game.current_player_color) and not self.wrong_move:
                    print('set pause state')
                    self.button['text'] = 'Continue'
                    self.game_state = GameState.PAUSED
                else:
                    print('set stopped state')
                    self.button['text'] = 'RePlay'
                    self.game_state = GameState.STOPPED
                    # self.game.clear_game()
            elif self.game_state == GameState.RUNNING:
                print('pause')
                self.game_state = GameState.PAUSED
                self.game.pause(True)

    def add_players(self, players):
        '''
        Adds possible players to the gui.
        :param players: dict of player name -> player class to add.
        '''
        for player_name in players.keys():
            self.possiblePlayers[player_name] = players[player_name]
        # rebuild both dropdown menus so they list the new players
        for i in range(2):
            # self.info.delete(self.option_menus[i])
            self.option_menus[i] = OptionMenu(self.info, self.option_menus_vars[i], *self.possiblePlayers)
            self.option_menus[i].grid(row=0, column=2 * i + 1)
        self.root.update()

    def print_score(self):
        '''
        Set number of stones for both players, read from the board.
        '''
        stones = self.board.get_score()
        self.print_player_num_stones(0, stones[0])
        self.print_player_num_stones(1, stones[1])

    def print_num_stones(self, stones):
        '''
        Set number of stones for both players.
        :param stones: array of player number of stones
        '''
        self.print_player_num_stones(0, stones[0])
        self.print_player_num_stones(1, stones[1])

    def print_player_num_stones(self, playerID, stones):
        '''
        Set player number of stones.
        :param playerID: 0 for player 1, 1 for player 2
        :param stones: number of stones of the player
        '''
        self.label_player_stones[playerID]['text'] = str(stones)
        self.root.update()

    def print_move_max_times(self, maxTimesMS):
        '''
        Print maximal times for both players to the gui.
        :param maxTimesMS: array of max time needed for move.
        '''
        self.print_player_move_max_time(0, maxTimesMS[0])
        self.print_player_move_max_time(1, maxTimesMS[1])

    def print_player_move_max_time(self, playerID, maxTime):
        '''
        Set player maximal time.
        :param playerID: 0 for player 1, 1 for player 2
        :param maxTime: maximal time of player
        '''
        self.label_player_max_time[playerID]['text'] = '%.2f [ms]' % maxTime
        self.root.update()

    def print_board_state(self):
        '''
        Show the state of the board in gui.
        '''
        # self.board.print_board()
        for y in range(self.board.board_size):
            for x in range(self.board.board_size):
                if self.board.board[y][x] == -1:
                    # -1 marks an empty square
                    self.clear_stone(x, y)
                else:
                    self.draw_stone(x, y, self.board.board[y][x])
        self.root.update()

    def place_stone_click_handler(self, event):
        '''
        For interactive player places stone to mouse click position.
        :param event: mouse click event
        '''
        # only react when a game is running and it is a human player's turn
        if self.game_state != GameState.STOPPED and len(self.interactive_player_ids) >= 1 and self.game.current_player_color in self.interactive_player_ids:
            # convert pixel coordinates to [row, column] board coordinates
            pos_move = [int((event.y - self.offy) / self.gridspacing), int((event.x - self.offx) / self.gridspacing)]
            #print(type(pos_move))
            #print(type(pos_move[0]))
            #print(type(pos_move[1]))
            if self.board.is_correct_move(pos_move, self.game.current_player, self.game.current_player_color):
                # print('correct move',pos_move)
                # self.inform('correct move to %d %d'%(pos_move[0],pos_move[1]), 'green')
                # self.board.pla
                next_player_id = self.game.play_move(pos_move)
                self.print_board_state()
                self.print_score()
                self.print_move_max_times(self.game.max_times_ms)
                inform_str = 'Player%d plays' % (self.game.current_player_color);
                self.inform(inform_str, 'green')
                # against a single AI opponent, let it respond immediately
                if len(self.interactive_player_ids) == 1:
                    self.game.play_game(self.interactive_player_ids[0])
                # -1 signals that the game is over
                if next_player_id == -1:
                    self.game_state = GameState.STOPPED
                    self.button['text'] = 'RePlay'
                    self.game.print_final_info()
            else:
                print('incorrect move', pos_move)
                self.inform('incorrect move to %d %d' % (pos_move[0], pos_move[1]), 'red')

    def inform(self, text_strs, color_str):
        '''
        Show inform text in gui.
        :param text_strs: string or string array of size 2 that is shown in gui
        :param color_str: color of shown text_strs
        '''
        inform_str_all = ['', '']
        if not isinstance(text_strs, list):
            inform_str_all[0] = text_strs
        else:
            inform_str_all = text_strs
        # print(inform_str_all)
        for i in range(2):
            self.labels_inform[i]['text'] = inform_str_all[i]
            self.labels_inform[i]['foreground'] = color_str
        self.root.update()
With Prime Minister Narendra Modi declaring war against black money, several ways are being devised by the corrupt to desperately save their illegal wealth. Many are choosing bank accounts of unsuspecting poor families to stash away their illegal wealth before the December deadline. But, a poor family in Kolkata has set an example by showing courage to say no to black money and refusing to fall into that trap. This, despite the lure of easy cash in the face of extreme adversity. TEEN SUFFERING FROM WILSON'S DISEASE The parents of 17-year-old Sumana Pal, who is suffering from the rare Wilson's disease, have refused to accept multiple cash donations in black money from people who have suddenly come forward with help. Despite facing an acute crisis of funds for their daughter's very expensive treatment, the family has decided not to accept any aid in black money. "We were worried about how to arrange money for her treatment. Surprisingly, many came forward to help but with their black money. They said if we keep a major portion of their money in our account and return it to them later, we can take a small percentage for Sumana's treatment," Sumana's father Kanan Behari Pal said. Diagnosed with the rare disorder which cripples the body's nervous system, Sumana has been fighting for her life since she was in class 6. For the last nine years, the Pal household has been running from pillar to post to arrange funds for their daughter's treatment. While they need at least Rs 1.5 lakh every month for medical expenses, Sumana's father barely manages about Rs 20,000 from his small printing press business. Having exhausted all their savings, the family has managed to keep Sumana alive only through donations. But the government's sudden demonetisation announcement has come as a bolt from the blue. "Medicines for her treatment are all imported as they are not manufactured in India. Each bottle of 100 capsules costs 600 pounds, or close to Rs 60,000. She needs two bottles per month. 
Without that, she runs the risk of slipping into a coma once again," Kakali Pal, her worried mother, explained. APARTMENT SOLD OFF TO MEET MEDICAL EXPENSES To meet the medical expenses, the family sold off their own apartment and now live in a one-room accommodation in the northern fringes of the city. Though struggling to make ends meet, they remain steadfast in their resolve. "We may be poor, but we have self-esteem. We are determined to walk on the right path to keep Sumana alive," adds Kakali. While the family refuses to compromise, they desperately seek the prime minister's help to keep their daughter alive. "We fully endorse Modi Ji's fight against corruption but we hope that it helps families like ours in return," said an emotional Behari Pal, reminding that even he is fighting a battle, but of another kind - to save his only daughter. Watch the video
/*! * @author electricessence / https://github.com/electricessence/ * Licensing: MIT */ import {Func} from "./FunctionTypes"; import Lazy from "./Lazy"; export default class ResettableLazy<T> extends Lazy<T> { constructor(valueFactory:Func<T>, trapExceptions:boolean = false) { super(valueFactory, trapExceptions, true); // @ts-ignore // Force this override. this._disposableObjectName = 'ResettableLazy'; } static create<T>(valueFactory:Func<T>, trapExceptions:boolean = false) { return new ResettableLazy<T>(valueFactory, trapExceptions); } }
// FileSize returns the size of a file func FileSize(filename string) (int, error) { f, err := os.Open(filename) if err != nil { return -1, err } defer f.Close() fi, err := f.Stat() if err != nil { return -1, err } size := int(fi.Size()) return size, nil }
// GetPersistent returns a single instance for the specific store:
// one *Persistent per dbPath, created on first use and cached in the
// package-level boltMap.
//
// It uses double-checked locking: the common case is served under the
// read lock; the write lock is taken only when the entry is missing,
// and the map is re-checked after acquiring it because another
// goroutine may have created the instance in the meantime.
func GetPersistent(dbPath string, refreshDb bool) *Persistent {
	boltMapMx.RLock()
	if b, ok := boltMap[dbPath]; ok {
		boltMapMx.RUnlock()
		return b
	}
	boltMapMx.RUnlock()

	boltMapMx.Lock()
	defer boltMapMx.Unlock()
	// Re-check under the write lock: the entry may have appeared while
	// we held no lock at all.
	if b, ok := boltMap[dbPath]; ok {
		return b
	}
	// NOTE(review): refreshDb only takes effect for the goroutine that
	// creates the instance; later callers get the cached instance as-is.
	boltMap[dbPath] = newPersistent(dbPath, refreshDb)
	return boltMap[dbPath]
}
<filename>modules/hub/src/vendor/connext/lib/getExchangeRates.ts import { ExchangeRates } from '../state/ConnextState/ExchangeRates' import { ConnextState } from '../state/store' export const GET_EXCHANGE_RATES_ERROR = 'No exchange rates are set' export default function getExchangeRates(state: ConnextState): ExchangeRates { const rate = state.runtime.exchangeRate if (!rate) { return { } } return rate.rates }
package metrics

import (
	"github.com/rcrowley/go-metrics"
)

// Names of the self-diagnostic statsd metrics.
const (
	sdDropped   = "statsd.calls.dropped"
	sdFlushTime = "statsd.time.emit"
	sdQueueSize = "statsd.size.queue"
)

// metricsCache keeps metrics.Meter and metrics.Timer to aggregate delayed calls before flushing them
type metricsCache struct {
	// data maps a metric name to its pair of histograms.
	data     map[string]pair
	registry metrics.Registry

	// self-diagnose metrics, pre-registered so hot paths can use them
	// without a map lookup.
	droppedCalls pair
	queueSize    pair
}

type pair struct {
	// meter is metrics.Histogram because default metrics.Meter has timer that locks its mutex every 5 seconds
	meter     metrics.Histogram
	histogram metrics.Histogram
}

// newMetricsCache builds an empty cache with its own registry and
// eagerly creates the self-diagnostic pairs.
func newMetricsCache() *metricsCache {
	metricsCache := &metricsCache{
		data:     make(map[string]pair, 256),
		registry: metrics.NewRegistry(),
	}
	metricsCache.droppedCalls = metricsCache.getOrCreate(sdDropped)
	metricsCache.queueSize = metricsCache.getOrCreate(sdQueueSize)
	return metricsCache
}

// getOrCreate returns an existing pair. If it doesn't exist, it is created.
// This function is not thread safe.
func (cache *metricsCache) getOrCreate(name string) pair {
	pair, ok := cache.data[name]
	if !ok {
		pair = cache.newPair(name)
		cache.data[name] = pair
	}
	return pair
}

// newPair registers two histograms under the same name: one backed by a
// moving-window sample (the "meter") and one by an exponentially
// decaying sample.
// NOTE(review): newMovingWindowSample and movingWindowSize are defined
// elsewhere in this package.
func (cache *metricsCache) newPair(name string) pair {
	return pair{
		meter:     metrics.NewRegisteredHistogram(name, cache.registry, newMovingWindowSample()),
		histogram: metrics.NewRegisteredHistogram(name, cache.registry, metrics.NewExpDecaySample(movingWindowSize, 0.015)),
	}
}
Characterization of the Infectious Unit for Man of Two Respiratory Viruses 1 We have previously reported 50% human infectious doses (HID50) of adenovirus, type 4 (adeno 4), and Coxsackie virus A, type 21 (A21) (1, 2). The HID50 were determined in normal volunteers and expressed as 50% tissue culture infectious doses (TCID50) using the most sensitive assay system available for each virus. Determinations of HID50 were made for each virus with two different inoculation methods: nasal instillation of a small volume of virus suspension, which deposits entirely in the nasopharynx; and inhalation of a small particle aerosol, which deposits primarily in the lower respiratory tract. Adeno 4 was more infectious in small particle aerosol than in nasal drops; whereas the reverse was true for A21. While these data suggest important biologic differences between viruses, they do not provide absolute quantitation of infectious units and, since the sensitivity of the respective assay systems is not known, they do not allow quantitative comparisons between viruses. In order to characterize the infectious unit of these respiratory viruses for man, and to provide data with which comparisons could be made between viruses, we made electron microscopic counts of viral particulates and numbers of virions in particulates in the virus suspensions of adeno 4 and A21 which were used for volunteer inoculations. In the present report these data are presented and compared to the previous data on infectivity of the virus suspensions for man and for tissue culture. Methods and Materials. Virus. The strains used in these studies were obtained from Marine recruits with acute respiratory disease. The adeno 4 volunteer inoculum (strain 78650) had been passaged once in human embryonic kidney tissue cultures (HEK). The harvest was frozen and thawed once, pooled, centrifuged at 1000g for 20 min, and filtered through 800 μg membrane filters (Millipore).
The hospital-physician relationship: past, present, and future. The traditional hospital-physician relationship in the United States was an implicit symbiotic collaboration sheltered by financial success. The health care economic challenges of the 1980s and 1990s unmasked the weaknesses of this relationship as hospitals and doctors often found themselves in direct competition in the struggle to maintain revenue. We recount and examine the history of the largely implicit American hospital-physician relationship and propose a means of establishing formal, explicit hospital-physician collaborations focused on delivering quality patient care and ensuring economic viability for both parties. We present the process of planning a joint hospital-physician ambulatory surgery center (ASC) at a not-for-profit academic institution as an example of a collaboration to negotiate a model embraced by both parties. However, the ultimate success of this new center, as measured in quality of patient care and economic viability, has yet to be determined.
All available police cars rushed to the carnival in Jersey City's Pershing Field when fights broke out among juveniles Sunday evening, officials said today. At about 7:20 p.m., an officer patrolling the carnival responded to a street fight at the corner of Manhattan and Central avenues, but as he attempted to break up the fight, other fights began to break out between Manhattan Avenue and Ferry Street, a police report says. The officer called for all available units to the area. As North District police cars arrived, the crowd began to disperse in all directions before the officer could "take appropriate action," the report says, adding that the officer told responding units to slow down. No arrests were made and no one was injured, the report says, adding that the carnival continued to operate.
/**
 * @file lliohttpserver.h
 * @brief Declaration of function for creating an HTTP wire server
 * @see LLIOServerSocket, LLPumpIO
 *
 * $LicenseInfo:firstyear=2005&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2010, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA
 * $/LicenseInfo$
 */

#ifndef LL_LLIOHTTPSERVER_H
#define LL_LLIOHTTPSERVER_H

#include "llchainio.h"
#include "llhttpnode.h"

class LLPumpIO;

// Common strings used for populating the context. Basically 'request',
// 'wildcard', and 'headers'.
extern const std::string CONTEXT_REQUEST;
extern const std::string CONTEXT_RESPONSE;
extern const std::string CONTEXT_VERB;
extern const std::string CONTEXT_HEADERS;
extern const std::string HTTP_VERB_GET;
extern const std::string HTTP_VERB_PUT;
extern const std::string HTTP_VERB_POST;
extern const std::string HTTP_VERB_DELETE;
extern const std::string HTTP_VERB_OPTIONS;

class LLIOHTTPServer
{
public:
	typedef void (*timing_callback_t)(const char* hashed_name, F32 time, void* data);

	static LLHTTPNode& create(apr_pool_t* pool, LLPumpIO& pump, U16 port);
	/**< Creates an HTTP wire server on the pump for the given TCP port.
	 *
	 *   Returns the root node of the new server.  Add LLHTTPNode instances
	 *   to this root.
	 *
	 *   Nodes that return NULL for getProtocolHandler(), will use the
	 *   default handler that interprets HTTP on the wire and converts
	 *   it into calls to get(), put(), post(), del() with appropriate
	 *   LLSD arguments and results.
	 *
	 *   To have nodes that implement some other wire protocol (XML-RPC
	 *   for example), use the helper templates below.
	 */

	static void createPipe(LLPumpIO::chain_t& chain,
			const LLHTTPNode& root, const LLSD& ctx);
	/**< Create a pipe on the chain that handles HTTP requests.
	 *   The requests are served by the node tree given at root.
	 *
	 *   This is primarily useful for unit testing.
	 */

	static void setTimingCallback(timing_callback_t callback, void* data);
	/**< Register a callback function that will be called every time
	 *   a GET, PUT, POST, or DELETE is handled.
	 *
	 *   This is used to time the LLHTTPNode handler code, which often hits
	 *   the database or does other, slow operations. JC
	 */
};

/* @name Helper Templates
 *
 * These templates make it easy to create nodes that use their own protocol
 * handlers rather than the default.  Typically, you subclass LLIOPipe to
 * implement the protocol, and then add a node using the templates:
 *
 *   rootNode->addNode("thing", new LLHTTPNodeForPipe<LLThingPipe>);
 *
 * The templates are:
 *
 *   LLChainIOFactoryForPipe
 *     - a simple factory that builds instances of a pipe
 *
 *   LLHTTPNodeForFactory
 *     - a HTTP node that uses a factory as the protocol handler
 *
 *   LLHTTPNodeForPipe
 *     - a HTTP node that uses a simple factory based on a pipe
 */
//@{

template<class Pipe>
class LLChainIOFactoryForPipe : public LLChainIOFactory
{
public:
	// Appends a fresh instance of Pipe to the chain; always succeeds.
	virtual bool build(LLPumpIO::chain_t& chain, LLSD context) const
	{
		chain.push_back(LLIOPipe::ptr_t(new Pipe));
		return true;
	}
};

template<class Factory>
class LLHTTPNodeForFactory : public LLHTTPNode
{
public:
	const LLChainIOFactory* getProtocolHandler() const
	{
		return &mProtocolHandler;
	}

private:
	Factory mProtocolHandler;
};

//@}

template<class Pipe>
class LLHTTPNodeForPipe : public LLHTTPNodeForFactory<
			LLChainIOFactoryForPipe<Pipe> >
{
};

#endif // LL_LLIOHTTPSERVER_H
def makeShape(turt, side_amount, side_length=100):
    """Draw a regular polygon with the given turtle.

    Generalized from the original, which hard-coded each side to 100
    units; `side_length` defaults to 100 so existing callers are
    unaffected.

    :param turt: a turtle-like object providing ``forward`` and ``left``
    :param side_amount: number of sides of the polygon (> 0)
    :param side_length: length of each side in turtle units
    """
    # Exterior angle of a regular polygon: turning by it side_amount
    # times completes a full 360-degree rotation.
    angle = 360 / side_amount
    for _ in range(side_amount):
        turt.forward(side_length)
        turt.left(angle)
/* rescoff.c -- read and write resources in Windows COFF files.
   Copyright (C) 1997-2017 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Cygnus Support.
   Rewritten by Kai Tietz, Onevision.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* This file contains functions that read and write Windows resources
   in COFF files.  */

#include "sysdep.h"
#include "bfd.h"
#include "bucomm.h"
#include "libiberty.h"
#include "windres.h"

#include <assert.h>

/* In order to use the address of a resource data entry, we need to
   get the image base of the file.  Right now we extract it from
   internal BFD information.  FIXME.  */

#include "coff/internal.h"
#include "libcoff.h"

/* Information we extract from the file.  */

struct coff_file_info
{
  /* File name.  */
  const char *filename;
  /* Data read from the file.  */
  const bfd_byte *data;
  /* End of data read from file.  */
  const bfd_byte *data_end;
  /* Address of the resource section minus the image base of the file.  */
  rc_uint_type secaddr;
};

/* A resource directory table in a COFF file.  Packed so the struct
   mirrors the on-disk layout exactly.  */

struct __attribute__ ((__packed__)) extern_res_directory
{
  /* Characteristics.  */
  bfd_byte characteristics[4];
  /* Time stamp.  */
  bfd_byte time[4];
  /* Major version number.  */
  bfd_byte major[2];
  /* Minor version number.  */
  bfd_byte minor[2];
  /* Number of named directory entries.  */
  bfd_byte name_count[2];
  /* Number of directory entries with IDs.  */
  bfd_byte id_count[2];
};

/* A resource directory entry in a COFF file.  */

struct extern_res_entry
{
  /* Name or ID.  */
  bfd_byte name[4];
  /* Address of resource entry or subdirectory.  */
  bfd_byte rva[4];
};

/* A resource data entry in a COFF file.  */

struct extern_res_data
{
  /* Address of resource data.  This is apparently a file relative
     address, rather than a section offset.  */
  bfd_byte rva[4];
  /* Size of resource data.  */
  bfd_byte size[4];
  /* Code page.  */
  bfd_byte codepage[4];
  /* Reserved.  */
  bfd_byte reserved[4];
};

/* Local functions.  */

static void overrun (const struct coff_file_info *, const char *);
static rc_res_directory *read_coff_res_dir (windres_bfd *, const bfd_byte *,
					    const struct coff_file_info *,
					    const rc_res_id *, int);
static rc_res_resource *read_coff_data_entry (windres_bfd *, const bfd_byte *,
					      const struct coff_file_info *,
					      const rc_res_id *);

/* Read the resources in a COFF file.  */

rc_res_directory *
read_coff_rsrc (const char *filename, const char *target)
{
  rc_res_directory *ret;
  bfd *abfd;
  windres_bfd wrbfd;
  char **matching;
  asection *sec;
  bfd_size_type size;
  bfd_byte *data;
  struct coff_file_info flaginfo;

  if (filename == NULL)
    fatal (_("filename required for COFF input"));

  abfd = bfd_openr (filename, target);
  if (abfd == NULL)
    bfd_fatal (filename);
  if (! bfd_check_format_matches (abfd, bfd_object, &matching))
    {
      bfd_nonfatal (bfd_get_filename (abfd));
      if (bfd_get_error () == bfd_error_file_ambiguously_recognized)
	list_matching_formats (matching);
      xexit (1);
    }

  sec = bfd_get_section_by_name (abfd, ".rsrc");
  if (sec == NULL)
    {
      fatal (_("%s: no resource section"), filename);
    }

  set_windres_bfd (&wrbfd, abfd, sec, WR_KIND_BFD);
  size = bfd_section_size (abfd, sec);
  /* PR 17512: file: 1b25ba5d
     The call to get_file_size here may be expensive
     but there is no other way to determine if the section size
     is reasonable.  */
  if (size > (bfd_size_type) get_file_size (filename))
    fatal (_("%s: .rsrc section is bigger than the file!"), filename);

  data = (bfd_byte *) res_alloc (size);

  get_windres_bfd_content (&wrbfd, data, 0, size);

  flaginfo.filename = filename;
  flaginfo.data = data;
  flaginfo.data_end = data + size;
  flaginfo.secaddr = (bfd_get_section_vma (abfd, sec)
		      - pe_data (abfd)->pe_opthdr.ImageBase);

  /* Now just read in the top level resource directory.  Note that we
     don't free data, since we create resource entries that point into
     it.  If we ever want to free up the resource information we read,
     this will have to be cleaned up.  */

  ret = read_coff_res_dir (&wrbfd, data, &flaginfo, (const rc_res_id *) NULL, 0);

  bfd_close (abfd);

  return ret;
}

/* Give an error if we are out of bounds.  */

static void
overrun (const struct coff_file_info *flaginfo, const char *msg)
{
  fatal (_("%s: %s: address out of bounds"), flaginfo->filename, msg);
}

/* Read a resource directory.  TYPE is the id of the level-0 entry we
   descended through (NULL at the top level); LEVEL bounds the
   recursion depth.  */

static rc_res_directory *
read_coff_res_dir (windres_bfd *wrbfd, const bfd_byte *data,
		   const struct coff_file_info *flaginfo,
		   const rc_res_id *type, int level)
{
  const struct extern_res_directory *erd;
  rc_res_directory *rd;
  int name_count, id_count, i;
  rc_res_entry **pp;
  const struct extern_res_entry *ere;

  /* PR 17512: file: 09d80f53.
     Whilst in theory resources can nest to any level, in practice
     Microsoft only defines 3 levels.  Corrupt files however might
     claim to use more.  */
  if (level > 4)
    overrun (flaginfo, _("Resources nest too deep"));

  if ((size_t) (flaginfo->data_end - data) < sizeof (struct extern_res_directory))
    overrun (flaginfo, _("directory"));

  erd = (const struct extern_res_directory *) data;

  rd = (rc_res_directory *) res_alloc (sizeof (rc_res_directory));
  rd->characteristics = windres_get_32 (wrbfd, erd->characteristics, 4);
  rd->time = windres_get_32 (wrbfd, erd->time, 4);
  rd->major = windres_get_16 (wrbfd, erd->major, 2);
  rd->minor = windres_get_16 (wrbfd, erd->minor, 2);
  rd->entries = NULL;

  name_count = windres_get_16 (wrbfd, erd->name_count, 2);
  id_count = windres_get_16 (wrbfd, erd->id_count, 2);

  pp = &rd->entries;

  /* The resource directory entries immediately follow the directory
     table.  */
  ere = (const struct extern_res_entry *) (erd + 1);

  /* First pass: the named entries.  */
  for (i = 0; i < name_count; i++, ere++)
    {
      rc_uint_type name, rva;
      rc_res_entry *re;
      const bfd_byte *ers;
      int length, j;

      if ((const bfd_byte *) ere >= flaginfo->data_end)
	overrun (flaginfo, _("named directory entry"));

      name = windres_get_32 (wrbfd, ere->name, 4);
      rva = windres_get_32 (wrbfd, ere->rva, 4);

      /* For some reason the high bit in NAME is set.  */
      name &=~ 0x80000000;

      if (name > (rc_uint_type) (flaginfo->data_end - flaginfo->data))
	overrun (flaginfo, _("directory entry name"));

      ers = flaginfo->data + name;

      re = (rc_res_entry *) res_alloc (sizeof *re);
      re->next = NULL;
      re->id.named = 1;
      /* The name is a length-prefixed string of 16-bit characters.  */
      length = windres_get_16 (wrbfd, ers, 2);
      re->id.u.n.length = length;
      re->id.u.n.name = (unichar *) res_alloc (length * sizeof (unichar));
      for (j = 0; j < length; j++)
	{
	  /* PR 17512: file: 05dc4a16.  */
	  if (length < 0
	      || ers >= flaginfo->data_end
	      || ers + j * 2 + 4 >= flaginfo->data_end)
	    overrun (flaginfo, _("resource name"));
	  re->id.u.n.name[j] = windres_get_16 (wrbfd, ers + j * 2 + 2, 2);
	}

      if (level == 0)
	type = &re->id;

      /* The high bit of RVA distinguishes a subdirectory from a leaf
	 data entry.  */
      if ((rva & 0x80000000) != 0)
	{
	  rva &=~ 0x80000000;
	  if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data))
	    overrun (flaginfo, _("named subdirectory"));
	  re->subdir = 1;
	  re->u.dir = read_coff_res_dir (wrbfd, flaginfo->data + rva, flaginfo,
					 type, level + 1);
	}
      else
	{
	  if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data))
	    overrun (flaginfo, _("named resource"));
	  re->subdir = 0;
	  re->u.res = read_coff_data_entry (wrbfd, flaginfo->data + rva,
					    flaginfo, type);
	}

      *pp = re;
      pp = &re->next;
    }

  /* Second pass: the ID entries, which follow the named ones.  */
  for (i = 0; i < id_count; i++, ere++)
    {
      unsigned long name, rva;
      rc_res_entry *re;

      if ((const bfd_byte *) ere >= flaginfo->data_end)
	overrun (flaginfo, _("ID directory entry"));

      name = windres_get_32 (wrbfd, ere->name, 4);
      rva = windres_get_32 (wrbfd, ere->rva, 4);

      re = (rc_res_entry *) res_alloc (sizeof *re);
      re->next = NULL;
      re->id.named = 0;
      re->id.u.id = name;

      if (level == 0)
	type = &re->id;

      if ((rva & 0x80000000) != 0)
	{
	  rva &=~ 0x80000000;
	  if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data))
	    overrun (flaginfo, _("ID subdirectory"));
	  re->subdir = 1;
	  re->u.dir = read_coff_res_dir (wrbfd, flaginfo->data + rva, flaginfo,
					 type, level + 1);
	}
      else
	{
	  if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data))
	    overrun (flaginfo, _("ID resource"));
	  re->subdir = 0;
	  re->u.res = read_coff_data_entry (wrbfd, flaginfo->data + rva,
					    flaginfo, type);
	}

      *pp = re;
      pp = &re->next;
    }

  return rd;
}

/* Read a resource data entry. 
*/ static rc_res_resource * read_coff_data_entry (windres_bfd *wrbfd, const bfd_byte *data, const struct coff_file_info *flaginfo, const rc_res_id *type) { const struct extern_res_data *erd; rc_res_resource *r; rc_uint_type size, rva; const bfd_byte *resdata; if (type == NULL) fatal (_("resource type unknown")); if ((size_t) (flaginfo->data_end - data) < sizeof (struct extern_res_data)) overrun (flaginfo, _("data entry")); erd = (const struct extern_res_data *) data; size = windres_get_32 (wrbfd, erd->size, 4); rva = windres_get_32 (wrbfd, erd->rva, 4); if (rva < flaginfo->secaddr || rva - flaginfo->secaddr >= (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("resource data")); resdata = flaginfo->data + (rva - flaginfo->secaddr); if (size > (rc_uint_type) (flaginfo->data_end - resdata)) overrun (flaginfo, _("resource data size")); r = bin_to_res (wrbfd, *type, resdata, size); memset (&r->res_info, 0, sizeof (rc_res_res_info)); r->coff_info.codepage = windres_get_32 (wrbfd, erd->codepage, 4); r->coff_info.reserved = windres_get_32 (wrbfd, erd->reserved, 4); return r; } /* This structure is used to build a list of bindata structures. */ struct bindata_build { /* The data. */ bindata *d; /* The last structure we have added to the list. */ bindata *last; /* The size of the list as a whole. */ unsigned long length; }; struct coff_res_data_build { /* The data. */ coff_res_data *d; /* The last structure we have added to the list. */ coff_res_data *last; /* The size of the list as a whole. */ unsigned long length; }; /* This structure keeps track of information as we build the directory tree. */ struct coff_write_info { /* These fields are based on the BFD. */ /* The BFD itself. */ windres_bfd *wrbfd; /* Pointer to section symbol used to build RVA relocs. */ asymbol **sympp; /* These fields are computed initially, and then not changed. */ /* Length of directory tables and entries. */ unsigned long dirsize; /* Length of directory entry strings. 
*/ unsigned long dirstrsize; /* Length of resource data entries. */ unsigned long dataentsize; /* These fields are updated as we add data. */ /* Directory tables and entries. */ struct bindata_build dirs; /* Directory entry strings. */ struct bindata_build dirstrs; /* Resource data entries. */ struct bindata_build dataents; /* Actual resource data. */ struct coff_res_data_build resources; /* Relocations. */ arelent **relocs; /* Number of relocations. */ unsigned int reloc_count; }; static void coff_bin_sizes (const rc_res_directory *, struct coff_write_info *); static bfd_byte *coff_alloc (struct bindata_build *, rc_uint_type); static void coff_to_bin (const rc_res_directory *, struct coff_write_info *); static void coff_res_to_bin (const rc_res_resource *, struct coff_write_info *); /* Write resources to a COFF file. RESOURCES should already be sorted. Right now we always create a new file. Someday we should also offer the ability to merge resources into an existing file. This would require doing the basic work of objcopy, just modifying or adding the .rsrc section. */ void write_coff_file (const char *filename, const char *target, const rc_res_directory *resources) { bfd *abfd; asection *sec; struct coff_write_info cwi; windres_bfd wrbfd; bindata *d; coff_res_data *rd; unsigned long length, offset; if (filename == NULL) fatal (_("filename required for COFF output")); abfd = bfd_openw (filename, target); if (abfd == NULL) bfd_fatal (filename); if (! bfd_set_format (abfd, bfd_object)) bfd_fatal ("bfd_set_format"); #if defined DLLTOOL_SH if (! bfd_set_arch_mach (abfd, bfd_arch_sh, 0)) bfd_fatal ("bfd_set_arch_mach(sh)"); #elif defined DLLTOOL_MIPS if (! bfd_set_arch_mach (abfd, bfd_arch_mips, 0)) bfd_fatal ("bfd_set_arch_mach(mips)"); #elif defined DLLTOOL_ARM if (! bfd_set_arch_mach (abfd, bfd_arch_arm, 0)) bfd_fatal ("bfd_set_arch_mach(arm)"); #else /* FIXME: This is obviously i386 specific. */ if (! 
bfd_set_arch_mach (abfd, bfd_arch_i386, 0)) bfd_fatal ("bfd_set_arch_mach(i386)"); #endif if (! bfd_set_file_flags (abfd, HAS_SYMS | HAS_RELOC)) bfd_fatal ("bfd_set_file_flags"); sec = bfd_make_section_with_flags (abfd, ".rsrc", (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_DATA)); if (sec == NULL) bfd_fatal ("bfd_make_section"); if (! bfd_set_symtab (abfd, sec->symbol_ptr_ptr, 1)) bfd_fatal ("bfd_set_symtab"); /* Requiring this is probably a bug in BFD. */ sec->output_section = sec; /* The order of data in the .rsrc section is resource directory tables and entries resource directory strings resource data entries actual resource data We build these different types of data in different lists. */ set_windres_bfd (&wrbfd, abfd, sec, WR_KIND_BFD); cwi.wrbfd = &wrbfd; cwi.sympp = sec->symbol_ptr_ptr; cwi.dirsize = 0; cwi.dirstrsize = 0; cwi.dataentsize = 0; cwi.dirs.d = NULL; cwi.dirs.last = NULL; cwi.dirs.length = 0; cwi.dirstrs.d = NULL; cwi.dirstrs.last = NULL; cwi.dirstrs.length = 0; cwi.dataents.d = NULL; cwi.dataents.last = NULL; cwi.dataents.length = 0; cwi.resources.d = NULL; cwi.resources.last = NULL; cwi.resources.length = 0; cwi.relocs = NULL; cwi.reloc_count = 0; /* Work out the sizes of the resource directory entries, so that we know the various offsets we will need. */ coff_bin_sizes (resources, &cwi); /* Force the directory strings to be 64 bit aligned. Every other structure is 64 bit aligned anyhow. */ cwi.dirstrsize = (cwi.dirstrsize + 7) & ~7; /* Actually convert the resources to binary. */ coff_to_bin (resources, &cwi); /* Add another few bytes to the directory strings if needed for alignment. */ if ((cwi.dirstrs.length & 7) != 0) { rc_uint_type pad = 8 - (cwi.dirstrs.length & 7); bfd_byte *ex; ex = coff_alloc (& cwi.dirstrs, pad); memset (ex, 0, pad); } /* Make sure that the data we built came out to the same size as we calculated initially. 
*/ assert (cwi.dirs.length == cwi.dirsize); assert (cwi.dirstrs.length == cwi.dirstrsize); assert (cwi.dataents.length == cwi.dataentsize); length = (cwi.dirsize + cwi.dirstrsize + cwi.dataentsize + cwi.resources.length); if (! bfd_set_section_size (abfd, sec, length)) bfd_fatal ("bfd_set_section_size"); bfd_set_reloc (abfd, sec, cwi.relocs, cwi.reloc_count); offset = 0; for (d = cwi.dirs.d; d != NULL; d = d->next) { if (! bfd_set_section_contents (abfd, sec, d->data, offset, d->length)) bfd_fatal ("bfd_set_section_contents"); offset += d->length; } for (d = cwi.dirstrs.d; d != NULL; d = d->next) { set_windres_bfd_content (&wrbfd, d->data, offset, d->length); offset += d->length; } for (d = cwi.dataents.d; d != NULL; d = d->next) { set_windres_bfd_content (&wrbfd, d->data, offset, d->length); offset += d->length; } for (rd = cwi.resources.d; rd != NULL; rd = rd->next) { res_to_bin (cwi.wrbfd, (rc_uint_type) offset, rd->res); offset += rd->length; } assert (offset == length); if (! bfd_close (abfd)) bfd_fatal ("bfd_close"); /* We allocated the relocs array using malloc. */ free (cwi.relocs); } /* Work out the sizes of the various fixed size resource directory entries. This updates fields in CWI. */ static void coff_bin_sizes (const rc_res_directory *resdir, struct coff_write_info *cwi) { const rc_res_entry *re; cwi->dirsize += sizeof (struct extern_res_directory); for (re = resdir->entries; re != NULL; re = re->next) { cwi->dirsize += sizeof (struct extern_res_entry); if (re->id.named) cwi->dirstrsize += re->id.u.n.length * 2 + 2; if (re->subdir) coff_bin_sizes (re->u.dir, cwi); else cwi->dataentsize += sizeof (struct extern_res_data); } } /* Allocate data for a particular list. 
*/ static bfd_byte * coff_alloc (struct bindata_build *bb, rc_uint_type size) { bindata *d; d = (bindata *) reswr_alloc (sizeof (bindata)); d->next = NULL; d->data = (bfd_byte *) reswr_alloc (size); d->length = size; if (bb->d == NULL) bb->d = d; else bb->last->next = d; bb->last = d; bb->length += size; return d->data; } /* Convert the resource directory RESDIR to binary. */ static void coff_to_bin (const rc_res_directory *resdir, struct coff_write_info *cwi) { struct extern_res_directory *erd; int ci, cn; const rc_res_entry *e; struct extern_res_entry *ere; /* Write out the directory table. */ erd = ((struct extern_res_directory *) coff_alloc (&cwi->dirs, sizeof (*erd))); windres_put_32 (cwi->wrbfd, erd->characteristics, resdir->characteristics); windres_put_32 (cwi->wrbfd, erd->time, resdir->time); windres_put_16 (cwi->wrbfd, erd->major, resdir->major); windres_put_16 (cwi->wrbfd, erd->minor, resdir->minor); ci = 0; cn = 0; for (e = resdir->entries; e != NULL; e = e->next) { if (e->id.named) ++cn; else ++ci; } windres_put_16 (cwi->wrbfd, erd->name_count, cn); windres_put_16 (cwi->wrbfd, erd->id_count, ci); /* Write out the data entries. Note that we allocate space for all the entries before writing them out. That permits a recursive call to work correctly when writing out subdirectories. */ ere = ((struct extern_res_entry *) coff_alloc (&cwi->dirs, (ci + cn) * sizeof (*ere))); for (e = resdir->entries; e != NULL; e = e->next, ere++) { if (! e->id.named) windres_put_32 (cwi->wrbfd, ere->name, e->id.u.id); else { bfd_byte *str; rc_uint_type i; /* For some reason existing files seem to have the high bit set on the address of the name, although that is not documented. 
*/ windres_put_32 (cwi->wrbfd, ere->name, 0x80000000 | (cwi->dirsize + cwi->dirstrs.length)); str = coff_alloc (&cwi->dirstrs, e->id.u.n.length * 2 + 2); windres_put_16 (cwi->wrbfd, str, e->id.u.n.length); for (i = 0; i < e->id.u.n.length; i++) windres_put_16 (cwi->wrbfd, str + (i + 1) * sizeof (unichar), e->id.u.n.name[i]); } if (e->subdir) { windres_put_32 (cwi->wrbfd, ere->rva, 0x80000000 | cwi->dirs.length); coff_to_bin (e->u.dir, cwi); } else { windres_put_32 (cwi->wrbfd, ere->rva, cwi->dirsize + cwi->dirstrsize + cwi->dataents.length); coff_res_to_bin (e->u.res, cwi); } } } /* Convert the resource RES to binary. */ static void coff_res_to_bin (const rc_res_resource *res, struct coff_write_info *cwi) { arelent *r; struct extern_res_data *erd; coff_res_data *d; /* For some reason, although every other address is a section offset, the address of the resource data itself is an RVA. That means that we need to generate a relocation for it. We allocate the relocs array using malloc so that we can use realloc. FIXME: This relocation handling is correct for the i386, but probably not for any other target. 
*/ r = (arelent *) reswr_alloc (sizeof (arelent)); r->sym_ptr_ptr = cwi->sympp; r->address = cwi->dirsize + cwi->dirstrsize + cwi->dataents.length; r->addend = 0; r->howto = bfd_reloc_type_lookup (WR_BFD (cwi->wrbfd), BFD_RELOC_RVA); if (r->howto == NULL) bfd_fatal (_("can't get BFD_RELOC_RVA relocation type")); cwi->relocs = xrealloc (cwi->relocs, (cwi->reloc_count + 2) * sizeof (arelent *)); cwi->relocs[cwi->reloc_count] = r; cwi->relocs[cwi->reloc_count + 1] = NULL; ++cwi->reloc_count; erd = (struct extern_res_data *) coff_alloc (&cwi->dataents, sizeof (*erd)); windres_put_32 (cwi->wrbfd, erd->rva, (cwi->dirsize + cwi->dirstrsize + cwi->dataentsize + cwi->resources.length)); windres_put_32 (cwi->wrbfd, erd->codepage, res->coff_info.codepage); windres_put_32 (cwi->wrbfd, erd->reserved, res->coff_info.reserved); d = (coff_res_data *) reswr_alloc (sizeof (coff_res_data)); d->length = res_to_bin (NULL, (rc_uint_type) 0, res); d->res = res; d->next = NULL; if (cwi->resources.d == NULL) cwi->resources.d = d; else cwi->resources.last->next = d; cwi->resources.last = d; cwi->resources.length += (d->length + 7) & ~7; windres_put_32 (cwi->wrbfd, erd->size, d->length); /* Force the next resource to have 64 bit alignment. */ d->length = (d->length + 7) & ~7; }
def extract_event_names_descriptions_updater(results):
    """Pair event names with their descriptions scraped from a parsed page.

    Args:
        results: a parsed-document object exposing ``find_all`` (e.g. a
            BeautifulSoup tag); event titles live in ``<h3>`` elements of
            the form "<number>. <name>" and descriptions in ``<p>`` tags.

    Returns:
        A zip iterator of ``(event_name, description)`` pairs.  The first
        four and last two ``<p>`` elements are dropped — presumably page
        boilerplate surrounding the event list (TODO confirm against the
        scraped page layout).
    """
    headings = results.find_all('h3')
    paragraphs = results.find_all('p')
    # Strip the leading "<number>. " prefix, keeping only the name.
    names = [heading.text.split('. ')[-1] for heading in headings]
    texts = [paragraph.text for paragraph in paragraphs][4:-2]
    return zip(names, texts)
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
// Order-statistics set: order_of_key(v) = number of stored keys < v.
#define oset tree<int, null_type,less<int>, rb_tree_tag,tree_order_statistics_node_update>

// Reads n items (initially in positions 1..n) and m accesses.  Each
// access moves item x to the front (modelled by giving it a key smaller
// than every existing key).  For every item it prints the minimum and
// maximum 1-based position the item ever occupied.
int main(){
    int n, m, sm = 0;
    cin >> n >> m;
    // mn/mx: extreme 0-based positions seen so far; val: current key of
    // item i inside the order-statistics set (smaller key = nearer front).
    vector<int> mn(n), mx(n), val(n);
    oset s;
    for(int i = 0; i < n; ++i)
        mn[i] = i, mx[i] = i, val[i] = i, s.insert(i);
    for(int i = 0; i < m; ++i){
        int x;
        cin >> x, --x;
        // Position just before the move may be the item's deepest point.
        mx[x] = max(mx[x], int(s.order_of_key(val[x])));
        // Moving to the front makes position 0 its minimum.
        mn[x] = 0;
        s.erase(val[x]);
        // sm decreases monotonically, so the moved item always receives
        // a key strictly smaller than every key currently in the set.
        val[x] = -- sm;
        s.insert(val[x]);
    }
    // Final positions may still extend an item's maximum.
    for(int i = 0; i < n; ++i){
        mx[i] = max(mx[i], int(s.order_of_key(val[i])));
    }
    for(int i = 0; i < n; ++i){
        cout << mn[i] + 1 << ' ' << mx[i] + 1 << '\n';
    }
}
When instructions don't help: Knowing the optimal strategy facilitates rule-based but not information-integration category learning. Providing verbal or written instructions on how to perform optimally in a task is one of the most common ways to teach beginners. This practice is so widely accepted that scholarship primarily focuses on how to provide instructions, not on whether these instructions help at all. Here we investigate the benefits of prior instruction on rule-based (RB) category learning, in which the optimal strategy is a simple explicit rule, and information-integration (II) category learning, in which the optimal strategy is similarity-based. Participants (N = 58) learned either RB or II categories, with or without verbal and written instruction about the optimal categorization strategy. Instructions significantly improved performance with RB categories but had no effect with II categories. The theoretical and practical implications of these results are discussed. (PsycInfo Database Record (c) 2021 APA, all rights reserved).
Towards 100 GbE FPGA-Based Flow Monitoring This paper explores the problem of flow metering in 100 GbE links, presenting a flow exporter architecture based on an FPGA acceleration card using only on-chip memory. Peak performance without packet sampling is assured even at the maximum packet rate, and means to avoid data loss are provided, since a low level of aggregation is achieved. This is the first in a series of architectures, each built upon the previous one, in which the resources of the custom hardware are gradually increased, improving the aggregation level, while the commodity hardware resources required for subsequent stages are consequently lowered. We consider that FPGA fabric offers adequate flexibility and performance for this task and is capable of reducing overall system cost. A functional prototype of the system has been implemented on the Xilinx VCU118 development board, configured to export TCP session records. This achievement represents a cornerstone of a 100 GbE FPGA flow exporter design that aims to support on the order of tens of millions of concurrent flows.
// $Id$

/**************************************************************************
 * Copyright(c) 1998-1999, ALICE Experiment at CERN, All rights reserved. *
 *                                                                        *
 * Author: The ALICE Off-line Project.                                    *
 * Contributors are mentioned in the code where appropriate.              *
 *                                                                        *
 * Permission to use, copy, modify and distribute this software and its   *
 * documentation strictly for non-commercial purposes is hereby granted   *
 * without fee, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission notice   *
 * appear in the supporting documentation. The authors make no claims     *
 * about the suitability of this software for any purpose. It is          *
 * provided "as is" without express or implied warranty.                  *
 **************************************************************************/

//  @file   AliHLTTRDUtils.cxx
//  @author <NAME>
//  @date
//  @brief  Utilities needed by the HLT TRD code.
//

///////////////////////////////////////////////////////////////////////////////
//                                                                           //
//  HLT TRD Utilities Class                                                  //
//                                                                           //
//  Serializes TRD clusters, tracks and ESD tracks into flat HLT output     //
//  buffers (placement-new into caller-provided memory) and reads them back //
//  into TClonesArrays.                                                      //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "AliHLTTRDUtils.h"
#include <TClonesArray.h>
#include "AliHLTTRDTrack.h"
#include "AliHLTTRDTracklet.h"
#include "AliHLTTRDCluster.h"
#include "AliHLTExternalTrackParam.h"
#include "AliTRDtransform.h"
#include "AliESDEvent.h"
#include "AliESDtrack.h"
#include "AliPID.h"

ClassImp(AliHLTTRDUtils)

// Serialize clusters into outBlockPtr, grouped by detector: one
// AliHLTTRDClustersArray header is emitted each time the detector id
// changes, followed by its clusters; nTimeBins is appended at the END
// of the block.  NOTE(review): assumes the input array is ordered by
// detector — clusters of the same detector arriving non-contiguously
// would produce multiple headers.  Returns the number of bytes written.
AliHLTUInt32_t AliHLTTRDUtils::AddClustersToOutput(const TClonesArray *const inClusterArray, AliHLTUInt8_t *const outBlockPtr, Int_t nTimeBins)
{
  AliTRDcluster* cluster = 0;
  AliHLTUInt32_t addedSize = 0;
  Int_t lastDet = -1;

  if (inClusterArray){
    AliHLTTRDClustersArray* clsArr = NULL;
    Int_t nbEntries  = inClusterArray->GetEntries();
    for (Int_t iCluster = 0; iCluster<nbEntries; iCluster++){
      cluster = (AliTRDcluster*)(inClusterArray->At(iCluster));
      if(lastDet!=cluster->GetDetector()){
        // New detector: start a fresh header (placement new into the buffer).
        lastDet=cluster->GetDetector();
        clsArr = new(outBlockPtr+addedSize) AliHLTTRDClustersArray(lastDet);
        addedSize += sizeof(AliHLTTRDClustersArray);
      }
      new (&clsArr->fCluster[clsArr->fCount]) AliHLTTRDClustersArray::cluster_type(cluster);
      clsArr->fCount++;
      addedSize += sizeof(AliHLTTRDClustersArray::cluster_type);
    }
  }

  // Time-bin count is stored as a trailer (read back by ReadClusters).
  Int_t *TBptr = (Int_t*)(outBlockPtr+addedSize);
  *TBptr = nTimeBins;
  addedSize += sizeof(*TBptr);

  return addedSize;
}

// Serialize tracks into output.  Unlike the cluster block, nTimeBins is
// stored as a HEADER at the start.  Returns the number of bytes written.
AliHLTUInt32_t AliHLTTRDUtils::AddTracksToOutput(const TClonesArray *const inTrackArray, AliHLTUInt8_t *const output, Int_t nTimeBins)
{
  Int_t *TBptr = (Int_t*)output;
  *TBptr = nTimeBins;

  AliTRDtrackV1* track = 0;
  AliHLTUInt32_t addedSize = sizeof(*TBptr);

  if (inTrackArray){
    Int_t nbTracks  = inTrackArray->GetEntries();
    for (Int_t iTrack = 0; iTrack<nbTracks; iTrack++){
      AliHLTUInt32_t trackSize=0;
      track = (AliTRDtrackV1*)(inTrackArray->At(iTrack));
      // Placement-new the flat representation directly into the buffer;
      // the object itself reports how many bytes it occupied.
      AliHLTTRDTrack *hltTrack = new (output+addedSize) AliHLTTRDTrack(track);
      trackSize = hltTrack->GetSize();
      addedSize += trackSize;
    }
  }
  return addedSize;
}

// Alternative track serialization using AliHLTTRDTrack::SaveAt and a
// 64-bit time-bin header.  Counterpart of ReadTracksAlt.
AliHLTUInt32_t AliHLTTRDUtils::AddTracksToOutputAlt(const TClonesArray *const inTrackArray, AliHLTUInt8_t *const block, Int_t nTimeBins)
{
  AliHLTUInt32_t addedSize = 0;
  AliHLTUInt64_t *TBptr = (AliHLTUInt64_t*)block;
  *TBptr = nTimeBins;
  addedSize += sizeof(AliHLTUInt64_t);
  if(!inTrackArray) return addedSize;
  Int_t nbTracks = inTrackArray->GetEntriesFast();
  for (Int_t i = 0; i<nbTracks; i++){
    AliTRDtrackV1* inTrack = (AliTRDtrackV1*)(inTrackArray->At(i));
    if(inTrack)addedSize+=AliHLTTRDTrack::SaveAt(block+addedSize, inTrack);
  }
  return addedSize;
}

/**
 * Read clusters from memory into the TClonesArray.
 * Inverse of AddClustersToOutput: walks the detector-grouped blocks and,
 * unless disabled at compile time, applies the TRD coordinate transform.
 * Returns the total number of entries now in outArray.
 */
//============================================================================
AliHLTUInt32_t AliHLTTRDUtils::ReadClusters(TClonesArray *const outArray, const void *const inputPtr, AliHLTUInt32_t size, Int_t* nTimeBins)
{
  const AliHLTUInt8_t* inPtr = (AliHLTUInt8_t*)inputPtr;
  UInt_t curSize = 0;
  Int_t counter = outArray->GetEntriesFast();
  if(nTimeBins){
    // Time-bin count was appended as a trailer by AddClustersToOutput.
    *nTimeBins=*(Int_t*)(inPtr+size-sizeof(*nTimeBins));
  }
  size-=sizeof(*nTimeBins);
#ifndef HAVE_NOT_ALITRD_CLUSTERIZER_r42837
  AliTRDtransform trans;
#endif
  while (curSize < size)
    {
      AliHLTTRDClustersArray* clsArr = (AliHLTTRDClustersArray*)(inPtr+curSize);
      curSize+=sizeof(AliHLTTRDClustersArray);
#ifndef HAVE_NOT_ALITRD_CLUSTERIZER_r42837
      trans.SetDetector(clsArr->fDetector);
#endif
      for(Int_t iCluster = 0; iCluster<clsArr->fCount; iCluster++){
        AliTRDcluster* curTRDCluster = new((*outArray)[counter]) AliTRDcluster();
        clsArr->fCluster[iCluster].ExportTRDCluster(curTRDCluster);
        curTRDCluster->SetDetector(clsArr->fDetector);
#ifndef HAVE_NOT_ALITRD_CLUSTERIZER_r42837
        trans.Transform(curTRDCluster);
#endif
        curSize += sizeof(AliHLTTRDClustersArray::cluster_type);
        counter++;
      }
    }
  return counter;
}

// Read tracks from memory (inverse of AddTracksToOutput).  Returns the
// total number of entries now in outArray.
AliHLTUInt32_t AliHLTTRDUtils::ReadTracks(TClonesArray *const outArray, const void *const inputPtr, AliHLTUInt32_t size, Int_t* nTimeBins)
{
  if(nTimeBins){
    *nTimeBins=*(Int_t*)inputPtr;
    //HLTDebug("Reading number of time bins from input block: %d", *nTimeBins);
  }
  AliHLTUInt8_t* iterPtr = ((AliHLTUInt8_t*)inputPtr)+sizeof(*nTimeBins);

  //cout << "\nReading tracks from the Memory\n ============= \n";
  //HLTDebug ("\nReading tracks from the Memory\n ============= \n");
  AliHLTTRDTrack * hltTrack;
  AliHLTUInt32_t trackSize = 0, curSize = sizeof(*nTimeBins);
  Int_t counter=outArray->GetEntriesFast();

  while (curSize < size)
    {
      hltTrack = (AliHLTTRDTrack*) iterPtr;
      //HLTDebug("curSize %i, size %i",curSize, size);

      trackSize = hltTrack->GetSize();
      //HLTDebug("GetSize() %i", trackSize);

      // hltTrack->ReadTrackletsFromMemory(iterPtr + sizeof(AliHLTTRDTrack));

      AliTRDtrackV1* curTRDTrack = new((*outArray)[counter]) AliTRDtrackV1();
      hltTrack->ExportTRDTrack(curTRDTrack);

      curSize += trackSize;
      iterPtr += trackSize;
      counter++;
    }

  //CheckTrackArray(outArray);

  return counter;
}

// Read tracks written by AddTracksToOutputAlt (64-bit time-bin header,
// AliHLTTRDTrack::LoadFrom records).  Returns the entry count of outArray.
AliHLTUInt32_t AliHLTTRDUtils::ReadTracksAlt(TClonesArray *const outArray, const void *const inputPtr, AliHLTUInt32_t size, Int_t* nTimeBins)
{
  const AliHLTUInt8_t *const block = ((AliHLTUInt8_t*)inputPtr);
  AliHLTUInt32_t readSize = 0;
  if(nTimeBins){
    *nTimeBins=*(AliHLTUInt64_t*)block;
    //HLTDebug("Reading number of time bins from input block: %d", *nTimeBins);
  }
  readSize += sizeof(AliHLTUInt64_t);
  if(!outArray) return readSize;
  Int_t counter=outArray->GetEntriesFast();
  while(readSize<size){
    AliTRDtrackV1 *const outTrack = new((*outArray)[counter]) AliTRDtrackV1;
    readSize+=AliHLTTRDTrack::LoadFrom(outTrack, block+readSize);
    counter++;
  }
  return counter;
}

// Flatten all ESD tracks into an AliHLTTracksData block made of
// AliHLTExternalTrackParam records (track parameters + covariance
// matrix).  Returns the number of bytes written.
AliHLTUInt32_t AliHLTTRDUtils::AddESDToOutput(const AliESDEvent* const esd, AliHLTUInt8_t* const outBlockPtr)
{
  AliESDtrack* esdTrack = 0;
  AliHLTUInt8_t* iterPtr = outBlockPtr;

  AliHLTTracksData* trksData = new(iterPtr) AliHLTTracksData;
  iterPtr += sizeof(AliHLTTracksData);
  trksData->fCount=0;

  if(esd){
    Double_t pid[5];
    for(Int_t i=0; i<esd->GetNumberOfTracks(); i++){
      esdTrack=esd->GetTrack(i);
      if(!esdTrack)continue;
      AliHLTExternalTrackParam* trk = new(iterPtr) AliHLTExternalTrackParam;
      iterPtr += sizeof(AliHLTExternalTrackParam);
      trk->fAlpha = esdTrack->GetAlpha();
      trk->fX = esdTrack->GetX();
      trk->fY = esdTrack->GetY();
      trk->fZ = esdTrack->GetZ();
      trk->fSinPsi = esdTrack->GetSnp();
      trk->fTgl = esdTrack->GetTgl();
      trk->fq1Pt = esdTrack->GetSigned1Pt();
      // Covariance matrix, packed in the fixed 15-element order.
      trk->fC[0] = esdTrack->GetSigmaY2();
      trk->fC[1] = esdTrack->GetSigmaZY();
      trk->fC[2] = esdTrack->GetSigmaZ2();
      trk->fC[3] = esdTrack->GetSigmaSnpY();
      trk->fC[4] = esdTrack->GetSigmaSnpZ();
      trk->fC[5] = esdTrack->GetSigmaSnp2();
      trk->fC[6] = esdTrack->GetSigmaTglY();
      trk->fC[7] = esdTrack->GetSigmaTglZ();
      trk->fC[8] = esdTrack->GetSigmaTglSnp();
      trk->fC[9] = esdTrack->GetSigmaTgl2();
      trk->fC[10] = esdTrack->GetSigma1PtY();
      trk->fC[11] = esdTrack->GetSigma1PtZ();
      trk->fC[12] = esdTrack->GetSigma1PtSnp();
      trk->fC[13] = esdTrack->GetSigma1PtTgl();
      trk->fC[14] = esdTrack->GetSigma1Pt2();
      esdTrack->GetTRDpid(pid);
      //trk->fTRDpid = pid[AliPID::kElectron]; ...
      trk->fNPoints = 0;
      trksData->fCount++;
    }
  }
  return iterPtr - outBlockPtr;
}

// Round-trip clusters through the HLT serialization so that an offline
// array matches what the HLT chain would deliver.
void AliHLTTRDUtils::EmulateHLTClusters(TClonesArray* clusterArray)
{
  AliHLTUInt32_t estimatedSize = (clusterArray->GetEntriesFast()+1)*sizeof(AliHLTTRDClustersArray::cluster_type);
  AliHLTUInt8_t* pBlock = (AliHLTUInt8_t*)malloc(estimatedSize);
  AliHLTUInt32_t size = AddClustersToOutput(clusterArray, pBlock);
  clusterArray->Delete();
  ReadClusters(clusterArray, pBlock, size);
  free(pBlock);
}

// Round-trip tracks through the HLT serialization (see EmulateHLTClusters).
void AliHLTTRDUtils::EmulateHLTTracks(TClonesArray* trackArray)
{
  // Worst-case estimate: per track, the track header plus up to 6
  // tracklets of up to 30 clusters each.
  AliHLTUInt32_t estimatedSize = (trackArray->GetEntriesFast()+1)*(sizeof(AliHLTTRDTrack)+6*(sizeof(AliHLTTRDTracklet)+30*sizeof(AliHLTTRDClustersArray::cluster_type)));
  AliHLTUInt8_t* pBlock = (AliHLTUInt8_t*)malloc(estimatedSize);
  AliHLTUInt32_t size = AddTracksToOutput(trackArray, pBlock);
  trackArray->Delete();
  ReadTracks(trackArray, pBlock, size);
  free(pBlock);
}

// Extract the super-module number from a data specification word: the
// index of the lowest set bit of SPEC.
AliHLTUInt32_t AliHLTTRDUtils::GetSM(AliHLTUInt32_t spec)
{
  spec = (spec&-spec); // use only least significant bit
  spec -= 1;           // as spec is now power of 2, this creates ones..
  int count = 0;       // .. which are counted
  while (spec) {
    count++;
    spec &= spec - 1;
  }
  return count;
}
<filename>robot-gitee-framework/handlers.go package framework import ( "github.com/opensourceways/go-gitee/gitee" "github.com/sirupsen/logrus" "github.com/opensourceways/community-robot-lib/config" ) // IssueHandler defines the function contract for a gitee.IssueEvent handler. type IssueHandler func(e *gitee.IssueEvent, cfg config.Config, log *logrus.Entry) error // PullRequestHandler defines the function contract for a gitee.PullRequestEvent handler. type PullRequestHandler func(e *gitee.PullRequestEvent, cfg config.Config, log *logrus.Entry) error // PushEventHandler defines the function contract for a gitee.PushEvent handler. type PushEventHandler func(e *gitee.PushEvent, cfg config.Config, log *logrus.Entry) error // NoteEventHandler defines the function contract for a gitee.NoteEvent handler. type NoteEventHandler func(e *gitee.NoteEvent, cfg config.Config, log *logrus.Entry) error type handlers struct { issueHandlers IssueHandler pullRequestHandler PullRequestHandler pushEventHandler PushEventHandler noteEventHandler NoteEventHandler } // RegisterIssueHandler registers a plugin's gitee.IssueEvent handler. func (h *handlers) RegisterIssueHandler(fn IssueHandler) { h.issueHandlers = fn } // RegisterPullRequestHandler registers a plugin's gitee.PullRequestEvent handler. func (h *handlers) RegisterPullRequestHandler(fn PullRequestHandler) { h.pullRequestHandler = fn } // RegisterPushEventHandler registers a plugin's gitee.PushEvent handler. func (h *handlers) RegisterPushEventHandler(fn PushEventHandler) { h.pushEventHandler = fn } // RegisterNoteEventHandler registers a plugin's gitee.NoteEvent handler. func (h *handlers) RegisterNoteEventHandler(fn NoteEventHandler) { h.noteEventHandler = fn }
// Constructor /** * @param id ID of the new body */ Body::Body(bodyindex id) : mID(id), mIsAlreadyInIsland(false), mIsAllowedToSleep(true), mIsActive(true), mIsSleeping(false), mSleepTime(0), mUserData(nullptr) { #ifdef IS_LOGGING_ACTIVE mLogger = nullptr; #endif }
import { connect } from 'preact-redux';

import App from './app.component';
import { createChangePage, createChangeScreenSize } from './data/page-config/actions';
import { IState } from './data/';
import { filterAccounts } from '../utils/blockchain/utils';

/**
 * Maps redux state to App props: filters wallet accounts according to the
 * user's network preferences and exposes the wallet status.
 */
const mapStateToProps = (state: IState, ownProps) => {
    // `userPreferences` may be absent on first load. Optional chaining keeps
    // the exact semantics of the previous `(x || ({} as any))` pattern
    // (undefined testNet/networks) without the unsafe `as any` casts.
    const prefs = state.userPreferences;

    const accounts = filterAccounts(
        state.wallet.data || {},
        // second arg is the "main net" flag: true unless testNet is enabled
        !prefs?.testNet,
        prefs?.networks
    );

    return {
        ...ownProps,
        accounts,
        walletStatus: state.wallet.status
    };
};

const mapDispatchToProps = {
    onScreenSizeChange: createChangeScreenSize,
    onRouteChange: createChangePage
};

export default connect(
    mapStateToProps,
    mapDispatchToProps
)(App);
/**
 * Builds a ContentValues instance from the given Pessoa, copying every
 * persisted field under its column-name constant.
 *
 * @param pessoa the Pessoa object to convert into ContentValues
 * @return a ContentValues populated with the person's data, ready for a
 *         database insert/update
 */
private ContentValues preencherContentValues(Pessoa pessoa) {
    final ContentValues cv = new ContentValues();

    // Identity fields.
    cv.put(Pessoa.NOME, pessoa.getNome());
    cv.put(Pessoa.SOBRENOME, pessoa.getSobrenome());

    // Birth date is optional; it is stored as epoch millis when present.
    if (pessoa.getDtnasc() != null) {
        cv.put(Pessoa.DTNASC, pessoa.getDtnasc().getTime());
    }

    // Contact fields.
    cv.put(Pessoa.EMAIL, pessoa.getEmail());
    cv.put(Pessoa.TELEFONE, pessoa.getTelefone());
    cv.put(Pessoa.CELULAR, pessoa.getCelular());

    // Address fields.
    cv.put(Pessoa.ENDERECO, pessoa.getEndereco());
    cv.put(Pessoa.BAIRRO, pessoa.getBairro());
    cv.put(Pessoa.CIDADE, pessoa.getCidade());
    cv.put(Pessoa.ESTADO, pessoa.getEstado());
    cv.put(Pessoa.CEP, pessoa.getCep());

    return cv;
}
// Rtnetlink message families. Each module defines a message struct, its
// wire-format buffer wrapper, the fixed header, and the header length
// constant; the commonly used items are re-exported at the crate root.

// Address (RTM_*ADDR) messages.
pub mod address;
pub use address::{AddressHeader, AddressMessage, AddressMessageBuffer, ADDRESS_HEADER_LEN};

// Link (RTM_*LINK) messages.
pub mod link;
pub use link::{LinkHeader, LinkMessage, LinkMessageBuffer, LINK_HEADER_LEN};

// Neighbour (RTM_*NEIGH) messages.
pub mod neighbour;
pub use neighbour::{
    NeighbourHeader, NeighbourMessage, NeighbourMessageBuffer, NEIGHBOUR_HEADER_LEN,
};

// Neighbour table (RTM_*NEIGHTBL) messages.
pub mod neighbour_table;
pub use neighbour_table::{
    NeighbourTableHeader, NeighbourTableMessage, NeighbourTableMessageBuffer,
    NEIGHBOUR_TABLE_HEADER_LEN,
};

// Network namespace id (RTM_*NSID) messages.
pub mod nsid;
pub use nsid::{NsidHeader, NsidMessage, NsidMessageBuffer, NSID_HEADER_LEN};

// Route (RTM_*ROUTE) messages plus the associated enums/flags.
pub mod route;
pub use route::{
    RouteFlags, RouteHeader, RouteKind, RouteMessage, RouteMessageBuffer, RouteProtocol,
    RouteScope, RouteTable, ROUTE_HEADER_LEN,
};

// Traffic control (RTM_*QDISC/TCLASS/TFILTER) messages.
pub mod tc;
pub use tc::{TcHeader, TcMessage, TcMessageBuffer, TC_HEADER_LEN};

// Protocol constants shared by all families.
pub mod constants;
pub use self::constants::*;

// Top-level message buffer and the unified message enum.
mod buffer;
pub use self::buffer::*;

mod message;
pub use self::message::*;

// Netlink attribute (NLA) types, grouped per family for convenience.
pub mod nlas {
    pub use super::address::nlas as address;
    pub use super::link::nlas as link;
    pub use super::neighbour::nlas as neighbour;
    pub use super::neighbour_table::nlas as neighbour_table;
    pub use super::nsid::nlas as nsid;
    pub use super::route::nlas as route;
    pub use super::tc::nlas as tc;
    pub use crate::utils::nla::*;
}

#[cfg(test)]
mod test;
/**
 * An elasticsearch implementation of a service mapping storage.
 *
 * Mappings are stored as documents of type "f8_service_mapping" in the shared
 * index, keyed by a Base64 encoding of the mapping's path. put/remove are
 * asynchronous (results delivered via the handler); get is synchronous.
 *
 * @author [email protected]
 */
public class ESServiceMappingStorage implements ServiceMappingStorage {

    // Raw ES connection settings used to lazily build the client.
    private Map<String, String> config;
    // Lazily created, shared Jest client; guarded by getClient()'s lock.
    private JestClient esClient;

    /**
     * Constructor.
     * @param esConfig elasticsearch connection configuration
     */
    public ESServiceMappingStorage(Map<String, String> esConfig) {
        this.config = esConfig;
    }

    /**
     * Lazily creates the Jest client on first use; synchronized so that
     * concurrent callers do not each build a client.
     * @return the esClient
     */
    public synchronized JestClient getClient() {
        if (esClient == null) {
            esClient = ESClientFactory.createClient(config);
        }
        return esClient;
    }

    /**
     * Stores (or overwrites) the mapping for the given path. The outcome is
     * reported asynchronously through the handler: a null Void on success, an
     * Exception-carrying result on failure.
     * @see io.fabric8.gateway.apiman.ServiceMappingStorage#put(java.lang.String, io.fabric8.gateway.api.apimanager.ServiceMapping, io.apiman.gateway.engine.async.IAsyncResultHandler)
     */
    @Override
    public void put(final String path, final ServiceMapping mapping, final IAsyncResultHandler<Void> handler) {
        String id = idFromPath(path);
        // refresh(false): don't force an index refresh on every write.
        Index index = new Index.Builder(mapping).refresh(false)
                .index(ESConstants.INDEX_NAME)
                .type("f8_service_mapping").id(id).build(); //$NON-NLS-1$
        try {
            getClient().executeAsync(index, new JestResultHandler<JestResult>() {
                @Override
                public void completed(JestResult result) {
                    // "completed" only means the request ran; still must check success.
                    if (!result.isSucceeded()) {
                        handler.handle(AsyncResultImpl.create(new Exception(
                                "Failed to store service mapping for path: " + path), //$NON-NLS-1$
                                Void.class));
                    } else {
                        handler.handle(AsyncResultImpl.create((Void) null));
                    }
                }
                @Override
                public void failed(Exception e) {
                    handler.handle(AsyncResultImpl.create(new Exception(
                            "Error storing service mapping for path: " + path, e), //$NON-NLS-1$
                            Void.class));
                }
            });
        } catch (ExecutionException | InterruptedException | IOException e) {
            // Submission itself failed (not the async execution).
            handler.handle(AsyncResultImpl.create(new Exception(
                    "Error storing service mapping for path: " + path, e), //$NON-NLS-1$
                    Void.class));
        }
    }

    /**
     * Creates an ES document id from the given path.
     * Base64 keeps path characters (slashes etc.) safe for use as an id.
     * @param path the mapping path
     */
    private String idFromPath(String path) {
        return Base64.encodeBase64String(path.getBytes());
    }

    /**
     * Synchronous lookup. Returns null when the mapping does not exist — and
     * also on any error (deliberate best-effort; see catch below).
     * @see io.fabric8.gateway.apiman.ServiceMappingStorage#get(java.lang.String)
     */
    @Override
    public ServiceMapping get(String path) {
        String id = idFromPath(path);
        Get get = new Get.Builder(ESConstants.INDEX_NAME, id).type("f8_service_mapping").build(); //$NON-NLS-1$
        try {
            JestResult result = getClient().execute(get);
            if (result.isSucceeded()) {
                return result.getSourceAsObject(ServiceMapping.class);
            }
            return null;
        } catch (Exception e) {
            // TODO log this error
            // NOTE(review): errors are indistinguishable from "not found" for
            // callers; at minimum this should be logged with context.
            return null;
        }
    }

    /**
     * Asynchronously deletes the mapping for the given path, reporting the
     * outcome through the handler (null Void on success).
     * @see io.fabric8.gateway.apiman.ServiceMappingStorage#remove(java.lang.String, io.apiman.gateway.engine.async.IAsyncResultHandler)
     */
    @Override
    public void remove(final String path, final IAsyncResultHandler<Void> handler) {
        String id = idFromPath(path);
        Delete delete = new Delete.Builder(id).index(ESConstants.INDEX_NAME).type("f8_service_mapping").build(); //$NON-NLS-1$
        try {
            getClient().executeAsync(delete, new JestResultHandler<JestResult>() {
                @Override
                public void completed(JestResult result) {
                    if (result.isSucceeded()) {
                        handler.handle(AsyncResultImpl.create((Void) null));
                    } else {
                        handler.handle(AsyncResultImpl.create(new Exception("Failed to remove mapping at path: " + path), Void.class)); //$NON-NLS-1$
                    }
                }
                @Override
                public void failed(Exception e) {
                    handler.handle(AsyncResultImpl.create(new Exception("Error removing mapping at path: " + path, e), Void.class)); //$NON-NLS-1$
                }
            });
        } catch (ExecutionException | InterruptedException | IOException e) {
            handler.handle(AsyncResultImpl.create(new Exception("Error removing mapping at path: " + path, e), Void.class)); //$NON-NLS-1$
        }
    }
}
Luke Sharrett/Bloomberg via Getty Images Vietnam War-era M16 rifles displayed at a vendor's booth during the Knob Creek Machine Gun Shoot in West Point, Kentucky, on Oct. 9, 2015. A Texas man who sued the federal government because it wouldn’t approve his application to manufacture a machine gun doesn’t have a constitutional right to possess the automatic weapon, an appeals court ruled. Jay Hollis sought permission to convert his AR-15, a popular semi-automatic firearm, into an M16 -- an automatic firearm that is banned under federal law, except for official use or lawfully obtained pre-1986 models. After he was rejected, Hollis mounted a constitutional challenge to the Gun Control Act of 1968 -- which Congress amended in 1986 to make it illegal to possess or transfer newly manufactured machine guns. Among other things, he argued that an "M-16 is the quintessential militia-styled arm for the modern day." In a unanimous ruling issued Thursday, the U.S. Court of Appeals for the 5th Circuit rejected Hollis’ arguments, categorically noting that "machine guns are not protected arms under the Second Amendment." The court explained that the leading Supreme Court precedent on the right to keep and bear arms, 2008’s District of Columbia v. Heller, only protected individual handgun possession for “defense of hearth and home.” "Today ... ordinary military weaponry is far more advanced than the weapons typically found at home and used for (self)-defense," the court said, adding that machine guns are "dangerous and unusual," and nothing like what militias might have used at the founding of the republic. "Heller rejected a functionalist interpretation of the Second Amendment premised on the effectiveness of militia service," the court of appeals said. 
Aided by a number of gun rights groups, Hollis had pressed a number of other arguments -- that anything that is "ordinary military equipment" is protected, that the Second Amendment really exists to allow a rebellion against the government, and that machine guns aren't really "dangerous and unusual." The 5th Circuit was largely unimpressed, calling the last argument "tantamount to asking us to overrule the Supreme Court." Interestingly, the court did survey the confused state of gun rights in the wake of the Heller decision, pointing to recent action and inaction by the Supreme Court and lower courts -- on stun guns, so-called "assault weapons" and the legal framework for weighing firearm bans -- to suggest that it can only do so much to bring clarity to the law. "We leave changes in Supreme Court caselaw to the Supreme Court," the 5th Circuit said. But if the high court's recent moves are any indication, those changes may not come any time soon. Just last week, the Supreme Court declined to review yet another Second Amendment challenge to strict gun control measures enacted in the wake of the elementary school massacre in Newtown, Connecticut -- more evidence that it may not be the right time to determine how far the Second Amendment extends.
/*

Copyright (c) Microsoft Corporation

All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You may
obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A
PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.

See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.

*/

#pragma once

#include "recordparser.h"

// Base interface for a "record bundle": a grouping of the three factories
// (parser, marshaler, record) needed to read and write a record stream.
// Concrete bundles fill in m_factory and m_parserFactory; the marshaler
// factory is shared and created here.
class RecordBundleInterfaceBase
{
public:
    RecordBundleInterfaceBase()
    {
        m_marshalerFactory.Attach(new MarshalerFactory());
    }
    virtual ~RecordBundleInterfaceBase() {}

    // Factory that creates parsers for reading items off a channel.
    DryadParserFactoryBase* GetParserFactory()
    {
        return m_parserFactory;
    }

    // Factory that creates marshalers for writing items to a channel.
    DryadMarshalerFactoryBase* GetMarshalerFactory()
    {
        return m_marshalerFactory;
    }

    // Factory that creates the record arrays themselves.
    DObjFactoryBase* GetRecordFactory()
    {
        return m_factory;
    }

protected:
    // All bundles marshal via the standard item marshaler.
    class MarshalerFactory : public DryadMarshalerFactory
    {
    public:
        void MakeMarshaler(RChannelItemMarshalerRef* pMarshaler,
                           DVErrorReporter* errorReporter)
        {
            pMarshaler->Attach(new RChannelStdItemMarshaler());
        }
    };

    DrRef<DObjFactoryBase>    m_factory;
    DryadParserFactoryRef     m_parserFactory;
    DryadMarshalerFactoryRef  m_marshalerFactory;
};

// Bundle for records stored in *packed* arrays (PackedRecordArray). Supplies
// matching reader/writer typedefs and a parser factory that produces
// PackedRecordArrayParser instances.
template< class _R > class PackedRecordBundleBase : public RecordBundleInterfaceBase
{
public:
    typedef _R RecordType;
    typedef PackedRecordArray<RecordType> Array;
    typedef RecordArrayReader<RecordType> Reader;
    typedef RecordArrayWriter<RecordType> WriterBase;

    // Convenience writer that wires itself to the bundle's record factory.
    class Writer : public WriterBase
    {
    public:
        Writer() {}
        Writer(PackedRecordBundleBase<_R>* bundle,
               SyncItemWriterBase* writer)
        {
            Initialize(bundle, writer);
        }

        void Initialize(PackedRecordBundleBase<_R>* bundle,
                        SyncItemWriterBase* writer)
        {
            bundle->InitializeWriter(this, writer);
        }
    };

    PackedRecordBundleBase() {}
    PackedRecordBundleBase(DObjFactoryBase* factory)
    {
        InitializeBase(factory);
    }
    virtual ~PackedRecordBundleBase() {}

    // Stores the record factory and builds the packed-array parser factory.
    void InitializeBase(DObjFactoryBase* factory)
    {
        m_factory = factory;
        m_parserFactory.Attach(new ParserFactory(m_factory));
    }

    void InitializeWriter(WriterBase* writer, SyncItemWriterBase* channelWriter)
    {
        writer->Initialize(channelWriter, m_factory);
    }

private:
    class ParserFactory : public DryadParserFactory
    {
    public:
        ParserFactory(DObjFactoryBase* factory)
        {
            m_factory = factory;
        }

        void MakeParser(RChannelItemParserRef* pParser,
                        DVErrorReporter* errorReporter)
        {
            pParser->Attach(new PackedRecordArrayParser(m_factory));
        }

        DObjFactoryRef  m_factory;
    };
};

// Ready-to-use packed bundle: creates its own RecordArrayFactory with the
// given (or default) maximum array size.
template< class _R > class PackedRecordBundle : public PackedRecordBundleBase<_R>
{
public:
    // NOTE(review): `Array` here names the dependent base's typedef without
    // qualification; standards-conforming compilers require
    // `typename PackedRecordBundleBase<_R>::Array` — verify against the
    // compilers this builds with (MSVC's permissive mode accepts it).
    typedef RecordArrayFactory<Array> Factory;

    PackedRecordBundle()
    {
        Initialize(RChannelItem::s_defaultRecordBatchSize);
    }
    PackedRecordBundle(UInt32 maxArraySize)
    {
        Initialize(maxArraySize);
    }

    void Initialize(UInt32 maxArraySize)
    {
        DrRef<Factory> factory;
        factory.Attach(new Factory(maxArraySize));
        InitializeBase(factory);
    }
};

// Bundle for records stored in plain RecordArray form; mirrors
// PackedRecordBundleBase but parses with the standard item parser.
template< class _R > class RecordBundleBase : public RecordBundleInterfaceBase
{
public:
    typedef _R RecordType;
    typedef RecordArray<RecordType> Array;
    typedef RecordArrayReader<RecordType> Reader;
    typedef RecordArrayWriter<RecordType> WriterBase;

    // Convenience writer that wires itself to the bundle's record factory.
    class Writer : public WriterBase
    {
    public:
        Writer() {}
        Writer(RecordBundleBase<_R>* bundle,
               SyncItemWriterBase* writer)
        {
            Initialize(bundle, writer);
        }

        void Initialize(RecordBundleBase<_R>* bundle,
                        SyncItemWriterBase* writer)
        {
            bundle->InitializeWriter(this, writer);
        }
    };

    RecordBundleBase() {}
    RecordBundleBase(DObjFactoryBase* factory)
    {
        InitializeBase(factory);
    }
    virtual ~RecordBundleBase() {}

    // Stores the record factory and builds the std-item parser factory.
    void InitializeBase(DObjFactoryBase* factory)
    {
        m_factory = factory;
        m_parserFactory.Attach(new ParserFactory(m_factory));
    }

    void InitializeWriter(WriterBase* writer, SyncItemWriterBase* channelWriter)
    {
        writer->Initialize(channelWriter, m_factory);
    }

private:
    class ParserFactory : public DryadParserFactory
    {
    public:
        ParserFactory(DObjFactoryBase* factory)
        {
            m_factory = factory;
        }

        void MakeParser(RChannelItemParserRef* pParser,
                        DVErrorReporter* errorReporter)
        {
            pParser->Attach(new RChannelStdItemParser(m_factory));
        }

        DObjFactoryRef  m_factory;
    };
};

// Ready-to-use plain bundle: creates its own RecordArrayFactory with the
// given (or default) maximum array size.
template< class _R > class RecordBundle : public RecordBundleBase<_R>
{
public:
    // NOTE(review): same unqualified dependent-base `Array` lookup as in
    // PackedRecordBundle above — confirm target compilers accept it.
    typedef RecordArrayFactory<Array> Factory;

    RecordBundle()
    {
        Initialize(RChannelItem::s_defaultRecordBatchSize);
    }
    RecordBundle(UInt32 maxArraySize)
    {
        Initialize(maxArraySize);
    }

    void Initialize(UInt32 maxArraySize)
    {
        DrRef<Factory> factory;
        factory.Attach(new Factory(maxArraySize));
        InitializeBase(factory);
    }
};
//////////////////////////////////////// // CalcCircleIndex /*! Calculate the circle of player indexes // \param int* paPlayerDeck : result circle */ void cPlayersOnTable::CalcCircleIndex(int* paPlayerDeck) { ASSERT(paPlayerDeck); paPlayerDeck[0] = m_lCurrent; int k = 1; while(k < m_lNumPlayers) { paPlayerDeck[k] = paPlayerDeck[k-1] + 1; if (paPlayerDeck[k] >= m_lNumPlayers) { paPlayerDeck[k] = 0; } k++; } }
import { makeCommand, Parsers } from "../../dist";

/**
 * `hello` command: greets either a user (via DM) or a text channel,
 * depending on which kind of target the caller supplied.
 */
const cmd = makeCommand("hello", {
  description: "Send a hello message to either a user (in DM) or in a channel.",
  args: {
    where: {
      // Accept either a user or a text channel; the discriminator tag
      // (`variant`) tells the executor which branch was matched.
      parser: Parsers.union(
        Parsers.user.discriminate("user"),
        Parsers.channel("text").discriminate("channel")
      ),
    },
  },
});

cmd.executor = async ({ where }) => {
  if (where.variant === "user") {
    // Direct message to the mentioned user.
    await where.value.send(`Hello ${where.value.username} !`);
  } else if (where.variant === "channel") {
    // Public greeting in the given text channel.
    await where.value.send("Hello there !");
  }
};

export default cmd;