package gov.healthit.chpl.validation.pendingListing.reviewer.duplicate;

import static org.junit.Assert.assertEquals;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.MessageSource;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import gov.healthit.chpl.dto.PendingCertificationResultAdditionalSoftwareDTO;
import gov.healthit.chpl.dto.PendingCertificationResultDTO;
import gov.healthit.chpl.dto.PendingCertifiedProductDTO;
import gov.healthit.chpl.util.ErrorMessageUtil;
import gov.healthit.chpl.validation.pendingListing.reviewer.edition2014.duplicate.AdditionalSoftware2014DuplicateReviewer;

@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = { gov.healthit.chpl.CHPLTestConfig.class })
public class AdditionalSoftware2014DuplicateReviewerTest {
    @Autowired
    private MessageSource messageSource;

    @Mock
    private ErrorMessageUtil msgUtil = new ErrorMessageUtil(messageSource);

    private AdditionalSoftware2014DuplicateReviewer reviewer;

    @Before
    public void setup() {
        MockitoAnnotations.initMocks(this);
        reviewer = new AdditionalSoftware2014DuplicateReviewer(msgUtil);

        // TODO - Can this be extracted as some sort of generic method, so it can be used for all error messages?
        Mockito.doAnswer(new Answer<String>() {
            @Override
            public String answer(final InvocationOnMock invocation) throws Throwable {
                String message = "Certification %s contains duplicate Additional Software: CP Source '%s'. "
                        + "The duplicates have been removed.";
                Object[] args = invocation.getArguments();
                return String.format(message, args[1], args[2]);
            }
        }).when(msgUtil).getMessage(
                ArgumentMatchers.eq("listing.criteria.duplicateAdditionalSoftwareCP.2014"),
                ArgumentMatchers.anyString(), ArgumentMatchers.anyString(), ArgumentMatchers.anyString());

        Mockito.doAnswer(new Answer<String>() {
            @Override
            public String answer(final InvocationOnMock invocation) throws Throwable {
                String message = "Certification %s contains duplicate Additional Software: Non CP Source: '%s', Version '%s'. "
                        + "The duplicates have been removed.";
                Object[] args = invocation.getArguments();
                return String.format(message, args[1], args[2], args[3]);
            }
        }).when(msgUtil).getMessage(
                ArgumentMatchers.eq("listing.criteria.duplicateAdditionalSoftwareNonCP.2014"),
                ArgumentMatchers.anyString(), ArgumentMatchers.anyString(), ArgumentMatchers.anyString());
    }

    @Test
    public void testDuplicateCPExists() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setChplId("Chpl1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setChplId("Chpl1");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);

        reviewer.review(listing, cert);

        assertEquals(1, listing.getWarningMessages().size());
        assertEquals(1, cert.getAdditionalSoftware().size());
    }

    @Test
    public void testDuplicateCPsDoNotExist() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setChplId("Chpl1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setChplId("Chpl2");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);

        reviewer.review(listing, cert);

        assertEquals(0, listing.getWarningMessages().size());
        assertEquals(2, cert.getAdditionalSoftware().size());
    }

    @Test
    public void testDuplicateCPsExistInLargerSet() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setChplId("Chpl1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setChplId("Chpl2");

        PendingCertificationResultAdditionalSoftwareDTO as3 = new PendingCertificationResultAdditionalSoftwareDTO();
        as3.setChplId("Chpl1");

        PendingCertificationResultAdditionalSoftwareDTO as4 = new PendingCertificationResultAdditionalSoftwareDTO();
        as4.setChplId("Chpl3");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);
        cert.getAdditionalSoftware().add(as3);
        cert.getAdditionalSoftware().add(as4);

        reviewer.review(listing, cert);

        assertEquals(1, listing.getWarningMessages().size());
        assertEquals(3, cert.getAdditionalSoftware().size());
    }

    @Test
    public void testDuplicateNonCPExists() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setName("Chpl1");
        as1.setVersion("v1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setName("Chpl1");
        as2.setVersion("v1");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);

        reviewer.review(listing, cert);

        assertEquals(1, listing.getWarningMessages().size());
        assertEquals(1, cert.getAdditionalSoftware().size());
    }

    @Test
    public void testDuplicateNonCPsDoNotExist() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setName("Chpl1");
        as1.setVersion("v1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setName("Chpl2");
        as2.setVersion("v2");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);

        reviewer.review(listing, cert);

        assertEquals(0, listing.getWarningMessages().size());
        assertEquals(2, cert.getAdditionalSoftware().size());
    }

    @Test
    public void testDuplicateNonCPsExistInLargerSet() {
        PendingCertifiedProductDTO listing = new PendingCertifiedProductDTO();
        PendingCertificationResultDTO cert = new PendingCertificationResultDTO();

        PendingCertificationResultAdditionalSoftwareDTO as1 = new PendingCertificationResultAdditionalSoftwareDTO();
        as1.setName("Chpl1");
        as1.setVersion("v1");

        PendingCertificationResultAdditionalSoftwareDTO as2 = new PendingCertificationResultAdditionalSoftwareDTO();
        as2.setName("Chpl2");
        as2.setVersion("v2");

        PendingCertificationResultAdditionalSoftwareDTO as3 = new PendingCertificationResultAdditionalSoftwareDTO();
        as3.setName("Chpl1");
        as3.setVersion("v1");

        PendingCertificationResultAdditionalSoftwareDTO as4 = new PendingCertificationResultAdditionalSoftwareDTO();
        as4.setName("Chpl3");
        as4.setVersion("v3");

        cert.getAdditionalSoftware().add(as1);
        cert.getAdditionalSoftware().add(as2);
        cert.getAdditionalSoftware().add(as3);
        cert.getAdditionalSoftware().add(as4);

        reviewer.review(listing, cert);

        assertEquals(1, listing.getWarningMessages().size());
        assertEquals(3, cert.getAdditionalSoftware().size());
    }
}
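The TODO in setup() asks whether the message stubbing can be generalized. A minimal sketch of one way to do it, as a helper inside this test class; the helper name is hypothetical, not part of the CHPL codebase, and it needs an extra import of java.util.Arrays:

    // Hypothetical helper: stubs msgUtil.getMessage(key, ...) to format the
    // given template with whatever string arguments follow the key.
    private void mockMessage(final String key, final String template) {
        Mockito.doAnswer(new Answer<String>() {
            @Override
            public String answer(final InvocationOnMock invocation) throws Throwable {
                Object[] args = invocation.getArguments();
                // args[0] is the message key; the rest fill the template.
                // String.format ignores surplus arguments, so one helper covers
                // templates with different placeholder counts.
                return String.format(template, Arrays.copyOfRange(args, 1, args.length));
            }
        }).when(msgUtil).getMessage(ArgumentMatchers.eq(key),
                ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
                ArgumentMatchers.anyString());
    }

With that in place, setup() would shrink to two calls such as mockMessage("listing.criteria.duplicateAdditionalSoftwareCP.2014", "Certification %s contains duplicate Additional Software: CP Source '%s'. The duplicates have been removed.").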
{-|
Module      : Network.TokenBucket.Client
Description : Client for Token Bucket Server
Copyright   : (c) Stack Builders Inc. 2014
License     : MIT
Maintainer  : <EMAIL>
Stability   : experimental
Portability : unknown

Connects to a Token Bucket Server and requests tokens from the
specified bucket.
-}
module Network.TokenBucket.Client (connect, get) where

import GHC.IO.Handle ( Handle
                     , BufferMode(..)
                     , hGetLine
                     , hPutStr
                     , hClose
                     , hSetBuffering
                     )
import Data.Pool (Pool(..), createPool, withResource)
import Network

-- | Connects to the given host, using a connection pool.
connect :: String           -- ^ Host name running the token bucket server
        -> PortNumber       -- ^ Port number the token bucket service is running on
        -> IO (Pool Handle) -- ^ A pool of handles to the token bucket server
connect host port =
  createPool (createConnection host port) destroyConnection 1 30 50

-- | Tries to get a token from the given bucket.
get :: Pool Handle -- ^ The pool of connections to the token bucket server
    -> String      -- ^ The name of a token bucket
    -> IO (Either String Bool)
    -- ^ The result of the token bucket server request, or the String error
    -- from the server
get pool bucket = withResource pool (requestToken bucket)

requestToken :: String -> Handle -> IO (Either String Bool)
requestToken bucket handle = do
  _ <- hPutStr handle $ "get " ++ bucket ++ "\r\n"
  line <- hGetLine handle
  return $ case line of
             "1" -> Right True
             "0" -> Right False
             _   -> Left line

createConnection :: String -> PortNumber -> IO Handle
createConnection host port = do
  hdl <- connectTo host (PortNumber port)
  hSetBuffering hdl LineBuffering
  return hdl

destroyConnection :: Handle -> IO ()
destroyConnection hdl = hClose hdl
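A usage sketch for this client. The host, port, and bucket name are assumptions for illustration; the wire protocol ("get <bucket>\r\n", reply "1"/"0" or an error line) is the one implemented above:

  import Network.TokenBucket.Client (connect, get)

  main :: IO ()
  main = do
    -- connect to a (hypothetical) token bucket server on localhost:1337
    pool <- connect "localhost" 1337
    result <- get pool "api"
    case result of
      Right True  -> putStrLn "token granted"
      Right False -> putStrLn "bucket empty, throttle the request"
      Left err    -> putStrLn ("server error: " ++ err)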
import { Injectable } from '@angular/core';
import { AngularFirestore, AngularFirestoreCollection } from '@angular/fire/firestore';

import { KnownBoatsService } from '../../../../core/constants/known-boats/known-boats.service';
import { UsageInfo, UsageInfoID } from '../../../../core/objects/usageInfo';

import { Observable } from 'rxjs/Observable';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { map, tap, scan, mergeMap, throttleTime } from 'rxjs/operators';

@Injectable()
export class BoatUsageService {
  private offset = new Date();
  public pageIndex = 0;
  public currentSelectedUsages: BehaviorSubject<UsageInfoID[]> = new BehaviorSubject(null);
  private previousUsageSet: any[] = [];
  public batch_size: number = 20;

  totalNumberOfUsages: BehaviorSubject<number> = new BehaviorSubject(0);

  constructor(private db: AngularFirestore, BOATS: KnownBoatsService) {
    this.db.doc('/stats/totalUsageItems').valueChanges().subscribe(numRecords => {
      this.totalNumberOfUsages.next((numRecords as any).numberOfDocs);
    });
    this.getBatch().subscribe(val => { this.currentSelectedUsages.next(val); });
  }

  forwardBatch(offsetPos) {
    this.pageIndex++;
    this.offset = this.currentSelectedUsages.getValue()[offsetPos].endTime;
    this.previousRecord();
    this.getBatch().subscribe(val => { this.currentSelectedUsages.next(val); });
  }

  backBatch(offsetPos) {
    this.pageIndex--;
    this.offset = this.previousUsageSet.pop().usage[0].endTime;
    this.getPreviousBatch().subscribe(val => { this.currentSelectedUsages.next(val); });
  }

  updateBatch(batch_size) {
    this.pageIndex = 0;
    this.batch_size = batch_size;
    this.offset = new Date();
    this.previousUsageSet = [];
    this.getBatch().subscribe(val => { this.currentSelectedUsages.next(val); });
  }

  /* Get the next set of usage data from the DB */
  getBatch() {
    return this.db
      .collection<UsageInfo>('/boatUsage', ref =>
        ref.orderBy('endTime', 'desc').startAfter(this.offset).limit(this.batch_size))
      .snapshotChanges()
      .pipe(
        map(actions => actions.map(a => {
          const data = a.payload.doc.data() as UsageInfo;
          const id = a.payload.doc.id;
          return { id, ...data };
        }))
      );
  }

  /* Get the previous set of usage data from the DB */
  getPreviousBatch() {
    return this.db
      .collection<UsageInfo>('/boatUsage', ref =>
        ref.orderBy('endTime', 'desc').startAt(this.offset).limit(this.batch_size))
      .snapshotChanges()
      .pipe(
        map(actions => actions.map(a => {
          const data = a.payload.doc.data() as UsageInfo;
          const id = a.payload.doc.id;
          return { id, ...data };
        }))
      );
  }

  private previousRecord() {
    /* Only add to the previous list if the item is not already the previous one */
    if (this.previousUsageSet[this.previousUsageSet.length - 1] !== this.currentSelectedUsages.getValue()) {
      this.previousUsageSet.push({ 'usage': this.currentSelectedUsages.getValue() });
    }
  }
}
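A sketch of how a component might drive this cursor-based paging service. The component, its template, and the import paths are hypothetical, not part of this repo:

  import { Component } from '@angular/core';
  import { UsageInfoID } from '../../../../core/objects/usageInfo';

  // Hypothetical consumer: wires the service's BehaviorSubject to a table
  // and forwards paging clicks to forwardBatch/backBatch.
  @Component({ selector: 'usage-table', template: '...' })
  export class UsageTableComponent {
    usages: UsageInfoID[] = [];

    constructor(private usageService: BoatUsageService) {
      this.usageService.currentSelectedUsages.subscribe(batch => {
        if (batch) { this.usages = batch; }
      });
    }

    nextPage() {
      // the endTime of the last visible row becomes the next query's cursor
      this.usageService.forwardBatch(this.usages.length - 1);
    }

    prevPage() {
      this.usageService.backBatch(0);
    }
  }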
#ifndef CONSOLE_H
#define CONSOLE_H

void start_console(int cmdline_length);
void stop_console();
void write_console(char* message);

#endif /* CONSOLE_H */
"""Support for the EauFrance service."""
from datetime import datetime, timedelta, time
import pytz
import logging
import ast
#from typing import Any, Callable, Dict, Optional

import voluptuous as vol

from homeassistant import config_entries, core
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
    ATTR_ATTRIBUTION,
    CONF_NAME,
    CONF_DEVICE_CLASS,
    CONF_DEVICE_ID,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
#from homeassistant.helpers.typing import (
#    ConfigType,
#    DiscoveryInfoType,
#    HomeAssistantType,
#)
from homeassistant.util import Throttle
from homeassistant.util import dt as dt_util

import requests

_LOGGER = logging.getLogger(__name__)

ATTRIBUTION = "Data provided by {0}"
DEFAULT_NAME = "VC"

MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)

DEVICE_CLASS = {
    "H": "Height",
    "Q": "Flow",
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_DEVICE_ID, default=""): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): vol.In(DEVICE_CLASS),
    }
)


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    session = async_get_clientsession(hass)
    name = config.get(CONF_NAME)
    device_class = config.get(CONF_DEVICE_CLASS)
    device_id = config.get(CONF_DEVICE_ID)

    efd = EauFranceData(hass, device_id, device_class)

    async_add_entities(
        [VigicruesSensor.current(name, efd)],
        True,
    )


class VigicruesSensor(Entity):
    """Implementation of an EauFrance sensor."""

    def __init__(self, name, efd):
        """Initialize the sensor."""
        self._name = name
        self._efd = efd
        self._state = None
        self._unit_of_measurement = ""
        self._unique_id = efd.unique_id

    @classmethod
    def current(cls, name, efd):
        return cls(name, efd)

    @property
    def name(self):
        return self._name

    @property
    def unique_id(self):
        return self._unique_id

    @property
    def state(self):
        return self._state

    @property
    def unit_of_measurement(self):
        return self._unit_of_measurement

    @property
    def icon(self):
        if self._efd.device_class == "H":
            return "mdi:waves"
        return "mdi:fast-forward"

    @property
    def device_state_attributes(self):
        source = "EauFrance"
        return {ATTR_ATTRIBUTION: ATTRIBUTION.format(source)}

    def update(self):
        """Get the latest data from EauFrance and update the state."""
        #try:
        self._efd.update(self.hass)
        #except:
        #    _LOGGER.error("Exception when getting EauFrance web update data")
        #    return
        self._state = self._efd.data
        self._unit_of_measurement = self._efd.unit


class EauFranceData():
    """Get the latest data from EauFrance.

    device_class must be H or Q
    """

    def __init__(self, hass, device_id, device_class):
        self._device_id = device_id
        self._device_class = device_class
        self._time_zone = hass.config.time_zone
        self.data = None
        if device_class == "H":
            self.unit = "m"
        else:
            self.unit = "m³/s"

    @property
    def device_class(self):
        return self._device_class

    @property
    def unique_id(self):
        return "edf_{}_{}".format(self._device_id, self._device_class)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self, hass):
        # get readings from the eaufrance website
        try:
            obs = self.get_first_reading()
            if obs is None:
                _LOGGER.warning("Failed to fetch data from EauFrance")
                return
            if "resultat_obs" not in obs:
                _LOGGER.warning("resultat_obs not found in:")
                _LOGGER.warning(obs)
                return
            self.data = obs["resultat_obs"] / 1000
            # show under 10m in cm, otherwise to 1 dp.
            if self.data < 10.0:
                self.data = round(self.data, 2)
            else:
                self.data = round(self.data, 1)
        except ConnectionError:
            _LOGGER.warning("Unable to connect to EauFrance URL")
        except TimeoutError:
            _LOGGER.warning("Timeout connecting to EauFrance URL")
        except Exception as e:
            _LOGGER.warning("{0} occurred in update. Details: {1}".format(e.__class__, e))

    def get_device_history_url(self):
        """
        Create url to get the most recent readings for this station
        (a two-hour window, most recent first).

        Parameters
            device_id
        see https://hubeau.eaufrance.fr/page/api-hydrometrie#/hydrometrie/observations
        """
        # example https://hubeau.eaufrance.fr/api/v1/hydrometrie/observations_tr?code_entite=O588251001&grandeur_hydro=Q&timestep=60&sort=desc&date_debut_obs=2021-02-16
        base_url = "https://hubeau.eaufrance.fr/api/v1/hydrometrie/observations_tr"
        # sort so the most recent is first - only need to read the first result
        params = {
            "code_entite": self._device_id,
            "grandeur_hydro": self._device_class,
            "timestep": "60",
            "sort": "desc"
        }
        now_utc = dt_util.utcnow()
        start_of_period = now_utc - timedelta(hours=2)  # fetch the last two hours of readings
        params.update({"date_debut_obs": start_of_period.strftime("%Y-%m-%dT%H:%M:%S")})
        #now = datetime.now()
        #_LOGGER.warning("now : {0}".format(now.strftime("%Y-%m-%dT%H:%M:%S")))
        #_LOGGER.warning("utc : {0}".format(now_utc.strftime("%Y-%m-%dT%H:%M:%S")))
        #_LOGGER.warning("startofperiod: {0}".format(start_of_period.strftime("%Y-%m-%dT%H:%M:%S")))
        all_params = '&'.join('{0}={1}'.format(key, val) for key, val in params.items())
        return base_url + "?" + all_params

    def get_results_data(self):
        """
        Return the array of readings
        """
        url = self.get_device_history_url()
        response = requests.get(url)
        if response.status_code != requests.codes.ok:
            raise Exception("requests: getting data: {0}\n{1}\n{2}".format(
                response.status_code, url, response.content))

        content = response.content.decode()
        # returned example (split to new lines for readability - no line breaks in reality);
        # note the date_obs and resultat_obs values
        #content = '{"count":10,"first":"https://hubeau.eaufrance.fr/api/v1/hydrometrie/observations_tr?code_entite=O588251001&grandeur_hydro=Q&timestep=60&sort=desc&date_debut_obs=2021-02-16&page=1&size=10","prev":null,"next":null,"api_version":"1.0.1","data":[{"code_site":"O5882510","code_station":"O588251001","grandeur_hydro":"Q","date_debut_serie":"2021-02-16T00:00:00Z","date_fin_serie":"2021-02-16T10:00:00Z","statut_serie":4,"code_systeme_alti_serie":31,"date_obs":"2021-02-16T10:00:00Z","resultat_obs":147629.0,"code_methode_obs":12,"libelle_methode_obs":"Interpolation","code_qualification_obs":16,"libelle_qualification_obs":"Non qualifiée","continuite_obs_hydro":true,"longitude":1.340482605,"latitude":44.091553707}]}'
        #content = '{
        #    "count":10,
        #    "first":"https://hubeau.eaufrance.fr/api/v1/hydrometrie/observations_tr?code_entite=O588251001&grandeur_hydro=Q&timestep=60&sort=desc&date_debut_obs=2021-02-16&page=1&size=10",
        #    "prev":null,
        #    "next":null,
        #    "api_version":"1.0.1",
        #    "data":[
        #        {"code_site":"O5882510",
        #         "code_station":"O588251001",
        #         "grandeur_hydro":"Q",
        #         "date_debut_serie":"2021-02-16T00:00:00Z",
        #         "date_fin_serie":"2021-02-16T10:00:00Z",
        #         "statut_serie":4,"code_systeme_alti_serie":31,
        #         "date_obs":"2021-02-16T10:00:00Z",
        #         "resultat_obs":147629.0,
        #         "code_methode_obs":12,
        #         "libelle_methode_obs":"Interpolation",
        #         "code_qualification_obs":16,
        #         "libelle_qualification_obs":"Non qualifiée",
        #         "continuite_obs_hydro":true,
        #         "longitude":1.340482605,
        #         "latitude":44.091553707
        #        }
        #        <more data>
        #    ]}'
        # not a true python dictionary so need to reformat slightly
        content = content.replace(":null", ":None")
        content = content.replace(":true", ":True")
        root = ast.literal_eval(content)

        if "data" not in root:
            _LOGGER.warning(root)
            raise Exception("data not found in root")

        count = root["count"]
        if count == 0:
            raise Exception("No observations returned for {0}".format(self._device_id))

        d = root["data"]
        if len(d) == 0:
            _LOGGER.warning(root)
            raise Exception("data contains no readings")

        return d

    def get_first_reading(self):
        """
        Extract date_obs and resultat_obs from the first reading
        as this is the most up to date reading available.
        return a dictionary with these two key/values
        """
        d = self.get_results_data()
        first_reading = d[0]
        if "date_obs" not in first_reading or "resultat_obs" not in first_reading:
            _LOGGER.warning(first_reading)
            raise Exception("unexpected format of first_reading")

        reading = {"date_obs": first_reading["date_obs"],
                   "resultat_obs": first_reading["resultat_obs"]}
        return reading
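A side note on the parsing above: the hub'eau endpoint returns standard JSON, so the replace()/ast.literal_eval workaround can be avoided with the standard-library json module (or requests' response.json()). A minimal sketch of get_results_data on that basis, keeping the same checks:

    import json

    def get_results_data(self):
        """Return the array of readings, parsed as plain JSON."""
        url = self.get_device_history_url()
        response = requests.get(url)
        if response.status_code != requests.codes.ok:
            raise Exception("requests: getting data: {0}\n{1}".format(
                response.status_code, url))

        # json.loads maps null/true/false to None/True/False natively,
        # so no string rewriting is needed before parsing
        root = json.loads(response.content.decode())
        if "data" not in root:
            raise Exception("data not found in root")
        if root["count"] == 0 or len(root["data"]) == 0:
            raise Exception("No observations returned for {0}".format(self._device_id))
        return root["data"]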
// Used to create shallow copies of accounts for transfer
// from opts to real accounts in server struct.
func (a *Account) shallowCopy() *Account {
	na := NewAccount(a.Name)
	na.Nkey = a.Nkey
	na.Issuer = a.Issuer
	na.imports = a.imports
	na.exports = a.exports
	return na
}
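Because the copy is shallow, imports and exports are copied by reference: mutations made through either account are visible through both. A standalone Go illustration of that semantics using a hypothetical stand-in type (not the real Account struct):

	package main

	import "fmt"

	// Minimal stand-in: the map field is copied by reference,
	// so both structs share the same entries.
	type account struct {
		name    string
		exports map[string]bool
	}

	func (a *account) shallowCopy() *account {
		return &account{name: a.name, exports: a.exports}
	}

	func main() {
		orig := &account{name: "A", exports: map[string]bool{}}
		cp := orig.shallowCopy()
		cp.exports["stream.updates"] = true
		fmt.Println(orig.exports["stream.updates"]) // true: the map is shared
	}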
On March 11, a hiker near North Bend on the slopes of the Cascade Mountains in Washington state found a sick bat lying on the trail. He took the little brown bat, Myotis lucifugus, to the Progressive Animal Welfare Society, where it died two days later.

At the time, a veterinarian at the clinic recognized that the bat had the signs of white-nose syndrome, a fungal infection caused by Pseudogymnoascus destructans—a diagnosis that the Washington Fish and Wildlife Department confirmed yesterday. In the past 10 years, over 7 million bats in 25 states from New York to Nebraska have died from the mysterious disease, but this is the first time the fungus has been recorded west of the Rockies, setting off alarm bells along the West Coast.

“I think this is really bad,” Katie Gillies, director of the Imperiled Species Program at Bat Conservation International in Texas, tells Darryl Fears at The Washington Post. “I really do think this is a big leap. Now we’re going to see it radiate from that new point. It’s like having breast cancer and finding that it’s metastasized.”

According to the Washington Department of Fish and Wildlife, the white, fuzzy fungus grows on the nose, wings and ears of bats as they hibernate during winter and can also damage skin tissue. The bats wake more often during their long nap, burning through their limited fat reserves, which eventually leads to starvation. The disease can also cause wing damage, dehydration and impaired circulation.

So far, researchers are not sure where the disease comes from, but they suspect it was transported from Europe on the gear of cavers. It spreads from bat to bat and through spores that can contaminate clothing, though it is not harmful to humans.

In the East, the disease has wiped out some bat colonies entirely, and it has pushed the little brown bat, the most common bat in the U.S., to near extinction in New York and Pennsylvania. Many other bat species face catastrophic die-offs over the next few decades.

According to the Seattle PI, authorities are sure the affected bat comes from Washington and is not an eastern bat that lost its way, because it belongs to a subspecies of little brown bat that occurs only in the West. Eleven other bat species in the state are also at risk from the disease.

Not only is the loss of bats bad for biodiversity, it also affects people. According to Fears, bats provide over $3 billion worth of insect removal to farmers annually, and one colony of 150 brown bats can eat enough cucumber beetles in a summer to prevent the hatching of 33 million rootworm larvae.

“The implications of losing our bat population can be potentially quite dire,” Washington Fish and Wildlife veterinarian Katie Haman said during a news conference. “The long term implications of catastrophic declines in our bat populations could be really hefty. Bats are incredibly important and the predictions from what we've learned in the Eastern ecosystem is that this could have really huge impacts.”

How the disease got to Washington and how long it has been in the state are not known. “This bat had the deterioration already, which suggests the fungus didn’t just get here this year,” Greg Falxa, a wildlife biologist with the Department of Fish and Game, tells The Washington Post. “Who knows how it got here? Everything is speculation right now. We’re starting surveillance in that area.”

But Mollie Mattson, a senior scientist with the Center for Biological Diversity, has an opinion on how white-nose made it to the West. “This disease just made a jump of more than 1,000 miles, so it’s pretty reasonable to think this could be a human-caused transmission,” she says in a press release. “What’s absolutely heartbreaking about this news is that there were obvious things wildlife and land managers could have done to stem the spread, including prohibiting nonessential cave access into public land caves. They could have passed rules requiring that no caving gear or clothing from WNS-positive states be allowed in caves in unaffected states,” she says. “This is a wake-up call for land managers in the West to do what's needed to keep white-nose syndrome from spinning out of control before it’s too late.”
Energy Efficient Phosphorus Recovery by Microbial Electrolysis Cell Induced Calcium Phosphate Precipitation

Phosphorus (P) removal and recovery from waste streams is essential for a sustainable world. Here, we updated a previously developed abiotic electrochemical P recovery system to a bioelectrochemical system. The anode was inoculated with electroactive bacteria (electricigens), which are capable of oxidizing soluble organic substrates and releasing electrons. These electrons are then used for the reduction of water at the cathode, resulting in a pH increase close to the cathode. Hence, phosphate can be removed with coexisting calcium ions as calcium phosphate at the surface of the cathode with a much lower energy input. Depending on the available substrate (sodium acetate) concentration, an average current density from 1.1 ± 0.1 to 6.6 ± 0.4 A/m² was achieved. This resulted in a P removal of 20.1 ± 1.5% to 73.9 ± 3.7%, a Ca removal of 10.5 ± 0.6% to 44.3 ± 1.7%, and a Mg removal of 2.7 ± 1.9% to 16.3 ± 3.0%. The specific energy consumption and the purity of the solids were limited by the relatively low P concentration (0.23 mM) in the domestic wastewater. The relative abundance of calcium phosphate in the recovered product increased from 23% to 66%, and the energy consumption for recovery decreased from 224 ± 7 kWh/kg P to just 56 ± 6 kWh/kg P when treating wastewater with a higher P concentration (0.76 mM). An even lower energy demand of 21 ± 2 kWh/kg P was obtained with a platinized cathode. This highlights the promising potential of bioelectrochemical P recovery from P-rich waste streams.
def add_date(date_uri, year, g, month=None, day=None, label=None):
    if year and str(year) != "1900":
        g.add((date_uri, RDF.type, VIVO.DateTimeValue))
        if day and month:
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthDayPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-%02d-%02dT00:00:00" % (
                       year, month_str_to_month_int(month), day),
                       datatype=XSD.dateTime)))
            g.add((date_uri, RDFS.label,
                   Literal(label or "%s %s, %s" % (
                       month_int_to_month_str(month), num_to_str(day),
                       num_to_str(year)))))
        elif month:
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-%02d-01T00:00:00" % (
                       year, month_str_to_month_int(month)),
                       datatype=XSD.dateTime)))
            g.add((date_uri, RDFS.label,
                   Literal(label or "%s %s" % (month, num_to_str(year)))))
        else:
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-01-01T00:00:00" % (year), datatype=XSD.dateTime)))
            g.add((date_uri, RDFS.label, Literal(label or num_to_str(year))))
        return True
    return False
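A hedged usage sketch: this function appears to target an rdflib graph with VIVO-ontology bindings. The namespace setup and example URI below are assumptions for illustration, as are the month/day values; the helpers month_str_to_month_int, month_int_to_month_str, and num_to_str are presumed to come from the same module:

    from rdflib import Graph, Literal, Namespace, URIRef
    from rdflib.namespace import RDF, RDFS, XSD

    # Assumed binding for the VIVO core ontology used by add_date.
    VIVO = Namespace("http://vivoweb.org/ontology/core#")

    g = Graph()
    date_uri = URIRef("http://example.org/individual/n-date-2016-03-11")

    # Full year-month-day precision: emits a vivo:DateTimeValue with a
    # dateTime literal and a label such as "March 11, 2016".
    if add_date(date_uri, 2016, g, month="March", day=11):
        print(g.serialize(format="turtle"))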
/* The caller must lock the context. */
static void falcon_push(GQueue *queue, falcon_object_t *object)
{
	GList *l = NULL;

	g_return_if_fail(queue);

	l = g_queue_find_custom(queue, falcon_object_get_name(object),
	                        falcon_object_compare);
	if (!l)
		g_queue_push_tail(queue, object);
}
#include <bits/stdc++.h>
#define endl "\n"
#define test \
	int t;   \
	cin >> t;
#define ll long long
#define er cout << "<-----Darshan Hear----->" << endl;
#define puru return 0;
using namespace std;

int main()
{
	int n, ans = 0;
	cin >> n;
	int p[n], q[n];
	for (int i = 0; i < n; i++) {
		cin >> p[i] >> q[i];
		// a room works for both George and Alex when it has
		// at least two free places: q[i] - p[i] >= 2
		if (q[i] - p[i] >= 2) {
			ans++;
		}
	}
	cout << ans << endl;
	puru
}

/*
George has recently entered the BSUCP (Berland State University for Cool
Programmers). George has a friend Alex who has also entered the university.
Now they are moving into a dormitory.

George and Alex want to live in the same room. The dormitory has n rooms in
total. At the moment the i-th room has pi people living in it and the room
can accommodate qi people in total (pi ≤ qi). Your task is to count how many
rooms have free place for both George and Alex.

Input
The first line contains a single integer n (1 ≤ n ≤ 100) — the number of
rooms. The i-th of the next n lines contains two integers pi and qi
(0 ≤ pi ≤ qi ≤ 100) — the number of people who already live in the i-th room
and the room's capacity.

Output
Print a single integer — the number of rooms where George and Alex can move in.

Examples
input
3
1 1
2 2
3 3
output
0

input
3
1 10
0 10
10 10
output
2
*/
package koopa.cobol.grammar.directives; import koopa.core.data.markers.Start; import koopa.core.parsers.ParserCombinator; import koopa.core.parsers.FutureParser; import static koopa.core.parsers.combinators.Opt.NOSKIP; import static koopa.core.grammars.combinators.Scoped.Visibility.PUBLIC; import static koopa.core.grammars.combinators.Scoped.Visibility.PRIVATE; import static koopa.core.grammars.combinators.Scoped.Visibility.HIDING; import koopa.cobol.grammar.directives.DirectivesBaseGrammar; import static koopa.cobol.data.tags.CobolAreaTag.SEQUENCE_NUMBER_AREA; import static koopa.cobol.sources.SourceFormat.FIXED; import static koopa.cobol.sources.SourceFormat.FREE; import static koopa.cobol.sources.SourceFormat.VARIABLE; import static koopa.core.data.tags.SyntacticTag.NUMBER; import static koopa.core.data.tags.SyntacticTag.STRING; import static koopa.core.data.tags.SyntacticTag.WHITESPACE; import static koopa.core.data.tags.SyntacticTag.WORD; /** * <b>This is generated code.<b> * <p> * @see <code>src/cobol/koopa/cobol/grammar/directives/Directives.kg</code> */ public class DirectivesGrammar extends DirectivesBaseGrammar { private static DirectivesGrammar INSTANCE = null; protected DirectivesGrammar() { } public static DirectivesGrammar instance() { if (INSTANCE == null) INSTANCE = new DirectivesGrammar(); return INSTANCE; } // ======================================================== // directive // ........................................................ private ParserCombinator directiveParser = null; public final Start directive = Start.on(getNamespace(), "directive"); public ParserCombinator directive() { if (directiveParser == null) { FutureParser future = scoped("directive", PUBLIC, true); directiveParser = future; future.setParser( choice( iso(), ibm(), mf(), listing() ) ); } return directiveParser; } // ======================================================== // iso // ........................................................ private ParserCombinator isoParser = null; public final Start iso = Start.on(getNamespace(), "iso"); public ParserCombinator iso() { if (isoParser == null) { FutureParser future = scoped("iso", PUBLIC, true); isoParser = future; future.setParser( sequence( optional( as("sequenceNumber", sequence( tagged(FIXED), plus( sequence( ranged(1, 6), any() ) ) ) ) ), iso$indicator(), iso$instruction(), optional( inlineComment() ) ) ); } return isoParser; } // ======================================================== // indicator // ........................................................ private ParserCombinator iso$indicatorParser = null; protected final Start iso$indicator = Start.on(getNamespace(), "indicator"); protected ParserCombinator iso$indicator() { if (iso$indicatorParser == null) { FutureParser future = scoped("indicator", PRIVATE, true); iso$indicatorParser = future; future.setParser( sequence( choice( tagged(FREE), ranged(8, -1) ), sequence( literal(">"), opt(NOSKIP, literal(">") ) ) ) ); } return iso$indicatorParser; } // ======================================================== // instruction // ........................................................ 
private ParserCombinator iso$instructionParser = null; public final Start iso$instruction = Start.on(getNamespace(), "instruction"); public ParserCombinator iso$instruction() { if (iso$instructionParser == null) { FutureParser future = scoped("instruction", PUBLIC, true); iso$instructionParser = future; future.setParser( upto( choice( as("else", keyword("ELSE") ), as("endEvaluate", keyword("END-EVALUATE") ), as("endIf", keyword("END-IF") ), iso$instruction$source(), iso$instruction$callConvention(), iso$instruction$define(), iso$instruction$evaluate(), iso$instruction$when(), iso$instruction$flag02(), iso$instruction$flag85(), iso$instruction$flagNativeArithmetic(), iso$instruction$if(), iso$instruction$leapSecond(), iso$instruction$listing(), iso$instruction$page(), iso$instruction$propagate(), iso$instruction$checking(), iso$instruction$repository() ), // Closure: choice( eof(), inlineCommentIndicator() ) ) ); } return iso$instructionParser; } // ======================================================== // source // ........................................................ private ParserCombinator iso$instruction$sourceParser = null; public final Start iso$instruction$source = Start.on(getNamespace(), "source"); public ParserCombinator iso$instruction$source() { if (iso$instruction$sourceParser == null) { FutureParser future = scoped("source", PUBLIC, true); iso$instruction$sourceParser = future; future.setParser( sequence( keyword("SOURCE"), optional( keyword("FORMAT") ), optional( keyword("IS") ), as("format", choice( keyword("FIXED"), keyword("FREE") ) ) ) ); } return iso$instruction$sourceParser; } // ======================================================== // callConvention // ........................................................ private ParserCombinator iso$instruction$callConventionParser = null; public final Start iso$instruction$callConvention = Start.on(getNamespace(), "callConvention"); public ParserCombinator iso$instruction$callConvention() { if (iso$instruction$callConventionParser == null) { FutureParser future = scoped("callConvention", PUBLIC, true); iso$instruction$callConventionParser = future; future.setParser( sequence( keyword("CALL-CONVENTION"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$callConventionParser; } // ======================================================== // define // ........................................................ private ParserCombinator iso$instruction$defineParser = null; public final Start iso$instruction$define = Start.on(getNamespace(), "define"); public ParserCombinator iso$instruction$define() { if (iso$instruction$defineParser == null) { FutureParser future = scoped("define", PUBLIC, true); iso$instruction$defineParser = future; future.setParser( sequence( keyword("DEFINE"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$defineParser; } // ======================================================== // evaluate // ........................................................ 
private ParserCombinator iso$instruction$evaluateParser = null; public final Start iso$instruction$evaluate = Start.on(getNamespace(), "evaluate"); public ParserCombinator iso$instruction$evaluate() { if (iso$instruction$evaluateParser == null) { FutureParser future = scoped("evaluate", PUBLIC, true); iso$instruction$evaluateParser = future; future.setParser( sequence( keyword("EVALUATE"), choice( as("true", keyword("TRUE") ), as("unknown", plus( any() ) ) ) ) ); } return iso$instruction$evaluateParser; } // ======================================================== // when // ........................................................ private ParserCombinator iso$instruction$whenParser = null; public final Start iso$instruction$when = Start.on(getNamespace(), "when"); public ParserCombinator iso$instruction$when() { if (iso$instruction$whenParser == null) { FutureParser future = scoped("when", PUBLIC, true); iso$instruction$whenParser = future; future.setParser( sequence( keyword("WHEN"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$whenParser; } // ======================================================== // flag02 // ........................................................ private ParserCombinator iso$instruction$flag02Parser = null; public final Start iso$instruction$flag02 = Start.on(getNamespace(), "flag02"); public ParserCombinator iso$instruction$flag02() { if (iso$instruction$flag02Parser == null) { FutureParser future = scoped("flag02", PUBLIC, true); iso$instruction$flag02Parser = future; future.setParser( sequence( sequence( keyword("FLAG-"), opt(NOSKIP, number("02") ) ), as("unknown", plus( any() ) ) ) ); } return iso$instruction$flag02Parser; } // ======================================================== // flag85 // ........................................................ private ParserCombinator iso$instruction$flag85Parser = null; public final Start iso$instruction$flag85 = Start.on(getNamespace(), "flag85"); public ParserCombinator iso$instruction$flag85() { if (iso$instruction$flag85Parser == null) { FutureParser future = scoped("flag85", PUBLIC, true); iso$instruction$flag85Parser = future; future.setParser( sequence( sequence( keyword("FLAG-"), opt(NOSKIP, number("85") ) ), as("unknown", plus( any() ) ) ) ); } return iso$instruction$flag85Parser; } // ======================================================== // flagNativeArithmetic // ........................................................ private ParserCombinator iso$instruction$flagNativeArithmeticParser = null; public final Start iso$instruction$flagNativeArithmetic = Start.on(getNamespace(), "flagNativeArithmetic"); public ParserCombinator iso$instruction$flagNativeArithmetic() { if (iso$instruction$flagNativeArithmeticParser == null) { FutureParser future = scoped("flagNativeArithmetic", PUBLIC, true); iso$instruction$flagNativeArithmeticParser = future; future.setParser( sequence( keyword("FLAG-NATIVE-ARITHMETIC"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$flagNativeArithmeticParser; } // ======================================================== // if // ........................................................ 
private ParserCombinator iso$instruction$ifParser = null; public final Start iso$instruction$if = Start.on(getNamespace(), "if"); public ParserCombinator iso$instruction$if() { if (iso$instruction$ifParser == null) { FutureParser future = scoped("if", PUBLIC, true); iso$instruction$ifParser = future; future.setParser( sequence( keyword("IF"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$ifParser; } // ======================================================== // leapSecond // ........................................................ private ParserCombinator iso$instruction$leapSecondParser = null; public final Start iso$instruction$leapSecond = Start.on(getNamespace(), "leapSecond"); public ParserCombinator iso$instruction$leapSecond() { if (iso$instruction$leapSecondParser == null) { FutureParser future = scoped("leapSecond", PUBLIC, true); iso$instruction$leapSecondParser = future; future.setParser( sequence( keyword("LEAP-SECOND"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$leapSecondParser; } // ======================================================== // listing // ........................................................ private ParserCombinator iso$instruction$listingParser = null; public final Start iso$instruction$listing = Start.on(getNamespace(), "listing"); public ParserCombinator iso$instruction$listing() { if (iso$instruction$listingParser == null) { FutureParser future = scoped("listing", PUBLIC, true); iso$instruction$listingParser = future; future.setParser( sequence( keyword("LISTING"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$listingParser; } // ======================================================== // page // ........................................................ private ParserCombinator iso$instruction$pageParser = null; public final Start iso$instruction$page = Start.on(getNamespace(), "page"); public ParserCombinator iso$instruction$page() { if (iso$instruction$pageParser == null) { FutureParser future = scoped("page", PUBLIC, true); iso$instruction$pageParser = future; future.setParser( sequence( keyword("PAGE"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$pageParser; } // ======================================================== // propagate // ........................................................ private ParserCombinator iso$instruction$propagateParser = null; public final Start iso$instruction$propagate = Start.on(getNamespace(), "propagate"); public ParserCombinator iso$instruction$propagate() { if (iso$instruction$propagateParser == null) { FutureParser future = scoped("propagate", PUBLIC, true); iso$instruction$propagateParser = future; future.setParser( sequence( keyword("PROPAGATE"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$propagateParser; } // ======================================================== // checking // ........................................................ private ParserCombinator iso$instruction$checkingParser = null; public final Start iso$instruction$checking = Start.on(getNamespace(), "checking"); public ParserCombinator iso$instruction$checking() { if (iso$instruction$checkingParser == null) { FutureParser future = scoped("checking", PUBLIC, true); iso$instruction$checkingParser = future; future.setParser( sequence( keyword("TURN"), as("unknown", plus( any() ) ) ) ); } return iso$instruction$checkingParser; } // ======================================================== // repository // ........................................................ 
private ParserCombinator iso$instruction$repositoryParser = null; public final Start iso$instruction$repository = Start.on(getNamespace(), "repository"); public ParserCombinator iso$instruction$repository() { if (iso$instruction$repositoryParser == null) { FutureParser future = scoped("repository", PUBLIC, true); iso$instruction$repositoryParser = future; future.setParser( sequence( keyword("REPOSITORY"), optional( keyword("UPDATE") ), choice( keyword("ON"), sequence( keyword("OFF"), optional( sequence( optional( keyword("WITH") ), keyword("CHECKING") ) ) ) ) ) ); } return iso$instruction$repositoryParser; } // ======================================================== // ibm // ........................................................ private ParserCombinator ibmParser = null; public final Start ibm = Start.on(getNamespace(), "ibm"); public ParserCombinator ibm() { if (ibmParser == null) { FutureParser future = scoped("ibm", PUBLIC, true); ibmParser = future; future.setParser( choice( ibm$basis(), ibm$process(), ibm$control(), ibm$delete(), ibm$enter(), ibm$insert(), ibm$readyTrace(), ibm$resetTrace() ) ); } return ibmParser; } // ======================================================== // basis // ........................................................ private ParserCombinator ibm$basisParser = null; public final Start ibm$basis = Start.on(getNamespace(), "basis"); public ParserCombinator ibm$basis() { if (ibm$basisParser == null) { FutureParser future = scoped("basis", PUBLIC, true); ibm$basisParser = future; future.setParser( sequence( optional( sequence( as("sequenceNumber", sequence( ranged(1, 6), tagged(NUMBER), any() ) ), opt(NOSKIP, sequence( tagged(WHITESPACE), any() ) ) ) ), keyword("BASIS"), anything() ) ); } return ibm$basisParser; } // ======================================================== // process // ........................................................ private ParserCombinator ibm$processParser = null; public final Start ibm$process = Start.on(getNamespace(), "process"); public ParserCombinator ibm$process() { if (ibm$processParser == null) { FutureParser future = scoped("process", PUBLIC, true); ibm$processParser = future; future.setParser( sequence( optional( sequence( as("sequenceNumber", sequence( ranged(1, 6), tagged(NUMBER), any(), star( sequence( ranged(1, 6), any() ) ) ) ), opt(NOSKIP, sequence( tagged(WHITESPACE), any() ) ) ) ), choice( keyword("CBL"), keyword("PROCESS") ), star( anything() ) ) ); } return ibm$processParser; } // ======================================================== // control // ........................................................ private ParserCombinator ibm$controlParser = null; public final Start ibm$control = Start.on(getNamespace(), "control"); public ParserCombinator ibm$control() { if (ibm$controlParser == null) { FutureParser future = scoped("control", PUBLIC, true); ibm$controlParser = future; future.setParser( sequence( ibm$control$cblControl(), ibm$control$option(), star( sequence( optional( literal(",") ), ibm$control$option() ) ), optional( literal(".") ) ) ); } return ibm$controlParser; } // ======================================================== // cblControl // ........................................................ 
private ParserCombinator ibm$control$cblControlParser = null; protected final Start ibm$control$cblControl = Start.on(getNamespace(), "cblControl"); protected ParserCombinator ibm$control$cblControl() { if (ibm$control$cblControlParser == null) { FutureParser future = scoped("cblControl", PRIVATE, true); ibm$control$cblControlParser = future; future.setParser( sequence( sequence( ranged(7, -1), literal("*") ), opt(NOSKIP, choice( keyword("CBL"), keyword("CONTROL") ) ) ) ); } return ibm$control$cblControlParser; } // ======================================================== // option // ........................................................ private ParserCombinator ibm$control$optionParser = null; public final Start ibm$control$option = Start.on(getNamespace(), "option"); public ParserCombinator ibm$control$option() { if (ibm$control$optionParser == null) { FutureParser future = scoped("option", PUBLIC, true); ibm$control$optionParser = future; future.setParser( choice( keyword("SOURCE"), keyword("NOSOURCE"), keyword("LIST"), keyword("NOLIST"), keyword("MAP"), keyword("NOMAP") ) ); } return ibm$control$optionParser; } // ======================================================== // delete // ........................................................ private ParserCombinator ibm$deleteParser = null; public final Start ibm$delete = Start.on(getNamespace(), "delete"); public ParserCombinator ibm$delete() { if (ibm$deleteParser == null) { FutureParser future = scoped("delete", PUBLIC, true); ibm$deleteParser = future; future.setParser( sequence( optional( sequence( as("sequenceNumber", sequence( ranged(1, 6), tagged(NUMBER), any() ) ), opt(NOSKIP, sequence( tagged(WHITESPACE), any() ) ) ) ), keyword("DELETE"), ibm$delete$sequenceNumberFieldOrRange(), star( sequence( literal(","), ibm$delete$sequenceNumberFieldOrRange() ) ) ) ); } return ibm$deleteParser; } // ======================================================== // sequenceNumberFieldOrRange // ........................................................ private ParserCombinator ibm$delete$sequenceNumberFieldOrRangeParser = null; protected final Start ibm$delete$sequenceNumberFieldOrRange = Start.on(getNamespace(), "sequenceNumberFieldOrRange"); protected ParserCombinator ibm$delete$sequenceNumberFieldOrRange() { if (ibm$delete$sequenceNumberFieldOrRangeParser == null) { FutureParser future = scoped("sequenceNumberFieldOrRange", PRIVATE, true); ibm$delete$sequenceNumberFieldOrRangeParser = future; future.setParser( choice( as("sequenceNumberRange", sequence( ibm$sequenceNumberField(), opt(NOSKIP, sequence( literal("-"), ibm$sequenceNumberField() ) ) ) ), ibm$sequenceNumberField() ) ); } return ibm$delete$sequenceNumberFieldOrRangeParser; } // ======================================================== // enter // ........................................................ private ParserCombinator ibm$enterParser = null; public final Start ibm$enter = Start.on(getNamespace(), "enter"); public ParserCombinator ibm$enter() { if (ibm$enterParser == null) { FutureParser future = scoped("enter", PUBLIC, true); ibm$enterParser = future; future.setParser( sequence( keyword("ENTER"), anything(), optional( sequence( not( literal(".") ), anything() ) ), literal(".") ) ); } return ibm$enterParser; } // ======================================================== // insert // ........................................................ 
private ParserCombinator ibm$insertParser = null; public final Start ibm$insert = Start.on(getNamespace(), "insert"); public ParserCombinator ibm$insert() { if (ibm$insertParser == null) { FutureParser future = scoped("insert", PUBLIC, true); ibm$insertParser = future; future.setParser( sequence( optional( sequence( as("sequenceNumber", sequence( ranged(1, 6), tagged(NUMBER), any() ) ), opt(NOSKIP, sequence( tagged(WHITESPACE), any() ) ) ) ), keyword("INSERT"), ibm$sequenceNumberField() ) ); } return ibm$insertParser; } // ======================================================== // readyTrace // ........................................................ private ParserCombinator ibm$readyTraceParser = null; public final Start ibm$readyTrace = Start.on(getNamespace(), "readyTrace"); public ParserCombinator ibm$readyTrace() { if (ibm$readyTraceParser == null) { FutureParser future = scoped("readyTrace", PUBLIC, true); ibm$readyTraceParser = future; future.setParser( sequence( keyword("READY"), keyword("TRACE"), literal(".") ) ); } return ibm$readyTraceParser; } // ======================================================== // resetTrace // ........................................................ private ParserCombinator ibm$resetTraceParser = null; public final Start ibm$resetTrace = Start.on(getNamespace(), "resetTrace"); public ParserCombinator ibm$resetTrace() { if (ibm$resetTraceParser == null) { FutureParser future = scoped("resetTrace", PUBLIC, true); ibm$resetTraceParser = future; future.setParser( sequence( keyword("RESET"), keyword("TRACE"), literal(".") ) ); } return ibm$resetTraceParser; } // ======================================================== // sequenceNumberField // ........................................................ private ParserCombinator ibm$sequenceNumberFieldParser = null; public final Start ibm$sequenceNumberField = Start.on(getNamespace(), "sequenceNumberField"); public ParserCombinator ibm$sequenceNumberField() { if (ibm$sequenceNumberFieldParser == null) { FutureParser future = scoped("sequenceNumberField", PUBLIC, true); ibm$sequenceNumberFieldParser = future; future.setParser( sequence( tagged(NUMBER), any() ) ); } return ibm$sequenceNumberFieldParser; } // ======================================================== // mf // ........................................................ private ParserCombinator mfParser = null; public final Start mf = Start.on(getNamespace(), "mf"); public ParserCombinator mf() { if (mfParser == null) { FutureParser future = scoped("mf", PUBLIC, true); mfParser = future; future.setParser( choice( mf$inc(), mf$include(), mf$set(), mf$display(), mf$else(), mf$end(), mf$if() ) ); } return mfParser; } // ======================================================== // inc // ........................................................ private ParserCombinator mf$incParser = null; public final Start mf$inc = Start.on(getNamespace(), "inc"); public ParserCombinator mf$inc() { if (mf$incParser == null) { FutureParser future = scoped("inc", PUBLIC, true); mf$incParser = future; future.setParser( sequence( sequence( sequence( ranged(1, 1), literal("-") ), opt(NOSKIP, literal("INC") ) ), anything(), optional( as("comment", star( anything() ) ) ) ) ); } return mf$incParser; } // ======================================================== // include // ........................................................ 
private ParserCombinator mf$includeParser = null; public final Start mf$include = Start.on(getNamespace(), "include"); public ParserCombinator mf$include() { if (mf$includeParser == null) { FutureParser future = scoped("include", PUBLIC, true); mf$includeParser = future; future.setParser( sequence( sequence( sequence( ranged(8, 8), literal("+") ), opt(NOSKIP, sequence( literal("+"), literal("INCLUDE") ) ) ), anything(), optional( as("comment", star( anything() ) ) ) ) ); } return mf$includeParser; } // ======================================================== // set // ........................................................ private ParserCombinator mf$setParser = null; public final Start mf$set = Start.on(getNamespace(), "set"); public ParserCombinator mf$set() { if (mf$setParser == null) { FutureParser future = scoped("set", PUBLIC, true); mf$setParser = future; future.setParser( sequence( sequence( mf$indicator(), opt(NOSKIP, keyword("SET") ) ), plus( choice( as("sourceformat", sequence( keyword("SOURCEFORMAT"), mf$set$parameter() ) ), as("directive", sequence( name(), optional( mf$set$parameter() ) ) ) ) ) ) ); } return mf$setParser; } // ======================================================== // parameter // ........................................................ private ParserCombinator mf$set$parameterParser = null; public final Start mf$set$parameter = Start.on(getNamespace(), "parameter"); public ParserCombinator mf$set$parameter() { if (mf$set$parameterParser == null) { FutureParser future = scoped("parameter", PUBLIC, true); mf$set$parameterParser = future; future.setParser( choice( sequence( tagged(STRING), any() ), sequence( literal("("), plus( sequence( not( literal(")") ), any() ) ), literal(")") ) ) ); } return mf$set$parameterParser; } // ======================================================== // display // ........................................................ private ParserCombinator mf$displayParser = null; public final Start mf$display = Start.on(getNamespace(), "display"); public ParserCombinator mf$display() { if (mf$displayParser == null) { FutureParser future = scoped("display", PUBLIC, true); mf$displayParser = future; future.setParser( sequence( sequence( mf$indicator(), opt(NOSKIP, keyword("DISPLAY") ) ), optional( sequence( keyword("VCS"), literal("=") ) ), plus( anything() ) ) ); } return mf$displayParser; } // ======================================================== // else // ........................................................ private ParserCombinator mf$elseParser = null; public final Start mf$else = Start.on(getNamespace(), "else"); public ParserCombinator mf$else() { if (mf$elseParser == null) { FutureParser future = scoped("else", PUBLIC, true); mf$elseParser = future; future.setParser( sequence( mf$indicator(), opt(NOSKIP, keyword("ELSE") ) ) ); } return mf$elseParser; } // ======================================================== // end // ........................................................ private ParserCombinator mf$endParser = null; public final Start mf$end = Start.on(getNamespace(), "end"); public ParserCombinator mf$end() { if (mf$endParser == null) { FutureParser future = scoped("end", PUBLIC, true); mf$endParser = future; future.setParser( sequence( mf$indicator(), opt(NOSKIP, keyword("END") ) ) ); } return mf$endParser; } // ======================================================== // if // ........................................................ 
private ParserCombinator mf$ifParser = null; public final Start mf$if = Start.on(getNamespace(), "if"); public ParserCombinator mf$if() { if (mf$ifParser == null) { FutureParser future = scoped("if", PUBLIC, true); mf$ifParser = future; future.setParser( sequence( sequence( mf$indicator(), opt(NOSKIP, keyword("IF") ) ), as("unknown", plus( any() ) ) ) ); } return mf$ifParser; } // ======================================================== // indicator // ........................................................ private ParserCombinator mf$indicatorParser = null; public final Start mf$indicator = Start.on(getNamespace(), "indicator"); public ParserCombinator mf$indicator() { if (mf$indicatorParser == null) { FutureParser future = scoped("indicator", PUBLIC, true); mf$indicatorParser = future; future.setParser( sequence( choice( sequence( tagged(FREE), ranged(1, 1) ), sequence( tagged(FIXED), ranged(7, 7) ), sequence( tagged(VARIABLE), ranged(7, 7) ) ), literal("$") ) ); } return mf$indicatorParser; } // ======================================================== // listing // ........................................................ private ParserCombinator listingParser = null; public final Start listing = Start.on(getNamespace(), "listing"); public ParserCombinator listing() { if (listingParser == null) { FutureParser future = scoped("listing", PUBLIC, true); listingParser = future; future.setParser( sequence( optional( as("sequenceNumber", sequence( choice( tagged(FIXED), tagged(VARIABLE) ), plus( sequence( ranged(1, 6), any() ) ) ) ) ), choice( listing$eject(), listing$skip(), listing$title() ) ) ); } return listingParser; } // ======================================================== // eject // ........................................................ private ParserCombinator listing$ejectParser = null; public final Start listing$eject = Start.on(getNamespace(), "eject"); public ParserCombinator listing$eject() { if (listing$ejectParser == null) { FutureParser future = scoped("eject", PUBLIC, true); listing$ejectParser = future; future.setParser( sequence( sequence( ranged(8, -1), keyword("EJECT") ), optional( literal(".") ) ) ); } return listing$ejectParser; } // ======================================================== // skip // ........................................................ private ParserCombinator listing$skipParser = null; public final Start listing$skip = Start.on(getNamespace(), "skip"); public ParserCombinator listing$skip() { if (listing$skipParser == null) { FutureParser future = scoped("skip", PUBLIC, true); listing$skipParser = future; future.setParser( sequence( sequence( ranged(8, -1), sequence( keyword("SKIP"), opt(NOSKIP, choice( number("1"), number("2"), number("3") ) ) ) ), optional( literal(".") ) ) ); } return listing$skipParser; } // ======================================================== // title // ........................................................ private ParserCombinator listing$titleParser = null; public final Start listing$title = Start.on(getNamespace(), "title"); public ParserCombinator listing$title() { if (listing$titleParser == null) { FutureParser future = scoped("title", PUBLIC, true); listing$titleParser = future; future.setParser( sequence( sequence( ranged(8, -1), keyword("TITLE") ), anything(), optional( literal(".") ) ) ); } return listing$titleParser; } // ======================================================== // inlineComment // ........................................................ 
private ParserCombinator inlineCommentParser = null; public final Start inlineComment = Start.on(getNamespace(), "inlineComment"); public ParserCombinator inlineComment() { if (inlineCommentParser == null) { FutureParser future = scoped("inlineComment", PUBLIC, true); inlineCommentParser = future; future.setParser( sequence( inlineCommentIndicator(), star( any() ) ) ); } return inlineCommentParser; } // ======================================================== // inlineCommentIndicator // ........................................................ private ParserCombinator inlineCommentIndicatorParser = null; protected final Start inlineCommentIndicator = Start.on(getNamespace(), "inlineCommentIndicator"); protected ParserCombinator inlineCommentIndicator() { if (inlineCommentIndicatorParser == null) { FutureParser future = scoped("inlineCommentIndicator", PRIVATE, true); inlineCommentIndicatorParser = future; future.setParser( sequence( literal("*"), opt(NOSKIP, literal(">") ) ) ); } return inlineCommentIndicatorParser; } // ======================================================== // name // ........................................................ private ParserCombinator nameParser = null; public final Start name = Start.on(getNamespace(), "name"); public ParserCombinator name() { if (nameParser == null) { FutureParser future = scoped("name", PUBLIC, true); nameParser = future; future.setParser( sequence( sequence( tagged(WORD), any() ), opt(NOSKIP, star( choice( sequence( tagged(WORD), any() ), sequence( tagged(NUMBER), any() ) ) ) ) ) ); } return nameParser; } // ======================================================== // anything // ........................................................ private ParserCombinator anythingParser = null; public final Start anything = Start.on(getNamespace(), "anything"); public ParserCombinator anything() { if (anythingParser == null) { FutureParser future = scoped("anything", PUBLIC, true); anythingParser = future; future.setParser( sequence( any(), optional( opt(NOSKIP, star( sequence( not( tagged(WHITESPACE) ), any() ) ) ) ) ) ); } return anythingParser; } }
/** * Set this query's projection * * @param select the {@link Projection} to set * @return this */ public SubSelect select(Projection select) { this.select = select; return this; }
/** * Allows the customer to pay for an order at the bar: looks up the order's price, removes the order, and frees up the seat. * @param orderNum the order number to pay for * @return the price to pay */ public double pay(int orderNum){ Table barTable = allTables.get(findTable(bar)); double price = barTable.getOrderPrice(orderNum); barTable.removeOrder(orderNum); int current = barTable.getFilledSeats(); barTable.setFilledSeats(current - 1); return price; }
# Python side of the support for xmethods. # Copyright (C) 2013-2017 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Utilities for defining xmethods""" import gdb import re import sys if sys.version_info[0] > 2: # Python 3 removed basestring and long basestring = str long = int class XMethod(object): """Base class (or a template) for an xmethod description. Currently, the description requires only the 'name' and 'enabled' attributes. Description objects are managed by 'XMethodMatcher' objects (see below). Note that this is only a template for the interface of the XMethodMatcher.methods objects. One could use this class or choose to use an object which supports this exact same interface. Also, an XMethodMatcher can choose not to use its 'methods' attribute. In such cases this class (or an equivalent) is not used. Attributes: name: The name of the xmethod. enabled: A boolean indicating if the xmethod is enabled. """ def __init__(self, name): self.name = name self.enabled = True class XMethodMatcher(object): """Abstract base class for matching an xmethod. When looking for xmethods, GDB invokes the `match' method of a registered xmethod matcher to match the object type and method name. The `match' method in concrete classes derived from this class should return an `XMethodWorker' object, or a list of `XMethodWorker' objects if there is a match (see below for 'XMethodWorker' class). Attributes: name: The name of the matcher. enabled: A boolean indicating if the matcher is enabled. methods: A sequence of objects of type 'XMethod', or objects which have at least the attributes of an 'XMethod' object. This list is used by the 'enable'/'disable'/'info' commands to enable/disable/list the xmethods registered with GDB. See the 'match' method below to know how this sequence is used. This attribute is None if the matcher chooses not to have any xmethods managed by it. """ def __init__(self, name): """ Args: name: An identifying name for the xmethod or the group of xmethods returned by the `match' method. """ self.name = name self.enabled = True self.methods = None def match(self, class_type, method_name): """Match class type and method name. In derived classes, it should return an XMethodWorker object, or a sequence of 'XMethodWorker' objects. Only those xmethod workers whose corresponding 'XMethod' descriptor object is enabled should be returned. Args: class_type: The class type (gdb.Type object) to match. method_name: The name (string) of the method to match. """ raise NotImplementedError("XMethodMatcher match") class XMethodWorker(object): """Base class for all xmethod workers defined in Python. An xmethod worker is an object which matches the method arguments, and invokes the method when GDB wants it to. Internally, GDB first invokes the 'get_arg_types' method to perform overload resolution. If GDB selects this Python xmethod, then it invokes it via the overridden '__call__' method.
The 'get_result_type' method is used to implement 'ptype' on the xmethod. Derived classes should override the 'get_arg_types', 'get_result_type' and '__call__' methods. """ def get_arg_types(self): """Return arguments types of an xmethod. A sequence of gdb.Type objects corresponding to the arguments of the xmethod are returned. If the xmethod takes no arguments, then 'None' or an empty sequence is returned. If the xmethod takes only a single argument, then a gdb.Type object or a sequence with a single gdb.Type element is returned. """ raise NotImplementedError("XMethodWorker get_arg_types") def get_result_type(self, *args): """Return the type of the result of the xmethod. Args: args: Arguments to the method. Each element of the tuple is a gdb.Value object. The first element is the 'this' pointer value. These are the same arguments passed to '__call__'. Returns: A gdb.Type object representing the type of the result of the xmethod. """ raise NotImplementedError("XMethodWorker get_result_type") def __call__(self, *args): """Invoke the xmethod. Args: args: Arguments to the method. Each element of the tuple is a gdb.Value object. The first element is the 'this' pointer value. Returns: A gdb.Value corresponding to the value returned by the xmethod. Returns 'None' if the method does not return anything. """ raise NotImplementedError("XMethodWorker __call__") class SimpleXMethodMatcher(XMethodMatcher): """A utility class to implement simple xmethod matchers and workers. See the __init__ method below for information on how instances of this class can be used. For simple classes and methods, one can choose to use this class. For complex xmethods, which need to replace/implement template methods on possibly template classes, one should implement their own xmethod matchers and workers. See py-xmethods.py in testsuite/gdb.python directory of the GDB source tree for examples. """ class SimpleXMethodWorker(XMethodWorker): def __init__(self, method_function, arg_types): self._arg_types = arg_types self._method_function = method_function def get_arg_types(self): return self._arg_types def __call__(self, *args): return self._method_function(*args) def __init__(self, name, class_matcher, method_matcher, method_function, *arg_types): """ Args: name: Name of the xmethod matcher. class_matcher: A regular expression used to match the name of the class whose method this xmethod is implementing/replacing. method_matcher: A regular expression used to match the name of the method this xmethod is implementing/replacing. method_function: A Python callable which would be called via the 'invoke' method of the worker returned by the objects of this class. This callable should accept the object (*this) as the first argument followed by the rest of the arguments to the method. All arguments to this function should be gdb.Value objects. arg_types: The gdb.Type objects corresponding to the arguments that this xmethod takes. It can be None, or an empty sequence, or a single gdb.Type object, or a sequence of gdb.Type objects.
""" XMethodMatcher.__init__(self, name) assert callable(method_function), ( "The 'method_function' argument to 'SimpleXMethodMatcher' " "__init__ method should be a callable.") self._method_function = method_function self._class_matcher = class_matcher self._method_matcher = method_matcher self._arg_types = arg_types def match(self, class_type, method_name): cm = re.match(self._class_matcher, str(class_type.unqualified().tag)) mm = re.match(self._method_matcher, method_name) if cm and mm: return SimpleXMethodMatcher.SimpleXMethodWorker( self._method_function, self._arg_types) # A helper function for register_xmethod_matcher which returns an error # object if MATCHER is not having the requisite attributes in the proper # format. def _validate_xmethod_matcher(matcher): if not hasattr(matcher, "match"): return TypeError("Xmethod matcher is missing method: match") if not hasattr(matcher, "name"): return TypeError("Xmethod matcher is missing attribute: name") if not hasattr(matcher, "enabled"): return TypeError("Xmethod matcher is missing attribute: enabled") if not isinstance(matcher.name, basestring): return TypeError("Attribute 'name' of xmethod matcher is not a " "string") if matcher.name.find(";") >= 0: return ValueError("Xmethod matcher name cannot contain ';' in it") # A helper function for register_xmethod_matcher which looks up an # xmethod matcher with NAME in LOCUS. Returns the index of the xmethod # matcher in 'xmethods' sequence attribute of the LOCUS. If NAME is not # found in LOCUS, then -1 is returned. def _lookup_xmethod_matcher(locus, name): for i in range(0, len(locus.xmethods)): if locus.xmethods[i].name == name: return i return -1 def register_xmethod_matcher(locus, matcher, replace=False): """Registers a xmethod matcher MATCHER with a LOCUS. Arguments: locus: The locus in which the xmethods should be registered. It can be 'None' to indicate that the xmethods should be registered globally. Or, it could be a gdb.Objfile or a gdb.Progspace object in which the xmethods should be registered. matcher: The xmethod matcher to register with the LOCUS. It should be an instance of 'XMethodMatcher' class. replace: If True, replace any existing xmethod matcher with the same name in the locus. Otherwise, if a matcher with the same name exists in the locus, raise an exception. """ err = _validate_xmethod_matcher(matcher) if err: raise err if not locus: locus = gdb if locus == gdb: locus_name = "global" else: locus_name = locus.filename index = _lookup_xmethod_matcher(locus, matcher.name) if index >= 0: if replace: del locus.xmethods[index] else: raise RuntimeError("Xmethod matcher already registered with " "%s: %s" % (locus_name, matcher.name)) if gdb.parameter("verbose"): gdb.write("Registering xmethod matcher '%s' with %s' ...\n") locus.xmethods.insert(0, matcher)
/* eslint no-undef: off */ /* eslint no-new: off */ import path from "path"; import Store from "../src"; test("Ensure the store can determine its age.", () => { const store = Store.from(path.resolve("./test/age.store.json")); expect(typeof store.age).toBe("number"); });
// SetHostTimer sets the number of generations for the host to // remain in its current status. func (sim *SequenceNodeEpidemic) SetHostTimer(id, interval int) { sim.Lock() defer sim.Unlock() sim.timers[id] = interval }
import java.util.*; // We use Disjoint Set Union to help us construct the MST and to check for cycles in the MST public class KruskalAlgorithm { static class Edge implements Comparable<Edge> { int src, dest, weight; public Edge(int src, int dest, int weight) { this.src = src; this.dest = dest; this.weight = weight; } @Override public int compareTo(Edge edge) { // Function to help sort the edges return this.weight - edge.weight; } } public static int findParent(int src, int[] parents) { // If the parent of any vertex is the vertex itself // then this is the parent of the vertex of the current edge if(parents[src] == src) { return src; } else { parents[src] = findParent(parents[src], parents); return parents[src]; } } public static void union(int src, int dest, int[] parents, int[] size) { // Find the parent of both the vertices in the current edge, and merge the disjoint sets src = findParent(src, parents); dest = findParent(dest, parents); if(size[src] > size[dest]) { parents[dest] = src; size[src] += size[dest]; } else { parents[src] = dest; size[dest] += size[src]; } } public static boolean isCyclic(int src, int dest, int[] parents) { return findParent(src, parents) == findParent(dest, parents); } public static void kruskalAlgo(int nodes, int numEdges, Edge[] edges, int[][] graph) { // This array is to store the minimum spanning tree formed int[][] minSpanTree = new int[graph.length][graph.length]; // Sorting all the edges Arrays.sort(edges); // Parents and size arrays for creating disjoint sets; every set starts as a singleton of size 1 int[] parents = new int[nodes+1]; int[] size = new int[nodes+1]; for(int i = 1; i < graph.length; i++) { parents[i] = i; size[i] = 1; } int edgeCounter = 0; int edgesTaken = 1; // To connect all nodes, we must take exactly nodes-1 edges while(edgesTaken <= nodes - 1) { Edge e = edges[edgeCounter]; edgeCounter++; // Skip the edge if it would create a cycle if(isCyclic(e.src, e.dest, parents)) continue; union(e.src, e.dest, parents, size); minSpanTree[e.src][e.dest] = e.weight; edgesTaken++; } /* Display the tree */ System.out.println("The minimum spanning tree is as follows:"); for(int i=1; i<minSpanTree.length; i++) { for(int j=0; j<minSpanTree.length; j++) { if(minSpanTree[i][j] != 0) System.out.println(i + " " + j + " " + minSpanTree[i][j]); } } } public static void main(String[] args) { Scanner in = new Scanner(System.in); System.out.print("Enter the number of nodes: "); int nodes = in.nextInt(); int[][] graph = new int[nodes+1][nodes+1]; System.out.print("Enter the number of edges: "); int numEdges = in.nextInt(); Edge[] edges = new Edge[numEdges]; for(int i = 0; i<numEdges; i++) { System.out.println("Enter source, destination and weight:"); int src = in.nextInt(), dest = in.nextInt(), weight = in.nextInt(); // Keep only the lightest edge between two nodes; always store an Edge so the array has no null entries if(graph[src][dest] == 0 || weight < graph[src][dest]) { // The graph is considered undirected graph[src][dest] = weight; graph[dest][src] = weight; } edges[i] = new Edge(src, dest, graph[src][dest]); } kruskalAlgo(nodes, numEdges, edges, graph); } }
import React from "react"; import {FC} from "react"; export const GreenButton:FC = (props) => { return ( <button style={{backgroundColor: '#00ff00'}}> {props.children} </button> ); }
import six def _add_is_control_and_control_id(df, ctrl_id): if 'Experiment_Id' not in df: return df if ctrl_id is None: if 'Is_Control' not in df and 'Control_Id' in df and 'Experiment_Id' in df: df['Is_Control'] = df['Control_Id'] == df['Experiment_Id'] return df if not isinstance(ctrl_id, dict): df['Is_Control'] = df['Experiment_Id'] == ctrl_id df['Control_Id'] = ctrl_id return df ctrl_id_lookup = {} for ctrl, exp_ids in six.iteritems(ctrl_id): if not isinstance(exp_ids, list): raise ValueError('The experiment id(s) {} is not a list.'.format(exp_ids)) for e in exp_ids: ctrl_id_lookup[e] = ctrl ctrl_id_lookup[ctrl] = ctrl df['Is_Control'] = df['Experiment_Id'].apply(lambda k: k in ctrl_id) df['Control_Id'] = df['Experiment_Id'].apply(ctrl_id_lookup.get) return df
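A quick illustration of the mapping logic above, assuming df is a pandas DataFrame (the function only relies on DataFrame-style column access and apply):

# Illustrative only: exercise the dict form of ctrl_id.
import pandas as pd

df = pd.DataFrame({'Experiment_Id': ['ctrl', 'exp_a', 'exp_b']})
out = _add_is_control_and_control_id(df, {'ctrl': ['exp_a', 'exp_b']})
# out['Is_Control'] -> True, False, False ('ctrl' is the only key in ctrl_id)
# out['Control_Id'] -> 'ctrl', 'ctrl', 'ctrl' (every id maps back to its control)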
/** * Performs a move where the src piece captures the dest piece, * removing the dest piece from the board. * * @param src the source piece. * @param dest the dest piece. */ protected void movePiece(ArrayPiece src, ArrayPiece dest) { assert(src != null); assert(dest != null); movePiece(src.square, dest.square); }
/** * Sets the status of a received Cloudlet to {@link Cloudlet.Status#READY} * so that the Cloudlet can be selected to start running as soon as possible * by a {@link CloudletScheduler}. * * <p>This tag is commonly used when Cloudlets are created * from a trace file such as a {@link GoogleTaskEventsTraceReader Google Cluster Trace}.</p> * * @param evt the event data */ private void processCloudletReady(final SimEvent evt){ final Cloudlet cloudlet = (Cloudlet)evt.getData(); if(cloudlet.getStatus() == Cloudlet.Status.PAUSED) logCloudletStatusChange(cloudlet, "resume execution of"); else logCloudletStatusChange(cloudlet, "start executing"); cloudlet.getVm().getCloudletScheduler().cloudletReady(cloudlet); }
/** * Created by tatyanasoldatkina on 3/25/15. */ public class Like implements Serializable { @SerializedName("id") private long id; @SerializedName("created_at") private Date createdAt; @SerializedName("user") private User user; @SerializedName("shot") private Shot shot; public User getUser() { return user; } public void setUser(User user) { this.user = user; } public Shot getShot() { return shot; } public void setShot(Shot shot) { this.shot = shot; } public Date getCreatedAt() { return createdAt; } public void setCreatedAt(Date createdAt) { this.createdAt = createdAt; } public long getId() { return id; } public void setId(long id) { this.id = id; } }
from typing import List def create_boilerplate_semantics_files(neurons: List[int]): with open("input_semantics.csv", "w") as f: for i in range(neurons[0]): f.write("input neuron %i\n" % i) with open("output_semantics.csv", "w") as f: for i in range(neurons[-1]): f.write("output neuron %i\n" % i)
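For instance (illustrative values only), a [4, 16, 3] layer specification writes four input labels and three output labels; hidden layer sizes are ignored:

# Writes input_semantics.csv (4 rows) and output_semantics.csv (3 rows)
# into the current working directory.
create_boilerplate_semantics_files([4, 16, 3])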
/** * Main class to represent transaction of money between {@link org.nipu.jmt.account.Account}. * * @author Nikita_Puzankov */ public class Transaction { private final Long fromAccountNumber; private final Long toAccountNumber; private final BigDecimal amount; public Transaction(Long fromAccountNumber, Long toAccountNumber, BigDecimal amount) { this.fromAccountNumber = fromAccountNumber; this.toAccountNumber = toAccountNumber; this.amount = amount; } public Long getFromAccountNumber() { return fromAccountNumber; } public Long getToAccountNumber() { return toAccountNumber; } public BigDecimal getAmount() { return amount; } @Override public String toString() { return "Transaction{" + "fromAccountNumber=" + fromAccountNumber + ", toAccountNumber=" + toAccountNumber + ", amount=" + amount + '}'; } }
use std::fs::read_to_string; use anyhow::Result; use serde::{Deserialize, Serialize}; use crate::semirings::{ GallicWeight, GallicWeightLeft, GallicWeightMin, GallicWeightRestrict, GallicWeightRight, LogWeight, ProductWeight, ReverseBack, SerializableSemiring, StringWeightLeft, StringWeightRestrict, StringWeightRight, TropicalWeight, WeightQuantize, }; use crate::{Tr, KDELTA}; use self::super::get_path_folder; #[derive(Serialize, Deserialize, Debug)] pub struct ParsedWeightOperationResult { name: String, tr_type: String, weight_type: String, weight_1: String, weight_2: String, one: String, zero: String, plus: String, times: String, } impl ParsedWeightOperationResult { pub fn parse<W: SerializableSemiring>(self) -> ParsedWeightTestData<W> { ParsedWeightTestData { name: self.name, weight_type: self.weight_type, tr_type: self.tr_type, weight_1: W::parse_text(self.weight_1.as_str()).unwrap().1, weight_2: W::parse_text(self.weight_2.as_str()).unwrap().1, one: W::parse_text(self.one.as_str()).unwrap().1, zero: W::parse_text(self.zero.as_str()).unwrap().1, plus: W::parse_text(self.plus.as_str()).unwrap().1, times: W::parse_text(self.times.as_str()).unwrap().1, } } } pub struct ParsedWeightTestData<W> { #[allow(unused)] name: String, weight_type: String, tr_type: String, weight_1: W, weight_2: W, one: W, zero: W, plus: W, times: W, } fn do_run_test_openfst_weight<W: SerializableSemiring + WeightQuantize>( test_data: ParsedWeightTestData<W>, ) -> Result<()> { assert_eq!(W::one(), test_data.one); assert_eq!(W::zero(), test_data.zero); assert_eq!( test_data .weight_1 .times(&test_data.weight_2)? .quantize(KDELTA) .unwrap(), test_data.times.quantize(KDELTA).unwrap() ); assert_eq!( test_data .weight_1 .plus(&test_data.weight_2)? .quantize(KDELTA) .unwrap(), test_data.plus.quantize(KDELTA).unwrap() ); assert_eq!(W::weight_type(), test_data.weight_type); assert_eq!(Tr::<W>::tr_type(), test_data.tr_type); assert_eq!( test_data.weight_1.reverse()?.reverse_back()?, test_data.weight_1 ); Ok(()) } fn run_test_openfst_weight(test_name: &str) -> Result<()> { let absolute_path_folder = get_path_folder("weights")?; let mut path_metadata = absolute_path_folder.clone(); path_metadata.push(format!("{}.json", test_name)); let string = read_to_string(&path_metadata) .map_err(|_| format_err!("Can't open {:?}", &path_metadata))?; let parsed_operation_result: ParsedWeightOperationResult = serde_json::from_str(&string).unwrap(); // TODO: Infer the Rust weight type from the serialized weight type.
match parsed_operation_result.weight_type.as_str() { "tropical" => { let parsed_test_data = parsed_operation_result.parse::<TropicalWeight>(); do_run_test_openfst_weight(parsed_test_data)?; } "log" => { let parsed_test_data = parsed_operation_result.parse::<LogWeight>(); do_run_test_openfst_weight(parsed_test_data)?; } "tropical_X_log" => { let parsed_test_data = parsed_operation_result.parse::<ProductWeight<TropicalWeight, LogWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "log_X_tropical" => { let parsed_test_data = parsed_operation_result.parse::<ProductWeight<LogWeight, TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "left_string" => { let parsed_test_data = parsed_operation_result.parse::<StringWeightLeft>(); do_run_test_openfst_weight(parsed_test_data)?; } "right_string" => { let parsed_test_data = parsed_operation_result.parse::<StringWeightRight>(); do_run_test_openfst_weight(parsed_test_data)?; } "restricted_string" => { let parsed_test_data = parsed_operation_result.parse::<StringWeightRestrict>(); do_run_test_openfst_weight(parsed_test_data)?; } "left_gallic" => { let parsed_test_data = parsed_operation_result.parse::<GallicWeightLeft<TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "right_gallic" => { let parsed_test_data = parsed_operation_result.parse::<GallicWeightRight<TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "restricted_gallic" => { let parsed_test_data = parsed_operation_result.parse::<GallicWeightRestrict<TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "min_gallic" => { let parsed_test_data = parsed_operation_result.parse::<GallicWeightMin<TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } "gallic" => { let parsed_test_data = parsed_operation_result.parse::<GallicWeight<TropicalWeight>>(); do_run_test_openfst_weight(parsed_test_data)?; } _ => bail!( "Unknown weight_type : {:?}", parsed_operation_result.weight_type ), } Ok(()) } #[test] fn test_openfst_weight_001() -> Result<()> { run_test_openfst_weight("weight_001").map_err(|v| v.into()) } #[test] fn test_openfst_weight_002() -> Result<()> { run_test_openfst_weight("weight_002").map_err(|v| v.into()) } #[test] fn test_openfst_weight_003() -> Result<()> { run_test_openfst_weight("weight_003").map_err(|v| v.into()) } #[test] fn test_openfst_weight_004() -> Result<()> { run_test_openfst_weight("weight_004").map_err(|v| v.into()) } #[test] fn test_openfst_weight_005() -> Result<()> { run_test_openfst_weight("weight_005").map_err(|v| v.into()) } #[test] fn test_openfst_weight_006() -> Result<()> { run_test_openfst_weight("weight_006").map_err(|v| v.into()) } #[test] fn test_openfst_weight_007() -> Result<()> { run_test_openfst_weight("weight_007").map_err(|v| v.into()) } #[test] fn test_openfst_weight_008() -> Result<()> { run_test_openfst_weight("weight_008").map_err(|v| v.into()) } #[test] fn test_openfst_weight_009() -> Result<()> { run_test_openfst_weight("weight_009").map_err(|v| v.into()) } #[test] fn test_openfst_weight_010() -> Result<()> { run_test_openfst_weight("weight_010").map_err(|v| v.into()) } #[test] fn test_openfst_weight_011() -> Result<()> { run_test_openfst_weight("weight_011").map_err(|v| v.into()) } #[test] fn test_openfst_weight_012() -> Result<()> { run_test_openfst_weight("weight_012").map_err(|v| v.into()) }
/** * Performs the passed-in closure with the write lock locked and unlocks the write lock automatically * after the closure finishes. * * @param cl The closure to perform with the write lock held */ public void withWriteLock(final Closure cl) { writeLock().lock(); try { cl.call(); } finally { writeLock().unlock(); } }
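A hedged usage sketch: the writeLock() call suggests this method lives on a ReentrantReadWriteLock subclass, called EnhancedRWLock here purely for illustration. From Groovy the call is simply lock.withWriteLock { counter++ }; a Java equivalent using groovy.lang.Closure looks like:

// Illustrative only: EnhancedRWLock is a hypothetical subclass exposing withWriteLock.
EnhancedRWLock lock = new EnhancedRWLock();
lock.withWriteLock(new Closure<Void>(null) {
    @Override
    public Void call() {
        counter++;  // runs with the write lock held; unlock happens in the finally block
        return null;
    }
});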
#pragma once #include <cinttypes> #include <limits> #include <memory> #include <ostream> #include <set> #include <string> #include <string_view> #include <vector> #include "cista/containers/hash_map.h" #include "cista/containers/hash_set.h" #include "cista/containers/vector.h" namespace soro { using pixel_coord_t = uint32_t; using distance_in_m = uint32_t; struct pixel_pos { static constexpr auto const INVALID = std::numeric_limits<pixel_coord_t>::max(); bool valid() const { return x_ != INVALID && y_ != INVALID; } friend std::ostream& operator<<(std::ostream&, pixel_pos const&); pixel_coord_t x_{INVALID}, y_{INVALID}; }; struct pixel { pixel_pos pos_; char content_; }; struct node; struct station { unsigned id_; std::string name_; }; struct edge { void add_node(node*); node* opposite(node*) const; unsigned id_{0U}; node *from_{nullptr}, *to_{nullptr}; distance_in_m dist_{0U}; cista::raw::vector<pixel> draw_representation_; }; struct node { enum class type : uint8_t { END_NODE, APPROACH_SIGNAL, MAIN_SIGNAL, END_OF_TRAIN_DETECTOR, SWITCH, SINGLE_SLIP_SWITCH, LEVEL_JUNCTION } type_; station* station_; std::string name_; cista::raw::hash_map<edge*, cista::raw::hash_set<edge*>> traversals_; cista::raw::hash_map<edge*, edge*> action_traversal_; edge* end_node_edge_{nullptr}; cista::raw::vector<pixel> draw_representation_; }; std::string_view type_str(node::type); std::ostream& operator<<(std::ostream&, node::type); struct network { void print(std::vector<edge*> const& highlight_edges = {}) const; friend std::ostream& operator<<(std::ostream&, network const&); std::vector<std::unique_ptr<node>> nodes_; std::vector<std::unique_ptr<edge>> edges_; std::vector<std::unique_ptr<station>> stations_; }; } // namespace soro
// TLSALPNChallengeBlocks returns PEM blocks (certPEMBlock, keyPEMBlock) with the acmeValidation-v1 extension // and domain name for the `tls-alpn-01` challenge. func TLSALPNChallengeBlocks(domain, keyAuth string) ([]byte, []byte, error) { zBytes := sha256.Sum256([]byte(keyAuth)) value, err := asn1.Marshal(zBytes[:sha256.Size]) if err != nil { return nil, nil, err } extensions := []pkix.Extension{ { Id: idPeAcmeIdentifierV1, Critical: true, Value: value, }, } tempPrivKey, err := generatePrivateKey(RSA2048) if err != nil { return nil, nil, err } rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) tempCertPEM, err := generatePemCert(rsaPrivKey, domain, extensions) if err != nil { return nil, nil, err } rsaPrivPEM := pemEncode(rsaPrivKey) return tempCertPEM, rsaPrivPEM, nil }
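A hedged sketch of how the returned PEM pair is typically used: loaded into a tls.Certificate and offered on the acme-tls/1 ALPN protocol (RFC 8737). The helper below is illustrative and assumes it sits in the same package as TLSALPNChallengeBlocks, with "crypto/tls" imported.

// Illustrative only: build a TLS config that presents the challenge
// certificate during the tls-alpn-01 handshake.
func challengeTLSConfig(domain, keyAuth string) (*tls.Config, error) {
	certPEM, keyPEM, err := TLSALPNChallengeBlocks(domain, keyAuth)
	if err != nil {
		return nil, err
	}
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"acme-tls/1"}, // ALPN protocol id for this challenge
	}, nil
}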
n = int(input()) MOD = 10 ** 9 + 7 ans1 = 1 # accumulates n! mod MOD ans2 = 1 # accumulates 2^(n-1) mod MOD for i in range(1,n+1): ans1 *= i ans1 %= MOD for i in range(n-1): ans2 *= 2 ans2 %= MOD # answer is (n! - 2^(n-1)) mod MOD print((ans1 - ans2) % MOD)
The Adelphi University men’s lacrosse team takes the field to audio of a Donald Trump campaign speech. Barstool Sports posted a video of the team going onto the field with one player carrying the American flag. The following is the excerpt from a Trump speech used in the audio along with music building up in the background: In all of our cities and in all of our towns, I make this promise: We will make America strong again. We will make America proud again. We will make America safe again. And we will make America great again! God bless you and good night. I love you! “The men’s lacrosse team has traditionally chosen patriotic music for its pre-game warm-up,” head coach Gordon Purdie said in a statement of his Division II team. “It was not intended to provoke or be taken as a political statement. We’re sorry if anyone was offended.” Follow Trent Baker on Twitter @MagnifiTrent
#include "../../cudaGraph/graph/graph.hpp" namespace cudaGraph { void graphToCSV(Graph &g) { std::vector<int> sources; std::vector<int> destinations; std::vector<int> weights; for (int i = 0; i < g.vertices.size(); i++) { int startIndex = g.startIndices[i]; int endIndex = g.endIndices[i]; for(int j = startIndex; j < endIndex; j++) { sources.push_back(g.vertices[i]); destinations.push_back(g.vertices[g.edges[j]]); if (g.weights.size() > 0) { weights.push_back(g.weights[j]); } } } std::ofstream myfile; myfile.open("output.csv"); for(int i = 0; i < sources.size(); i++) { if (g.weights.size() > 0) { myfile << std::to_string(sources[i]) << "," << std::to_string(destinations[i]) << "," << std::to_string(weights[i]) << "\n"; } else { myfile << std::to_string(sources[i]) << "," << std::to_string(destinations[i]) << "\n"; } } } }
import { Stage, broadcastEvent } from "../core/Stage"; export enum ScaleMode { ConstantPixelSize, ScaleWithScreenSize, ConstantPhysicalSize } export enum ScreenMatchMode { MatchWidthOrHeight, MatchWidth, MatchHeight } export class UIContentScaler { public static get scaleFactor() { return _scaleFactor; } public static get scaleLevel() { return _scaleLevel; } public static scaleWithScreenSize(designResolutionX: number, designResolutionY: number, screenMatchMode?: ScreenMatchMode) { _designResolutionX = designResolutionX; _designResolutionY = designResolutionY; _scaleMode = ScaleMode.ScaleWithScreenSize; _screenMatchMode = screenMatchMode || ScreenMatchMode.MatchWidthOrHeight; refresh(); } public static setConstant(constantScaleFactor?: number) { _scaleMode = ScaleMode.ConstantPixelSize; _constantScaleFactor = constantScaleFactor || 1; refresh(); } } var _scaleMode: ScaleMode = ScaleMode.ConstantPixelSize; var _screenMatchMode: ScreenMatchMode; var _designResolutionX: number = 1136; var _designResolutionY: number = 640; // var _fallbackScreenDPI: number; // var _defaultSpriteDPI: number; var _constantScaleFactor: number = 1; var _ignoreOrientation: boolean; var _scaleFactor: number = 1; var _scaleLevel: number = 0; Stage.eventDispatcher.on("size_changed", refresh); function refresh() { let screenWidth: number = Stage.width; let screenHeight: number = Stage.height; if (_scaleMode == ScaleMode.ScaleWithScreenSize) { if (_designResolutionX == 0 || _designResolutionY == 0) return; let dx = _designResolutionX; let dy = _designResolutionY; if (!_ignoreOrientation && (screenWidth > screenHeight && dx < dy || screenWidth < screenHeight && dx > dy)) { // scale should not change when the orientation changes let tmp = dx; dx = dy; dy = tmp; } if (_screenMatchMode == ScreenMatchMode.MatchWidthOrHeight) { let s1 = screenWidth / dx; let s2 = screenHeight / dy; _scaleFactor = Math.min(s1, s2); } else if (_screenMatchMode == ScreenMatchMode.MatchWidth) _scaleFactor = screenWidth / dx; else _scaleFactor = screenHeight / dy; } else if (_scaleMode == ScaleMode.ConstantPhysicalSize) { // let dpi = Screen.dpi; // if (dpi == 0) // dpi = _fallbackScreenDPI; // if (dpi == 0) // dpi = 96; // _scaleFactor = dpi / (defaultSpriteDPI == 0 ? 96 : defaultSpriteDPI); } else _scaleFactor = _constantScaleFactor; if (_scaleFactor > 10) _scaleFactor = 10; if (_scaleFactor > 3) _scaleLevel = 3; //x4 else if (_scaleFactor > 2) _scaleLevel = 2; //x3 else if (_scaleFactor > 1) _scaleLevel = 1; //x2 else _scaleLevel = 0; broadcastEvent(Stage.scene, "content_scale_factor_changed"); }
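A short usage sketch of the scaler (imports assumed from this module); 1136x640 mirrors the module's default design resolution:

// Illustrative usage: pick whichever of the width/height ratios is smaller.
UIContentScaler.scaleWithScreenSize(1136, 640, ScreenMatchMode.MatchWidthOrHeight);
console.log(UIContentScaler.scaleFactor, UIContentScaler.scaleLevel);

// Or force a fixed factor regardless of screen size:
UIContentScaler.setConstant(2); // scaleFactor 2 puts scaleLevel at 1 (x2)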
import torch.optim.lr_scheduler as lr_scheduler import utils.saver class ReduceLROnPlateau(lr_scheduler.ReduceLROnPlateau, utils.saver.Saver): name = "scheduler-ReduceLROnPlateau" def __init__(self, optimizer, mode="min", factor=0.1, patience=10, min_lr=1e-8, threshold=1e-6, threshold_mode="abs", cooldown=0): lr_scheduler.ReduceLROnPlateau.__init__(self, optimizer=optimizer, mode=mode, factor=factor, patience=patience, threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, verbose=False, eps=1e-12) utils.saver.Saver.__init__(self)
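A minimal sketch of driving this scheduler, assuming the surrounding package's utils.saver is importable; the parameter and the validation metric are placeholders:

# Illustrative only: step the scheduler on a validation metric each epoch.
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = ReduceLROnPlateau(opt, mode="min", factor=0.5, patience=2)
for epoch in range(10):
    val_loss = 1.0  # placeholder metric; normally computed on a validation set
    sched.step(val_loss)  # step() is inherited from torch's ReduceLROnPlateau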
def _make_request(session, resource, json_data=None, stream=False, use_http_get=False): url = session.get_url(resource) if use_http_get: response = _requests_session.get( url, verify=session.verify_ssl_certs, stream=stream ) else: if json_data is None: json_data = {} json_data[CoordConsts.SVC_KEY_VERSION] = pybatfish.__version__ multipart_data = MultipartEncoder(json_data) headers = {"Content-Type": multipart_data.content_type} response = _requests_session.post( url, data=multipart_data, verify=session.verify_ssl_certs, stream=stream, headers=headers, ) response.raise_for_status() return response
Rumor: Not A Hero is Resident Evil 7’s True Ending, Includes Real Final Boss Man, today has just been awesome for RE news. Resident Evil 7’s Gold Edition, along with the final release date of the long-anticipated DLC chapters Not A Hero and End of Zoe, were finally announced today with some minor details, but thanks to NeoGaf user DuskGolem (who originally leaked Not A Hero’s delay days before Capcom confirmed it), we might have a few more. Despite Golem’s pedigree with these leaks, we’ll still count this as being only a rumor, but it sounds like it could potentially be amazing if it’s true. Fans were somewhat disappointed with Resident Evil 7’s final showdown, which was admittedly pretty lackluster (Spoilers: facing Ethan off against the final iteration of the game’s main monster, which only takes a handful of bullets to put down), and it looks like that may have been intentional. According to DuskGolem, Not A Hero will feature Resident Evil 7’s true ending and true final boss, meaning that the reason Not A Hero is free is because it’s actually a major part of the story and not just some extra stuff like the other paid DLCs. It takes place literally immediately after the RE7 ending, it’s basically RE7’s “true end” and has a proper final boss and everything. It has a decent amount of new stuff, you go to some new areas as well as expansions on some of the final areas in the game, there are some new types of Molded not in the main game, and that’s probably all I should say. It’s more action-focused, but that’s not to say there’s no horror. I don’t know if this is in the revised version anymore, but in the original one you could punch enemies. Some of this Golem leaked before, like the inclusion of new Molded types, and the ability to punch enemies (which looks like it’s staying in the final build; one of the screenshots shows Chris giving a Molded one of his patented knuckle sandwiches). I can’t wait; this news has me more excited than I thought I’d be for Not A Hero, considering how long it’s been in limbo. As for End of Zoe, the final paid DLC, Golem only reiterates that someone from Kojima Productions’ cancelled P.T. directed it, and that it takes place in an entirely new area, which is actually a first for the game’s DLC. Every other piece thus far has, at best, taken place in a modified area from the main game. I would like to throw in my own two cents, though. The trailer released today shows a few brief moments from this content, mainly of Zoe’s hand twitching inside her mold encasing, resting at a swampy beach… which is interesting. Spoilers, obviously, but the only way she ends up like this is if Ethan chooses her over his wife, Mia, towards the game’s conclusion. As the pair make their escape, Zoe is overtaken by the mold, and the boat is capsized. This wouldn’t be remarkable, outside of what it means for the game’s ending. See, if we choose to save Zoe over Mia, we are treated to what I can only call the “bad” ending to Resident Evil 7. Mia is killed and Ethan makes peace with her passing as the Umbrella Corporation flies him to safety. This is a pretty big deal, seeing as it not only means that Ethan escaped alone, but also that it’s the first time the series has gone with the “Bad” ending of a game as canon. In fact, for the most part, RE’s canon endings (when there were multiple endings) are often a combination of events, rather than being one specific ending that we can point to.
RE1’s true ending is a combination of the best possible endings for both Chris and Jill; RE2 is generally accepted to be Claire A/Leon B, although certain elements seem to cross over. RE3 actually has no canon ending, outside of knowing that Barry rescues Jill and Carlos — Nicholi’s fate is deliberately left up to the fans. It’s interesting to think about, at least, until we finally get to play on December 12th. I might be totally wrong, and the DLC might not even directly acknowledge which ending it’s built off of, considering Mia manages to wake up aboard the crashed oil tanker regardless of who you choose. We’ll keep you updated as more information is made available. [Source]
/*! * Copyright 2021 Cognite AS */ import { callActionWithIndicesAsync } from './callActionWithIndicesAsync'; describe('test callActionWithIndicesAsync', () => { it('calls an action with specified indices range', async () => { const start = 10; const end = 100000; const timesToCall = end - start + 1; const action = jest.fn(); await callActionWithIndicesAsync(start, end, action); expect(action).toBeCalledTimes(timesToCall); expect(action).toHaveBeenNthCalledWith(1, start); expect(action).toHaveBeenNthCalledWith(1 + 1234, start + 1234); expect(action).toHaveBeenNthCalledWith(timesToCall, end); }); });
/* Copyright (c) 2013 OpenPlans. All rights reserved. * This code is licensed under the BSD New License, available at the root * application directory. */ package org.geogit.storage; import org.geogit.api.ObjectId; import com.google.common.annotations.Beta; import com.google.common.base.Optional; import com.google.common.collect.ImmutableList; @Beta public interface GraphDatabase { public static final String SPARSE_FLAG = "sparse"; /** * Initializes/opens the database. It's safe to call this method multiple times, and only the * first call shall take effect. */ public void open(); /** * @return true if the database is open, false otherwise */ public boolean isOpen(); /** * Closes the database. */ public void close(); /** * Determines if the given commit exists in the graph database. * * @param commitId the commit id to search for * @return true if the commit exists, false otherwise */ public boolean exists(final ObjectId commitId); /** * Retrieves all of the parents for the given commit. * * @param commitId the commit id to look up * @return the commit ids of the commit's parents * @throws IllegalArgumentException */ public ImmutableList<ObjectId> getParents(ObjectId commitId) throws IllegalArgumentException; /** * Retrieves all of the children for the given commit. * * @param commitId the commit id to look up * @return the commit ids of the commit's children * @throws IllegalArgumentException */ public ImmutableList<ObjectId> getChildren(ObjectId commitId) throws IllegalArgumentException; /** * Adds a commit to the database with the given parents. If a commit with the same id already * exists, it will not be inserted. * * @param commitId the commit id to insert * @param parentIds the commit ids of the commit's parents * @return true if the commit id was inserted, false otherwise */ public boolean put(final ObjectId commitId, ImmutableList<ObjectId> parentIds); /** * Maps a commit to another original commit. This is used in sparse repositories. * * @param mapped the id of the mapped commit * @param original the commit to map to */ public void map(final ObjectId mapped, final ObjectId original); /** * Gets the id of the commit that this commit is mapped to. * * @param commitId the commit to find the mapping of * @return the mapped commit id */ public ObjectId getMapping(final ObjectId commitId); /** * Gets the number of ancestors of the commit until it reaches one with no parents, for example * the root or an orphaned commit. * * @param commitId the commit id to start from * @return the depth of the commit */ public int getDepth(final ObjectId commitId); /** * Finds the lowest common ancestor of two commits. * * @param leftId the commit id of the left commit * @param rightId the commit id of the right commit * @return An {@link Optional} of the lowest common ancestor of the two commits, or * {@link Optional#absent()} if a common ancestor could not be found. */ public Optional<ObjectId> findLowestCommonAncestor(ObjectId leftId, ObjectId rightId); /** * Set a property on the provided commit node. * * @param commitId the id of the commit * @param propertyName the name of the property to set * @param propertyValue the value to set */ public void setProperty(ObjectId commitId, String propertyName, String propertyValue); /** * Determines if there are any sparse commits between the start commit and the end commit, not * including the end commit. * * @param start the start commit * @param end the end commit * @return true if there are any sparse commits between start and end */ public boolean isSparsePath(ObjectId start, ObjectId end); }
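A hedged usage sketch of the interface: graphDb stands for any GraphDatabase implementation, and the ObjectId values are assumed to reference existing commits.

// Illustrative only: record a merge commit and query ancestry.
graphDb.open();
graphDb.put(mergeCommitId, ImmutableList.of(leftParentId, rightParentId));
Optional<ObjectId> ancestor =
        graphDb.findLowestCommonAncestor(leftParentId, rightParentId);
if (ancestor.isPresent()) {
    int depth = graphDb.getDepth(ancestor.get()); // distance to a parentless commit
}
graphDb.close();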
package request import ( "github.com/dgrijalva/jwt-go" uuid "github.com/satori/go.uuid" ) // Custom claims structure type CustomClaims struct { UUID uuid.UUID // Unique identifier ID uint // User ID Username string // Username NickName string // Nickname AuthorityId string // Authority (role) ID BufferTime int64 jwt.StandardClaims }
// TODO: Use WTF-8 rather than UTF-16 #![allow(clippy::type_complexity)] mod local; use async_trait::async_trait; use futures::{future::LocalBoxFuture, ready}; use pin_project::pin_project; use std::{ convert::TryFrom, error::Error, ffi, fmt, future::Future, io, pin::Pin, sync::Arc, task::{Context, Poll} }; use widestring::U16String; use crate::pool::ProcessSend; pub use local::LocalFile; const PAGE_SIZE: usize = 10 * 1024 * 1024; // `Reader` reads this many bytes at a time #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct OsString { buf: U16String, } impl OsString { pub fn new() -> Self { Self { buf: U16String::new(), } } pub fn to_string_lossy(&self) -> String { self.buf.to_string_lossy() } pub fn display<'a>(&'a self) -> impl fmt::Display + 'a { struct Display<'a>(&'a OsString); impl<'a> fmt::Display for Display<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.to_string_lossy().fmt(f) } } Display(self) } } impl From<Vec<u8>> for OsString { fn from(from: Vec<u8>) -> Self { Self { buf: String::from_utf8(from) .expect("Not yet implemented: Handling non-UTF-8") .into(), } // TODO } } impl From<String> for OsString { fn from(from: String) -> Self { Self { buf: from.into() } } } impl From<&str> for OsString { fn from(from: &str) -> Self { Self { buf: U16String::from_str(from), } } } impl From<ffi::OsString> for OsString { fn from(from: ffi::OsString) -> Self { Self { buf: U16String::from_os_str(&from), } } } impl From<&ffi::OsStr> for OsString { fn from(from: &ffi::OsStr) -> Self { Self { buf: U16String::from_os_str(from), } } } pub struct InvalidOsString; impl TryFrom<OsString> for ffi::OsString { type Error = InvalidOsString; fn try_from(from: OsString) -> Result<Self, Self::Error> { Ok(from.buf.to_os_string()) // TODO: this is lossy but it should error } } impl PartialEq<Vec<u8>> for OsString { fn eq(&self, other: &Vec<u8>) -> bool { self == &OsString::from(other.clone()) } } impl PartialEq<String> for OsString { fn eq(&self, other: &String) -> bool { self == &OsString::from(other.clone()) } } impl PartialEq<str> for OsString { fn eq(&self, other: &str) -> bool { self == &OsString::from(other) } } impl PartialEq<ffi::OsString> for OsString { fn eq(&self, other: &ffi::OsString) -> bool { self == &OsString::from(other.clone()) } } impl PartialEq<ffi::OsStr> for OsString { fn eq(&self, other: &ffi::OsStr) -> bool { self == &OsString::from(other) } } impl fmt::Debug for OsString { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.display()) } } #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct PathBuf { components: Vec<OsString>, file_name: Option<OsString>, } impl PathBuf { pub fn new() -> Self { Self { components: Vec::new(), file_name: None, } } pub fn push<S>(&mut self, component: S) where S: Into<OsString>, { assert!(self.file_name.is_none()); self.components.push(component.into()); } pub fn pop(&mut self) -> Option<OsString> { assert!(self.file_name.is_none()); self.components.pop() } pub fn last(&self) -> Option<&OsString> { assert!(self.file_name.is_none()); self.components.last() } pub fn set_file_name<S>(&mut self, file_name: Option<S>) where S: Into<OsString>, { self.file_name = file_name.map(Into::into); } pub fn is_file(&self) -> bool { self.file_name.is_some() } pub fn file_name(&self) -> Option<&OsString> { self.file_name.as_ref() } pub fn depth(&self) -> usize { self.components.len() } pub fn iter<'a>(&'a self) -> impl Iterator<Item = &OsString> + 'a {
self.components.iter() } pub fn display<'a>(&'a self) -> impl fmt::Display + 'a { struct Display<'a>(&'a PathBuf); impl<'a> fmt::Display for Display<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut res: fmt::Result = self .0 .iter() .map(|component| write!(f, "{}/", component.to_string_lossy())) .collect(); if let Some(file_name) = self.0.file_name() { res = res.and_then(|()| write!(f, "{}", file_name.to_string_lossy())); } res } } Display(self) } } impl fmt::Debug for PathBuf { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.display()) } } #[async_trait(?Send)] pub trait Directory: File { async fn partitions_filter<F>( self, f: F, ) -> Result<Vec<<Self as File>::Partition>, <Self as File>::Error> where F: FnMut(&PathBuf) -> bool; } #[async_trait(?Send)] pub trait File { type Partition: Partition; type Error: Error + Clone + PartialEq + 'static; async fn partitions(self) -> Result<Vec<Self::Partition>, Self::Error>; } #[async_trait(?Send)] pub trait Partition: Clone + fmt::Debug + ProcessSend + 'static { type Page: Page; type Error: Error + Clone + PartialEq + ProcessSend + 'static; async fn pages(self) -> Result<Vec<Self::Page>, Self::Error>; } #[allow(clippy::len_without_is_empty)] pub trait Page { type Error: Error + Clone + PartialEq + Into<io::Error> + ProcessSend + 'static; fn len(&self) -> LocalBoxFuture<'static, Result<u64, Self::Error>>; fn read( &self, offset: u64, len: usize, ) -> LocalBoxFuture<'static, Result<Box<[u8]>, Self::Error>>; fn write( &self, offset: u64, buf: Box<[u8]>, ) -> LocalBoxFuture<'static, Result<(), Self::Error>>; fn reader(self) -> Reader<Self> where Self: Sized, { Reader::new(self) } } impl<T: ?Sized> Page for &T where T: Page, { type Error = T::Error; fn len(&self) -> LocalBoxFuture<'static, Result<u64, Self::Error>> { (**self).len() } fn read( &self, offset: u64, len: usize, ) -> LocalBoxFuture<'static, Result<Box<[u8]>, Self::Error>> { (**self).read(offset, len) } fn write( &self, offset: u64, buf: Box<[u8]>, ) -> LocalBoxFuture<'static, Result<(), Self::Error>> { (**self).write(offset, buf) } } impl<T: ?Sized> Page for Arc<T> where T: Page, { type Error = T::Error; fn len(&self) -> LocalBoxFuture<'static, Result<u64, Self::Error>> { (**self).len() } fn read( &self, offset: u64, len: usize, ) -> LocalBoxFuture<'static, Result<Box<[u8]>, Self::Error>> { (**self).read(offset, len) } fn write( &self, offset: u64, buf: Box<[u8]>, ) -> LocalBoxFuture<'static, Result<(), Self::Error>> { (**self).write(offset, buf) } } #[pin_project] pub struct Reader<P> where P: Page, { #[pin] page: P, #[pin] pending: Option<LocalBoxFuture<'static, Result<Box<[u8]>, P::Error>>>, offset: u64, } #[allow(clippy::len_without_is_empty)] impl<P> Reader<P> where P: Page, { fn new(page: P) -> Self { Self { page, pending: None, offset: 0, } } } impl<P> futures::io::AsyncRead for Reader<P> where P: Page, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8], ) -> Poll<io::Result<usize>> { let mut self_ = self.project(); if self_.pending.is_none() { let start = *self_.offset; let len = buf.len(); let len = len.min(PAGE_SIZE); let pending = self_.page.read(start, len); *self_.pending = Some(pending); } let ret = ready!(self_.pending.as_mut().as_pin_mut().unwrap().poll(cx)); *self_.pending = None; let ret = ret .map(|buf_| { buf[..buf_.len()].copy_from_slice(&buf_); buf_.len() }) .map_err(Into::into); *self_.offset += u64::try_from(ret.as_ref().ok().cloned().unwrap_or(0)).unwrap(); Poll::Ready(ret) } } // impl<P> 
io::Seek for Reader<P> // where // P: Page, // { // fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> { // let len = self.page.len(); // self.offset = match pos { // io::SeekFrom::Start(n) => Some(n), // io::SeekFrom::End(n) if n >= 0 => len.checked_add(u64::try_from(n).unwrap()), // io::SeekFrom::End(n) => { // let n = u64::try_from(-(n + 1)).unwrap() + 1; // len.checked_sub(n) // } // io::SeekFrom::Current(n) if n >= 0 => { // self.offset.checked_add(u64::try_from(n).unwrap()) // } // io::SeekFrom::Current(n) => { // let n = u64::try_from(-(n + 1)).unwrap() + 1; // self.offset.checked_sub(n) // } // } // .ok_or_else(|| { // io::Error::new( // io::ErrorKind::InvalidInput, // "invalid seek to a negative or overflowing position", // ) // })?; // Ok(self.offset) // } // }
from SpheralCompiledPackages import PhysicalConstants #------------------------------------------------------------------------------- # MKS units. #------------------------------------------------------------------------------- class MKS(PhysicalConstants): def __init__(self): PhysicalConstants.__init__(self, 1.0, # Unit length (m) 1.0, # Unit mass (kg) 1.0, # Unit time (sec) 1.0, # Unit temp (kelvin) 1.0) # Unit charge (coulomb) return #------------------------------------------------------------------------------- # CGS units. #------------------------------------------------------------------------------- class CGS(PhysicalConstants): def __init__(self): PhysicalConstants.__init__(self, 0.01, # Unit length (m) 0.001, # Unit mass (kg) 1.0, # Unit time (sec) 1.0, # Unit temp (kelvin) 1.0) # Unit charge (coulomb) return #------------------------------------------------------------------------------- # Cosmological units (Mpc, Mmsun, Myr) #------------------------------------------------------------------------------- class Cosmological(PhysicalConstants): def __init__(self): PhysicalConstants.__init__(self, 3.08567757e22, # Unit length (m) 1.9891e36, # Unit mass (kg) 3.155674e19, # Unit time (sec) 1.0, # Unit temp (kelvin) 1.0) # Unit charge (coulomb) return #------------------------------------------------------------------------------- # Solar units. (AU, Msun, yr) #------------------------------------------------------------------------------- class Solar(PhysicalConstants): def __init__(self): PhysicalConstants.__init__(self, 149597870700.0, # Unit length (m) 1.98892e30, # Unit mass (kg) 365.25*3600*24, # Unit time (sec) 1.0, # Unit temp (kelvin) 1.0) # Unit charge (coulomb) return
/* EXAMPLE osmium_tiles Convert WGS84 longitude and latitude to Mercator coordinates and tile coordinates. DEMONSTRATES USE OF: * the Location and Coordinates classes * the Mercator projection function * the Tile class LICENSE The code in this example file is released into the Public Domain. */ #include <cstdlib> // for std::atoi, std::atof #include <iostream> // for std::cout, std::cerr // The Location contains a longitude and latitude and is usually used inside // a node to store its location in the world. #include <osmium/osm/location.hpp> // Needed for the Mercator projection function. Osmium supports the Mercator // projection out of the box, or pretty much any projection using the Proj.4 // library (with the osmium::geom::Projection class). #include <osmium/geom/mercator_projection.hpp> // The Tile class handles tile coordinates and zoom levels. #include <osmium/geom/tile.hpp> int main(int argc, char* argv[]) { if (argc != 4) { std::cerr << "Usage: " << argv[0] << " ZOOM LON LAT\n"; return 1; } const int zoom = std::atoi(argv[1]); // NOLINT(cert-err34-c) if (zoom < 0 || zoom > 30) { std::cerr << "ERROR: Zoom must be between 0 and 30\n"; return 1; } osmium::Location location{}; try { location.set_lon(argv[2]); location.set_lat(argv[3]); } catch (const osmium::invalid_location&) { std::cerr << "ERROR: Location is invalid\n"; return 1; } // A location can store some invalid locations, ie locations outside the // -180 to 180 and -90 to 90 degree range. This function checks for that. if (!location.valid()) { std::cerr << "ERROR: Location is invalid\n"; return 1; } std::cout << "WGS84: lon=" << location.lon() << " lat=" << location.lat() << "\n"; // Project the coordinates using a helper function. You can also use the // osmium::geom::MercatorProjection class. const osmium::geom::Coordinates c = osmium::geom::lonlat_to_mercator(location); std::cout << "Mercator: x=" << c.x << " y=" << c.y << "\n"; // Create a tile at this location. This will also internally use the // Mercator projection and then calculate the tile coordinates. const osmium::geom::Tile tile{uint32_t(zoom), location}; std::cout << "Tile: zoom=" << tile.z << " x=" << tile.x << " y=" << tile.y << "\n"; }
// Print all entries of 2D DMDA global vector to stdout, including which // processor each entry lives on, and the corresponding subscripting // indices PetscErrorCode printf_DM_2d(const Vec gvec, const DM dm) { PetscErrorCode ierr = 0; #if VERBOSE > 2 PetscPrintf(PETSC_COMM_WORLD,"Starting main::printf_DM_2d in fault.cpp.\n"); #endif PetscMPIInt rank; MPI_Comm_rank(PETSC_COMM_WORLD,&rank); PetscInt i,j,mStart,m,nStart,n; DMDAGetCorners(dm,&mStart,&nStart,0,&m,&n,0); PetscScalar **gxArr; DMDAVecGetArray(dm,gvec,&gxArr); for (j=nStart;j<nStart+n;j++) { for (i=mStart;i<mStart+m;i++) { PetscPrintf(PETSC_COMM_SELF,"%i: gxArr[%i][%i] = %g\n", rank,j,i,gxArr[j][i]); } } DMDAVecRestoreArray(dm,gvec,&gxArr); #if VERBOSE > 2 PetscPrintf(PETSC_COMM_WORLD,"Ending main::printf_DM_2d in fault.cpp.\n"); #endif return ierr; }
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import torch import torchvision import os import sys sys.path.append('../model/pose_estimation') from PIL import Image, ImageOps from model import PoseEstimationNetwork from util import convert_image_to_tensor def preprocess_image(path_image, device): """ Pre-processing on the image Args: path_image (str): path to the image that will be the input of the model device (torch.device): device on which the script is running """ image = convert_image_to_tensor(path_image, 224, 224) image = [image] image = list(img.to(device) for img in image) return image global model model = None def run_model_main(image_data, image_width, image_height, model_file_name): global model device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if model is None: checkpoint = torch.load(model_file_name, map_location=device) model = PoseEstimationNetwork(scale_translation=1) model.load_state_dict(checkpoint["model"]) model = model.to(device) model.eval() image_path = _save_image(image_data) image = preprocess_image(image_path, device) output_translation_drone, output_translation_cube = model(torch.stack(image).reshape(-1, 3, 224, 224)) output_translation_drone = output_translation_drone.to(device) output_translation_cube = output_translation_cube.to(device) output_translation_drone = output_translation_drone.detach().cpu().numpy() output_translation_cube = output_translation_cube.detach().cpu().numpy() return output_translation_drone, output_translation_cube count = 0 PACKAGE_LOCATION = "." def _save_image(image_data): """ convert raw image data to a png and save it Args: image_data (bytes): raw RGBA image data (640x480) to save Returns: image_path (str): location of saved png file """ global count count += 1 image = Image.frombytes('RGBA', (640,480), image_data) image = ImageOps.flip(image) image_name = "Input" + str(count) + ".png" if not os.path.exists(PACKAGE_LOCATION + "/images/"): os.makedirs(PACKAGE_LOCATION + "/images/") image_path = PACKAGE_LOCATION + "/images/" + image_name image.save(image_path) return image_path
/* tslint:disable */
/* eslint-disable */
// @generated
// This file was automatically generated and should not be edited.

import { BuildBuildStatus } from "./globalTypes";

// ====================================================
// GraphQL query operation: Builds
// ====================================================

export interface Builds_builds_edges_node_app_customer {
  __typename: "UserNode";
  /**
   * The ID of the object.
   */
  id: string;
  /**
   * Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.
   */
  username: string;
}

export interface Builds_builds_edges_node_app {
  __typename: "StoreAppType";
  /**
   * The ID of the object.
   */
  id: string;
  name: string;
  isActive: boolean | null;
  customer: Builds_builds_edges_node_app_customer | null;
}

export interface Builds_builds_edges_node {
  __typename: "BuildType";
  /**
   * The ID of the object.
   */
  id: string;
  buildId: any;
  date: any;
  buildStatus: BuildBuildStatus;
  app: Builds_builds_edges_node_app | null;
}

export interface Builds_builds_edges {
  __typename: "BuildTypeEdge";
  /**
   * The item at the end of the edge
   */
  node: Builds_builds_edges_node | null;
}

export interface Builds_builds {
  __typename: "BuildTypeConnection";
  /**
   * Contains the nodes in this connection.
   */
  edges: (Builds_builds_edges | null)[];
}

export interface Builds {
  builds: Builds_builds | null;
}

export interface BuildsVariables {
  first?: number | null;
  last?: number | null;
}
Technical Requirements Analysis of Power Grid Development for a High Proportion of Renewable Energy

At present, China's energy and power system is in a critical period of upgrading and transformation, and new energy power generation will occupy an increasingly important position in the energy field. In the future, new energy will be connected to the power grid on a large scale, which will bring challenges to the safe, stable and efficient operation of the power grid. It is therefore necessary to cultivate corresponding technologies to support the future development of the power grid. This paper starts with an analysis of China's energy transformation vision, clarifies the many challenges that the power grid will face, and then proposes five key technical requirements for the future development of the power grid.

Introduction

At present, the global response to climate change is accelerating, resource and environmental constraints are continuously tightening, energy development faces new situations and new challenges, the world's energy landscape is being deeply adjusted, and the methods of resource development and utilization, as well as the level of energy utilization, are undergoing profound changes. As an important foundation supporting the modern economic system, the upgrading and transformation of the energy and power system will directly affect the sustainable development of China's economy and society, and China's competitiveness in the new round of global industrial transformation. It is therefore imperative to build a new generation of energy systems that meets the requirements of China's energy transformation and achieves a clean, low-carbon, safe and efficient energy supply.

In realizing China's energy transformation, the power system will play an irreplaceable role. It is therefore necessary to clarify the future national power structure and development layout under China's energy transformation pattern, and the challenges facing grid development, and then put forward the key technology requirements for grid development oriented to a high proportion of new energy access.

China's energy transformation prospects

According to research on the development prospects of various power sources, China's installed generating capacity is projected to reach 2.11 billion, 2.74 billion and 3.88 billion kilowatts in 2020, 2025 and 2035 respectively. The share of non-fossil energy, which stood at 41% in 2017, will increase to 43%, 49% and 60% over the same years (equivalent to roughly 0.91, 1.34 and 2.33 billion kilowatts of non-fossil capacity, respectively). The installed capacity of new energy sources such as wind power and solar power will increase rapidly; thermal power will still maintain a certain scale, but it will gradually shift from being the main provider of electrical energy to serving mainly as a provider of power capacity. The blueprint for the future development of the new generation of power system and its key technological approaches are clearly oriented: increase the proportion of new energy, strengthen the capacity for energy allocation and transmission, and optimize the way energy is used. The layout of future power development and power flow are shown in Figure 1.

In this new situation, in order to match China's resource endowment and its economic and social needs, the future power grid will integrate distributed generation, energy storage, integrated energy systems, and advanced information and communication technologies, and will further evolve into a flexible and efficient smart energy network.
This smart energy network will provide a safe, reliable, efficient and diversified energy supply, and will strongly support the construction of China's modern economic system.

Power grid development challenges

Looking into the future, the profound changes in the way energy is produced and consumed bring a series of major challenges to the power grid, mainly reflected in the following three aspects:

(1) High proportion of new energy: the power grid's balance adjustment capability needs to be enhanced

China's new energy output and load demand are inversely distributed in both the time and space dimensions, and the greatly increased proportion of new energy access will pose severe challenges to the balance adjustment capability of the power grid. First, the problem of balancing new energy power is prominent. Second, the system's dynamic adjustment capability needs to be strengthened.

(2) The shape of the power grid is changing, and safe operation presents new features

With the rapid development of new energy and direct-current transmission, power electronics technology is widely used, and the shape and operating characteristics of the power grid have undergone profound changes. First, new problems in the safe operation of the system have become prominent, increasing the difficulty of safe operation. Second, the system's operating characteristics are changing, further raising the requirements for flexible control.

(3) Power sector reform is deepening, and institutional mechanisms need to be improved

In the past two years, the cost of new energy development and utilization has continued to decline, and the National Development and Reform Commission and the National Energy Administration have introduced a number of policies to promote new energy consumption and grid parity. However, compared with conventional power sources, the cost of new energy generation is still high. China's renewable energy subsidies have experienced funding gaps; effective price incentive mechanisms for thermal power to participate in peak shaving, for energy storage technology development, and for power loads to participate in system adjustment have not yet been formed; and the precision of policy and market design needs to be improved.

In short, in the context of the energy transition, opportunities and challenges for the grid coexist. From the perspective of the external environment, the in-depth advancement of energy transformation poses new challenges to the functional form and operation mode of the power grid; in terms of internal factors, a high proportion of new energy brings greater risks and impacts to the power grid. As a result, higher requirements are placed on the technologies supporting the development of the power grid.

Power grid development technology requirements

Focusing on the construction of a clean, low-carbon, safe and efficient energy system, and based on the current state of China's energy development, it is necessary to concentrate on the following five major technical fields that have a global impact on grid development.

Efficient, low-cost, long-life energy storage technology

Energy storage can fundamentally change the "real-time supply and demand balance" operating mode of the traditional grid and enhance the flexibility of the power system, and is therefore an important support for the power grid. China's energy storage development is still in the cultivation stage.
Engineering applications still take the transition from experimental demonstration to commercial operation as their main goal. High safety, long life, high efficiency and low cost are the overall goals of energy storage technology development. It is expected that energy storage will be initially commercialized by 2035, and that energy storage technology with independent intellectual property rights will be fully applied to the safe operation of the power grid, improving the grid's compatibility and hosting capacity. At present, to meet the development needs of the new generation of power system, the key problems to be solved urgently in the energy storage field are improving the energy density, power density, response time, storage efficiency, cycle performance, economy and reliability of energy storage devices. Technological research is needed in battery materials, manufacturing processes, system integration, and operation and maintenance.

High-reliability, low-loss power electronics

The wide application of high-performance power electronics can significantly improve the controllability of power generation, transmission, distribution and use, and is an important guarantee and basic condition for the safe and stable operation of the power grid. At present, there is still a large gap between China's power electronic device manufacturing technology and the international state of the art. For wide-bandgap power electronic devices there are many material and process problems, such as complex manufacturing, and there are also many difficulties in new DC transmission and DC networking technology. Device design, manufacturing and test technology in China still need to be strengthened, and the key technologies need to be developed independently.

High reliability and low loss are the overall goals for the future development of power electronics technology. It is necessary to further increase investment in research and development and to promote the autonomy, controllability and safety of devices, thereby improving the efficiency and reliability of power transmission and increasing the capacity of the power grid. For wide-bandgap power electronic devices, application research on silicon carbide (SiC) devices in electrical equipment such as switching devices should be promoted, basic theoretical research on gallium nitride (GaN) materials should be carried out, and related switching devices should be developed. In new DC transmission and DC grid technologies, research should focus on transmission technologies such as capacitor-commutated HVDC transmission, compact converter stations and multi-terminal HVDC transmission, and should promote the realization of multi-gigawatt voltage-source-converter HVDC transmission and its demonstration in large-scale grid access.

High-strength insulation technology and superconducting power transmission technology

New transmission and distribution line technology is the basis for the development of power grids at all levels in China, and is an important means and cornerstone for ensuring the reliability of power supply and achieving low-carbon power transmission.
At present, the development of high-strength insulation technology still needs to solve core key technologies such as gas-insulated pipeline transmission and high-strength insulation materials for cables. The development of high-strength insulation technology significantly improves the performance of power transmission and transformation equipment, and is of practical importance for ensuring the long-term safe and stable operation of the power grid. Superconducting power transmission uses the high-density, resistance-free current-carrying capacity of a superconductor in the superconducting state to realize large-capacity power transmission. Superconducting transmission technology is at the critical stage of transition from experimental demonstration to practical engineering application, and further breakthroughs are still needed in cable performance, core technology and economics. The development and application of superconducting power transmission technology may well disrupt traditional transmission lines.

For high-strength insulation technology, the overall development goals are to improve transmission capacity, increase insulation strength, reduce losses, and achieve environmental friendliness and intelligence. By mastering the key technologies and core preparation processes of high-strength insulation materials, the localization of advanced insulation materials can be realized in the future. For superconducting power transmission technology, the overall development goals are high capacity, high efficiency, high reliability, and good compatibility with conventional power systems. By 2035, superconducting materials are expected to reach mature production technology and lower prices, superconducting power transmission will be economically competitive with conventional technology and equipment, and multiple functions and operating modes will be realizable.

Operational stability analysis and control technology

With the rapid development of new energy and direct-current transmission, the widespread application of power electronics, and profound changes in grid operating characteristics and mechanisms, the development of stability analysis and control technology will provide a solid foundation and core guarantee for the grid. In operational stability analysis, key technologies include: analysis of future power grid operating mechanisms, key technologies for large-grid simulation, key modeling techniques, and stability trend analysis for grids with large-scale new energy integration. In operational stability control, key technologies include: future grid operation control, active and reactive power regulation, and the construction of evaluation systems for large-grid operation.

In the future, new energy will gradually become the main power source, and grid operation will take place in more complex and changeable scenarios; the responsibility and pressure to comprehensively improve grid security and operational efficiency are enormous. Focusing on the future morphological changes and operational requirements of the power grid, a new generation of operational stability analysis and control methods must adapt to the trends of larger-scale power response, faster and more diverse control, and broader participation, and must respond to the great challenges these pose to traditional basic theory.
Energy Internet and new-generation artificial intelligence technology

The applications of artificial intelligence to power systems in China include expert systems and artificial neural networks, mainly applied to power grid security, stability and operation, power asset management, and intelligent operation and maintenance. Overall, however, the field is still in the research and exploration stage, with few successful engineering applications. For the energy Internet, the deep integration of energy and information is the overall goal of technology development; commercial operation of the energy Internet is expected to be initially realized by 2035. As for new-generation artificial intelligence, AI will gradually become a powerful means of solving complex power grid problems and an effective tool for promoting the evolution of power systems.

In response to the above gaps and shortcomings, and in order for the energy Internet to play its full role in the new generation of power system, it is necessary to strengthen multi-energy coordination and control, cyber-physical integration, information openness and sharing, and the marketization of the energy Internet, and to carry out technical research on multi-energy complementarity, integrated energy-information analysis, market mechanisms, and so on. For new-generation electric power artificial intelligence, the following technologies need in-depth study: smart sensor technology, artificial intelligence platform technology, cutting-edge machine learning technology and intelligent robot technology.

All in all, the energy transformation requires comprehensive improvement of the technologies needed for grid development, thereby improving the grid's capacity to accept new energy and its level of safe and stable operation, vigorously promoting the efficient and flexible allocation of resources, and establishing an intelligent, interactive and diversified energy supply and service system.

Conclusion

China's energy structure has long been based on fossil energy, especially coal, and the pressure of ecological and environmental protection and pollution reduction is prominent; it is imperative to vigorously promote energy transformation and optimize the energy structure. At present, China's energy and power system is in a critical period of upgrading and transformation, and new energy generation will occupy an increasingly important position in the energy field. In the future, new energy will be connected to the power grid on a large scale, which will bring many challenges to the safe, stable and efficient operation of the grid. It is therefore necessary to cultivate the corresponding technologies to support the future development of the power grid.
Among the five key technical fields: efficient, low-cost, long-life energy storage technology focuses on enhancing the flexibility of the power grid; high-reliability, low-loss power electronics can significantly improve the controllability of power generation, transmission, distribution and use; high-strength insulation technology and superconducting power transmission technology play an important role in ensuring reliable, low-carbon, high-capacity power transmission; operational stability analysis and control technology is mainly used to comprehensively improve the safety and operational efficiency of the power grid; and the energy Internet and new-generation artificial intelligence technology can realize the deep integration of energy and information, which is the key to the future development of the power grid. Together, these five technologies will comprehensively improve the energy transformation capability, resource allocation capability and intelligent interaction capability of the power system.
import { Box, useColorModeValue } from '@chakra-ui/react'; import { useTranslation } from 'next-i18next'; import React from 'react'; const Footer = () => { const { t } = useTranslation(); return ( <Box py={14} alignItems='center' textAlign='center' color={useColorModeValue('black', 'grey')}> {new Date().getFullYear()} <NAME>. {t('footer.all_rights_reserved')} </Box> ); }; export default Footer;
/**************************************************************************** ePMC - an extensible probabilistic model checker Copyright (C) 2017 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. *****************************************************************************/ package epmc.jani; import org.junit.BeforeClass; import org.junit.Test; import epmc.jani.model.UtilModelParser; import epmc.modelchecker.UtilModelChecker; import static epmc.ModelNamesPRISM.*; import static epmc.modelchecker.TestHelper.*; import java.util.LinkedHashSet; import java.util.Set; public final class ConvertTestPRISM { /** * Set up the tests. */ @BeforeClass public static void initialise() { prepare(); } @Test public void brpTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(BRP_MODEL) .putConstant("N", "16") .putConstant("MAX", "2") .setExploreAll() .run(); System.out.println(statistics); } @Test public void cellTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(CELL_MODEL) .putConstant("N", "11") .setExploreAll() .run(); System.out.println(statistics); } @Test public void clusterTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(CLUSTER_MODEL) .putConstant("N", "16") .putConstant("MAX", "8") // .setPrismFlatten(true) .setExploreAll() .run(); System.out.println(statistics); } @Test public void coinTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(COIN_MODEL, 4)) .putConstant("K", "4") .setExploreAll() .run(); System.out.println(statistics); } @Test public void csmaTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(CSMA_MODEL, 2, 6)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void diceTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(DICE_MODEL) .setExploreAll() .run(); System.out.println(statistics); } @Test public void twoDiceTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(TWO_DICE_MODEL) .setExploreAll() .run(); System.out.println(UtilModelParser.prettyString(statistics.getJaniModel())); } @Test public void diningCryptTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(DINING_CRYPT_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void embeddedTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(EMBEDDED_MODEL) .putConstant("MAX_COUNT", 4) .setExploreAll() .run(); System.out.println(statistics); } @Test public void firewireAbstTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(FIREWIRE_ABST_MODEL) .putConstant("delay", "1") .putConstant("fast", "0.1") .setExploreAll() // .setPrintJANI(true) .run(); System.out.println(statistics); } @Test public void firewireImplTest() { 
ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(FIREWIRE_IMPL_MODEL) .putConstant("delay", "5") .putConstant("fast", "0.4") .setExploreAll() .run(); System.out.println(statistics); } @Test public void fmsTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .putConstant("n", 3) .setModelName(FMS_MODEL) .setExploreAll() .run(); System.out.println(statistics); } @Test public void kanbanTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .putConstant("t", 2) .setModelName(KANBAN_MODEL) .setExploreAll() .run(); System.out.println(statistics); } @Test public void asyncLeaderTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(LEADER_ASYNC_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void syncLeaderTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(LEADER_SYNC_MODEL, 3, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void knaclTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(KNACL_MODEL) .putConstant("N1", "5") .putConstant("N2", "5") .putConstant("N3", "5") .setExploreAll() .run(); System.out.println(statistics); } @Test public void naclTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(NACL_MODEL) .putConstant("N1", "10") .putConstant("N2", "10") .setExploreAll() .run(); System.out.println(statistics); } @Test public void mcTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(MC_MODEL) .putConstant("N1", "10") .putConstant("N2", "10") .setExploreAll() .run(); System.out.println(statistics); } @Test public void mutualTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(MUTUAL_MODEL, 4)) // .setExploreAll() .run(); Set<Object> nodeProperties = new LinkedHashSet<>(); nodeProperties.add(UtilModelChecker.parseExpression("p1=10")); nodeProperties.add(UtilModelChecker.parseExpression("p2=10")); nodeProperties.add(UtilModelChecker.parseExpression("p3=10")); nodeProperties.add(UtilModelChecker.parseExpression("p4=10")); // GraphExplicit graph = exploreToGraph(statistics.getJaniModel(), nodeProperties); // System.out.println(graph); } @Test public void peer2peerTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(PEER2PEER_MODEL, 4, 4)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void philTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(PHIL_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void philNoFairTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(PHIL_NOFAIR_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void philLssTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(PHIL_LSS_MODEL, 3)) .putConstant("K", "3") .setExploreAll() .run(); System.out.println(statistics); System.out.println(UtilModelParser.prettyString(statistics.getJaniModel())); } @Test public void pollingTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(POLLING_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void rabinTest() { 
ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(RABIN_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void beauquierTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(BEAUQUIER_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void hermanTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(HERMAN_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void ijTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(IJ_MODEL, 3)) .setExploreAll() .run(); System.out.println(statistics); } @Test public void tandemTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(TANDEM_MODEL) .putConstant("c", "3") .setExploreAll() .run(); System.out.println(statistics); } @Test public void wlanTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(WLAN_MODEL, 1)) .putConstant("TRANS_TIME_MAX", "5") .setExploreAll() .run(); System.out.println(statistics); } @Test public void wlanCollideTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(WLAN_COLLIDE_MODEL, 1)) .putConstant("TRANS_TIME_MAX", "3") .putConstant("COL", "3") .setExploreAll() .run(); System.out.println(statistics); } @Test public void wlanTimeBoundTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(String.format(WLAN_TIME_BOUNDED_MODEL, 1)) .putConstant("TRANS_TIME_MAX", "5") .putConstant("DEADLINE", "4") .setExploreAll() .run(); System.out.println(statistics); } @Test public void zeroconfTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(ZEROCONF_MODEL) .putConstant("reset", "false") .putConstant("N", "3") .putConstant("K", "2") .putConstant("err", "0.2") .setExploreAll() .run(); System.out.println(statistics); } @Test public void zeroconfTimeBoundedTest() { ConvertTestStatistics statistics = new ConvertTestConfiguration() .setModelName(ZEROCONF_TIME_BOUNDED_MODEL) .putConstant("reset", "false") .putConstant("T", "5") .putConstant("N", "3") .putConstant("K", "2") .putConstant("err", "0.2") .setExploreAll() .run(); System.out.println(statistics); } }
// Imports needed to compile the following excerpt (the original file's
// package and import header was cut):
import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.Container;
import java.awt.Dimension;
import java.awt.Insets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.swing.JComponent;
import javax.swing.JInternalFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTextArea;
import javax.swing.JTextField;
import javax.swing.border.EmptyBorder;
/** * This is a subclass of JInternalFrame which displays documents. * * @author Steve Wilson */ @SuppressWarnings("serial") public class MetalworksDocumentFrame extends JInternalFrame { static int openFrameCount = 0; static final int offset = 30; public MetalworksDocumentFrame() { super("", true, true, true, true); openFrameCount++; setTitle("Untitled Message " + openFrameCount); JPanel top = new JPanel(); top.setBorder(new EmptyBorder(10, 10, 10, 10)); top.setLayout(new BorderLayout()); top.add(buildAddressPanel(), BorderLayout.NORTH); JTextArea content = new JTextArea(15, 30); content.setBorder(new EmptyBorder(0, 5, 0, 5)); content.setLineWrap(true); JScrollPane textScroller = new JScrollPane(content, JScrollPane.VERTICAL_SCROLLBAR_ALWAYS, JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED); top.add(textScroller, BorderLayout.CENTER); setContentPane(top); pack(); setLocation(offset * openFrameCount, offset * openFrameCount); } private JPanel buildAddressPanel() { JPanel p = new JPanel(); p.setLayout(new LabeledPairLayout()); JLabel toLabel = new JLabel("To: ", JLabel.RIGHT); JTextField toField = new JTextField(25); p.add(toLabel, "label"); p.add(toField, "field"); JLabel subLabel = new JLabel("Subj: ", JLabel.RIGHT); JTextField subField = new JTextField(25); p.add(subLabel, "label"); p.add(subField, "field"); JLabel ccLabel = new JLabel("cc: ", JLabel.RIGHT); JTextField ccField = new JTextField(25); p.add(ccLabel, "label"); p.add(ccField, "field"); return p; } class LabeledPairLayout implements LayoutManager { List<Component> labels = new ArrayList<Component>(); List<Component> fields = new ArrayList<Component>(); int yGap = 2; int xGap = 2; public void addLayoutComponent(String s, Component c) { if (s.equals("label")) { labels.add(c); } else { fields.add(c); } } public void layoutContainer(Container c) { Insets insets = c.getInsets(); int labelWidth = 0; for (Component comp : labels) { labelWidth = Math.max(labelWidth, comp.getPreferredSize().width); } int yPos = insets.top; Iterator<Component> fieldIter = fields.listIterator(); Iterator<Component> labelIter = labels.listIterator(); while (labelIter.hasNext() && fieldIter.hasNext()) { JComponent label = (JComponent) labelIter.next(); JComponent field = (JComponent) fieldIter.next(); int height = Math.max(label.getPreferredSize().height, field. getPreferredSize().height); label.setBounds(insets.left, yPos, labelWidth, height); field.setBounds(insets.left + labelWidth + xGap, yPos, c.getSize().width - (labelWidth + xGap + insets.left + insets.right), height); yPos += (height + yGap); } } public Dimension minimumLayoutSize(Container c) { Insets insets = c.getInsets(); int labelWidth = 0; for (Component comp : labels) { labelWidth = Math.max(labelWidth, comp.getPreferredSize().width); } int yPos = insets.top; Iterator<Component> labelIter = labels.listIterator(); Iterator<Component> fieldIter = fields.listIterator(); while (labelIter.hasNext() && fieldIter.hasNext()) { Component label = labelIter.next(); Component field = fieldIter.next(); int height = Math.max(label.getPreferredSize().height, field. getPreferredSize().height); yPos += (height + yGap); } return new Dimension(labelWidth * 3, yPos); } public Dimension preferredLayoutSize(Container c) { Dimension d = minimumLayoutSize(c); d.width *= 2; return d; } public void removeLayoutComponent(Component c) { } } }
import { Person } from './person.entity';
import { Body, Controller, Delete, Get, Param, Patch, Post, Query, UseGuards } from '@nestjs/common';
import { AuthGuard } from '@nestjs/passport';
import { PeopleService } from './person.service';
import { PersonDto, PersonParams, PersonQuery } from './person.dto';
import { CarService } from '../car/car.service';
import { ApiBearerAuth, ApiTags } from '@nestjs/swagger';

@ApiBearerAuth()
@ApiTags('people')
@Controller()
export class PersonController {
  constructor(private peopleService: PeopleService, private carService: CarService) {
  }

  @UseGuards(AuthGuard('jwt'))
  @Get()
  async findQuery(@Query() query: PersonQuery): Promise<Person[]> {
    return this.peopleService.personRepository.find({ where: query });
  }

  @UseGuards(AuthGuard('jwt'))
  @Get(':id')
  async findOne(@Param() { id }: PersonParams): Promise<Person> {
    return this.peopleService.personRepository.findOne({ relations: ['car'], where: { id } });
  }

  @UseGuards(AuthGuard('jwt'))
  @Post()
  async create(@Body() personDto: PersonDto): Promise<Person> {
    const insertResult = await this.peopleService.personRepository.insert(personDto);
    const car = await this.carService.carRepository.findOne(personDto.carId || 1);
    return { id: insertResult.identifiers[0].id, ...personDto, car };
  }

  @UseGuards(AuthGuard('jwt'))
  @Patch(':id')
  async patch(@Param() params: PersonParams, @Body() personDto: PersonDto) {
    // Await the update so that database errors surface as a failed request.
    await this.peopleService.personRepository.update(params.id, personDto);
  }

  @UseGuards(AuthGuard('jwt'))
  @Delete(':id')
  async delete(@Param() params: PersonParams) {
    await this.peopleService.personRepository.delete(params.id);
  }
}
import type { NextPage } from "next"; import Image from "next/image"; import { motion } from "framer-motion"; import Square from "./../assets/vector/square.svg"; import Triangle from "./../assets/vector/triangle.svg"; import Circle from "./../assets/vector/circle.svg"; import Code from "./../assets/vector/code-bg.svg"; import Star from "./../assets/vector/star.svg"; const Background: NextPage = () => { return ( <div className='background'> <motion.figure initial={{ x: "600px", y: 0, rotate: "0deg", opacity: 0 }} animate={{ x: "1200px", y: 0, rotate: "80deg", opacity: 1 }} transition={{ duration: 1, type: "spring", stiffness: 100, delay: 0.5 }} exit={{ x: "600px", y: 0, rotate: "0deg", opacity: 0 }} > <Square /> </motion.figure> <motion.figure initial={{ x: "300px", y: 0, rotate: "0deg", opacity: 0 }} animate={{ x: "-300px", rotate: "80deg", opacity: 1 }} transition={{ duration: 1, type: "spring", stiffness: 100, delay: 0.5 }} exit={{ x: "300px", y: 0, rotate: "0deg", opacity: 0 }} > <Triangle /> </motion.figure> <motion.figure initial={{ x: 0, y: 0, rotate: "0deg", opacity: 0 }} animate={{ x: "-500px", y: "-300px", rotate: "80deg", opacity: 1 }} transition={{ duration: 1, type: "spring", stiffness: 100, delay: 0.5 }} exit={{ x: 0, y: 0, rotate: "0deg", opacity: 0 }} > <Circle /> </motion.figure> <motion.figure initial={{ x: "-300px", y: 0, rotate: "0deg", opacity: 0 }} animate={{ x: "-300px", y: "-300px", rotate: "180deg", opacity: 1 }} transition={{ duration: 1, type: "spring", stiffness: 100, delay: 0.5 }} exit={{ x: "-300px", y: 0, rotate: "0deg", opacity: 0 }} > <Code /> </motion.figure> <motion.figure initial={{ x: "-600px", y: 0, rotate: "0deg", opacity: 0 }} animate={{ x: "-100px", y: "-300px", rotate: "80deg", opacity: 1 }} transition={{ duration: 1, type: "spring", stiffness: 100, delay: 0.5 }} exit={{ x: "-600px", y: 0, rotate: "0deg", opacity: 0 }} > <Star /> </motion.figure> </div> ); }; export default Background;
/** * Returns a new effect that repeats this effect the specified number of times * or until the first failure. Repeats are in addition to the first execution, * so that `io.repeatN(1)` yields an effect that executes `io`, and then if * that succeeds, executes `io` an additional time. * * @tsplus fluent ets/Effect repeatN */ export function repeatN_<R, E, A>( self: Effect<R, E, A>, n: number, __tsplusTrace?: string ): Effect<R, E, A> { return Effect.suspendSucceed(() => { function loop(n: number): Effect<R, E, A> { return self.flatMap((a) => n <= 0 ? Effect.succeedNow(a) : Effect.yieldNow > loop(n - 1)) } return loop(n) }) } /** * Returns a new effect that repeats this effect the specified number of times * or until the first failure. Repeats are in addition to the first execution, * so that `io.repeatN(1)` yields an effect that executes `io`, and then if * that succeeds, executes `io` an additional time. * * @tsplus static ets/Effect/Aspects repeatN */ export const repeatN = Pipeable(repeatN_)
// NewSubmodule creates a new entry in the Submodule list of the // OpenconfigModuleCatalog_Organizations_Organization_Modules_Module_Submodules struct. The keys of the list are populated from the input // arguments. func (t *OpenconfigModuleCatalog_Organizations_Organization_Modules_Module_Submodules) NewSubmodule(Name string) (*OpenconfigModuleCatalog_Organizations_Organization_Modules_Module_Submodules_Submodule, error) { if t.Submodule == nil { t.Submodule = make(map[string]*OpenconfigModuleCatalog_Organizations_Organization_Modules_Module_Submodules_Submodule) } key := Name if _, ok := t.Submodule[key]; ok { return nil, fmt.Errorf("duplicate key %v for list Submodule", key) } t.Submodule[key] = &OpenconfigModuleCatalog_Organizations_Organization_Modules_Module_Submodules_Submodule{ Name: &Name, } return t.Submodule[key], nil }
export * from './Binding'; export * from './Expression'; export * from './StringTemplate'; export * from './View'; export * from './SubscribableView'; export * from './Store'; export * from './ExposedRecordView'; export * from './ExposedValueView'; export * from './ReadOnlyDataView'; export * from './ZoomIntoPropertyView'; export * from './StructuredSelector'; export * from './computable'; export * from './getSelector'; export * from './isSelector'; export * from './Grouper'; export * from './comparer'; export * from './enableFatArrowExpansion'; export * from './ops/index'; export * from './diff/index'; export * from './Ref'; export * from './ArrayRef'; export * from './StoreProxy'; export * from "./AugmentedViewBase"; export * from "./ArrayElementView"; export * from "./getAccessor"; export * from "./defaultCompare";
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.calcite.util;

import org.apache.calcite.avatica.util.DateTimeUtils;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;

import org.checkerframework.checker.nullness.qual.Nullable;

import java.util.Calendar;
import java.util.regex.Pattern;

/**
 * Time literal.
 *
 * <p>Immutable, internally represented as a string (in ISO format),
 * and can support unlimited precision (milliseconds, nanoseconds).
 */
public class TimeString implements Comparable<TimeString> {
  private static final Pattern PATTERN =
      Pattern.compile("[0-9][0-9]:[0-9][0-9]:[0-9][0-9](\\.[0-9]*[1-9])?");

  final String v;

  /** Internal constructor, no validation. */
  private TimeString(String v, @SuppressWarnings("unused") boolean ignore) {
    this.v = v;
  }

  /** Creates a TimeString. */
  @SuppressWarnings("method.invocation.invalid")
  public TimeString(String v) {
    this(v, false);
    Preconditions.checkArgument(PATTERN.matcher(v).matches(),
        "Invalid time format:", v);
    Preconditions.checkArgument(getHour() >= 0 && getHour() < 24,
        "Hour out of range:", getHour());
    Preconditions.checkArgument(getMinute() >= 0 && getMinute() < 60,
        "Minute out of range:", getMinute());
    Preconditions.checkArgument(getSecond() >= 0 && getSecond() < 60,
        "Second out of range:", getSecond());
  }

  /** Creates a TimeString for hour, minute and second values. */
  public TimeString(int h, int m, int s) {
    this(hms(h, m, s), false);
  }

  /** Validates an hour-minute-second value and converts to a string. */
  private static String hms(int h, int m, int s) {
    Preconditions.checkArgument(h >= 0 && h < 24, "Hour out of range:", h);
    Preconditions.checkArgument(m >= 0 && m < 60, "Minute out of range:", m);
    Preconditions.checkArgument(s >= 0 && s < 60, "Second out of range:", s);
    final StringBuilder b = new StringBuilder();
    DateTimeStringUtils.hms(b, h, m, s);
    return b.toString();
  }

  /** Sets the fraction field of a {@code TimeString} to a given number
   * of milliseconds. Nukes the value set via {@link #withNanos}.
   *
   * <p>For example,
   * {@code new TimeString(2, 3, 4).withMillis(56)}
   * yields {@code TIME '02:03:04.056'}. */
  public TimeString withMillis(int millis) {
    Preconditions.checkArgument(millis >= 0 && millis < 1000);
    return withFraction(DateTimeStringUtils.pad(3, millis));
  }

  /** Sets the fraction field of a {@code TimeString} to a given number
   * of nanoseconds. Nukes the value set via {@link #withMillis(int)}.
   *
   * <p>For example,
   * {@code new TimeString(2, 3, 4).withNanos(56789)}
   * yields {@code TIME '02:03:04.000056789'}.
   */
  public TimeString withNanos(int nanos) {
    Preconditions.checkArgument(nanos >= 0 && nanos < 1000000000);
    return withFraction(DateTimeStringUtils.pad(9, nanos));
  }

  /** Sets the fraction field of a {@code TimeString}.
   * The precision is determined by the number of leading zeros.
   * Trailing zeros are stripped.
   *
   * <p>For example,
   * {@code new TimeString(2, 3, 4).withFraction("00506000")}
   * yields {@code TIME '02:03:04.00506'}. */
  public TimeString withFraction(String fraction) {
    String v = this.v;
    int i = v.indexOf('.');
    if (i >= 0) {
      v = v.substring(0, i);
    }
    while (fraction.endsWith("0")) {
      fraction = fraction.substring(0, fraction.length() - 1);
    }
    if (fraction.length() > 0) {
      v = v + "." + fraction;
    }
    return new TimeString(v);
  }

  @Override public String toString() {
    return v;
  }

  @Override public boolean equals(@Nullable Object o) {
    // The value is in canonical form (no trailing zeros).
    return o == this
        || o instanceof TimeString
        && ((TimeString) o).v.equals(v);
  }

  @Override public int hashCode() {
    return v.hashCode();
  }

  @Override public int compareTo(TimeString o) {
    return v.compareTo(o.v);
  }

  /** Creates a TimeString from a Calendar. */
  public static TimeString fromCalendarFields(Calendar calendar) {
    return new TimeString(
        calendar.get(Calendar.HOUR_OF_DAY),
        calendar.get(Calendar.MINUTE),
        calendar.get(Calendar.SECOND))
        .withMillis(calendar.get(Calendar.MILLISECOND));
  }

  public static TimeString fromMillisOfDay(int i) {
    return new TimeString(DateTimeUtils.unixTimeToString(i))
        .withMillis((int) DateTimeUtils.floorMod(i, 1000));
  }

  public TimeString round(int precision) {
    Preconditions.checkArgument(precision >= 0);
    int targetLength = 9 + precision;
    if (v.length() <= targetLength) {
      return this;
    }
    String v = this.v.substring(0, targetLength);
    while (v.length() >= 9 && (v.endsWith("0") || v.endsWith("."))) {
      v = v.substring(0, v.length() - 1);
    }
    return new TimeString(v);
  }

  public int getMillisOfDay() {
    int h = Integer.valueOf(v.substring(0, 2));
    int m = Integer.valueOf(v.substring(3, 5));
    int s = Integer.valueOf(v.substring(6, 8));
    int ms = getMillisInSecond();
    return (int) (h * DateTimeUtils.MILLIS_PER_HOUR
        + m * DateTimeUtils.MILLIS_PER_MINUTE
        + s * DateTimeUtils.MILLIS_PER_SECOND
        + ms);
  }

  private int getMillisInSecond() {
    switch (v.length()) {
    case 8: // "12:34:56"
      return 0;
    case 10: // "12:34:56.7"
      return Integer.valueOf(v.substring(9)) * 100;
    case 11: // "12:34:56.78"
      return Integer.valueOf(v.substring(9)) * 10;
    case 12: // "12:34:56.789"
    default: // "12:34:56.7890000012345"
      return Integer.valueOf(v.substring(9, 12));
    }
  }

  private int getHour() {
    return Integer.parseInt(v.substring(0, 2));
  }

  private int getMinute() {
    return Integer.parseInt(this.v.substring(3, 5));
  }

  private int getSecond() {
    return Integer.parseInt(this.v.substring(6, 8));
  }

  public Calendar toCalendar() {
    return Util.calendar(getMillisOfDay());
  }

  /** Converts this TimeString to a string, truncated or padded with
   * zeros to a given precision. */
  public String toString(int precision) {
    Preconditions.checkArgument(precision >= 0);
    final int p = precision();
    if (precision < p) {
      return round(precision).toString(precision);
    }
    if (precision > p) {
      String s = v;
      if (p == 0) {
        s += ".";
      }
      return s + Strings.repeat("0", precision - p);
    }
    return v;
  }

  private int precision() {
    return v.length() < 9 ? 0 : (v.length() - 9);
  }
}
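// ---------------------------------------------------------------------------
// Hedged usage sketch for the class above (not part of the Calcite source).
// It exercises only methods defined in TimeString; assumes it is compiled in
// the org.apache.calcite.util package or imports the class from it.
class TimeStringDemo {
  public static void main(String[] args) {
    TimeString t = new TimeString(2, 3, 4).withMillis(56);
    System.out.println(t);                  // 02:03:04.056
    System.out.println(t.toString(6));      // 02:03:04.056000 (padded to precision 6)
    System.out.println(t.round(1));         // 02:03:04 (trailing zero fraction stripped)
    System.out.println(t.getMillisOfDay()); // 7384056
  }
}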
/* Input: string1, string2
 * Output: boolean (true/false)
 *
 * Returns true when the two strings are zero or one edits
 * (insert, remove or replace a single character) apart.
 */
class OneAway {

    public static boolean areStringsOneOrZeroEditsAway(String string1, String string2) {
        int string1Length = string1.length();
        int string2Length = string2.length();

        if (Math.abs(string1Length - string2Length) > 1) {
            return false;
        }

        if (string1Length == string2Length) {
            // Same length: at most one replacement is allowed.
            int diffCount = 0;
            for (int i = 0; i < string1Length; i++) {
                if (string1.charAt(i) != string2.charAt(i)) {
                    diffCount++;
                    if (diffCount > 1) {
                        return false;
                    }
                }
            }
            return true;
        }

        // Lengths differ by exactly one: at most one insertion/removal is allowed.
        String biggerString = string1Length > string2Length ? string1 : string2;
        String smallerString = string1Length > string2Length ? string2 : string1;

        int i = 0;
        int j = 0;
        int diffCount = 0;
        while (i < biggerString.length() && j < smallerString.length()) {
            // Compare the longer string against the shorter one; on a mismatch,
            // skip the extra character in the longer string.
            if (biggerString.charAt(i) != smallerString.charAt(j)) {
                i++;
                diffCount++;
                if (diffCount > 1) {
                    return false;
                }
            } else {
                i++;
                j++;
            }
        }
        // Any unmatched trailing character in the longer string is the single
        // allowed edit, so nothing more to check here.
        return true;
    }

    public static void main(String[] args) {
        if (args.length < 2) {
            System.out.println("Proper usage: java OneAway string1 string2");
            System.exit(0);
        }
        String string1 = args[0];
        String string2 = args[1];
        System.out.println(areStringsOneOrZeroEditsAway(string1, string2));
    }
}
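// Worked examples for the check above (the classic "one away" cases):
//
//   java OneAway pale ple    -> true  (one character removed)
//   java OneAway pales pale  -> true  (one character inserted)
//   java OneAway pale bale   -> true  (one character replaced)
//   java OneAway pale bake   -> false (two characters differ)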
#include <string.h>

#include <grub/elf.h>
#include <grub/module_verifier.h>
#include <grub/util/misc.h>

#if defined(MODULEVERIFIER_ELF32)
# define SUFFIX(x) x ## 32
# define ELFCLASSXX ELFCLASS32
# define Elf_Ehdr Elf32_Ehdr
# define Elf_Phdr Elf32_Phdr
# define Elf_Nhdr Elf32_Nhdr
# define Elf_Addr Elf32_Addr
# define Elf_Sym Elf32_Sym
# define Elf_Off Elf32_Off
# define Elf_Shdr Elf32_Shdr
# define Elf_Rela Elf32_Rela
# define Elf_Rel Elf32_Rel
# define Elf_Word Elf32_Word
# define Elf_Half Elf32_Half
# define Elf_Section Elf32_Section
# define ELF_R_SYM(val) ELF32_R_SYM(val)
# define ELF_R_TYPE(val) ELF32_R_TYPE(val)
# define ELF_ST_TYPE(val) ELF32_ST_TYPE(val)
#elif defined(MODULEVERIFIER_ELF64)
# define SUFFIX(x) x ## 64
# define ELFCLASSXX ELFCLASS64
# define Elf_Ehdr Elf64_Ehdr
# define Elf_Phdr Elf64_Phdr
# define Elf_Nhdr Elf64_Nhdr
# define Elf_Addr Elf64_Addr
# define Elf_Sym Elf64_Sym
# define Elf_Off Elf64_Off
# define Elf_Shdr Elf64_Shdr
# define Elf_Rela Elf64_Rela
# define Elf_Rel Elf64_Rel
# define Elf_Word Elf64_Word
# define Elf_Half Elf64_Half
# define Elf_Section Elf64_Section
# define ELF_R_SYM(val) ELF64_R_SYM(val)
# define ELF_R_TYPE(val) ELF64_R_TYPE(val)
# define ELF_ST_TYPE(val) ELF64_ST_TYPE(val)
#else
#error "I'm confused"
#endif

#define grub_target_to_host32(x) (grub_target_to_host32_real (arch, (x)))
#define grub_host_to_target32(x) (grub_host_to_target32_real (arch, (x)))
#define grub_target_to_host64(x) (grub_target_to_host64_real (arch, (x)))
#define grub_host_to_target64(x) (grub_host_to_target64_real (arch, (x)))
#define grub_host_to_target_addr(x) (grub_host_to_target_addr_real (arch, (x)))
#define grub_target_to_host16(x) (grub_target_to_host16_real (arch, (x)))
#define grub_host_to_target16(x) (grub_host_to_target16_real (arch, (x)))
#define grub_target_to_host(val) grub_target_to_host_real(arch, (val))

static inline grub_uint32_t
grub_target_to_host32_real (const struct grub_module_verifier_arch *arch,
                            grub_uint32_t in)
{
  if (arch->bigendian)
    return grub_be_to_cpu32 (in);
  else
    return grub_le_to_cpu32 (in);
}

static inline grub_uint64_t
grub_target_to_host64_real (const struct grub_module_verifier_arch *arch,
                            grub_uint64_t in)
{
  if (arch->bigendian)
    return grub_be_to_cpu64 (in);
  else
    return grub_le_to_cpu64 (in);
}

static inline grub_uint64_t
grub_host_to_target64_real (const struct grub_module_verifier_arch *arch,
                            grub_uint64_t in)
{
  if (arch->bigendian)
    return grub_cpu_to_be64 (in);
  else
    return grub_cpu_to_le64 (in);
}

static inline grub_uint32_t
grub_host_to_target32_real (const struct grub_module_verifier_arch *arch,
                            grub_uint32_t in)
{
  if (arch->bigendian)
    return grub_cpu_to_be32 (in);
  else
    return grub_cpu_to_le32 (in);
}

static inline grub_uint16_t
grub_target_to_host16_real (const struct grub_module_verifier_arch *arch,
                            grub_uint16_t in)
{
  if (arch->bigendian)
    return grub_be_to_cpu16 (in);
  else
    return grub_le_to_cpu16 (in);
}

static inline grub_uint16_t
grub_host_to_target16_real (const struct grub_module_verifier_arch *arch,
                            grub_uint16_t in)
{
  if (arch->bigendian)
    return grub_cpu_to_be16 (in);
  else
    return grub_cpu_to_le16 (in);
}

static inline grub_uint64_t
grub_host_to_target_addr_real (const struct grub_module_verifier_arch *arch,
                               grub_uint64_t in)
{
  if (arch->voidp_sizeof == 8)
    return grub_host_to_target64_real (arch, in);
  else
    return grub_host_to_target32_real (arch, in);
}

static inline grub_uint64_t
grub_target_to_host_real (const struct grub_module_verifier_arch *arch,
                          grub_uint64_t in)
{ if (arch->voidp_sizeof == 8) return grub_target_to_host64_real (arch, in); else return grub_target_to_host32_real (arch, in); } static Elf_Shdr * find_section (const struct grub_module_verifier_arch *arch, Elf_Ehdr *e, const char *name) { Elf_Shdr *s; const char *str; unsigned i; s = (Elf_Shdr *) ((char *) e + grub_target_to_host (e->e_shoff) + grub_target_to_host16 (e->e_shstrndx) * grub_target_to_host16 (e->e_shentsize)); str = (char *) e + grub_target_to_host (s->sh_offset); for (i = 0, s = (Elf_Shdr *) ((char *) e + grub_target_to_host (e->e_shoff)); i < grub_target_to_host16 (e->e_shnum); i++, s = (Elf_Shdr *) ((char *) s + grub_target_to_host16 (e->e_shentsize))) if (strcmp (str + grub_target_to_host32 (s->sh_name), name) == 0) return s; return NULL; } static void check_license (const char * const filename, const struct grub_module_verifier_arch *arch, Elf_Ehdr *e) { Elf_Shdr *s = find_section (arch, e, ".module_license"); if (s && (strcmp ((char *) e + grub_target_to_host(s->sh_offset), "LICENSE=GPLv3") == 0 || strcmp ((char *) e + grub_target_to_host(s->sh_offset), "LICENSE=GPLv3+") == 0 || strcmp ((char *) e + grub_target_to_host(s->sh_offset), "LICENSE=GPLv2+") == 0)) return; grub_util_error ("%s: incompatible license", filename); } static Elf_Sym * get_symtab (const struct grub_module_verifier_arch *arch, Elf_Ehdr *e, Elf_Word *size, Elf_Word *entsize) { unsigned i; Elf_Shdr *s, *sections; Elf_Sym *sym; sections = (Elf_Shdr *) ((char *) e + grub_target_to_host (e->e_shoff)); for (i = 0, s = sections; i < grub_target_to_host16 (e->e_shnum); i++, s = (Elf_Shdr *) ((char *) s + grub_target_to_host16 (e->e_shentsize))) if (grub_target_to_host32 (s->sh_type) == SHT_SYMTAB) break; if (i == grub_target_to_host16 (e->e_shnum)) return NULL; sym = (Elf_Sym *) ((char *) e + grub_target_to_host (s->sh_offset)); *size = grub_target_to_host (s->sh_size); *entsize = grub_target_to_host (s->sh_entsize); return sym; } static int is_whitelisted (const char *modname, const char **whitelist) { const char **ptr; if (!whitelist) return 0; if (!modname) return 0; for (ptr = whitelist; *ptr; ptr++) if (strcmp (modname, *ptr) == 0) return 1; return 0; } static void check_symbols (const struct grub_module_verifier_arch *arch, Elf_Ehdr *e, const char *modname, const char **whitelist_empty) { Elf_Sym *sym; Elf_Word size, entsize; unsigned i; /* Module without symbol table and without .moddeps section is useless at boot time, so catch it early to prevent build errors */ sym = get_symtab (arch, e, &size, &entsize); if (!sym) { Elf_Shdr *s; /* However some modules are dependencies-only, e.g. insmod all_video pulls in all video drivers. Some platforms e.g. xen have no video drivers, so the module does nothing. 
*/ if (is_whitelisted (modname, whitelist_empty)) return; s = find_section (arch, e, ".moddeps"); if (!s) grub_util_error ("%s: no symbol table and no .moddeps section", modname); if (!s->sh_size) grub_util_error ("%s: no symbol table and empty .moddeps section", modname); return; } for (i = 0; i < size / entsize; i++, sym = (Elf_Sym *) ((char *) sym + entsize)) { unsigned char type = ELF_ST_TYPE (sym->st_info); switch (type) { case STT_NOTYPE: case STT_OBJECT: case STT_FUNC: case STT_SECTION: case STT_FILE: break; default: return grub_util_error ("%s: unknown symbol type `%d'", modname, (int) type); } } } static int is_symbol_local(Elf_Sym *sym) { switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: case STT_OBJECT: if (sym->st_name != 0 && sym->st_shndx == 0) return 0; return 1; case STT_FUNC: case STT_SECTION: return 1; default: return 0; } } static void section_check_relocations (const char * const modname, const struct grub_module_verifier_arch *arch, void *ehdr, Elf_Shdr *s, size_t target_seg_size) { Elf_Rel *rel, *max; Elf_Sym *symtab; Elf_Word symtabsize, symtabentsize; symtab = get_symtab (arch, ehdr, &symtabsize, &symtabentsize); if (!symtab) grub_util_error ("%s: relocation without symbol table", modname); for (rel = (Elf_Rel *) ((char *) ehdr + grub_target_to_host (s->sh_offset)), max = (Elf_Rel *) ((char *) rel + grub_target_to_host (s->sh_size)); rel < max; rel = (Elf_Rel *) ((char *) rel + grub_target_to_host (s->sh_entsize))) { Elf_Sym *sym; unsigned i; if (target_seg_size < grub_target_to_host (rel->r_offset)) grub_util_error ("%s: reloc offset is out of the segment", modname); grub_uint32_t type = ELF_R_TYPE (grub_target_to_host (rel->r_info)); if (arch->machine == EM_SPARCV9) type &= 0xff; for (i = 0; arch->supported_relocations[i] != -1; i++) if (type == arch->supported_relocations[i]) break; if (arch->supported_relocations[i] != -1) continue; if (!arch->short_relocations) grub_util_error ("%s: unsupported relocation 0x%x", modname, type); for (i = 0; arch->short_relocations[i] != -1; i++) if (type == arch->short_relocations[i]) break; if (arch->short_relocations[i] == -1) grub_util_error ("%s: unsupported relocation 0x%x", modname, type); sym = (Elf_Sym *) ((char *) symtab + symtabentsize * ELF_R_SYM (grub_target_to_host (rel->r_info))); if (is_symbol_local (sym)) continue; grub_util_error ("%s: relocation 0x%x is not module-local", modname, type); } #if defined(MODULEVERIFIER_ELF64) if (arch->machine == EM_AARCH64) { unsigned unmatched_adr_got_page = 0; Elf_Rela *rel2; for (rel = (Elf_Rel *) ((char *) ehdr + grub_target_to_host (s->sh_offset)), max = (Elf_Rel *) ((char *) rel + grub_target_to_host (s->sh_size)); rel < max; rel = (Elf_Rel *) ((char *) rel + grub_target_to_host (s->sh_entsize))) { switch (ELF_R_TYPE (grub_target_to_host (rel->r_info))) { case R_AARCH64_ADR_GOT_PAGE: unmatched_adr_got_page++; for (rel2 = (Elf_Rela *) ((char *) rel + grub_target_to_host (s->sh_entsize)); rel2 < (Elf_Rela *) max; rel2 = (Elf_Rela *) ((char *) rel2 + grub_target_to_host (s->sh_entsize))) if (ELF_R_SYM (rel2->r_info) == ELF_R_SYM (rel->r_info) && ((Elf_Rela *) rel)->r_addend == rel2->r_addend && ELF_R_TYPE (rel2->r_info) == R_AARCH64_LD64_GOT_LO12_NC) break; if (rel2 >= (Elf_Rela *) max) grub_util_error ("%s: ADR_GOT_PAGE without matching LD64_GOT_LO12_NC", modname); break; case R_AARCH64_LD64_GOT_LO12_NC: if (unmatched_adr_got_page == 0) grub_util_error ("%s: LD64_GOT_LO12_NC without matching ADR_GOT_PAGE", modname); unmatched_adr_got_page--; break; } } } #endif } static 
void check_relocations (const char * const modname, const struct grub_module_verifier_arch *arch, Elf_Ehdr *e) { Elf_Shdr *s; unsigned i; for (i = 0, s = (Elf_Shdr *) ((char *) e + grub_target_to_host (e->e_shoff)); i < grub_target_to_host16 (e->e_shnum); i++, s = (Elf_Shdr *) ((char *) s + grub_target_to_host16 (e->e_shentsize))) if (grub_target_to_host32 (s->sh_type) == SHT_REL || grub_target_to_host32 (s->sh_type) == SHT_RELA) { Elf_Shdr *ts; if (grub_target_to_host32 (s->sh_type) == SHT_REL && !(arch->flags & GRUB_MODULE_VERIFY_SUPPORTS_REL)) grub_util_error ("%s: unsupported SHT_REL", modname); if (grub_target_to_host32 (s->sh_type) == SHT_RELA && !(arch->flags & GRUB_MODULE_VERIFY_SUPPORTS_RELA)) grub_util_error ("%s: unsupported SHT_RELA", modname); /* Find the target segment. */ if (grub_target_to_host32 (s->sh_info) >= grub_target_to_host16 (e->e_shnum)) grub_util_error ("%s: orphaned reloc section", modname); ts = (Elf_Shdr *) ((char *) e + grub_target_to_host (e->e_shoff) + grub_target_to_host32 (s->sh_info) * grub_target_to_host16 (e->e_shentsize)); section_check_relocations (modname, arch, e, s, grub_target_to_host (ts->sh_size)); } } void SUFFIX(grub_module_verify) (const char * const filename, void *module_img, size_t size, const struct grub_module_verifier_arch *arch, const char **whitelist_empty) { Elf_Ehdr *e = module_img; /* Check the header size. */ if (size < sizeof (Elf_Ehdr)) grub_util_error ("%s: ELF header smaller than expected", filename); /* Check the magic numbers. */ if (e->e_ident[EI_MAG0] != ELFMAG0 || e->e_ident[EI_MAG1] != ELFMAG1 || e->e_ident[EI_MAG2] != ELFMAG2 || e->e_ident[EI_MAG3] != ELFMAG3 || e->e_ident[EI_VERSION] != EV_CURRENT || grub_target_to_host32 (e->e_version) != EV_CURRENT) grub_util_error ("%s: invalid arch-independent ELF magic", filename); if (e->e_ident[EI_CLASS] != ELFCLASSXX || e->e_ident[EI_DATA] != (arch->bigendian ? ELFDATA2MSB : ELFDATA2LSB) || grub_target_to_host16 (e->e_machine) != arch->machine) grub_util_error ("%s: invalid arch-dependent ELF magic", filename); if (grub_target_to_host16 (e->e_type) != ET_REL) { grub_util_error ("%s: this ELF file is not of the right type", filename); } /* Make sure that every section is within the core. */ if (size < grub_target_to_host (e->e_shoff) + (grub_uint32_t) grub_target_to_host16 (e->e_shentsize) * grub_target_to_host16(e->e_shnum)) { grub_util_error ("%s: ELF sections outside core", filename); } check_license (filename, arch, e); Elf_Shdr *s; const char *modname; s = find_section (arch, e, ".modname"); if (!s) grub_util_error ("%s: no module name found", filename); modname = (const char *) e + grub_target_to_host (s->sh_offset); check_symbols(arch, e, modname, whitelist_empty); check_relocations(modname, arch, e); }
package com.github.mufanh.filecoin.backend.filscan; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.*; import com.github.mufanh.jsonrpc4j.*; import lombok.extern.slf4j.Slf4j; import okhttp3.OkHttpClient; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import java.io.IOException; import java.lang.reflect.Type; import java.util.concurrent.TimeUnit; /** * @author xinquan.huangxq */ @Configuration @Slf4j public class FilscanConfiguration { @Bean public FilscanService filscanService(FilscanProperties filscanProperties) { OkHttpClient client = new OkHttpClient.Builder() .connectTimeout(filscanProperties.getConnectTimeout(), TimeUnit.SECONDS) .readTimeout(filscanProperties.getReadTimeout(), TimeUnit.SECONDS) .writeTimeout(filscanProperties.getWriteTimeout(), TimeUnit.SECONDS) .build(); JsonRpcRetrofit jsonRpcRetrofit = new JsonRpcRetrofit.Builder() .httpUrl(filscanProperties.getApiGateway()) .jsonBodyConverter(new FilscanJsonBodyConverter()) .callFactory(client) .build(); return jsonRpcRetrofit.create(FilscanService.class); } private static class FilscanJsonBodyConverter implements JsonBodyConverter { private static final ObjectMapper mapper = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); @Override public String convertRequest(JsonRpcRequest request) throws JsonConvertException { try { String requestJson = mapper.writeValueAsString(request); log.debug("JSON-RPC request : \n{}", requestJson); return requestJson; } catch (JsonProcessingException e) { throw new JsonConvertException("JSON-RPC request convert error.", e); } } @Override public <T> JsonRpcResponse<T> convertResponse(Type type, String response) throws JsonConvertException { log.debug("JSON-RPC response : \n{}", response); try { JsonRpcResponse<T> jsonRpcResponse = new JsonRpcResponse<>(); JsonNode jsonNode = mapper.readTree(response); if (jsonNode == null) { return jsonRpcResponse; } JsonNode id = jsonNode.get("id"); if (id != null) { jsonRpcResponse.setId(id.asLong()); } JsonNode jsonrpc = jsonNode.get("jsonrpc"); if (jsonrpc != null) { jsonRpcResponse.setJsonrpc(jsonrpc.asText()); } jsonRpcResponse.setError(parseJsonNode(jsonNode.get("error"), JsonRpcResponse.Error.class)); JsonNode resultNode = jsonNode.get("result"); if (resultNode != null) { jsonRpcResponse.setResult(parseJsonNode(resultNode.get("data"), type)); } return jsonRpcResponse; } catch (Exception e) { throw new JsonConvertException("JSON-RPC response convert error.", e); } } private static <T> T parseJsonNode(JsonNode jsonNode, Type type) throws IOException { if (jsonNode == null) { return null; } JsonParser parser = mapper.treeAsTokens(jsonNode); JavaType javaType = mapper.getTypeFactory().constructType(type); return mapper.readValue(parser, javaType); } } }
// GetNamespaces - Return list of all namespaces in cluster
func GetNamespaces(w http.ResponseWriter, r *http.Request) {
	namespaces, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{})
	if err != nil {
		fmt.Println("### Kubernetes API error", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// Bail out: without this return, the nil result would be dereferenced below.
		return
	}

	namespacesJSON, _ := json.Marshal(namespaces.Items)
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Add("Content-Type", "application/json")
	w.Write(namespacesJSON)
}
#pragma once

#include <cstddef>

namespace P3D {
	void* aligned_malloc(std::size_t size, std::size_t align);
	void aligned_free(void* ptr);
};
package com.nineeyes.jp.flutter_ui_examples.platform_view_example.plugins.simple;

import android.content.Context;
import android.content.Intent;

import io.flutter.Log;
import io.flutter.plugin.common.EventChannel;
import io.flutter.plugin.common.MethodCall;
import io.flutter.plugin.common.MethodChannel;
import io.flutter.plugin.common.PluginRegistry;

import com.nineeyes.jp.flutter_ui_examples.HelloWorldActivity;

/**
 * Basic template for a native integration plugin
 */
public class SimplePlatformPlugin implements MethodChannel.MethodCallHandler,
        EventChannel.StreamHandler, PluginRegistry.ActivityResultListener {

    private static final String METHOD_CHANNEL = "local_plugins/SimplePlatformPlugin/method";
    private static final String EVENT_CHANNEL = "local_plugins/SimplePlatformPlugin/event";

    Context mContext = null;
    EventChannel.EventSink mEventSink = null;

    public SimplePlatformPlugin(Context context) {
        mContext = context;
    }

    public static void registerWith(PluginRegistry.Registrar registrar) {
        SimplePlatformPlugin my_instance = new SimplePlatformPlugin((Context) registrar.activity());

        // MethodChannel is 'Flutter' -> 'OSNative'
        MethodChannel channel = new MethodChannel(registrar.messenger(), METHOD_CHANNEL);
        channel.setMethodCallHandler(my_instance);

        // EventChannel is 'OSNative' -> 'Flutter'
        EventChannel eventchannel = new EventChannel(registrar.messenger(), EVENT_CHANNEL);
        eventchannel.setStreamHandler(my_instance);

        registrar.addActivityResultListener(my_instance);
    }

    @Override
    public void onMethodCall(MethodCall call, MethodChannel.Result result) {
        String success_response = "A Java method was called from Flutter";
        Log.d("SimplePlatformPlugin", call.toString());
        if (call.method.equals("helloWorld")) {
            result.success(success_response);
            if (mEventSink != null) {
                mEventSink.success(success_response);
            }
        } else if (call.method.equals("switchView")) {
            Intent intent = new Intent(mContext, HelloWorldActivity.class);
            mContext.startActivity(intent);
        } else {
            result.notImplemented();
        }
    }

    @Override
    public void onListen(Object arguments, EventChannel.EventSink events) {
        mEventSink = events;
    }

    @Override
    public void onCancel(Object arguments) {
    }

    @Override
    public boolean onActivityResult(int requestCode, int resultCode, Intent data) {
        return false;
    }
}
#include <iostream>
#include <cstdio>
using namespace std;

long long n, a, b;
char c[2000001];

int main() {
    long long t, cnt, cnt1;
    long long ans;
    scanf("%lld", &t);
    while (t--) {
        scanf("%lld%lld%lld", &n, &a, &b);
        scanf("%s", c);
        ans = 0;
        cnt = 0;   // length of the current run of '0'
        cnt1 = 0;  // length of the current run of '1'
        long long now = 0, now1 = 0;
        // Count leading zeros.
        for (int i = 0; i <= n - 1; i++) {
            if (c[i] == '1') break;
            now++;
        }
        // The string is all zeros: answer directly.
        if (now == n) {
            cout << n * a + (n + 1) * b << endl;
            continue;
        }
        // Count trailing zeros.
        for (int i = n - 1; i >= 0; i--) {
            if (c[i] == '1') break;
            now1++;
        }
        // Cost contributed by the leading and trailing zero runs.
        ans += (now1 + now + 1) * a + (now1 + now) * b;
        // Scan the middle section, flushing each finished run of equal characters.
        for (int i = now; i <= n - now1; i++) {
            if (c[i] == '0') {
                cnt++;
                if (cnt1 > 0) {
                    ans += (cnt1 + 1) * b * 2 + (cnt1 + 1) * a;
                    cnt1 = 0;
                }
            } else {
                cnt1++;
                if (cnt > 0) {
                    // Pick the cheaper of the two ways to account for a zero-run.
                    if (cnt <= 2 * a / b + 1) {
                        ans += (cnt - 1) * a + 2 * (cnt - 1) * b;
                    } else {
                        ans += (cnt + 1) * a + (cnt - 1) * b;
                    }
                    cnt = 0;
                }
            }
        }
        cout << ans << endl;
    }
}
Russian River Brewing maxes out On Sunday, the unthinkable happened at Russian River Brewing's restaurant in Santa Rosa: it ran out of beer, at least the stuff in a bottle. "We had a crazy weekend and we had already allocated everything out (to other stores and restaurants) so we didn't have any more beer," owner Natalie Cilurzo said. Workers hustled over to the main brewery off Santa Rosa Avenue and snagged a few cases of the flagship Pliny the Elder IPA. "That probably got us through about 11:30 in the morning. And then that was that," she said. Sunday's unexpected beer crisis was the result of two simple facts. First, local and national demand for Russian River's brews has reached the level of a near frenzy. And second, the brewery has reached the absolute limits of its capacity, around 14,100 barrels per year, or about 437,100 gallons. "We're literally wringing out the wet shirt" to get every last drop of beer, Cilurzo said. From humble beginnings as a small brewpub on Fourth Street in 2004, Russian River has grown into one of the most talked-about breweries on the planet. The beers routinely appear at or near the top of lists of popular and critical favorites. The readers of the website Beer Advocate give Russian River three of the top 10 spots on the "Top 250 beers" list. Zymurgy magazine, the journal of the American Homebrewers Association, has rated Pliny the Elder as the best beer in America for five consecutive years. Today, the company has 75 employees. It does not disclose its revenues. Yet for all the buzz, the beer is only available in three states — California, Colorado, and Oregon — along with a few bars in the beer-mad city of Philadelphia. There is a years-long waiting list for restaurants and beer stores to get their hands on anything from Russian River. Stores lucky enough to get some Russian River end up selling out almost immediately. "Now everyone is onto the fact that we get our delivery on Friday; they start lining up about 11 or 12 to get it," said Jason Schneider, store manager of Bottle Barn in Santa Rosa. The store doesn't even bother to put the beer on the public shelves. Employees load it straight into a cooler in the back and hand it out only to customers who request it — and there is a strict four bottle per person limit. Even so, Bottle Barn is usually sold out by Friday afternoon, he said. In Rohnert Park, the owners of the store Beercraft, which specializes in small-production, hard-to-find beer, primarily from California, have been on the Russian River waiting list for more than a year. Customers frequently ask about the beer, but there is just none to be had. "We're OK with it," owner J.T. Fenn said. "We're just patiently waiting and nagging them politely." In Colorado, distributor Elite Brands spends all year meeting with bars to negotiate the tiny allocation of limited release Pliny the Younger, available on draft only for just two weeks every February. Even in doling out the year-round beers, beer manager John Sliter said, it is important to be clear with customers: Russian River makes what it makes and there is no point in asking for more. "We understood right from the get-go that they were not one of the breweries that will be doubling capacity every year," he said.
/**
 * Converts a bounded variable back to the unbounded internal parameter of
 * the sine-based bounding transform, clamping (with a warning) values that
 * fall outside [fmin, fmax].
 * \param xx bounded variable to convert
 * \param fmin lower bound
 * \param fmax upper bound
 * \param s scaling factor applied to the result
 */
double boundpin(const prevariable& xx, double fmin, double fmax, const double& s)
{
  double tinv;
  double x = value(xx);
  if (x < fmin)
  {
    if (ad_printf) (*ad_printf)("variable out of bounds in boundpin: variable = %lg", x);
    if (ad_printf) (*ad_printf)("; min = %lg", fmin);
    if (ad_printf) (*ad_printf)("; max = %lg\n", fmax);
    x = dmin(fmin + .001, fmin + .01 * (fmax - fmin));
  }
  if (x > fmax)
  {
    if (ad_printf) (*ad_printf)("variable out of bounds in boundpin: variable = %lg", x);
    if (ad_printf) (*ad_printf)("; min = %lg", fmin);
    if (ad_printf) (*ad_printf)("; max = %lg\n", fmax);
    x = dmax(fmax - .001, fmax - .01 * (fmax - fmin));
  }
  // 1.57079632679489661 is pi/2.
  tinv = ::asin(2. * (x - fmin) / (fmax - fmin) - 1.) / 1.57079632679489661;
  return (s * tinv);
}
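Read as math (my restatement of the code above; the literal constant 1.57079632679489661 is \pi/2), boundpin computes

t = \frac{s}{\pi/2}\,\arcsin\!\left(\frac{2(x - f_{min})}{f_{max} - f_{min}} - 1\right),

which is the inverse of the sine-based bounding map

x = f_{min} + \frac{f_{max} - f_{min}}{2}\left(1 + \sin\frac{\pi t}{2s}\right).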
/** * Creates ExtSource and UserExtSource if necessary for the purpose of joining users identities. * * @param user User to add UES to * @param actor Actor to add * @param extSourceName ExtSource name to add * @param extSourceType ExtSource type to add * @param loa loa in ext source * @throws PerunException when anything fails */ private void createExtSourceAndUserExtSource(User user, String actor, String extSourceName, String extSourceType, int loa) throws PerunException { ExtSource extSource = new ExtSource(extSourceName, extSourceType); try { extSource = perun.getExtSourcesManagerBl().getExtSourceByName(registrarSession, extSourceName); } catch (ExtSourceNotExistsException ex) { extSource = perun.getExtSourcesManager().createExtSource(registrarSession, extSource, null); } UserExtSource ues = new UserExtSource(); ues.setLogin(actor); ues.setLoa(loa); ues.setExtSource(extSource); perun.getUsersManager().addUserExtSource(registrarSession, user, ues); }
Effective Coordination Numbers in Ultrathin Metallic Films

Nanostructures have become a major field in materials science. With molecular beam epitaxy it is possible to engineer multilayers with unusual anisotropic properties and metastable structures. For ultrathin films, or particles of small size, the coordination numbers, or their ratios, deviate from the bulk values. We have calculated the effective coordination numbers N_i, i = 1 to 5, for thin films with different crystal structures. The thickness of the film is specified by the number of layers N_z in the z-direction, and the area in the xy plane by the number of rows of atoms N_xy along the side of a square domain. For fixed N_z, with the x-ray beam unpolarized or its electric vector parallel or perpendicular to the substrate (xy plane), the effective N_i approach their limiting values hyperbolically, and expressions are given from which the N_i can be calculated for both N_xy and N_z ≥ 2. Comparison is made with the effective coordination numbers obtained from polarized EXAFS measurements on epitaxial metallic films less than 10 monolayers thick.
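To make the hyperbolic thickness dependence concrete, here is a small illustrative calculation (my own example, not taken from the paper): for an fcc(100) film of N_z layers, ignoring lateral finite-size effects, a bulk atom has 12 nearest neighbours while each of the two free surface layers loses 4 of them, giving a layer-averaged first-shell coordination number of 12 - 8/N_z.

def n1_eff_fcc100(nz):
    """Layer-averaged first-shell coordination number of an fcc(100) film
    of nz >= 1 layers, ignoring in-plane (lateral) finite-size effects.
    Each of the two free surface layers loses 4 of its 12 neighbours."""
    return 12.0 - 8.0 / nz

for nz in (1, 2, 5, 10, 100):
    print(nz, n1_eff_fcc100(nz))  # approaches the bulk value 12 hyperbolically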
extern crate fai;
extern crate env_logger;
extern crate getopts;
extern crate byteorder;

use std::env;
use std::io::prelude::*;
use std::fs::File;
use std::process::exit;
use std::time::Duration;
use std::str::FromStr;

use getopts::Options;
use byteorder::*;

use fai::data::State;
use fai::machine::Machine;
use fai::event_pool::EventPool;
use fai::ram::Ram;
use fai::stdio_console::StdioConsole;
use fai::hardware::HardwareMessage;
use fai::device::{DeviceConfig, DeviceModel};

fn print_usage(program: &str, opts: Options) {
    let brief = format!("Usage: {} <file.bin> [options]", program);
    print!("{}", opts.usage(&brief));
}

fn main() {
    let args: Vec<String> = env::args().collect();
    let program = args[0].clone();

    env_logger::init().unwrap();

    let mut opts = Options::new();
    opts.optflag("h", "help", "Show this message");
    opts.optopt("", "tick-rate", "How frequently to tick the event pool. Default: 10000 Hz", "HERTZ");
    opts.optopt("", "load-address", "Address (in hex) to write the program to in memory. Default: 11000", "ADDR");
    opts.optopt("", "stack-pointer", "Address (in hex) to start the stack at. Default: 10e00", "ADDR");
    opts.optopt("m", "ram-size", "Number of words (in hex) of RAM to make available to the machine. \
                RAM will be mounted at 10000. \
                Default: 2000", "WORDS");

    let matches = opts.parse(&args[1..]).unwrap();

    if matches.opt_present("help") {
        print_usage(&program, opts);
        return;
    }

    let bin_path = match matches.free.len() {
        1 => matches.free[0].clone(),
        _ => {
            print_usage(&program, opts);
            exit(1);
        }
    };

    fn f64_option(opt: Option<String>, default: f64) -> f64 {
        opt.as_ref().map(|s| f64::from_str(&s)).unwrap_or(Ok(default)).unwrap()
    }

    fn u32_hex_option(opt: Option<String>, default: u32) -> u32 {
        opt.as_ref().map(|s| u32::from_str_radix(&s, 16)).unwrap_or(Ok(default)).unwrap()
    }

    let tick_rate = f64_option(matches.opt_str("tick-rate"), 10_000.0);

    let tick_dur = {
        let tick_s = 1. / tick_rate;
        let seconds = tick_s as u64;
        let ns = ((tick_s % 1.) * 1_000_000_000.) as u32;
        Duration::new(seconds, ns)
    };

    let load_address = u32_hex_option(matches.opt_str("load-address"), 0x11000);
    assert!(load_address >= 0x10000);

    let stack_pointer = u32_hex_option(matches.opt_str("stack-pointer"), 0x10e00);
    let ram_size = u32_hex_option(matches.opt_str("ram-size"), 0x2000);

    let mut ram = Ram::new(ram_size);

    {
        let mut input = File::open(bin_path).unwrap();
        let mut ptr = load_address as usize;
        let mut buffer = [0; 4];

        while input.read_exact(&mut buffer).is_ok() {
            ram.words_mut()[ptr - 0x10000] = LittleEndian::read_u32(&buffer);
            ptr += 1;
        }
    }

    let mut default_state = State::default();
    default_state.sp = stack_pointer;
    default_state.ip = load_address;

    let machine = Machine::new(default_state);
    let stdio_console = StdioConsole::new();

    let mut pool = EventPool::new();

    let machine_id = pool.add_hardware(machine);
    let ram_id = pool.add_hardware(ram);
    let console_id = pool.add_hardware(stdio_console);

    pool.connect(machine_id, ram_id);
    pool.connect(machine_id, console_id);

    let configs = vec![
        DeviceConfig {
            id: console_id,
            model: DeviceModel::DebugConsole.number(),
            interrupt: 0xffff_0001,
            memmap_base: 0x8c00,
            memmap_size: DeviceModel::DebugConsole.memory_size().unwrap()
        },
        DeviceConfig {
            id: ram_id,
            model: DeviceModel::Ram.number(),
            interrupt: 0xffff_0002,
            memmap_base: 0x10000,
            memmap_size: ram_size
        },
    ];

    pool.dispatch().send(HardwareMessage::InitializeMachine(machine_id, configs));

    pool.tick_real_clock(tick_dur);
}
// Get removes an exact number of items from the channel and blocks until that
// number of items are available. If the channel is closed then an error is
// returned.
func Get(queue <-chan byte, number int) ([]byte, error) {
	var result []byte
	for b := range queue {
		result = append(result, b)
		if len(result) == number {
			return result, nil
		}
	}
	return nil, errors.New("queue channel is closed")
}
// GetNSPath inspects a container by its name/id and returns an netns path using the pid of a container func (c *DockerRuntime) GetNSPath(ctx context.Context, containerId string) (string, error) { nctx, cancelFn := context.WithTimeout(ctx, c.config.Timeout) defer cancelFn() cJSON, err := c.Client.ContainerInspect(nctx, containerId) if err != nil { return "", err } return "/proc/" + strconv.Itoa(cJSON.State.Pid) + "/ns/net", nil }
The paper deals with the influence of glutaminic acid on the functional activity of the sympatho-adrenal system and on the concentration of calcium in urine during the development of the alcohol abstinence syndrome. Changes in the functional activity of the sympatho-adrenal system and in the calcium concentration over the course of the abstinence syndrome are shown to be phasic in character. It is established that, in the period of the fully developed abstinence syndrome, glutaminic acid has a normalizing action on the excretion of adrenaline and dopamine and also facilitates a decrease in the urinary calcium level.
import android.text.TextUtils;
import android.util.Log;

/**
 * Created by MrFu on 15/3/21.
 */
public class Trace {
    private static String TAG = "MrFu";

    public static void debug(String content) {
        if (!TextUtils.isEmpty(content)) {
            Log.i(TAG, content);
        }
    }

    public static void debug(int content) {
        Log.i(TAG, "" + content);
    }

    public static void debug(long content) {
        Log.i(TAG, "" + content);
    }
}
from collections import defaultdict
import random


def fix_word(word):
    # Keep only alphanumeric characters and basic punctuation.
    # (The original checked the loop index instead of the character,
    # so it never filtered anything.)
    n = ""
    for ch in word:
        if ch.isalnum() or ch in "!.?,'":
            n += ch
    return n


def create_markov_chain(text):
    words = text.split()
    markov_dict = defaultdict(list)
    for current_word, next_word in zip(words[0:-1], words[1:]):
        current_word = fix_word(current_word)
        next_word = fix_word(next_word)
        markov_dict[current_word].append(next_word)
    return dict(markov_dict)


def update_chain(data, chain, query):
    # Merge the chains built from each item's text under `query` into `chain`.
    for item in data:
        a = create_markov_chain(item[query])
        for x, y in a.items():
            if x in chain:
                chain[x] += y
            else:
                chain[x] = y
    return chain


def gen_sentence(chain, words):
    # Start from a capitalized word so the sentence begins naturally.
    word1 = random.choice(list(chain.keys()))
    while not (word1 and word1[0].isupper()):
        word1 = random.choice(list(chain.keys()))
    sentence = word1
    count = 1
    while count < words:
        count += 1
        word2 = random.choice(chain[word1])
        sentence += " " + word2
        # Stop early if the chosen word has no known successors.
        if word2 not in chain:
            break
        word1 = word2
    return sentence
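A quick usage sketch (the sample text is made up for illustration):

if __name__ == "__main__":
    text = "The cat sat. The cat ran. The dog sat on the mat."
    chain = create_markov_chain(text)
    print(gen_sentence(chain, 8))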
package testcomponent.heyongrui.com.base.injection.component;

import dagger.Component;
import testcomponent.heyongrui.com.base.base.BaseActivity;
import testcomponent.heyongrui.com.base.injection.annotation.PerActivity;
import testcomponent.heyongrui.com.base.injection.module.ActivityModule;

/**
 * This component injects dependencies into all Activities across the application
 */
@PerActivity
@Component(dependencies = BaseAppComponent.class, modules = ActivityModule.class)
public interface ActivityComponent {
    void inject(BaseActivity baseActivity);
}
import React, { useState } from 'react';
import { Formik, Form } from 'formik';
import { Wrapper } from '../components/Wrapper';
import withApollo from '../utils/apollo/withApollo';
import { InputField, Button } from '../components/htmlElements/';
import { useForgotPasswordMutation } from '../generated/graphql';
import { FormHeader } from '../components/utils/';

const ForgotPassword: React.FC = () => {
  const [complete, setComplete] = useState(false);
  const [forgotPassword] = useForgotPasswordMutation();

  return (
    <Wrapper navbar>
      <FormHeader text="Reset your Password" />
      <div className='mt-8 sm:mx-auto sm:w-full sm:max-w-md'>
        <div className='bg-white dark:bg-dark py-8 px-4 shadow sm:rounded-lg sm:px-10'>
          <Formik
            initialValues={{
              email: '',
            }}
            onSubmit={async ({ email }) => {
              await forgotPassword({ variables: { email } });
              setComplete(true);
            }}
          >
            {({ isSubmitting }) =>
              complete ? (
                <div>If an account with that email exists, we sent you an email.</div>
              ) : (
                <Form className='space-y-6'>
                  <InputField name='email' placeholder='email' type='email' label='Email' />
                  <Button loading={isSubmitting} text='login' type='submit' />
                </Form>
              )
            }
          </Formik>
        </div>
      </div>
    </Wrapper>
  );
};

export default withApollo({ ssr: false })(ForgotPassword);
Iteration strategies for successful positioning of innovative products into new markets

We have studied the experience and practice of entrepreneurs, and of their respective firms in Portugal, in positioning innovative products into the United States (US) and other markets. Employing a case study approach, we examined the manner in which dialogue with market stakeholders impacted value propositions in the form of formal pitch documents and oral presentations. These studies revealed an initial tendency of firms to pitch their innovation through generalized claims to non-specific audiences in the marketplace. Following interaction with stakeholders in the new market, the value propositions were iterated to become highly specific and sometimes quantitative. Specifically, the value propositions of successful entrepreneurs were changed along four dimensions: (1) specifying the business argument to particular customers; (2) refocusing the technology use case; (3) adapting the technology or product design; and (4) providing financial incentive for partners, or a combination of these. We further observed an apparent requirement for innovators to participate in this iteration process if they wish to successfully reach a new market. Ultimately, the presentation of new, innovative products does not resemble selling a finished product, as assumed in a value-as-exchange offering, but requires active interplay and co-creation with business stakeholders in the new market to transform the innovation into a fitted value-in-use solution.
import { Component, OnInit } from '@angular/core';
import { CardService } from './services/card.service';
import { CardList } from './model/card-list';

@Component({
  selector: 'app-graphic-cards',
  templateUrl: './graphic-cards.component.html',
  styleUrls: ['./graphic-cards.component.css']
})
export class GraphicCardsComponent implements OnInit {

  cards: CardList;
  sizes: number[] = [5, 10, 20];
  params = {
    "page": 1,
    "pageSize": 5
  }

  constructor(private cardService: CardService) { }

  ngOnInit() {
    this.updateParams();
  }

  updateParams(params?: any) {
    if (params) {
      this.params.pageSize = params.pageSize || this.params.pageSize;
      this.params.page = params.page || this.params.page;
    }
    this.cardService.getCards(this.params).subscribe(res => this.cards = res);
  }
}
import numpy as np
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, SubsetRandomSampler


class mnist_data(object):
    def __init__(
        self,
        shuffle,
        transform_train,
        transform_test,
        num_workers=0,
        create_validation_set=True,
        batch_size=128,
        validation_size=0.2,
        random_seed=1,
    ):
        self.shuffle = shuffle
        self.validation_size = validation_size
        self.transform_train = transform_train
        self.transform_test = transform_test
        self.random_seed = random_seed
        self.create_validation_set = create_validation_set
        self.batch_size = batch_size
        self.num_workers = num_workers

    def download_data(self):
        mnist_trainset = datasets.MNIST(
            root="./data", train=True, download=True, transform=self.transform_train
        )
        mnist_testset = datasets.MNIST(
            root="./data", train=False, download=True, transform=self.transform_test
        )
        return mnist_trainset, mnist_testset

    def create_validationset(self, mnist_trainset):
        num_train = len(mnist_trainset)
        indices = list(range(num_train))
        split = int(self.validation_size * num_train)

        if self.shuffle:
            np.random.seed(self.random_seed)
            np.random.shuffle(indices)

        train_idx, valid_idx = indices[split:], indices[:split]
        train_sampler = SubsetRandomSampler(train_idx)
        validation_sampler = SubsetRandomSampler(valid_idx)

        loader_train = DataLoader(
            dataset=mnist_trainset,
            batch_size=self.batch_size,
            sampler=train_sampler,
            num_workers=self.num_workers,
        )
        loader_validation = DataLoader(
            dataset=mnist_trainset,
            batch_size=self.batch_size,
            sampler=validation_sampler,
            num_workers=self.num_workers,
        )
        return loader_train, loader_validation

    def main(self):
        mnist_trainset, mnist_testset = self.download_data()

        if self.create_validation_set:
            loader_train, loader_validation = self.create_validationset(mnist_trainset)
            loader_test = DataLoader(
                dataset=mnist_testset,
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=self.num_workers,
            )
            return loader_train, loader_validation, loader_test
        else:
            loader_train = DataLoader(
                dataset=mnist_trainset,
                batch_size=self.batch_size,
                shuffle=self.shuffle,
                num_workers=self.num_workers,
            )
            loader_test = DataLoader(
                dataset=mnist_testset,
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=self.num_workers,
            )
            return loader_train, loader_test
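A possible usage sketch (the transforms and normalization constants are the standard MNIST choices, added here as assumptions, not part of the class):

import torchvision.transforms as transforms

transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
data = mnist_data(shuffle=True, transform_train=transform, transform_test=transform)
loader_train, loader_validation, loader_test = data.main()
print(len(loader_train), "training batches")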
package array1

import "testing"

func TestSum3(t *testing.T) {
	expected := 6
	actual := sum3([]int{1, 2, 3})
	if expected != actual {
		t.Fatalf("expected %d but actual is %d", expected, actual)
	}
}
High temperature fretting tribometer – study of the dynamic behaviour and tangential load measurement –

Experimental approaches to the study of fretting are heavily dependent on the performance of the test devices, especially considering the extensive use of quantitative parameters extracted from fretting loops. The dynamic response of a newly designed high temperature test bench is analysed to define the practical test range and to identify sources of dynamic disturbance. Displacement and load metrology are studied, leading to a direct comparison of two tangential load measurement methods. Accounting for the inertia loads and the elastic properties of the guiding system makes it possible to increase the frequency operating range of the test bench.

Introduction

The industrial frame for the design of this new high temperature fretting test device is aeronautical components subject to a vibratory environment. Aircraft components are severely strained by the air flow around the fuselage and by engine vibrations during normal flight. Take-off and landing phases also cause high amplitude shocks. These modern mechanical structures are submitted to ever harsher loading conditions, both static and dynamic, while also requiring longer lifespans and lighter weight. These constraints activate different damage mechanisms on contacting surfaces submitted to relative motion, such as bolted joints. Weight and static stiffness requirements are well satisfied using predictive numerical design methods; the dynamical behaviour, however, is still largely unpredictable without building prototypes. This is due to the challenge of modelling the complex phenomena occurring at the contacting interfaces, which depend on many parameters: materials, machining process, normal load at the interface, working temperature; the interface response can also change with the accumulation of damage. Fretting is a predominant source of damage in mechanical assemblies. It combines wear and fatigue at the contacting surface, and can lead to material loss as well as crack nucleation and propagation.

Scientific fretting studies can be classified in three categories. Fundamental research tests (laboratory studies of elementary systems and geometries) are dedicated to understanding the basic mechanisms of the tribological phenomena, and to developing and calibrating analytical and numerical models. The second level is simulation tests, where contacting surfaces are kept elementary but test conditions are chosen to be representative of the real-life operating conditions of a mechanical system. Finally we find validation tests, for which part of or the whole mechanical system is rebuilt and tested. The two latter kinds of tests often yield application-dependent results. Test rig dependence of the results is a major issue for the dissemination and standardisation of test results among different laboratories. The kinematic and dynamic behaviour of test rigs and their influence on the contacting surfaces also need to be extensively understood in order to develop accurate theoretical models. Since the end of the 90s, fretting damage assessment has become more and more tied to energy-based approaches, especially when wear mechanisms are involved. The tangential contact stiffness is also a key parameter for finite element modelling of mechanical assemblies. Accurate fretting loops (tangential load versus relative displacement) are therefore required to obtain relevant results.
Friction is generally supposed to be independent of the test frequency for metallic materials, so many studies focus on low frequency test conditions (under 1 Hz or up to a few Hz) to reduce the disturbances caused by the dynamic response of the test bench. Specifically designed test rigs can reach higher values (up to a few hundred Hz). Li et al. introduced the concept of deliberately reducing the stiffness of a part of the test bench to ensure better metrology, while retaining a high fundamental frequency of the mechanical assembly (160 Hz). These stiffness goals imposed the design of heavy parts, thus amplifying the inertia effect, which becomes significant above 40 Hz. Matlik et al. designed a high temperature fretting rig with bulk fatigue loading using piezo-electric stacks, and successfully reached operating test frequencies above 600 Hz. Although it is widely accepted that the rigidity and inertia of test rigs have an effect on the accuracy of the results obtained, few studies have been published about these issues. Each research laboratory has established and perfected its own set of design methods and know-how. This paper aims to go further in the understanding of the influence of fretting test bench behaviour on test results and the ensuing operating range limitations. We present a way to expand the available frequency range by comparing two classical tangential load sensor positions. The robustness of the load measurement method and the effect of high temperature (600 °C) are also analysed.

Test setup

The new fretting tribometer (Vibro Thermo Tribometer – VTTM) is designed as an ambient and high temperature evolution of a cryotechnical environment fretting test rig (Vibro Cryo TriboMeter – VCTM) already in use at the LISMMA laboratory. It is based on the same principle, but optimized to increase the frequency, displacement amplitude and normal load operating ranges. The VCTM was designed for low amplitude (≤20 μm), high contact pressure fretting fatigue tests on ball-on-flat configurations. The VTTM is designed to study flat-on-flat contact configurations in gross sliding fretting conditions, focusing more on energy dissipation. The description of a few key technical solutions chosen for this bench is accompanied by short reviews of the other kinds of solutions found in the literature.

General principle

The tribometer is designed to apply two independent loadings: a static normal load and a cyclic tangential one. The normal load is applied via calibrated dead weights (from 10 N up to 400 N) or a traction cable (from 150 N up to 1000 N), and measured with a strain gauge load cell. The tangential load is exerted by an electromagnetic actuator that can apply 500 N over a 1-5000 Hz frequency range, and is monitored by two piezoelectric load cells. Displacement amplitudes from 1 μm up to 1 mm are measured via a specific high temperature contact-less Eddy current sensor. Tests can be carried out at ambient temperature and up to 600 °C using a hot air blower with extra insulation parts around the specimen. Instead of cutting the heat flow by inserting insulation parts in the assembly, the mechanical parts exposed to high temperatures were machined from an Inconel 718 alloy in order to act as heat sinks in conjunction with forced air cooling.
Displacement measurement

The range of relative displacements involved between the contacting surfaces can reach a few micrometers, and rarely exceeds a few hundred micrometers in most industrial fretting cases. Higher displacement amplitudes would tend to be classified as reciprocating sliding studies. Different technologies can be found in the literature. They can first be separated into contact and contactless methods, the latter being preferable to limit the perturbations added by the extra contact point. Contact measurement methods generally involve LVDT (Linear Variable Differential Transformer) sensors. Contactless methods regroup Eddy current sensors and optical methods: digital image correlation, LDV (Laser Doppler Vibrometry) and photonic optic fibre sensors. Eddy current sensors require a specific target with size constraints and compatible material requirements. LDV uses the Doppler effect to measure the target velocity, and photonic optic fibre sensors are based on the variation of the light intensity reflected by the surfaces, which can be related to the distance to the transducer. Another distinction can be made between direct and relative measurement methods. Kartal et al. highlight that no matter how stiff the base and specimen-holding assembly is designed, there will be some unwanted motion of the fixed specimen. The most accurate way to get the specimen motion is to use a relative measurement method. One way to proceed is to use two sensors, one aimed at the fixed specimen, the other at the moving one. A proper design can ensure that the mechanical chain between a unique sensor and its target is stiff enough to be considered as a rigid body. The last key issue regarding displacement measurement is the proximity to the contact interface. The reference point and the target of the displacement sensor have to be as close as possible to the contacting surfaces of the specimen to reduce the disturbance of the test bench mechanical parts. Optical methods require heavy post-processing, but they are the most suited to satisfy this design constraint as long as direct access to the specimen can be guaranteed. Specific environment applications and the associated issues (insulation and heating system, immersion of the specimen) add a considerable technical and design challenge.

A detailed mechanical analysis of the measured displacement and the repartition of the loads on our test bench is presented in Figure 2. If we set aside the inertia effects, the relations between the different forces can be expressed according to the following equation:

F_shaker = F_t + F_el1 + F_el2 (1)

The measured relative displacement δ_mes has two error components: δ_error1 between the surface of the flat target and the contacting surface of the track, and δ_error2, the displacement error between the active surface of the Eddy current sensor and the contacting surface of the pin. The load repartition analysis indicates that no significant deformation of the loading arm can occur; δ_error2 is therefore mainly caused by pin bending and rotation, as illustrated in the close-up. However, δ_error1 is the combination of the track bending, its rotation and the elastic deformation of the track support under the action of F_el2. Depending on the test setup and conditions, this contribution can be noticeable, even though it remains a low influence parameter. The so-called "bench stiffness" is thus essentially due to the bulk elastic behaviour of the specimens and their locking mechanism.
Filippi had come to a similar conclusion when he identified specimen rotation (parallax error) as a predominant source of experimental error. This motion depends on the specimens' shape and material, but also on the load applied by the locking screws. It is advisable to tighten the specimens with a torque wrench to improve the repeatability of the tests.

Tangential load measurement

The other key parameter monitored in fretting tests is the tangential load transmitted through the surfaces in contact. In global sliding mode, this load gives access to the friction coefficient. Fretting studies can be undertaken over a wide range of frequencies (rarely lower than 1 Hz, up to a few hundred), but always involve dynamic phenomena. The typical load measurement method is a uni-directional piezoelectric load cell. 3D load cells can also be found in the literature. Test benches are often built around similar principles, either dictated by practical design limitations or specific environment constraints, or based on previous scientific studies. The normalization process for fretting studies is still underway, and is more advanced for fretting-fatigue tests. For fretting-wear tests, some essential design guidelines have already been established. As far as the tangential load is concerned, inertial loads resulting from the dynamic motion of the mechanical parts disturb the measurement of the actual tangential load at the contact interface. It is therefore advised to design the bench so that the measurement of the transmitted load occurs on the static specimen. The test bench presented in this study combines two piezo-electric load cells: one placed between the electromagnetic actuator and the moving mechanical assembly, and the other between the static assembly and the base. Both sensors measure the tangential contact load. This dual setup enables a comparative study of the two different design choices and their limitations.

Frequency range limitations in global slip regime

An experimental determination of the dynamic response of the test rig is necessary to validate the global design. It can also be used as a reference to assess the ageing of the components or to detect structural damage. All the individual parts of the test bench were designed so that their first resonance mode is higher than the expected test frequency range. The lowest frequency, that of the base support, is estimated at about 300 Hz. However, no mechanical assembly can ensure perfect bonds between the different parts, so the behaviour of the full structure is more complex. The observation of the fretting loops obtained under different test conditions is an empirical way of checking the practical test range of the device. The accuracy of the loops can be evaluated based on the friction coefficient, the dissipated energy (area within the curve) and the tangential contact stiffness. The industrial frame required comparing the potential damping that different coatings and surface treatments provide, in fretting-wear and fretting-fatigue conditions, without bulk fatigue loading. These tests were first started on an already existing fretting-fatigue test bench, the VCTM, and have been described in a conference paper. The fretting loops in Figure 3 illustrate the observed disturbance (thin blue line) of the tangential load signal, which appeared in global sliding regime only. Frequency sensitivity tests have been carried out: the higher the frequency, the stronger the disturbance.
At 80 Hz, the amplitude of the disturbance is even greater than the friction force (Fig. 3d). Lowering the test frequency to 20 Hz revealed a dampened behaviour (Fig. 3a). The frequency of the sinusoidal disturbance was estimated at about 400 Hz, and remains constant even when the test frequency varies. The new test bench presented in this article was designed to reduce this disturbance. The results obtained on the VTTM in similar test conditions (red dashed lines) show a noticeable improvement. However, the phenomenon is still present at frequencies higher than 35 Hz, and the disturbance shows a more complex behaviour with multiple harmonics. Our analysis of the synchronized load, displacement and acceleration signals showed that the disturbance is initiated at the beginning of the reverse motion. Multiple characterization tests were carried out to identify potential sources: the influence of the guiding blades' stiffness, the mass of the mobile assembly, the tightening of the loading arm's dual pivot bearings, the base support bracket. Ramalho et al. had identified a resonance mode of the loading arm which caused a similar phenomenon. Our test rig analysis points towards the first mode of the base support as the main cause of the disturbance. Even massive base supports that could be considered as rigid bodies can influence the bench behaviour, which is another design constraint that has to be considered. From these observations, we can establish a conservative empirical design rule for fretting-wear tests: the first resonance mode of each individual part (save for the guiding system) should be at least 10 times the desired maximum test frequency in global sliding regime.

Figure 4 displays a schematic view of the test setup and the forces involved. The dual sensor setup allows a direct comparison of two measurement methods for the tangential load F_t. We calculate F_t1 using sensor (1) (signal F_mes1), which is inserted between the oscillating part holding the track and the electromagnetic actuator. F_t2 results from sensor (2) (signal F_mes2), which is fixed to the base and holds the pin-holding arm in place with a preloaded ball joint, according to the most common design.

Mechanical analysis

The reference method uses the signal of load sensor (2) to track the load transmitted through the contact. The position of the sensor ensures a direct measurement of the transmitted load. The acceleration levels measured close to the center of gravity of the loading arm (7) lead us to neglect its inertia effects in this study. The dual pivot guiding mechanism also opposes a negligible resistance compared to the measured tangential contact forces. The design of the different parts ensures that no significant disturbing load pollutes the signal (resulting torque or extra friction forces). The new method proposed here measures the load F_shaker exerted by the actuator. The signal F_mes1 acquired by the sensor is not equal to the tangential load F_t1 at the contact interface: it also includes the elastic reactions F_el1 and F_el2 of the guiding blades (6) and the inertial force of the moving assembly (8), F_i = −γ·m_mobile. Instead of limiting the operating range so that these contributions can be neglected, we implemented a correction protocol accounting for them.
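As a rough illustration of this correction protocol, and of the dissipated energy metric (loop area) mentioned earlier, here is a sketch in Python/NumPy; the signal names, units (SI) and sampling assumptions are mine, not the authors' implementation:

import numpy as np

def correct_tangential_load(f_mes1, gamma, delta, k_blades, m_mobile):
    """Remove the guiding blades' elastic reaction (k_blades * delta) and
    the moving assembly's inertia (m_mobile * gamma, with gamma the measured
    acceleration) from the actuator-side load signal, leaving an estimate
    of the tangential contact load F_t1."""
    return f_mes1 - k_blades * delta - m_mobile * gamma

def dissipated_energy(f_t, delta):
    """Area enclosed by one sampled fretting loop (shoelace formula),
    i.e. the energy E_d dissipated over the cycle."""
    x = np.append(delta, delta[0])  # close the loop
    y = np.append(f_t, f_t[0])
    return 0.5 * abs(np.sum(x[:-1] * y[1:] - x[1:] * y[:-1]))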
Writing the fundamental principle of dynamics for the system {track + support} results in the following equation:

m_mobile·γ = F_mes1 − F_t1 − F_el1 − F_el2

with the following expressions for the elastic forces:

F_el1 = K_1·δ_blades, F_el2 = K_2·δ_blades

The stiffness of the blade-guided oscillating assembly might depend on the test frequency f and the applied normal load N. A contact-less frequency characterisation of the behaviour of this assembly was done for two different blade setups. The results are detailed in the next paragraph. The displacement δ of the mobile assembly is not exactly equivalent to the relative displacement δ_r measured by the test setup (Fig. 2). The loading arm was designed so as to have a rigid body behaviour in the expected range of test conditions. However, it might undergo some elastic deformation δ_arm under the tangential loading transmitted through the contact. The real blade displacement is expressed as δ_blades = δ_r + δ_arm. Tests under severe conditions were carried out and resulted in negligible δ_arm values; we will not add this correction term in the rest of this study. The elastic contributions of the two blades will also be regrouped into a single stiffness K_blades. The acceleration is measured with a piezoelectric accelerometer positioned as shown in Figure 4. Other accelerometers were positioned in different places during the validation process of the bench; they showed no relevant cross-direction acceleration. The mass m_mobile of the moving part can be estimated from the electromagnetic actuator data sheets and the weighing of the different components of the assembly. It was estimated at 2.67 kg with less than 5% error. However, the dynamic contribution of the mass may differ from its static value.

Mobile assembly characterization

In order to correct the F_mes1 signal, K_blades and m_mobile have to be determined. The mobile assembly is a mass-spring system, with theoretically no damping when the contact is unloaded. However, it is composed of multiple parts assembled together, causing some energy dissipation at the junctions on top of the internal damping of the bulk materials. It is known that the structural damping of bulk metallic materials is around 0.01%, so we will neglect these damping sources at first. The previous equation can then be written this way (unloaded contact, F_t1 = 0):

m_mobile·d²δ/dt² + K_blades·δ = F_mes1

Performing a Fourier transform gives:

F_mes1(ω) = m_mobile·(ω_0² − ω²)·δ(ω)

with ω_0 = √(K_blades/m_mobile) the fundamental resonance frequency. Two blade setups (a stiff one and a compliant one) were designed according to Henein's work and tested. Figure 5 presents the FFT (Fast Fourier Transform) of the signal for the two setups at ambient temperature (blue continuous line), and at high temperature (600 °C) for the rigid blade (green continuous line). The FFT of a theoretical perfect mass-spring system is also plotted, using the K_blades and m_mobile identified on the experimental curves. The blade stiffness and mobile mass values are reported in Table 1. The close-up on the resonance peak shows that the system has very limited damping. For the prismatic blades, the results are less accurate because of the lower input voltage and the lower amplitude of the measured tangential force. The inertial mass contributions are inferior to the static mass of the mobile assembly. The design of the bench ensures that even high temperature tests do not significantly alter the behaviour of the mobile assembly.

Materials and specimens

The following experimental fretting loops were extracted from various test campaigns.
Ball-on-plane and plane-on-plane contact configurations involved different materials (Inconel 718, Ti6Al4V, stainless steel), surface treatments (DLCs, WS2 ionic implantation) and test conditions (normal load, frequency, amplitude and temperature).

Fretting loops analysis

Fretting loops are the reference tool for qualitative fretting studies. The loops using F_t1, F_t2 and F_mes1 were plotted in Figure 6 for global sliding test conditions corresponding to those in Figure 3. The contribution of the inertial forces was also singled out for F_t1. A key result from these tests is the shape of the F_t1 fretting loops. The F_t2 reference loops were noticeably disturbed by the vibration issues identified earlier at frequencies above 35 Hz. The sensor measuring F_mes1 is aligned on a vibration node of the base structure, resulting in cleaner loops. With a simple and robust correction methodology, using this signal increases the available frequency range for global sliding fretting tests to at least 80 Hz. Further testing would be required for a more precise characterization of the practical frequency range.

Blades stiffness

The dynamic behaviour of the mobile assembly may be modified when the contacting surface of the specimen is loaded. In order to study its sensitivity to frequency, normal load and specimen configuration, another stiffness identification protocol was tested. Figure 6a illustrates the measurement of the K_2 blade stiffness value, using the stable sliding phase as a reference. This part of the fretting cycle is supposed to be horizontal provided surface damage is limited. K_2 is calculated by a mean square regression. These tests aim to study the influence of the frequency and the normal load on the behaviour of the mobile assembly. A stable global sliding fretting regime should be achieved for these tests in order to measure the K_2 stiffness introduced earlier. Wear has to be limited, and the sliding phase kept as flat as possible. Since uncoated contacts are tested, we can expect severely damaging tribological behaviour. Correct conditions are achieved by applying a low normal load (10 N) and adding a drop of lubricant at the contact interface to improve the frictional behaviour. As a consequence, the friction force generated by these test conditions was low, approaching the lower range of the bench capabilities. We repeated the process using uncoated titanium-on-steel specimens in a plane-on-plane contact configuration, with the normal load increased to 150 N in order to obtain higher friction force values. The K_2 values for the previous sliding tests (80 N load) were also calculated. All results obtained are displayed in Table 2. The standard deviation of the measured values is lower than 5 N·mm⁻¹. We could not achieve a global sliding regime at 80 Hz for the 150 N load because of the cumulative damage of the specimens. Even with the drop of lubricant, the high normal load caused severe wear of the contacting surfaces. The influence of the frequency and the normal load on the elastic behaviour of the mobile assembly is low in the range of our test conditions, within a 5% error margin.

Sensitivity to the identified parameters

We ran a sensitivity study of the shape of the fretting loops to the K_blades and m_mobile values. The dissipated energy, the friction coefficient and the tangential stiffness values were chosen as quantitative references. Figure 7 illustrates the resulting loops for F_t1 (green) and F_t2 (red) at 30 and 50 Hz.
The F_t1 signal was also plotted for a 100% variation of either K_blades or m_mobile. K_blades variations do not affect the inside area of the loops (dissipated energy E_d) or the mean value of the friction coefficient. They may however impact the standard deviation of the latter. m_mobile variations do not significantly alter the E_d value either: a 100% uncertainty only causes under 2% spread. Uncertainty on both parameters does not affect the tangential stiffness calculated on the elastic unloading phase of the loops. At 30 Hz the difference between E_d1 (F_t1 loop) and E_d2 (F_t2 loop) is less than 5%. At 50 Hz, however, it can reach up to 20%.

Test conditions

The correction method was applied to the F_mes1 signal acquired in previous test campaigns covering a wide range of test conditions. Figure 8 compares the fretting loops obtained with both measurement methods. At low frequency (10 Hz) and high displacement amplitude (500 μm), the guiding blades' stiffness is the main contribution (Fig. 8a), and both loops are nearly identical. As the frequency increases (30 Hz) but remains in the acceptable operating range for the reference signal (F_t2), the new measurement method noticeably improves the shape of the curve, eliminating small amplitude oscillations during the sliding phase (Fig. 8b). Above the frequency limit (50 Hz), the dynamic disturbance is negligible in partial slip regime (Fig. 8c). However, in global sliding regime (Fig. 8d), we find the expected heavy distortion of the F_t2 loops, which is reduced on the corrected F_t1 loop. We can identify a different behaviour depending on the direction of the motion.

Conclusion

This work focused on a better understanding of the influence of test bench behaviour on fretting test results.

- The dynamic response of test rigs is a key issue for fretting studies, which involve very small amplitude alternating motion. The different parts as well as the global assembly have to be designed so as to raise the resonance frequencies at least an order of magnitude above the expected operating range. Depending on the design choices, even massive base supports can cause unwanted disturbance.
- To be able to extract accurate, inherent material response results, the usual design approach limits the operating range so that the different influence parameters are negligible compared to the contact behaviour. Unfortunately, financial limitations or technical challenges can make it hard to reach high test frequencies (very useful for fatigue analysis) in global sliding regime studies, especially when high normal loads are expected.
- A dual load sensor setup enabled a direct comparison between two positions: on the fixed arm (reference position) or on the mobile assembly. For the latter, inertial forces and the elastic behaviour of the guiding system have been taken into account.
- A simple and robust unloaded characterization protocol for the mass-spring behaviour of the mobile assembly was implemented. The fretting loops plotted using this measurement method remain accurate at higher operating frequencies (up to at least 80 Hz where the standard setup limit is 30 Hz) and lower normal loads, greatly extending the capabilities of the device.
- The newly designed test rig with the proposed load measurement method allows fretting-fatigue tests (in sticking or partial slip regime) up to a few hundred Hz, and fretting-wear type tests up to at least 100 Hz. The upper frequency limit is still being studied.
These results show that different design approaches may be undertaken for fretting test benches. The loading arm usually combines the normal loading and tangential load measurement functions. Eliminating the latter function from this part would remove the need for one degree of freedom and help achieve a wider operating range. Combined with normalized test rig characterization protocols, this would be a step towards delivering more absolute contact behaviour results, and would facilitate comparative studies between different laboratories and test equipment.
/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 *
 * Modifications Copyright OpenSearch Contributors. See
 * GitHub history for details.
 */

import {
  EuiButton,
  EuiButtonEmpty,
  EuiButtonIcon,
  EuiCallOut,
  EuiDatePicker,
  EuiDatePickerRange,
  EuiFlexGroup,
  EuiFlexItem,
  EuiFlyoutBody,
  EuiFlyoutFooter,
  EuiFlyoutHeader,
  EuiFormRow,
  EuiIcon,
  EuiLoadingChart,
  EuiSelect,
  EuiSelectOption,
  EuiSpacer,
  EuiText,
  EuiTitle,
  ShortDate,
} from '@elastic/eui';
import _ from 'lodash';
import { UI_DATE_FORMAT } from '../../../../../common/constants/shared';
import React, { useEffect, useState } from 'react';
import { FlyoutContainers } from '../../helpers/flyout_containers';
import { displayVisualization, getQueryResponse, isDateValid } from '../../helpers/utils';
import { convertDateTime } from '../../helpers/utils';
import PPLService from '../../../../services/requests/ppl';
import { CoreStart } from '../../../../../../../src/core/public';
import { CUSTOM_PANELS_API_PREFIX } from '../../../../../common/constants/custom_panels';
import {
  pplResponse,
  SavedVisualizationType,
  VisualizationType,
} from '../../../../../common/types/custom_panels';
import './visualization_flyout.scss';

/*
 * VisaulizationFlyout - This module creates a flyout to add a visualization
 *
 * Props taken in as params are:
 * panelId: panel Id of current operational panel
 * closeFlyout: function to close the flyout
 * start: start time in date filter
 * end: end time in date filter
 * setToast: function to set toast in the panel
 * http: http core service
 * pplService: ppl requestor service
 * setPanelVisualizations: function to set the visualization list in panel
 * isFlyoutReplacement: boolean to see if the flyout is triggered for add or replace visualization
 * replaceVisualizationId: string id of the visualization to be replaced
 */

type Props = {
  panelId: string;
  pplFilterValue: string;
  closeFlyout: () => void;
  start: ShortDate;
  end: ShortDate;
  setToast: (
    title: string,
    color?: string,
    text?: React.ReactChild | undefined,
    side?: string | undefined
  ) => void;
  http: CoreStart['http'];
  pplService: PPLService;
  setPanelVisualizations: React.Dispatch<React.SetStateAction<VisualizationType[]>>;
  isFlyoutReplacement?: boolean | undefined;
  replaceVisualizationId?: string | undefined;
};

export const VisaulizationFlyout = ({
  panelId,
  pplFilterValue,
  closeFlyout,
  start,
  end,
  setToast,
  http,
  pplService,
  setPanelVisualizations,
  isFlyoutReplacement,
  replaceVisualizationId,
}: Props) => {
  const [newVisualizationTitle, setNewVisualizationTitle] = useState('');
  const [newVisualizationType, setNewVisualizationType] = useState('');
  const [newVisualizationTimeField, setNewVisualizationTimeField] = useState('');
  const [pplQuery, setPPLQuery] = useState('');
  const [previewData, setPreviewData] = useState<pplResponse>({} as pplResponse);
  const [previewArea, setPreviewArea] = useState(<></>);
  const [showPreviewArea, setShowPreviewArea] = useState(false);
  const [previewIconType, setPreviewIconType] = useState('arrowRight');
  const [previewLoading, setPreviewLoading] = useState(false);
  const [isPreviewError, setIsPreviewError] = useState('');
  const [savedVisualizations, setSavedVisualizations] = useState<SavedVisualizationType[]>([]);
  const [visualizationOptions, setVisualizationOptions] = useState<EuiSelectOption[]>([]);
  const [selectValue, setSelectValue] = useState('');

  // DateTimePicker States
  const
startDate = convertDateTime(start, true, false); const endDate = convertDateTime(end, false, false); const onPreviewClick = () => { if (previewIconType == 'arrowRight') { setPreviewIconType('arrowUp'); setShowPreviewArea(true); } else { setPreviewIconType('arrowRight'); setShowPreviewArea(false); } }; const isInputValid = () => { if (!isDateValid(convertDateTime(start), convertDateTime(end, false), setToast, 'left')) { return false; } if (selectValue === '') { setToast('Please make a valid selection', 'danger', undefined, 'left'); return false; } return true; }; const addVisualization = () => { if (!isInputValid()) return; if (isFlyoutReplacement) { http .post(`${CUSTOM_PANELS_API_PREFIX}/visualizations/replace`, { body: JSON.stringify({ panelId: panelId, savedVisualizationId: selectValue, oldVisualizationId: replaceVisualizationId, }), }) .then(async (res) => { setPanelVisualizations(res.visualizations); setToast(`Visualization ${newVisualizationTitle} successfully added!`, 'success'); }) .catch((err) => { setToast(`Error in adding ${newVisualizationTitle} visualization to the panel`, 'danger'); console.error(err); }); } else { http .post(`${CUSTOM_PANELS_API_PREFIX}/visualizations`, { body: JSON.stringify({ panelId: panelId, savedVisualizationId: selectValue, }), }) .then(async (res) => { setPanelVisualizations(res.visualizations); setToast(`Visualization ${newVisualizationTitle} successfully added!`, 'success'); }) .catch((err) => { setToast(`Error in adding ${newVisualizationTitle} visualization to the panel`, 'danger'); console.error(err); }); } closeFlyout(); }; const onRefreshPreview = () => { if (!isInputValid()) return; getQueryResponse( pplService, pplQuery, newVisualizationType, start, end, setPreviewData, setPreviewLoading, setIsPreviewError, pplFilterValue, newVisualizationTimeField ); }; const timeRange = ( <EuiFormRow label="Panel Time Range"> <EuiDatePickerRange className="date-picker-height" readOnly startDateControl={ <EuiDatePicker selected={startDate} startDate={startDate} endDate={endDate} isInvalid={startDate > endDate} aria-label="Start date" dateFormat={uiSettingsService.get('dateFormat')} /> } endDateControl={ <EuiDatePicker selected={endDate} startDate={startDate} endDate={endDate} isInvalid={startDate > endDate} aria-label="End date" dateFormat={uiSettingsService.get('dateFormat')} /> } /> </EuiFormRow> ); const flyoutHeader = ( <EuiFlyoutHeader hasBorder> <EuiTitle size="m"> <h2 id="addVisualizationFlyout"> {isFlyoutReplacement ? 'Replace Visualization' : 'Select Existing Visualization'} </h2> </EuiTitle> </EuiFlyoutHeader> ); const onChangeSelection = (e: React.ChangeEvent<HTMLSelectElement>) => { setSelectValue(e.target.value); }; const emptySavedVisualizations = ( <EuiCallOut iconType="help"> <p>No saved visualizations found!</p> </EuiCallOut> ); const flyoutBody = savedVisualizations.length > 0 ? 
( <EuiFlyoutBody> <> <EuiSpacer size="l" /> <EuiFormRow label="Visualization name"> <EuiSelect hasNoInitialSelection onChange={(e) => onChangeSelection(e)} options={visualizationOptions} /> </EuiFormRow> <EuiSpacer size="l" /> <EuiSpacer size="l" /> <EuiFlexGroup alignItems="center"> <EuiFlexItem grow={false}> <EuiButtonEmpty iconSide="left" onClick={onPreviewClick} iconType={previewIconType} size="s" isLoading={previewLoading} > Preview </EuiButtonEmpty> </EuiFlexItem> <EuiFlexItem grow={false}> <EuiButtonIcon aria-label="refreshPreview" iconType="refresh" onClick={onRefreshPreview} /> </EuiFlexItem> </EuiFlexGroup> <EuiSpacer size="m" /> {showPreviewArea && previewArea} <EuiSpacer size="m" /> </> </EuiFlyoutBody> ) : ( <EuiFlyoutBody banner={emptySavedVisualizations}> <> <div> You don't have any saved visualizations. Please use the "create new visualization" option in add visualization menu. </div> </> </EuiFlyoutBody> ); const flyoutFooter = ( <EuiFlyoutFooter> <EuiFlexGroup gutterSize="s" justifyContent="spaceBetween"> <EuiFlexItem grow={false}> <EuiButton onClick={closeFlyout}>Cancel</EuiButton> </EuiFlexItem> <EuiFlexItem grow={false}> <EuiButton onClick={addVisualization} fill> Add </EuiButton> </EuiFlexItem> </EuiFlexGroup> </EuiFlyoutFooter> ); // Fetch all saved visualizations const fetchSavedVisualizations = async () => { return http .get(`${CUSTOM_PANELS_API_PREFIX}/visualizations`) .then((res) => { if (res.visualizations.length > 0) { setSavedVisualizations(res.visualizations); setVisualizationOptions( res.visualizations.map((visualization: SavedVisualizationType) => { return { value: visualization.id, text: visualization.name }; }) ); } }) .catch((err) => { console.error('Issue in fetching the operational panels', err); }); }; useEffect(() => { const previewTemplate = ( <> {timeRange} <EuiFlexGroup> <EuiFlexItem> {previewLoading ? ( <EuiLoadingChart size="xl" mono className="visualization-loading-chart" /> ) : isPreviewError != '' ? ( <div className="visualization-error-div"> <EuiSpacer size="l" /> <EuiIcon type="alert" color="danger" size="l" /> <EuiSpacer size="l" /> <EuiText> <h2>Error in rendering the visualizaiton</h2> </EuiText> <EuiSpacer size="l" /> <EuiText> <p>{isPreviewError}</p> </EuiText> </div> ) : ( <div className="visualization-div"> {displayVisualization(previewData, newVisualizationType)} </div> )} </EuiFlexItem> </EuiFlexGroup> </> ); setPreviewArea(previewTemplate); }, [previewLoading]); // On change of selected visualization change options useEffect(() => { for (var i = 0; i < savedVisualizations.length; i++) { const visualization = savedVisualizations[i]; if (visualization.id === selectValue) { setPPLQuery(visualization.query); setNewVisualizationTitle(visualization.name); setNewVisualizationType(visualization.type); setNewVisualizationTimeField(visualization.timeField); break; } } }, [selectValue]); // load saved visualizations useEffect(() => { fetchSavedVisualizations(); }, []); return ( <FlyoutContainers closeFlyout={closeFlyout} flyoutHeader={flyoutHeader} flyoutBody={flyoutBody} flyoutFooter={flyoutFooter} ariaLabel="addVisualizationFlyout" /> ); };
Era 3 Has Started (NA)

The second Era of 2015 has begun on the NA servers, and the previous leaderboards have been archived. Bear in mind that rankings earned now in this Era might not last long, with Patch 2.2 being deployed soon (hopefully this Tuesday) and the incoming changes. Also, congratulations to the top players (listed below) who managed to rank among the top on Era 2!

Era 2 Leaderboards
Barbarian - Klankster
Crusader - MrsBBQ
Demon Hunter - Gabynator
Monk - Gabynator
Witch Doctor - FriendlyFade
Wizard - Jaetch
2 Players - beardeer and VocaloidNyan
3 Players - Quirkitized, Cino and Cuba
4 Players - Quirkitized, Verdrix, Cino and Cuba

Crusader Summary for 2.2
MannerCookie has put together a video summarizing some important changes coming to the Crusader class in Patch 2.2, along with his thoughts on what the top builds will be. Definitely worth checking out!
/*
  Synchronously wait on our semaphore for data to be published that matches
  our subscription's "glob", but also respond to out-of-band signals,
  typically indicating "end of data".

  This is deprecated now in favor of the lighter-weight Wait2, but will be
  retained for some time to be used possibly for diagnostics/testing/validation
  purposes.
*/
int RMsQueue::Wait(char* tag, size_t* tagN, void* data, size_t* dataN, int flags)
{
	dumpQueue("<%d>::Wait(...%d)...\n", xp2pg(this), flags);
	if (state)
		return RMsStatusSignaled;
	semaphore.wait();
	if (state) {
		dumpQueue("<%d>::Wait(...%d)...WAIT => SIGNALED\n", xp2pg(this), flags);
		return RMsStatusSignaled;
	}
	dumpQueue("<%d>::Wait(...%d)...back from WAIT\n", xp2pg(this), flags);
	td_pair_t td;
	if (read < NQuick)
		td = quickE[read];
	else
		td = ((pq_pag_t*)pg2xp(pageE[qp2pq(read)]))->pqTD[qp2pi(read)];
	dumpQueue("<%d>::Wait(...%d)...%x:%x\n", xp2pg(this), flags, td.tag, td.data);
	int status = flags;
	if (flags & RMsGetTag) {
		const auto n = rp2n(td.tag);
		if (tag != nullptr) {
			if (n <= *tagN - 1)
				memcpy(tag, rp2xp(td.tag), n), tag[n] = '\0';
			else
				status ^= RMsGetTag;
		} else
			status |= RMsOnlyLength;
		*tagN = n;
	}
	if (flags & RMsGetData) {
		const auto n = rp2n(td.data);
		if (data != nullptr) {
			if (n <= *dataN)
				memcpy(data, rp2xp(td.data), n);
			else
				status ^= RMsGetData;
		} else
			status |= RMsOnlyLength;
		*dataN = n;
	}
	if (status == flags) {
		rmsRoot->FreePair(td), read++;
		if (read == write) {
			lock_guard<RSpinLock> acquire(spin);
			if (read == write)
				read = 0, write = 0;
		}
	}
	dumpQueue("<%d>::Wait('%s'...%d)...%d\n", xp2pg(this), tag, flags, status);
	return status;
}
/**
 * Wrapped vertex of a triangular, cartesian 2D domain.
 *
 * @since 0.1.0
 */
class TriangularCartesian2D_Vertex {
	TriangularCartesian2D_Vertex_t *vertex;
public:
	TriangularCartesian2D_Vertex(TriangularCartesian2D_Vertex_t &initial_vertex);
	PointCartesian2D to_point();
	TriangularCartesian2D_Vertex_t & get_vertex();
};
// create queue owner=client # success
public void testCreateClientQueueSuccess() throws Exception
{
    String queueOwner = "client";
    _jmx.createQueue("test", QUEUE_NAME, queueOwner, true);
}
Off-label drug use in human immunodeficiency virus disease. We wished to determine the extent to which drugs used to treat HIV disease and its clinical manifestations are prescribed for conditions other than those listed on the U.S. Food and Drug Administration's approved drug label, how such "off-label" use varies by patient characteristics and type of HIV-related medical condition, and the extent to which physicians alter the way they treat HIV-related conditions because of reimbursement problems associated with off-label drug use. We surveyed 1,530 primary care providers for people with HIV disease between February and May 1993. A three-part survey instrument was used to obtain data on the drugs prescribed for the last three patients with HIV disease treated by the provider, the preferred choice of therapy for 32 specific HIV-related conditions, and the extent to which providers faced reimbursement problems regarding the use of drugs for off-label indications. Three drug compendia were used as cited sources of off-label drug uses. In all, 387 (32%) evaluable surveys were returned, yielding data on 1,148 patients. The majority (81%) of patients received at least one drug off-label, and almost half (40%) of all reported drug therapy was off-label. Most off-label drug use was for treatment and prevention of HIV-related opportunistic infections, which frequently represented the community standard of practice (e.g., trimethoprim/sulfamethoxazole for prevention of Pneumocystis carinii pneumonia), or the de facto standard of practice when no licensed therapies were available (e.g., drugs for treatment of Mycobacterium avium complex, MAC). More than 75% of off-label usage was cited in at least one of the three authoritative medical compendia. The use of drugs for off-label indications in HIV care is common and frequently represents community standards of care. Reliance on drug compendia for support of off-label drug use accounts for the majority of such uses, although many legitimate off-label uses may not be included because of compendia publication lag. The prevalence of off-label drug use in routine clinical practice and the development of newer and more costly drugs for treatment of HIV and its medical complications argue for the articulation of an explicit national reimbursement policy for off-label uses of prescription drugs so that medically appropriate therapies will be available to those with insurance in a rational, consistent way.
(Above) The Fender 60th Anniversary Classic Player 50s Strat was the winner last year. Who will take this prestigious prize in 2015? There's been a slew of impressive new electric guitars released over the past 12 months, but now Guitarist magazine wants you to choose your favourite by placing your vote for the electric guitar of the year 2015. Read more: Fender Eric Johnson Thinline Stratocaster Run in association with the UK's Music Industries Association (MIA), this award is designed to celebrate the very best in performance, quality, value for money and retail success. Our shortlist of guitars was compiled by the Guitarist team and the MIA's members, and recognises the finest new products to have come to market since autumn 2014. Tell us who you think should win the Guitarist electric guitar of the year award 2015 by voting in the poll below.
Classification of Sonar Targets in Air: A Neural Network Approach

Ultrasonic sonar sensors are commonly used for contactless distance measurements in application areas such as automotive and mobile robotics. They can also be exploited to identify and classify sound-reflecting objects (targets), which may then be used as landmarks for navigation. In the presented work, sonar targets of different geometric shapes and sizes are classified with custom-engineered features. Artificial neural networks (ANNs) with multiple hidden layers are applied as classifiers and different features are tested as well as compared. We concentrate on features that are related to target strength estimates derived from pulse-compressed echoes. In doing so, one is able to distinguish different target geometries with a high rate of success and to perform tests with ANNs regarding their capabilities for size discrimination of targets with the same geometric shape. A comparison of achievable classifier performance with wideband and narrowband chirp excitation signals was conducted as well. The research indicates that our engineered features and excitation signals are suitable for the target classification task.

Introduction

Air-based ultrasonic sonar sensors are often deployed for obstacle avoidance and navigation purposes in application areas such as automotive, factory automation as well as mobile ground and airborne robotics. In these application areas, ultrasonic sonar sensors show distinct advantages in comparison to other sensor technologies, which are based on other physical principles, such as LIDAR (Light Detection And Ranging), cameras and RADAR (Radio Detection And Ranging) sensors. Most importantly, they are not susceptible to obstacles' optical and electromagnetic properties. Therefore, they do not depend on obstacle color, lighting as well as transparency or material-related radar cross section. Another important aspect is that direct physical contact is not necessary. Consequently, ultrasonic sensors are especially well-suited for robot operation in low-visibility scenarios, e.g., in a dark room and outdoors at night, and for avoidance of transparent plastic or glass obstacles. Ultrasonic sensors may be included as primary sensors or as vital components in a complementary sensor fusion setup that comprises additional sensors. Such a setup would combine the different sensors' advantages, which may thus lead to improved system robustness. Often, ultrasonic sensors are only applied to determine the distance to the closest obstacles due to their low angular resolution in comparison to other sensors, but by incorporation of distinct acoustic targets, we aim to facilitate landmark-based localization as well as mapping and to make position predictions more precise. Incorporation of fixed landmarks is particularly beneficial for navigation in unstructured and changing environments. This may be the case for living assistance robots at home (ambient assisted living), where the environment is constantly changed by people. Another use case can be indoor as well as outdoor robot farming applications, in which vegetation changes its shape due to plant growth and farming activities, such as cutting off branches and harvesting fruits. In order to realize landmark-based navigation, it is first necessary to perform target identification, on which this article puts its main emphasis. For this, geometrically different shaped as well as sized targets are deployed and classification is performed on these.
We insonify the targets with narrowband as well as broadband chirp signals, emitted by an electrostatic speaker, record the echoes with a measurement microphone, classify the echo signals and compare the classification results. Broadband signals are used since their cross-correlation functions are narrow and they are in general better suited to deduce spectral object features than narrowband signals, due to the larger frequency ranges that are covered in a single echo. Likewise, echolocating bats are also known to emit broadband signals as soon as they have to resolve objects in front of vegetation. It was shown that they are able to classify different geometrical objects independent of their size, as well as same geometrical objects of different sizes. Artificial neural networks (ANNs) are employed as classifiers. ANNs represent an interesting option as they are able to generalize from a limited amount of training data and are known to cope well with noisy data. Additionally, ANNs are also able to learn features from raw data that are not obvious to a human observer and which might thus remain unaccounted for in a merely rule-based classifier. We engineered features based on target strength (TS) and also incorporated preprocessed raw data input for our feature vectors, so that feature learning is possible as well. Notable research in this area was conducted by Ayrulu et al., who performed tests with ANNs for obstacle detection with single-frequency pulses from piezoelectric sensors. They focused on engineered features, which were based on echo time of flight as well as magnitude, and showed possible merit of amplitude features. Dmitrieva et al. performed sonar target classification with chirp signals in water. They classified spherical targets that consist of different materials and demonstrated that ANNs perform best for their application in comparison to other machine learning-based classifiers, such as support vector machines. Eliakim et al. also used broadband chirps for robot navigation. They utilized generic features from audio processing for binary obstacle classification ("plant/no plant"). In this contribution, we present specific recognition of multiple different targets in air with a comparison of narrowband as well as broadband chirps, with ANNs as classifiers, based on specifically engineered TS features as ANN input. In the referenced literature, only one type of excitation signal is used in each article (single-frequency/narrowband or wideband) and no comparison is attempted. In addition, generic features for speech recognition, alternative raw-data representations and engineered amplitude features are fed into ANNs, but no directly calculated target strength estimate features, which are based on pulse-compressed echo signals in our case. First results from the authors have been published previously. The aim of our work is to engineer suitable features, perform target classification and compare feature quality based on classification results, so that suitable features for acoustic landmark identification can be selected as well as optimized.

Materials and Methods

Echo measurements were performed for different target positions and angles. During these, a signal was emitted by an ultrasonic speaker, reflected off a target, then received by a measurement microphone and digitized by an ADC interface card. Calculations for feature preprocessing and target classification were conducted on a desktop computer with Matlab R2018b.
Measurement Setup and Procedure

The measurement setup (as shown in Figure 1) consists of a two-axis translation stage, a rotation stage on top of the translation stage on which the targets were attached, a 1/4" Bruel&Kjaer measurement microphone (Type 4939-A-011) with an amplifier (G.R.A.S. 12AK), a wideband electrostatic ultrasonic speaker (Senscomp 7000 series, also formerly known as capacitive Polaroid transducers) with a custom-built high voltage amplifier (0 V to 400 V, 0 kHz to 200 kHz, sine) and a National Instruments data acquisition device for analog IO (NI-USB-6356, 1.25 MS/s, 16 bit). The speaker is capable of sound emission in a frequency range of 20 kHz up to more than 150 kHz at sound pressure levels above 85 dB (with the standard reference value of 20 µPa for sound in air), which has been experimentally verified. As an alternative, transducers based on ferroelectric materials, such as PVDF (Polyvinylidene Fluoride) and EMFi (Electro Mechanical Film), may be used as they are also suitable for wideband ultrasound emission, but they must be custom-built. The custom-built amplifier is necessary as the ultrasonic speaker requires a bias voltage of 200 V and a maximum peak-peak voltage of 400 V. The microphone and the speaker are mounted closely together (20 mm center distance) at the end of the x-axis translation stage. All measurements were performed in an anechoic chamber, so there was no influence of other sound sources from outside the chamber. It has to be noted that the chamber walls are optimized for absorption of audible sound but strongly reflect ultrasound waves. As a consequence, our whole measurement setup had to be optimized especially so that there is no detectable direct echo from itself nor the walls that would interfere with the main target echoes. This includes that the targets, microphone and speaker are located 1 m above the floor. Moreover, all parts are placed with the largest distance possible to the closest walls (at least 1 m). Surfaces facing the setup are covered with Basotect material, which absorbs acoustic waves in the ultrasonic range. In doing so, it could be achieved that echoes resulting from multiple reflections appear after the target echoes in the measured waveforms and do not interfere with these. For ANN training, validation and test, sample echoes were required. The targets were automatically moved along a grid and were also rotated: x-direction (0.5 m to 1.8 m, 0.1 m steps); y-direction (−0.15 m to 0.15 m, 0.05 m steps); angles (α, −60° to 60°, 15° steps, compare Figure 2). We applied downward modulated, rectified, wideband chirp signals for electrical excitation of the electrostatic Senscomp speaker (wb, 150 kHz to 20 kHz, 1 ms duration). In additional measurements, narrowband chirp signals (nb, 52 kHz to 48 kHz, 1 ms duration) were employed, since we are also interested in the performance that may be achieved if a common narrowband ultrasonic sensor is utilized, such as a piezoelectric-based transducer. Chirp signals were chosen as they make it possible to gain information regarding a large portion of the spectrum from a single echo.

Figure 2. Target shapes (disc, cylinder and hollow hemisphere) with characteristic dimension d and rotation axes (0° for objects facing speaker/microphone with "flat" side, arbitrary for cylinder due to rotation symmetry).
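The excitation signals above are fully specified by their start/stop frequencies and durations, so they are straightforward to reproduce. The following is a minimal sketch in Python using SciPy's chirp generator; the sample rate is taken from the acquisition hardware (1.25 MS/s), and the full-wave rectification at the end is only a rough model of the rectified drive signal mentioned above.

import numpy as np
from scipy.signal import chirp

FS = 1_250_000          # sample rate in Hz (NI-USB-6356: 1.25 MS/s)
T = 1e-3                # chirp duration: 1 ms
t = np.arange(0, T, 1 / FS)

# Wideband chirp, downward modulated from 150 kHz to 20 kHz.
wb = chirp(t, f0=150e3, f1=20e3, t1=T, method='linear')

# Narrowband chirp, 52 kHz down to 48 kHz.
nb = chirp(t, f0=52e3, f1=48e3, t1=T, method='linear')

# The electrostatic speaker was driven with rectified chirps; simple
# full-wave rectification approximates that drive signal.
wb_rect = np.abs(wb)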
Targets

We collected and characterized ultrasound echoes from six different targets. Those can be grouped into three basic target shapes (Figure 2): flat, convex and concave. The mentioned grouping was chosen because the shapes show quite different reflective behavior with respect to insonification angle and echo magnitude, as visualized by the acoustic fingerprints in Figure 3. Targets from related work can be grouped accordingly. More details regarding the acoustic fingerprints will be examined later in this section. As we wanted to use basic and generic shapes, we chose to use discs, cylinders and hollow hemispheres. For each shape, we analyzed two different sizes, so that binary classification was performed as far as size discrimination is concerned. The characteristic dimension d was chosen to be 60 mm and 100 mm, respectively. The disc thickness is 4 mm and the cylinders as well as the hemispheres have 2 mm wall thickness. All targets were manufactured with a 3D printer (Ultimaker 2) and consist of ABS plastic. The three different geometric shapes show characteristic acoustic fingerprints (Figure 3): spectral TS versus rotation angle plots (see Section 2.4 for detailed explanations regarding TS). At flat targets, a single reflection occurs which shows low angular spread and high magnitude. At convex targets, there is also a single reflection but with wide angular spread and, consequently, lower magnitudes for the reflected wave, as its energy is distributed across a larger volume. Inside concave targets, multiple reflections are possible, which may lead to specific spectral properties due to interference. Concave targets can also have retroreflecting properties, such as a corner reflector for radar systems. As a consequence, echoes for the selected shapes should significantly differ in magnitude and spectral composition, particularly with respect to the insonification angle. Thus, the shapes should be well-distinguishable. More detailed explanations regarding the hollow hemispheres' acoustic properties can be found in the literature. Note that the depicted acoustic fingerprints are for illustration purposes only and are based on additional measurements, whose data is not part of the ANN training data. The measurement procedure for the acoustic fingerprints is different as well. The data for the fingerprints was obtained at 1 m distance between speaker and targets ("echoes") or speaker and microphone ("transmission"), respectively. The targets were rotated from −90° to 90° at 5° steps (compare Figure 2 for the 0° orientation) and swept single-frequency sine burst excitation was used with a frequency step size of 0.5 kHz. To obtain TS values, the ratios between transmission and echo RMS values are calculated. This approach is necessary for reliable results, since the employed electrostatic ultrasonic speaker shows noticeable harmonic distortion at all excitation frequencies, which has been observed in laboratory measurements. Thus, before RMS calculations for the echo fingerprints, the signals are narrowly bandpass-filtered at each center frequency so that only the relevant baseband components are considered.

Neural Networks

Multi-layer perceptrons (MLPs) are applied as ANNs (Figure 4). We selected MLPs on purpose since they are more susceptible to variations in feature quality in comparison to other types of ANNs and should hence be better suited for estimates of feature performance. Our ANNs comprise four hidden layers in order to achieve good generalization and to avoid overfitting to training data.
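As a rough illustration of the classifier just described, here is a sketch using scikit-learn with the layer sizes reported below (10, 5, 5 and 3 neurons) and seven output classes. Note the assumptions: the paper trains with scaled conjugate gradient in Matlab, which scikit-learn does not provide, so 'adam' stands in here, and the feature matrix is a random placeholder rather than real echo features.

import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(700, 128))        # placeholder feature vectors
y = rng.integers(0, 7, size=700)       # 7 classes: 6 targets + "no target"

# 20 % training, 10 % validation (via early stopping), 70 % test, as in the paper.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.3, random_state=0)

clf = MLPClassifier(
    hidden_layer_sizes=(10, 5, 5, 3),
    solver='adam',                     # stand-in for Matlab's scaled conjugate gradient
    early_stopping=True,
    validation_fraction=1 / 3,         # 1/3 of the 30 % split = 10 % of all samples
    max_iter=2000,
    random_state=0,
).fit(X_train, y_train)
print(f"test accuracy: {clf.score(X_test, y_test):.3f}")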
Due to the multi-layer structure, more meaningful and thus compressed information for the classification task needs to be represented/passed by each node if compared to a single-layer network with the same total amount of nodes. This reduces the risk of overfitting to the training data. The hidden layers comprise 10, 5, 5 and 3 neurons, respectively. To obtain a well-working network, we utilized a randomized parameter study in which several different hidden-layer counts as well as node numbers were tested. The selected network architecture shows the best performance for all tested feature sets. The risk of overfitting was also minimized by choosing a low number of nodes for the hidden layers in comparison to other networks that are applied for similar classification tasks. Seven output classes were created: one for each target and a separate one for non-target samples. For each class, roughly the same number of samples was created so that there is an even distribution among the classes. A scaled conjugate gradient backpropagation algorithm with a cross-entropy error function was chosen for training (see the cited references for details). Supervised learning is performed as the target positions as well as the target classes are known from the measurement procedure (see Section 2.1). Therefore, labeled data sets could be created. Of each dataset, 20 % are picked for training, 10 % for validation and 70 % for testing. We chose the mentioned distribution as a larger training set might result in overfitting due to redundant echoes caused by symmetry in the measurement setup. ANN performance evaluation is based on prediction accuracy, precision, recall as well as F1 scores for test sets that have no common samples with their corresponding training or validation sets (see the cited references for details). The measures can be obtained from confusion matrices, which show classification result counts, grouped by actual input classes and predicted output classes. Accuracy is to be maximized towards 100 % and is defined as

$a = \frac{n_\mathrm{hit}}{n_S}$, (1)

where $a$ denotes the total accuracy, $n_\mathrm{hit}$ the total number of correct classifications in the test set and $n_S$ the total number of samples in the test set. Accuracy alone is usually not sufficient as a performance measure because it is only a measure for overall performance but does not contain information on ANNs' performances for different classes. Thus, an ANN's overall performance can seem very good, but it will still be possible for single classes to be identified very badly if multiple classes exist and especially if data set sizes vary for different classes. Hence, for evaluation of single-class classification performance, precision and recall are applied. Precision and recall values are both to be maximized towards 100 % and are calculated for each target class by the given equations:

$p = \frac{n_\mathrm{TP,cls}}{n_\mathrm{FP,cls} + n_\mathrm{TP,cls}}$ (2)

and

$r = \frac{n_\mathrm{TP,cls}}{n_\mathrm{FN,cls} + n_\mathrm{TP,cls}}$, (3)

in which $p$ is precision, $r$ is recall, $n_\mathrm{TP,cls}$ is the total number of true positives for a given class, $n_\mathrm{FP,cls}$ is the total number of false positives for a given class, and $n_\mathrm{FN,cls}$ is the total number of false negatives for a given class. Precision is a measure for certainty of correct classification for a sample of a specific output class, whereas recall is the percentage of correctly identified samples of available samples for a target class. Recall is also known as sensitivity in statistics. The meaning of the terms true positive, true negative, false positive and false negative shall be illustrated.
For the multi-class case here, we always consider the current target class, for which precision and recall are calculated, as positives, and all other classes are summarized as negatives. This means for the mentioned terms:
• true positive: correct classification of the current target class (e.g., hemisphere classified as hemisphere),
• true negative: correct classification of the current non-target class (e.g., non-hemisphere classified as non-hemisphere, where non-hemisphere may be anything but a hemisphere),
• false positive: wrong classification of the current non-target class (e.g., non-hemisphere classified as hemisphere),
• false negative: wrong classification of the current target class (e.g., hemisphere classified as non-hemisphere).

Evaluation of feature performance based on precision and recall can be cumbersome, since a 2D plot must be created for each trained ANN. As a consequence, the F1 score is introduced as a scalar measure which combines precision and recall. The F1 score is to be maximized towards 100 % and is calculated as

$F_1 = \frac{2 \cdot p \cdot r}{p + r}$. (4)

Care must be taken for classes with small sample counts, but in our case this does not apply as our samples are evenly distributed among the different classes. For more detailed explanations regarding ANNs and their performance measures, see the cited references.
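The measures above follow directly from a confusion matrix. A small sketch, assuming scikit-learn's metrics module and placeholder predictions for the seven classes:

import numpy as np
from sklearn.metrics import (accuracy_score, confusion_matrix,
                             precision_recall_fscore_support)

y_true = np.array([0, 1, 2, 3, 4, 5, 6, 1, 2, 2])   # placeholder labels
y_pred = np.array([0, 1, 2, 3, 4, 5, 6, 2, 2, 1])   # placeholder predictions

a = accuracy_score(y_true, y_pred)                   # Eq. (1): n_hit / n_S
p, r, f1, _ = precision_recall_fscore_support(
    y_true, y_pred, average=None, zero_division=0)   # Eqs. (2)-(4), per class

print(confusion_matrix(y_true, y_pred))
print(f"accuracy = {a:.2f}")
print("per-class precision:", np.round(p, 2))
print("per-class recall:   ", np.round(r, 2))
print("per-class F1:       ", np.round(f1, 2))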
Echo Preprocessing and Features

The recorded raw echo signals (top left in Figure 5) are processed before classification by ANNs. The signals are preprocessed since known relations can be extracted from data efficiently with rule-based approaches, and so the ANNs do not need to learn those relations from the training data. Accordingly, learning focuses on aspects of information that are not modeled explicitly but may be important for the given classification task. The data which contains relevant information is part of the input to ANNs and is generally denoted as "input feature vector" or just "features" in the machine learning context. Possible input features are raw data, alternative raw data representations (e.g., based on transformation to frequency or time-frequency spaces) as well as raw data on which basic mathematical operations are performed, such as multiplication of elements, squaring elements, calculation of various norms etc. Such features are often used for deep learning. In addition, there are also specifically engineered features that are motivated by domain knowledge (common for traditional machine learning and pattern recognition) as well as combinations of previously mentioned features. For our application, we created and evaluated the aforementioned feature types, which are explained later on in this section. The main echo preprocessing and feature calculation steps are sketched in Figure 5. First, bandpass filtering is performed according to the excitation signal bandwidth. This is done to remove out-of-band noise, which is not related to the target echoes and is not relevant for classification. Then, each echo signal's cross-correlation r_yx with the corresponding emitted acoustic chirp signal is calculated (top right part of Figure 5). Thereby, much irrelevant/unrelated in-band information is filtered out as well, such as noise and uncorrelated sounds from other sources. Consequently, the ANNs do not need to learn how to filter out a large portion of the information that is not relevant. The peaks in correlated signals can now be considered echoes from targets. Accordingly, peak positions correspond to targets' propagation delays. This approach is also known as pulse compression in the area of sonar and radar systems. The emitted chirp signal had been recorded before at 1 m distance and was averaged ten times to improve its signal-to-noise ratio, in contrast to the measured target echoes, for which no averaging was performed (single echoes).

Figure 5. Main calculation steps from the raw target/non-target echo pressure signal p_RX to the spectrogram feature (Spect), the total TS estimate feature (TStot, via r_t/r_nt from the cross-correlation r_yx with the excitation signal) and the spectral TS estimate feature (TSspect). ROIs (ROI_t/ROI_nt) are illustrated by dotted lines and enclosed arrows. Echo delays are indicated by t_t and t_nt.

From each of the recorded echoes (raw as well as pulse-compressed), a region of interest (ROI) of 2 ms is selected for feature calculations (indicated by dotted lines with enclosed arrows in Figure 5). For each target's echo, its ROI (ROI_t in Figure 5) is centered around the largest corresponding peak in its pulse-compressed echo. Peak detection in pulse-compressed echo signals is a common method for identification of possible targets in sonar as well as radar systems and is therefore assumed to be a valid step before target classification in our case. ROIs for non-target samples (ROI_nt in Figure 5) are randomly put outside target ROIs, but inside possible ranges for target echoes. Non-target ROIs are also centered around their highest peak in the pulse-compressed waveforms. ROIs in general are selected so that feature calculations do not need to be performed on the whole recorded waveforms, which would lead to larger ANN input feature vectors and therewith increased calculation cost. Another advantage of ROI selection is that only those parts of the recorded signals are presented to the ANNs which are actually related to the targets. So, no features are learned which may result from the measurement setup, such as additional echoes that might appear due to multiple reflections off targets and the setup. In doing so, training only happens on relevant parts of our echo signals and the risk of problems due to long feature vectors is largely reduced, e.g., overfitting due to the curse of dimensionality. The ROI length does not represent the proposed sonar's working range, which was tested up to a target distance of 1.8 m in our case. Therefore, total echo recording durations were set to 15 ms. The distance limitation is given due to the sizes of the translation stages as well as the anechoic chamber dimensions (compare also Section 2.1). Tests are planned for greater distances outside the anechoic chamber in the future. In particular, we consider three features: (i) Spect: the raw echoes' spectrogram representation (bottom left plot in Figure 5), (ii) TStot: an estimate of total TS and (iii) TSspect: a specifically engineered feature, which is related to the targets' spectral TS (bottom right plot in Figure 5). TStot is based on the pulse-compressed echoes' peak magnitudes (r_t, r_nt in Figure 5), whereas TSspect is based on a short-time Fourier transform (STFT) of the pulse-compressed echoes. Specific relations will be shown later in this section and in the supplementary materials in detail. For Spect and TSspect, a frequency ROI was selected according to the excitation chirps' bandwidths.
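A compact sketch of the pulse-compression and ROI-selection steps described above, with synthetic placeholder signals standing in for the recorded echoes:

import numpy as np
from scipy.signal import chirp, correlate

FS = 1_250_000
t = np.arange(0, 1e-3, 1 / FS)
tx = chirp(t, f0=150e3, f1=20e3, t1=1e-3)         # emitted wideband chirp

rx = np.zeros(int(15e-3 * FS))                     # 15 ms recording
delay = int(6e-3 * FS)                             # synthetic echo after 6 ms
rx[delay:delay + tx.size] = 0.05 * tx              # attenuated target echo
rx += 0.005 * np.random.default_rng(0).normal(size=rx.size)

r_yx = correlate(rx, tx, mode='valid')             # pulse compression
t_peak = np.argmax(np.abs(r_yx))                   # echo propagation delay (samples)

half = int(1e-3 * FS)                              # 2 ms ROI centred on the peak
roi = r_yx[max(t_peak - half, 0):t_peak + half]
print(f"estimated delay: {t_peak / FS * 1e3:.2f} ms, ROI length: {roi.size} samples")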
Based on the mentioned features, we defined different feature sets for performance comparison. A feature vector was chosen to consist of a feature set for three adjacent measurement grid positions. This is equivalent to a sonar sensor traveling past a target and recording multiple echoes (e.g., a robot passing by). Also, to each feature vector, the echo propagation delay is added as an additional element. In order to obtain 1D feature vectors for our ANNs, 2D data is flattened into 1D arrays. We chose the spectrogram (STFT) as the time-frequency-based raw data representation, since this is a common approach for speech recognition and because ANNs are also known to handle pictorial information quite well. Another possible time-frequency signal representation is the wavelet transform, which was examined by Ayrulu and Barshan. TStot and TSspect are related to an object's TS, which is a relative measure for acoustic intensity reflected off a target. A compact description of the relations is given in the rest of this section; more detailed explanations can be found in the Supplementary Materials. Figure 6 shows the relations for acoustic intensity levels along an echo's transmission path, which can be put as follows (in analogy to the sonar equation):

$IL = SL - 2\,TL + TS$.

Here, IL is the input level at the receiver, SL is the source level at the transmitter, TS is the target strength and TL is the transmission loss, respectively. For our calculations, the underlying assumption is that TL is only caused by geometric spreading. Compensation of frequency-dependent effects is left to the ANNs. The echo p_RX,T(t) from the target can consist of multiple reflections i at times t_i with different magnitudes a_i:

$p_\mathrm{RX,T}(t) = \sum_i a_i \, p(t - t_i)$.

The total target strength TS_1 for a target with a single echo, such as a disc or a cylinder, is used for TStot and consists of a variable part ∆TS_1 as well as a constant part TS_1,const (see also the Supplementary Materials), with

$TS_{1,\mathrm{const}} = 20 \log_{10} \frac{c \, t_0}{2 \cdot 1\,\mathrm{m}} - 10 \log_{10} \frac{r^T_\mathrm{xx,TX}(0)}{r_0}$,

where $t_0$ is an arbitrary time constant, $t_T$ is the target's main peak in the pulse-compressed echo, $r^T_\mathrm{yx}(t_T)$ is the pulse-compressed echo's value at $t_T$, $r_0$ can be chosen as an arbitrary constant, $c$ is the speed of sound in air and $r^T_\mathrm{xx,TX}(0)$ is the acoustic excitation signal's auto-correlation function value at time 0, respectively. For target echoes, $r_t$ is used for $r^T_\mathrm{yx}(t_T)$ and for non-target echoes, $r_{nt}$ is used (compare Figure 5). We suppose that the impinging echo waves' surface curvature across the microphone surface is negligible due to the small membrane area and the wave's geometric spread over a large propagation distance in comparison, which leads to a plane wave assumption. Hence, intensity estimates are deduced from pressure measurements, since sound pressure level and acoustic intensity level can be assumed to be equal for plane waves. This simplification suffices in our case, as we are mainly interested in an estimate of target strength instead of a highly precise measurement. Compensation of possible variations due to the simplifications is left to the ANNs.
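A hedged sketch of the TStot computation: the constant part follows the expression for TS_1,const above, while the expression for the variable part ∆TS_1 is not reproduced verbatim in this text, so the form below is reconstructed by analogy with the spectral variant (Equation (11) in the next paragraph) and should be checked against the supplementary materials.

import numpy as np

C_AIR = 343.0          # speed of sound in air, m/s
T0 = 1.0               # arbitrary time constant t_0, s
R0 = 1.0               # arbitrary reference correlation value r_0

def ts_total(r_yx_peak, t_peak_s, r_xx_tx_0):
    """Total target-strength estimate (TStot) in dB from a pulse-compressed echo.

    r_yx_peak: pulse-compressed echo value at the main peak, r_yx(t_T)
    t_peak_s:  echo propagation delay t_T in seconds
    r_xx_tx_0: excitation auto-correlation at lag 0, r_xx,TX(0)
    """
    # Variable part: reconstructed by analogy with Eq. (11), NOT taken
    # verbatim from the paper; treat as an assumption.
    dts = 20 * np.log10(abs(r_yx_peak) / R0) + 20 * np.log10(t_peak_s / T0)
    # Constant part per TS_1,const above.
    ts_const = (20 * np.log10(C_AIR * T0 / (2 * 1.0))
                - 10 * np.log10(r_xx_tx_0 / R0))
    return dts + ts_const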
Apart from the total target strength TS_1, the spectral target strength TS(f) also contains significant information for a target, especially for ones with multiple echoes such as a hollow hemisphere. TS(f) depends on the frequency f and consists of a variable part ∆TS(f) as well as a constant part TS_const(f), with

$\Delta TS(f) = 10 \log_{10} \frac{S^T_\mathrm{yx}(f)}{S_0} + 20 \log_{10} \frac{t_T}{t_0}$ (11)

and

$TS_\mathrm{const}(f) = 20 \log_{10} \frac{c \, t_0}{2 \cdot 1\,\mathrm{m}} - 10 \log_{10} \frac{S^T_\mathrm{xx,TX}(f)}{S_0}$, (12)

where $S^T_\mathrm{yx}(f)$ denotes the cross-power spectral density of the echo signal with the excitation signal, which can be obtained from $r^T_\mathrm{yx}(t)$ by Fourier transform. $S_0$ can be chosen arbitrarily and $S^T_\mathrm{xx,TX}(f)$ is the excitation signal's auto-power spectral density, which can be obtained from $r^T_\mathrm{xx,TX}(t)$ by Fourier transform. The derivation of the relations can be found in the supplementary materials. We decided to represent the signal by an STFT for TSspect due to the same reasons as for the raw signal. The window size was chosen to be close to the excitation chirp duration (see the cited reference for considerations regarding STFT window size selection). Consequently, a window size of 1024 samples and an overlap of 50 % are set for the STFT. It is apparent from the TS equations that arbitrary excitation signals can be selected for target insonification. Therefore, the use of rectified signals is possible and harmonic distortion is not a problem. We also performed tests with electrical sine as well as rectified chirp excitation and no difference in ANN performance results was detectable. Thus, less complex as well as smaller, lighter and cheaper amplifiers can be built for the ultrasonic speakers in comparison to amplifiers for sine signal excitation. This is especially important for mobile robotic applications.
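The spectral feature can be sketched along the same lines, assuming Equations (11) and (12) and identifying the magnitude-squared STFT of the pulse-compressed ROI with the cross-power spectral density S_yx(f); that identification, like the placeholder constants, is an assumption of this sketch. Window length and overlap follow the values stated above.

import numpy as np
from scipy.signal import stft

FS = 1_250_000
C_AIR, T0, S0 = 343.0, 1.0, 1.0    # speed of sound, arbitrary t_0 and S_0

def ts_spectral(r_yx_roi, t_peak_s, s_xx_tx):
    """Spectral TS estimate (TSspect) in dB per STFT bin (hedged sketch).

    r_yx_roi: pulse-compressed echo ROI
    t_peak_s: echo propagation delay t_T in seconds
    s_xx_tx:  excitation auto-power spectral density sampled at the STFT bins,
              shape (n_freq_bins,)
    """
    f, tau, Z = stft(r_yx_roi, fs=FS, nperseg=1024, noverlap=512)
    s_yx = np.abs(Z) ** 2                                    # |STFT|^2 ~ S_yx(f)
    dts = (10 * np.log10(np.maximum(s_yx, 1e-20) / S0)
           + 20 * np.log10(t_peak_s / T0))                   # Eq. (11)
    ts_const = (20 * np.log10(C_AIR * T0 / 2.0)
                - 10 * np.log10(np.maximum(s_xx_tx, 1e-20) / S0))  # Eq. (12)
    return dts + ts_const[:, None]   # broadcast constant part across time frames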
Results and Discussion

For each feature set, 20 ANNs were trained and evaluated by their accuracy, as depicted in Figure 7a. The best as well as the worst ANN's accuracy are shown for each feature set. In addition, mean accuracy as well as standard deviation are given and provide information regarding the number of necessary ANN training runs to find a well-performing solution. It can be seen that the best performing networks are found for Spect, FC1, TSspect and FC2. For those, the best accuracies are close together. It can be concluded from Figure 7a that adding TStot to Spect (FC1) leads to a noticeable improvement. We assume the ANNs do not need to learn how to derive TStot from spectral data by themselves in this case. Adding TStot to TSspect (FC2) leads to almost no noticeable improvement. Supposedly, this is the case since TStot will be easy to derive for an ANN if TSspect is given. If only TStot is used, performance will not be as high as for the STFT-based features, but less computational effort is necessary, as will be discussed later in this section. Consequently, TStot can be suitable for systems with limited resources, for which a tradeoff between classification accuracy and computational effort must be made. We also looked at ANN performance for different classes with respect to the feature sets, as indicated by the F1 scores in Figure 7b. In this article, ANN accuracies and F1 scores are presented, but during article preparation, confusion matrices as well as precision and recall values were additionally checked. The x-axis labels correspond to the following classes: nt (no target), he30 (hemisphere 30 mm radius), he50 (hemisphere 50 mm radius), cy30 (cylinder 30 mm radius), cy50 (cylinder 50 mm radius), di30 (disc 30 mm radius) and di50 (disc 50 mm radius), respectively. We compared the results of the networks which show the best accuracy for their feature sets, since those are the ones that would be selected for actual system deployment. It can be noticed that high scores are achieved for cylinders and hemispheres, but that there is a significant discrepancy for discs. Accordingly, disc size discrimination is the main challenge here, and the target classes are thus regrouped by shapes to check that assumption, as shown in Figure 8a,b. So, only shape classification is performed and no size misclassifications are considered. It can be observed that significantly higher performance is achieved in this case, especially for disc-shaped targets. We suspect the main reason for difficulties with disc size discrimination is that small changes in insonification angle lead to drastic changes in echo magnitude. This can be deduced from the discs' acoustic fingerprints, for which there is only a very narrow angle range that shows significant TS, with a steep fall of TS with increasing angles (see Figure 3 for comparison). Results for narrowband excitation are presented in Figures 9 and 10. It can be seen that, for the given setup and task, the best results are close to the ones for wideband excitation. Accuracy values as well as F1 scores are slightly lower for classification with respect to target size in comparison to the wideband case. Hence, additional information regarding target size will most likely be contained in wideband echo signals, which should consequently be used if targets of the same shape but differing sizes shall be set up and correctly identified in a robot environment. The targets that are approached by bats, such as small insects and plant pitchers as well as blossoms, are more diverse and show smaller geometric details, such as body and limb structure as well as leaf shape, which lead to the necessity of wideband signals with narrow cross-correlation functions for proper resolution and subsequent classification by bats. In contrast, the targets that are used for our application do not possess such small variations in geometry as well as size; thus, narrowband excitation seems to suffice, especially if only target shapes shall be identified. For narrowband excitation, more training runs may be necessary to find the best solutions for some features, due to lower mean values. In addition, TSspect and FC2 show reduced performance values in comparison to wideband excitation. Presumably, this is due to wider peaks in the calculated cross-correlation functions, which are caused by the smaller signal bandwidth. Consequently, there are fewer sharp and distinctive features in the pulse-compressed waveforms which need to be extracted by ANNs. The good narrowband results motivate replacement of the electrostatic speaker by robust and cheap piezoelectric transducers, which ought to be part of future work. Furthermore, the measurement microphone may be replaced by MEMS microphones, which are also much lower in price and more robust. Calculation times for ANN training, execution, feature preprocessing as well as total execution are presented in Table 1 as benchmarks, to give the reader an impression of computation cost. Values were obtained on a desktop computer with an Intel Core i7-8700 CPU, 32 GB RAM, Ubuntu 18.04 LTS operating system, Matlab R2018b and no GPU acceleration available. Averaging was performed across all samples and ANNs.
It can be noticed that TStot calculations require the least time and that the spectrogram-based features' computation times are noticeably longer, with negligible differences amongst them. We presume the time increase is primarily caused by STFT calculations and larger feature vectors. Another observation that can be made is that ANN training for narrowband excitation takes longer than for the wideband case, presumably since more relevant information for classification needs to be extracted from a smaller frequency range. The benchmarks also indicate that an implementation with continuous operation should be possible. This is the case since the recording time for echo signals (15 ms in this contribution) is larger than the total calculation times (preprocessing and ANN execution, less than 10 ms), so processing can be finished before the end of the recording of subsequent echo signals. Please note that the currently implemented code is neither optimized for execution speed nor for hardware utilization; consequently, even better performance should be possible. For an embedded implementation on a robot, elaborate hardware/software codesign with widespread design space exploration is supposed to be beneficial to achieve an efficient real-time-capable implementation. For that purpose, different hardware architectures can be employed, such as microcontrollers, DSPs (digital signal processors), FPGAs (Field Programmable Gate Arrays) or complete SoCs (Systems on Chip). The main processing steps (cross-correlation, peak detection, STFT and ANN execution) have a major impact on calculation cost, but can be parallelized to a high degree, and various other techniques can also be employed to achieve a suitable implementation with regard to application requirements on the aforementioned hardware. Potential solutions can comprise pipelining, divide-and-conquer approaches, approximate computing, replacement of float operations by integer arithmetic where applicable, LUTs (look-up tables) etc. Also, calculations of the constant parts of TStot and TSspect can be omitted (Equations (9) and (12)), because only their variable parts are relevant for target differentiation. Certain limitations of the research need to be accounted for, which are to be addressed in ongoing as well as future work. Classification was performed with a well-defined set of targets and non-target echoes that can be well separated from the targets. It thus remains to be investigated how robustly the system will perform if noisy echoes from cluttered spaces are included, which may often be the case for applications outside the laboratory environment. Most likely, narrowband performance will degrade, as less spectral information is available. Additionally, we suspect that TSspect and FC2 will perform noticeably better than Spect and FC1, as uncorrelated parts of the recorded signals are filtered out during preprocessing.

Funding: This research received no external funding.

Acknowledgments: We especially thank Hans-Michael Günther from the Chair of Sensor Technology in Erlangen. He provided practical electronics support and designed as well as built the high-voltage amplifier for the ultrasonic speaker.

Conflicts of Interest: The authors declare no conflict of interest.

Abbreviations

The following abbreviations are used in this manuscript:

Appendix A. Performance Results

The results from Figures 7-10 are given as tables in order to give the reader the opportunity to better identify as well as comprehend relations and differences in the data.
// LibHTML/CSS/StyleValue.cpp
#include "StyleValue.h"

StyleValue::StyleValue(Type type)
    : m_type(type)
{
}

StyleValue::~StyleValue()
{
}

NonnullRefPtr<StyleValue> StyleValue::parse(const StringView& str)
{
    return adopt(*new PrimitiveStyleValue(str));
}
/**
 * Unbind context.
 *
 * \param dpy the display handle.
 * \param scrn the screen number.
 * \param draw drawable.
 * \param read Current reading drawable.
 * \param gc context.
 *
 * \return \c GL_TRUE on success, or \c GL_FALSE on failure.
 *
 * \internal
 * This function calls __DriverAPIRec::UnbindContext, and then decrements
 * __DRIdrawablePrivateRec::refcount which must be non-zero for a successful
 * return.
 *
 * While casting the opaque private pointers associated with the parameters
 * into their respective real types it also assures they are not \c NULL.
 */
static GLboolean driUnbindContext3(__DRInativeDisplay *dpy, int scrn,
                                   __DRIid draw, __DRIid read,
                                   __DRIcontext *ctx)
{
    __DRIscreen *pDRIScreen;
    __DRIdrawable *pdraw;
    __DRIdrawable *pread;
    __DRIcontextPrivate *pcp;
    __DRIscreenPrivate *psp;
    __DRIdrawablePrivate *pdp;
    __DRIdrawablePrivate *prp;

    if (ctx == NULL || draw == None || read == None) {
        return GL_FALSE;
    }

    pDRIScreen = glx_find_dri_screen(dpy, scrn);
    if ( (pDRIScreen == NULL) || (pDRIScreen->private == NULL) ) {
        return GL_FALSE;
    }

    psp = (__DRIscreenPrivate *)pDRIScreen->private;
    pcp = (__DRIcontextPrivate *)ctx->private;

    pdraw = __driFindDrawable(psp->drawHash, draw);
    if (!pdraw) {
        return GL_FALSE;
    }
    pdp = (__DRIdrawablePrivate *)pdraw->private;

    pread = __driFindDrawable(psp->drawHash, read);
    if (!pread) {
        return GL_FALSE;
    }
    prp = (__DRIdrawablePrivate *)pread->private;

    (*psp->DriverAPI.UnbindContext)(pcp);

    if (pdp->refcount == 0) {
        return GL_FALSE;
    }
    pdp->refcount--;

    if (prp != pdp) {
        if (prp->refcount == 0) {
            return GL_FALSE;
        }
        prp->refcount--;
    }

#if 0
    pcp->driDrawablePriv = NULL;
    pdp->driContextPriv = &psp->dummyContextPriv;
#endif

    return GL_TRUE;
}
A moment nearly six years in the making. Fire and flame returned to the historic LC-39A launch pad at the Kennedy Space Center as SpaceX conducted the static fire of its Falcon 9 rocket tasked with lofting the SpX-10 Dragon resupply mission to the International Space Station. The ignition of the Falcon 9’s engines occurred at 16:30 local time on Sunday, 12 February – marking the first time since the Space Shuttle Atlantis left Kennedy on 8 July 2011 that the sound and thrust of rocket engines have graced pad 39A. The path to static fire: LC-39A was already deep into its reconfiguration – more accurately described as a complete rebuild – for the Falcon rocket family at the time of the AMOS-6 static fire mishap on 1 September 2016. At the time of AMOS-6, Pad A was nowhere near the top of the priority list for SpaceX, which was marching through – at a quick cadence – its 2016 launch manifest with SLC-40 at the Cape Canaveral Air Force Station. At the time of the mishap, SpaceX had hoped to have LC-39A at the Kennedy Space Center (KSC) “ready” to host its first Falcon mission by November 2016. Once the AMOS-6 accident occurred, the readiness of 39A quickly catapulted to the top of the priority list for SpaceX as the company completed its investigation into the static fire RUD (Rapid Unscheduled Disassembly) and successfully returned the Falcon 9 to flight with the Iridium NEXT mission in January 2017 from Vandenberg Air Force Base in California. While LC-39A – from the Shuttle era – contained elements SpaceX could use, such as a flame trench, a launch pad, and a sound suppression system, extensive modifications to the pad had to be made to accommodate the Falcon 9 and Falcon Heavy rockets. Perhaps the most visible of these changes involved the construction of the Horizontal Integration Facility (HIF) on top of the former crawlerway inside the pad perimeter gate as well as the installation of rainbirds to douse the pad and rockets with sound suppression water. Additionally, electrical connections, data cables, propellant feed lines, and a host of other reconstructive efforts had to be undertaken at the pad to accommodate the Falcon rocket family. In essence, SpaceX was constructing an entirely new pad at KSC – with the physical, aboveground features of 39A remaining largely as they were at the completion of the Shuttle program – with the Fixed Service Structure (FSS) and portions of the Rotating Service Structure (RSS) still standing. Moreover, a unique element that has drawn attention to Pad A’s readiness to host a Falcon rocket is that the pad itself is the critical item in determining when the first Falcon flight from 39A can occur. Normally, delays in development and construction of a rocket outpace the delays to pad construction, verification, and validation. In this case, the use of LC-39A and the delays to its readiness were thrust into the spotlight after AMOS-6. Nonetheless, the SpaceX team is to be commended for the superb and time-consuming work they have undertaken at the pad. In the last two weeks, highly visible progress toward pad readiness occurred – following the pad’s earlier “completion and activation” for verification and validation testing.
In late January, the Transport/Erector/Launcher (TEL) went vertical at the pad for the first time – testing connections and the new “throwback” release feature (which sounds more dramatic than it will be in reality) that will debut with SpX-10, in which the TEL will retract at T0 instead of earlier in the count as it has on all other Falcon 9 launches. With those tests complete, SpaceX continued to tweak and refine the pad and TEL. By 5 February, workers and engineers were into final documentation and validation of Pad A – an internal review process to confirm that the pad was indeed rocket ready. At the same time, the first and second stages of the Falcon 9 were mated together in the HIF by the night of 6 February, with the TEL moving into the HIF on the 8th for mating to the Falcon 9. On 7 February, SpaceX formally amended their request to the Eastern Range for the date of the static fire, moving it from Wednesday the 8th to Thursday the 9th within a window of 1600 EST – 2200 EST. Curiously, SpaceX also requested a backup opportunity for Friday the 10th during the same 1600 – 2200 EST time window. That window was subsequently extended through midnight local time. However, the attempt was then moved to Saturday for unknown reasons, with the window opening at 1000 EST. SpaceX engineers still used Friday to maneuver the vehicle to the vertical position to allow for final checks ahead of Saturday’s planned fueling. The reason for the close-to-static-fire postponements likely stemmed from the fact that once the Falcon 9 was mated to the TEL in the HIF, it then had to be transported up the ramp of 39A, positioned to vertical, and connected to the pad – at which point a whole new series of tests that could only be performed once a Falcon was on the pad could begin. Saturday’s attempt then suffered an unknown setback, resulting in the test moving to Sunday. As with any new launch pad, identification and fixes of anomalies are to be expected. During Sunday’s attempt, two issues required work ahead of what was an eventual 16:30 EST firing of the Falcon 9’s engines for 3.5 seconds – later confirmed by SpaceX. Ahead of the test, security notices to all KSC personnel announced that security would set up roadblocks to the LC-39A and 39B areas. This followed the expected and previously announced closure of Pad B for the static fires and launches of the Falcon 9 from Pad A. The initial notice stated that Pad B would be closed all day for static fires and closed during the launch periods for all Falcon 9 missions from LC-39A. A subsequent release noted the exact reasons why 39B would be closed for the events. According to a document acquired by NASASpaceflight.com and available for download on L2, pad 39B lies within the 2-mile radius around 39A that is the “contamination control buffer around the payload once it arrives in the LC-39A area.” While this alone would not be enough to close Pad B for static fires, the only roadway accessing Pad B lies right at the edge of the 5,100 foot radius around LC-39A that is the “SpaceX keep out zone” for the static fire as coordinated by SpaceX and the Federal Aviation Administration. For reference, the same document reveals that the Blast Danger Area (BDA) for the static fire is a 2,700 foot radius around Pad A – which does not come close to Pad B.
The document, while only encompassing the static fire, did lend some explanation for why Playalinda Beach, located north of LC-39A, was to be open to the public for the static fires and launches of Falcon 9s from Pad A – though that was changed Thursday, and the beach will now be closed for both events. For every Space Shuttle launch, Playalinda Beach was a restricted area, as it lay within the BDA for the Shuttle; since the Falcon 9 has a much smaller BDA, the beach was originally to have remained open for Falcon 9 launch viewings. A subsequent risk assessment determined that closing the beach was the better option.

Moreover, the document confirms that the KSC Emergency Operations Center will be active and will monitor the static fires of Falcon 9s from Pad A. The document states that “in the unlikely event of an anomaly, [the KSC Emergency Operations Center] will assist in the response and communicate any necessary actions to the appropriate personnel; even if there were a catastrophic anomaly [at] LC-39A, it would pose no danger or threat to personnel.”

For the SpX-10 static fire, once the Falcon 9 was vertical on Pad A and its electrical, data, and propellant connections were secured and tested, the vehicle performed a standard countdown with the SpaceX launch team to verify all of the connections and propellant flow capabilities of the new pad. The countdown culminated with a 3.5-second firing of all nine Merlin 1D engines at the base of the Falcon 9 first stage. After the engine firing, the teams moved into post-fire securing operations, safing the vehicle before de-tanking it and rolling it back to the HIF for payload mate.

SpX-10 launch date moved to Feb. 18 due to range conflict:

At the beginning of the week, SpaceX and NASA had been targeting a launch date of 14 February, during an 1131-1136 EST window, for the CRS-10 mission to the International Space Station. However, that date subsequently moved to a new official target of 18 February due to a range conflict. The conflict, curiously, was not with the ability of the Falcon 9 to launch but rather with the ability of the first stage to return to Landing Zone 1 (LZ-1) back at the Cape. While the precise nature of the conflict is not known, it is understood that a portion of the range assets necessary for the Falcon 9 first stage’s return was already spoken for by another customer during 14-17 February. In coordination with NASA, SpaceX slipped the launch to the 18th to ensure that the first stage could indeed return to the Cape. Liftoff is currently set for 1001 EST on 18 February.

An historic pad marks KSC transition to a multi-user spaceport:

Flames last touched LC-39A on 8 July 2011 during the final flight of the Space Shuttle Program. After that, Pad A lay dormant, its immediate future clouded in uncertainty as NASA searched for commercial companies that might wish to take possession of the pad – which NASA had deemed largely unnecessary for the Space Launch System program, aside from the use of its LH2 storage facility for the massive BEO (Beyond Earth Orbit) rocket’s hydrogen fuel supply. This uncertainty persisted for three years until the formal signing of documents on 14 April 2014 that triggered the beginning of a 20-year lease of pad 39A and its facilities to SpaceX.

With all eyes now on SpX-10 and the first SpaceX flight from 39A, the pad is also set to host in 2018 the historic first launch of humans aboard commercial rockets as well as the first human-rated spacecraft to Mars.
With this, SpaceX will add to the impressive career of 39A, one that stretches back to 1962. In May 1961, when President Kennedy announced the goal of “landing a man on the moon and returning him safely to Earth” before the end of the decade, it quickly became evident that the existing pads at the Cape Canaveral Air Force Station would not be sufficient for the mighty Saturn V rocket. In 1962, NASA entered into agreements to acquire 218 square miles of land upon which the Launch Operations Center – subsequently renamed the Kennedy Space Center following President Kennedy’s assassination – was constructed.

Originally, the plan for KSC called for three launch pads, with the complex officially named Launch Complex 39 and its pads receiving alphabetic designations of “A” to “C” – with 39A being the northernmost pad and 39C the southernmost. As less funding than expected materialized, plans for the original 39A were scrapped, and NASA pursued a two-pad system that reorganized the pad numbering. Thus, the southernmost pad – which entered its construction phase first – became pad 39A instead of 39C.

Construction workers completed Pad 39A in 1965, and final outfitting proceeded into 1967. On 26 August 1967, Pad A received its first rocket – the Apollo 4 Saturn V. With the launch of Apollo 4 came the first of 94 launches from Pad A to date – 12 for Apollo and 82 for Shuttle. Through those 94 missions, none was as historic as the pad’s use on 16 July 1969 to launch Neil Armstrong, Buzz Aldrin, and Michael Collins on Apollo 11 for the first human lunar landing.

Following the launch of all the crewed Apollo lunar landing missions, Pad A hosted the launch of the U.S.’s first space station, Skylab, on 14 May 1973. With the launch of Skylab – the last un-crewed launch to occur from Pad A – the pad was deactivated, and formal preparations began to transform it for the Space Shuttle.

In 1979, 39A hosted its first Space Shuttle stack as the Shuttle Enterprise arrived for fit checks and validation and verification efforts that could only be achieved with a Shuttle at the pad. The use of Enterprise for these checks made Pad A the only pad to host all six Shuttle Orbiters. The Space Shuttle era then began from the pad on 12 April 1981 – with Pad A hosting the inaugural flights of Columbia, Challenger, Discovery, and Atlantis. The pad hosted the first 24 Shuttle missions exclusively – from STS-1/Columbia to STS-61C/Columbia.

Pad A also hosted the very first construction flight of the International Space Station (Endeavour’s STS-88 mission in December 1998) as well as the mission that completed U.S. assembly of the Station – Endeavour’s STS-134 mission in May 2011. Sadly, Pad A’s history includes tragedy, as the pad was the launching point for Columbia and her crew on 16 January 2003 on STS-107. Then, as it had at the beginning of the Shuttle program, Pad A hosted the final 18 Shuttle missions exclusively – from STS-117/Atlantis to the emotional final flight of STS-135/Atlantis.

Through all of this history, Pad A has been a single-user, single-spacecraft launch platform. While SpaceX’s exclusive lease will keep LC-39A a single-user pad, it will now serve two variants of the same rocket family – the Falcon 9 and the Falcon Heavy.
But more importantly, SpaceX’s lease of the pad and its upcoming inaugural launch for a commercial company officially make the Kennedy Space Center a multi-user spaceport – a prime goal for the facility following the retirement of the Shuttle fleet.

(Images: Bill Harwood/CBS News, SpaceX, NASA, US Launch Report, Marek Cyzio, and L2 artist Nathan Koga)
<filename>backend/api/corona_tweet_analysis/views.py
from json import loads

from django.core.exceptions import PermissionDenied
from rest_framework.authentication import TokenAuthentication
from rest_framework import permissions, generics
from rest_framework.response import Response

from corona_tweet_analysis.utils.responses import send_response
from corona_tweet_analysis.utils.constants import SUCCESS, FAIL, INVALID_PARAMETERS, BAD_REQUEST
from corona_tweet_analysis.models import TwitterData, Category, CoronaReport, Data
from corona_tweet_analysis.serializers import TwitterDataSerializer, CategorySerializer


class CategoryView(generics.ListAPIView):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer


class CoronaWorldReportView(generics.ListCreateAPIView):
    http_method_names = ['get', 'put']

    def get(self, request, *args, **kwargs):
        corona_report = CoronaReport.objects.order_by('-created_at').first()
        data = loads(corona_report.to_json())
        world = {}
        for country in data['data']:
            if country['name'] == "World":
                world = country
        return Response({
            'status': SUCCESS,
            'data': world,
            'created_at': corona_report.created_at
        })

    def put(self, request, *args, **kwargs):
        # Only authenticated superusers may update the report.
        if not (request.user.is_authenticated and request.user.is_superuser):
            raise PermissionDenied
        corona_report = CoronaReport.objects.order_by('-created_at').first()
        new_cases = request.query_params.get('new_cases')
        new_deaths = request.query_params.get('new_deaths')
        total_deaths = request.query_params.get('total_deaths')
        total_cases = request.query_params.get('total_cases')
        if not (new_cases or new_deaths or total_deaths or total_cases):
            return send_response({'status': SUCCESS, 'message': 'No update values were given'})
        report_data = loads(corona_report.to_json())
        data_objects_list = []
        for data in report_data['data']:
            if data['name'] == 'World':
                data['new_cases'] = int(new_cases) if new_cases else data['new_cases']
                data['new_deaths'] = int(new_deaths) if new_deaths else data['new_deaths']
                data['total_deaths'] = int(total_deaths) if total_deaths else data['total_deaths']
                data['total_cases'] = int(total_cases) if total_cases else data['total_cases']
            data_obj = Data(name=data['name'], new_cases=data['new_cases'], new_deaths=data['new_deaths'],
                            total_deaths=data['total_deaths'], total_cases=data['total_cases'])
            data_objects_list.append(data_obj)
        new_report = CoronaReport(data=data_objects_list)
        new_report.save()
        return send_response({'status': SUCCESS, 'message': 'Corona Report updated'})


class CoronaReportView(generics.ListCreateAPIView):
    http_method_names = ['get', 'put']

    def get(self, request, *args, **kwargs):
        country = request.query_params.get('country')
        if not country:
            return send_response({'status': INVALID_PARAMETERS, 'message': 'Country not sent'})
        country_data_report = CoronaReport.objects(data__name=country).order_by('-created_at').first()
        if not country_data_report:
            return send_response({'status': INVALID_PARAMETERS, 'message': 'Country not found'})
        corona_report = CoronaReport.objects.order_by('-created_at').first()
        report_data = loads(corona_report.to_json())
        data = {}
        for country_data in report_data['data']:
            if country_data['name'] == country:
                data = country_data
        return Response({
            'status': SUCCESS,
            'data': data,
            'created_at': corona_report.created_at
        })

    def put(self, request, *args, **kwargs):
        # Only authenticated superusers may update the report.
        if not (request.user.is_authenticated and request.user.is_superuser):
            raise PermissionDenied
        country = request.query_params.get('country')
        if not country:
            return send_response({'status': INVALID_PARAMETERS, 'message': 'Country not sent'})
        country_data_report = CoronaReport.objects(data__name=country).order_by('-created_at').first()
        if not country_data_report:
            return send_response({'status': INVALID_PARAMETERS, 'message': 'Country not found'})
        corona_report = CoronaReport.objects.order_by('-created_at').first()
        new_cases = request.query_params.get('new_cases')
        new_deaths = request.query_params.get('new_deaths')
        total_deaths = request.query_params.get('total_deaths')
        total_cases = request.query_params.get('total_cases')
        if not (new_cases or new_deaths or total_deaths or total_cases):
            return send_response({'status': SUCCESS, 'message': 'No update values were given'})
        report_data = loads(corona_report.to_json())
        data_objects_list = []
        for data in report_data['data']:
            if data['name'] == country:
                data['new_cases'] = int(new_cases) if new_cases else data['new_cases']
                data['new_deaths'] = int(new_deaths) if new_deaths else data['new_deaths']
                data['total_deaths'] = int(total_deaths) if total_deaths else data['total_deaths']
                data['total_cases'] = int(total_cases) if total_cases else data['total_cases']
            data_obj = Data(name=data['name'], new_cases=data['new_cases'], new_deaths=data['new_deaths'],
                            total_deaths=data['total_deaths'], total_cases=data['total_cases'])
            data_objects_list.append(data_obj)
        new_report = CoronaReport(data=data_objects_list)
        new_report.save()
        return send_response({'status': SUCCESS, 'message': 'Corona Report updated'})


class TwitterDataView(generics.ListAPIView):
    queryset = TwitterData.objects(is_spam__ne=True).order_by('-created_at', '-_id')
    serializer_class = TwitterDataSerializer

    def get(self, request, *args, **kwargs):
        category = request.query_params.get('category')
        if category:
            category_obj = Category.objects(_id=category).first()
            if not category_obj:
                return send_response({'status': INVALID_PARAMETERS, 'message': 'Category not found'})
            # Calling a mongoengine queryset filters it further.
            self.queryset = self.queryset(category=category).order_by('-created_at', '-_id')
        return super().get(request, *args, **kwargs)


class SpamCountView(generics.ListCreateAPIView):
    http_method_names = ['put']
    queryset = TwitterData.objects.all()
    serializer_class = TwitterDataSerializer
    authentication_classes = (TokenAuthentication,)
    permission_classes = [permissions.IsAuthenticated]

    def put(self, request, *args, **kwargs):
        try:
            tweet_id = request.query_params.get('tweet_id')
            if not tweet_id:
                return send_response({'status': INVALID_PARAMETERS, 'message': 'Tweet id is required'})
            tweet = TwitterData.objects(id=tweet_id).first()
            if not tweet:
                return send_response({'status': FAIL, 'message': 'Tweet not found'})
            # Handling spam tweets
            spam_users = tweet.spam_users
            is_spam = False
            if request.user.email in spam_users:
                return send_response({'status': BAD_REQUEST, 'data': 'You have already marked this as spam'})
            spam_users.append(request.user.email)
            spam_count = tweet.spam_count + 1
            # A tweet is flagged as spam after more than 10 reports, or immediately by a superuser.
            if len(spam_users) > 10 or request.user.is_superuser:
                is_spam = True
            tweet.update(spam_count=spam_count, is_spam=is_spam, spam_users=spam_users)
            return send_response({'status': SUCCESS, 'data': 'Spam count updated'})
        except Exception as err:
            return send_response({'status': FAIL, 'message': str(err)})


class StatisticsView(generics.ListCreateAPIView):
    queryset = TwitterData.objects.all()
    serializer_class = TwitterDataSerializer

    def get(self, request, *args, **kwargs):
        try:
            statistics_dict = {}
            country_confirmed_dict = {}
            # Get the number of infected cases for each country.
            countries = TwitterData.objects(country__ne='--NA--').distinct('country')
            for country in countries:
                infected_count = TwitterData.objects(category='INFECTED', country=country).count()
                country_confirmed_dict[country] = infected_count
            # Count infections, deaths and recoveries by category.
            infected_count = TwitterData.objects(category='INFECTED').count()
            death_count = TwitterData.objects(category='DEATH').count()
            recovered_count = TwitterData.objects(category='RECOVERED').count()
            statistics_dict['country_confirmed_cases'] = country_confirmed_dict
            statistics_dict['infected_count'] = infected_count
            statistics_dict['death_count'] = death_count
            statistics_dict['recovered_count'] = recovered_count
            return send_response({'status': SUCCESS, 'data': statistics_dict})
        except Exception as err:
            return send_response({'status': FAIL, 'message': str(err)})
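
# Usage sketch (not part of the original file): a minimal urls.py wiring for
# the views above, assuming the standard DRF as_view() pattern; the route
# paths are illustrative only.
# from django.urls import path
# from corona_tweet_analysis import views
#
# urlpatterns = [
#     path('categories/', views.CategoryView.as_view()),
#     path('report/world/', views.CoronaWorldReportView.as_view()),
#     path('report/', views.CoronaReportView.as_view()),
#     path('tweets/', views.TwitterDataView.as_view()),
#     path('tweets/spam/', views.SpamCountView.as_view()),
#     path('statistics/', views.StatisticsView.as_view()),
# ]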
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 31 17:26:48 2022

@author: kakdemi
"""

import pandas as pd

# reading 2018 load values
load_2018 = pd.read_excel('Hourly_demand_2018.xlsx', header=0)

# reading solar behind-the-meter (BTM) timeseries
# (note: the *_2012 variables below are read from the 2018 sheets of each workbook)
Solar_ISO_BTM_2012 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO_BTM.xlsx',
                                   header=0, sheet_name='All Zones Time Series - 2018')
Solar_ISO_BTM_2030 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO_BTM.xlsx',
                                   header=0, sheet_name='All Zones Time Series - 2030')
Solar_ISO_BTM_2040 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_ISO_BTM.xlsx',
                                   header=0, sheet_name='All Zones Time Series - 2040')
Solar_NREL_BTM_2012 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL_BTM.xlsx',
                                    header=0, sheet_name='All Zones Time Series - 2018')
Solar_NREL_BTM_2030 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL_BTM.xlsx',
                                    header=0, sheet_name='All Zones Time Series - 2030')
Solar_NREL_BTM_2040 = pd.read_excel('../ISONE_data_file/Scenarios/Renewable_timeseries/Solar_NREL_BTM.xlsx',
                                    header=0, sheet_name='All Zones Time Series - 2040')

# creating and saving load timeseries for each solar BTM case
load_ISO_2030 = load_2018 + Solar_ISO_BTM_2012 - Solar_ISO_BTM_2030
load_ISO_2030.to_excel('Hourly_demand_2030_ISO.xlsx', index=False)

load_ISO_2040 = load_2018 + Solar_ISO_BTM_2012 - Solar_ISO_BTM_2040
load_ISO_2040.to_excel('Hourly_demand_2040_ISO.xlsx', index=False)

load_NREL_2030 = load_2018 + Solar_NREL_BTM_2012 - Solar_NREL_BTM_2030
load_NREL_2030.to_excel('Hourly_demand_2030_NREL.xlsx', index=False)

load_NREL_2040 = load_2018 + Solar_NREL_BTM_2012 - Solar_NREL_BTM_2040
load_NREL_2040.to_excel('Hourly_demand_2040_NREL.xlsx', index=False)
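
# Sanity-check sketch (an assumption, not in the original script): the BTM
# adjustment above is elementwise, so the demand and solar frames must share
# the same shape and column layout for the arithmetic to line up.
# assert load_2018.shape == Solar_ISO_BTM_2012.shape == Solar_ISO_BTM_2030.shape
# assert list(load_2018.columns) == list(Solar_ISO_BTM_2012.columns)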
def optimize(self, migrations=None, iterations=3):
    # Avoid the mutable-default-argument pitfall of `migrations=[]`.
    if migrations is None:
        migrations = []
    if iterations == 0:
        return migrations
    # Visit hypervisors from most to least imbalanced (by |score|).
    for subject in reversed(sorted(self.enabled_hypervisors, key=lambda h: abs(h.score))):
        if subject.score < 0:
            improvement = self.right_divergent
        else:
            improvement = self.left_divergent
        if not improvement:
            continue
        needed_migrations = self._mix_hypervisors(subject, improvement)
        self.use_snapshot(validate=False)
        if needed_migrations:
            migrations.extend(self._plan_migrations(needed_migrations))
            # Collapse chained moves of the same server into a single migration.
            optimizing = True
            while optimizing:
                optimizing = False
                for i in range(len(migrations) - 1):
                    if migrations[i].server == migrations[i+1].server:
                        optimizing = True
                        migrations = migrations[:i] + \
                            [Migration(migrations[i].server,
                                       migrations[i].source,
                                       migrations[i+1].destination)] + \
                            migrations[i+2:]
                        break
            self.snapshot(validate=False)
            self._validate_migrations(migrations)
            return self.optimize(migrations=migrations, iterations=iterations-1)
    return migrations
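
# Assumed shape of the Migration record used above (hypothetical; the real
# definition lives elsewhere in this codebase):
# from collections import namedtuple
# Migration = namedtuple('Migration', ['server', 'source', 'destination'])
#
# The merge loop then collapses chained moves of the same server, e.g.
# Migration(s, A, B) followed by Migration(s, B, C) becomes Migration(s, A, C).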
<reponame>koddsson/island.is<filename>apps/judicial-system/api/src/app/modules/auth/auth.controller.spec.ts
import { mock } from 'jest-mock-extended'

import { Test, TestingModule } from '@nestjs/testing'

import { Logger, LOGGER_PROVIDER } from '@island.is/logging'

import { AuthController } from './auth.controller'
import { AuthService } from './auth.service'
import { UserService } from '../user'

describe('Auth Controller', () => {
  let controller: AuthController

  beforeEach(async () => {
    const module: TestingModule = await Test.createTestingModule({
      controllers: [AuthController],
      providers: [
        AuthService,
        UserService,
        { provide: 'IslandisLogin', useValue: {} },
        { provide: LOGGER_PROVIDER, useValue: mock<Logger>() },
      ],
    }).compile()

    controller = module.get<AuthController>(AuthController)
  })

  it('should be defined', () => {
    expect(controller).toBeDefined()
  })
})
from math import pi


def ellipsoidal_area(a, b, x1, y1, x2, y2):
    """Signed ellipsoidal area term for the geodesic from (x1, y1) to (x2, y2),
    on an ellipsoid with semi-axes a and b; coordinates are in degrees. Relies on
    helpers (_canonical_configuration, ellipsoidal_inverse, _ellipsoidal_area)
    defined elsewhere in this module."""
    # Preserve the sign convention: east-to-west segments are counted negative.
    if x2 < x1:
        reverse = -1
    else:
        reverse = 1
    _, x1, y1, x2, y2 = _canonical_configuration(x1, y1, x2, y2)
    phi1 = y1*pi/180.0
    phi2 = y2*pi/180.0
    lambda12 = (x2-x1)*pi/180.0
    az, baz, _ = ellipsoidal_inverse(a, b, x1, y1, x2, y2)
    alpha1 = az * pi/180
    alpha2 = (baz-pi) * pi/180
    return reverse * _ellipsoidal_area(a, b, lambda12, phi1, phi2, alpha1, alpha2)
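
# Usage sketch (hypothetical values; assumes WGS84 semi-axes in metres and that
# the helper functions used above are importable from this module):
# a, b = 6378137.0, 6356752.314245
# area = ellipsoidal_area(a, b, -70.0, 40.0, -60.0, 45.0)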
Intelligent polymeric micelles: development and application as drug delivery for docetaxel

Abstract In recent years, docetaxel (DTX)-loaded intelligent polymeric micelles have been regarded as a promising vehicle for DTX because, compared with conventional DTX-loaded micelles, intelligent micelles not only preserve the basic functions of micelles, such as DTX solubilization, enhanced accumulation in tumor tissue, and improved bioavailability and biocompatibility of DTX, but also possess new properties, for instance, tumor-specific DTX delivery and a series of responses to endogenous or exogenous stimuli. In this paper, the basic theories and mechanisms of action of intelligent polymeric micelles are discussed in detail, especially those underlying DTX-loaded stimuli-responsive micelles. Relevant examples of stimuli-responsive DTX-loaded micelles are also provided to illustrate the advantages of these technologies for the clinical application of anticancer drugs, especially DTX.
#!/usr/bin/env python """Main framework for running BAI algorithms""" __author__ = "<NAME>" __copyright__ = "Copyright 2021, USC" import time import numpy as np from numpy.linalg import inv, norm, eig, svd, qr import pandas as pd from itertools import product, repeat import math import os import sys import platform from collections import Counter from sklearn.preprocessing import MinMaxScaler # , StandardScaler, minmax_scale from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, f1_score, precision_score, recall_score from sklearn.cluster import KMeans from utils import intersection, write_csv, get_pst_time, sigmoid, inv_wrap, find_two_closest, make_dir import multiprocessing as mp from numpy.random import normal as npNormal from numpy.random import binomial as npBinomial, multinomial from optDesign import fw_opt_FB, minvol_todd from scipy import optimize from scipy.optimize.nonlin import NoConvergence from sklearn.gaussian_process.kernels import Matern, RBF, RationalQuadratic, \ ExpSineSquared, DotProduct, WhiteKernel from sklearn.gaussian_process import GaussianProcessRegressor from scipy.optimize import minimize from peace import PEACE mns_ = MinMaxScaler() np.random.seed(110) class super_class(): def __init__(self, seed_, N_1, num_arms, dim, reward_dist, max_depth_dtr, budget_per_arm, sigma2, fb, multithr, rewards_base='auto', BG_par={}, algos=None, GPUCB_par=None, params={}, BperArm=1): self.params = params self.fb = fb self.deltas = "" self.rewards_base = rewards_base self.df_train_fix: pd.DataFrame = None if self.params['synt_num'] in [1, 5] or self.rewards_base == 'aml': self.params['random_exp'] = False self.params['synt1_omega'] = 1/10 self.N_1 = N_1 self.log_step = max(int(N_1/10), 1) self.trgt_feat = None self.df_train = None self.arms = None self.theta = None self.dim = dim self.num_arms = num_arms if self.params['synt_num']==1: self.num_arms = self.dim + 1 elif self.params['synt_num']==5: self.num_arms = self.dim self.sigma2 = sigma2 self.prior_sigma = params['prior_sigma'] self.seed_ = seed_ self._set_seeds() # self.rs = np.random.RandomState(seed=self.seed_) # self.seeds = self.rs.randint(100, 1000, N_1) self.sample_id = 0 self.X_train, self.y_train = None, None self.max_depth_dtr=max_depth_dtr self.multithr = multithr self.BG_par = BG_par self.algos = algos self.alpha_linucb = 1/np.sqrt(np.log(2/.001)/2) self.GPUCB_par = GPUCB_par self.reward_dist = reward_dist self.read_data() self.budget_per_arm = budget_per_arm self.BperArm = BperArm self.budget = budget_per_arm * self.num_arms if BperArm else budget_per_arm self.GSE_on = 0 if any(['GSE' in algo for algo in algos]): self.GSE_on = 1 self.n_stage_SE() if reward_dist == 'Gaus': self.reward = self.gauss_reward else: self.reward = self.bern_reward if self.rewards_base == 'aml': self.reward = self.aml._sample_eval_size print("K", self.num_arms, self.algos, "B/K" if BperArm else "B", self.budget_per_arm, "d", self.dim, self.params) def _synt_data(self): synt_num = self.params['synt_num'] if not self.params['random_exp']: self.rs = np.random.RandomState(seed=110) if synt_num == 1: self.num_arms = self.dim+1 self.arms = np.eye(self.dim) v = np.zeros((1, self.dim)) v[0, 0], v[0, 1] = math.cos(self.params['synt1_omega']), math.sin(self.params['synt1_omega']) self.arms = np.vstack([self.arms, v]) self.theta = np.eye(1, self.dim) elif synt_num == 3: self.arms = self.rs.random(size=(self.num_arms, self.dim)) - 
.5 # unif(-.5, .5) # self.theta = np.random.normal(0, self.prior_sigma, size=(1, self.dim)) self.theta = self.rs.normal(0, 3 * self.prior_sigma / self.dim, size=(1, self.dim)) # normal elif synt_num == 5: # LinGapE paper example self.arms = np.eye(self.dim) self.theta = np.zeros(self.dim) self.theta[0] = self.params['delta_s5'] elif synt_num == 6: # ALBA paper, OD-LinBAI 5.2 too self.arms = self.rs.random(size=(self.num_arms, self.dim))*2-1 # unif(-1, 1) self.arms /= norm(self.arms, axis=1)[:, np.newaxis] x, y, _, _ = find_two_closest(self.arms) self.theta = (1 - self.params['alpha_s6']) * x + self.params['alpha_s6']*y elif synt_num == 7: # Section 5.1 from OD-LinBAI paper self.dim = 2 pi4 = np.pi/4 tmp_arms = [[np.cos(pi4+phi), np.sin(pi4+phi)] for phi in npNormal(0, 0.09, size=self.num_arms-2)] tmp_arms += [[np.cos(3*pi4), np.sin(3*pi4)]] tmp_arms = [[1, 0]] + tmp_arms self.arms = np.array(tmp_arms) self.theta = np.array([1, 0]) self.df_train = pd.DataFrame(self.arms) ##### self.trgt_feat = "trgt" if self.params['synt_num'] == 3: tmp = np.dot(self.df_train.to_numpy(), self.theta.T) self.df_train[self.trgt_feat] = sigmoid(tmp) # self.df_train[self.trgt_feat] = np.maximum(np.sign(1 / (1 + np.exp(-tmp))-0.5), 0).astype(int) else: self.df_train[self.trgt_feat] = np.dot(self.df_train.to_numpy(), self.theta.T) return self.df_train def _set_seeds(self): self.rs = np.random.RandomState(seed=self.seed_) self.seeds = self.rs.randint(10, 1e8, self.N_1) def _set_replic_seed(self, sim_id): _seed = self.seeds[sim_id] np.random.seed(_seed) self.rs = np.random.RandomState(seed=_seed) def nan_replace_mean(self, feat_name, dtype_): df_temp = self.df_train[self.df_train[feat_name] != '?'].copy() normalised_mean = df_temp[feat_name].astype(dtype_).mean() self.df_train[feat_name] = self.df_train[feat_name].replace('?', normalised_mean).astype(dtype_) def get_deltas(self, y): return np.sort(np.max(y) - y) def scale_data(self): for i in self.df_train: self.df_train.loc[:, i] = mns_.fit_transform(self.df_train.loc[:, i].values.reshape(-1, 1)) def read_data(self): if self.rewards_base == 'auto': self.trgt_feat = 'price' self.df_train = pd.read_csv("./Auto/autodata/Automobile_data.csv") df_data = self.df_train.replace("?", np.nan) tmpList = [(self.trgt_feat, int)] for feat_name, dtype_ in tmpList: self.nan_replace_mean(feat_name, dtype_) subset_ = ['curb-weight', 'width', 'engine-size', 'city-mpg', 'highway-mpg', self.trgt_feat] self.df_train = self.df_train[subset_] # Standard Scalar self.df_train = self.df_train.astype(float) self.scale_data() self.dim = self.df_train.shape[1]-1 elif self.rewards_base == 'pmsm': cols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] self.df_train = pd.read_csv('./PMSM-data/pmsm_temperature_data.csv', usecols=cols) self.df_train = self.df_train.astype(float) self.scale_data() self.trgt_feat = 'motor_speed' self.dim = self.df_train.shape[1]-1 if self.rewards_base == 'synt': self.df_train_fix = self._synt_data() else: self.df_train_fix = self.bootstrap(ss=self.num_arms) if self.rewards_base != 'aml': self.deltas = self.get_deltas(self.df_train_fix[self.trgt_feat]) self.SE_dim = 1 * self.dim def gauss_reward(self, mu, sigma2, size, rs=None, config=None): tmp = self.rs.normal(mu, np.sqrt(sigma2), size=size) # print(tmp) return tmp def bern_reward(self, mu, sigma2, size, rs=None, config=None): return npBinomial(1, mu, size) def optimal_design(self, X): cur_num_arms, d = X.shape """<NAME>, Lattimore 2019""" pi = np.ones(cur_num_arms)/cur_num_arms # pi_0 in <NAME> X = [a.reshape(d, 1) for a in 
X] eps = self.params['optimal_design_eps'] lambda_ = self.params['SE_lambda'] gpi_k = float('inf') k = 0 while gpi_k > d+eps: k+=1 Vpi_k = lambda_*np.eye(d) for i, a in enumerate(X): Vpi_k += pi[i] * np.dot(a, a.T) # Vpi_k = np.matrix.sum([pi[i] * a * a.T for i, a in enumerate(X)]) Vpi_k = inv(Vpi_k) a_Vpi = [np.dot(np.dot(a.T, Vpi_k), a) for a in X] a_k_idx = np.argmax(a_Vpi) gpi_k = a_Vpi[a_k_idx] a_k = X[a_k_idx] gamma_ = ((1/d*gpi_k-1)/(gpi_k-1))[0][0] pi *= (1-gamma_) pi[a_k_idx] += gamma_ return pi def g_opt_gready(self, X, budget, type): """G greedy and XY greedy from Soare et al. 2014 i.e. sequential optimal design""" cur_num_arms, dim = X.shape X = np.hstack([np.ones((cur_num_arms, 1)), X]) dim += 1 X = [a.reshape(dim, 1) for a in X] A = np.eye(dim) n_samples = np.zeros(cur_num_arms) sample_path = [] if type == 'G-greedy': xpmax_f = lambda tmp: max([xp.T.dot(tmp).dot(xp) for xp in X]) elif type == 'XY-greedy': xpmax_f = lambda tmp: max([(xp - xpp).T.dot(tmp).dot(xp - xpp) for xp, xpp in product(X, X)]) for i in range(int(budget)): xargmin, xmin = None, float('inf') for xc, x in enumerate(X): tmp = inv(A + np.dot(x, x.T)) xpmax = xpmax_f(tmp) if xpmax < xmin: xargmin, xmin = xc, xpmax A = A+np.dot(X[xargmin], X[xargmin].T) n_samples[xargmin] += 1 sample_path.append(xargmin) return n_samples.astype(int) def se_train_explore(self, sim_id, df_train, budget, num_plays_ind, km=False, not_elim_arms=None, _memory=1, pre_reward=[], pre_X_train=None, prev_y_train=None, optimal_design=0, params={}, thetahat=0, algoname=None, stage_cntr=None): X_test, y_test = df_train.drop(self.trgt_feat, axis=1).to_numpy(), df_train[self.trgt_feat] # cur_num_arms = self.X_test.shape[0] cur_num_arms = X_test.shape[0] trgt = df_train[self.trgt_feat].to_numpy() tmp_trgt_feat = [] tmp_df_train = pd.DataFrame() """Exploration""" n_samples = None if optimal_design: if cur_num_arms > 2: pi = self.optimal_design(1*X_test) else: pi = np.ones(cur_num_arms)/cur_num_arms n_samples = multinomial(budget, pi) # n_samples = np.floor(pi*budget).astype(int) elif 'Greedy' in params: n_samples = self.g_opt_gready(1 * X_test, budget, type='G-greedy') elif 'G-opt' in params: # n_samples = minvol_todd(X=1 * X_test, budget=budget, tol=self.params['SE_proj_tol']) if (algoname == 'OD-LinBAI' and self.params['OC_cons']) \ or (algoname in ['GSE-Lin-Todd', 'GSE-Lin-Todd-1']): # and stage_cntr == 0: # n_samples = Epeeling(X=1 * X_test, budget=budget, design='g', tol=self.params['SE_proj_tol']) n_samples = minvol_todd(X=1 * X_test, budget=budget, tol=self.params['SE_proj_tol']) else: n_samples = fw_opt_FB(X=1 * X_test, budget=budget, design='g') elif 'Peace' in params: X_test_tmp = X_test z0 = X_test_tmp[np.argmax(X_test_tmp.dot(thetahat))] peace = PEACE(X_test_tmp, z0=z0, b=1, theta0=thetahat, delta=.8) lam, n_samples = peace.computeAlloc(B=budget) bdiffn = budget - np.sum(n_samples) if bdiffn > 0: # n_samples += multinomial(bdiffn, lam) n_samples += multinomial(bdiffn, np.ones(cur_num_arms)/cur_num_arms) elif bdiffn < 0: n_samples -= multinomial(-bdiffn, np.ones(cur_num_arms)/cur_num_arms) else: """uniform exploring""" budget_per_arm, exes = int(budget / cur_num_arms), int(budget % cur_num_arms) n_samples = np.ones(cur_num_arms)*budget_per_arm n_samples[self.rs.choice(cur_num_arms, size=exes, replace=False)] += 1 n_samples = np.array(n_samples).astype(int) for i in range(cur_num_arms): tmp_trgt_feat += list(self.reward(trgt[i], self.sigma2, size=n_samples[i], rs=self.rs, config=X_test[i])) num_plays_ind[not_elim_arms[i]] += 
n_samples[i] tmp_df_train = tmp_df_train.append([df_train.iloc[i]] * n_samples[i]) tmp_trgt_feat = np.array(tmp_trgt_feat).reshape(sum(n_samples), 1) df_train = tmp_df_train df_train[self.trgt_feat] = tmp_trgt_feat if _memory: """With memory""" X_train = pd.concat([pre_X_train, df_train.drop(self.trgt_feat, axis=1)], ignore_index=True) y_train = pd.concat([prev_y_train, df_train[self.trgt_feat]], ignore_index=True) else: """No memory""" X_train, y_train = \ df_train.drop(self.trgt_feat, axis=1), df_train[self.trgt_feat] return X_train, y_train, X_test, y_test def _linear_reg(self, sim_id, X_train, X_test, y_train, y_test, _lambda): """Linear regression from scratch""" X_train = X_train.to_numpy() V_t = np.dot(X_train.T, X_train) V_t = inv_wrap(V_t, _lambda) b_t = np.dot(X_train.T, y_train) theta = np.dot(V_t, b_t) pred = np.array([np.dot(x.T, theta) for x in X_test]) if self.rewards_base != 'aml': mae = mean_absolute_error(y_test, pred) mse = mean_squared_error(y_test, pred) r2s = r2_score(y_test, pred) pred_max = np.where(pred==pred.max())[0] y_test = y_test.to_numpy() y_test_max = np.where(y_test==y_test.max())[0] crct = len(intersection(pred_max, y_test_max)) > 0 else: # we don't have the y_test (ground truth for AutoML) mae, mse, r2s, crct = 0, 0, 0, 0 ret = {} ret['acc'], ret['mae'], ret['mse'], \ ret['r2s'], ret['pred'], ret['_lambda'] = crct, mae, mse, r2s, pred, _lambda ret['thetahat'] = theta return ret def _Logistic_reg(self, sim_id, X_train, X_test, y_train, y_test, _lambda): """GLM for Logistic regression""" cur_num_arms, dim = X_train.shape df = lambda theta: np.dot((sigmoid(np.dot(X_train, theta))-y_train), X_train) try: theta = optimize.newton_krylov(df, np.zeros(dim)) converged = True except NoConvergence as e: theta = e.args[0] converged = False except ValueError: theta = npNormal(loc=0, scale=1, size=dim) pred = np.dot(theta, X_test.T) mae = mean_absolute_error(y_test, pred) mse = mean_squared_error(y_test, pred) r2s = r2_score(y_test, pred) pred_max = np.where(pred == pred.max())[0] y_test_max = np.where(y_test == y_test.max())[0] crct = len(intersection(pred_max, y_test_max)) > 0 ret = {} ret['acc'] = crct ret['mae'], ret['mse'], \ ret['r2s'], ret['pred'], ret['_lambda'] = mae, mse, r2s, pred, _lambda return ret def bootstrap(self, ss, sim_id=None): if not sim_id: sim_id = self.sample_id self.sample_id += 1 # assert self.params['synt_num'] != 1, "synt 1 called bootstrap" # if self.rewards_base == "synt": # sample_df = self._synt_data() # else: # sample_df = self.df_train.sample(n=ss, replace=True, random_state=self.seeds[sim_id]) # sample_df.reset_index(drop=True, inplace=True) sample_df = self.df_train.sample(n=ss, replace=True, random_state=self.seeds[sim_id]) sample_df.reset_index(drop=True, inplace=True) return sample_df #, sample_df[self.trgt_feat].to_numpy() def simple_regret(self, y, recoms): if self.rewards_base == 'aml': return -1, -1 else: crt_idx = np.argmax(y) if crt_idx in recoms: crct = True simplereg = 0 else: crct = False simplereg = y[crt_idx] - y[int(recoms[0])] return crct, simplereg def linUCB_disjoint(self, sample_df, sim_id): """ A Contextual-Bandit Approach to Personalized News Article Recommendation, 2010 :param sample_df: the sampled dataframe :param sim_id: the id of the simulation run :return: """ n_trial = self.budget X, y = np.array(sample_df.drop(self.trgt_feat, axis=1)), np.array(sample_df[self.trgt_feat]) num_plays_ind = np.zeros(self.num_arms) n_feature = X.shape[1] arm_choice, r_payoff = [np.empty(n_trial) for _ in range(2)] theta = 
np.empty(shape=(self.num_arms, n_feature)) p = np.empty(shape=(self.num_arms)) # 1.2.intermediate object A = np.array([np.diag(np.ones(shape=n_feature)) for _ in np.arange(self.num_arms)]) b = np.array([np.zeros(shape=n_feature) for _ in np.arange(self.num_arms)]) # 2. Algo for t in np.arange(n_trial): # Compute estimates (theta) and prediction (p) for all arms for a in np.arange(self.num_arms): inv_A = np.linalg.inv(A[a]) # caching matrix inversion result because used twice theta[a] = inv_A.dot(b[a]) p[a] = theta[a].dot(X[a]) + self.alpha_linucb * np.sqrt(X[a].dot(inv_A).dot(X[a])) # chosing best arms chosen_arm = int(np.argmax(p)) x_chosen_arm = X[chosen_arm] r_payoff[t] = self.gauss_reward(y[chosen_arm], self.sigma2, size=1) # r_payoff[t] = self.reward_lin(theta_true, x_chosen_arm) num_plays_ind[chosen_arm] += 1 arm_choice[t] = chosen_arm # update intermediate objects (A and b) A[chosen_arm] += np.outer(x_chosen_arm, x_chosen_arm.T) b[chosen_arm] += r_payoff[t] * x_chosen_arm arm_choice_dic = Counter(arm_choice) if 0: # take the average X_best_est = np.average([cnt*X[int(arm)] for arm, cnt in arm_choice_dic.items()], axis=0) best = np.argmin([norm(X[arm]-X_best_est)/norm(X[arm]+X_best_est) for arm in np.arange(self.num_arms)]) else: """Most Freq played (mf)""" best = arm_choice_dic.most_common(1)[0][0] # take the most used crct, simplereg = self.simple_regret(y, [best]) if sim_id % self.log_step == 0: print('linUCB disjoint sample id {}, budget_per_arm' ' {}: correct {}'.format(sim_id, self.budget_per_arm, self.alpha_linucb, crct)) ret = {} ret['acc'] = crct ret['recoms'] = best ret['simple_regret'] = simplereg ret['num_plays_ind'] = num_plays_ind return ret def n_stage_SE(self): if 'SE_eta' in self.params: self.SE_n_stagess = (np.log(self.num_arms) / np.log(self.params['SE_eta'])) # assert self.SE_n_stagess.is_integer(), "Number of arms must be a power of eta" # self.SE_n_stagess = int(self.SE_n_stagess) self.SE_n_stagess = np.ceil(self.SE_n_stagess).astype(int) def proj_se(self, sample_df, OC_proj): X = sample_df.drop(self.trgt_feat, axis=1).to_numpy() rankX = np.linalg.matrix_rank(X) if rankX != X.shape[1]: Xsvd = svd(np.dot(X.T, X)) X = X.dot(Xsvd[0][:, :rankX]) sample_df.iloc[:, :rankX] = X sample_df.drop([i for i in range(3, sample_df.shape[1] - 1)], axis=1, inplace=True) self.dim = sample_df.shape[1] return sample_df def suces_elim(self, sample_df, sim_id, Lin=0, km=False, _memory=1, pre_reward=[], optimal_design=0, params={}, algoname='GSE'): self.SE_dim = 1*self.dim y = sample_df[self.trgt_feat].copy() X = sample_df.drop(self.trgt_feat, axis=1) d_tilde = np.linalg.matrix_rank(X) eta = self.params['SE_eta'] # print(sim_id, "is", sample_df) num_stages = int(np.log(self.num_arms)/np.log(eta)) if self.GSE_on: num_stages = self.SE_n_stagess if algoname in ['GSE-Lin-FWG-1', 'GSE-Lin-Todd-1']: num_stages = 1 eta = self.num_arms if algoname == 'OD-LinBAI': num_stages = np.ceil(np.log(self.dim) / np.log(2)) num_stages = int(num_stages) tmp = int(np.ceil(np.log2(d_tilde)) - 1) tmp = np.sum([np.ceil(d_tilde/2**r) for r in range(1, tmp)]) if tmp>1 else 0 m_OD_LinBAI = (self.budget - min([self.num_arms, d_tilde * (d_tilde + 1) / 2]) - tmp)/\ np.ceil(np.log2(d_tilde)) # stage_budgets = np.ones(num_stages) * m_OD_LinBAI stage_b = int(self.budget / num_stages) stage_budgets = np.ones(num_stages) * min(m_OD_LinBAI, stage_b) # stage_b_exes = 0 else: stage_b, stage_b_exes = int(self.budget / num_stages), int(self.budget % num_stages) stage_budgets = np.ones(num_stages) * stage_b if stage_b_exes > 
0: stage_budgets[-stage_b_exes:] += 1 not_elim_arms = sample_df.index.to_list() _mae, _mse, _r2s, stage_cntr = 0, 0, 0, 0 f1, recall, precision = 0, 0, 0 num_plays_ind = {i: 0 for i in not_elim_arms} # for stage in range(num_stages): X_train, y_train = None, None _lambda = self.params['SE_lambda'] thetahat = np.zeros(self.dim) while len(not_elim_arms) > 1: if (self.params['SE_proj'] and algoname[:3]=="GSE") or (algoname == 'OD-LinBAI' and self.params['OC_proj']): sample_df = self.proj_se(sample_df, self.params['OC_proj']) if stage_cntr==0: thetahat = np.zeros(self.dim) X_train, y_train, X_test, y_test = \ self.se_train_explore(sim_id, sample_df, stage_budgets[stage_cntr], num_plays_ind, km, not_elim_arms, _memory=_memory, pre_reward=pre_reward, pre_X_train=X_train, prev_y_train=y_train, optimal_design=optimal_design, params=params, thetahat=thetahat, algoname=algoname, stage_cntr=stage_cntr) if Lin: # ret = self.Lin(sim_id, self.X_train, self.X_test, self.y_train, self.y_test) ret = self._linear_reg(sim_id, X_train, X_test, y_train, y_test, _lambda) thetahat = ret['thetahat'] elif 'Log' in params: ret = self._Logistic_reg(sim_id, X_train, X_test, y_train, y_test, _lambda) else: raise _mae, _mse, _r2s = _mae+ret['mae'], _mse+ret['mse'], _r2s+ret['r2s'] pred_argsort = ret['pred'].argsort() middle = len(not_elim_arms)//eta # not_elim_arms = [i for c,i in enumerate(not_elim_arms) # if pred_argsort[c] >= middle] # eliminate with median # not_elim_arms = pred_argsort[middle:] if 'SD' in params: sample_df = sample_df.drop(not_elim_arms[pred_argsort[0]]) del not_elim_arms[pred_argsort[0]] else: if algoname == 'OD-LinBAI': not_elim_arms = [not_elim_arms[i] for i in pred_argsort[-int(np.ceil(self.dim/2**(stage_cntr+1))):]] else: if middle > 0: not_elim_arms = [not_elim_arms[i] for i in pred_argsort[-middle:]] else: not_elim_arms = [not_elim_arms[pred_argsort[-1]]] sample_df = sample_df.copy().loc[[i for i in not_elim_arms]] # sample_df.reset_index(inplace=True) stage_cntr += 1 crct, simplereg = self.simple_regret(y, not_elim_arms) if sim_id % self.log_step == 0: print('{} sample id {}, km {}, memory {}, optimal_design {}, ' 'eta {}, budget_per_arm {}, params: {}, correct {}'.format(algoname, sim_id, km, _memory, optimal_design, eta, self.budget_per_arm, params, crct)) res = {} res['simple_regret'] = simplereg res['recoms'] = not_elim_arms[0] res['eta'] = eta if 0 and 'Log' in params: res['f1'], res['recall'], res['precision'] = f1/stage_cntr, recall/stage_cntr, precision/stage_cntr else: res['mae'], res['mse'], res['r2s'] = _mae/stage_cntr, _mse/stage_cntr, _r2s/stage_cntr res['acc'], res['num_plays_ind'], res['_lambda'] = crct, np.array(list(num_plays_ind.values())), _lambda # ret['mae'], ret['mse'], ret['r2s'], ret['acc'] = _mae / cntr, _mse / cntr, _r2s / cntr, crct return res def beta_bayesGap(self, beta_numerator, mu_hat, sigma2_hat): upbs, lowbs = mu_hat + 3 * sigma2_hat, mu_hat - 3 * sigma2_hat upbs_argmax = np.argmax(upbs) second_best = np.sort(upbs, axis=0)[-2] delta_hats = [upbs[upbs_argmax]-lowbs[i] for i in range(self.num_arms)] delta_hats[upbs_argmax] = second_best-lowbs[upbs_argmax] # delta for the maximizer of upbs Heps = np.sum([max(1/2*(delta_hats[i]+self.BG_par['eps']), self.BG_par['eps']) for i in range(self.num_arms)]) beta = beta_numerator/Heps return beta def bayesGap(self, sample_df, sim_id): """ On correlation and budget constraints in model-based bandit optimization with application to automatic machine learning, Hoffman, 2014 :param sample_df: :param sim_id: :return: """ 
tmpX = sample_df.drop([self.trgt_feat], axis=1).to_numpy() """Design matrix""" BG_kernel = self.params['BG_kernel'] el = self.params['BG_kernel_l'] if BG_kernel in ['exp', 'empirical']: if BG_kernel == 'exp': G = np.array([[np.exp(-norm(x-xp)**2/el) for x in tmpX] for xp in tmpX]) else: G = np.cov(tmpX) try: D, V = eig(G) X = np.dot(V, np.diag(np.sqrt(D))) except np.linalg.LinAlgError: print('eig no convergence') X = tmpX # else: # print('eig went wrong') # X = tmpX elif BG_kernel == 'Matern': kernel = 1.0 * Matern(length_scale=el, nu=1.5) X = kernel(tmpX) elif BG_kernel == 'RBF': kernel = 1.0 * RBF(1.0) X = kernel(tmpX) elif BG_kernel == 'ExpSineSquared': kernel = ExpSineSquared(length_scale=1, periodicity=1) X = kernel(tmpX) elif BG_kernel == 'DotProduct': kernel = DotProduct() + WhiteKernel() X = kernel(tmpX) elif BG_kernel == 'RationalQuadratic': kernel = RationalQuadratic(length_scale=1.0, alpha=1.5) X = kernel(tmpX) else: X = tmpX Y = sample_df[self.trgt_feat].copy().to_numpy() # Y or X^t thetha # crt_idx = sample_df[self.trgt_feat].idxmax() dim = X.shape[1] kappa = np.sum(1/norm(X, axis=1)**2) beta_numerator = (self.budget-self.num_arms)/self.sigma2+kappa/self.BG_par['eta']**2 beta_numerator /= 4 num_plays_ind = np.zeros(self.num_arms) # init Yt = [] for a in range(self.num_arms): Yt.append(self.reward(mu=Y[a], sigma2=self.sigma2, size=1, rs=self.rs, config=X[a])) num_plays_ind[a] += 1 init_sz = len(Yt) Yt = np.array(Yt).reshape(self.num_arms, 1) Xt = 1 * X if self.params['BG_GPtune']: tune_phase_size = int(self.budget/3)-self.num_arms gpr = GaussianProcessRegressor(kernel=kernel, alpha=.4 ** 2, random_state=0) for _ in range(tune_phase_size): a = np.random.randint(low=0, high=self.num_arms) Yt = np.append(Yt, self.reward(mu=Y[a], sigma2=self.sigma2, size=1, rs=self.rs, config=X[a])) Xt = np.append(Xt, [X[a]], axis=0) num_plays_ind[a] += 1 gpr.fit(Xt, Yt) l = gpr.kernel_.k2.get_params()['length_scale'] sigma_f = np.sqrt(gpr.kernel_.k1.get_params()['constant_value']) histJt, histjt = [], [] recom, recom_BJt = None, float('inf') for t in range(self.budget-self.num_arms): # print(f't {t}') Sigma_inv = np.dot(Xt.T, Xt)/self.sigma2 + np.eye(dim)/self.BG_par['eta']**2 # Sigma = inv(Sigma_inv) # if np.isnan(Sigma).all(): # Sigma = inv(Sigma_inv+self.params['SE_lambda']*np.eye(Sigma_inv.shape[0])) Sigma = inv_wrap(Sigma_inv, self.params['SE_lambda']) theta_hat = np.dot(Sigma, np.dot(Xt.T, Yt))/self.sigma2 mu_hat = np.dot(X, theta_hat).reshape(self.num_arms, 1) sigma2_hat = np.diagonal(np.dot(np.dot(X, Sigma), X.T)).reshape(self.num_arms, 1) Betat = self.beta_bayesGap(beta_numerator, mu_hat, sigma2_hat) U, L = mu_hat+Betat*sigma2_hat, mu_hat-Betat*sigma2_hat argmax_U = np.argmax(U) second_best = np.sort(U, axis=0)[-2] Bt = [U[argmax_U]-L[k] for k in range(self.num_arms)] Bt[argmax_U] = second_best-L[argmax_U] Jt = np.argmin(Bt) Utmp = 1*U Utmp[Jt]=np.min(U)-1 jt = np.argmax(Utmp) histJt.append(Jt); histjt.append(jt) if Bt[Jt] < recom_BJt: recom, recom_BJt = Jt, Bt[Jt] at = Jt if sigma2_hat[Jt] >= sigma2_hat[jt] else jt Yt = np.append(Yt, self.reward(mu=Y[at], sigma2=self.sigma2, size=1, rs=self.rs, config=X[at])) num_plays_ind[at] += 1 Xt = np.append(Xt, [X[at]], axis=0) if recom==None: print("recom is none") recom=0 crct, simplereg = self.simple_regret(Y, [recom]) if sim_id % 100 == 0: print('BayesGap sample id {} num arms {}, ' 'budget_per_arm {}: corrct {}'.format(sim_id, self.num_arms, self.budget_per_arm, crct)) ret = {} ret['recoms'] = recom ret['eta']=self.BG_par['eta'] 
ret['simple_regret'] = simplereg ret['acc'], ret['num_plays_ind'] = crct, num_plays_ind return ret def ucbglm(self, sample_df, sim_id): """ UCB-GLM from ''Provably Optimal Algorithms for Generalized Linear Contextual Bandits'' <NAME> :param sample_df: :param sim_id: :return: """ alp = self.params['UCB_GLM_alpha'] lamb = self.params['SE_lambda'] UCB_GLM_mu_f = self.params['UCB_GLM_mu_f'] X = sample_df.drop([self.trgt_feat], axis=1).to_numpy() y = sample_df[self.trgt_feat].copy().to_numpy() # Y dim = X.shape[1] num_plays_ind = np.zeros(self.num_arms) assert self.num_arms == X.shape[0], "error in num arms vs X.shape" Yt = [] for a in range(self.num_arms): Yt.append(self.reward(mu=y[a], sigma2=self.sigma2, size=1, rs=self.rs, config=X[a])) num_plays_ind[a] += 1 Yt = np.array(Yt) V = np.dot(X.T, X) Xt = np.copy(X) for t in range(self.budget-self.num_arms): df = lambda theta: np.squeeze(np.dot((np.expand_dims(UCB_GLM_mu_f(np.dot(Xt, theta)), 1)-Yt).T, Xt), 0) try: theta = optimize.newton_krylov(df, np.zeros(dim)) converged = True except NoConvergence as e: theta = e.args[0] converged = False except ValueError: try: theta = optimize.fsolve(df, np.zeros(dim)) # print("fsolve") except ValueError: raise # theta = np.ones(dim) Vinv = inv_wrap(V, lamb) Xv = np.array([np.dot(np.dot(xp.T, Vinv), xp) for xp in X]) at = np.argmax(np.dot(X, theta)+alp*Xv) Yt = np.append(Yt, self.reward(mu=y[at], sigma2=self.sigma2, size=1, rs=self.rs, config=X[at])) Yt = np.expand_dims(Yt, 1) num_plays_ind[at] += 1 x_at = X[at].reshape(dim, 1) Xt = np.append(Xt, x_at.T, axis=0) V += np.dot(x_at, x_at.T) recom = np.argmax(num_plays_ind) crct, simplereg = self.simple_regret(y, [recom]) if sim_id%self.log_step ==0: print('UCB-GLM sample id {} num arms {}, ' 'budget_per_arm {}: corrct {}'.format(sim_id, self.num_arms, self.budget_per_arm, crct)) ret = {} ret['recoms'] = recom ret['simple_regret'] = simplereg ret['acc'], ret['num_plays_ind'] = crct, num_plays_ind return ret def linGapE_confidence_bound(self, x, A, t, reg, delta): L = 1 tmp = np.sqrt(x.dot(np.linalg.inv(A)).dot(x)) res = tmp * (self.sigma2 * np.sqrt(self.dim * np.log(self.num_arms**2 * (1 + t * L**2) / reg / delta)) + np.sqrt(reg) * 2) return res def linGapE_decide_arm(self, y, A, X, it, jt, K, d, arm_selections, greedy): if greedy: tmp = [y.dot(np.linalg.inv(A + self.matrix_dot(x))).dot(y) for x in X] else: # (12) in LinGapE paper fun = lambda w: np.linalg.norm(w, ord=1) cons = ({'type': 'eq', 'fun': lambda w: np.sum(w[:, np.newaxis] * X, axis=0) - y}) bnds = list(repeat((0, None), K)) # bnds = Bounds(0,None) min_res11 = minimize(fun, np.ones(K) / K, method='SLSQP', bounds=bnds, constraints=cons) wstar = min_res11.x pstar = np.abs(wstar) / np.sum(np.abs(wstar)) tmp = [(arm_selections[i] / pstar[i] if pstar[i] > 0 else float('inf')) for i in range(K)] return np.argmin(tmp) def matrix_dot(self, a): return np.expand_dims(a, axis=1).dot(np.expand_dims(a, axis=0)) def linGapE(self, sample_df, sim_id, greedy=0): """ LinGapE algorithm from https://github.com/liyuan9988/LinGapE, author :return: """ X = sample_df.drop([self.trgt_feat], axis=1).to_numpy() y = sample_df[self.trgt_feat].copy().to_numpy() # Y d = X.shape[1] self.dim = d K = self.num_arms assert self.num_arms == X.shape[0], "error in num arms vs X.shape" reg = self.params['LinGapE']['reg'] # 1 delta = self.params['LinGapE']['delta'] # 0.1 # greedy = self.params['LinGapE']['greedy'] epsilon = 2 * (1 - np.cos(self.params['synt1_omega'])) sigma2 = self.sigma2 A = np.eye(d) * reg b = np.zeros(d) # arm_selections 
= np.ones(K) num_plays_ind = np.ones(K) t = K for i in range(K): A += self.matrix_dot(X[i]) # r = (self.theta.dot(X[i]) + np.random.randn() * sigma2) r = self.reward(y[i], sigma2, 1, rs=self.rs, config=X[i]) b += X[i] * r theta_hat = np.linalg.solve(A, b) est_reward = X.dot(theta_hat) it = np.argmax(est_reward) jt = np.argmax(est_reward - est_reward[it] + np.array([self.linGapE_confidence_bound(x - X[it], A, t, reg, delta) for x in X])) B = est_reward[jt] - est_reward[it] + self.linGapE_confidence_bound(X[it] - X[jt], A, t, reg, delta) while (self.fb > 0 and t < self.budget) or (self.fb == 0 and B > epsilon): a = self.linGapE_decide_arm(X[it] - X[jt], A, X, it, jt, K, d, num_plays_ind, greedy) A += self.matrix_dot(X[a]) # b += X[a] * (self.theta.dot(X[a]) + np.random.randn() * sigma2) b += X[a] * self.reward(y[a], sigma2, 1, rs=self.rs, config=X[a]) num_plays_ind[a] += 1 t += 1 theta_hat = np.linalg.solve(A, b) est_reward = X.dot(theta_hat) if (t % 5000 == 0): print(num_plays_ind) print(B) print(it, jt) it = np.argmax(est_reward) jt = np.argmax(est_reward - np.max(est_reward) + np.array([self.linGapE_confidence_bound(x - X[it], A, t, reg, delta) for x in X])) B = est_reward[jt] - est_reward[it] + self.linGapE_confidence_bound(X[it] - X[jt], A, t, reg, delta) crct, simplereg = self.simple_regret(y, [it]) if sim_id%self.log_step ==0: print(f"LinGapE {greedy} sample id {sim_id} num arms {self.num_arms}, " f"budget_per_arm {self.budget_per_arm}: corrct {crct}") ret = {} ret['recoms'] = it ret['simple_regret'] = simplereg ret['acc'], ret['num_plays_ind'] = crct, num_plays_ind return ret def __call__(self, sim_id): # sim_id = X self._set_replic_seed(sim_id) ret = {} if self.params['random_exp']: # sample_df, _ = self.bootstrap(ss=self.num_arms, sim_id=sim_id) if self.rewards_base == "synt": sample_df = self._synt_data() else: sample_df = self.bootstrap(ss=self.num_arms, sim_id=sim_id) else: sample_df = self.df_train_fix """Well maintained algorithms""" # Lin if 'GSE-Lin' in self.algos: ret['GSE-Lin'] = self.suces_elim(sample_df, sim_id, Lin=1, _memory=0) if 'GSE-Lin-FWG' in self.algos: ret['GSE-Lin-FWG'] = self.suces_elim(sample_df, sim_id, _memory=0, Lin=1, params={'G-opt': 1}) if 'GSE-Lin-Wynn' in self.algos: ret['GSE-Lin-Wynn'] = self.suces_elim(sample_df, sim_id, Lin=1, _memory=0, optimal_design=1) if 'GSE-Lin-Greedy' in self.algos: ret['GSE-Lin-Greedy'] = self.suces_elim(sample_df, sim_id, Lin=1, _memory=0, params={'Greedy': 1}) # Log if 'GSE-Log' in self.algos: ret['GSE-Log'] = self.suces_elim(sample_df, sim_id, _memory=0, params={'Log': 1}) if 'GSE-Log-FWG' in self.algos: ret['GSE-Log-FWG'] = self.suces_elim(sample_df, sim_id, _memory=0, params={'Log': 1, 'G-opt': 1}) if 'GSE-Log-Wynn' in self.algos: ret['GSE-Log-Wynn'] = self.suces_elim(sample_df, sim_id, _memory=0, optimal_design=1, params={'Log': 1}) if 'GSE-Log-Greedy' in self.algos: ret['GSE-Log-Greedy'] = self.suces_elim(sample_df, sim_id, _memory=0, params={'Log': 1, 'Greedy': 1}) # Else if 'UCB-GLM' in self.algos: ret['UCB-GLM'] = self.ucbglm(sample_df, sim_id) if 'LinUCB' in self.algos: ret['LinUCB'] = self.linUCB_disjoint(sample_df, sim_id) if 'BayesGap' in self.algos: ret['BayesGap'] = self.bayesGap(sample_df, sim_id) if 'LinGapE' in self.algos: ret['LinGapE'] = self.linGapE(sample_df, sim_id) if 'LinGapE-Greedy' in self.algos: ret['LinGapE-Greedy'] = self.linGapE(sample_df, sim_id, greedy=1) if 'Peace' in self.algos: ret['Peace'] = self.suces_elim(sample_df, sim_id, Lin=1, _memory=0, params={'Peace': 1}, algoname='Peace') if 
'OD-LinBAI' in self.algos: ret['OD-LinBAI'] = self.suces_elim(sample_df, sim_id, Lin=1, _memory=0, algoname='OD-LinBAI', params={'G-opt': 1}) if 'GSE-Lin-FWG-1' in self.algos: ret['GSE-Lin-FWG-1'] = self.suces_elim(sample_df, sim_id, _memory=0, Lin=1, params={'G-opt': 1}, algoname='GSE-Lin-FWG-1') if 'GSE-Lin-Todd' in self.algos: ret['GSE-Lin-Todd'] = self.suces_elim(sample_df, sim_id, _memory=0, Lin=1, params={'G-opt': 1}, algoname='GSE-Lin-Todd') if 'GSE-Lin-Todd-1' in self.algos: ret['GSE-Lin-Todd-1'] = self.suces_elim(sample_df, sim_id, _memory=0, Lin=1, params={'G-opt': 1}, algoname='GSE-Lin-Todd-1') # cal Deltas if self.params['random_exp']: for algo in self.algos: ret[algo]['deltas'] = self.get_deltas(sample_df[self.trgt_feat]) return ret def get_results(self, poolobjs): print('Collecting the results ...') for algo in self.algos: res = {} res[algo] = {} algo_res = [i[algo] for i in poolobjs] # keys = algo_res[0].keys() for indic in ['acc', 'simple_regret', 'mae', 'mse', 'r2s', 'f1', 'recall', 'precision', 'eta', 'num_plays_ind']: # for indic in keys: if indic in algo_res[0]: res[algo][indic] = sum([i[indic] for i in algo_res])/self.N_1 res[algo]['sd acc'] = np.sqrt(res[algo]['acc']*(1-res[algo]['acc'])/self.N_1) res[algo]['sd simple_regret'] = np.std([i["simple_regret"] for i in algo_res]) res[algo]['num_plays'] = sum(res[algo]['num_plays_ind']) tmp_recoms = Counter([i['recoms'] for i in algo_res]) res[algo]['recoms'] = [tmp_recoms[i] for i in range(self.num_arms)] # Specific results if algo == 'LinUCB': res[algo]['alpha'] = self.alpha_linucb if 'BayesGap' in algo: res[algo]['kernel'] = self.params['BG_kernel'] res[algo]['BG_kernel_l'] = self.params['BG_kernel_l'] if 'embed' in algo: res[algo]['model'] = algo_res[0]['model'] if 'GSE' in algo or 'SD' in algo: res[algo]['_lambda'] = algo_res[0]['_lambda'] if algo == 'UCB-GLM': res[algo]['UCB_GLM_alpha'] = self.params['UCB_GLM_alpha'] res[algo]['UCB_GLM_mu_f'] = self.params['UCB_GLM_mu_f'] if algo in ['LinGapE', 'LinGapE-Greedy']: res[algo]['LinGapE_reg'] = self.params['LinGapE']['reg'] res[algo]['LinGapE_delta'] = self.params['LinGapE']['delta'] # res[algo]['LinGapE_greedy'] = self.params['LinGapE']['greedy'] if self.rewards_base == 'aml': res[algo]['aml_model'] = self.params['aml_model'] res[algo]['RMSE'] = 0 for config, counts in res[algo]['recoms'].items(): res[algo]['RMSE'] += counts * self.aml.final_eval(rs=self.rs, config=self.df_train_fix.iloc[config]) res[algo]['RMSE'] /= self.N_1 row = {'algorithm': algo, 'N_1': self.N_1, 'num_arms': self.num_arms, 'dim': self.dim, 'max_depth': self.max_depth_dtr, 'sigma2': self.sigma2, 'budget_per_arm' if self.BperArm else "Budget": self.budget_per_arm} row.update(res[algo]) if self.params['random_exp']: arms = 'random' theta = 'random' deltas_ = np.average([i["deltas"] for i in algo_res], axis=0) else: arms = self.arms theta = self.theta deltas_ = self.deltas if self.rewards_base=="synt": rewards_base_ = self.rewards_base + str(self.params['synt_num']) prior_sigma_ = self.params['prior_sigma'] else: rewards_base_ = self.rewards_base prior_sigma_ = "" row.update({'rewards_base': rewards_base_, 'theta': theta, 'arms': arms, 'synt1_omega': self.params['synt1_omega'], 'time': get_pst_time(), 'prior_sigma': prior_sigma_, 'optimal_design_eps': self.params['optimal_design_eps'], 'deltas': deltas_, 'SE_proj': self.params['SE_proj'], 'SE_proj_tol': self.params['SE_proj_tol'], 'SE_lambda': self.params['SE_lambda'], 'SE_dim': self.SE_dim, 'params': self.params, 'BperArm':self.BperArm}) file_name = 
'dtr_res' print(row) write_csv(list(row.keys()), list(row.values()), file_name) return res def exp_dtr(self): if self.multithr: if platform.system() == 'Linux': # for solving the freeze in linux try: mp.set_start_method('spawn') except RuntimeError: pass # mp.set_start_method("spawn", force=True) rr = [(sim_id) for sim_id in range(self.N_1)] num_cpu = mp.cpu_count() # num_cpu = 6 with mp.Pool(num_cpu) as pool: poolobjs = pool.map(self, rr) # print(poolobjs) poolobjs = np.array(poolobjs) else: poolobjs = [] for sim_id in range(self.N_1): poolobjs.append(self((sim_id))) self.get_results(poolobjs) # print(self.SE_dim) def BAImain(): print(os.getcwd()) N_1 = 1000 max_depth_dtr = 20 multithr = 1 multithr = 0 reward_dist = 'Gaus' rewards_base = 'auto' rewards_base = 'pmsm' rewards_base = 'synt' # rewards_base = 'aml' aml_model = 'random_forest' """synt data""" random_exp = True # random_exp = False prior_sigma = 1 BperArm = 0 # budget per arm or total budget ###### BperArm = 1 # budget per arm or total budget ###### budget_per_arm = [10, 20, 50] budget_per_arm = [20, 50] # budget_per_arm = [20] # budget_per_arm = [2*2**6] # budget_per_arm = [5**3] budget_per_arm = [50] if rewards_base in ['auto', 'pmsm']: sigma2 = .1 else: sigma2 = 10 # sigma2 = 5 # sigma2 = 1 synt_num, dims, num_arms = 1, [7, 15, 31, 63, 127], [8] # synt1, hard instance of BAI, k is set =d+1 # synt_num, dims, num_arms = 3, [5, 7, 10, 12], [8] # synt3 uniform covar, normal theta, logistic # synt_num, dims, num_arms = None, [None], [8, 16] # auto, pmsm # synt_num, dims, num_arms = 5, [16], [16] # synt 5, LinGapE 2018, Delta example, d=k, k is set =d delta_s5 = [.01, .03, .05, .1, .3] # delta_s5 = [.1] synt_num, dims, num_arms, = 6, [4], [8, 16, 32, 64] # synt 6, ALBA 2018 k=100 in paper, alpha_s6 = 0.01 OD_LinBAIpaper = 0 # use to get the OD-LinBAI setting # synt_num, dims, num_arms = 7, [2], [8, 16, 64] # synt 7, OD-LinBAI 2021 paper if synt_num != 5: delta_s5 = [-1] if rewards_base == 'synt' and synt_num == 3: algos = ['GSE-Log', 'GSE-Log-FWG', # 'GSE-Log-Greedy', 'GSE-Log-Wynn', 'BayesGap', 'GSE-Lin', 'GSE-Lin-FWG', # 'GSE-Lin-Greedy', 'GSE-Lin-Wynn', 'UCB-GLM'] # synt 3 # algos = ['BayesGap'] # algos = ['GSE-Log-FWG', 'UCB-GLM'] # algos = ['UCB-GLM'] else: algos = ['GSE-Lin', 'GSE-Lin-FWG', 'GSE-Lin-Greedy', 'GSE-Lin-Wynn', 'LinUCB', 'BayesGap', 'LinGapE', 'Peace'] # , 'LinGapE-Greedy'] # synt 1 2 4 5 6 auto, pmsm # algos = ['GSE-Lin', 'GSE-Lin-FWG'] # algos = ['GSE-Lin', 'LinUCB', 'BayesGap'] # algos = ['BayesGap'] # algos = ['LinGapE', 'LinGapE-Greedy'] # algos = ['LinGapE'] algos = ['Peace'] # algos = ['OD-LinBAI', 'GSE-Lin-FWG'] # algos = ['OD-LinBAI'] # algos = ['GSE-Lin-FWG'] # algos = ['GSE-Lin-FWG-1'] # algos = ['GSE-Lin-FWG', 'GSE-Lin-FWG-1'] # algos = ['GSE-Lin-FWG', 'GSE-Lin-FWG-1', 'OD-LinBAI', 'GSE-Lin-Todd', 'GSE-Lin-Todd-1'] # algos = ['GSE-Lin-FWG', 'OD-LinBAI', 'GSE-Lin-Todd'] # algos = ['GSE-Lin-FWG', 'LinUCB', 'BayesGap', 'LinGapE-Greedy', 'OD-LinBAI']#, 'Peace'] # algos = ['LinGapE-Greedy', 'OD-LinBAI'] # SE pars eta_SE = 2 # eta_SE = 125 SE_lambda = 1e-8 SE_proj = 1 # SE_proj = 0 SE_proj_tol = 1e-7 # G-opt Wynn optimal_design_eps = 1e-2 optimal_design_eps = 1e-3 # optimal_design_eps = 1e-4 # optimal_design_eps = 1e-5 # GPUCB gpucb_kernel = 'Sq_Exp' # 'lin' gpucb_delta = .01 # UCB-GLM UCB_GLM_alpha = 1/2 UCB_GLM_mu_f = sigmoid # LinGapE LinGapE_reg = SE_lambda LinGapE_delta = .5 LinGapE_delta = .9 LinGapE_delta = .4 # LinGapE_delta = .1 # LinGapE_delta = .01 # LinGapE_greedy = 1 # LinGapE_greedy = 0 # 
    # BayesGap pars
    BG_eta = 50
    BG_eps = 0
    # BG_kernel = ''
    # BG_kernel = 'empirical'
    BG_kernel = 'exp'
    # BG_kernel = 'Matern' / 'RBF' / 'ExpSineSquared' / 'DotProduct' / 'RationalQuadratic'
    BG_kernel_l = [10]
    # other grid-search ranges that were tried:
    # BG_kernel_l = [1e-6, 1e-3, .1, 1, 10, 1e2]
    # BG_kernel_l = [1e3, 5e3, 1e4, 1e5, 1e6]
    # BG_kernel_l = [1e-7, 1e-8, 1e-9]
    # BG_kernel_l = [200, 500, 700]
    if BG_kernel not in ['exp', 'Matern']:
        BG_kernel_l = [-1]
    BG_GPtune = 0

    OC_proj = 1
    OC_cons = 1

    if len(sys.argv) > 1:  # for cluster runs
        # cntr walks the argument values; cntr_1 tracks how many positional
        # arguments must be present before reading the next optional one.
        cntr_1 = iter(range(4, 1000))
        cntr = iter(range(1, len(sys.argv)))
        # multithr = 1
        multithr = 0
        N_1 = int(sys.argv[next(cntr)])
        num_arms = [int(sys.argv[next(cntr)])]
        budget_per_arm = [int(sys.argv[next(cntr)])]
        if len(sys.argv) > next(cntr_1):
            sigma2 = float(sys.argv[next(cntr)])
        if len(sys.argv) > next(cntr_1):
            rewards_base = sys.argv[next(cntr)]
        if len(sys.argv) > next(cntr_1):
            dims = [int(sys.argv[next(cntr)])]
        if len(sys.argv) > next(cntr_1):
            prior_sigma = float(sys.argv[next(cntr)])
        if len(sys.argv) > next(cntr_1):
            synt_num = int(sys.argv[next(cntr)])
        if len(sys.argv) > next(cntr_1):
            random_exp = int(sys.argv[next(cntr)])
        if len(sys.argv) > next(cntr_1):
            delta_s5 = [float(sys.argv[next(cntr)])]
        if len(sys.argv) > next(cntr_1):
            # BperArm is used as a scalar flag elsewhere; the original wrapped
            # this value in a list, which would always be truthy.
            BperArm = int(sys.argv[next(cntr)])
        algos = []
        while len(sys.argv) > next(cntr_1):
            algos += [sys.argv[next(cntr)]]

    # grid-searched optimal kernels I found
    if rewards_base == 'synt':
        if synt_num in [1]:
            BG_kernel = 'exp'
            BG_kernel_l = [1e-3]
        elif synt_num in [3]:
            # BG_kernel = 'exp'
            # BG_kernel_l = [1e6]
            BG_kernel = 'Matern'
            BG_kernel_l = [10]
        elif synt_num in [6]:
            BG_kernel = 'Matern'
            BG_kernel_l = [10]
        elif synt_num in [5]:
            BG_kernel = 'exp'
            BG_kernel_l = [1000]
    elif rewards_base == 'auto':
        BG_kernel = 'exp'
        BG_kernel_l = [1e-6]
    elif rewards_base == 'pmsm':
        BG_kernel = 'empirical'
        BG_kernel_l = [-1]

    if synt_num == 3:
        reward_dist = 'Bern'
    if rewards_base in ['auto', 'pmsm']:
        OC_proj = 0
        SE_proj = 0

    make_dir('./result')
    for algo, num_arm, b, d, BG_l, dls5 in product(algos, num_arms, budget_per_arm,
                                                   dims, BG_kernel_l, delta_s5):
        if synt_num == 6 and OD_LinBAIpaper:
            num_arm = 2 ** d
            b = 2 * num_arm
        st = time.time()
        exp_class1 = super_class(seed_=110, N_1=N_1, num_arms=num_arm, dim=d, reward_dist=reward_dist,
                                 max_depth_dtr=max_depth_dtr, budget_per_arm=b, sigma2=sigma2, fb=1,
                                 multithr=multithr, rewards_base=rewards_base,
                                 BG_par={'eta': BG_eta, 'eps': BG_eps}, algos=[algo],
                                 GPUCB_par={'kernel': gpucb_kernel, 'delta': gpucb_delta},
                                 params={'SE_eta': eta_SE, 'SE_lambda': SE_lambda, 'synt_num': synt_num,
                                         'prior_sigma': prior_sigma, 'random_exp': random_exp,
                                         'optimal_design_eps': optimal_design_eps, 'delta_s5': dls5,
                                         'alpha_s6': alpha_s6, 'BG_kernel': BG_kernel, 'BG_kernel_l': BG_l,
                                         'BG_GPtune': BG_GPtune, 'SE_proj': SE_proj,
                                         'SE_proj_tol': SE_proj_tol, 'UCB_GLM_alpha': UCB_GLM_alpha,
                                         'UCB_GLM_mu_f': UCB_GLM_mu_f,
                                         'LinGapE': {'reg': LinGapE_reg, 'delta': LinGapE_delta},
                                         'aml_model': aml_model, 'OC_proj': OC_proj, 'OC_cons': OC_cons},
                                 BperArm=BperArm)
        exp_class1.exp_dtr()


if __name__ == "__main__":
    BAImain()
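# --------------------------------------------------------------------------
# Hedged sketch (not part of the original source): the script above relies on
# small helpers -- make_dir, write_csv, and get_pst_time -- that are imported
# from elsewhere in the project. Minimal compatible implementations, assuming
# write_csv appends one row per call to <result_dir>/<file_name>.csv and emits
# the header only when the file is first created:
import csv
import os
from datetime import datetime, timedelta, timezone


def make_dir(path):
    # Create the directory if it does not already exist.
    os.makedirs(path, exist_ok=True)


def write_csv(header, values, file_name, result_dir='./result'):
    # Append one result row; write the header only for a fresh file.
    path = os.path.join(result_dir, file_name + '.csv')
    is_new = not os.path.exists(path)
    with open(path, 'a', newline='') as fh:
        writer = csv.writer(fh)
        if is_new:
            writer.writerow(header)
        writer.writerow(values)


def get_pst_time():
    # Timestamp in US Pacific Standard Time (fixed UTC-8; DST ignored here).
    return datetime.now(timezone(timedelta(hours=-8))).strftime('%Y-%m-%d_%H-%M-%S')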
# python_research/experiments/sota_models/utils/monte_carlo.py
from random import shuffle

import numpy as np

from python_research.experiments.sota_models.utils.sets_prep import generate_samples, prep_dataset, unravel_dataset


def prep_monte_carlo(args) -> tuple:
    """
    Finds the size of the smallest population among all classes, then divides the data into three sets:

    - Training set takes ((1 - (args.val_size + args.test_size)) * lowest_class_population) samples.
    - Validation set takes (args.val_size * lowest_class_population) samples.
      args.val_size is the fraction of samples designated for the validation set.
    - Testing set takes (args.test_size * lowest_class_population) samples.
      args.test_size is the fraction of samples designated for the testing set.

    :param args: Parsed arguments.
    :return: Training, Validation and Testing objects.
    """
    print("Monte Carlo data prep:")
    samples = generate_samples(args=args)
    samples_by_classes = [[] for _ in range(args.classes)]
    for x in samples:
        samples_by_classes[x[1]].append(x[0].transpose())
    [shuffle(x) for x in samples_by_classes]
    lowest_class_population = len(samples_by_classes[0])
    for class_ in samples_by_classes:
        if len(class_) < lowest_class_population:
            lowest_class_population = len(class_)

    # Draw the test set without replacement, then remove the drawn samples.
    test_set_size = int(lowest_class_population * args.test_size)
    test_set = [[] for _ in range(args.classes)]
    for idx, class_ in enumerate(samples_by_classes):
        chosen_indexes = np.random.choice(len(class_), test_set_size, replace=False)
        assert len(np.unique(chosen_indexes)) == len(chosen_indexes)
        for index in chosen_indexes:
            test_set[idx].append([class_[index], idx])
        samples_by_classes[idx] = np.delete(np.asarray(class_), chosen_indexes, axis=0)

    val_set_size = int(lowest_class_population * args.val_size)
    val_set = [[] for _ in range(args.classes)]
    for idx, class_ in enumerate(samples_by_classes):
        chosen_indexes = np.random.choice(len(class_), val_set_size, replace=False)
        assert len(np.unique(chosen_indexes)) == len(chosen_indexes)
        for index in chosen_indexes:
            val_set[idx].append([class_[index], idx])
        samples_by_classes[idx] = np.delete(np.asarray(class_), chosen_indexes, axis=0)

    train_set_size = int(lowest_class_population * (1 - (args.val_size + args.test_size)))
    train_set = [[] for _ in range(args.classes)]
    for idx, class_ in enumerate(samples_by_classes):
        chosen_indexes = np.random.choice(len(class_), train_set_size, replace=False)
        assert len(np.unique(chosen_indexes)) == len(chosen_indexes)
        for index in chosen_indexes:
            train_set[idx].append([class_[index], idx])
        samples_by_classes[idx] = np.delete(np.asarray(class_), chosen_indexes, axis=0)

    train_set, val_set, test_set = unravel_dataset(train_set=train_set, val_set=val_set, test_set=test_set)
    return prep_dataset(train_set=train_set, val_set=val_set, test_set=test_set)
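# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): prep_monte_carlo only
# reads args.classes, args.val_size and args.test_size here, plus whatever
# generate_samples() expects (e.g. a dataset path); the field values below are
# illustrative, and the three-way unpacking follows the docstring's claim that
# training, validation and testing objects are returned.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(classes=9, val_size=0.1, test_size=0.2)
    train_data, val_data, test_data = prep_monte_carlo(args=args)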
// Copyright 2020 the Blobloom authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Benchmarks for the basic operations live in the benchmarks/ subpackage.

package blobloom

import (
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
)

// Baseline for BenchmarkAddSync.
func benchmarkAddLocked(b *testing.B, nbits uint64) {
	b.Helper()

	const nhashes = 22 // Large number of hashes to create collisions.

	var (
		f    = New(nbits, nhashes)
		mu   sync.Mutex
		seed uint32
	)

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		r := rand.New(rand.NewSource(int64(atomic.AddUint32(&seed, 1))))
		for pb.Next() {
			mu.Lock()
			f.Add(r.Uint64())
			mu.Unlock()
		}
	})
}

func BenchmarkAddLocked128kB(b *testing.B) { benchmarkAddLocked(b, 1<<20) }
func BenchmarkAddLocked1MB(b *testing.B)   { benchmarkAddLocked(b, 1<<23) }
func BenchmarkAddLocked16MB(b *testing.B)  { benchmarkAddLocked(b, 1<<27) }

func benchmarkAddSync(b *testing.B, nbits uint64) {
	b.Helper()

	const nhashes = 22 // Large number of hashes to create collisions.

	f := NewSync(nbits, nhashes)
	var seed uint32

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		r := rand.New(rand.NewSource(int64(atomic.AddUint32(&seed, 1))))
		for pb.Next() {
			f.Add(r.Uint64())
		}
	})
}

func BenchmarkAddSync128kB(b *testing.B) { benchmarkAddSync(b, 1<<20) }
func BenchmarkAddSync1MB(b *testing.B)   { benchmarkAddSync(b, 1<<23) }
func BenchmarkAddSync16MB(b *testing.B)  { benchmarkAddSync(b, 1<<27) }

func BenchmarkCardinalityDense(b *testing.B) {
	f := New(1<<20, 2)
	for i := range f.b {
		for j := range f.b[i] {
			f.b[i][j] = rand.Uint32()
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f.Cardinality()
	}
}

func BenchmarkCardinalitySparse(b *testing.B) {
	f := New(1<<20, 2)
	for i := 0; i < len(f.b); i += 2 {
		for _, j := range []int{4, 8, 13} {
			f.b[i][j] = rand.Uint32()
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f.Cardinality()
	}
}

func BenchmarkOnescount(b *testing.B) {
	var blk block
	for i := range blk {
		blk[i] = rand.Uint32()
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		onescount(&blk)
	}
}

func BenchmarkUnion(b *testing.B) {
	const n = 1e6
	var (
		cfg    = Config{Capacity: n, FPRate: 1e-5}
		f      = NewOptimized(cfg)
		g      = NewOptimized(cfg)
		fRef   = NewOptimized(cfg)
		gRef   = NewOptimized(cfg)
		hashes = randomU64(n, 0xcb6231119)
	)
	b.Logf("NumBits = %d", f.NumBits())

	for _, h := range hashes[:n/2] {
		fRef.Add(h)
	}
	for _, h := range hashes[n/2:] {
		gRef.Add(h)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		f.Clear()
		f.Union(fRef)
		g.Clear()
		g.Union(gRef)
		b.StartTimer()
		f.Union(g)
	}
}
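// Hedged sketch (not part of the original file): BenchmarkUnion calls
// randomU64, which is defined in the package's shared test helpers. A minimal
// compatible version, assuming it draws n pseudo-random uint64 values from a
// fixed seed (the seed type is a guess from the call site), would look like:
//
//	func randomU64(n int, seed int64) []uint64 {
//		r := rand.New(rand.NewSource(seed))
//		out := make([]uint64, n)
//		for i := range out {
//			out[i] = r.Uint64()
//		}
//		return out
//	}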
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class Main {

    public static void main(String[] args) throws IOException {
        // Read input with a BufferedReader.
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));

        // First line: number of test cases.
        String nTestCasesStr = reader.readLine();
        int nTestCases = Integer.parseInt(nTestCasesStr);

        for (int i = 0; i < nTestCases; i++) {
            String caseLine = reader.readLine();
            String[] arrOfStr = caseLine.split(" ", 5);
            CalculateRequirements(Integer.parseInt(arrOfStr[0]),
                    Integer.parseInt(arrOfStr[1]),
                    Integer.parseInt(arrOfStr[2]),
                    Integer.parseInt(arrOfStr[3]),
                    Integer.parseInt(arrOfStr[4]));
        }
    }

    public static void CalculateRequirements(int a, int b, int c, int d, int k) {
        // One pen covers c units of a; one pencil covers d units of b; round up.
        int npens = (int) Math.ceil((double) a / c);
        int npencils = (int) Math.ceil((double) b / d);
        if (k >= npens + npencils) {
            System.out.println(npens + " " + npencils);
        } else {
            System.out.println(-1);
        }
    }
}
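// Hedged example (not part of the original source): each test case line holds
// "a b c d k"; the program prints ceil(a/c) pens and ceil(b/d) pencils if they
// fit within capacity k, and -1 otherwise. With the hypothetical input below:
//
//   stdin:
//     2
//     7 5 4 5 8
//     7 5 4 5 2
//   stdout:
//     2 1
//     -1
//
// First case: ceil(7/4) = 2 plus ceil(5/5) = 1 fits within k = 8.
// Second case: the same 3 items do not fit within k = 2, so -1 is printed.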