/* We might not handle wrap around of the RDI register correctly... * In that case, if we do wrap around, the effect will manifest in the higher bits of the register */ int v3_handle_svm_io_ins(struct guest_info * core, struct svm_io_info * io_info) { struct v3_io_hook * hook = v3_get_io_hook(core->vm_info, io_info->port); int read_size = 0; addr_t dst_addr = 0; uint_t rep_num = 1; ullong_t mask = 0; struct v3_segment * theseg = &(core->segments.es); addr_t inst_ptr; int direction = 1; struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags); if (flags->df) { direction = -1; } if (v3_gva_to_hva(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) { PrintError(core->vm_info, core, "Can't access instruction\n"); return -1; } while (is_prefix_byte(*((char *)inst_ptr))) { switch (*((char *)inst_ptr)) { case PREFIX_CS_OVERRIDE: theseg = &(core->segments.cs); break; case PREFIX_SS_OVERRIDE: theseg = &(core->segments.ss); break; case PREFIX_DS_OVERRIDE: theseg = &(core->segments.ds); break; case PREFIX_ES_OVERRIDE: theseg = &(core->segments.es); break; case PREFIX_FS_OVERRIDE: theseg = &(core->segments.fs); break; case PREFIX_GS_OVERRIDE: theseg = &(core->segments.gs); break; default: break; } inst_ptr++; } PrintDebug(core->vm_info, core, "INS on port %d (0x%x)\n", io_info->port, io_info->port); if (io_info->sz8) { read_size = 1; } else if (io_info->sz16) { read_size = 2; } else if (io_info->sz32) { read_size = 4; } else { PrintError(core->vm_info, core, "io_info Invalid Size\n"); return -1; } if (io_info->addr16) { mask = 0xffff; } else if (io_info->addr32) { mask = 0xffffffff; } else if (io_info->addr64) { mask = 0xffffffffffffffffLL; } else { mask = get_gpr_mask(core); PrintDebug(core->vm_info, core, "INS io_info invalid address size, mask=0x%p, io_info=0x%p\n", (void *)(addr_t)mask, (void *)(addr_t)(io_info)); } if (io_info->rep) { rep_num = core->vm_regs.rcx & mask; } PrintDebug(core->vm_info, core, "INS size=%d for %d steps\n", read_size, rep_num); while (rep_num > 0) { addr_t host_addr; dst_addr = get_addr_linear(core, (core->vm_regs.rdi & mask), theseg); if (v3_gva_to_hva(core, dst_addr, &host_addr) == -1) { PrintError(core->vm_info, core, "Could not convert Guest VA to host VA\n"); return -1; } if (hook == NULL) { PrintDebug(core->vm_info, core, "INS operation on unhooked IO port 0x%x - returning zeros\n", io_info->port); memset((char*)host_addr,0,read_size); } else { if (hook->read(core, io_info->port, (char *)host_addr, read_size, hook->priv_data) != read_size) { PrintError(core->vm_info, core, "Read Failure for ins on port 0x%x\n", io_info->port); return -1; } } core->vm_regs.rdi += (read_size * direction); if (io_info->rep) { core->vm_regs.rcx--; } rep_num--; } return 0; }
def on_connection_lost(self, func: Callable[['Protocol', Exception], None]) -> None: self._on_connection_lost = func
def _choose_refinement_input(self): simulation_path = os.path.join( self.global_simulation.pele_dir, self.global_simulation.output ) n_best_poses = int( self.global_simulation.iterations * self.global_simulation.pele_steps * (self.global_simulation.cpus - 1) * 0.25 ) if not self.args.debug: with cd(simulation_path): files_out, _, _, _, output_energy = bs.main( str(self.args.be_column), n_structs=n_best_poses, path=".", topology=self.global_simulation.topology, logger=self.global_simulation.logger, ) snapshot = 0 files_out = [ os.path.join( self.global_simulation.pele_dir, self.global_simulation.output, f, ) for f in files_out ] input_pool = [ [ f, snapshot, self.global_simulation.residue, self.global_simulation.topology, ] for f in files_out ] all_coords = parallelize(_extract_coords, input_pool, 1) coords = [list(c[0:3]) for c in all_coords] dataframe = pd.DataFrame( list(zip(files_out, output_energy, coords)), columns=["File", "Binding energy", "1st atom coordinates"], ) self.dataframe = dataframe.sort_values( ["Binding energy"], ascending=True ) inputs = self._check_ligand_distances() directory = os.path.join(self.working_folder, "refinement_input") if not os.path.isdir(directory): os.makedirs(directory, exist_ok=True) for i in inputs: os.system("cp {} {}/.".format(i, directory))
// Creates two CacheMachines and registers them with the `project_kernel` std::tuple<std::shared_ptr<CacheMachine>, std::shared_ptr<CacheMachine>> register_project_kernel_with_cache_machines( std::shared_ptr<kernel> project_kernel, std::shared_ptr<Context> context, int cache_level_override) { std::shared_ptr<CacheMachine> inputCacheMachine = std::make_shared<CacheMachine>(context, "", true, cache_level_override); std::shared_ptr<CacheMachine> outputCacheMachine = std::make_shared<CacheMachine>(context, "", true, 0); project_kernel->input_.register_cache("1", inputCacheMachine); project_kernel->output_.register_cache("1", outputCacheMachine); return std::make_tuple(inputCacheMachine, outputCacheMachine); }
import gym import random import numpy as np from jaipur_game import * # Press the green button in the gutter to run the script. if __name__ == '__main__': h = Hand() h2 = Hand() pile = Deck() pile.deal_hand(h) pile.deal_hand(h2) h.read_cards() h2.read_cards() pile.read_cards() middle = MiddlePile() print("Cards left: " + middle.num_cards().__str__()) middle.read_cards() # See PyCharm help at https://www.jetbrains.com/help/pycharm/
def xtype_from_derivation(derivation: str) -> str: if derivation.startswith("m/44'"): return 'standard' if derivation.startswith("m/45'"): return 'standard' raise Exception('Unknown bip43 derivation purpose %s' % derivation[:5])
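A quick usage sketch (the derivation strings below are illustrative, not from the source):

# hypothetical derivation paths, for illustration only
assert xtype_from_derivation("m/44'/0'/0'") == 'standard'
assert xtype_from_derivation("m/45'") == 'standard'
# any other purpose raises: xtype_from_derivation("m/84'/0'/0'") -> Exception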
def _checkKnownRunOrder(order): if order not in _runOrders: raise usage.UsageError( "--order must be one of: %s. See --help-orders for details" % (", ".join(repr(order) for order in _runOrders),)) return order
Last Words With RuPaul's Drag Race's Sasha Belle The Advocate spoke with RuPaul's Drag Race's Sasha Belle after he sashayed away. Another week, another queen! Sasha Belle has sashayed away from RuPaul's Drag Race, after a high-flying episode that tasked the contestants with performing an airline musical. The Advocate spoke to Belle about the up-in-the-air striptease that "crashed and burned," what it's like to be blown by the Pit Crew (with a leaf blower), and the experience of lipsyncing for one's life in front of Olivia Newton-John. The Advocate: How did you prepare for RuPaul’s Drag Race? Sasha Belle: I worked hard every year to get better. I tried to learn from my mistakes. I learned how to sew, and my seamstress and I came up with some sickening runways — except for the jetset eleganza. That was a last-minute switch because of a zipper malfunction. What did it feel like to be blown by the Pit Crew? Oh my God, check please! I absolutely love Miles, so it was great to have some alone time... with tools. What was the most difficult part of this week’s main challenge? Having such a small part. I don't blame Ginger, she obviously made the right decision because Kasha killed that scat. But I was so worried about not standing out, I tried to do a striptease that crashed and burned. But I really loved this challenge, and had a great time. If you had gotten the part you wanted, do you think it would have made a difference? Oh, I don't know. I probably would have found some other way to sabotage myself. [laughs] I was overthinking everything. What is your preferred airline? Supermodel Airlines. They don't keep their wigs in the overhead storage. Looking back on the episode, would you have done anything differently to avoid elimination? I would have relaxed and just played my part, instead of trying to upstage anyone. And I would have brought a backup "jetset eleganza" look. What was it like lipsyncing an Olivia Newton-John song, in front of Olivia Newton-John? Awesome! She was so sweet, so I wasn't that nervous about her. I was more nervous about Michelle. I look up to her so much, and I didn't want to let her down by doing something stupid, like letting my titty pop out. Which Grease character do you most identify with? I'm more of a Grease 2 fan. I need a coo-oo-oo-ool rider. Has your experience on the show improved your drag, or impacted how you perform? Yes! I have always made a point to not take myself too seriously. But watching yourself lose a TV show is a master class in humble pie. It was a great lesson. Too bad not everyone on the season is going to learn it. Is there an aspect of your drag or yourself that you felt you didn’t have time to convey, since you were eliminated in the second episode? I wanted to act! That's my thing, and I couldn't wait to do the Long Island Medium for Snatch Game. I also feel like all the goofy times I had with my new friends were left on the editing floor. I actually had a lot of fun. Who is your favorite among the remaining queens this season? Ginger. I think she has everything it takes to win. She is so funny. What first attracted you to drag? I think how ridiculous it is. We are spoofing gender, and it was so refreshing after an entire childhood of trying to fit into society's idea of what boys should act like. What inspired your drag name? My first name was Frisbee Jenkins. But I switched it to Sasha because of Beyonce, a.k.a. Sasha Fierce. And Belle is from my drag mother Pretty Belle. What should a drag queen always keep in her purse? 
Singles, so you can tip your fellow queens and bartenders. What’s next after RuPaul? Hopefully, RuPaul. I'll Shangela Season 8 if I can fit in the box. Why did the drag queen cross the road? Never! She would have a minion do it for her. Thanks, Sasha! Watch Belle give "Drunk Makeover" tips below.
import { Component, OnInit, Input, Output, EventEmitter } from '@angular/core'; import { ManageMoviesService } from 'src/app/admin/shared/services/manage-movies.service'; import { Screening } from 'src/app/admin/shared/models/screening'; import { Room } from 'src/app/admin/shared/models/room'; @Component({ selector: 'app-add-screening', templateUrl: './add-screening.component.html', styleUrls: ['./add-screening.component.scss'], }) export class AddScreeningComponent implements OnInit { isLoading = false; rooms: Room[] = []; newScreening = new Screening(); @Input() movieId: string; @Output() save: EventEmitter<any> = new EventEmitter(); constructor(private readonly manageMoviesService: ManageMoviesService) {} ngOnInit(): void { this.newScreening.movieId = this.movieId; this.loadRooms(); } loadRooms() { this.manageMoviesService .getRooms() .subscribe(rooms => { this.rooms = rooms; }); } addScreening() { this.isLoading = true; this.manageMoviesService .addScreening(this.newScreening) .subscribe(() => { this.newScreening.startDate = new Date(); this.newScreening.roomId = null; this.save.emit(); }) .add(() => (this.isLoading = false)); } }
/** * Created by tully. * <p> * Subject in the Observer pattern. */ public class Subject { private final List<Observer> observers = new ArrayList<>(); /** * register new observer. * * @param observer to register */ public void register(Observer observer) { observers.add(observer); } /** * remove observer from the list * * @param observer to remove */ public void unregister(Observer observer) { observers.remove(observer); } /** * Send notifications * * @param event */ public void notify(Event event) { observers.forEach(observer -> observer.notify(event)); } }
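A minimal usage sketch (it assumes the Observer interface declares notify(Event), as the forEach above implies; LoggingObserver and the no-arg Event constructor are hypothetical):

// Hypothetical observer that logs each event it receives.
public class LoggingObserver implements Observer {
    @Override
    public void notify(Event event) {
        System.out.println("received: " + event);
    }
}

public class ObserverDemo {
    public static void main(String[] args) {
        Subject subject = new Subject();
        Observer logger = new LoggingObserver();
        subject.register(logger);
        subject.notify(new Event()); // every registered observer receives the event
        subject.unregister(logger);
    }
}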
Is hemiepiphytism an adaptation to high irradiance? Testing seedling responses to light levels and drought in hemiepiphytic and non-hemiepiphytic Ficus. The epiphytic growth habit in many Ficus species during their juvenile stages has commonly been hypothesized to be an adaptation for avoiding deep shade in the forest understory, but this has never been tested experimentally. We examined growth and ecophysiology in seedlings of three hemiepiphytic (Hs) and three non-hemiepiphytic (NHs) Ficus species grown under different irradiance levels. Both Hs and NHs exhibited characteristics of high-light-requiring species, such as high plasticity to growth irradiance and relatively high maximum photosynthetic assimilation rates. Diurnal measurements of leaf gas exchange showed that Hs have much shorter active photosynthetic periods than NHs; moreover, leaves of Hs have lower xylem hydraulic conductivity but stronger drought tolerance, as indicated by much lower rates of leaf dieback during the drought treatment. Seedlings of NHs had 3.3- and 13.3-fold greater height and biomass than those of Hs species after growing in the nursery for 5 months, indicating a trade-off between growth and drought tolerance due to the conflicting requirements for xylem conductivity and cavitation resistance. This study does not support the shade-avoidance hypothesis; rather, it suggests that canopy regeneration in Hs is an adaptation to avoid alternative terrestrial growth-related risks imposed on tiny Ficus seedlings. The NHs with terrestrial regeneration reduce these risks by having an initial burst of growth to rapidly gain relatively large seedling sizes, while in Hs seedlings more conservative water use and greater drought tolerance for surviving the canopy environment are intrinsically associated with slow growth.
We’re three days from the start of NBA free agency and the Cleveland Cavaliers still don’t have a general manager. That’s scary stuff, when you consider what the Boston Celtics are trying to do to improve, and what the Houston Rockets are doing. Hell, the Lakers have gotten significantly better. The Cavs, meanwhile, with the 2nd best player in NBA history just a year away from free agency, aren’t factors at all. As if that wasn’t enough, this bombshell dropped in Cleveland earlier this week: Cavs owner Dan Gilbert allegedly nearly traded for Paul George and Eric Bledsoe – Cleveland would have lost Kyrie Irving – but when the billionaire went to LeBron asking for assurances he’d stay, The King said he couldn’t do that. In the rearview now but heard from very reliable source talks were very real pic.twitter.com/lxGyZKjZVM — Vince Grzegorek (@vincethepolack) June 26, 2017 Two ways to look at this: * Yikes! Nooooooo! There’s no chance LeBron is coming back, and we are losing him to the Lakers in 2018. * Wait, Dan Gilbert is doing everything he can do in hopes that LeBron stays? This is great! We’re keeping LeBron! The NBA offseason remains fantasssssssstic.
/** * Wrap the provided aggregator so that it behaves (almost) as if it had been * collected directly. */ public Aggregator wrap(final Aggregator in) { return new Aggregator() { @Override public boolean needsScores() { return in.needsScores(); } @Override public void close() throws ElasticsearchException { in.close(); } @Override public String name() { return in.name(); } @Override public Aggregator parent() { return in.parent(); } @Override public AggregationContext context() { return in.context(); } @Override public Aggregator subAggregator(String name) { return in.subAggregator(name); } @Override public InternalAggregation buildAggregation(long bucket) throws IOException { if (selectedBuckets == null) { throw new ElasticsearchIllegalStateException("Collection has not been replayed yet."); } final long rebasedBucket = selectedBuckets.find(bucket); if (rebasedBucket == -1) { throw new ElasticsearchIllegalStateException("Cannot build for a bucket which has not been collected"); } return in.buildAggregation(rebasedBucket); } @Override public InternalAggregation buildEmptyAggregation() { return in.buildEmptyAggregation(); } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { throw new ElasticsearchIllegalStateException("Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } @Override public void preCollection() throws IOException { throw new ElasticsearchIllegalStateException("Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } @Override public void postCollection() throws IOException { throw new ElasticsearchIllegalStateException("Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } }; }
{-| Description: Advancement of SDL state. -} module Graphics.UI.SDL.State.Advance ( nextState ) where import Control.Monad (liftM, foldM) import Control.Applicative ((<$>)) import qualified Data.Set as S import qualified Data.BitSet.Word as BW import qualified Data.Map.Strict as M import Control.Lens import Data.Maybe (fromJust) import Control.Monad.IO.ExClass import Graphics.UI.SDL.State.Types import Graphics.UI.SDL.Events.Types import Graphics.UI.SDL.Video.Window import Graphics.UI.SDL.Video.Mouse import Graphics.UI.SDL.Video.Keyboard.Types import Control.Lens.Instances () -- | Advance an SDL state. -- During advancement it would query all necessary data to keep the state -- consistent and up to date, given that all events that were received by -- SDL were fed into it. nextState :: forall m. MonadIO' m => StateData -> [EventData] -> m StateData nextState s0 es = foldM (flip upd) s0 { _rawEvents = reverse es } es where upd :: EventData -> StateData -> m StateData upd (Window i Shown) = \s -> (\r -> s & windowState.at i ?~ r) <$> def where def :: m WindowState def = do -- May fail if there are no other references to Window. Just w <- getWindowFromID i _wpos <- getWindowPosition w _wsize <- getWindowSize w return WindowState { _keysPressed = S.empty , _scansPressed = S.empty , _modsPressed = BW.empty , _mouseState = M.empty , _wshown = True , _mouseFocus = False , _kbdFocus = False , .. } upd (Window i e) = windowState.at i %%~ liftM Just . winUpd e . fromJust upd e = return . case e of Window i Hidden -> rm i Window i Closed -> rm i _ -> id where rm i = windowState.at i .~ Nothing winUpd (Mouse i e) = mouseState.at i %%~ liftM (Just . mouseUpd e) . maybe def return where def = do (_mousePos, _mousePressed) <- getRelativeMouseState return MouseState { .. } winUpd e = return . case e of Moved wp -> wpos .~ wp SizeChanged ws -> wsize .~ ws WinEntered -> mouseFocus .~ True WinLeft -> mouseFocus .~ False FocusGained -> kbdFocus .~ True FocusLost -> kbdFocus .~ False Keyboard KeyboardEvent { _kstate, _keySym = KeySym { .. } } -> case _kstate of Pressed -> (keysPressed.at _keyCode ?~ ()) . (scansPressed.at _scanCode ?~ ()) . (modsPressed .~ _keyMod) Released -> (keysPressed.at _keyCode .~ Nothing) . (scansPressed.at _scanCode .~ Nothing) . (modsPressed .~ _keyMod) _ -> id mouseUpd (MMotion MouseMotionEvent { .. }) = (mousePos .~ _mmpos) mouseUpd (MButton MouseButtonEvent { .. }) = case _mstate of Pressed -> (mousePressed.at _mbutton ?~ ()) . (mousePos .~ _mbpos) Released -> (mousePressed.at _mbutton .~ Nothing) . (mousePos .~ _mbpos) mouseUpd _ = id
package com.avides.spring.rabbit.test.support; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; import lombok.ToString; @Getter @Setter @EqualsAndHashCode @ToString public class TestClass { private float floatProperty; private String stringProperty; private Integer integerProperty; private SubTestClass subTestClass; @NoArgsConstructor @AllArgsConstructor @Getter @Setter @EqualsAndHashCode @ToString static class SubTestClass { private int intProperty; private Double doubleProperty; } public static TestClass buildBase() { TestClass testClass = new TestClass(); testClass.setFloatProperty(2); return testClass; } public static TestClass buildComplete() { TestClass testClass = buildBase(); testClass.setStringProperty("string"); testClass.setIntegerProperty(Integer.valueOf(3)); testClass.setSubTestClass(new SubTestClass(4, Double.valueOf(5))); return testClass; } }
// Unwrap will unwrap an error and do // type assertion to it func Unwrap() { err := error(ErrorTyped{errors.New("an error occurred")}) err = errors.Wrap(err, "wrapped") fmt.Println("wrapped error: ", err) switch errors.Cause(err).(type) { case ErrorTyped: fmt.Println("a typed error occurred: ", err) default: fmt.Println("an unknown error occurred") } }
/* TestResourceRecordSetsAdditionVisible verifies that added RRS is visible after addition */ func TestResourceRecordSetsAdditionVisible(t *testing.T) { zone := firstZone(t) sets := rrs(t, zone) rrset := getExampleRrs(zone) addRrsetOrFail(t, sets, rrset) defer sets.StartChangeset().Remove(rrset).Apply() t.Logf("Successfully added resource record set: %v", rrset) found := false for _, record := range listRrsOrFail(t, sets) { if record.Name() == rrset.Name() { found = true break } } if !found { t.Errorf("Failed to find added resource record set %s", rrset.Name()) } }
package kanarya import ( "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) // S3Client initializes a new S3 client that can be used in S3 actions. // Endpoint will be set to a localstack endpoint when running tests, // otherwise it will use the default AWS location. Localstack requires // S3ForcePathStyle to be set to true; in production it is set to false. func S3Client(region string) *s3.S3 { pathStyle := false if os.Getenv("CI") == "true" { pathStyle = true } return s3.New( session.Must(session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, })), &aws.Config{ Region: aws.String(region), Endpoint: aws.String(os.Getenv("AWS_S3_ENDPOINT")), S3ForcePathStyle: aws.Bool(pathStyle), }, ) }
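A usage sketch (both the bucket name and the module import path github.com/msdundar/kanarya are assumptions):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/msdundar/kanarya"
)

func main() {
	client := kanarya.S3Client("eu-central-1")
	// List a (hypothetical) bucket to confirm the client works.
	out, err := client.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String("my-bucket"),
	})
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println("objects:", len(out.Contents))
}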
// Copyright 2019 Twitter, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #[macro_use] extern crate logger; pub fn main() { println!("A simple demo of the logger"); logger::Logger::new() .label("demo") .level(logger::Level::Trace) .init() .expect("Failed to initialize logger"); trace!("Some tracing message"); debug!("Some debugging message"); info!("Just some general info"); warn!("You might want to know this"); error!("You need to know this"); fatal!("Something really bad happened! Terminating program"); // code below would be unreachable }
// ConnectSSLSocket creates a new SSL socket connection for the ESP8266/ESP32. // Currently only supports single connection mode. func (d *Device) ConnectSSLSocket(addr, port string) error { protocol := "SSL" val := "\"" + protocol + "\",\"" + addr + "\"," + port + ",120" d.Set(TCPConnect, val) _, err := d.Response(6000) if err != nil { return err } return nil }
/* * block_add2_kernel.h * * Created on: Nov 20, 2013 * Author: smanzer */ #ifndef BLOCK_ADD2_KERNEL_H_ #define BLOCK_ADD2_KERNEL_H_ #include "block_kernel_i.h" namespace libtensor { template<typename T> class block_add2_kernel: public libtensor::block_kernel_i<block_add2_kernel<T>,T> { private: static const char* k_clazz; //!< Class name T m_lhs_scalar; T m_rhs_scalar; public: block_add2_kernel(T lhs_scalar,T rhs_scalar) : m_lhs_scalar(lhs_scalar),m_rhs_scalar(rhs_scalar) {} void operator()(const std::vector<T*>& ptrs, const std::vector< dim_list >& dim_lists); }; template<typename T> const char* block_add2_kernel<T>::k_clazz = "block_add2_kernel<T>"; } /* namespace libtensor */ template<typename T> void libtensor::block_add2_kernel<T>::operator()( const std::vector<T*>& ptrs, const std::vector<dim_list>& dim_lists) { size_t first_size = dim_lists[0].size(); const dim_list& first_dims = dim_lists[0]; #ifdef LIBTENSOR_DEBUG if(dim_lists.size() != 3 || ptrs.size() != dim_lists.size()) { throw bad_parameter(g_ns, k_clazz,"operator()(...)", __FILE__, __LINE__, "invalid number of pointers or dim_lists"); } //Check that dimensions for all blocks are the same for(size_t i = 1; i < dim_lists.size(); ++i) { const dim_list& cur_dims = dim_lists[i]; if(cur_dims.size() != first_size) { throw bad_parameter(g_ns, k_clazz,"operator()(...)", __FILE__, __LINE__, "dim lists are not all the same size"); } for(size_t j = 0; j < first_size; ++j) { if(cur_dims[j] != first_dims[j]) { throw bad_parameter(g_ns, k_clazz,"operator()(...)", __FILE__, __LINE__, "dimensions do not match. Permuted addition is not currently supported."); } } } #endif //Just do the add size_t n_elements = 1; for(size_t i = 0; i < first_size; ++i) { n_elements *= first_dims[i]; } for(size_t i = 0; i < n_elements; ++i) { ptrs[0][i] = m_lhs_scalar*ptrs[1][i] + m_rhs_scalar*ptrs[2][i]; } } #endif /* BLOCK_ADD2_KERNEL_H_ */
def make_pattern(count: int, size: int) -> list: patterns = [(200, -80)] for i in range(count - 1): block = patterns[-1] while block in patterns: curr = randint(1, 4) if curr == 1: block = (block[0], block[1] - size) elif curr == 2: block = (block[0] + size, block[1]) elif curr == 3: block = (block[0], block[1] + size) elif curr == 4: block = (block[0] - size, block[1]) patterns.append(block) return patterns
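A usage sketch (make_pattern calls randint, so it needs random imported; output varies per run):

from random import randint  # required by make_pattern

blocks = make_pattern(count=10, size=40)
print(len(blocks))  # 10 distinct positions, starting from (200, -80)
print(blocks[:3])   # e.g. [(200, -80), (200, -120), (240, -120)]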
// TODO: try to make use of the really good standard // string compare functions for superior performance static inline int string_cmp(string l, string r) { if (string_len(l) < string_len(r)) { while (l.start != l.end) { if (*l.start != *r.start) return *l.start - *r.start; r.start++; l.start++; } return -1; } while (r.start != r.end) { if (*r.start != *l.start) return *l.start - *r.start; l.start++; r.start++; } if (l.start == l.end) return 0; return 1; }
I am still the most wanted person in Pakistan while top terrorists like Osama Bin laden, Hafiz saeed were being pro… https://t.co/z5lMPuXi6v — Brahumdagh Bugti (@BBugti) 1511371849000 I am the president of Baloch Republican Party and we believe in political means of struggle. In order to counter ou… https://t.co/ZwE7lmbpYb — Brahumdagh Bugti (@BBugti) 1511370567000 NEW DELHI: On a day that a Pakistani court ordered that UN-designated terrorist Hafiz Saeed be freed, far away Switzerland rejected the asylum application of another Pakistani, who, along with his Baloch community, is a victim of human rights abuses by his country's government and army. The bitter coincidence didn't go unnoticed by the Pakistani in Switzerland, Brahumdagh Bugti, who is the leader of the banned Baloch Republican Party (BRP). Bugti has also applied to India for asylum. In January, TOI reported that the Indian government was delaying acting on that application as it wanted to wait and see how bilateral relations shape up under the then new Pakistan army chief. It's now November. Relations with Pakistan, which is unwilling to act on terror, have only become worse. And Saeed being freed by a Pakistani court yesterday has added insult to grievous injury. Will India now take Bugti's asylum application more seriously? After the Swiss rejected his asylum application yesterday, Bugti tweeted saying that while he's still "the most wanted person in Pakistan", terrorists like Saeed are not just being protected "by the Pakistan army", they are being helped in their activities. Bugti has a point. Saeed is designated a 'global terrorist' by the United Nations. The US has a $10 million bounty on him. And he is the mastermind of the horrific 2008 Mumbai terror attack that killed 166 people and left more than 300 wounded. In fact, the Lahore court order yesterday to free Saeed comes just four days before the ninth anniversary of the bloody Mumbai attack. By comparison, Bugti's CV is a yawn. He's the leader of the BRP, which wants Balochistan's freedom from Pakistan. In 1948, Balochistan was forced to accede to Pakistan. The Pakistani government calls Bugti and other Baloch leaders terrorists, even as its own army and intelligence wing have been accused of large-scale human rights violations, murder and rape of the Baloch people. "In Balochistan and Sindh, ethnic and religious minorities endure severe persecution by state forces and terrorists. These communities are repeatedly targeted by the controversial and often misapplied Blasphemy and Anti-Terrorism Laws...," said a report in April by the Unrepresented Nations and Peoples Organization, an international pro-democracy organization. Still, it appears Pakistan - and, analysts say, China too - have convinced the Swiss authorities of Bugti's alleged link to terrorists. A source in the Swiss government told Pakistan's Geo TV that Bugti's asylum application was turned down because of his links with "incidents of terrorism, violence and militant activities". Bugti denies as "fake" the terror link allegations. Geo TV said Bugti has the right to appeal. He has been in Switzerland since 2010, when he applied for asylum. He applied to India for asylum in September 2016. "Our children are dying. Considering the latest turn of events, the Indian government should grant asylum to us," he said in an interview to TOI's sister publication Navbharat Times in September last year.
/** * Created by Administrator on 2017/8/31. */ @Controller @RequestMapping("${adminPath}/sys/menu") public class MenuController { private Logger logger = Logger.getLogger(MenuController.class); @Resource private SysMenuService sysMenuService; @Resource private SysRoleService sysRoleService; /** * Navigate to the menu index page * @param model * @return */ @RequestMapping(method= RequestMethod.GET,value = {"/index"},name = "菜单首页") public String index(HttpServletRequest request,Model model) { request.setAttribute("menuList",sysMenuService.selectAll()); return "/sys/menuIndex"; } /** * Paginated role query * @return */ @ResponseBody @RequestMapping(method= RequestMethod.POST,value="/getMenuList",name = "角色添加的菜单数据") public List<Map> getMenuList(String flag, Integer roleId){ List<Map> sysMenuList = new ArrayList<Map>(); List<SysMenu> sysMenus = sysMenuService.selectAll(); try{ // Used when adding: query all menus if ("0".equals(flag)){ if (sysMenus.size()>0){ for (SysMenu sysmenu:sysMenus) { Map map = new HashMap(); map.put("id",sysmenu.getId()); map.put("pId",sysmenu.getParentid()); map.put("name",sysmenu.getName()); sysMenuList.add(map); } } }else{// Used when editing List<String> roleMenuIds =sysRoleService.selectRoleMenu(roleId);// query this role's permissions for (SysMenu sysmenu:sysMenus) { Map map = new HashMap(); map.put("id",sysmenu.getId()); map.put("pId",sysmenu.getParentid()); map.put("name",sysmenu.getName()); // mark the entry as checked if the role's permissions contain this menu id if (isBelongList(roleMenuIds,String.valueOf(sysmenu.getId()))) map.put("checked",true); sysMenuList.add(map); } } }catch (Exception e){ logger.error("getMenuList-=-:"+e.toString()); } return sysMenuList; } /** * Left-side menu tree * @param userName * @return */ @ResponseBody @RequestMapping(method= RequestMethod.GET,value="/getMenuListByUserName",name = "获取左侧菜单树") public List<MenuTreeNode> getMenuListByUserName(String userName){ // define an empty list List<MenuTreeNode> sysMenuList = new ArrayList<MenuTreeNode>(); // query all permissions for the user List<VuserRoleMenu> vuserRoleMenuList = sysMenuService.getMenuListByUserName(userName); if (vuserRoleMenuList.size() > 0){ // take the top-level parent VuserRoleMenu vuserRoleMenu = vuserRoleMenuList.get(0); // take the first record MenuTreeNode menuTreeNode = new MenuTreeNode(vuserRoleMenu.getMenuid(),vuserRoleMenu.getParentId(),false,vuserRoleMenu.getMenuname(),false,vuserRoleMenu.getIcon(),vuserRoleMenu.getHref(),vuserRoleMenu.getTarget()); menuTreeNode.getChildren().add(new MenuTreeNode(10010,0,false,"我的工作台",true,"","","")); for (int i = 1; i < vuserRoleMenuList.size(); i++) { // loop over the remaining records, adding them as child nodes MenuTreeNode menuTreeNode2 = new MenuTreeNode(vuserRoleMenuList.get(i).getMenuid(),vuserRoleMenuList.get(i).getParentId(),false,vuserRoleMenuList.get(i).getMenuname(),false,vuserRoleMenuList.get(i).getIcon(), vuserRoleMenuList.get(i).getHref(),vuserRoleMenuList.get(i).getTarget()); menuTreeNode.add(menuTreeNode2); } return menuTreeNode.getChildren(); }else { return sysMenuList; } } /** * Check whether the list contains the given value * @param roleMenuIds * @param str * @return */ private boolean isBelongList(List<String> roleMenuIds,String str){ boolean bResult = false; try{ for (String temp : roleMenuIds) { if (temp.equalsIgnoreCase(str)) { bResult = true; break; } } }catch (Exception ex){ } return bResult; } }
/** * Creates the wave form. * * @param audioBytes * the audio bytes */ public void createWaveForm(byte[] audioBytes) { lines.removeAllElements(); Dimension d = getSize(); int w = d.width; int h = d.height - 15; audioData = null; audioData = wd.extractFloatDataFromAudioInputStream(audioInputStream); int frames_per_pixel = wd.getAudioBytes().length / wd.getFormat().getFrameSize() / w; byte my_byte = 0; double y_last = 0; int numChannels = wd.getFormat().getChannels(); for (double x = 0; x < w && audioData != null; x++) { int idx = (int) (frames_per_pixel * numChannels * x); if (wd.getFormat().getSampleSizeInBits() == 8) { my_byte = (byte) audioData[idx]; } else { my_byte = (byte) (128 * audioData[idx] / 32768); } double y_new = (double) (h * (128 - my_byte) / 256); lines.add(new Line2D.Double(x, y_last, x, y_new)); y_last = y_new; } repaint(); }
package org.nodel; /* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ /** * Avoids having to create several instances of the Java Random class. */ public class Random { /** * (see getter) */ private java.util.Random _random = new java.util.Random(); /** * Returns a reference to this shared java.util.Random instance. */ public java.util.Random random() { return _random; } /** * (for convenience - see #java.util.Random) */ public int nextInt() { return _random.nextInt(); } /** * (for convenience - see #java.util.Random) * @param toExcl from 0 (inclusive) to 'toExcl' (exclusive). */ public int nextInt(int toExcl) { return _random.nextInt(toExcl); } /** * Returns random double from 0 (incl.) to 1 (excl.) * (for convenience - see #java.util.Random) */ public double nextDouble() { return _random.nextDouble(); } /** * (hidden) */ private Random() { } /** * (singleton) */ private static class LazyHolder { private static final Random INSTANCE = new Random(); } /** * (singleton) */ public static Random shared() { return LazyHolder.INSTANCE; } }
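Typical use goes through the singleton accessor, for example:

// e.g. inside any method:
int roll = Random.shared().nextInt(6) + 1; // uniform die roll in 1..6
double p = Random.shared().nextDouble();   // uniform in [0.0, 1.0)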
Despite all the development attempts and mass urbanization efforts, nobody can deny the fact that a large part of the Indian population still lives in villages located in different parts of the nation. As Gandhi observed, it is futile to attempt to urbanize Indian society and turn all the villages into cities. His ideas pinpointed the fact that development can be achieved on a wide scale based on the villages of this nation. All that the rural society of this nation lacked was education, the absence of which also paved the way for other conditions detrimental to the nation's development, like poverty, superstition, malnutrition, female feticide, high child mortality, uncontrolled childbirth, ignorance about sound health, etc. However, what is believed nowadays is that it is rural education in India that can bring in a wave of change over a period of time, if not drastically. The aspect of rural development has been discussed in several important Government meetings organized to bring about a feasible amount of change. However, the snag that keeps people in certain parts of the country from effective development efforts is the fact that they are widely unaware of the necessity, process and results of development. If the people of rural India do not involve themselves in the process of development there, it is almost impossible to make the process happen and succeed from outside. Education is the only tool that can expand the knowledge base of the people and help them start thinking about their long-term benefit. It helps them explore new and effective avenues to be a part of the development in their own yard and yield maximum benefit from the process. The process of rural education in India has to be planned in such a systematic order that more and more people living in rural India can come under the umbrella without investing much time or money. The concerned authorities of the Government of India as well as NGOs (non-governmental organizations) have already put in a lot of effort to improve the process of rural education in India. In most cases, it has become important for the plans to be implemented so that people with very limited resources also agree to get involved in the process. After all, educating people living in the rural parts of this nation is never possible until the rural audience finds the process interesting and engaging.
“Inherent Vice,” writer-director Paul Thomas Anderson’s adaptation of Thomas Pynchon’s darkly comic 2009 crime novel, has been selected as the centerpiece gala at the 52nd annual New York Film Festival, Variety has learned. The Oct. 4 bow will mark the world premiere of Anderson’s highly anticipated seventh feature, a 1970s Southern California-set detective yarn starring Joaquin Phoenix, Josh Brolin, Reese Witherspoon, Owen Wilson and Benicio Del Toro. Warner Bros. has slated the film for a Dec. 12 theatrical release. Following the announcement that David Fincher’s “Gone Girl” would make its world premiere as the fest’s opening-night entry (first reported by Variety on July 16), the selection of “Inherent Vice” bolsters NYFF’s rep as a key launchpad for high-profile fall titles, in addition to its long-standing position as an elite international-cinema showcase. For the past two years, the festival has reserved its gala slots for world premieres of such pics as “Life of Pi” and “Captain Phillips” (opening night); “Not Fade Away” and “The Secret Life of Walter Mitty” (centerpiece); and “Flight” and “Her” (closing night). While “Inherent Vice” is set to make its official world premiere at NYFF, it’s worth noting that Anderson’s films have a history of turning up in unexpected places. “The Master” had a number of unannounced showings Stateside before its unveiling in competition at Venice in 2012, while “There Will Be Blood” secretly world-premiered at Fantastic Fest in 2007. Like Fincher, whose “The Social Network” opened NYFF in 2010, Anderson is no stranger to the festival. “Punch-Drunk Love” was the centerpiece gala in 2002, and “Boogie Nights” bowed there in 1997. Presented by the Film Society of Lincoln Center under director of programming Kent Jones, the New York Film Festival will announce the rest of its main slate in the coming weeks. The event runs Sept. 26-Oct. 12. (Ramin Setoodeh and Scott Foundas in New York contributed to this report.)
/** * @brief Fills each XFMC_NORSRAMInitStruct member with its default value. * @param XFMC_NORSRAMInitStruct pointer to a XFMC_NorSramInitTpye * structure which will be initialized. */ void XFMC_InitNorSramStruct(XFMC_NorSramInitTpye* XFMC_NORSRAMInitStruct) { XFMC_NORSRAMInitStruct->Bank = XFMC_BANK1_NORSRAM1; XFMC_NORSRAMInitStruct->DataAddrMux = XFMC_DATA_ADDR_MUX_ENABLE; XFMC_NORSRAMInitStruct->MemType = XFMC_MEM_TYPE_SRAM; XFMC_NORSRAMInitStruct->MemDataWidth = XFMC_MEM_DATA_WIDTH_8B; XFMC_NORSRAMInitStruct->BurstAccMode = XFMC_BURST_ACC_MODE_DISABLE; XFMC_NORSRAMInitStruct->AsynchroWait = XFMC_ASYNCHRO_WAIT_DISABLE; XFMC_NORSRAMInitStruct->WaitSigPolarity = XFMC_WAIT_SIGNAL_POLARITY_LOW; XFMC_NORSRAMInitStruct->WrapMode = XFMC_WRAP_MODE_DISABLE; XFMC_NORSRAMInitStruct->WaitSigConfig = XFMC_WAIT_SIG_ACTIVE_BEFORE_WAIT_STATE; XFMC_NORSRAMInitStruct->WriteEnable = XFMC_WRITE_ENABLE; XFMC_NORSRAMInitStruct->WaitSigEnable = XFMC_WAIT_SIGNAL_ENABLE; XFMC_NORSRAMInitStruct->ExtModeEnable = XFMC_EXTENDED_DISABLE; XFMC_NORSRAMInitStruct->WriteBurstEnable = XFMC_WRITE_BURST_DISABLE; XFMC_NORSRAMInitStruct->RWTimingStruct->AddrSetTime = 0xF; XFMC_NORSRAMInitStruct->RWTimingStruct->AddrHoldTime = 0xF; XFMC_NORSRAMInitStruct->RWTimingStruct->DataSetTime = 0xFF; XFMC_NORSRAMInitStruct->RWTimingStruct->BusRecoveryCycle = 0xF; XFMC_NORSRAMInitStruct->RWTimingStruct->ClkDiv = 0xF; XFMC_NORSRAMInitStruct->RWTimingStruct->DataLatency = 0xF; XFMC_NORSRAMInitStruct->RWTimingStruct->AccMode = XFMC_ACC_MODE_A; XFMC_NORSRAMInitStruct->WTimingStruct->AddrSetTime = 0xF; XFMC_NORSRAMInitStruct->WTimingStruct->AddrHoldTime = 0xF; XFMC_NORSRAMInitStruct->WTimingStruct->DataSetTime = 0xFF; XFMC_NORSRAMInitStruct->WTimingStruct->BusRecoveryCycle = 0xF; XFMC_NORSRAMInitStruct->WTimingStruct->ClkDiv = 0xF; XFMC_NORSRAMInitStruct->WTimingStruct->DataLatency = 0xF; XFMC_NORSRAMInitStruct->WTimingStruct->AccMode = XFMC_ACC_MODE_A; }
import { deflate, inflate } from "pako"; import { FactorioBlueprint } from "../types/factorio"; const VERSION = "0"; const UINT_CHUNK_SIZE = 65536; export const uint8ToString = (array: Uint8Array) => { const c: string[] = []; for (let i = 0; i < array.length; i += UINT_CHUNK_SIZE) { c.push( String.fromCharCode.apply( null, array.subarray(i, i + UINT_CHUNK_SIZE) as unknown as number[] ) ); } return c.join(""); }; /** * Encodes a blueprint object to an importable string. */ export const encode = (blueprint: FactorioBlueprint) => { const jsonString = JSON.stringify(blueprint); const compressed = deflate(jsonString); const str = uint8ToString(compressed); const base64 = btoa(str); return VERSION + base64; }; /** * Parses a string into a blueprint object. */ export const decode = (blueprintString: string) => { const base64 = blueprintString.substring(1); const charArray = atob(base64); const compressed = Uint8Array.from(charArray, (c) => c.charCodeAt(0)); const jsonString = inflate(compressed, { to: "string" }); return JSON.parse(jsonString) as FactorioBlueprint; };
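A round-trip sketch (the blueprint object below is a hypothetical minimal shape; real FactorioBlueprint fields may differ):

// Hypothetical minimal blueprint; the cast keeps the sketch self-contained.
const blueprint = {
  blueprint: { item: "blueprint", entities: [], version: 0 },
} as unknown as FactorioBlueprint;

const exported = encode(blueprint); // "0" + base64(deflate(JSON))
const restored = decode(exported);  // inflates and parses it back
console.log(JSON.stringify(restored) === JSON.stringify(blueprint)); // true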
/** * Process the PIV 'VERIFY' command * * @param apdu The incoming APDU object */ private void processPIV_VERIFY(APDU apdu) { final byte CONST_P1_AUTH = (byte) 0x00; final byte CONST_P1_RESET = (byte) 0xFF; byte[] buffer = apdu.getBuffer(); short length = (short) (buffer[ISO7816.OFFSET_LC] & 0xFF); /* * PRE-CONDITIONS */ /* * EXECUTION STEPS */ if (buffer[ISO7816.OFFSET_P1] == CONST_P1_AUTH && length == ZERO_SHORT) { piv.verifyGetStatus(buffer[ISO7816.OFFSET_P2]); return; } if (buffer[ISO7816.OFFSET_P1] == CONST_P1_RESET && length == ZERO_SHORT) { piv.verifyResetStatus(buffer[ISO7816.OFFSET_P2]); return; } if (buffer[ISO7816.OFFSET_P1] == CONST_P1_AUTH && length != ZERO_SHORT) { piv.verify(buffer[ISO7816.OFFSET_P2], buffer, ISO7816.OFFSET_CDATA, length); return; } ISOException.throwIt(ISO7816.SW_INCORRECT_P1P2); }
package com.laytonsmith.abstraction; /** * * @author Layton */ public interface MCNote extends AbstractionObject { }
#include <iostream> using namespace std; int mtx[4][6]={ 4 , 5, 6, 3, 2, 1, 11,12,13,10, 9, 8, 4 , 5, 6, 3, 2, 1, 11,12,13,10, 9, 8, }; int main(){ long long int n,x=0; char a; cin>>n>>a;--n; x+=n/4*16;n%=4; x+=mtx[n][a-'a']; cout<<x; }
""" Epsilon-based selectors. Apply a reinforcement-learning approach to select prefixes. """ from collections import Counter, OrderedDict, defaultdict from ipaddress import ip_address, ip_network from zeph.queries import GetLinkDiscoveries, GetNodeDiscoveries from zeph.selectors.abstract import AbstractSelector class AbstractEpsilonSelector(AbstractSelector): def __init__( self, database_url: str, epsilon: int, authorized_prefixes, bgp_awareness: bool = True, ): self.database_url = database_url self.epsilon = epsilon self.authorized_prefixes = authorized_prefixes self.bgp_awareness = bgp_awareness self.rank_per_agent = {} def ip_to_network(self, ip, v4_length=24, v6_length=64) -> str: ip_mapped = ip.ipv4_mapped if ip_mapped: return ip_network(f"{ip_mapped}/{v4_length}") return ip_network(f"{ip}/{v6_length}") def compute_discoveries_nodes(self, measurement_uuid, agents_uuid) -> dict: """Get the discoveries (nodes) per agents.""" if measurement_uuid is None: return directives = {} for agent_uuid in agents_uuid: measurement_id = ( self._sanitize_uuid(measurement_uuid) + "__" + self._sanitize_uuid(agent_uuid) ) for data in GetNodeDiscoveries().execute_iter( self.database_url, measurement_id ): directives[ ( agent_uuid, self.ip_to_network(ip_address(data["probe_dst_prefix"])), data["probe_protocol"], ) ] = set(data["discoveries"]) return directives def compute_discoveries_links(self, measurement_uuid, agents_uuid) -> dict: """Get the discoveries (links) per agents.""" if measurement_uuid is None: return directives = {} for agent_uuid in agents_uuid: measurement_id = ( self._sanitize_uuid(measurement_uuid) + "__" + self._sanitize_uuid(agent_uuid) ) for data in GetLinkDiscoveries().execute_iter( self.database_url, measurement_id ): directives[ ( agent_uuid, self.ip_to_network(ip_address(data["probe_dst_prefix"])), data["probe_protocol"], ) ] = set([tuple(link) for link in data["discoveries"]]) return directives def select(self, agent_uuid, budget: int, exploitation_only=False): """ epsilon-based policy : * select e where eB will be used for exploration. 
and (1 - e)B is used for exploitation * Get the (1-e)B previous prefixes that maximize the discoveries * Pick random eB prefixes not already used in the exploration set """ if not self.rank_per_agent: # Snapshot #0 : No previous measurement # Select random prefixes depending on budget only return [], self._select_random(agent_uuid, budget) # Compute the number of prefixes for exploration [eB] / exploitation [(1-e)B] n_prefixes_exploration = int(self.epsilon * budget) n_prefixes_exploitation = budget - n_prefixes_exploration # Get the rank for the agent rank = self.rank_per_agent.get(agent_uuid) if rank is None: # The agent did not participate in the previous measurement return [], self._select_random(agent_uuid, budget) if exploitation_only: return set(rank), set(rank) # Pick the (1-e)B prefixes with the best reward prefixes_exploitation = set(rank[:n_prefixes_exploitation]) # Add random prefixes until the budget is completely burned return prefixes_exploitation.copy(), self._select_random( agent_uuid, budget, preset=prefixes_exploitation ) class EpsilonRewardSelector(AbstractEpsilonSelector): def compute_rank(self, subsets): """Compute the prefixes reward per agent based on the discoveries.""" if subsets is None: return # Count the discoveries discoveries_counter = Counter() for subset, discoveries in subsets.items(): discoveries_counter.update(discoveries) # Compute the reward (#unique_discoveries per prefix per agent) rewards_per_agent = defaultdict(dict) for subset, discoveries in subsets.items(): agent, prefix, _ = subset rewards_per_agent[agent][prefix] = [ discovery for discovery in discoveries if discoveries_counter[discovery] == 1 ] rank_per_agent = dict() for source_ip, rewards_per_prefix in rewards_per_agent.items(): rank = [(k, len(v)) for k, v in rewards_per_prefix.items()] rank = sorted(rank, key=lambda x: x[1], reverse=True) rank_per_agent[source_ip] = [prefix[0] for prefix in rank] return rank_per_agent class EpsilonNaiveSelector(AbstractEpsilonSelector): def compute_rank(self, subsets): """Compute the prefixes rank per agent based on the discoveries.""" if subsets is None: return total_discoveries = set() rank_per_agent = defaultdict(list) for subset, discoveries in subsets.items(): total_discoveries.update(discoveries) # Sort the subsets by size in descending order subsets = OrderedDict( sorted( [(k, v) for k, v in subsets.items()], key=lambda x: len(x[1]), reverse=True, ) ) covered = set() for subset in subsets: if covered == total_discoveries: break if subsets[subset] - covered: rank_per_agent[subset[0]].append(subset[1]) covered.update(subsets[subset]) return rank_per_agent class EpsilonGreedySelector(AbstractEpsilonSelector): def compute_rank(self, subsets): """Compute the prefixes rank per agent based on the discoveries.""" if subsets is None: return total_discoveries = set() rank_per_agent = defaultdict(list) for subset, discoveries in subsets.items(): total_discoveries.update(discoveries) covered = set() while covered != total_discoveries and subsets: subset = max(subsets, key=lambda subset: len(subsets[subset] - covered)) rank_per_agent[subset[0]].append(subset[1]) covered.update(subsets[subset]) del subsets[subset] return rank_per_agent class EpsilonDFGSelector(AbstractEpsilonSelector): def compute_rank(self, subsets, p=1.05): """Compute the prefixes rank per agent based on the discoveries.""" if subsets is None: return rank_per_agent = defaultdict(list) # Populate the subcollections subcollections = defaultdict(list) for subset in subsets: k = 0 while p ** (k + 
1) < len(subsets[subset]): k += 1 subcollections[k].append(subset) k_max = max(subcollections.keys()) covered = set() # k = k_max ... 1 for k in range(k_max, 0, -1): for subset in subcollections[k]: if len(subsets[subset] - covered) >= p ** k: rank_per_agent[subset[0]].append(subset[1]) covered.update(subsets[subset]) else: subsets[subset] = subsets[subset] - covered k_prime = 0 while p ** (k_prime + 1) < len(subsets[subset]): k_prime += 1 subcollections[k_prime].append(subset) # k = 0 for subset in subcollections[0]: if len(subsets[subset] - covered) == 1: rank_per_agent[subset[0]].append(subset[1]) covered.update(subsets[subset]) return rank_per_agent
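To make the select() budget split concrete, a small worked sketch of the arithmetic (the numbers are illustrative):

epsilon, budget = 0.1, 1000
n_exploration = int(epsilon * budget)    # 100 prefixes picked at random
n_exploitation = budget - n_exploration  # 900 best-ranked prefixes reused
print(n_exploration, n_exploitation)     # 100 900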
import re def special_match(s, search=re.compile(r'[^1-3+]').search): return not bool(search(s)) if __name__ == "__main__": inputstring = input() if 0 <= len(inputstring) <= 100: if special_match(inputstring): sumData = inputstring.split('+') sumData.sort() outputString = "" for index in range(len(sumData)): if sumData[index] == "": exit(0) if len(sumData) == 1 or index == len(sumData) - 1: outputString += sumData[index] continue outputString += sumData[index] + "+" print(outputString)
Social autopsy study identifies determinants of neonatal mortality in Doume, Nguelemendouka and Abong-Mbang health districts, Eastern Region of Cameroon Background Reducing preventable medical causes of neonatal death for faster progress toward MDG 4 will require Cameroon to adequately address the social factors contributing to these deaths. The objective of this paper is to explore the social, behavioral and health systems determinants of newborn death in Doume, Nguelemendouka and Abong-Mbang health districts, in Eastern Region of Cameroon, from 2007 to 2010. Methods Data come from the 2012 Verbal/Social Autopsy (VASA) study, which aimed to determine the biological causes and social, behavioral and health systems determinants of under-five deaths in Doume, Nguelemendouka and Abong-Mbang health districts in Eastern Region of Cameroon. The analysis of the data was guided by the review of the coverage of key interventions along the continuum of normal maternal and newborn care and by the description of breakdowns in the care provided for severe neonatal illnesses within the Pathway to Survival conceptual framework. Results One hundred sixty-four newborn deaths were confirmed from the VASA survey. The majority of the deceased newborns were living in households with poor socio-economic conditions. Most (60–80%) neonates were born to mothers who had one or more pregnancy or labor and delivery complications. Only 23% of the deceased newborns benefited from hygienic cord care after birth. Half received appropriate thermal care and only 6% were breastfed within one hour after birth. Sixty percent of the deaths occurred during the first day of life. Fifty-five percent of the babies were born at home. More than half of the deaths (57%) occurred at home. Of the 64 neonates born at a health facility, about 63% died in the health facility without leaving. Care-seeking was delayed for several neonates who became sick after the first week of life and whose illnesses were less serious at the onset, until they became more severely ill. Costs, including transport, health care and other expenses, emerged as the main barriers to formal care-seeking both for the mothers and their newborns. Conclusions This study presents an opportunity to strengthen maternal and newborn health by increasing the coverage of essential and low-cost interventions that could have saved the lives of many newborns in eastern Cameroon.
import os import shutil from pathlib import Path def remove_files(files): for file in files: try: os.remove(file) except IOError: pass def prepare_django(project_root): uses_django = '{{ cookiecutter.uses_django }}' == 'y' if uses_django: return # remove django related files files = [ project_root / 'tests' / 'conftest.py', project_root / 'tests' / 'models.py', project_root / 'tests' / 'urls.py', project_root / 'env.example', project_root / 'manage.py', project_root / '{{ cookiecutter.project_slug }}' / 'settings.py', project_root / '{{ cookiecutter.project_slug }}' / 'urls.py', project_root / '{{ cookiecutter.project_slug }}' / 'wsgi.py', ] remove_files(files) if __name__ == '__main__': project_root = Path(os.path.curdir) prepare_django(project_root)
#include <memory> #include "tk/GpsDatumParametersX.h" #include "tk/GpsGridParametersX.h" #include "tk/GpsConstantsX.h" #include "Errors.hpp" #include "DatumParametersImpl.hpp" #include "GridParametersImpl.hpp" #include "ProjectionImpl.hpp" namespace gps { ProjectionImpl::ProjectionImpl() : m_projection(new CProjections()) { clear(); } void ProjectionImpl::toUTM(DatumParameters& toDatum) { // Here were out of memory checks for pGridSrc ... // Check UTM zone try { checkZone(true); } catch (const std::exception& /*e*/) { // if (lResult) // { // TraceFunction(_T("<< CGpsProjection::ToUTM (Invalid UTM zone provided)\n")); // goto _EndToUTM; // } // TODO: Throw Invalid UTM Zone } std::unique_ptr<CGpsGridParametersX> pGridDst(new CGpsGridParametersX()); // Detect UTM zone parameters based on latitude / longitude if (m_zone) { m_zone = getUTMParameters(pGridDst.get()); } else { m_zone = getUTMParameters(m_latitude, m_longitude, pGridDst.get()); } // Set Destination datum // lResult = pDatum->FromOleObject(toDatum); DatumParametersImpl& impl = static_cast<DatumParametersImpl&>(toDatum); CGpsDatumParametersX* datumParameters = impl.getImpl(); std::unique_ptr<CGpsGridParametersX> pGridSrc(new CGpsGridParametersX()); // = WGS84 by default pGridDst->m_cGpsDatumParameters = *datumParameters; // Perform the transformation doTransform(*pGridSrc, *pGridDst); } void ProjectionImpl::putLongitude(double longitude) { m_longitude = longitude; } void ProjectionImpl::putLatitude(double latitude) { m_latitude = latitude; } std::string ProjectionImpl::getVersion() { throw std::runtime_error("The method or operation is not implemented."); } void ProjectionImpl::clear() { m_latitude = 0.0; m_longitude = 0.0; m_altitudeI = 0.0; m_altitudeO = 0.0; m_northing = 0.0; m_easting = 0.0; m_zone = 0L; } void ProjectionImpl::checkZone(bool enableAutoZone) { if (!enableAutoZone && m_zone == 0L) { throw std::runtime_error(errGPS_INVALIDZONE); } if (std::abs(m_zone) > 60L) { throw std::runtime_error(errGPS_INVALIDZONE); } } long ProjectionImpl::getUTMParameters(double latitude, double longitude, CGpsGridParametersX* pParams) { // was: lZone = long((lLongitude + 180) / 6) + 1L; long zone = long((longitude + 180.0) / 6.0) + 1L; // Clear pParams->Clear(); // Set Projection pParams->m_lProjection = GPS_PROJECTION_TRANSVERSEMERCATOR; // Scale Factor pParams->m_fScaleFactor = 0.9996; // Longitude Of Origin pParams->m_fOriginLongitude = ((zone - 1L) * 6.0) - 180.0 + 3.0; if (latitude >= 0.0) { pParams->m_fFalseEasting = 500000.0; pParams->m_fFalseNorthing = 0.0; zone *= 1L; } else { pParams->m_fFalseEasting = 500000.0; pParams->m_fFalseNorthing = 10000000.0; zone *= -1L; } return zone; } long ProjectionImpl::getUTMParameters(CGpsGridParametersX* pParams) { // Clear pParams->Clear(); // Set Projection pParams->m_lProjection = GPS_PROJECTION_TRANSVERSEMERCATOR; // Scale Factor pParams->m_fScaleFactor = 0.9996; // Longitude Of Origin pParams->m_fOriginLongitude = ((std::abs(m_zone) - 1L) * 6.0) - 180.0 + 3.0; if (m_zone > 0L) { pParams->m_fFalseEasting = 500000.0; pParams->m_fFalseNorthing = 0.0; } else { pParams->m_fFalseEasting = 500000.0; pParams->m_fFalseNorthing = 10000000.0; } return m_zone; } void ProjectionImpl::doTransform(CGpsGridParametersX& gridSrc, CGpsGridParametersX& gridDst) { CGpsDatumParametersX& datumSrc = gridSrc.m_cGpsDatumParameters; // Check Source Datum checkSourceDatum(datumSrc); CGpsDatumParametersX& datumDst = gridDst.m_cGpsDatumParameters; // Check Destination 
Datum checkDestinationDatum(datumDst); // Is Source Grid Easting / Northing, then apply unit conversion if (gridSrc.m_lProjection) { m_projection->m_fEasting = m_easting; m_projection->m_fNorthing = m_northing; m_projection->m_fAltitude = m_altitudeI; } else { m_projection->m_fLatitude = m_latitude; m_projection->m_fLongitude = m_longitude; m_projection->m_fAltitude = m_altitudeI; } // Convert from CGpsGridParametersX to CCfgMapProjection CCfgMapProjection projSrc; CCfgMapProjection projDst; gridSrc.ToProjectionStruct(projSrc); gridDst.ToProjectionStruct(projDst); // Set Grids m_projection->SetGridSrc(projSrc); m_projection->SetGridDst(projDst); // Go m_projection->Forward(); // Is Destination Grid Easting / Northing, then apply unit conversion if (gridDst.m_lProjection) { m_easting = m_projection->m_fEasting; m_northing = m_projection->m_fNorthing; m_altitudeO = m_projection->m_fAltitude; } else { m_latitude = m_projection->m_fLatitude; m_longitude = m_projection->m_fLongitude; m_altitudeO = m_projection->m_fAltitude; } } void ProjectionImpl::checkSourceDatum(const CGpsDatumParametersX& datumSrc) { if (datumSrc.m_fAxis <= 0.0) { throw std::runtime_error(errGPS_INVALIDSRCAXIS); } // When using ellipsoid, check flattening if (datumSrc.m_fFlattening != 0.0) { if ((datumSrc.m_fFlattening < 250.0) || (datumSrc.m_fFlattening > 350.0)) { throw std::runtime_error(errGPS_INVALIDSRCFLATTENING); } } // When using gridfile, check existence // No gridfiles anymore if (datumSrc.m_lGridType) { if (datumSrc.m_strGrid.empty()) { throw std::runtime_error(errGPS_INVALIDSRCDATUMGRIDFILE); } } } void ProjectionImpl::checkDestinationDatum(const CGpsDatumParametersX& datumDst) { // SH: The two checks (dest, src) are almost identical, except for the error codes if (datumDst.m_fAxis <= 0.0) { throw std::runtime_error(errGPS_INVALIDDSTAXIS); } // When using ellipsoid, check flattening if (datumDst.m_fFlattening != 0.0) { if ((datumDst.m_fFlattening < 250.0) || (datumDst.m_fFlattening > 350.0)) { throw std::runtime_error(errGPS_INVALIDDSTFLATTENING); } } // When using gridfile, check existence if (datumDst.m_lGridType) { if (datumDst.m_strGrid.empty()) { throw std::runtime_error(errGPS_INVALIDDSTDATUMGRIDFILE); } } } double ProjectionImpl::getEasting() { return m_easting; } double ProjectionImpl::getNorthing() { return m_northing; } long ProjectionImpl::getZone() { return m_zone; } double ProjectionImpl::getLongitude() { return m_longitude; } double ProjectionImpl::getLatitude() { return m_latitude; } void ProjectionImpl::putEasting(double easting) { m_easting = easting; } void ProjectionImpl::putNorthing(double northing) { m_northing = northing; } void ProjectionImpl::transformGrid(GridParameters& src, GridParameters& dst) { GridParametersImpl& srcImpl = static_cast<GridParametersImpl&>(src); GridParametersImpl& dstImpl = static_cast<GridParametersImpl&>(dst); doTransform(srcImpl.getImpl(), dstImpl.getImpl()); } }
/**
 * @author cy122
 *
 * This class represents the return status of a command.
 * The Interpreter publishes on the COMMAND_STATUS channel, and the Controller subscribes to it.
 */
public class CommandStatus extends Message {

    /* success is true when the command runs successfully, and false when the command has a syntax error */
    public Boolean success = true;

    /* The return value of the command. For example, if the Controller publishes 'fd fd fd fd 5' on the
     * INTERPRET channel, it will receive on the COMMAND_STATUS channel a CommandStatus object containing
     * returnValue=5.0, success=true.
     */
    public Double returnValue = 0.0;

    public CommandStatus(Boolean success, Double returnValue) {
        this.success = success;
        this.returnValue = returnValue;
    }

    public CommandStatus(Boolean success) {
        this.success = success;
    }

    public CommandStatus(Double returnValue) {
        this.returnValue = returnValue;
    }
}
def _calcSampleName(self, oCollection): if oCollection.sValue is not None: asSampleName = [oCollection.sValue, 'in',]; elif oCollection.sType == self._oModel.ksTypeElapsed: asSampleName = ['Elapsed time', 'for', ]; elif oCollection.sType == self._oModel.ksTypeResult: asSampleName = ['Error count', 'for',]; else: return 'Invalid collection type: "%s"' % (oCollection.sType,); sTestName = ', '.join(oCollection.asTests if oCollection.asTests[0] else oCollection.asTests[1:]); if sTestName == '': if not oCollection.aoSeries: return asSampleName[0]; if len(oCollection.aoSeries) > 1: idTestCase = oCollection.aoSeries[0].idTestCase; for oSeries in oCollection.aoSeries: if oSeries.idTestCase != idTestCase: return asSampleName[0]; sTestName = oCollection.aoSeries[0].oTestCase.sName; return ' '.join(asSampleName) + ' ' + sTestName;
/**
 * Returns true if the experiment has had batch information successfully filled in. This will be true even if there
 * is only one batch. It does not reflect the presence or absence of a batch effect.
 *
 * @param datasetArg can either be the ExpressionExperiment ID or its short name (e.g. GSE1234). Retrieval by ID
 *                   is more efficient. Only datasets that the user has access to will be available.
 */
@GET
@Path("/{dataset}/hasbatch")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
@Operation(summary = "Indicate if a dataset has batch information")
public ResponseDataObject<Boolean> datasetHasBatch(
        @PathParam("dataset") DatasetArg<Object> datasetArg,
        @Context final HttpServletResponse sr ) {
    ExpressionExperiment ee = datasetArg.getEntity( expressionExperimentService );
    return Responder.respond( this.auditEventService.hasEvent( ee, BatchInformationFetchingEvent.class ) );
}
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;

const int N = 1005;

int n;
int a[N];
bool vis[N];

int main() {
    while (~scanf("%d", &n)) {
        for (int i = 0; i < n; i++) {
            scanf("%d", &a[i]);
        }
        sort(a, a + n);
        int ans = 0;
        memset(vis, 0, sizeof(vis));
        // Greedily peel strictly increasing chains off the sorted array.
        // Every element that extends an existing chain (i.e. is not a
        // chain start) is counted in ans.
        for (int i = 0; i < n; i++) {
            if (!vis[i]) {
                int pos, crt = a[i];
                vis[i] = true;
                // Repeatedly jump to the first unvisited element strictly
                // greater than the current chain value.
                while ((pos = upper_bound(a + i + 1, a + n, crt) - a) != n) {
                    while (pos < n && vis[pos]) {
                        pos++;
                    }
                    if (pos < n) {
                        vis[pos] = true;
                        ans++;
                        crt = a[pos];
                    } else {
                        break;
                    }
                }
            }
        }
        printf("%d\n", ans);
    }
}
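Since each chain takes at most one copy of every value, the greedy appears to produce exactly one chain per unit of the largest multiplicity, so the printed answer reduces to n minus the maximum frequency. A quick Python cross-check of that reading (an assumption about the intended problem, not part of the original solution):

import bisect
import random
from collections import Counter

def greedy_answer(values):
    # Faithful port of the C++ loop above.
    a = sorted(values)
    n = len(a)
    vis = [False] * n
    ans = 0
    for i in range(n):
        if vis[i]:
            continue
        vis[i] = True
        crt = a[i]
        while True:
            pos = bisect.bisect_right(a, crt, i + 1)
            while pos < n and vis[pos]:
                pos += 1
            if pos == n:
                break
            vis[pos] = True
            ans += 1
            crt = a[pos]
    return ans

for _ in range(1000):
    a = [random.randint(1, 5) for _ in range(random.randint(1, 12))]
    assert greedy_answer(a) == len(a) - max(Counter(a).values())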
//FIXME: append functions with Calendar for using common instance
public class CalendarHelper {

    private static String minDecline[] = {"минута", "минуты", "минут"};
    private static String hourDecline[] = {"час", "часа", "часов"};
    private static String lessMinute = "менее минуты";
    private static String zeroDuration = "00:00";

    public final static String SEPARATOR = ":";

    public static final long INTERVAL_MINUTE = 60 * 1000;
    public static final long INTERVAL_HOUR = 60 * INTERVAL_MINUTE;
    public static final long INTERVAL_DAY = 24 * INTERVAL_HOUR;

    public static long getTimeFromDateUTC(int year, int month, int day, int hour, int minute) {
        Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        c.set(Calendar.YEAR, year);
        c.set(Calendar.MONTH, month);
        c.set(Calendar.DAY_OF_MONTH, day);
        c.set(Calendar.HOUR_OF_DAY, hour);
        c.set(Calendar.MINUTE, minute);
        c.set(Calendar.SECOND, 0);
        c.set(Calendar.MILLISECOND, 0);
        return c.getTimeInMillis();
    }

    public static Calendar getCalendarUTC(long time) {
        Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        c.setTimeInMillis(time);
        return c;
    }

    public static long getNowWithOffset() {
        Calendar c = Calendar.getInstance();
        long offset = (c.getTimeZone().useDaylightTime()
                ? c.get(Calendar.DST_OFFSET)
                : c.get(Calendar.ZONE_OFFSET));
        return (c.getTimeInMillis() + offset);
    }

    public static int getMonthUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.MONTH);
    }

    public static int getYearUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.YEAR);
    }

    public static int getEcmaDayOfWeekUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.DAY_OF_WEEK) - 1;
    }

    public static int getDayUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.DAY_OF_MONTH);
    }

    public static int getHoursUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.HOUR_OF_DAY);
    }

    public static int getMinutesUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.MINUTE);
    }

    public static int getSecondsUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.get(Calendar.SECOND);
    }

    public static int getDaysInMonthUTC(long time) {
        Calendar calendar = getCalendarUTC(time);
        return calendar.getActualMaximum(Calendar.DAY_OF_MONTH);
    }

    public static int getYmUTC(long time) {
        return getYearUTC(time) * 100 + getMonthUTC(time);
    }

    /**
     * Compares two dates (year, month and day only).
     *
     * @param time1 the date to compare
     * @param time2 the date to compare against
     * @return true if time1 falls on the same calendar day as time2
     */
    public static boolean datesEqual(long time1, long time2) {
        if (getYearUTC(time1) != getYearUTC(time2)) {
            return false;
        } //else{ nop }
        if (getMonthUTC(time1) != getMonthUTC(time2)) {
            return false;
        } //else{ nop }
        if (getDayUTC(time1) != getDayUTC(time2)) {
            return false;
        } //else{ nop }
        return true;
    }

    public static boolean monthsEqual(long time1, long time2) {
        if (getYearUTC(time1) != getYearUTC(time2)) {
            return false;
        } //else{ nop }
        if (getMonthUTC(time1) != getMonthUTC(time2)) {
            return false;
        } //else{ nop }
        return true;
    }

    public static int compareDates(long time1, long time2) {
        if (!datesEqual(time1, time2) && time1 < time2) {
            return -1;
        } //else{ nop }
        if (!datesEqual(time1, time2) && time1 > time2) {
            return 1;
        } //else{ nop }
        return 0;
    }

    public static int compareMonths(long time1, long time2) {
        if (monthsEqual(time1, time2)) {
            return 0;
        } else {
            return (time1 < time2 ?
-1 : 1); } } public static long getNextDayTime(long time) { Calendar c = getCalendarUTC(time); c.add(Calendar.DAY_OF_MONTH, 1); return c.getTimeInMillis(); } public static long getPreviousDayTime(long time) { Calendar c = getCalendarUTC(time); c.add(Calendar.DAY_OF_MONTH, -1); return c.getTimeInMillis(); } public static long getNextMonthTime(long time) { Calendar c = getCalendarUTC(time); c.add(Calendar.MONTH, 1); return c.getTimeInMillis(); } public static long getPreviousMonthTime(long time) { Calendar c = getCalendarUTC(time); c.add(Calendar.MONTH, -1); return c.getTimeInMillis(); } public static String getTimeString(long time, String separator) { return addZero(getHoursUTC(time)) + separator + addZero(getMinutesUTC(time)); } public static String getTimeString(long time) { return addZero(getHoursUTC(time)) + SEPARATOR + addZero(getMinutesUTC(time)); } public static String getTimeStringUTC(long time, String separator) { return addZero(getHoursUTC(time)) + separator + addZero(getMinutesUTC(time)); } public static String getTimeStringUTC(long time) { return addZero(getHoursUTC(time)) + SEPARATOR + addZero(getMinutesUTC(time)); } //FIXME: split functions by value type - time and duration? public static String getDurationString(long duration) { if(duration < INTERVAL_DAY) { int hour = getHoursUTC(duration); int min = getMinutesUTC(duration); if (hour <= 0 && min <= 0) { int sec = getSecondsUTC(duration); if (sec > 0) { return lessMinute; } else { return zeroDuration; } } else { return addZero(getHoursUTC(duration)) + SEPARATOR + addZero(getMinutesUTC(duration)); } } else{ return ""; } } public static String getMonthYearStringUTC(long time) { Calendar c = getCalendarUTC(time); DateFormatSymbols symbols = DateFormatSymbols.getInstance(Locale.getDefault()); return getNominativeCaseMonth(time) + " " + Integer.toString(c.get(Calendar.YEAR)); } public static String getNominativeCaseMonth(long time) { return new SimpleDateFormat("LLLL", Locale.getDefault()).format(new Date(time)); } public static String getGenitiveCaseMonth(long time) { Calendar c = getCalendarUTC(time); return c.getDisplayName(Calendar.MONTH, Calendar.LONG, Locale.getDefault()); } public static String getTimeStringCase(long time) { int hour = getHoursUTC(time); int min = getMinutesUTC(time); String str = ""; if (hour > 0) { str += " " + hourCase(hour); } //else{ nop } if (min > 0) { str += " " + minutesCase(min); } else if(hour <= 0){ int sec = getSecondsUTC(time); if(sec > 0){ str += " " + lessMinute; } //else{ nop } } //else{ nop } return str; } public static String minutesCase(int min) { String str = " " + min + " "; int n = Math.abs(min) % 100; int n1 = n % 10; if (n > 10 && n < 20) { return str + minDecline[2]; } if (n1 > 1 && n1 < 5) { return str + minDecline[1]; } if (n1 == 1) { return str + minDecline[0]; } return str + minDecline[2]; } public static String hourCase(int hour) { String str = hour + " "; if (hour > 4 && hour < 21) { return str + hourDecline[2]; } if (hour == 1 || hour == 21) { return str + hourDecline[0]; } return str + hourDecline[1]; } }
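The pluralization rules in minutesCase and hourCase encode Russian declension, where the word form depends on the last two digits of the count. A minimal Python sketch of the minutes rule, included only to illustrate the branching (the forms are the same ones stored in minDecline):

MIN_FORMS = ("минута", "минуты", "минут")  # 1 minute, 2-4 minutes, 5+ minutes

def minutes_form(minutes: int) -> str:
    n = abs(minutes) % 100
    n1 = n % 10
    if 10 < n < 20:    # 11..19 always take the genitive plural
        return MIN_FORMS[2]
    if 1 < n1 < 5:     # counts ending in 2..4
        return MIN_FORMS[1]
    if n1 == 1:        # counts ending in 1 (but not 11)
        return MIN_FORMS[0]
    return MIN_FORMS[2]

assert minutes_form(1) == "минута"
assert minutes_form(3) == "минуты"
assert minutes_form(11) == "минут"
assert minutes_form(21) == "минута"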
import { createSelector } from 'reselect'; export const settingsSelector = (state: any) => state.settings; export const selectTheme = createSelector(settingsSelector, (settings) => settings.theme); export const selectLangage = createSelector(settingsSelector, (settings) => settings.langage);
from django.db import models
from django.contrib import admin


class Room(models.Model):
    name = models.CharField(max_length=1000)


class RoomAdmin(admin.ModelAdmin):
    list_display = ('name',)


class Messages(models.Model):
    username = models.CharField(max_length=100000)

    def __str__(self):
        return self.username
/** * Abstract platform generator. * Implements methods that can be utilized by all platform generators. */ public abstract class AbstractPlatformGenerator implements IPlatformGenerator { /** * DataContainer, which contains all collections of model elements */ protected DataContainer dataContainer; protected ResourceSet unprocessedInput; protected ResourceSet processedInput; protected String rootFolder; protected String basePackageName; LocalDateTime lastGenerationRun = null; @Override public void doGenerate(ResourceSet input, IExtendedFileSystemAccess fsa) { // Throttle to avoid constant regeneration if(lastGenerationRun != null && lastGenerationRun.plus(20, ChronoUnit.SECONDS).isAfter(LocalDateTime.now())) { System.out.println("Throttling repeated generation (" + lastGenerationRun + ")"); return; } lastGenerationRun = LocalDateTime.now(); System.out.println("Generation started at " + lastGenerationRun); ///////////////////////////////////////// // Setup ///////////////////////////////////////// // Pre process model (M2M transformation) // Note: input is not being passed back to concrete Xtend generator classes (parameters are final by default) unprocessedInput = new ResourceSetImpl(); EcoreUtil2.clone(unprocessedInput, input); processedInput = MD2Preprocessor.getPreprocessedModel(input); // Initialize DataContainer dataContainer = new DataContainer(processedInput); // Extract base package name basePackageName = MD2GeneratorUtil.getBasePackageName(processedInput) + '.' + getPlatformPrefix(); // Extend the root folder with a default sub-folder to which all files are generated rootFolder = (getDefaultSubfolder() != null) ? basePackageName + "/" + getDefaultSubfolder() : basePackageName; ///////////////////////////////////////// // Feasibility check ///////////////////////////////////////// // Check whether a main block has been defined. Otherwise do not run the generator. if (dataContainer.main == null) { System.out.println("No main block found. Quit gracefully."); return; } ///////////////////////////////////////// // Clean current project folder ///////////////////////////////////////// fsa.deleteDirectory(basePackageName); ///////////////////////////////////////// // Trigger actual generation process ///////////////////////////////////////// doGenerate(fsa); } @Override public String getPlatformPrefix() { return this.getClass().getCanonicalName(); } @Override public void doGenerate(Resource input, IFileSystemAccess fsa) { throw new UnsupportedOperationException("Use the following method instead: " + "doGenerate(ResourceSet input, IExtendedFileSystemAccess fsa)"); } /** * Specify the name of a default sub-folder of the root folder to which all files are generated. * Is supposed to be overwritten by the actual generator implementation if a sub-folder should be used. * * @return File name of the sub folder or null if no sub folder should be used. */ public String getDefaultSubfolder() { return null; } /** * Actual generator method that is supposed to be implemented by the concrete generators. * @param fsa */ public abstract void doGenerate(IExtendedFileSystemAccess fsa); }
Writing for Racked, Arabella Sicardi offers a fascinating dispatch from the makeup master class Kim Kardashian and her main glam squad man Mario Dedivanovic offered last month in Los Angeles. It turns out that blending is the new contouring, Sicardi reports, among other things of note: "banana powder" is a no-no, and people traveled from Mongolia (!!!) to attend the class. Sicardi also offers a step-by-step breakdown of the makeover Dedivanovic performed — and really, isn't good makeup a performance these days? — on Kim onstage during the event. The look requires 40 separate creams, blushes, powders, and gels, applied in over 50 steps, and costs $1,200 in products, or nearly $1,700 with tools included; that's quite the makeup bag. Here's the pricey breakdown: And then it's time for a nap, during which you'll roll over and smudge your lipstick and have to start over, and thus your life remains in a vicious circle of makeup and restless sleep until death, sweet death, ends things for you and funeral home makeup is the best you'll ever have. Really though, this is a makeup routine whose combined cost is, at minimum, $1,676.72. (That's calculated as follows: the cheapest option wherever possible, and three sets of false lashes — "a bunch," if you will — as well as Tom Ford's $52 lip color counted five times.) Yes, once you've bought the tools, they're reusable, and yes, the products do last more than one use, but that's still a lot of money to throw down for a facial beat-down. To be clear, though, Kim's face is beat to perfection — and she probably gets her products for free these days anyway. At least, you know, when she gets the designer brand names right: @KimKardashian Dear Kim - let us know which one you need and we'll send it to you. And Mr. Armani's first name is Giorgio. — Armani (@armani) June 15, 2015
// // Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // package sched import ( "sort" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/google/schedviz/tracedata/trace" ) // threadInferrer consumes a stream of threadTransitions, with nondecreasing // Timestamps, and produces a corresponding stream of threadTransitions with // formerly-Unknown states and CPUs inferred from neighboring // threadTransitions. // threadInferrer uses the following collectionOptions field: // * dropOnConflict: if true, threadTransitions that conflict with their // neighbors, and which allow for dropping on that conflict, are dropped. type threadInferrer struct { pid PID options *collectionOptions lastTimestamp trace.Timestamp pendingTransitions []*threadTransition } func newThreadInferrer(pid PID, options *collectionOptions) *threadInferrer { return &threadInferrer{ pid: pid, options: options, lastTimestamp: UnknownTimestamp, } } // findConflict iterates through the pendingTransitions, looking for // disagreements between adjacent transitions on CPU or state. If a // disagreement is found, the indices of the disagreeing transitions are // returned, as well as true. If no disagreement is found, false is // returned. func (inferrer *threadInferrer) findConflict() (idx1, idx2 int, conflicted bool) { currentCPU := UnknownCPU currentCPUIdx := 0 currentState := AnyState currentStateIdx := 0 for idx, tt := range inferrer.pendingTransitions { if tt.dropped { continue } // Find CPU conflicts. if currentCPU == UnknownCPU { currentCPU = tt.PrevCPU currentCPUIdx = idx } else if tt.PrevCPU != UnknownCPU && tt.PrevCPU != currentCPU { return currentCPUIdx, idx, true } if tt.CPUPropagatesThrough { if tt.NextCPU != UnknownCPU && tt.NextCPU != currentCPU { return currentCPUIdx, idx, true } } else { currentCPU = tt.NextCPU currentCPUIdx = idx } // Find state conflicts. Update currentStateIdx whenever the currentState // is further restricted. var merged bool if currentState != tt.PrevState && tt.PrevState != AnyState { if currentState == AnyState { currentState = tt.PrevState currentStateIdx = idx } else { currentState, merged = mergeState(currentState, tt.PrevState) if !merged { return currentStateIdx, idx, true } currentStateIdx = idx } } if tt.StatePropagatesThrough { if currentState != tt.NextState && tt.NextState != AnyState { if currentState == AnyState { currentState = tt.NextState currentStateIdx = idx } else { currentState, merged = mergeState(currentState, tt.NextState) if !merged { return currentStateIdx, idx, true } currentStateIdx = idx } } } else { currentState = tt.NextState currentStateIdx = idx } } return 0, 0, false } // handleConflicts searches for state or CPU conflicts among // pendingTransitions. Upon finding such a conflict, it resolves it according // to the conflicting transitions' policies, returning whether a retry should // be attempted, and any terminal error encountered. 
If retry is not requested
// and no error is returned, no conflicts were detected.
func (inferrer *threadInferrer) handleConflicts() (retry bool, err error) {
	idx1, idx2, conflict := inferrer.findConflict()
	if !conflict {
		// If no conflict, there's nothing to handle.
		return false, nil
	}
	// If the conflict outcome is to insert a synthetic transition, it may
	// reflect a CPU conflict, a state conflict, or both.  We check for both
	// CPU and state conflicts, and after both, if needed, insert the
	// appropriate synthetic transition.
	insertSynthetic := false
	syntheticPrevState, syntheticNextState := AnyState, AnyState
	syntheticStatePropagatesThrough := true
	syntheticPrevCPU, syntheticNextCPU := UnknownCPU, UnknownCPU
	syntheticCPUPropagatesThrough := true
	tt1, tt2 := inferrer.pendingTransitions[idx1], inferrer.pendingTransitions[idx2]
	// Check for a disagreement on CPU, and handle failing and dropping if
	// necessary.
	if tt1.NextCPU != UnknownCPU && tt2.PrevCPU != UnknownCPU && tt1.NextCPU != tt2.PrevCPU {
		// We have a CPU conflict.
		resolution := resolveConflict(tt1.onForwardsCPUConflict, tt2.onBackwardsCPUConflict)
		switch resolution {
		case Fail:
			return false, status.Errorf(codes.InvalidArgument,
				"inference error (CPU) between '%s' and '%s'", tt1, tt2)
		case Drop:
			// We dropped something, and must retry the check pass.
			if (tt1.onForwardsCPUConflict & Drop) == Drop {
				tt1.dropped = true
			}
			if (tt2.onBackwardsCPUConflict & Drop) == Drop {
				tt2.dropped = true
			}
			return true, nil
		case InsertSynthetic:
			insertSynthetic = true
			syntheticPrevCPU = tt1.NextCPU
			syntheticNextCPU = tt2.PrevCPU
			syntheticCPUPropagatesThrough = false
		}
	}
	// Check for a disagreement on state, and handle failing and dropping if
	// necessary.
	if _, merged := mergeState(tt1.NextState, tt2.PrevState); !merged {
		// We have a state conflict.
		resolution := resolveConflict(tt1.onForwardsStateConflict, tt2.onBackwardsStateConflict)
		switch resolution {
		case Fail:
			return false, status.Errorf(codes.InvalidArgument,
				"inference error (thread state) between '%s' and '%s'", tt1, tt2)
		case Drop:
			// We dropped something, and must retry the check pass.
			if (tt1.onForwardsStateConflict & Drop) == Drop {
				tt1.dropped = true
			}
			if (tt2.onBackwardsStateConflict & Drop) == Drop {
				tt2.dropped = true
			}
			return true, nil
		case InsertSynthetic:
			insertSynthetic = true
			syntheticPrevState = tt1.NextState
			syntheticNextState = tt2.PrevState
			syntheticStatePropagatesThrough = false
		}
	}
	if insertSynthetic {
		syntheticTransition := &threadTransition{
			EventID:                Unknown,
			Timestamp:              tt1.Timestamp + (tt2.Timestamp-tt1.Timestamp)/2,
			PID:                    tt1.PID,
			PrevCommand:            UnknownCommand,
			NextCommand:            UnknownCommand,
			PrevPriority:           UnknownPriority,
			NextPriority:           UnknownPriority,
			PrevCPU:                syntheticPrevCPU,
			NextCPU:                syntheticNextCPU,
			CPUPropagatesThrough:   syntheticCPUPropagatesThrough,
			PrevState:              syntheticPrevState,
			NextState:              syntheticNextState,
			StatePropagatesThrough: syntheticStatePropagatesThrough,
			synthetic:              true,
		}
		inferrer.pendingTransitions = append(inferrer.pendingTransitions, syntheticTransition)
		sort.Slice(inferrer.pendingTransitions, func(a, b int) bool {
			return inferrer.pendingTransitions[a].Timestamp < inferrer.pendingTransitions[b].Timestamp
		})
		// We inserted something, and must retry the check pass.
		return true, nil
	}
	return false, nil
}

// inferForwards scans forwards through pendingTransitions, tracking last-known
// CPU and state, and updating unknown CPUs and states with the last-known
// data.
func (inferrer *threadInferrer) inferForwards() error { var lastKnownCPU, lastKnownState *threadTransition for _, tt := range inferrer.pendingTransitions { if tt.dropped { continue } if lastKnownCPU != nil { if !tt.setCPUForwards(lastKnownCPU.NextCPU) { return status.Errorf(codes.Internal, "inference error (CPU): at time %d, failed to propagate CPU %d forwards", tt.Timestamp, lastKnownCPU.NextCPU) } } if lastKnownState != nil { if !tt.setStateForwards(lastKnownState.NextState) { return status.Errorf(codes.Internal, "inference error (state): at time %d, failed to propagate state %d forwards", tt.Timestamp, lastKnownState.NextState) } } if tt.NextCPU == UnknownCPU { lastKnownCPU = nil } else { lastKnownCPU = tt } lastKnownState = tt } return nil } // inferBackwards scans backwards through pendingTransitions, tracking // last-known CPU and state, and updating unknown CPUs and states with the // last-known data. func (inferrer *threadInferrer) inferBackwards() error { var lastKnownCPU, lastKnownState *threadTransition for idx := len(inferrer.pendingTransitions) - 1; idx >= 0; idx-- { tt := inferrer.pendingTransitions[idx] if tt.dropped { continue } if lastKnownCPU != nil { if !tt.setCPUBackwards(lastKnownCPU.PrevCPU) { return status.Errorf(codes.Internal, "inference error (CPU): at time %d, failed to propagate CPU %d backwards", tt.Timestamp, lastKnownCPU.PrevCPU) } } if lastKnownState != nil { if !tt.setStateBackwards(lastKnownState.PrevState) { return status.Errorf(codes.Internal, "inference error (state): at time %d, failed to propagate state %d backwards", tt.Timestamp, lastKnownState.PrevState) } } if tt.PrevCPU == UnknownCPU { lastKnownCPU = nil } else { lastKnownCPU = tt } lastKnownState = tt } return nil } // inferPending performs an inference pass on the receiver's pending // transitions. It can only guarantee sensical inferences (or errors) if // pendingTransitions contains all transitions between two adjacent forward // barriers (where the start-of-trace is considered a forward barrier.) // If this were not the case, inferences could be made and committed, that // would be invalidated on subsequent passes, when it's too late to do anything // about. // inferPending should only be called when the pendingTransitions ends with a // forward barrier or when no further transitions will be forthcoming from the // trace. lastBatch specifies which of these cases applies: if true, // inferences are made on the entire pending transition set, and the entire // set is returned and drained. If false, the last transition is expected to // be a forward barrier; all transitions before the last are inferred and // returned, and the last is retained as the sole remaining entry in // pendingTransitions. func (inferrer *threadInferrer) inferPending(lastBatch bool) ([]*threadTransition, error) { // Check for inference errors. On finding an error, if the transition can be // dropped, do so and retry, otherwise return an error. for { if retry, err := inferrer.handleConflicts(); err != nil { return nil, err } else if retry { continue } break } // If this is not the last batch, we must ensure that the last pending // transition is still a forward barrier -- that barrier may have been // dropped in the check phase. If it isn't, we return early and continue // to build our pending transitions; if it is, we proceed to inference. 
if !lastBatch { if !inferrer.pendingTransitions[len(inferrer.pendingTransitions)-1].isForwardBarrier() { return nil, nil } } // Now, infer forwards and backwards, populating Unknown values in // pending transitions. if err := inferrer.inferForwards(); err != nil { return nil, err } if err := inferrer.inferBackwards(); err != nil { return nil, err } // If this is the last batch, return all pending transitions. Otherwise, // return all but the last. cutPoint := len(inferrer.pendingTransitions) if !lastBatch { cutPoint-- } if cutPoint < 0 { return nil, nil } ret := make([]*threadTransition, 0, cutPoint) var newPt = []*threadTransition{} for idx, pt := range inferrer.pendingTransitions { if idx < cutPoint { ret = append(ret, pt) } else { newPt = append(newPt, pt) } } inferrer.pendingTransitions = newPt return ret, nil } // addTransition adds the provided threadTransition to the receiver, and // returns zero or more fully-inferred threadTransitions. func (inferrer *threadInferrer) addTransition(nextTT *threadTransition) ([]*threadTransition, error) { if nextTT.PID != inferrer.pid { return nil, status.Errorf(codes.InvalidArgument, "incorrect PID for %s", nextTT) } if nextTT.Timestamp == UnknownTimestamp { return nil, status.Errorf(codes.InvalidArgument, "missing timestamp in threadTransition") } if inferrer.lastTimestamp != UnknownTimestamp && inferrer.lastTimestamp > nextTT.Timestamp { return nil, status.Errorf(codes.InvalidArgument, "out-of-order threadTransitions at %d", nextTT.Timestamp) } if nextTT.PrevState&UnknownState != 0 || nextTT.NextState&UnknownState != 0 { return nil, status.Errorf(codes.InvalidArgument, "threadTransitions may not specify UnknownState as prev or next state") } inferrer.lastTimestamp = nextTT.Timestamp inferrer.pendingTransitions = append(inferrer.pendingTransitions, nextTT) // Only infer on the pending transitions if we just placed a forward barrier // at the end. if nextTT.isForwardBarrier() { return inferrer.inferPending( /*lastBatch*/ false) } return nil, nil } // drain infers (as much as is possible) on the pendingTransitions, then // returns them. func (inferrer *threadInferrer) drain() ([]*threadTransition, error) { ret, err := inferrer.inferPending( /*lastBatch*/ true) inferrer.lastTimestamp = UnknownTimestamp inferrer.pendingTransitions = nil return ret, err }
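The two passes in inferForwards and inferBackwards amount to a bidirectional fill of unknown values from their nearest known neighbors. A simplified Python sketch of that idea on a plain list, ignoring barriers, propagation flags, and state merging, which the real code handles:

UNKNOWN = None

def infer(values):
    filled = list(values)
    # Forward pass: carry the last known value into later unknowns.
    last = UNKNOWN
    for i, v in enumerate(filled):
        if v is UNKNOWN:
            filled[i] = last
        else:
            last = v
    # Backward pass: fill anything still unknown from the next known value.
    nxt = UNKNOWN
    for i in range(len(filled) - 1, -1, -1):
        if filled[i] is UNKNOWN:
            filled[i] = nxt
        else:
            nxt = filled[i]
    return filled

# CPUs observed at five transitions; two in the middle and one at the
# start are unknown and get filled from their neighbors.
print(infer([UNKNOWN, 2, UNKNOWN, UNKNOWN, 3]))  # [2, 2, 2, 2, 3]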
package com.example.admin.week1projectflick.adapter; import android.app.Activity; import android.support.annotation.NonNull; import android.support.v4.app.FragmentManager; import android.support.v7.widget.RecyclerView; import android.view.View; import com.example.admin.week1projectflick.R; import com.example.admin.week1projectflick.model.Youtube; import com.google.android.youtube.player.YouTubeInitializationResult; import com.google.android.youtube.player.YouTubePlayer; import com.google.android.youtube.player.YouTubePlayerFragment; public class TrailerViewHolder extends RecyclerView.ViewHolder { private final String MyYouTubeApiKey = "<KEY>"; private FragmentManager mFragmentManager; Boolean popularVideo; public TrailerViewHolder(@NonNull View itemView,Boolean popularVideo) { super(itemView); this.popularVideo=popularVideo; } public void bind(final Youtube trailer, Activity activity) { YouTubePlayerFragment youTubePlayerFragment = (YouTubePlayerFragment) activity.getFragmentManager().findFragmentById(R.id.youtubeFragment); youTubePlayerFragment.initialize(MyYouTubeApiKey, new YouTubePlayer.OnInitializedListener() { @Override public void onInitializationSuccess(YouTubePlayer.Provider provider, final YouTubePlayer youTubePlayer, boolean b) { youTubePlayer.setPlayerStyle(YouTubePlayer.PlayerStyle.DEFAULT); youTubePlayer.cueVideo(trailer.getSource()); if (popularVideo) { popularVideo = false; youTubePlayer.setFullscreen(true); youTubePlayer.play(); } youTubePlayer.setPlayerStateChangeListener(new YouTubePlayer.PlayerStateChangeListener() { @Override public void onLoading() { } @Override public void onLoaded(String s) { } @Override public void onAdStarted() { } @Override public void onVideoStarted() { youTubePlayer.setFullscreen(true); } @Override public void onVideoEnded() { } @Override public void onError(YouTubePlayer.ErrorReason errorReason) { } }); } @Override public void onInitializationFailure(YouTubePlayer.Provider provider, YouTubeInitializationResult youTubeInitializationResult) { } }); } }
//this is for future use throughout the app in case we need the emails and passwords @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_start); email = findViewById(R.id.editTxtEmail); password = findViewById(R.id.editTxtPassword); auth = FirebaseAuth.getInstance(); user = FirebaseAuth.getInstance().getCurrentUser(); userEmail = ""; userPassword = ""; btnLogin = (Button) findViewById(R.id.btnLogin); btnRegister = findViewById(R.id.btnRegister); btnLogin.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { String txtEmail = email.getText().toString(); String txtPassword = password.getText().toString(); if(TextUtils.isEmpty(txtEmail) || TextUtils.isEmpty(txtPassword)) Toast.makeText(StartActivity.this,"Please enter all of the information",Toast.LENGTH_SHORT).show(); else{ loginUser(txtEmail,txtPassword); } } }); btnRegister.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { startActivity(new Intent(StartActivity.this,RegisterActivity.class)); } }); txtForgotPassword = findViewById(R.id.txtForgotPassword); txtForgotPassword.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { startActivity(new Intent(StartActivity.this, ForgotPasswordActivity.class)); } }); }
/**
 * A character literal eating lexer as defined in the Java Language
 * Standard.
 */
public class CharacterLiteral implements LexEater {
    private static final String CHARACTER = "Character";
    private static final char ENCLOSING = '\'';

    @Override
    public LexicalElement.CharacterLiteral apply(StringBuilder sb) {
        if (sb.length() == 0 || sb.charAt(0) != ENCLOSING) {
            return null;
        }
        final StringBuilder output = createOutput(sb, CHARACTER);
        final StringBuilder original = createOutput(sb, CHARACTER);
        sb.deleteCharAt(0);
        while (sb.length() > 0 && sb.charAt(0) != ENCLOSING) {
            final char ch = sb.charAt(0);
            if (ch == '\\') {
                handleEscape(sb, output, original);
            } else {
                handleNormalCharacter(sb, output, original);
            }
        }
        if (sb.length() == 0) {
            throw new IllegalArgumentException("Character is not terminated before eol");
        }
        sb.deleteCharAt(0);
        return new LexicalElement.CharacterLiteral(output.toString(), original.toString());
    }

    // The helpers below are minimal reconstructions added so the class is
    // self-contained; the original implementations were not part of this
    // excerpt and may differ.

    private StringBuilder createOutput(StringBuilder sb, String kind) {
        // Returns a fresh builder; 'kind' is kept for parity with the call sites above.
        return new StringBuilder();
    }

    private void handleNormalCharacter(StringBuilder sb, StringBuilder output, StringBuilder original) {
        // Consume one character, recording it both unescaped and verbatim.
        final char ch = sb.charAt(0);
        output.append(ch);
        original.append(ch);
        sb.deleteCharAt(0);
    }

    private void handleEscape(StringBuilder sb, StringBuilder output, StringBuilder original) {
        // Consume the backslash and the escaped character.
        original.append(sb.charAt(0));
        sb.deleteCharAt(0);
        if (sb.length() == 0) {
            throw new IllegalArgumentException("Character is not terminated before eol");
        }
        final char ch = sb.charAt(0);
        original.append(ch);
        switch (ch) {
            case 'n': output.append('\n'); break;
            case 't': output.append('\t'); break;
            case 'r': output.append('\r'); break;
            case '\\': output.append('\\'); break;
            case '\'': output.append('\''); break;
            case '"': output.append('"'); break;
            default: output.append(ch); break; // octal escapes etc. are not handled in this sketch
        }
        sb.deleteCharAt(0);
    }
}
n = int(input())
# Output a string of length n built from the repeating block "aabb",
# so that no character equals the one two positions before it (which
# appears to be what rules out palindromic substrings of length 3).
l = []
for i in range(0, n):
    k = i % 4
    if k == 0 or k == 1:
        l.append('a')
    else:
        l.append('b')
print(''.join(l))
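A quick brute-force check of the claim in the comment, again assuming the task is to avoid length-3 palindromes:

def has_3_palindrome(s: str) -> bool:
    # A length-3 palindrome exists exactly when some s[i] == s[i + 2].
    return any(s[i] == s[i + 2] for i in range(len(s) - 2))

for n in range(1, 50):
    s = ''.join('a' if i % 4 in (0, 1) else 'b' for i in range(n))
    assert not has_3_palindrome(s)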
President Trump Won’t Take Immigration Executive Order to Supreme Court Until His Nominee Neil Gorsuch on Bench Guest post by Joe Hoft Last night we reported that instead of messing with a long and drawn-out court battle, President Trump is expected to draw up a new executive order next week to restrict immigration in order to keep America safe. Trump said on Air Force One: “We are actively considering changes or other executive orders that will keep our country safe from terrorism.” NBC News reported that the Trump administration began working on a new executive order a few days before the Ninth Circuit Court ruled against the original one. The reason Trump is doing this is because of the current vacancy on the Supreme Court. The current makeup of the court is four liberal justices appointed by Presidents Clinton and Obama and four justices appointed by Republican Presidents Reagan, George H.W. Bush, and George W. Bush. The legal move President Trump is taking to keep the immigration orders in place is to not go to the Supreme Court yet, because of the possibility that the lower court ruling will stay in place. With an eight-justice court, a majority decision requires a 5-3 vote. If the Supreme Court is deadlocked 4-4, the lower court’s decision in the case is upheld, but it does not create a legal precedent. “The best way to say it, if it’s 4-4, it’s as if the court had never even heard the case,” said Russell Wheeler, a federal courts expert at the Brookings Institution. “The decision below stands, but it has no precedential value.” President Trump knows that if he waits until his Supreme Court pick, Judge Neil Gorsuch, is in place, his executive order will be upheld because the law is on his side. The courts that have ruled against the President have done so not on the merits of the law, but on their own political beliefs. President Trump noted that the entire opinion from the 9th Circuit Court of Appeals did not even once cite the statute that the President’s executive order was based upon. LAWFARE: “Remarkably, in the entire opinion, the panel did not bother even to cite this (the) statute.” A disgraceful decision! — Donald J. Trump (@realDonaldTrump) February 10, 2017
<gh_stars>1-10 /* * Copyright (C) 2005 <NAME> - University of Parma - Italy * Copyright (C) 2009 The Sipdroid Open Source Project * * This file is part of MjSip (http://www.mjsip.org) * * MjSip is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * MjSip is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with MjSip; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author(s): * <NAME> (<EMAIL>) * <NAME>, Hughes Systique Corp. (Reason: Android specific change, optmization, bug fix) */ package org.zoolu.sip.address; import org.zoolu.sip.provider.SipParser; import org.zoolu.tools.Parser; import java.util.Vector; /** * <P> * Class <i>SipURL</i> implements SIP URLs. * <P> * A SIP URL is a string of the form of: <BR> * <BLOCKQUOTE> * * <PRE> * &amp;nbsp&amp;nbsp sip:[user@]hostname[:port][;parameters] * </PRE> * * </BLOCKQUOTE> * <P> * If <i>port</i> number is ommitted, -1 is returned */ public class SipURL { protected String url; protected static final String transport_param = "transport"; protected static final String maddr_param = "maddr"; protected static final String ttl_param = "ttl"; protected static final String lr_param = "lr"; /** * Creates a new SipURL based on a hostname or on a sip url as * sip:[user@]hostname[:port][;param1=value1].. */ public SipURL(String sipurl) { if (sipurl.startsWith("sip:")) url = new String(sipurl); else url = "sip:" + sipurl; } /** Creates a new SipURL */ public SipURL(String username, String hostname) { init(username, hostname, -1); } /** Creates a new SipURL */ public SipURL(String hostname, int portnumber) { init(null, hostname, portnumber); } /** Creates a new SipURL */ public SipURL(String username, String hostname, int portnumber) { init(username, hostname, portnumber); } /** Inits the SipURL */ private void init(String username, String hostname, int portnumber) { StringBuffer sb = new StringBuffer("sip:"); if (username != null){ sb.append(username); if(username.indexOf('@') < 0){ sb.append('@'); sb.append(hostname); } } else sb.append(hostname); if (portnumber > 0) if(username == null || username.indexOf(':') < 0) sb.append(":" + portnumber); url = sb.toString(); } /** Creates and returns a copy of the URL */ public Object clone() { return new SipURL(url); } /** Indicates whether some other Object is "equal to" this URL */ public boolean equals(Object obj) { SipURL newurl = (SipURL) obj; return url.toString().equals(newurl.toString()); } /** Gets user name of SipURL (Returns null if user name does not exist) */ public String getUserName() { int begin = 4; // skip "sip:" int end = url.indexOf('@', begin); if (end < 0) return null; else return url.substring(begin, end); } /** Gets host of SipURL */ public String getHost() { char[] host_terminators = { ':', ';', '?' 
}; Parser par = new Parser(url); int begin = par.indexOf('@'); // skip "sip:user@" if (begin < 0) begin = 4; // skip "sip:" else begin++; // skip "@" par.setPos(begin); int end = par.indexOf(host_terminators); if (end < 0) return url.substring(begin); else return url.substring(begin, end); } /** Gets port of SipURL; returns -1 if port is not specidfied */ public int getPort() { char[] port_terminators = { ';', '?' }; Parser par = new Parser(url, 4); // skip "sip:" int begin = par.indexOf(':'); if (begin < 0) return -1; else { begin++; par.setPos(begin); int end = par.indexOf(port_terminators); if (end < 0) return Integer.parseInt(url.substring(begin)); else return Integer.parseInt(url.substring(begin, end)); } } /** Gets boolean value to indicate if SipURL has user name */ public boolean hasUserName() { return getUserName() != null; } /** Gets boolean value to indicate if SipURL has port */ public boolean hasPort() { return getPort() >= 0; } /** Whether two SipURLs are equals */ public boolean equals(SipURL sip_url) { return (url == sip_url.url); } /** Gets string representation of URL */ public String toString() { return url; } /** * Gets the value of specified parameter. * * @return null if parameter does not exist. */ public String getParameter(String name) { SipParser par = new SipParser(url); return ((SipParser) par.goTo(';').skipChar()).getParameter(name); } /** * Gets a String Vector of parameter names. * * @return null if no parameter is present */ /* HSC CHANGES START */ public Vector<String> getParameters() { SipParser par = new SipParser(url); Vector<String> result = ((SipParser) par.goTo(';').skipChar()) .getParameters(); /* HSC CHANGES END */ return result; } /** Whether there is the specified parameter */ public boolean hasParameter(String name) { SipParser par = new SipParser(url); return ((SipParser) par.goTo(';').skipChar()).hasParameter(name); } /** Whether there are any parameters */ public boolean hasParameters() { if (url != null && url.indexOf(';') >= 0) return true; else return false; } /** Adds a new parameter without a value */ public void addParameter(String name) { url = url + ";" + name; } /** Adds a new parameter with value */ public void addParameter(String name, String value) { if (value != null) url = url + ";" + name + "=" + value; else url = url + ";" + name; } /** Removes all parameters (if any) */ public void removeParameters() { int index = url.indexOf(';'); if (index >= 0) url = url.substring(0, index); } /** Removes specified parameter (if present) */ public void removeParameter(String name) { int index = url.indexOf(';'); if (index < 0) return; Parser par = new Parser(url, index); while (par.hasMore()) { int begin_param = par.getPos(); par.skipChar(); if (par.getWord(SipParser.param_separators).equals(name)) { String top = url.substring(0, begin_param); par.goToSkippingQuoted(';'); String bottom = ""; if (par.hasMore()) bottom = url.substring(par.getPos()); url = top.concat(bottom); return; } par.goTo(';'); } } /** * Gets the value of transport parameter. * * @return null if no transport parameter is present. */ public String getTransport() { return getParameter(transport_param); } /** Whether transport parameter is present */ public boolean hasTransport() { return hasParameter(transport_param); } /** Adds transport parameter */ public void addTransport(String proto) { addParameter(transport_param, proto.toLowerCase()); } /** * Gets the value of maddr parameter. * * @return null if no maddr parameter is present. 
*/ public String getMaddr() { return getParameter(maddr_param); } /** Whether maddr parameter is present */ public boolean hasMaddr() { return hasParameter(maddr_param); } /** Adds maddr parameter */ public void addMaddr(String maddr) { addParameter(maddr_param, maddr); } /** * Gets the value of ttl parameter. * * @return 1 if no ttl parameter is present. */ public int getTtl() { try { return Integer.parseInt(getParameter(ttl_param)); } catch (Exception e) { return 1; } } /** Whether ttl parameter is present */ public boolean hasTtl() { return hasParameter(ttl_param); } /** Adds ttl parameter */ public void addTtl(int ttl) { addParameter(ttl_param, Integer.toString(ttl)); } /** Whether lr (loose-route) parameter is present */ public boolean hasLr() { return hasParameter(lr_param); } /** Adds lr parameter */ public void addLr() { addParameter(lr_param); } }
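For readers tracing getHost and getPort above, the parsing boils down to scanning past "sip:[user@]" and stopping at the first terminator character. A small Python sketch of that logic, for illustration only (the real class also handles parameters such as transport, maddr, ttl and lr):

def sip_host_port(url: str):
    body = url[4:] if url.startswith('sip:') else url
    body = body.split('@', 1)[-1]      # drop the optional user part
    for term in (';', '?'):            # strip parameters / headers
        body = body.split(term, 1)[0]
    if ':' in body:
        host, port = body.split(':', 1)
        return host, int(port)
    return body, -1                    # -1 mirrors SipURL.getPort()

assert sip_host_port('sip:alice@example.com:5060;transport=udp') == ('example.com', 5060)
assert sip_host_port('sip:example.com') == ('example.com', -1)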
def save_scree(pca, fname):
    var = pca.explained_variance_ratio_
    percent_variance = np.round(var * 100, decimals=2)

    plt.rcParams.update({'font.size': 22})  # set before plotting so it takes effect
    fig, ax = plt.subplots(figsize=(10, 10))
    # One bar per principal component (the original hard-coded range(1, 7)).
    ax.bar(x=range(1, len(percent_variance) + 1), height=percent_variance)
    plt.ylabel('Percentage of Variance Explained')
    plt.xlabel('Principal Component')
    plt.title('PCA Scree Plot')
    fig.tight_layout()
    fig.savefig(fname)
'''input 2 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ''' def is_per(s): if s[0] != '1': return False for i in xrange(1, len(s)): if s[i] != '0': return False return True n = input() A = raw_input().split() res = '1' zer = 0 for i in xrange(n): if A[i] == '0': res = '0' break if not is_per(A[i]): res = A[i] else: zer += len(A[i]) - 1 if res[0] == '0': print '0' else: print res + '0' * zer
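The trick in the solution above: if every factor except at most one is a power of ten, the product is that one factor followed by the total number of zeros contributed by the powers of ten, and any zero factor makes the whole product zero. A small Python check of that reasoning (assuming the usual "multiply n large numbers given as strings" problem):

def fast_product(nums):
    res, zeros = '1', 0
    for s in nums:
        if s == '0':
            return '0'
        if s[0] == '1' and set(s[1:]) <= {'0'}:
            zeros += len(s) - 1   # a power of ten: just count its zeros
        else:
            res = s               # at most one non-power-of-ten factor
    return res + '0' * zeros

assert fast_product(['10', '5', '100']) == '5000'
assert int(fast_product(['23', '1000'])) == 23 * 1000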
import { isPlatformSupported } from '../helper' import { supportedError } from '../errors' import windows from './windows' import mac from './mac' import linux from './linux' import { extendedPingOptions, pingResponse } from '../types' import { ERROR_MESSAGES } from '../messages' import parser from "./parser.interface"; //create instance of parser based on operating system function parserFactory(platform: string, output?: string[], options?: extendedPingOptions): pingResponse { let parser: parser; let isWindows: boolean = false; if (!isPlatformSupported(platform)) { throw new supportedError(ERROR_MESSAGES.PLATFORM_NOT_SUPPORTED.replace('platform', platform)); } if (platform === 'win32') { parser = new windows(defaultResponse, options); isWindows = true; } else if (platform === 'darwin') { parser = new mac(defaultResponse, options); } else { parser = new linux(defaultResponse, options); } let result = parseOutput(parser, isWindows, output); return result; } //parsing output line by line function parseOutput(parser: parser, isWindows: boolean, output?: string[]): pingResponse { let lines = output?.join('').split('\n'); let state = 0; let parsedOutput: pingResponse = defaultResponse; lines?.forEach((line) => { line = line.replace(stripRegex, ''); if (line.length === 0) { // Do nothing if this is an empty line } else if (state === states.HEADER) { parser.processHeader(line); state = states.BODY } else if (state === states.BODY) { (!checkIfBodyEnded(line, isWindows)) ? parser.processBody(line) : state = states.FOOTER } else if (state === states.FOOTER) { parsedOutput = parser.processFooter(line) } }); let result = createResult(parsedOutput, lines); return result; } //function to check if body ended and footer began function checkIfBodyEnded(line: string, windows: boolean): boolean { if (windows) { let isPingSummaryLineShown = line.slice(-1) === ':'; if (isPingSummaryLineShown) { return true; } } else { // Change state if it see a '---' if (line.indexOf('---') >= 0) { return true } } return false; } //Function to calculate and create the result function createResult(result: pingResponse, lines?: Array<string>): pingResponse { // Concat output result.output = lines?.join('\n'); // Determine alive result.alive = result?.times?.length > 0; // Update time at first successful line if (result.alive) { result.time = result.times[0]; } // Get stddev if (result.stddev === undefined && result.alive) { let N = result.times.length; const mean = result.times.reduce((a: number, b: number) => a + b) / N; const stddev = Math.sqrt(result.times.map(x => Math.pow(x - mean, 2)).reduce((a: number, b: number) => a + b) / N); result.stddev = stddev; } // Fix min, avg, max, stddev up to 3 decimal points ['min', 'avg', 'max', 'stddev', 'packetLoss'].forEach((key) => { let v = (result as any)[key]; if (typeof v === 'number') { (result as any)[key] = v.toFixed(3); } }); return result; } //Default response object const defaultResponse: pingResponse = { host: undefined, numericHost: undefined, alive: false, output: undefined, time: undefined, times: [], min: undefined, max: undefined, avg: undefined, stddev: undefined, packetLoss: undefined, bufferSize: undefined }; //to strip space present at end of string const stripRegex: RegExp = /[ ]*\r?\n?$/g; //States of parsing - local use only const states = { HEADER: 0, BODY: 1, FOOTER: 2 }; export default parserFactory;
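The standard deviation computed in createResult is the population form over the collected round-trip times; in LaTeX, matching the map/reduce pipeline above:

\sigma = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(x_i - \bar{x}\right)^2},
\qquad \bar{x} = \frac{1}{N}\sum_{i=1}^{N} x_i

Note that it divides by N (population standard deviation) rather than N - 1 (sample standard deviation), which is a deliberate simplification here.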
<gh_stars>100-1000 import Formsy from 'formsy-react'; import { FormsyProps } from 'formsy-react/dist/Formsy'; import { InjectedProps } from 'formsy-react/dist/withFormsy'; import hoistNonReactStatics from 'hoist-non-react-statics'; import PropTypes from 'prop-types'; import React, { Component } from 'react'; import { Form as SemanticUIForm, StrictFormProps } from 'semantic-ui-react'; import FormsyCheckbox from './FormsyCheckbox'; import FormsyDropdown, { IFormsyDropdownProps } from './FormsyDropdown'; import FormsyInput, { IFormsyInputProps } from './FormsyInput'; import FormsyRadioGroup, { IFormsyRadioGroupProps } from './FormsyRadioGroup'; import FormsySelect from './FormsySelect'; import FormsyTextArea from './FormsyTextArea'; type IFormProps = Partial<FormsyProps> & Omit<StrictFormProps, 'onSubmit'>; class Form extends Component<IFormProps & { forwardedRef: any }> { static propTypes = { as: PropTypes.any, children: PropTypes.node, onSubmit: PropTypes.func, }; static defaultProps = { as: 'div', }; static Checkbox = FormsyCheckbox; static Button = SemanticUIForm.Button; static Radio = SemanticUIForm.Radio; static Field = SemanticUIForm.Field; static Group = SemanticUIForm.Group; static Input = (props: Omit<IFormsyInputProps, keyof InjectedProps<any>>) => ( <FormsyInput inputAs={SemanticUIForm.Input} {...props} /> ); static TextArea = ( props: Omit< IFormsyInputProps<React.TextareaHTMLAttributes<any>>, keyof InjectedProps<any> > ) => <FormsyTextArea inputAs={SemanticUIForm.TextArea} {...props} />; static Select = ( props: Omit<IFormsyDropdownProps, keyof InjectedProps<any>> ) => <FormsySelect inputAs={SemanticUIForm.Select} {...(props as any)} />; static RadioGroup = ( props: Omit<IFormsyRadioGroupProps, keyof InjectedProps<any>> ) => <FormsyRadioGroup formRadioGroup {...(props as any)} />; static Dropdown = ( props: Omit<IFormsyDropdownProps, keyof InjectedProps<any>> ) => <FormsyDropdown inputAs={SemanticUIForm.Dropdown} {...(props as any)} />; render() { const { children } = this.props; const { mapping, validationErrors, onValid, onValidSubmit, onInvalid, onInvalidSubmit, onChange, preventExternalInvalidation, onError, onSubmit, forwardedRef, ...nonFormsyReactFormProps } = this.props; const { as, error, inverted, loading, reply, size, success, warning, widths, forwardedRef: _forwardedRef, className, ...nonSemanticUIFormProps } = this.props; return ( <Formsy noValidate ref={forwardedRef} onSubmit={onSubmit} {...nonSemanticUIFormProps} > <SemanticUIForm as={as} {...nonFormsyReactFormProps}> {children} </SemanticUIForm> </Formsy> ); } } export default hoistNonReactStatics( React.forwardRef<Formsy, IFormProps>((props: IFormProps, ref) => ( <Form {...props} forwardedRef={ref} /> )), Form );
Uncovering Learning Processes Using Competence-based Knowledge Structuring and Hasse Diagrams Learning analytics means gathering a broad range of data, bringing the various sources together, and analyzing them. However, to draw educational insights from the results of these analyses, the results must be visualized and presented to educators and learners. This task is often accomplished using dashboards equipped with conventional and often simple visualizations such as bar charts or traffic lights. In this paper we introduce a method that utilizes the strengths of directed graphs, namely Hasse diagrams, together with a competence-oriented approach to structuring knowledge and learning domains. After a brief theoretical introduction, the paper highlights and discusses potential advantages and gives an outlook on current challenges for research.
package cli import ( "fmt" "net" "strconv" "strings" "github.com/lob/rack/sdk" "github.com/convox/stdcli" ) func init() { register("proxy", "proxy a connection inside the rack", Proxy, stdcli.CommandOptions{ Flags: []stdcli.Flag{flagRack}, Usage: "<[port:]host:hostport> [[port:]host:hostport]...", Validate: stdcli.ArgsMin(1), }) } var ProxyCloser = make(chan error) func Proxy(rack sdk.Interface, c *stdcli.Context) error { for _, arg := range c.Args { parts := strings.SplitN(arg, ":", 3) var host string var port, hostport int switch len(parts) { case 2: host = parts[0] p, err := strconv.Atoi(parts[1]) if err != nil { return err } port = p hostport = p case 3: host = parts[1] p, err := strconv.Atoi(parts[0]) if err != nil { return err } port = p p, err = strconv.Atoi(parts[2]) if err != nil { return err } hostport = p default: return fmt.Errorf("invalid argument: %s", arg) } go proxy(rack, c, port, host, hostport) } // block until something sends data on this channel return <-ProxyCloser } func proxy(rack sdk.Interface, c *stdcli.Context, localport int, remotehost string, remoteport int) { c.Writef("proxying localhost:%d to %s:%d\n", localport, remotehost, remoteport) listener, err := net.Listen("tcp4", fmt.Sprintf("127.0.0.1:%d", localport)) if err != nil { c.Error(err) return } defer listener.Close() for { cn, err := listener.Accept() if err != nil { c.Error(err) return } c.Writef("connect: %d\n", localport) go func() { defer cn.Close() if err := rack.Proxy(remotehost, remoteport, cn); err != nil { c.Error(err) } }() } }
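The argument grammar accepted by Proxy above is either host:hostport (the local port defaults to hostport) or port:host:hostport. A tiny Python sketch of the same split, illustrative only; Go's strings.SplitN(arg, ":", 3) corresponds to Python's split(':', 2):

def parse_proxy_arg(arg: str):
    parts = arg.split(':', 2)
    if len(parts) == 2:      # host:hostport -> local port defaults to hostport
        hostport = int(parts[1])
        return hostport, parts[0], hostport
    if len(parts) == 3:      # port:host:hostport
        return int(parts[0]), parts[1], int(parts[2])
    raise ValueError(f'invalid argument: {arg}')

assert parse_proxy_arg('web.internal:8080') == (8080, 'web.internal', 8080)
assert parse_proxy_arg('5000:web.internal:8080') == (5000, 'web.internal', 8080)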
import pickle

import pandas as pd


class TP2EAC(object):

    def __init__(self):
        self.scaler = pickle.load(open('preparation/scaler.pkl', 'rb'))

    def data_preparation(self, df):
        # Reuse the scaler loaded in __init__ instead of unpickling it again.
        df_scaled = self.scaler.transform(df)
        columns = ["age", "education", "capital_gain", "capital_loss",
                   "hours_per_week", "status_civic"]
        return pd.DataFrame(df_scaled, columns=columns)
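A minimal usage sketch, assuming a fitted scaler has been pickled at preparation/scaler.pkl and the input frame carries the six columns listed above (the sample values are hypothetical):

import pandas as pd

raw = pd.DataFrame([[39, 13, 2174, 0, 40, 1]],
                   columns=["age", "education", "capital_gain",
                            "capital_loss", "hours_per_week", "status_civic"])
prep = TP2EAC()
scaled = prep.data_preparation(raw)
print(scaled.head())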
<reponame>Dreadwyrm/lhos_frameworks_av /* * Copyright (C) 2004-2010 NXP Software * Copyright (C) 2010 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /****************************************************************************************/ /* */ /* Includes */ /* */ /****************************************************************************************/ #include "LVREV.h" #include "LVREV_Tables.h" /****************************************************************************************/ /* */ /* Tables */ /* */ /****************************************************************************************/ /* Table with supported sampling rates. The table can be indexed using LVM_Fs_en */ const LVM_UINT32 LVM_FsTable[] = { 8000 , 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200, 96000, 176400, 192000 }; /* Table with supported sampling rates. The table can be indexed using LVM_Fs_en */ LVM_UINT32 LVM_GetFsFromTable(LVM_Fs_en FsIndex){ if (FsIndex > LVM_FS_192000) return 0; return (LVM_FsTable[FsIndex]); } /* In order to maintain consistant input and out put signal strengths output gain/attenuation is applied. This gain depends on T60 and Rooms size parameters. These polynomial coefficients are calculated experimentally. First value in the table is room size second value is A0 third value is A1 fourth value is A2 fifth value is A3 sixth value is A4 shift value is to be added array (to use LVM_Polynomial function) The gain is calculated using variable x=(T60*32767/7000)*32768; first values is used to get polynomial set for given room size, For room sizes which are not in the table, linear interpolation can be used. */ /* Normalizing output including Reverb Level part (only shift up)*/ const LVM_FLOAT LVREV_GainPolyTable[24][5]={{1,1.045909f,7.681098f,-7.211500f,3.025605f,}, {2,1.088194f,10.291749f,-11.513787f,5.265817f,}, {3,0.988919f,8.299956f,-8.920862f,3.979806f,}, {4,1.035927f,10.182567f,-10.346134f,4.546533f,}, {5,1.130313f,12.538727f,-13.627023f,6.165208f,}, {6,1.060743f,8.091713f,-8.588079f,3.834230f,}, {7,1.040381f,10.406566f,-11.176650f,5.075132f,}, {8,1.026944f,8.387302f,-8.689796f,3.895863f,}, {9,1.013312f,9.727236f,-10.534165f,4.742272f,}, {10,0.996095f,8.492249f,-7.947677f,3.478917f,}, {13,1.079346f,8.894425f,-9.641768f,4.434442f,}, {15,0.994327f,7.441335f,-8.003979f,3.581177f,}, {17,0.991067f,7.208373f,-7.257859f,3.167774f,}, {20,1.033445f,7.476371f,-7.546960f,3.369703f,}, {25,0.982830f,5.913867f,-5.638448f,2.420932f,}, {30,0.928782f,5.035343f,-4.492104f,1.844904f,}, {40,0.953714f,5.060232f,-4.472204f,1.829642f,}, {50,0.899258f,4.273357f,-3.537492f,1.387576f,}, {60,0.943584f,4.093228f,-3.469658f,1.410911f,}, {70,0.926021f,3.973125f,-3.331985f,1.344690f,}, {75,0.894853f,2.871747f,-1.438758f,0.311856f,}, {80,0.935122f,2.991857f,-2.038882f,0.686395f,}, {90,0.953872f,2.880315f,-2.122365f,0.784032f,}, {100,0.951005f,2.894294f,-2.009086f,0.698316f,}, }; /* End of file */
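Each row of LVREV_GainPolyTable holds a room size followed by four coefficients, so in this floating-point build the gain appears to be evaluated as a cubic in the normalized decay time (the comment's mention of A4 and a shift looks like a leftover from the fixed-point version, where x = (T60*32767/7000)*32768 in Q15 reduces to roughly T60/7000). As a sketch of that reading:

G(T60) = A_0 + A_1 x + A_2 x^2 + A_3 x^3,
\qquad x \approx \frac{T60}{7000}

with linear interpolation between the two nearest room-size rows for sizes not in the table, as the comment describes.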
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_PUBLIC_BROWSER_AUDIO_SERVICE_H_ #define CONTENT_PUBLIC_BROWSER_AUDIO_SERVICE_H_ #include "base/callback.h" #include "content/common/content_export.h" #include "mojo/public/cpp/bindings/pending_receiver.h" #include "services/audio/public/mojom/audio_service.mojom.h" #include "services/audio/public/mojom/stream_factory.mojom.h" namespace media { class AudioSystem; } namespace content { // Returns the browser's main control interface into the Audio Service, which // is started lazily and may run either in-process or in a dedicated sandboxed // subprocess. CONTENT_EXPORT audio::mojom::AudioService& GetAudioService(); // Creates an instance of AudioSystem for use with the Audio Service, bound to // the thread it's used on for the first time. CONTENT_EXPORT std::unique_ptr<media::AudioSystem> CreateAudioSystemForAudioService(); // Returns a callback that can be invoked from any sequence to safely bind a // StreamFactory interface receiver in the Audio Service. using AudioServiceStreamFactoryBinder = base::RepeatingCallback<void( mojo::PendingReceiver<audio::mojom::StreamFactory>)>; CONTENT_EXPORT AudioServiceStreamFactoryBinder GetAudioServiceStreamFactoryBinder(); } // namespace content #endif // CONTENT_PUBLIC_BROWSER_AUDIO_SERVICE_H_
//
// Created by leke on 9/7/19.
//

#ifndef COMPILE_SCHEDULER_INPUT_PARSER_H
#define COMPILE_SCHEDULER_INPUT_PARSER_H

#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "source_file.h"

using namespace std;

struct InputMetadata {
    int filesCount;
    int targetsCount;
    int serversCount;
};

class InputParser {
private:
    string _inputSourcePath;
    ifstream _file;
    InputMetadata _metadata;
    vector<shared_ptr<SourceFile>> _parsedSourceFiles;
    map<string, shared_ptr<SourceFile>> _parsedSourceFilesMap;

    void _open();
    void _close();
    void _parseInputMetadata();
    void _parseSourceFiles();
    void _parseFileDependencies(shared_ptr<SourceFile> &sourceFile);
    void _parseFileTargets();
    void _addParsedSourceFile(shared_ptr<SourceFile> &sourceFile);
    shared_ptr<SourceFile> _findSourceFile(string id);

public:
    InputParser(string inputSourcePath);
    vector<shared_ptr<SourceFile>> parse(InputMetadata& inputMetadata);
};

#endif //COMPILE_SCHEDULER_INPUT_PARSER_H
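A brief usage sketch of this interface follows; the header file name, the input path, and the surrounding main() are assumptions for illustration, not part of the original header.

```cpp
#include <iostream>
#include "input_parser.h"

int main() {
    // Hypothetical input file; the real path comes from the caller.
    InputParser parser("input.txt");

    InputMetadata metadata{};
    auto sourceFiles = parser.parse(metadata);  // fills metadata as a side effect

    std::cout << metadata.filesCount << " files, "
              << metadata.targetsCount << " targets, "
              << metadata.serversCount << " servers\n";
    std::cout << sourceFiles.size() << " source files parsed\n";
}
```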
package com.nielsmasdorp.speculum.presenters;

import com.nielsmasdorp.speculum.models.Configuration;

/**
 * @author Niels Masdorp (NielsMasdorp)
 */
public interface SetupPresenter {

    void showError(String error);

    void validate(String location, String subreddit, String pollingDelay, boolean celsius,
                  boolean voiceCommands, boolean rememberConfig, boolean simpleLayout);

    void launchMainActivity(Configuration configuration);
}
import java.io.IOException;
import java.sql.SQLException;

/**
 * The {@code Core} class is responsible for everything: it controls every other core
 * component, making it the most important of all classes in the project.
 *
 * @author Guilherme Theodoro
 * @since 0.0.1
 * @version 0.0.1
 */
public class Core extends CoreVariables {

    private final CoreConsole coreConsole;
    private final CoreDatabase coreDatabase;

    public Core(String corePropertiesFile, String configPropertiesFile) throws IOException, SQLException {
        super(corePropertiesFile, configPropertiesFile);

        this.coreConsole = new CoreConsole(this);
        this.coreDatabase = new CoreDatabase(this);
    }

    public String databaseUrlToString() {
        return "jdbc:mysql://" + getDatabaseHost() + "/" + getDatabaseName()
                + "?useTimezone=true&serverTimezone=UTC&useSSL=true";
    }

    public CoreConsole coreConsole() {
        return this.coreConsole;
    }

    public CoreDatabase coreDatabase() {
        return this.coreDatabase;
    }
}
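A short bootstrap sketch; the property file names are placeholders, not values from the original code.

```java
public static void main(String[] args) throws IOException, SQLException {
    // Hypothetical bootstrap; the property file names are assumed.
    Core core = new Core("core.properties", "config.properties");

    // JDBC URL assembled from the CoreVariables getters.
    System.out.println(core.databaseUrlToString());

    core.coreConsole();   // console subsystem
    core.coreDatabase();  // database subsystem
}
```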
Experimental investigations of Jominy End Quench test using CuO nanofluids

M. Baskaran¹, K. C. K. Vijayakumar², M. Bharathiraja³

1 Assistant Professor, Department of Mechatronics Engineering, K.S.Rangasamy College of Technology, Tiruchengode – 637 215, India. [email protected]
2 Principal, Department of Mechanical Engineering, Vivekanandha Institute of Engineering & Technology for Women, Tiruchengode – 637 205, India. [email protected]
3 Assistant Professor (Senior Grade), Department of Automobile Engineering, Kongu Engineering College, Perundurai – 638052, India. [email protected]

INTRODUCTION
Applications of steel in the engineering industry require a complete knowledge of mechanical properties and manufacturing processes. The common factors to be considered in the automotive industry and in modern manufacturing are hardness, strength, toughness and wear resistance, and these can be achieved through modern manufacturing processes. Among these processes, heat treatment is one of the most important and frequently used. Heat treatment is the process of heating steel to the austenitizing temperature and then soaking it for a specified time period to obtain a homogeneous austenite structure. The heated specimens are then cooled by different methods. Among the various heat treatment methods, quenching is the preferred method to obtain high hardness and wear resistance. A literature survey shows that the Jominy end quench test is the standard method for measuring the hardenability of steel. The purpose of using nanofluids as a quenching medium is to improve the heat transfer rate during the cooling process. In many industrial applications, conventional heat transfer fluids such as refrigerants, water, engine oil and ethylene glycol are used as the cooling medium. Although improvements in energy efficiency are possible from the topology and configuration points of view, much more attention is required from the perspective of the heat transfer fluid. Despite considerable research and development in enhanced heat transfer techniques, cooling capabilities have been constrained by the poor thermal conductivities of traditional heat transfer fluids. The development of energy-efficient heat transfer fluids enhances the thermal conductivity in the heat transfer process. One prominent advancement in the development of heat transfer fluids is the use of nano-sized (1-100 nm) solid particles as an additive suspended in the base fluid, which results in an enhanced heat transfer rate. On the other hand, as of today only a few publications exist concerning the quenching behavior of nanofluids. First, Park et al. performed quenching experiments of a high-temperature copper sphere in alumina nanofluids to investigate the effect of the nanoparticles on film boiling heat transfer. The nanoparticle concentration in their experiments was very high (from 5 to 20 vol%), and a subcooling range from 20 to 80 K was explored. J. R. Hong et al. carried out quenching experiments of a nickel-plated copper sphere in a pool of water-based nanofluids containing carbon nanotubes (CNT). Based on the assumption that the copper sphere could be thermally lumped, they obtained boiling curves from the temperature-time history of the sphere, using the transient calorimeter technique.
Their boiling curves suggested that the CNT nanofluids yield an enhanced CHF, transition boiling heat transfer rate and minimum heat flux (Leidenfrost point) as compared to water. They also found that the wettability of the sphere surface improved due to the surfactant used in the preparation of the CNT nanofluids, and that the CNT deposited on the surface during quenching. J. R. Hong et al. investigated the quenching characteristics of water and water-based nanofluids with silicon nanoparticles using a bare platinum wire and a silicon nanoparticle-coated platinum wire. The latter was prepared by boiling a bare platinum wire in the silicon nanofluid. No meaningful differences in the boiling heat transfer characteristics between water and the silicon nanofluid were observed during the experiments with the bare wire. However, a considerably higher heat transfer coefficient was obtained with the nanoparticle-coated wire in the nucleate and transition boiling regions. The literature above shows that nanofluids do not exhibit an abnormal heat transfer capability, but they do have a measurable impact on heat transfer; the intent of this work is therefore to study the effect of heat transfer in the Jominy end quench test on EN8 steel and mild steel. This paper aims to compare the hardness values at different points on different samples quenched using nanofluids. The heat transfer was recorded and the corresponding cooling curves plotted using a DAQ kit. Figure 1 shows the methodology of the Jominy end quench test. It involves the selection of the nanoparticle, preparation of the nanofluid, preparation of the specimens and experimentation.

Preparation of Nanofluid
CuO nanoparticles were selected for preparing the nanofluid because of their high thermal conductivity and lower cost compared with carbon nanotubes, diamond and silver nitrate particles. The nanofluids were prepared by dispersing the nanoparticles in de-ionized water using the two-step method. The CuO nanoparticles were purchased from Sigma Aldrich with a mean particle size of less than 50 nm. The volume concentration of CuO particles is listed in Table 1. Table 2 shows the quantity of nanoparticles needed for the preparation of different volumetric concentrations of nanofluid (a small worked example of this calculation appears after the conclusion). The measured nanoparticles were dispersed in the base fluid (de-ionized water) in a mixing container. The mixture was then stirred at around 1000 rpm using a magnetic stirrer. The prepared nanofluid was visually examined after 12 hours to check its stability. The nanoparticles had agglomerated on the bottom surface of the container; to reduce the agglomeration of particles, the nanofluid was sonicated for one hour using an ultrasonic sonicator, which considerably reduced the agglomeration. Figure 2 shows the stability of the nanofluid at different time intervals. The final aim was to investigate the effect of the volumetric concentration of nanoparticles on the cooling rate and the quenching process of the nanofluids. The nanofluid was observed to be stable 24 hours after sonication, but after 48 hours the particles had slightly settled in the container.

Specimen Preparation
Shafts of two different materials, namely mild steel and EN-8 steel, were purchased with a diameter of 32 mm and a length of 500 mm. Each material was purchased as a single shaft to avoid changes in properties due to changes in material and manufacture.
The 500 mm shaft was then machined into 4 pieces having a 25 mm diameter and a length of 110 mm. The standard dimensions of the specimen were taken from previous literature. The head of the workpiece is 28 mm in diameter and 20 mm in length, for holding the workpiece in the clamp. The shaft was then drilled perpendicular to its axis to a depth of 12 mm to place the sensor. Four samples each were prepared from mild steel and EN-8 steel to conduct the experiment. Figure 3 shows the schematic view of the specimen before and after the quench. Table 3 shows the chemical composition of mild steel and EN8 steel.

Experimental Setup
The experimental setup shown in Figure 4 is used to conduct the Jominy end quench test. This setup consists of a clamp, reservoir, submersible pump, water jet, magnetic stirrer, DAQ and K-type thermocouples. The workpiece was held vertically using clamps and the reservoir was placed at the bottom of the setup, in which the pump was placed to suck the water from the reservoir and pass it to the workpiece through the water jet. Three drills of depth 12 mm are used to place the thermocouples. The cooling rate of the workpiece is measured at three different places by placing K-type thermocouples inside the drills on the workpiece; the thermocouples withstand temperatures of up to 1200 °C. The thermocouples are connected to an NI-DAQ 9211 temperature DAQ kit. The reservoir is used to store the nanofluid which is used as the quenching medium. A submersible pump is used to pump the nanofluid from the reservoir towards the base of the workpiece. A muffle furnace is used to heat the workpiece to the austenitizing temperature, after which it is placed in the clamp of the setup for the quenching process. The experiment is conducted by pumping the nanofluid from the reservoir to the base of the workpiece; at the same time the cooling curves are recorded using the thermocouples connected to the NI-DAQ kit, and the graphs are then generated through the LabVIEW software.

Experimental Procedure
The prepared EN-8 steel specimen is cleaned using an emery sheet to remove unwanted rust and dust, then washed with water and dried. The specimen is then heated up to 900 °C in the muffle furnace and held at that temperature for one hour to complete the austenite formation. At the same time, the nanofluid in the reservoir is stirred using a magnetic stirrer to avoid agglomeration of the particles. The heated specimen is then placed in the clamp. The pump is switched on immediately and pumps the nanofluid onto the workpiece as per the Jominy end quench test procedure. Figure 5 illustrates the cooling of the specimen by supplying the nanofluid. During the quenching process, the reduction in temperature is recorded using the DAQ at specified time intervals.

Hardenability
The Rockwell hardness values of the specimens were recorded and the results plotted.

EN-8 Material
The hardenability in each trial depends upon the heat transfer rate: as the heat transfer rate increases, hardenability increases, and vice versa. The thermal conductivity of CuO nanoparticles is high compared with distilled water; higher thermal conductivity and heat transfer result in better hardenability. This effect has been proven in this experiment by the hardness test results described above. These are the hardness values attained in the mild steel rod.
The hardenability of the material varies with the heat transfer rate, i.e., the heat transfer rate is proportional to the hardenability. The thermal conductivity of CuO particles is higher than that of distilled water. The heat transfer rate increases with the increase in thermal conductivity, which results in better hardenability. This effect has been proven in this experiment by the hardness results described above.

Cooling curves of EN-8 and Mild steel rod
The cooling curve shows the reduction in temperature with respect to time during the quenching process. The cooling rate of the specimen is low when water is used as the quenching medium. Figure 9 shows that the nanofluid with the lowest volume concentration of nanoparticles (0.01%) exhibited a higher heat transfer rate than the other concentrations. Among the nanofluid concentrations, the one with a high nanoparticle concentration takes more time to cool than the one with a low concentration. This confirms that using a nanofluid as the quenching medium increases the heat transfer rate compared with water. The nanofluid's better thermal conductivity results in a higher heat transfer rate than distilled water.

CONCLUSION:
The hardenability investigation was carried out on EN 8 and mild steel specimens using CuO nanofluid. The Jominy end quench hardenability of the material decreases with an increase in the distance from the quenched end, while the heat transfer rate of the nanofluid is higher than that of distilled water. The heat transfer rate of the quenching medium is mainly influenced by the concentration of the nanoparticles. A quenchant with a low concentration of nanoparticles exhibits better thermal conductivity than a fluid with a high concentration. Since the thermal conductivity of the copper oxide nanoparticle is high, the nanofluid prepared from copper oxide also has a high thermal conductivity. The hardenability graphs of the mild steel and the EN-8 material show that the nanofluid with a 0.05% nanoparticle concentration results in higher hardness than the others. The cooling curves of the EN-8 and mild steel specimens were obtained separately. The EN-8 curve shows that the cooling rate of distilled water is low compared with the nanofluids of the three different volume concentrations. Out of these three concentrations, the 0.01% nanofluid has a high cooling rate compared to the other concentrations. Similar results were observed for the mild steel specimen.
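As noted in the nanofluid-preparation section, the particle mass follows from the target volume fraction. A minimal sketch follows; the bulk densities and the 500 mL batch size are illustrative assumptions and are not taken from the paper's Table 2.

```python
# Sketch: CuO mass needed for a target volume concentration in a
# two-step nanofluid preparation (all constants are assumptions).
RHO_CUO = 6.31  # g/cm^3, assumed bulk density of CuO

def cuo_mass_grams(phi_percent: float, fluid_volume_ml: float = 500.0) -> float:
    """Mass of CuO (g) so particles occupy phi_percent of the total volume."""
    phi = phi_percent / 100.0
    particle_volume = fluid_volume_ml * phi / (1.0 - phi)  # cm^3 of CuO
    return particle_volume * RHO_CUO

for phi in (0.01, 0.03, 0.05):
    print(f"{phi}% by volume -> {cuo_mass_grams(phi):.3f} g CuO per 500 mL")
```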
use crate::errors::{BulletproofError, BulletproofErrorKind, R1CSError, R1CSErrorKind};
use crate::r1cs::gadgets::helper_constraints::mimc::mimc;
use crate::r1cs::gadgets::helper_constraints::poseidon::{
    PoseidonParams, Poseidon_hash_2, Poseidon_hash_4, Poseidon_hash_4_constraints,
    Poseidon_hash_8, Poseidon_hash_8_constraints, SboxType,
};
use crate::r1cs::{ConstraintSystem, LinearCombination, Prover, Variable, Verifier};
use amcl_wrapper::commitment::commit_to_field_element;
use amcl_wrapper::field_elem::FieldElement;
use amcl_wrapper::group_elem_g1::G1;

// The interfaces defined here are expected to change as we add more Bulletproof
// friendly hash functions.

pub trait Arity2MerkleTreeHash {
    fn is_num_inputs_correct(inputs: &[FieldElement]) -> Result<(), BulletproofError> {
        if inputs.len() != 2 {
            Err(BulletproofErrorKind::IncorrectNoOfInputsForMerkleTreeHash {
                found: inputs.len(),
                expected: 2,
            }
            .into())
        } else {
            Ok(())
        }
    }

    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError>;
}

pub trait Arity4MerkleTreeHash {
    fn is_num_inputs_correct(inputs: &[FieldElement]) -> Result<(), BulletproofError> {
        if inputs.len() != 4 {
            Err(BulletproofErrorKind::IncorrectNoOfInputsForMerkleTreeHash {
                found: inputs.len(),
                expected: 4,
            }
            .into())
        } else {
            Ok(())
        }
    }

    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError>;
}

pub trait Arity8MerkleTreeHash {
    fn is_num_inputs_correct(inputs: &[FieldElement]) -> Result<(), BulletproofError> {
        if inputs.len() != 8 {
            Err(BulletproofErrorKind::IncorrectNoOfInputsForMerkleTreeHash {
                found: inputs.len(),
                expected: 8,
            }
            .into())
        } else {
            Ok(())
        }
    }

    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError>;
}

pub trait Arity4MerkleTreeHashConstraints {
    /// This is for hash function specific setup. Like Poseidon needs a variable allocated for
    /// capacity constant. Done for the prover and must be done once and only once
    fn prover_setup(&mut self, prover: &mut Prover) -> Result<(), R1CSError>;

    /// This is for hash function specific setup. Like Poseidon needs a variable allocated for
    /// capacity constant. Done for the verifier and must be done once and only once.
    /// The `g` and `h` are needed to commit to capacity constant in case of Poseidon. If the
    /// specific hash function does not need it, they should be `None`.
    fn verifier_setup(
        &mut self,
        verifier: &mut Verifier,
        g: Option<&G1>,
        h: Option<&G1>,
    ) -> Result<(), R1CSError>;

    // TODO: It would be better to make inputs an array of size 4
    fn hash<CS: ConstraintSystem>(
        &self,
        cs: &mut CS,
        inputs: Vec<LinearCombination>,
    ) -> Result<LinearCombination, R1CSError>;
}

pub trait Arity8MerkleTreeHashConstraints {
    /// This is for hash function specific setup. Like Poseidon needs a variable allocated for
    /// capacity constant. Done for the prover and must be done once and only once
    fn prover_setup(&mut self, prover: &mut Prover) -> Result<(), R1CSError>;

    /// This is for hash function specific setup. Like Poseidon needs a variable allocated for
    /// capacity constant. Done for the verifier and must be done once and only once.
    /// The `g` and `h` are needed to commit to capacity constant in case of Poseidon. If the
    /// specific hash function does not need it, they should be `None`.
    fn verifier_setup(
        &mut self,
        verifier: &mut Verifier,
        g: Option<&G1>,
        h: Option<&G1>,
    ) -> Result<(), R1CSError>;

    // TODO: It would be better to make inputs an array of size 8
    fn hash<CS: ConstraintSystem>(
        &self,
        cs: &mut CS,
        inputs: Vec<LinearCombination>,
    ) -> Result<LinearCombination, R1CSError>;
}

pub struct MiMC_2<'a> {
    pub constants: &'a [FieldElement],
}

pub struct PoseidonHash_2<'a> {
    pub params: &'a PoseidonParams,
    pub sbox: &'a SboxType,
}

pub struct PoseidonHash_4<'a> {
    pub params: &'a PoseidonParams,
    pub sbox: &'a SboxType,
}

pub struct PoseidonHash_8<'a> {
    pub params: &'a PoseidonParams,
    pub sbox: &'a SboxType,
}

pub struct PoseidonHashConstraints<'a> {
    pub params: &'a PoseidonParams,
    pub sbox: &'a SboxType,
    pub capacity_const: u64,
    capacity_const_var: Option<Variable>,
}

impl<'a> Arity2MerkleTreeHash for MiMC_2<'a> {
    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError> {
        // Validate the arity like the other implementations do, so a short
        // input vector fails cleanly instead of panicking on indexing.
        Self::is_num_inputs_correct(&inputs)?;
        Ok(mimc(&inputs[0], &inputs[1], self.constants))
    }
}

impl<'a> Arity2MerkleTreeHash for PoseidonHash_2<'a> {
    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError> {
        Self::is_num_inputs_correct(&inputs)?;
        Poseidon_hash_2(inputs, &self.params, &self.sbox)
    }
}

impl<'a> Arity4MerkleTreeHash for PoseidonHash_4<'a> {
    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError> {
        Self::is_num_inputs_correct(&inputs)?;
        Poseidon_hash_4(inputs, &self.params, &self.sbox)
    }
}

impl<'a> Arity8MerkleTreeHash for PoseidonHash_8<'a> {
    fn hash(&self, inputs: Vec<FieldElement>) -> Result<FieldElement, BulletproofError> {
        Self::is_num_inputs_correct(&inputs)?;
        Poseidon_hash_8(inputs, &self.params, &self.sbox)
    }
}

impl<'a> PoseidonHashConstraints<'a> {
    pub fn new(params: &'a PoseidonParams, sbox: &'a SboxType, capacity_const: u64) -> Self {
        Self {
            params,
            sbox,
            capacity_const,
            capacity_const_var: None,
        }
    }

    fn prover_commit_to_capacity_const(&mut self, prover: &mut Prover) -> Result<(), R1CSError> {
        if self.capacity_const_var.is_some() {
            return Err(R1CSErrorKind::GadgetError {description: String::from("Poseidon: capacity_const_var should be None but is Some. Setup has already been called once.")}.into());
        }
        let (_, var) = prover.commit(
            FieldElement::from(self.capacity_const),
            FieldElement::zero(),
        );
        self.capacity_const_var = Some(var);
        Ok(())
    }

    fn verifier_commit_to_capacity_const(
        &mut self,
        verifier: &mut Verifier,
        g: Option<&G1>,
        h: Option<&G1>,
    ) -> Result<(), R1CSError> {
        if self.capacity_const_var.is_some() {
            return Err(R1CSErrorKind::GadgetError {description: String::from("Poseidon: capacity_const_var should be None but is Some. Setup has already been called once.")}.into());
        }
        if g.is_none() || h.is_none() {
            return Err(R1CSErrorKind::GadgetError {
                description: String::from("Poseidon: Neither g nor h may be None"),
            }
            .into());
        }
        let comm = commit_to_field_element(
            g.unwrap(),
            h.unwrap(),
            &FieldElement::from(self.capacity_const),
            &FieldElement::zero(),
        );
        let var = verifier.commit(comm);
        self.capacity_const_var = Some(var);
        Ok(())
    }

    fn get_capacity_constant_lc(&self) -> Result<LinearCombination, R1CSError> {
        if self.capacity_const_var.is_none() {
            return Err(R1CSErrorKind::GadgetError {description: String::from("Poseidon: capacity_const_var should be Some but is None. 
Setup not called yet.")}.into()); } let cap_const_lc = self .capacity_const_var .as_ref() .map(|c| LinearCombination::from(*c)) .unwrap(); Ok(cap_const_lc) } } impl<'a> Arity4MerkleTreeHashConstraints for PoseidonHashConstraints<'a> { fn prover_setup(&mut self, prover: &mut Prover) -> Result<(), R1CSError> { self.prover_commit_to_capacity_const(prover) } fn verifier_setup( &mut self, verifier: &mut Verifier, g: Option<&G1>, h: Option<&G1>, ) -> Result<(), R1CSError> { self.verifier_commit_to_capacity_const(verifier, g, h) } fn hash<CS: ConstraintSystem>( &self, cs: &mut CS, inputs: Vec<LinearCombination>, ) -> Result<LinearCombination, R1CSError> { Poseidon_hash_4_constraints::<CS>( cs, inputs, self.get_capacity_constant_lc()?, self.params, self.sbox, ) } } impl<'a> Arity8MerkleTreeHashConstraints for PoseidonHashConstraints<'a> { fn prover_setup(&mut self, prover: &mut Prover) -> Result<(), R1CSError> { self.prover_commit_to_capacity_const(prover) } fn verifier_setup( &mut self, verifier: &mut Verifier, g: Option<&G1>, h: Option<&G1>, ) -> Result<(), R1CSError> { self.verifier_commit_to_capacity_const(verifier, g, h) } fn hash<CS: ConstraintSystem>( &self, cs: &mut CS, inputs: Vec<LinearCombination>, ) -> Result<LinearCombination, R1CSError> { Poseidon_hash_8_constraints::<CS>( cs, inputs, self.get_capacity_constant_lc()?, self.params, self.sbox, ) } }
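A usage sketch of the non-constraint side of these traits, assuming it lives in this module (so the trait, MiMC_2 and FieldElement are all in scope). The number of round constants (322) is a placeholder; real deployments use fixed, publicly generated constants rather than random ones.

```rust
fn merkle_parent_demo() -> Result<FieldElement, BulletproofError> {
    // Placeholder round constants for illustration only.
    let constants: Vec<FieldElement> = (0..322).map(|_| FieldElement::random()).collect();
    let hasher = MiMC_2 { constants: &constants };

    let left = FieldElement::random();
    let right = FieldElement::random();

    // hash() validates the arity (exactly 2 inputs) before running MiMC.
    hasher.hash(vec![left, right])
}
```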
class MessagesHandlerMixIn:
    """a mix-in class containing all the messages related methods for the main
    lint class
    """

    def __init__(self):
        # dictionary of registered messages
        self._messages = {}
        self._msgs_state = {}
        self._module_msgs_state = {} # None
        self._msg_cats_state = {}
        self._module_msg_cats_state = None
        self.msg_status = 0

    def register_messages(self, checker):
        """register a dictionary of messages

        Keys are message ids, values are a 2-tuple with the message type and the
        message itself

        message ids should be a message category letter followed by four digits,
        where the first two digits are the checker id and the last two the
        message id in this checker
        """
        msgs_dict = checker.msgs
        chkid = None
        for msgid, (msg, msgdescr) in msgs_dict.items():
            # avoid duplicate / malformed ids
            assert not self._messages.has_key(msgid), \
                   'Message id %r is already defined' % msgid
            assert chkid is None or chkid == msgid[1:3], \
                   'Inconsistent checker part in message id %r' % msgid
            chkid = msgid[1:3]
            self._messages[msgid] = Message(checker, msgid, msg, msgdescr)

    def get_message_help(self, msg_id, checkerref=False):
        """return the help string for the given message id"""
        msg = self.check_message_id(msg_id)
        desc = normalize_text(' '.join(msg.descr.split()), indent=' ')
        if checkerref:
            desc += ' This message belongs to the %s checker.' % \
                    msg.checker.name
        title = msg.msg
        if title != '%s':
            title = title.splitlines()[0]
            return ':%s: *%s*\n%s' % (msg.msgid, title, desc)
        return ':%s:\n%s' % (msg.msgid, desc)

    def disable_message(self, msg_id, scope='package', line=None):
        """don't output message of the given id"""
        assert scope in ('package', 'module')
        msg = self.check_message_id(msg_id)
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = False
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: False}
            if msg_id != 'I0011':
                self.add_message('I0011', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = False
            # sync configuration object
            self.config.disable_msg = [mid for mid, val in msgs.items()
                                       if not val]

    def enable_message(self, msg_id, scope='package', line=None):
        """reenable message of the given id"""
        assert scope in ('package', 'module')
        msg = self.check_message_id(msg_id)
        msg.checker.enabled = True # ensure the related checker is enabled
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = True
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: True}
            self.add_message('I0012', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = True
            # sync configuration object
            self.config.enable_msg = [mid for mid, val in msgs.items() if val]

    def _cat_ids(self, categories):
        for catid in categories:
            catid = catid.upper()
            if not catid in MSG_TYPES:
                raise Exception('Unknown category identifier %s' % catid)
            yield catid

    def disable_message_category(self, categories, scope='package', line=None):
        """don't output message in the given category"""
        assert scope in ('package', 'module')
        for catid in self._cat_ids(categories):
            if scope == 'module':
                self.add_message('I0011', line=line, args=catid)
                self._module_msg_cats_state[catid] = False
            else:
                self._msg_cats_state[catid] = False

    def enable_message_category(self, categories, scope='package', line=None):
        """reenable message of the given category"""
        assert scope in ('package', 'module')
        for catid in self._cat_ids(categories):
            if scope == 'module':
                self.add_message('I0012', line=line, args=catid)
                self._module_msg_cats_state[catid] = True
            else:
                self._msg_cats_state[catid] = True

    def
check_message_id(self, msg_id): """raise UnknownMessage if the message id is not defined""" msg_id = msg_id.upper() try: return self._messages[msg_id] except KeyError: raise UnknownMessage('No such message id %s' % msg_id) def is_message_enabled(self, msg_id, line=None): """return true if the message associated to the given message id is enabled """ try: if not self._module_msg_cats_state[msg_id[0]]: return False except (KeyError, TypeError): if not self._msg_cats_state.get(msg_id[0], True): return False if line is None: return self._msgs_state.get(msg_id, True) try: return self._module_msgs_state[msg_id][line] except (KeyError, TypeError): return self._msgs_state.get(msg_id, True) def add_message(self, msg_id, line=None, node=None, args=None): """add the message corresponding to the given id. If provided, msg is expanded using args astng checkers should provide the node argument, raw checkers should provide the line argument. """ if line is None and node is not None: line = node.fromlineno # should this message be displayed if not self.is_message_enabled(msg_id, line): return # update stats msg_cat = MSG_TYPES[msg_id[0]] self.msg_status |= MSG_TYPES_STATUS[msg_id[0]] self.stats[msg_cat] += 1 self.stats['by_module'][self.current_name][msg_cat] += 1 try: self.stats['by_msg'][msg_id] += 1 except KeyError: self.stats['by_msg'][msg_id] = 1 msg = self._messages[msg_id].msg # expand message ? if args: msg %= args # get module and object if node is None: module, obj = self.current_name, '' path = self.current_file else: module, obj = get_module_and_frameid(node) path = node.root().file # add the message self.reporter.add_message(msg_id, (path, module, obj, line or 1), msg) def help_message(self, msgids): """display help messages for the given message identifiers""" for msg_id in msgids: try: print self.get_message_help(msg_id, True) print except UnknownMessage, ex: print ex print continue def list_checkers_messages(self, checker): """print checker's messages in reST format""" for msg_id in sort_msgs(checker.msgs.keys()): print self.get_message_help(msg_id, False) def print_full_documentation(self): """output a full documentation in ReST format""" for checker in sort_checkers(self._checkers.values()): if checker.name == 'master': prefix = 'Main ' if checker.options: for section, options in checker.options_by_section(): if section is None: title = 'General options' else: title = '%s options' % section.capitalize() print title print '~' * len(title) rest_format_section(sys.stdout, None, options) print else: prefix = '' title = '%s checker' % checker.name.capitalize() print title print '-' * len(title) if checker.__doc__: # __doc__ is None with -OO print linesep.join([l.strip() for l in checker.__doc__.splitlines()]) if checker.options: title = 'Options' print title print '~' * len(title) for section, options in checker.options_by_section(): rest_format_section(sys.stdout, section, options) print if checker.msgs: title = ('%smessages' % prefix).capitalize() print title print '~' * len(title) self.list_checkers_messages( checker) print if getattr(checker, 'reports', None): title = ('%sreports' % prefix).capitalize() print title print '~' * len(title) for report in checker.reports: print ':%s: %s' % report[:2] print print def list_messages(self): """output full messages list documentation in ReST format""" for checker in sort_checkers(self._checkers.values()): if checker.msgs: self.list_checkers_messages( checker) print def list_sorted_messages(self): """output full sorted messages list in ReST 
format""" msg_ids = [] for checker in self._checkers.values(): for msg_id in checker.msgs.keys(): msg_ids.append(msg_id) msg_ids.sort() for msg_id in msg_ids: print self.get_message_help(msg_id, False) print
/**
 * Creates a (virtual) server socket over the real socket. This can then be used to accept new
 * socket connections from a MultiplexerClient.
 *
 * @param serverSocketName
 *            unique name that will be used by clients when creating a vsocket.
 * @return new ServerSocket that can be used to accept new socket connections.
 */
public VirtualServerSocket createServerSocket(String serverSocketName) throws IOException {
    LOG.fine("serverSocketName=" + serverSocketName);
    VirtualServerSocket ss;
    try {
        ss = getServerSocketController().getServerSocket(serverSocketName);
    } catch (Exception e) {
        throw new IOException("Exception while creating server socket.", e);
    }
    return ss;
}
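A hypothetical call site; the multiplexer server variable, the socket name "commands", and the accept() call are assumptions about the surrounding API, not taken from the snippet above.

```java
// Hypothetical usage sketch.
VirtualServerSocket serverSocket =
        multiplexerServer.createServerSocket("commands");

// Assumed to block until a MultiplexerClient opens a vsocket
// with the same name over the shared real socket.
Socket socket = serverSocket.accept();
```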
fn positive_sum(slice: &[i32]) -> i32 {
    slice.iter().filter(|&&x| x > 0).sum()
}
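A couple of sanity checks for the kata solution; the input values are illustrative, not from the kata file.

```rust
fn main() {
    assert_eq!(positive_sum(&[1, -4, 7, 12]), 20);
    assert_eq!(positive_sum(&[]), 0); // empty slice sums to zero
}
```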
/**
 * Shifts and recycles pages if we are currently at the last or first position,
 * ensuring that users can peek at hidden pages on both sides.
 * @param position current item position
 */
private void syncPages(int position) {
    int first = 0, last = mPagerAdapter.getCount() - 1;
    if (position == last) {
        mPagerAdapter.shiftLeft();
        setCurrentItem(first + 1, false);
    } else if (position == 0) {
        mPagerAdapter.shiftRight();
        setCurrentItem(last - 1, false);
    } else {
        if (position > 0) {
            mPagerAdapter.bind(position - 1);
        }
        if (position < mPagerAdapter.getCount() - 1) {
            mPagerAdapter.bind(position + 1);
        }
    }
}
import { List } from '../../../collections/List';

declare module '../../../collections/List' {
    namespace List {
        /** Returns a List sequence containing exactly one value. */
        function single<T>(element: T): List<T>;
    }
}

function single<T>(element: T): List<T> {
    return List.fromRest(element);
}

List.single = single;
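A one-line usage sketch (assuming the module augmentation above has been loaded):

```ts
const one = List.single(42); // a List containing exactly the value 42
```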
def _parse_description(self, response, i):
    # Map the item index to the paragraph position in the article body.
    paragraph = {1: 1, 2: 3}.get(i, 5)
    return response.xpath(
        '//*[@id="node-full"]/div/div[2]/p[{}]/text()'.format(paragraph)
    ).extract_first()
Hemochromatosis, alcoholism and unhealthy dietary fat: a case report

Background
Hereditary hemochromatosis is an autosomal recessive disorder where the clinical phenotype of skin pigmentation and organ damage occurs only in homozygotes. Simple heterozygotes, that is, just C282Y, typically do not develop iron overload. Here we present a case where a simple heterozygote in combination with alcoholism developed high ferritin and high transferrin saturation levels indicative of iron overload. Though alcoholism alone could explain her presentation, we hypothesize that an inflammatory cocktail of iron and alcohol probably caused our patient to succumb to acute liver failure at a very young age.

Case presentation
A 29-year-old Caucasian woman presented to the hospital with progressively worsening yellowish discoloration of her eyes and skin associated with anorexia, nausea, vomiting, diffuse abdominal discomfort, increasing abdominal girth, dark urine and pale stools for about 2 weeks. Family history was significant for hereditary hemochromatosis. Her father was a simple heterozygote and her grandmother was homozygous for C282Y. Physical examination showed scleral icterus, distended abdomen with hepatomegaly and mild generalized tenderness. Lab test results showed an elevated white blood cell count, ferritin 539 ng/dL, transferrin saturation 58.23%, elevated liver enzymes, elevated international normalized ratio (INR), low albumin, an Alcoholic Liver Disease/Nonalcoholic Fatty Liver Disease (ALD/NAFLD) Index (ANI) of 2.6, suggesting a 93.2% probability of alcoholic liver disease, and a phosphatidyl ethanol level of 537 ng/mL. Genetic testing showed that the patient was heterozygous for the human homeostatic iron regulator protein (HFE) C282Y mutation and the normal allele. Computed tomography (CT) of the abdomen revealed hepatomegaly, portal hypertension and generalized anasarca. Magnetic resonance cholangiopancreatography (MRCP) showed negative results for bile duct pathology. Workup for other causes of liver disease was negative. A diagnosis of acute alcoholic hepatitis was made, with Maddrey's discriminant function of > 32, so prednisolone was started. Her bilirubin and INR continued to increase despite steroids, and the patient unfortunately died.

Conclusion
Our case highlights the importance of considering hemochromatosis in the differential diagnosis of young patients presenting with liver failure, including cases suggestive of alcoholism as the likely etiology. Larger studies are needed to investigate the role of non-iron factors like alcohol and viral hepatitis in the progression of liver disease in simple heterozygotes with hereditary hemochromatosis, given the high prevalence of this mutation in persons of Northern European descent.

Background
Hereditary hemochromatosis (HH) (Fig. 1) is an autosomal recessive disorder where the clinical phenotype of skin pigmentation and organ damage occurs only in homozygotes; even in homozygotes, the phenotype has a broad spectrum depending on sex and penetrance, which is age-related. With regard to heterozygotes, it is mostly the compound heterozygotes, C282Y and the H63D or S65C variant allele, that develop iron overload. Simple heterozygotes, that is, just C282Y, almost never develop iron overload or organ damage.
Although the role of non-iron-related factors like alcohol in modulating the iron threshold required to induce liver damage is well known, the strength of their association in each of the HH phenotypes remains an area that is largely unexplored. We present a case where a simple heterozygote with alcoholism developed high ferritin and high transferrin saturation indicative of iron overload. This is very rare considering the young age, female sex and the genotype of the patient. The iron overload, coupled with probable unhealthy dietary habits in the setting of alcoholism (more fat, fewer essential nutrients, as reported in studies), resulted in an inflammatory cocktail and caused our patient to succumb to acute liver failure at a young age.

Case presentation
A 29-year-old Caucasian woman presented to the hospital with 2 weeks of progressively worsening yellowish discoloration of her eyes and skin associated with anorexia, nausea, vomiting, diffuse abdominal discomfort, increasing abdominal girth, dark urine and pale stools. Past medical history was significant for prior episodes of hospitalization for acute alcoholic intoxication, including an episode a few months prior. Imaging at that time showed hepatic steatosis but no features suggestive of hepatic cirrhosis or portal hypertension. Family history was significant for hereditary hemochromatosis. The patient's father was heterozygous for C282Y and the paternal grandmother was homozygous for C282Y. The patient reported drinking about 1-2 glasses of wine every day, and denied smoking and illicit drug use. Vital signs were as follows: pulse rate 94 beats per minute, respiratory rate 20 per minute, blood pressure 112/78 mmHg, temperature 36.9 °C and oxygen saturation 100% on room air. Physical examination showed scleral icterus, distended abdomen with hepatomegaly and mild generalized tenderness. The patient was diagnosed with acute alcoholic hepatitis, and Gastroenterology was consulted. The patient's Maddrey's discriminant function was 46.4 (a score > 32 indicates poor prognosis and that the patient might benefit from glucocorticoid therapy; a worked computation of this score is sketched after the Conclusion), so orally administered prednisolone 40 mg/day was started. Although the patient's total bilirubin (TBIL) and INR initially improved after initiating steroid therapy, a rebound increase was noted (Table 1), raising concerns for impending liver failure. Also, her creatinine increased from 0.5 to 4 mg/dL. Nephrology was consulted, and a diagnosis of hepatorenal syndrome type 1 was favored. In addition to worsening TBIL, INR and creatinine, the patient developed encephalopathy, succumbed to the disease and died.

Discussion
In this article we focus on the mechanisms of liver injury from the effects of iron, alcohol and dietary habits, and it may not be surprising to see that some of these mechanisms overlap.

Mechanism of liver injury with iron
Excess iron in the hepatocytes and Kupffer cells results in the Fenton reaction and reactive oxygen species production. The free radicals induce lipid peroxidation, which damages the mitochondria, resulting in release of cytochrome c and liver cell apoptosis. Iron overload also stimulates the production of proinflammatory and profibrogenic cytokines including transforming growth factor beta (TGF-β). TGF-β leads to the activation of hepatic stellate cells and excess collagen production. Excess collagen and cross-linking, coupled with iron, inhibit activation of the liver progenitor cells required for the regeneration of liver cells, resulting in fibrosis.
Mechanism of liver injury with alcohol
Alcohol is metabolized to acetaldehyde. Acetaldehyde results in the generation of reactive oxygen species, which causes lipid peroxidation and cell membrane and DNA damage. Damaged hepatocytes express antigens which are otherwise hidden from the immune system, resulting in immune stimulation. Chronically heightened immune activity results in immune exhaustion, overwhelming bacterial infection, multi-organ damage and death. Also, chronic alcohol abuse results in overgrowth of gut bacteria, and this, along with alcohol-induced leaky gut, results in increased delivery of endotoxins to the liver and liver damage.

Mechanism of liver injury with unhealthy dietary habits
Excess dietary fat increases insulin resistance and hyperinsulinemia, which leads to accumulation of fatty acids. Accumulated fatty acids result in the generation of lipotoxic species, hepatocellular oxidant stress and cell death. The dying hepatocytes release signals and express antigens which are otherwise hidden from the immune system, turning on the immunogenic and fibrogenic cascade. Individual susceptibility to fatty acid-induced oxidant stress depends on other factors, including iron overload states such as HFE and alcoholism. Thus, many of the mechanisms of liver injury from iron, alcohol and unhealthy dietary habits overlap (Fig. 4). Although non-HH factors like alcoholism, NAFLD and nonalcoholic steatohepatitis (NASH) are associated with hyperferritinemia from chronic inflammation, the patient's elevated transferrin saturation can only be explained by her HH status. While heavy alcohol consumption alone could cause severe liver damage, we hypothesize that her HFE status and possible unhealthy dietary fat in the setting of alcoholism accelerated the progression of liver disease. Studies have shown that alcoholics consume a higher amount of fatty food and carbohydrates along with a lower consumption of vegetables and dairy products, which could have a detrimental effect on health. Clinicians must continually probe for factors like personal or family history of hemochromatosis, dietary habits and alcoholism using different strategies and by reframing questions. This is especially important because early recognition, followed by referral to specialized centers for treatment of hereditary hemochromatosis and detoxification, would be pivotal for the prognosis of these patients. Our patient persistently denied any unhealthy alcohol use until later in the disease course. This, coupled with her blood alcohol level of < 3 at admission, very high white blood cell counts, young age and female sex, pointed more towards other differentials like autoimmune hepatitis and infectious etiologies. Although we were fortunate enough to be redirected towards alcohol as the etiology by reports of stomatocytosis in the peripheral blood, a high ANI and a very high phosphatidyl ethanol level, the patient unfortunately succumbed to her acute liver failure.

Conclusion
Considering the high prevalence of HH and the rising mortality from alcoholic liver disease among young adults, more studies exploring the role of alcohol in the development of liver damage in simple heterozygotes, and vice versa, are essential to determine whether all alcoholics have to be screened for hereditary hemochromatosis. This is because more than one factor may often be involved in the pathogenesis and progression of liver dysfunction.
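The case discussion above leans on Maddrey's discriminant function; a minimal sketch of the calculation follows. The prothrombin times are illustrative values chosen only to reproduce a score in the reported range; they are not taken from the case report.

```python
# Maddrey's discriminant function: DF = 4.6 * (PT_patient - PT_control) + TBIL
def maddrey_df(pt_patient_s: float, pt_control_s: float, tbil_mg_dl: float) -> float:
    """PT values in seconds, total bilirubin in mg/dL."""
    return 4.6 * (pt_patient_s - pt_control_s) + tbil_mg_dl

# Illustrative inputs reproducing the reported score of 46.4 (> 32: poor
# prognosis, consider glucocorticoids).
print(maddrey_df(20.0, 12.0, 9.6))  # -> 46.4
```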
/** * A partially applied {@link AssertRelation}, where one value is provided along with a coder to * serialize/deserialize them. */ private static class CheckRelationAgainstExpected<T> implements SerializableFunction<T, Void> { private final AssertRelation<T, T> relation; private final byte[] encodedExpected; private final Coder<T> coder; public CheckRelationAgainstExpected(AssertRelation<T, T> relation, T expected, Coder<T> coder) { this.relation = relation; this.coder = coder; try { this.encodedExpected = CoderUtils.encodeToByteArray(coder, expected); } catch (IOException coderException) { throw new RuntimeException(coderException); } } @Override public Void apply(T actual) { try { T expected = CoderUtils.decodeFromByteArray(coder, encodedExpected); return relation.assertFor(expected).apply(actual); } catch (IOException coderException) { throw new RuntimeException(coderException); } } }
def mosaic(self): if len(self.tiles) > 1: hdflist = sorted(glob.glob(self.fullPath + '/*.hdf')) for i in range(0,len(hdflist),2): ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i], hdflist[i+1]], subset = self.subset, outformat = 'GTiff') ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif') ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True) mosaicCount = len(glob.glob(self.fullPath + '/*mos.tif')) logger.log('SUCCESS', 'Mosaic complete! MODIS tiles %s were successfully mosaicked into %d mosaic images.' % (str(self.tiles), mosaicCount))
/** * Start rpc processor and connect to the proxy server. * @param host proxy server host. * @param port proxy server port. * @param key proxy server key. */ synchronized void connect(String host, int port, String key) { this.host = host; this.port = port; this.key = key; running = true; this.notify(); }
Aetiology of infertility in Central Africa remains largely controversial. The male factor has been less studied than the female factor. We present data obtained on spermatic parameters, comparing fertile and infertile groups. Values obtained in fertile men are similar to those observed in European or American countries. Very low values of spermatozoa concentration and of the percentage of motile and morphologically normal forms were found in infertile males.
n = int(input())
arr = [input() for i in range(n)]

# Count runs of consecutive equal lines.
groups = 0
prev = None
for line in arr:
    if line != prev:
        groups += 1
        prev = line
print(groups)
package main

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// Read a file using the os package and File.Read
func readFileByOsRead() {
	fileObj, err := os.Open("D:\\project\\go\\gopath\\src\\code.wing.com\\fundamental\\poem.txt")
	if err != nil {
		fmt.Printf("File open failed! error is %v.\n", err)
		return
	}
	// Close the file
	defer fileObj.Close()

	var tmp = make([]byte, 128)
	for {
		b, err := fileObj.Read(tmp)
		if err == io.EOF {
			fmt.Println("All file has read out!!!!")
			return
		}
		if err != nil {
			fmt.Printf("File read failed! error is %v.\n", err)
			return
		}
		fmt.Printf("%d bytes has read out!\n", b)
		fmt.Println(string(tmp[:b]))
		if b < 128 {
			return
		}
	}
}

// Read a file using the bufio package
func readFileByBufio() {
	fileObj, err := os.Open("D:\\project\\go\\gopath\\src\\code.wing.com\\fundamental\\poem.txt")
	if err != nil {
		fmt.Printf("File open failed! error is %v.\n", err)
		return
	}
	// Close the file
	defer fileObj.Close()

	// Create a reader object for reading the file contents
	reader := bufio.NewReader(fileObj)
	for {
		str, err := reader.ReadString('\n')
		if err == io.EOF {
			fmt.Println("All file has read out!!!!")
			return
		}
		if err != nil {
			fmt.Printf("File read failed! error is %v.\n", err)
			return
		}
		fmt.Print(str)
	}
}

// Read a file using ioutil
func readFileByIOutils() {
	b, err := ioutil.ReadFile("D:\\project\\go\\gopath\\src\\code.wing.com\\fundamental\\poem.txt")
	if err != nil {
		fmt.Printf("File read failed! error is %v.\n", err)
		return
	}
	fmt.Print(string(b))
}

func main() {
	// readFileByOsRead()
	// readFileByBufio()
	readFileByIOutils()
}
// ********** YATIN KWATRA **********
//AC bakshi Rabha

#include <bits/stdc++.h>

#define ll long long
#define ull unsigned long long
#define FIO ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL);
#define endl "\n"
#define fo(i,a,b) for(int i = a; i<=b ; i++)
#define rfo(i,a,b) for(int i = a; i>=b ; i--)
#define vii vector<int>
#define vll vector<long long >
#define all(v) v.begin(),v.end()
#define mp make_pair
#define pb push_back
#define pob pop_back
#define ff first
#define ss second
#define pii pair<int,int>
#define pll pair<long long>
#define mll map<long,long>
#define mii map<int,int>
#define vvii vector<vii>
#define mod 1000000007

using namespace std;

void INPUT()
{
#ifndef ONLINE_JUDGE
    freopen("input.txt", "r", stdin);
    freopen("output.txt", "w", stdout);
#endif
}

// strict weak ordering: use '<' so equal keys compare false (required by std::sort)
bool cmp(pii a , pii b)
{
    int s1 = a.ff + a.ss;
    int s2 = b.ff + b.ss;
    return s1 < s2;
}

// collects up to 6 prime factors of n into v
void prime(ll n , vll &v)
{
    if(n <= 1) return;
    while((n%2) == 0)
    {
        n/=2;
        v.pb(2);
        if(v.size() == 6) return;
    }
    for(ll j = 3 ; j*j <= n ; j+=2)
    {
        while((n%j) == 0)
        {
            n/=j;
            v.pb(j);
            if(v.size() == 6) return;
        }
    }
    if(n>2) v.pb(n);
    return;
}

void solve()
{
    ll n;
    cin >> n;

    vll v;
    prime(n,v);

    ll a = -1 , b = -1 , c = -1;
    // cast avoids the signed/unsigned comparison hidden in the fo macro
    fo(i,0,(int)v.size()-1)
    {
        if(a == -1) a = v[i];
        else if(b == -1)
        {
            if(v[i] != v[i-1]) b = v[i];
            else if(i < (int)v.size()-1) b = v[i]*v[i+1];
        }
        else break;
    }
    c = n / (a*b);

    if((a!=b and b!=c and c!=a) and a>=2 and b>=2 and c>=2)
    {
        cout << "YES\n" << a << " " << b << " " << c << endl;
    }
    else cout << "NO\n";
}

int main()
{
    FIO
    // INPUT();
    int t;
    t = 1;
    cin >> t;
    while(t--)
    {
        solve();
    }
    return 0;
}
/**
 * Adds a {@link Fight}
 * @param result The {@link Fight} received
 * @return the running fraction of all recorded fights won by the heroes team
 */
double add(Fight result) {
    if (result.winnerTeam.equalsIgnoreCase("heroes")) {
        heroes = heroes + 1;
    } else {
        villains = villains + 1;
    }
    return ((double) heroes / (heroes + villains));
}
package com.google.inject.internal;

import com.google.inject.spi.Message;

abstract interface ErrorHandler
{
  public abstract void handle(Object paramObject, Errors paramErrors);

  public abstract void handle(Message paramMessage);
}
The hardline Democrat, who lost last year’s presidential election despite winning the popular vote, labelled the Republican a “divisive” leader. Hillary Clinton said US President Donald Trump is a danger to America and to the world, and doesn’t have the temperament to be at the helm of one of the world’s largest superpowers. Mrs Clinton said in an interview with the French daily Le Monde: “I think Mr Trump poses a clear and present danger to America and to the world because he wasn’t ready to become president. He doesn’t have the right temperament nor the right leadership skills to be a head of state. His actions have deeply divided America and its allies. He has injected instability and unpredictability into international affairs. I personally think he is more dangerous than he is helpless.”
In today's tutorial we're going to use JavaScript to create a simple, flexible, parallax effect where a logo seemingly changes colors with the background it's on when the user scrolls.

(Image: the Slip Scroll effect in action.)

We'll be creating a "default" element which holds true to its placement (position: fixed), and a bunch of "moveable" elements whose position is dependent on that "default" element. We'll use JavaScript to make this happen every time the user scrolls.

Note: to cover all bases I've provided the explanation in video and written form.

Watch the Video

Read the Tutorial

Base Markup

We'll start by creating a couple of containing elements. Let's make one of their backgrounds dark and one light so we can have a contrasting image contained in them. Let's also go ahead and make our first image the "default" image by giving it a class of default, whilst the other images will get the class of moveable. (A consolidated sketch of the markup, styles and script from all the steps below is collected at the end of this tutorial.)

Base Styles

Now let's make sure our images don't end up scrolling outside of their containers by setting overflow: hidden. We'll also go ahead and say these containers have relative position, so the absolutely positioned elements will align to these containers instead of directly to the fixed element when we write our JavaScript. For the sake of scrollability, let's give these containers a min-height of around 400px, and to hold our logos away from the edges, let's give them some padding of 1em. Each container also needs some contrasting color. Finally, as promised, we set our default and moveable CSS so one is stuck with the page as the user scrolls, and the other moves along with it without bumping into other elements. That should take care of the markup and styling. If you view the page, you should see the default logo scrolling down and hiding behind the other containers, whilst all the moveable logos should appear as normal elements in the top-left of their respective containers.

Introducing JavaScript

Now for the fun part: making it work with JavaScript. First we'll load jQuery and our custom script at the bottom of our index.html, then create and open a file named js/slipScroll.js. Within that file, the first thing we'll do is create a function called setLogo and throw this function into a jQuery scroll event, so that every time the user scrolls a pixel, this event fires. We'll also want to make sure we fire this event when the user first arrives at the page (before they scroll).

Getting Things Working

Now for the magic. Let's say that every single instance of .moveable on the page should change its CSS top position to however far the default image is from the top of the page, minus however far this .moveable element's container is from the top of the page. Refresh your page, and voila! You've just created a pseudo-parallax scrolling effect from scratch in just a few lines of code.

Conclusion

I encourage you to tinker around with this solution. Try playing with the JavaScript function itself to see what kind of weird offsets you get if you add or remove a few pixels. Try using different elements (navigation, anyone?) instead of just images for your scrollable content. I hope you learned a lot, and feel free to drop comments below if you have any questions.
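The tutorial's inline code blocks did not survive extraction, so here is a minimal, self-contained sketch reconstructed from the prose above. The class names and the 400px/1em values come from the text; the image sources, colors, and exact offset math are assumptions, and any jQuery 1.x build should work.

```html
<!DOCTYPE html>
<html>
<head>
<style>
  .container { position: relative; overflow: hidden; min-height: 400px; padding: 1em; }
  .dark  { background: #222; }
  .light { background: #eee; }
  .default  { position: fixed; }    /* holds true to its placement */
  .moveable { position: absolute; } /* repositioned on every scroll */
</style>
</head>
<body>
  <div class="container dark"><img class="default" src="logo-light.png" alt="logo"></div>
  <div class="container light"><img class="moveable" src="logo-dark.png" alt="logo"></div>
  <div class="container dark"><img class="moveable" src="logo-light.png" alt="logo"></div>

  <script src="https://code.jquery.com/jquery-1.11.0.min.js"></script>
  <script>
    // js/slipScroll.js — align each .moveable logo with the fixed .default logo.
    function setLogo() {
      $('.moveable').each(function () {
        // distance of the fixed logo from the document top, minus the
        // distance of this logo's container from the document top
        $(this).css('top',
          $('.default').offset().top - $(this).closest('.container').offset().top);
      });
    }
    $(window).on('scroll', setLogo); // fires on every scrolled pixel
    setLogo();                       // and once when the page first loads
  </script>
</body>
</html>
```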
// Copyright The Shipwright Contributors // // SPDX-License-Identifier: Apache-2.0 package buildrun import ( "context" "encoding/json" "fmt" "regexp" "strconv" "strings" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "knative.dev/pkg/apis" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" "github.com/shipwright-io/build/pkg/config" "github.com/shipwright-io/build/pkg/ctxlog" buildmetrics "github.com/shipwright-io/build/pkg/metrics" "github.com/shipwright-io/build/pkg/reconciler/buildrun/resources" ) const ( namespace string = "namespace" name string = "name" generatedNameRegex = "-[a-z0-9]{5,5}$" ) // blank assignment to verify that ReconcileBuildRun implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileBuildRun{} // ReconcileBuildRun reconciles a BuildRun object type ReconcileBuildRun struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver config *config.Config client client.Client scheme *runtime.Scheme setOwnerReferenceFunc setOwnerReferenceFunc } // NewReconciler returns a new reconcile.Reconciler func NewReconciler(c *config.Config, mgr manager.Manager, ownerRef setOwnerReferenceFunc) reconcile.Reconciler { return &ReconcileBuildRun{ config: c, client: mgr.GetClient(), scheme: mgr.GetScheme(), setOwnerReferenceFunc: ownerRef, } } // Reconcile reads that state of the cluster for a Build object and makes changes based on the state read // and what is in the Build.Spec func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { var buildRun *buildv1alpha1.BuildRun var build *buildv1alpha1.Build updateBuildRunRequired := false // Set the ctx to be Background, as the top-level context for incoming requests. ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut) defer cancel() ctxlog.Debug(ctx, "starting reconciling request from a BuildRun or TaskRun event", namespace, request.Namespace, name, request.Name) // with build run cancel, it is now possible for a build run update to stem from something other than a task run update, // so we can no longer assume that a build run event will not come in after the build run has a task run ref in its status buildRun = &buildv1alpha1.BuildRun{} getBuildRunErr := r.GetBuildRunObject(ctx, request.Name, request.Namespace, buildRun) lastTaskRun := &v1beta1.TaskRun{} getTaskRunErr := r.client.Get(ctx, types.NamespacedName{Name: request.Name, Namespace: request.Namespace}, lastTaskRun) if getBuildRunErr != nil && getTaskRunErr != nil { if !apierrors.IsNotFound(getBuildRunErr) { return reconcile.Result{}, getBuildRunErr } if !apierrors.IsNotFound(getTaskRunErr) { return reconcile.Result{}, getTaskRunErr } // If the BuildRun and TaskRun are not found, it might mean that we are running a Reconcile after a TaskRun was deleted. If this is the case, we need // to identify from the request the BuildRun name associate to it and update the BuildRun Status. 
r.VerifyRequestName(ctx, request, buildRun) return reconcile.Result{}, nil } // Validating buildrun name is a valid label value if errs := validation.IsValidLabelValue(buildRun.Name); len(errs) > 0 { // stop reconciling and mark the BuildRun as Failed if updateErr := resources.UpdateConditionWithFalseStatus( ctx, r.client, buildRun, strings.Join(errs, ", "), resources.BuildRunNameInvalid, ); updateErr != nil { return reconcile.Result{}, updateErr } return reconcile.Result{}, nil } // if this is a build run event after we've set the task run ref, get the task run using the task run name stored in the build run if getBuildRunErr == nil && apierrors.IsNotFound(getTaskRunErr) && buildRun.Status.LatestTaskRunRef != nil { getTaskRunErr = r.client.Get(ctx, types.NamespacedName{Name: *buildRun.Status.LatestTaskRunRef, Namespace: request.Namespace}, lastTaskRun) } // for existing TaskRuns update the BuildRun Status, if there is no TaskRun, then create one if getTaskRunErr != nil { if apierrors.IsNotFound(getTaskRunErr) { build = &buildv1alpha1.Build{} err := resources.GetBuildObject(ctx, r.client, buildRun, build) if err != nil { if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1alpha1.Succeeded) { return reconcile.Result{}, nil } // system call failure, reconcile again return reconcile.Result{}, err } // Validate if the Build was successfully registered if build.Status.Registered == "" { err := fmt.Errorf("the Build is not yet validated, build: %s", build.Name) // reconcile again until it gets a registration value return reconcile.Result{}, err } if build.Status.Registered != corev1.ConditionTrue { // stop reconciling and mark the BuildRun as Failed // we only reconcile again if the status.Update call fails message := fmt.Sprintf("the Build is not registered correctly, build: %s, registered status: %s, reason: %s", build.Name, build.Status.Registered, build.Status.Reason) if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, message, resources.ConditionBuildRegistrationFailed); updateErr != nil { return reconcile.Result{}, updateErr } return reconcile.Result{}, nil } // Ensure the build-related labels on the BuildRun if buildRun.GetLabels() == nil { buildRun.Labels = make(map[string]string) } // make sure the BuildRun has not already been cancelled if buildRun.IsCanceled() { if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, "the BuildRun is marked canceled.", buildv1alpha1.BuildRunStateCancel); updateErr != nil { return reconcile.Result{}, updateErr } return reconcile.Result{}, nil } // Set OwnerReference for Build and BuildRun only when build.shipwright.io/build-run-deletion is set "true" if build.GetAnnotations()[buildv1alpha1.AnnotationBuildRunDeletion] == "true" && !resources.IsOwnedByBuild(build, buildRun.OwnerReferences) { if err := r.setOwnerReferenceFunc(build, buildRun, r.scheme); err != nil { build.Status.Reason = buildv1alpha1.SetOwnerReferenceFailed build.Status.Message = fmt.Sprintf("unexpected error when trying to set the ownerreference: %v", err) if err := r.client.Status().Update(ctx, build); err != nil { return reconcile.Result{}, err } } ctxlog.Info(ctx, fmt.Sprintf("updating BuildRun %s OwnerReferences, owner is Build %s", buildRun.Name, build.Name), namespace, request.Namespace, name, request.Name) updateBuildRunRequired = true } buildGeneration := strconv.FormatInt(build.Generation, 10) if buildRun.GetLabels()[buildv1alpha1.LabelBuild] != build.Name || 
				buildRun.GetLabels()[buildv1alpha1.LabelBuildGeneration] != buildGeneration {
				buildRun.Labels[buildv1alpha1.LabelBuild] = build.Name
				buildRun.Labels[buildv1alpha1.LabelBuildGeneration] = buildGeneration
				ctxlog.Info(ctx, "updating BuildRun labels", namespace, request.Namespace, name, request.Name)
				updateBuildRunRequired = true
			}

			if updateBuildRunRequired {
				if err = r.client.Update(ctx, buildRun); err != nil {
					return reconcile.Result{}, err
				}
				ctxlog.Info(ctx, fmt.Sprintf("successfully updated BuildRun %s", buildRun.Name), namespace, request.Namespace, name, request.Name)
			}

			// Set the Build spec in the BuildRun status
			buildRun.Status.BuildSpec = &build.Spec
			ctxlog.Info(ctx, "updating BuildRun status", namespace, request.Namespace, name, request.Name)
			if err = r.client.Status().Update(ctx, buildRun); err != nil {
				return reconcile.Result{}, err
			}

			// Choose a service account to use
			svcAccount, err := resources.RetrieveServiceAccount(ctx, r.client, build, buildRun)
			if err != nil {
				if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1alpha1.Succeeded) {
					return reconcile.Result{}, nil
				}
				// system call failure, reconcile again
				return reconcile.Result{}, err
			}

			strategy, err := r.getReferencedStrategy(ctx, build, buildRun)
			if err != nil {
				if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1alpha1.Succeeded) {
					return reconcile.Result{}, nil
				}
				return reconcile.Result{}, err
			}

			// Create the TaskRun; this needs to be the last step in this block to be idempotent
			generatedTaskRun, err := r.createTaskRun(ctx, svcAccount, strategy, build, buildRun)
			if err != nil {
				if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1alpha1.Succeeded) {
					ctxlog.Info(ctx, "taskRun generation failed", namespace, request.Namespace, name, request.Name)
					return reconcile.Result{}, nil
				}
				// system call failure, reconcile again
				return reconcile.Result{}, err
			}

			ctxlog.Info(ctx, "creating TaskRun from BuildRun", namespace, request.Namespace, name, generatedTaskRun.GenerateName, "BuildRun", buildRun.Name)
			if err = r.client.Create(ctx, generatedTaskRun); err != nil {
				// system call failure, reconcile again
				return reconcile.Result{}, err
			}

			// Set the LatestTaskRunRef in the BuildRun status
			buildRun.Status.LatestTaskRunRef = &generatedTaskRun.Name
			ctxlog.Info(ctx, "updating BuildRun status with TaskRun name", namespace, request.Namespace, name, request.Name, "TaskRun", generatedTaskRun.Name)
			if err = r.client.Status().Update(ctx, buildRun); err != nil {
				// we ignore the error here to prevent another reconciliation that would create another TaskRun,
				// the LatestTaskRunRef field will also be set in the reconciliation from a TaskRun
				// the risk is that if the controller is restarted before the field is set, another TaskRun will be created
				ctxlog.Error(ctx, err, "failed to update the BuildRun status, the error is ignored", namespace, request.Namespace, name, request.Name)
			}

			// Increase BuildRun count in metrics
			buildmetrics.BuildRunCountInc(
				buildRun.Status.BuildSpec.StrategyName(),
				buildRun.Namespace,
				buildRun.Spec.BuildRef.Name,
				buildRun.Name,
			)

			// Report buildrun ramp-up duration (time between buildrun creation and taskrun creation)
			buildmetrics.BuildRunRampUpDurationObserve(
				buildRun.Status.BuildSpec.StrategyName(),
				buildRun.Namespace,
				buildRun.Spec.BuildRef.Name,
				buildRun.Name,
				generatedTaskRun.CreationTimestamp.Time.Sub(buildRun.CreationTimestamp.Time),
			)
		} else {
			return reconcile.Result{}, getTaskRunErr
		}
	} else {
		ctxlog.Info(ctx, "taskRun already exists", namespace,
			request.Namespace, name, request.Name)

		if getBuildRunErr != nil && !apierrors.IsNotFound(getBuildRunErr) {
			return reconcile.Result{}, getBuildRunErr
		} else if apierrors.IsNotFound(getBuildRunErr) {
			// this is a TaskRun event; try getting the BuildRun from the label on the TaskRun
			err := r.GetBuildRunObject(ctx, lastTaskRun.Labels[buildv1alpha1.LabelBuildRun], request.Namespace, buildRun)
			if err != nil && !apierrors.IsNotFound(err) {
				return reconcile.Result{}, err
			}
			if err != nil && apierrors.IsNotFound(err) {
				return reconcile.Result{}, nil
			}
		}

		if buildRun.IsCanceled() && !lastTaskRun.IsCancelled() {
			ctxlog.Info(ctx, "buildRun marked for cancellation, patching task run", namespace, request.Namespace, name, request.Name)
			// patch the Tekton TaskRun, like tkn does, to start Tekton's cancelling logic
			trueParam := true
			if err := r.patchTaskRun(ctx, lastTaskRun, "replace", "/spec/status", v1beta1.TaskRunSpecStatusCancelled, metav1.PatchOptions{Force: &trueParam}); err != nil {
				return reconcile.Result{}, err
			}
		}

		// Check if the BuildRun is already finished; this happens if the build controller is restarted.
		// It then reconciles all TaskRuns. This is valuable if the build controller was down while a TaskRun
		// finished, which would otherwise be missed. But if the TaskRun was already completed and the status
		// synchronized into the BuildRun, then yet another reconciliation is not necessary.
		if buildRun.Status.CompletionTime != nil {
			ctxlog.Info(ctx, "buildRun already marked completed", namespace, request.Namespace, name, request.Name)
			return reconcile.Result{}, nil
		}

		if len(lastTaskRun.Status.TaskRunResults) > 0 {
			ctxlog.Info(ctx, "surfacing taskRun results to BuildRun status", namespace, request.Namespace, name, request.Name)
			resources.UpdateBuildRunUsingTaskResults(ctx, buildRun, lastTaskRun.Status.TaskRunResults, request)
		}

		trCondition := lastTaskRun.Status.GetCondition(apis.ConditionSucceeded)
		if trCondition != nil {
			if err := resources.UpdateBuildRunUsingTaskRunCondition(ctx, r.client, buildRun, lastTaskRun, trCondition); err != nil {
				return reconcile.Result{}, err
			}

			taskRunStatus := trCondition.Status

			// check if we should delete the generated service account by checking the build run spec and that the task run is complete
			if taskRunStatus == corev1.ConditionTrue || taskRunStatus == corev1.ConditionFalse {
				if err := resources.DeleteServiceAccount(ctx, r.client, buildRun); err != nil {
					ctxlog.Error(ctx, err, "Error during deletion of generated service account.")
					return reconcile.Result{}, err
				}
			}

			buildRun.Status.LatestTaskRunRef = &lastTaskRun.Name

			if buildRun.Status.StartTime == nil && lastTaskRun.Status.StartTime != nil {
				buildRun.Status.StartTime = lastTaskRun.Status.StartTime

				// Report the buildrun established duration (time between the creation of the buildrun and the start of the buildrun)
				buildmetrics.BuildRunEstablishObserve(
					buildRun.Status.BuildSpec.StrategyName(),
					buildRun.Namespace,
					buildRun.Spec.BuildRef.Name,
					buildRun.Name,
					buildRun.Status.StartTime.Time.Sub(buildRun.CreationTimestamp.Time),
				)
			}

			if lastTaskRun.Status.CompletionTime != nil && buildRun.Status.CompletionTime == nil {
				buildRun.Status.CompletionTime = lastTaskRun.Status.CompletionTime

				// buildrun completion duration (total time between the creation of the buildrun and the buildrun completion)
				buildmetrics.BuildRunCompletionObserve(
					buildRun.Status.BuildSpec.StrategyName(),
					buildRun.Namespace,
					buildRun.Spec.BuildRef.Name,
					buildRun.Name,
					buildRun.Status.CompletionTime.Time.Sub(buildRun.CreationTimestamp.Time),
				)

				// Look for the pod created by the taskrun
				var pod = &corev1.Pod{}
				if err := r.client.Get(ctx, types.NamespacedName{Namespace: request.Namespace, Name: lastTaskRun.Status.PodName}, pod); err == nil {
					if len(pod.Status.InitContainerStatuses) > 0 {
						lastInitPodIdx := len(pod.Status.InitContainerStatuses) - 1
						lastInitPod := pod.Status.InitContainerStatuses[lastInitPodIdx]

						if lastInitPod.State.Terminated != nil {
							// taskrun pod ramp-up (time between pod creation and last init container completion)
							buildmetrics.TaskRunPodRampUpDurationObserve(
								buildRun.Status.BuildSpec.StrategyName(),
								buildRun.Namespace,
								buildRun.Spec.BuildRef.Name,
								buildRun.Name,
								lastInitPod.State.Terminated.FinishedAt.Sub(pod.CreationTimestamp.Time),
							)
						}
					}

					// taskrun ramp-up duration (time between taskrun creation and taskrun pod creation)
					buildmetrics.TaskRunRampUpDurationObserve(
						buildRun.Status.BuildSpec.StrategyName(),
						buildRun.Namespace,
						buildRun.Spec.BuildRef.Name,
						buildRun.Name,
						pod.CreationTimestamp.Time.Sub(lastTaskRun.CreationTimestamp.Time),
					)
				}
			}

			ctxlog.Info(ctx, "updating buildRun status", namespace, request.Namespace, name, request.Name)
			if err := r.client.Status().Update(ctx, buildRun); err != nil {
				return reconcile.Result{}, err
			}
		}
	}

	ctxlog.Debug(ctx, "finishing reconciling request from a BuildRun or TaskRun event", namespace, request.Namespace, name, request.Name)

	return reconcile.Result{}, nil
}

// GetBuildRunObject retrieves an existing BuildRun based on a name and namespace
func (r *ReconcileBuildRun) GetBuildRunObject(ctx context.Context, objectName string, objectNS string, buildRun *buildv1alpha1.BuildRun) error {
	if err := r.client.Get(ctx, types.NamespacedName{Name: objectName, Namespace: objectNS}, buildRun); err != nil {
		return err
	}
	return nil
}

// VerifyRequestName parses a Reconcile request name and looks for an associated BuildRun name.
// If the BuildRun object exists and is not yet completed, it will update it with an error.
func (r *ReconcileBuildRun) VerifyRequestName(ctx context.Context, request reconcile.Request, buildRun *buildv1alpha1.BuildRun) {

	regxBuildRun, _ := regexp.Compile(generatedNameRegex)

	// Check if the name belongs to a TaskRun generated name https://regex101.com/r/Wjs3bV/10
	// and extract the BuildRun name
	matched := regxBuildRun.MatchString(request.Name)
	if matched {
		if split := regxBuildRun.Split(request.Name, 2); len(split) > 0 {
			// Update the related BuildRun
			err := r.GetBuildRunObject(ctx, split[0], request.Namespace, buildRun)
			if err == nil && buildRun.Status.CompletionTime == nil {
				// We ignore the errors from the following call, because the parent call of this function will always
				// return back a reconcile.Result{}, nil.
				// This is done to avoid infinite reconcile loops when a BuildRun
				// no longer exists
				_ = resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, fmt.Sprintf("taskRun %s doesn't exist", request.Name), resources.ConditionTaskRunIsMissing)
			}
		}
	}
}

func (r *ReconcileBuildRun) getReferencedStrategy(ctx context.Context, build *buildv1alpha1.Build, buildRun *buildv1alpha1.BuildRun) (strategy buildv1alpha1.BuilderStrategy, err error) {
	if build.Spec.Strategy.Kind == nil {
		// If the strategy Kind is not specified, we default to a namespaced-scope strategy
		ctxlog.Info(ctx, "missing strategy Kind, defaulting to a namespaced-scope one", buildRun.Name, build.Name, namespace)
		strategy, err = resources.RetrieveBuildStrategy(ctx, r.client, build)
		if err != nil {
			if apierrors.IsNotFound(err) {
				if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.BuildStrategyNotFound); updateErr != nil {
					return nil, resources.HandleError("failed to get referenced strategy", err, updateErr)
				}
			}
		}
		return strategy, err
	}

	switch *build.Spec.Strategy.Kind {
	case buildv1alpha1.NamespacedBuildStrategyKind:
		strategy, err = resources.RetrieveBuildStrategy(ctx, r.client, build)
		if err != nil {
			if apierrors.IsNotFound(err) {
				if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.BuildStrategyNotFound); updateErr != nil {
					return nil, resources.HandleError("failed to get referenced strategy", err, updateErr)
				}
			}
		}
	case buildv1alpha1.ClusterBuildStrategyKind:
		strategy, err = resources.RetrieveClusterBuildStrategy(ctx, r.client, build)
		if err != nil {
			if apierrors.IsNotFound(err) {
				if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ClusterBuildStrategyNotFound); updateErr != nil {
					return nil, resources.HandleError("failed to get referenced strategy", err, updateErr)
				}
			}
		}
	default:
		err = fmt.Errorf("unknown strategy %s", string(*build.Spec.Strategy.Kind))
		if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ConditionUnknownStrategyKind); updateErr != nil {
			return nil, resources.HandleError("failed to get referenced strategy", err, updateErr)
		}
	}

	return strategy, err
}

func (r *ReconcileBuildRun) createTaskRun(ctx context.Context, serviceAccount *corev1.ServiceAccount, strategy buildv1alpha1.BuilderStrategy, build *buildv1alpha1.Build, buildRun *buildv1alpha1.BuildRun) (*v1beta1.TaskRun, error) {
	var (
		generatedTaskRun *v1beta1.TaskRun
	)

	generatedTaskRun, err := resources.GenerateTaskRun(r.config, build, buildRun, serviceAccount.Name, strategy)
	if err != nil {
		if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ConditionTaskRunGenerationFailed); updateErr != nil {
			return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr)
		}
		return nil, err
	}

	// Set OwnerReference for BuildRun and TaskRun
	if err := r.setOwnerReferenceFunc(buildRun, generatedTaskRun, r.scheme); err != nil {
		if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ConditionSetOwnerReferenceFailed); updateErr != nil {
			return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr)
		}
		return nil, err
	}

	return generatedTaskRun, nil
}

type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func (r *ReconcileBuildRun) patchTaskRun(ctx context.Context,
tr *v1beta1.TaskRun, op, path, value string, opts metav1.PatchOptions) error { payload := []patchStringValue{{ Op: op, Path: path, Value: value, }} data, err := json.Marshal(payload) if err != nil { return err } patch := client.RawPatch(types.JSONPatchType, data) patchOpt := client.PatchOptions{Raw: &opts} return r.client.Patch(ctx, tr, patch, &patchOpt) }
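// Illustrative sketch (not part of the controller above): what the JSON patch
// body marshalled by patchTaskRun looks like for a cancellation. The package
// main wrapper is an assumption added so the snippet runs standalone; the
// literal "TaskRunCancelled" mirrors Tekton's v1beta1.TaskRunSpecStatusCancelled.
package main

import (
	"encoding/json"
	"fmt"
)

type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func main() {
	payload := []patchStringValue{{
		Op:    "replace",
		Path:  "/spec/status",
		Value: "TaskRunCancelled",
	}}
	data, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	// Prints: [{"op":"replace","path":"/spec/status","value":"TaskRunCancelled"}]
	fmt.Println(string(data))
}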
package client

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/akamai/AkamaiOPEN-edgegrid-golang/jsonhooks-v1"
)

// APIError exposes an Akamai OPEN Edgegrid Error
type APIError struct {
	error
	Type        string           `json:"type"`
	Title       string           `json:"title"`
	Status      int              `json:"status"`
	Detail      string           `json:"detail"`
	Errors      []APIErrorDetail `json:"errors"`
	Problems    []APIErrorDetail `json:"problems"`
	Instance    string           `json:"instance"`
	Method      string           `json:"method"`
	ServerIP    string           `json:"serverIp"`
	ClientIP    string           `json:"clientIp"`
	RequestID   string           `json:"requestId"`
	RequestTime string           `json:"requestTime"`
	Response    *http.Response   `json:"-"`
	RawBody     string           `json:"-"`
}

// APIErrorDetail describes a single nested error or problem entry.
type APIErrorDetail struct {
	Type          string `json:"type"`
	Title         string `json:"title"`
	Detail        string `json:"detail"`
	RejectedValue string `json:"rejectedValue"`
}

// Error renders the status, title, detail and any nested error details.
// The receiver is named apiErr rather than error so it does not shadow the
// builtin error type.
func (apiErr APIError) Error() string {
	var errorDetails string
	if len(apiErr.Errors) > 0 {
		for _, e := range apiErr.Errors {
			errorDetails = fmt.Sprintf("%s \n %s", errorDetails, e)
		}
	}
	if len(apiErr.Problems) > 0 {
		for _, e := range apiErr.Problems {
			errorDetails = fmt.Sprintf("%s \n %s", errorDetails, e)
		}
	}
	return strings.TrimSpace(fmt.Sprintf("API Error: %d %s %s More Info %s\n %s", apiErr.Status, apiErr.Title, apiErr.Detail, apiErr.Type, errorDetails))
}

// NewAPIError creates a new API error based on an http.Response.
func NewAPIError(response *http.Response) APIError {
	// TODO: handle this error
	body, _ := ioutil.ReadAll(response.Body)

	return NewAPIErrorFromBody(response, body)
}

// NewAPIErrorFromBody creates a new API error, allowing you to pass in a body
//
// This function is intended to be used after the body has already been read for
// other purposes.
func NewAPIErrorFromBody(response *http.Response, body []byte) APIError {
	apiErr := APIError{}
	if err := jsonhooks.Unmarshal(body, &apiErr); err != nil {
		// fall back to the raw HTTP status when the body is not problem JSON
		apiErr.Status = response.StatusCode
		apiErr.Title = response.Status
	}

	apiErr.Response = response
	apiErr.RawBody = string(body)

	return apiErr
}

// IsInformational determines if a response was informational (1XX status)
func IsInformational(r *http.Response) bool {
	return r.StatusCode > 99 && r.StatusCode < 200
}

// IsSuccess determines if a response was successful (2XX status)
func IsSuccess(r *http.Response) bool {
	return r.StatusCode > 199 && r.StatusCode < 300
}

// IsRedirection determines if a response was a redirect (3XX status)
func IsRedirection(r *http.Response) bool {
	return r.StatusCode > 299 && r.StatusCode < 400
}

// IsClientError determines if a response was a client error (4XX status)
func IsClientError(r *http.Response) bool {
	return r.StatusCode > 399 && r.StatusCode < 500
}

// IsServerError determines if a response was a server error (5XX status)
func IsServerError(r *http.Response) bool {
	return r.StatusCode > 499 && r.StatusCode < 600
}

// IsError determines if the response was a client or server error (4XX or 5XX status)
func IsError(r *http.Response) bool {
	return r.StatusCode > 399 && r.StatusCode < 600
}
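// Usage sketch, hedged: assuming the file above lives in the client-v1
// package of AkamaiOPEN-edgegrid-golang (the import path below is an
// assumption), a caller would typically gate on IsError before building an
// APIError from the response.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	client "github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
)

func main() {
	// Stub server that always fails, to exercise the error path.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusForbidden)
		fmt.Fprint(w, `{"type":"forbidden","title":"Forbidden","status":403,"detail":"no access"}`)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	if client.IsError(res) {
		apiErr := client.NewAPIError(res)
		// Prints roughly: API Error: 403 Forbidden no access More Info forbidden
		fmt.Println(apiErr.Error())
	}
}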
# ProjectEuler.Problem.024.py
from ProjectEulerCommons.Base import *

Answer(
	int(''.join(map(str, first_true_value(permutations(list(range(10))), pred = lambda enum: enum[0] == 1000000 - 1))))
)

"""
------------------------------------------------
ProjectEuler.Problem.024.py
The Answer is: 2783915460
Time Elasped: 1.132969617843628sec
------------------------------------------------
"""
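# Companion sketch (standalone; assumes nothing from ProjectEulerCommons):
# the millionth lexicographic permutation can be computed directly with the
# factorial number system instead of enumerating a million permutations.
from math import factorial

def nth_permutation(digits, n):
    """Return the n-th (0-based) lexicographic permutation of digits."""
    digits = list(digits)
    out = []
    for i in range(len(digits) - 1, -1, -1):
        idx, n = divmod(n, factorial(i))  # which block of i! permutations?
        out.append(digits.pop(idx))
    return out

# Prints 2783915460, matching the recorded answer above.
print(''.join(map(str, nth_permutation(range(10), 10**6 - 1))))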
/**
 * Cancel all tasks that may still be executing.
 *
 * @return {@code false} if one of {@link #mFutures} could not be cancelled,
 * typically because it has already completed normally; {@code true} otherwise.
 */
public boolean cancelAll() {
    for (final Future<R> future : mFutures) {
        if (future.isDone() || future.isCancelled()) continue;
        /* interrupt the task if it is running; do not allow it to complete. */
        if (!future.cancel(true)) return false;
    }
    return true;
}
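// Usage sketch (hypothetical, self-contained): the enclosing class and its
// mFutures field are not shown above, so this harness mirrors the same
// cancellation loop against a plain ExecutorService. Note that cancelAll()
// above returns early on the first failure; the loop below does the same.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class CancelAllDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            futures.add(pool.submit(() -> {
                try {
                    Thread.sleep(60_000); // long-running task
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // honor cancel(true)
                }
            }));
        }
        boolean cancelled = true;
        for (Future<?> future : futures) {
            if (future.isDone() || future.isCancelled()) continue;
            if (!future.cancel(true)) { cancelled = false; break; } // as in cancelAll()
        }
        System.out.println("cancelAll succeeded: " + cancelled);
        pool.shutdownNow();
    }
}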
/** * Initializes all user-created {@link Device}s * @return True if the list has changed. */ private boolean initUserDevices() { synchronized (mLock) { if (mUserDevices != null) { return false; } mUserDevices = HashBasedTable.create(); File userDevicesFile = null; try { userDevicesFile = new File( mAndroidFolder, SdkConstants.FN_DEVICES_XML); if (mFop.exists(userDevicesFile)) { mUserDevices.putAll(DeviceParser.parse(userDevicesFile)); return true; } } catch (SAXException e) { if (userDevicesFile != null) { String base = userDevicesFile.getAbsoluteFile() + ".old"; File renamedConfig = new File(base); int i = 0; while (mFop.exists(renamedConfig)) { renamedConfig = new File(base + '.' + (i++)); } mLog.error(e, "Error parsing %1$s, backing up to %2$s", userDevicesFile.getAbsolutePath(), renamedConfig.getAbsolutePath()); userDevicesFile.renameTo(renamedConfig); } } catch (ParserConfigurationException e) { mLog.error(e, "Error parsing %1$s", userDevicesFile == null ? "(null)" : userDevicesFile.getAbsolutePath()); } catch (IOException e) { mLog.error(e, "Error parsing %1$s", userDevicesFile == null ? "(null)" : userDevicesFile.getAbsolutePath()); } } return false; }
Technicians planned to attach two Boeing-built communications satellites to a SpaceX Falcon 9 rocket Monday, a day after launch controllers fueled the booster and fired its nine Merlin first stage engines in a key preflight test.

The two-stage Falcon 9 rocket completed the static fire test Sunday at Cape Canaveral's Complex 40 launch pad, throttling up its main engines to 1.5 million pounds of thrust for a few seconds to verify the vehicle's readiness for flight.

SpaceX conducted the static fire test without the mission's two satellite passengers on board. Workers planned to return the rocket to its hangar and mate the two spacecraft, owned by Asia Broadcast Satellite of Hong Kong and Paris-based Eutelsat, to the rocket as soon as Monday. The satellites were encapsulated inside the Falcon 9's clamshell-like nose cone earlier in the launch campaign.

After final integrated tests, the Falcon 9 rocket will be rolled out of the hangar on the southern perimeter of the Complex 40 launch pad to the liftoff position, then raised vertical for Wednesday's countdown.

Liftoff on Wednesday is set for 10:29 a.m. EDT (1429 GMT) at the opening of a 44-minute launch window.

The official forecast issued by the U.S. Air Force's 45th Weather Squadron predicts an 80 percent chance of favorable weather, with partly cloudy skies and warm temperatures. The only chance of a violation of the Falcon 9's launch criteria is with cumulus clouds, according to the forecast released Monday.

"On launch day, the sea breeze is expected to set up after the end of the morning launch window, triggering convection in the afternoon," Air Force meteorologists wrote. "During the launch window, light southwest winds will allow for cumulus development over the spaceport due to strong surface heating. Therefore, the primary weather threat will be cumulus clouds."

As on all Falcon 9 flights so far this year, SpaceX will attempt to recover the rocket's first stage booster on a barge positioned downrange from the launch site. For this mission, the drone ship will hold position about 420 miles (680 kilometers) east of Cape Canaveral in the Atlantic Ocean.

SpaceX has retrieved the first stage booster after the last three Falcon 9 launches on April 8, May 6 and May 27. Combined with a launcher recovered with a touchdown on land in December, SpaceX now has four previously-flown boosters in its inventory.

One of the rockets — the vehicle recovered in December — is destined to go on vertical display outside the company's headquarters in Hawthorne, California. SpaceX plans to launch the booster that landed on the drone ship in April on another flight in September or October, according to Elon Musk, the firm's founder and chief executive.

A rocket that descended to a successful landing May 6 will go through a series of extensive ground tests to formally qualify other identical vehicles for multiple missions. The nine Merlin 1D main engines on each rocket, a subset of which re-ignite on descent to slow for landing, are already qualified for multiple flights, according to Gwynne Shotwell, SpaceX's president and chief operating officer.

Plans for the booster recovered after the last Falcon 9 launch May 27, which lofted the Thaicom 8 communications satellite, have not been announced.
Wednesday’s mission will mark the 26th flight of a Falcon 9 rocket since it debuted in 2010, and sixth Falcon 9 launch this year. After dropping its first stage for a propulsive landing attempt in the Atlantic, the Falcon 9’s single-engine second stage will propel itself into orbit about nine minutes after liftoff. A second firing of the vacuum-rated Merlin upper stage engine will lob the two satellites aboard the rocket into a highly elliptical “supersynchronous” transfer orbit reaching tens of thousands of miles above Earth. Separation of the two satellites, which are stacked atop one another inside the Falcon 9 fairing, is expected about a half-hour after liftoff Wednesday. Based on the Boeing 702SP satellite bus, the payloads do not carry any chemical rocket propellant for maneuvers in space. Instead, the satellites will drive themselves to operating posts in geostationary orbit over the equator using plasma thrusters, a more efficient propulsion system than conventional rocket engines. Using ionized xenon gas, the electric thrusters will fire almost continuously to reshape the satellites’ orbits, but their low thrust means it will take at least six months to reach their final positions more than 22,000 miles (35,786 kilometers) over the equator. Communications platforms burning chemical liquid fuel can do the job in a few weeks. But the tradeoff made sense for Eutelsat and Asia Broadcast Satellite. Without heavy propellant tanks, each satellite weighs less than 5,000 pounds (between 2,000 and 2,200 kilograms), allowing the spacecraft to share a launch opportunity on a Falcon 9 rocket, which can normally only carry one large telecom payload into orbit at a time. Accounting for the Falcon 9’s relatively low prices, each customer paid about $30 million to launch each satellite. Mexican satellite operator Satmex, which was acquired by Eutelsat in 2014, and Asia Broadcast Satellite ordered four “all-electric” satellites from Boeing in 2012. The first pair launched on a Falcon 9 rocket in March 2015, marking the first such all-electric communications spacecraft ever flown. The second duo going up Wednesday — ABS 2A and Eutelsat 117 West B — will enter service around the end of the year or in early 2017. ABS 2A hosts 48 Ku-band transponders to serve customers in South Asia, Southeast Asia, Russia, sub-Saharan Africa, and the Middle East and North Africa, according to ABS. Parked in geostationary orbit at 75 degrees east longitude, the craft will broadcast direct-to-home television services, and support mobile communications clients. It will be located near the ABS 2 satellite launched aboard an Ariane 5 rocket in February 2014. Eutelsat 117 West B will steer into an orbital slot at 116.8 degrees west longitude, beaming video programming across Latin America. Its coverage range will extend from the southern United States to Patagonia, with an emphasis on large markets in Mexico, South America and the Caribbean. The commercial satellite also carries a navigation payload supplied by the U.S. Federal Aviation Administration to provide more accurate GPS position information to private and commercial aircraft transiting the United States. The navigation data from the Wide Area Augmentation System, or WAAS, beacon supports precision landing approaches in inclement weather, allows air traffic controllers to assign more direct flight paths, and reduces separation distances required between aircraft in flight. The FAA’s existing GPS augmentation system is comprised of payloads aboard aging satellites. 
/**
 * tlibs test file
 * @author Tobias Weber <[email protected]>
 * @license GPLv2 or GPLv3
 */

// g++ -std=c++11 -o rand rand.cpp ../math/rand.cpp ../log/log.cpp -lm -lpthread

#include "../math/rand.h"
#include "../math/stat.h"
#include "../string/string.h"

#include <cmath>    // std::sqrt (was missing)
#include <iostream>
#include <iomanip>
#include <mutex>    // std::mutex (was missing)
#include <thread>
#include <chrono>
#include <tuple>    // std::tuple, std::tie (was missing)
#include <vector>   // std::vector (was missing)
#include <boost/type_traits/function_traits.hpp>

using t_real = double;
using t_seed = typename boost::function_traits<decltype(tl::init_rand_seed)>::arg1_type;

std::mutex mtx;

void rnd_fkt(t_seed iSeed)
{
	mtx.lock();
	tl::init_rand_seed(iSeed);

	std::cout << "seed: " << iSeed << std::endl;

	std::cout << tl::rand01<float>() << " " << tl::rand01<float>() << std::endl;
	std::cout << tl::rand_minmax<float>(0., 10.) << std::endl;
	std::cout << tl::rand_minmax<int>(0, 10) << std::endl;
	std::cout << tl::rand_binomial<int, float>(100, 0.5) << std::endl;

	auto vecRnd = tl::rand_norm_nd<>({1., 2., 3.}, {0.25, 0.5, 0.75});
	for(auto d : vecRnd) std::cout << d << ", ";
	std::cout << std::endl;

	vecRnd = tl::rand_norm_nd<>({1., 2., 3.}, {0.25, 0.5, 0.75});
	for(auto d : vecRnd) std::cout << d << ", ";
	std::cout << std::endl;

	auto vecRnd2 = tl::rand_exp_nd<>({1., 2., 3.});
	for(auto d : vecRnd2) std::cout << d << ", ";
	std::cout << std::endl;

	mtx.unlock();
}

std::tuple<std::vector<t_real>, std::vector<t_real>, std::vector<t_real>, std::vector<t_real>, std::vector<t_real>>
rnd2(std::size_t N, t_real p=0.5, t_real l1=1., t_real l2=-1.)
{
	t_real x{0};
	std::vector<t_real> ns, xs, xs_mean, xs_std, xs2_mean;
	ns.reserve(N); xs.reserve(N); xs_mean.reserve(N); xs_std.reserve(N); xs2_mean.reserve(N);

	for(std::size_t n=0; n<N; ++n)
	{
		t_real r01 = tl::rand01<t_real>();
		x += (r01 < p ? l1 : l2);

		ns.push_back(n);
		xs.push_back(x);
		xs_mean.push_back(tl::mean_value(xs));          // <x>
		xs2_mean.push_back(tl::mean_square_value(xs));  // <x^2>
		//xs_std.push_back(tl::std_dev(xs, 0));         // sqrt(<x^2> - <x>^2)
		xs_std.push_back(std::sqrt(*xs2_mean.rbegin() - *xs_mean.rbegin()* *xs_mean.rbegin()));  // sqrt(<x^2> - <x>^2)
	}

	return std::make_tuple(ns, xs, xs_mean, xs_std, xs2_mean);
}

std::tuple<std::vector<t_real>, std::vector<t_real>>
rnd3(std::size_t N, t_real inc=1., t_real max=1.)
{ std::vector<t_real> Ns; std::vector<t_real> Ks; std::vector<t_real> means; Ks.reserve(N); means.reserve(N); for(std::size_t n=0; n<N; ++n) { t_real k{0}; t_real x{0}; while(1) { x += tl::rand01<t_real>(); k += inc; if(x > max) break; } Ns.push_back(n); Ks.push_back(k); means.push_back(tl::mean_value(Ks)); } return std::make_tuple(Ns, means); } int main(int argc, char** argv) { // test 1 { std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "Test 1" << std::endl; std::cout << "--------------------------------------------------------------------------------" << std::endl; std::thread th1([]{ rnd_fkt(1); }); std::thread th2([]{ rnd_fkt(2); }); th1.join(); th2.join(); std::cout << "--------------------------------------------------------------------------------\n" << std::endl; } std::size_t N = 64; if(argc > 1) N = tl::str_to_var<std::size_t>(std::string{argv[1]}); // init seed to seconds since epoch tl::init_rand_seed(std::chrono::duration_cast<std::chrono::duration<t_seed, std::ratio<1, 1>>>( std::chrono::system_clock::now().time_since_epoch()).count()); // test 2 { std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "Test 2" << std::endl; std::cout << "--------------------------------------------------------------------------------" << std::endl; std::size_t skip = 0; if(argc > 2) skip = tl::str_to_var<std::size_t>(std::string{argv[2]}); std::vector<t_real> ns, xs, xs_mean, xs_std, xs2_mean; std::tie(ns, xs, xs_mean, xs_std, xs2_mean) = rnd2(N, 0.5, 1., -1.); std::cout.precision(5); std::cout << "# " << std::setw(10) << "n" << " " << std::setw(10) << "x" << " " << std::setw(10) << "<x>" << " " << std::setw(10) << "std_dev" << " " << std::setw(10) << "<x^2>" << "\n"; for(std::size_t i=0; i<ns.size(); ++i) { if(skip && i%skip!=0) continue; std::cout << " " << std::setw(10) << ns[i] << " " << std::setw(10) << xs[i] << " " << std::setw(10) << xs_mean[i] << " " << std::setw(10) << xs_std[i] << " " << std::setw(10) << xs2_mean[i] << "\n"; } std::cout << "--------------------------------------------------------------------------------\n" << std::endl; } // test 3 { std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "Test 3" << std::endl; std::cout << "--------------------------------------------------------------------------------" << std::endl; std::vector<t_real> ns, means; std::tie(ns, means) = rnd3(N, 1., 1.); std::cout.precision(5); std::cout << "# " << std::setw(10) << "n" << " " << std::setw(10) << "<k>" << "\n"; for(std::size_t i=0; i<ns.size(); ++i) { std::cout << " " << std::setw(10) << ns[i] << " " << std::setw(10) << means[i] << "\n"; } std::cout << "--------------------------------------------------------------------------------\n" << std::endl; } return 0; }
// PutCardIntoField puts a card into the playfield for a player func (game *Game) PutCardIntoField(card Card, player *Player) (scores []Score, err error) { field, scores, err := game.Field.Play(card) if err != nil { return } game.Field = field player.AddScore(scores) game.CheckForWinner(player) player.PlayingHand = player.PlayingHand.RemoveCard(card) player.Discard.Played = append(player.Discard.Played, card) return }
def remove_loop(self, item: Union[str, List[str], Tuple[str], 'loop_mod.Loop', List['loop_mod.Loop'],
                                  Tuple['loop_mod.Loop']]) -> None:

    parsed_list: list
    if isinstance(item, tuple):
        parsed_list = list(item)
    elif isinstance(item, list):
        parsed_list = item
    elif isinstance(item, (str, loop_mod.Loop)):
        parsed_list = [item]
    else:
        raise ValueError('The item you provided was not one or more loop objects or loop categories (strings). '
                         f'Item type: {type(item)}')

    loop_names = self.loop_dict
    loops_to_remove = []

    for loop in parsed_list:
        if isinstance(loop, str):
            # normalize the category: lower case with a leading underscore
            formatted_loop = loop.lower()
            if not formatted_loop.startswith('_'):
                formatted_loop = f"_{formatted_loop}"
            if formatted_loop not in loop_names:
                raise ValueError('At least one loop specified to remove was not found in this saveframe. First '
                                 f'missing loop: {loop}')
            loops_to_remove.append(loop_names[formatted_loop])
        elif isinstance(loop, loop_mod.Loop):
            if loop not in self._loops:
                raise ValueError('At least one loop specified to remove was not found in this saveframe. First '
                                 f'missing loop: {loop}')
            loops_to_remove.append(loop)
        else:
            raise ValueError('One of the items you provided was not a loop object or loop category (string). '
                             f'Item: {repr(loop)}')

    self._loops = [_ for _ in self._loops if _ not in loops_to_remove]
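# Usage sketch (hedged): this method reads like it belongs to a
# pynmrstar-style Saveframe; the constructor calls below are assumptions
# made for illustration and may differ from the real API.
import pynmrstar

sf = pynmrstar.Saveframe.from_scratch('example', tag_prefix='_Example')
peak_loop = pynmrstar.Loop.from_scratch(category='_Peak')
sf.add_loop(peak_loop)

# Any one of these forms addresses the same loop; strings are normalized
# to lower case with a leading underscore before lookup:
sf.remove_loop('_Peak')       # by category string
# sf.remove_loop('peak')      # same loop: underscore and case are normalized
# sf.remove_loop(peak_loop)   # by Loop object
# sf.remove_loop(['_Peak'])   # list or tuple of either form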
/* Codeforces-style counting problem: given n point coordinates in ascending
 * order, count the triples of points whose spread (max - min) is at most p.
 * Two-pointer scan: for each right end i, advance the left end while the
 * window is too wide, then add C(w, 2) triples ending at i, where w is the
 * number of earlier points still inside the window.
 * Example: n=4, p=3, points 1 2 3 4 -> all 4 triples qualify.
 * (The original used the MSVC-era __int64/%I64d types and left several
 * variables unused; long long/%lld is the portable equivalent.) */
#include <stdio.h>

int main(void)
{
	static long long a[100001];
	long long t = 0;        /* answer: number of valid triples */
	int n, p, left = 1;     /* left edge of the sliding window */

	scanf("%d %d", &n, &p);
	for (int i = 1; i <= n; i++)
		scanf("%lld", &a[i]);

	for (int i = 2; i <= n; i++) {
		while (a[i] - a[left] > p)
			left++;
		long long w = i - left;   /* earlier points within distance p of a[i] */
		t += w * (w - 1) / 2;     /* choose 2 of them to join a[i] */
	}

	printf("%lld\n", t);
	return 0;
}
/** * @author Mark Hayes */ public class JoinTest extends TestBase implements TransactionWorker { private static final String MATCH_DATA = "d4"; // matches both keys = "yes" private static final String MATCH_KEY = "k4"; // matches both keys = "yes" private static final String[] VALUES = {"yes", "yes"}; private Environment env; private TransactionRunner runner; private StoredClassCatalog catalog; private TupleSerialFactory factory; private Database store; private SecondaryDatabase index1; private SecondaryDatabase index2; private StoredMap storeMap; private StoredMap indexMap1; private StoredMap indexMap2; public JoinTest() { customName = "JoinTest"; } @Before public void setUp() throws Exception { SharedTestUtils.printTestName(customName); env = TestEnv.TXN.open(customName); runner = new TransactionRunner(env); createDatabase(); } @After public void tearDown() { try { if (index1 != null) { index1.close(); } if (index2 != null) { index2.close(); } if (store != null) { store.close(); } if (catalog != null) { catalog.close(); } if (env != null) { env.close(); } } catch (Exception e) { System.out.println("Ignored exception during tearDown: " + e); } finally { /* Ensure that GC can cleanup. */ index1 = null; index2 = null; store = null; catalog = null; env = null; runner = null; factory = null; storeMap = null; indexMap1 = null; indexMap2 = null; } } @Test public void runTest() throws Exception { runner.run(this); } public void doWork() { createViews(); writeAndRead(); } private void createDatabase() throws Exception { catalog = new StoredClassCatalog(openDb("catalog.db")); factory = new TupleSerialFactory(catalog); assertSame(catalog, factory.getCatalog()); store = openDb("store.db"); index1 = openSecondaryDb(store, "index1.db", "1"); index2 = openSecondaryDb(store, "index2.db", "2"); } private Database openDb(String file) throws Exception { DatabaseConfig config = new DatabaseConfig(); DbCompat.setTypeBtree(config); config.setTransactional(true); config.setAllowCreate(true); return DbCompat.testOpenDatabase(env, null, file, null, config); } private SecondaryDatabase openSecondaryDb(Database primary, String file, String keyName) throws Exception { SecondaryConfig secConfig = new SecondaryConfig(); DbCompat.setTypeBtree(secConfig); secConfig.setTransactional(true); secConfig.setAllowCreate(true); DbCompat.setSortedDuplicates(secConfig, true); secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class, keyName)); return DbCompat.testOpenSecondaryDatabase (env, null, file, null, primary, secConfig); } private void createViews() { storeMap = factory.newMap(store, String.class, MarshalledObject.class, true); indexMap1 = factory.newMap(index1, String.class, MarshalledObject.class, true); indexMap2 = factory.newMap(index2, String.class, MarshalledObject.class, true); } private void writeAndRead() { // write records: Data, PrimaryKey, IndexKey1, IndexKey2 assertNull(storeMap.put(null, new MarshalledObject("d1", "k1", "no", "yes"))); assertNull(storeMap.put(null, new MarshalledObject("d2", "k2", "no", "no"))); assertNull(storeMap.put(null, new MarshalledObject("d3", "k3", "no", "yes"))); assertNull(storeMap.put(null, new MarshalledObject("d4", "k4", "yes", "yes"))); assertNull(storeMap.put(null, new MarshalledObject("d5", "k5", "yes", "no"))); Object o; Map.Entry e; // join values with index maps o = doJoin((StoredCollection) storeMap.values()); assertEquals(MATCH_DATA, ((MarshalledObject) o).getData()); // join keySet with index maps o = doJoin((StoredCollection) storeMap.keySet()); 
assertEquals(MATCH_KEY, o); // join entrySet with index maps o = doJoin((StoredCollection) storeMap.entrySet()); e = (Map.Entry) o; assertEquals(MATCH_KEY, e.getKey()); assertEquals(MATCH_DATA, ((MarshalledObject) e.getValue()).getData()); } private Object doJoin(StoredCollection coll) { StoredContainer[] indices = { indexMap1, indexMap2 }; StoredIterator i = coll.join(indices, VALUES, null); try { assertTrue(i.hasNext()); Object result = i.next(); assertNotNull(result); assertFalse(i.hasNext()); return result; } finally { i.close(); } } }
/** A base pair which is classified and quantified with numerical parameters. */ @Value.Immutable public abstract class QuantifiedBasePair implements ClassifiedBasePair { @Value.Parameter(order = 1) public abstract BasePair basePair(); @Value.Default public InteractionType interactionType() { return ClassifiedBasePair.super.interactionType(); } @Value.Default public Saenger saenger() { return ClassifiedBasePair.super.saenger(); } @Value.Default public LeontisWesthof leontisWesthof() { return ClassifiedBasePair.super.leontisWesthof(); } @Value.Default public BPh bph() { return ClassifiedBasePair.super.bph(); } @Value.Default public BR br() { return ClassifiedBasePair.super.br(); } @Value.Default @Value.Auxiliary public boolean isRepresented() { return ClassifiedBasePair.super.isRepresented(); } @Override public ClassifiedBasePair invert() { return ImmutableQuantifiedBasePair.copyOf(this) .withBasePair(basePair().invert()) .withInteractionType(interactionType().invert()) .withLeontisWesthof(leontisWesthof().invert()); } /** @return The value of shear parameter. */ @Value.Parameter(order = 2) @Value.Auxiliary public abstract double shear(); /** @return The value of stretch parameter. */ @Value.Parameter(order = 3) @Value.Auxiliary public abstract double stretch(); /** @return The value of stagger parameter. */ @Value.Parameter(order = 4) @Value.Auxiliary public abstract double stagger(); /** @return The value of buckle parameter. */ @Value.Parameter(order = 5) @Value.Auxiliary public abstract double buckle(); /** @return The value of propeller parameter. */ @Value.Parameter(order = 6) @Value.Auxiliary public abstract double propeller(); /** @return The value of opening parameter. */ @Value.Parameter(order = 7) @Value.Auxiliary public abstract double opening(); }
type NanoSecond = u64; type Inch = u64; #[allow(non_camel_case_types)] type u64_t = u64; fn main() { // Aliases do NOT provide extra type safety! // Aliases are NOT new types. let nanoseconds: NanoSecond = 5 as u64_t; let inches: Inch = 2 as u64_t; print!( "{}ns + {}in = {} ??", nanoseconds, inches, nanoseconds + inches ); // Aliases only reduce boilerplate. }
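// Companion sketch: when type safety *is* wanted, the usual fix is the
// newtype pattern (a one-field tuple struct) rather than an alias. The
// names below are illustrative, not taken from the file above.
struct NanoSeconds(u64);
struct Inches(u64);

fn main() {
    let ns = NanoSeconds(5);
    let inches = Inches(2);

    // Unwrapping with .0 is an explicit opt-in...
    println!("{}ns and {}in stay distinct types", ns.0, inches.0);

    // ...whereas mixing the wrappers themselves is a compile error:
    // let sum = ns + inches;
    // error[E0369]: cannot add `Inches` to `NanoSeconds`
}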