// CreateComponentFromDevfile creates a HAS component from a given name, namespace, application, devfile and container image
func (h *SuiteController) CreateComponentFromDevfile(applicationName, componentName, namespace, gitSourceURL, devfile, containerImageSource, outputContainerImage, secret string) (*appservice.Component, error) {
	var containerImage string
	if outputContainerImage != "" {
		containerImage = outputContainerImage
	} else {
		containerImage = containerImageSource
	}
	component := &appservice.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name:      componentName,
			Namespace: namespace,
		},
		Spec: appservice.ComponentSpec{
			ComponentName: componentName,
			Application:   applicationName,
			Source: appservice.ComponentSource{
				ComponentSourceUnion: appservice.ComponentSourceUnion{
					GitSource: &appservice.GitSource{
						URL:        gitSourceURL,
						DevfileURL: devfile,
					},
				},
			},
			Secret:         secret,
			ContainerImage: containerImage,
			Replicas:       1,
			TargetPort:     8080,
			Route:          "",
		},
	}
	err := h.KubeRest().Create(context.TODO(), component)
	if err != nil {
		return nil, err
	}
	return component, nil
}
def delete(key, backend=None):
    backend = backend or 'default'
    cache = get_cache(backend)
    key = _generate_key(key)
    val = cache.delete(key)
    logger.debug("Cache DELETE: %s" % key)
    return val
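A hedged usage sketch for the helper above. `get_cache`, `_generate_key` and `logger` are assumed to be provided by the surrounding cache module; they are stubbed here only so the example can run together with the `delete` function as written:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("cache")

_store = {}

class _StubCache:
    def set(self, key, value):
        _store[key] = value
    def delete(self, key):
        return _store.pop(key, None)

def get_cache(backend):          # stand-in for the real backend lookup
    return _StubCache()

def _generate_key(key):          # stand-in for the real key namespacing
    return "cache:%s" % key

get_cache("default").set(_generate_key("user:42"), {"name": "Ada"})
delete("user:42")        # removes the namespaced key from the default backend
delete("user:42")        # a second call simply returns None here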
/// STEP 1: use counting sort to sort LMS chars fn sort_lms_chars(&mut self, n1: usize) { let mut lo_char = T::one(); // sentinel dealt with as a special case let mut hi_char; let mut output_curr_head = self.n - n1 + 1; // plus one to skip sentinel // println!("sigma: {}|half_n: {}", self.sigma, self.n / 2 + 1); let mut half_n = T::from(self.n / 2).unwrap() + T::one(); // * since the sentinel is dealt with as a special case at the end, we can use one more slot if self.n % 2 == 1 { // * when n is odd, we can use one more slot. // * for example, when n = 9, there will be a maximum of 4 LMS chars, // * one of which is sentinel, so there are 3 LMS chars need to be sorted // * by counting sort, leaving 6 slots for counting array half_n = half_n + T::one(); } loop { hi_char = lo_char + half_n; let range = (hi_char - lo_char).to_usize().unwrap(); let mut s_i_is_s = false; // `s[n - 2]` must be L, because it is greater than the sentinel at `s[n - 1]` let mut s_im1_is_s; let mut s_i = self.s[self.n - 2]; let mut s_im1; for i_minus_1 in (0..self.n - 2).rev() { s_im1 = self.s[i_minus_1]; s_im1_is_s = s_im1 < s_i || (s_im1 == s_i && s_i_is_s); if !s_im1_is_s && s_i_is_s { if lo_char <= s_i && s_i < hi_char { let idx_in_counting_arr = (s_i - lo_char).to_usize().unwrap(); self.sa[idx_in_counting_arr] += 1; } } s_i = s_im1; s_i_is_s = s_im1_is_s; } // accumulation let mut prev = self.sa[0]; let mut curr; for i in 1..range { curr = &mut self.sa[i]; *curr += prev; prev = *curr; } // `prev` is the total number of LMS chars in this interval, and // will be added to `output_curr_head` after processing this interval // Scan S again to place LMS chars s_i_is_s = false; // `s[n - 2]` must be L, because it is greater than the sentinel at `s[n - 1]` s_i = self.s[self.n - 2]; // `i_minus_1` ranges from `n-3` to `0` inclusive, meaning `i` ranges from `n-2` to `1` inclusive. // `s[0]` must not be an LMS character by definition so it is fine that `i` does not include `0`. // `s[n-1]` is the sentinel character which is dealt with as a special case later let mut i = self.n - 2; for i_minus_1 in (0..self.n - 2).rev() { s_im1 = self.s[i_minus_1]; s_im1_is_s = s_im1 < s_i || (s_im1 == s_i && s_i_is_s); if !s_im1_is_s && s_i_is_s { // `s[i]` is LMS if lo_char <= s_i && s_i < hi_char { // println!("{} {} {}", lo_char, s_i, hi_char); // if in current interval let idx_in_counting_arr = (s_i - lo_char).to_usize().unwrap(); let idx_in_output_without_offset = &mut self.sa[idx_in_counting_arr]; *idx_in_output_without_offset -= 1; self.sa[output_curr_head + *idx_in_output_without_offset] = i; } } i = i_minus_1; s_i = s_im1; s_i_is_s = s_im1_is_s; } self.sa[0..range].fill(0); // clear the counting array if hi_char > self.sigma { break; } lo_char = hi_char; output_curr_head += prev; // recall that prev is the number of LMS chars in this interval } self.sa[self.n - n1] = self.n - 1; // sentinel as a special case }
package com.wd.elm;

import me.ryanhamshire.GriefPrevention.Claim;
import me.ryanhamshire.GriefPrevention.GriefPrevention;
import org.bukkit.Location;

public class GriefPreventionConnector {
    public static String LookupPlayerFromLocation(Location loc) {
        try {
            Claim claim = GriefPrevention.instance.dataStore.getClaimAt(loc, true, null);
            return claim.getOwnerName();
        } catch (Exception ex) {
            // No grief prevention
            return "";
        }
    }
}
<gh_stars>1-10 package osgl.func; /*- * #%L * OSGL Core * %% * Copyright (C) 2017 OSGL (Open Source General Library) * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import org.junit.Before; import org.junit.Test; import org.junit.experimental.runners.Enclosed; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import osgl.exception.E; import osgl.ut.TestBase; import java.util.ArrayList; import java.util.List; import java.util.function.BiConsumer; @RunWith(Enclosed.class) public class Proc2Test { @RunWith(MockitoJUnitRunner.class) public static class Proc2TestBase extends TestBase { protected List<String> strings1 = new ArrayList<>(); protected List<String> strings2 = new ArrayList<>(); protected Proc2<String, String> addToStrings = (s1, s2) -> { strings1.add(s1); strings2.add(s2); }; @Mock private Proc2<Object, Object> mockProc2; @Before public void prepare() { strings1.clear(); strings2.clear(); } @Test public void itShallCallRunIfInvokeAccept() { Object p1 = new Object(); Object p2 = new Object(); Mockito.doCallRealMethod().when(mockProc2).accept(p1, p2); mockProc2.accept(p1, p2); Mockito.verify(mockProc2, Mockito.times(1)).run(p1, p2); } } public static class CompositionTest extends Proc2TestBase { private BiConsumer<String, String> after = (s1, s2) -> { strings1.add("'" + s1 + "'"); strings2.add("'" + s2 + "'"); }; private BiConsumer<String, String> before = after; @Test public void itShallRunAfterProcedureAfterThisProcedure() { addToStrings.andThen(after).run("foo", "bar"); eq("foo", strings1.get(0)); eq("'foo'", strings1.get(1)); eq("bar", strings2.get(0)); eq("'bar'", strings2.get(1)); } @Test public void itShallRunBeforeProcedureBeforeThisProcedure() { addToStrings.nowThat(before).run("foo", "bar"); eq("foo", strings1.get(1)); eq("'foo'", strings1.get(0)); eq("bar", strings2.get(1)); eq("'bar'", strings2.get(0)); } } public static class FallbackTest extends Proc2TestBase { Proc2<String, String> failCase = (s1, s2) -> {throw E.unexpected();}; BiConsumer<String, String> fallback = (s1, s2) -> { strings1.add("**" + s1 + "**"); strings2.add("**" + s2 + "**"); }; @Test public void itShallNotCallfallbackIfNoException() { addToStrings.runOrElse("foo", "bar", fallback); yes(strings1.contains("foo")); no(strings1.contains("**foo**")); yes(strings2.contains("bar")); no(strings2.contains("**bar**")); strings1.clear(); strings2.clear(); addToStrings.orElse(fallback).run("foo", "bar"); yes(strings1.contains("foo")); no(strings1.contains("**foo**")); yes(strings2.contains("bar")); no(strings2.contains("**bar**")); } @Test public void itShallCallFallbackIfExceptionEncountered() { failCase.runOrElse("foo", "bar", fallback); no(strings1.contains("foo")); yes(strings1.contains("**foo**")); no(strings2.contains("bar")); yes(strings2.contains("**bar**")); strings1.clear(); strings2.clear(); failCase.orElse(fallback).run("foo", "bar"); no(strings1.contains("foo")); yes(strings1.contains("**foo**")); 
no(strings2.contains("bar")); yes(strings2.contains("**bar**")); } } public static class ConversionTest extends Proc2TestBase { @Test @SuppressWarnings("ReturnValueIgnored") public void testToFunction() { addToStrings.toFunction().apply("foo", "bar"); yes(strings1.contains("foo")); yes(strings2.contains("bar")); } @Test public void testCurrying() { addToStrings.curry("bar").run("foo"); yes(strings1.contains("foo")); yes(strings2.contains("bar")); } } public static class FactoryTest extends Proc2TestBase { @Test public void testOfConsumer() { BiConsumer<CharSequence, CharSequence> consumer = (cs1, cs2) -> { strings1.add(cs1.toString()); strings2.add(cs2.toString()); }; Proc2.of(consumer).run("foo", "bar"); yes(strings1.contains("foo")); yes(strings2.contains("bar")); } } }
/** * Created by SteveYang on 17/1/22. */ public class ChatMessage extends TinyMessage implements Serializable{ public static final String MESSAGE_KEY = "msg"; private String message; private long time; public ChatMessage(String message, long time){ this.message = message; this.time = time; } public String getMessage(){ return this.message; } public long getTime(){ return this.time; } public String toJSONString(){ JSONObject json = new JSONObject(); try{ json.put(MESSAGE_KEY,this.message); json.put(CLIENT_TIME_KEY,this.time); }catch (Exception ignore){} return json.toString(); } public String getHumanReadableTime(){ return Util.getHumanReadableTime(this.time); } }
use super::super::mp; use super::super::{Long, ULong}; use super::integer::Integer; impl From<ULong> for Integer { fn from(from: ULong) -> Integer { let mut x: mp::__mpz_struct = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; unsafe { mp::__gmpz_init_set_ui(&mut x, from); } Integer { data: x } } } impl From<Long> for Integer { fn from(from: Long) -> Integer { let mut x: mp::__mpz_struct = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; unsafe { mp::__gmpz_init_set_si(&mut x, from); } Integer { data: x } } } impl From<f64> for Integer { fn from(from: f64) -> Integer { let mut x: mp::__mpz_struct = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; unsafe { mp::__gmpz_init_set_d(&mut x, from); } Integer { data: x } } } impl From<&Integer> for ULong { fn from(from: &Integer) -> ULong { unsafe { mp::__gmpz_get_ui(&from.data) } } } impl From<&Integer> for Long { fn from(from: &Integer) -> Long { unsafe { mp::__gmpz_get_si(&from.data) } } } impl From<&Integer> for f64 { fn from(from: &Integer) -> f64 { unsafe { mp::__gmpz_get_d(&from.data) } } }
Flufenamic acid blocks depolarizing afterpotentials and phasic firing in rat supraoptic neurones

Depolarizing afterpotentials (DAPs) that follow action potentials in magnocellular neurosecretory cells (MNCs) are thought to underlie the generation of phasic firing, a pattern that optimizes vasopressin release from the neurohypophysis. Previous work has suggested that the DAP may result from the Ca2+‐dependent reduction of a resting K+ conductance. Here we examined the effects of flufenamic acid (FFA), a blocker of Ca2+‐dependent non‐selective cation (CAN) channels, on DAPs and phasic firing using intracellular recordings from supraoptic MNCs in superfused explants of rat hypothalamus. Application of FFA, but not solvent (0.1 % DMSO), reversibly inhibited (IC50 = 13.8 μM; R = 0.97) DAPs and phasic firing with a similar time course, but had no significant effects (P > 0.05) on membrane potential, spike threshold and input resistance, nor on the frequency and amplitude of spontaneous synaptic potentials. Moreover, FFA did not affect (P > 0.05) the amplitude, duration, undershoot, or frequency‐dependent broadening of action potentials elicited during the spike trains used to evoke DAPs. These findings suggest that FFA inhibits the DAP by directly blocking the channels responsible for its production, rather than by interfering with Ca2+ influx. They also support a role for DAPs in the generation of phasic firing in MNCs. Finally, the absence of a depolarization and increased membrane resistance upon application of FFA suggests that the DAP in MNCs may not be due to the inhibition of resting K+ current, but to the activation of CAN channels.
#include <bits/stdc++.h> using namespace std; int main(){ int n,i; while (scanf("%d",&n)==1){ if (n<=5) printf("-1\n"); else { for (i=2;i<=4;++i) printf("1 %d\n",i); for (i=5;i<=n;++i) printf("2 %d\n",i); } for (i=1;i<n;++i) printf("%d %d\n",i,i+1); } return 0; }
class DiscordMessageBase: """The base class for representing Discord messages :param str message: (Optional) The Discord message; default is None :param str user: (Optional) The sender of the message; defualt is None :param int cmd_type: (Optional) The slash command type used to send the message; default is CommandType.NONE """ def __init__( self, message: Optional[str] = None, user: Optional[str] = None, cmd_type: int = CommandType.NONE, ) -> None: self._message = message self._user = user self._cmd_type = cmd_type def __repr__(self) -> str: return "{0}: {1}".format(self.user, self.message) def __str__(self) -> str: return str(self._message) def __eq__(self, other: Any) -> bool: if other is None or not isinstance( other, DiscordMessageBase ): # TODO: can probably just use isinstance() return False if other._message == self._message and other._user == self._user: return True return False def __add__(self, other: str) -> "DiscordMessageBase": if isinstance(other, DiscordMessageBase): self._message += other._message return self if isinstance(other, str): self._message += other return self raise TypeError("Can only add strings or other Discord messages") def __contains__(self, value: str) -> bool: if isinstance(value, str): return value in self._message raise TypeError("Can only check messages for strings") @property def user(self) -> Optional[str]: """The user that sent the message, including the number after username""" return self._user @property def message(self) -> Optional[str]: """The message that was sent""" return self._message @property def username(self) -> Optional[str]: """The username of the person that sent the message""" return self._user[:-5] if self._user is not None else None @property def cmd_type(self) -> Optional[int]: """The slash command type used to send the message""" return self._cmd_type @cmd_type.setter def cmd_type(self, command: int) -> None: self._cmd_type = command def to_json(self) -> Dict[str, Any]: """Converts the message object into an equivalent dict, must be implemented in subclasses of DiscordMessageBase""" raise NotImplementedError("Must be defined in subclass") def from_json(self, payload: Dict[str, Any]) -> None: """Converts a dict into the equivalent DiscordMessageBase object, must be implemented in subclasses of DiscordMessageBase """ raise NotImplementedError("Must be defined in subclass")
////////////////////////////////////////////////////////////////////////
//
// Description:
//    Returns TRUE if action is an instance of an action of the given type
//    or an instance of a subclass of it.
//
// Use: public

SbBool
SoAction::isOfType(SoType type) const
{
    return getTypeId().isDerivedFrom(type);
}
package com.telenav.mesakit.plugins.josm.graph.view.tabs.query; import com.telenav.kivakit.kernel.language.progress.ProgressReporter; import com.telenav.kivakit.kernel.language.progress.reporters.Progress; import com.telenav.kivakit.kernel.language.strings.Strings; import com.telenav.kivakit.kernel.language.values.count.Count; import com.telenav.kivakit.kernel.language.values.count.Maximum; import com.telenav.kivakit.kernel.language.values.mutable.MutableValue; import com.telenav.kivakit.ui.desktop.component.icon.search.MagnifyingGlass; import com.telenav.kivakit.ui.desktop.component.panel.stack.CardPanel; import com.telenav.kivakit.ui.desktop.component.progress.ProgressPanel; import com.telenav.kivakit.ui.desktop.layout.Borders; import com.telenav.kivakit.ui.desktop.layout.HorizontalBox; import com.telenav.kivakit.ui.desktop.layout.Size; import com.telenav.kivakit.ui.desktop.layout.Spacing; import com.telenav.kivakit.ui.desktop.layout.VerticalBoxLayout; import com.telenav.kivakit.ui.desktop.theme.KivaKitColors; import com.telenav.kivakit.ui.desktop.theme.KivaKitTheme; import com.telenav.mesakit.graph.collections.EdgeSet; import com.telenav.mesakit.graph.query.GraphQuery; import com.telenav.mesakit.map.geography.shape.rectangle.Rectangle; import com.telenav.mesakit.plugins.josm.graph.view.GraphLayer; import com.telenav.mesakit.plugins.josm.graph.view.GraphPanel; import com.telenav.mesakit.plugins.josm.graph.view.tabs.search.UserFeedback; import javax.swing.JPanel; import javax.swing.JTextField; import javax.swing.SwingUtilities; import javax.swing.UIManager; import java.awt.event.ActionListener; /** * @author jonathanl (shibo) */ public class QueryPanel extends JPanel { private enum Mode { QUERY_TOOLS, PROGRESS_BAR } private JTextField searchField; private MatchesPanel matches; private final GraphPanel graphPanel; private CardPanel cardPanel; private final ProgressReporter searchProgress = Progress.create(); private GraphQuery graphQuery; public QueryPanel(GraphPanel graphPanel) { this.graphPanel = graphPanel; Borders.applyMargin(this, 8); // Add the cards panel and the matches panel in a vertical box new VerticalBoxLayout(this) .add(cardPanel()) .add(matchesPanel()); // then start by playing the query tools card mode(Mode.QUERY_TOOLS); } public MatchesPanel matchesPanel() { if (matches == null) { matches = new MatchesPanel(graphPanel); } return matches; } private CardPanel cardPanel() { if (cardPanel == null) { // Add the query tools and progress panel as cards cardPanel = new CardPanel(); cardPanel.addCard(queryTools(), "query-tools"); cardPanel.addCard(progressPanel(), "progress-bar"); Size.heightOf(32).maximum(cardPanel); } return cardPanel; } private void feedback(UserFeedback feedback) { if (feedback != null) { if (feedback.status() != null) { graphPanel.status(feedback.status()); } } } private void mode(Mode mode) { SwingUtilities.invokeLater(() -> { if (mode == Mode.PROGRESS_BAR) { cardPanel.show("progress-bar"); } else { cardPanel.show("query-tools"); } }); } private ProgressPanel progressPanel() { // Create progress panel that tracks search progress searchProgress.reset(); return new ProgressPanel(searchProgress, 300, completion -> { // and when progress completes, stop any query and show the search card graphQuery.stop(); mode(Mode.QUERY_TOOLS); }); } private HorizontalBox queryTools() { var theme = KivaKitTheme.get(); graphPanel.overrideMenuAcceleratorKeys(searchField()); // Search when the query button is pushed or return is hit in the search field ActionListener search = 
searchAction(graphPanel); var searchButton = theme.newButton("query"); searchButton.setFont(theme.fontNormal()); searchButton.addActionListener(search); searchField().addActionListener(search); // Add the search field and button to a query tools box return new HorizontalBox(Spacing.MANUAL_SPACING, 24) .add(new MagnifyingGlass()) .add(searchField()) .add(searchButton); } private ActionListener searchAction(GraphPanel graphPanel) { return event -> { if (graphPanel.layer() != null) { var searchString = searchField().getText(); if (!Strings.isEmpty(searchString)) { searchField().setSelectionStart(0); searchField().setSelectionEnd(searchString.length()); if (searchString.startsWith("select")) { var viewBounds = graphPanel.layer().model().bounds(); mode(Mode.PROGRESS_BAR); select(searchString, viewBounds); } } } else { graphPanel.status("No graph layer is selected. To load a graph use File/Open..."); } }; } private JTextField searchField() { if (searchField == null) { searchField = KivaKitTheme.get().newTextField(); Borders.applyMargin(searchField, 8); Size.widthOf(1_000).preferred(cardPanel); UIManager.getDefaults().put("TextPane.background", KivaKitColors.DARK_CHARCOAL.asAwtColor()); } return searchField; } private void select(String query, Rectangle viewBounds) { matchesPanel().clear(); new Thread(() -> { try { // Get the candidate edges within the view area var candidates = graphPanel.layer().graph().edgesIntersecting(viewBounds); // and if the number of candidates var count = candidates.count(); // is large enough, then show the progress bar SwingUtilities.invokeLater(() -> mode(count.isGreaterThan(Count._10_000) ? Mode.PROGRESS_BAR : Mode.QUERY_TOOLS)); var error = new MutableValue<String>(); graphQuery = new GraphQuery(); searchProgress.reset(); searchProgress.steps(count.asMaximum()); var result = graphQuery.execute(searchProgress, candidates, query, Maximum.maximum(1_000), error::set); if (error.get() != null) { feedback(UserFeedback.status(error.get())); } else { var edges = new EdgeSet(); for (var element : result) { if (element != null) { edges.add(element); } } graphPanel.layer().show(edges, GraphLayer.Show.HIGHLIGHT_ONLY); SwingUtilities.invokeLater(() -> matchesPanel().addAll(result)); feedback(UserFeedback.html("Found " + edges.size() + " matching edges")); } } catch (Exception e) { feedback(UserFeedback.status(e.getMessage())); } mode(Mode.QUERY_TOOLS); }, "QuerySelect").start(); } }
import { CONTEXT, MEDICATION_ATC, MEDICATION_PRESCRIPTION_TYPES, MEDICATION_ADMINISTRATIONS } from '../../constants' import apiRequest from '../apiRequest' import { cleanValueSet } from 'utils/cleanValueSet' import { codeSort } from '../../utils/alphabeticalSort' import { capitalizeFirstLetter } from '../../utils/capitalize' export const fetchAtcData = async (searchValue?: string, noStar?: boolean) => { noStar = noStar === undefined ? true : noStar if (CONTEXT === 'arkhn') { return [] } else if (CONTEXT === 'fakedata') { return [] } else { if (!searchValue) { return [] } const _searchValue = noStar ? searchValue ? `&code=${searchValue.trim().replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, '\\$&')}` //eslint-disable-line : '' : searchValue ? `&_text=${searchValue.trim().replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, '\\$&')}*` //eslint-disable-line : '' const res = await apiRequest.get<any>(`/ValueSet?url=${MEDICATION_ATC}${_searchValue}&size=0`) const data = res && res.data && res.data.entry && res.data.resourceType === 'Bundle' ? res.data.entry[0].resource?.compose?.include[0].concept : [] return data && data.length > 0 ? data.sort(codeSort).map((_data: { code: string; display: string }) => ({ id: _data.code, label: `${_data.code} - ${capitalizeFirstLetter(_data.display)}`, subItems: [{ id: 'loading', label: 'loading', subItems: [] }] })) : [] } } export const fetchAtcHierarchy = async (atcParent: string) => { if (CONTEXT === 'arkhn') { return null } else if (CONTEXT === 'fakedata') { return null } else { if (!atcParent) { const res = await apiRequest.get<any>(`/ValueSet?url=${MEDICATION_ATC}`) let ATCList = res && res.data && res.data.entry && res.data.entry[0] && res.data.resourceType === 'Bundle' ? res.data.entry[0].resource.compose.include[0].concept : [] ATCList = ATCList && ATCList.length > 0 ? ATCList.sort(codeSort) .map((atcData: any) => ({ id: atcData.code, label: `${atcData.code} - ${atcData.display}`, subItems: [{ id: 'loading', label: 'loading', subItems: [] }] })) // V--[ @TODO: This is a hot fix, remove this after a clean of data ]--V .filter((atcData: any) => atcData.label.search(new RegExp(/^[A-Z] - /, 'gi')) !== -1) .filter((atcData: any) => atcData.label.search(new RegExp(/^[X-Y] - /, 'gi')) !== 0) : [] return ATCList } else { const json = { resourceType: 'ValueSet', url: MEDICATION_ATC, compose: { include: [ { filter: [ { op: 'is-a', value: atcParent ?? '' } ] } ] } } const res = await apiRequest.post<any>(`/ValueSet/$expand`, JSON.stringify(json)) let ATCList = res && res.data && res.data.expansion && res.data.expansion.contains && res.data.resourceType === 'ValueSet' ? res.data.expansion.contains : [] ATCList = ATCList && ATCList.length > 0 ? 
ATCList.sort(codeSort).map((atcData: any) => ({ id: atcData.code, label: `${atcData.code} - ${atcData.display}`, subItems: [{ id: 'loading', label: 'loading', subItems: [] }] })) : [] return ATCList } } } export const fetchPrescriptionTypes = async () => { if (CONTEXT === 'arkhn') { return [] } else if (CONTEXT === 'fakedata') { return [] } else { try { const res = await apiRequest.get<any>(`/ValueSet?url=${MEDICATION_PRESCRIPTION_TYPES}`) const data = res.data.entry[0].resource?.compose?.include[0].concept || [] if (data && data.length > 0) { return cleanValueSet(data) } else { return [] } } catch (error) { return [] } } } export const fetchAdministrations = async () => { if (CONTEXT === 'arkhn') { return [] } else if (CONTEXT === 'fakedata') { return [] } else { try { const res = await apiRequest.get<any>(`/ValueSet?url=${MEDICATION_ADMINISTRATIONS}`) const data = res.data.entry[0].resource?.compose?.include[0].concept || [] if (data && data.length > 0) { return cleanValueSet(data) } else { return [] } } catch (error) { return [] } } }
Alan Diaz/Associated Press

Tennis ace Serena Williams is often unstoppable on the court, but a stint with DVT in 2011 nearly took her down, according to an article by ABC News. Williams, now 37, had undergone two surgeries on her foot after cutting it on a piece of glass. After a cross-country flight from New York to Los Angeles, she was having her foot checked out by a doctor when she was rushed to the hospital to treat complications stemming from a previous pulmonary embolism. Between the two surgery recovery periods and the long flight, even this super-fit athlete was at risk for a DVT.

Further complications arose when Williams gave birth to her first daughter in September 2017, noted an article published in February 2018 in the magazine Vogue. A day after her C-section, Williams was feeling short of breath and correctly assumed she was having another pulmonary embolism. Her wound from the C-section came open from her coughing fits, and when she returned to surgery, they found a hematoma in her abdomen and her surgeon placed a filter into a major vein in her leg to prevent more blood clots from traveling into the lungs. After a successful recovery and time spent with her newborn daughter, Williams was cleared by her doctors to compete in the 2018 U.S. Open.
/** * @author Eugene Savin */ public class SignatureAttribute extends Attribute { @FieldOrder(index = 3) private short signatureIndex; public short getSignatureIndex() { return signatureIndex; } public void setSignatureIndex(short signatureIndex) { this.signatureIndex = signatureIndex; } }
Geothermal exploration involves a high degree of uncertainty and financial risk, and requires reliable exploration data to constrain development decisions. Geothermal potential is usually related to several parameters such as acidic volcanic and intrusive rocks, volcanoes, faults, hot springs, geothermal alteration, etc. These parameters can be assessed using geological, petrological, geochemical and geophysical analyses. In this research, the binary index overlay and fuzzy logic methods were used to integrate the available data (volcanic and intrusive rocks, volcanoes, hot springs and faults) utilizing ArcGIS v.10.2 software. The research was carried out on a case study in NW Iran (East Azarbayejan Province 1:250,000 sheet). It was recognized that the central parts of the study area have more potential for detailed exploration in the future, and the results show that these data-integration methods can be very practical in geothermal resource studies.
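The index-overlay and fuzzy-logic integration described above is, at its core, a per-cell combination of evidence layers on a common grid. A minimal, self-contained numpy sketch of that idea follows; the layer names, weights, thresholds and the gamma value are illustrative assumptions, not values from the study:

import numpy as np

# Toy evidence layers on a common grid, each already rescaled to [0, 1]
# (e.g. proximity to faults, hot springs, volcanoes, favourable lithology).
rng = np.random.default_rng(0)
faults, springs, volcanoes, lithology = rng.random((4, 50, 50))
layers = np.stack([faults, springs, volcanoes, lithology])

# Binary index overlay: threshold each layer, then take a weighted sum
# of the resulting 0/1 maps (weights are illustrative).
weights = np.array([0.3, 0.3, 0.2, 0.2])
binary = (layers > 0.5).astype(float)
index_overlay = np.tensordot(weights, binary, axes=1)

# Fuzzy overlay: treat each layer as a fuzzy membership and combine the
# fuzzy algebraic product and sum with a gamma operator.
fuzzy_product = np.prod(layers, axis=0)
fuzzy_sum = 1.0 - np.prod(1.0 - layers, axis=0)
gamma = 0.9
fuzzy_gamma = (fuzzy_sum ** gamma) * (fuzzy_product ** (1.0 - gamma))

# Cells scoring high under both schemes are candidates for detailed exploration.
favourable = (index_overlay > 0.6) & (fuzzy_gamma > 0.5)
print(favourable.sum(), "of", favourable.size, "cells flagged")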
Indian scientists developing snake robot

Indian scientists are developing snake robots that could help save lives in disasters and accidents and aid surveillance. Two prototypes of the Snake Robot for Search and Rescue Missions, called SARP (Snake-like Articulated Robot Platform), have been designed by scientists of the department of mechanical and aerospace engineering at the Indian Institute of Technology-Hyderabad (IIT-H).

"In a disaster site, like a collapsed building in an earthquake, a building on fire, or a dangerous environment, like a nuclear power plant in an accident, a snake robot can be used to access difficult-to-reach spaces and look for survivors under the debris," R. Prasanth Kumar, associate professor at the department, told IANS. "It can then relay valuable information about the environment and help rescue workers in planning their missions," Kumar said.

Developed from fire-proof ABS plastic, the prototypes (about a metre in length) use a snake-like motion that helps in navigating rough terrain, he said. The robots can also communicate with each other. "When deployed in a search and rescue operation or a surveillance mission (defence-related), snake robots communicate with each other and with a central station from a cyber-physical system through various sensors such as video camera, GPS, infrared and ultrasonic range finders," Kumar said.

Further, the robots could also touch and identify survivors. "We are working on improving the semi-autonomous performance of the snake robot for navigation and haptic feedback for survivor detection. Haptics is the science of applying touch (tactile) sensation and control to interaction with computer applications, and the robot would be able to touch and identify survivors," explained Kumar.

The project is part of the 'Innovation hub for cyber-physical systems' sponsored by the department of electronics and information technology, ministry of communications and IT. The prototypes have been built with motors sourced from abroad, but when manufactured locally the cost would be around Rs. 20,000, he said.

Source: IANS
/* returns distance to block in dir */ int algo_distance(int *js_level, const int w, const int h, const int x, const int y, const int dir, const int *ignored_ids, const int ignored_ids_len) { int distance = 0; if (dir == -2) { for (int i = y; i >= 0; --i) { const int id = js_level[x + i * w]; char in_ignored = 0; for (int j = 0; j < ignored_ids_len; ++j) { if (ignored_ids[j] == id) { in_ignored = 1; break; } } if (!in_ignored) return distance; ++distance; } } else if (dir == 1) { for (int i = x; i < w; ++i) { const int id = js_level[i + y * w]; char in_ignored = 0; for (int j = 0; j < ignored_ids_len; ++j) { if (ignored_ids[j] == id) { in_ignored = 1; break; } } if (!in_ignored) return distance; ++distance; } } else if (dir == 2) { for (int i = y; i < h; ++i) { const int id = js_level[x + i * w]; char in_ignored = 0; for (int j = 0; j < ignored_ids_len; ++j) { if (ignored_ids[j] == id) { in_ignored = 1; break; } } if (!in_ignored) return distance; ++distance; } } else if (dir == -1) { for (int i = x; i >= 0; --i) { const int id = js_level[i + y * w]; char in_ignored = 0; for (int j = 0; j < ignored_ids_len; ++j) { if (ignored_ids[j] == id) { in_ignored = 1; break; } } if (!in_ignored) return distance; ++distance; } } return distance; }
// NewUPFInterfaceInfo parse the InterfaceUpfInfoItem to generate UPFInterfaceInfo
func NewUPFInterfaceInfo(i *factory.InterfaceUpfInfoItem) *UPFInterfaceInfo {
	interfaceInfo := new(UPFInterfaceInfo)
	interfaceInfo.IPv4EndPointAddresses = make([]net.IP, 0)
	interfaceInfo.IPv6EndPointAddresses = make([]net.IP, 0)
	logger.CtxLog.Infoln("Endpoints:", i.Endpoints)
	for _, endpoint := range i.Endpoints {
		eIP := net.ParseIP(endpoint)
		if eIP == nil {
			interfaceInfo.EndpointFQDN = endpoint
		} else if eIPv4 := eIP.To4(); eIPv4 == nil {
			interfaceInfo.IPv6EndPointAddresses = append(interfaceInfo.IPv6EndPointAddresses, eIP)
		} else {
			interfaceInfo.IPv4EndPointAddresses = append(interfaceInfo.IPv4EndPointAddresses, eIPv4)
		}
	}
	interfaceInfo.NetworkInstance = i.NetworkInstance
	return interfaceInfo
}
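The Go constructor above classifies each configured endpoint as an FQDN, an IPv4 address or an IPv6 address. The same classification idea can be sketched in Python with the standard ipaddress module; the endpoint list below is made up purely for illustration, and unlike the Go code this sketch collects every FQDN rather than keeping only the last one:

import ipaddress

def classify_endpoints(endpoints):
    ipv4, ipv6, fqdns = [], [], []
    for endpoint in endpoints:
        try:
            ip = ipaddress.ip_address(endpoint)
        except ValueError:
            fqdns.append(endpoint)  # not parseable as an IP -> treat as FQDN
            continue
        if isinstance(ip, ipaddress.IPv4Address):
            ipv4.append(ip)
        else:
            ipv6.append(ip)
    return ipv4, ipv6, fqdns

# Illustrative endpoints only.
print(classify_endpoints(["198.51.100.7", "2001:db8::1", "upf.example.org"]))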
class TrigramModel: def __init__(self, corpusData, relatedBigram, delta=0): self.trainingCorpus = corpusData self.trigramCountList = defaultdict(lambda: 0) self.delta = delta self.relatedBigram = relatedBigram self.train_trigram_model() """ Splits corpus into trigram for training the model as well as storing counts to be used later """ def train_trigram_model(self): for sentence in self.trainingCorpus.corpusArray: unigram1 = ProcessCorpus.startSymbol unigram2 = ProcessCorpus.startSymbol unigram3 = '' self.relatedBigram.bigramCountList[(unigram1, unigram2)] = self.relatedBigram.bigramCountList[(unigram1, unigram2)] + 1 for word in sentence: unigram3 = word self.trigramCountList[(unigram1, unigram2, unigram3)] = self.trigramCountList[(unigram1, unigram2, unigram3)] + 1 unigram1 = unigram2 unigram2 = word unigram3 = ProcessCorpus.stopSymbol self.trigramCountList[(unigram1, unigram2, unigram3)] = self.trigramCountList[(unigram1, unigram2, unigram3)] + 1 """ Scores the log probability of a given sentence using the information computed in the train function Using Laplace smoothing to prevent undefined probability in zero history situations """ def score_probability_of_sentence(self, sentence): score = 0.0 unigram1 = ProcessCorpus.startSymbol unigram2 = ProcessCorpus.startSymbol unigram3 = '' for word in sentence: unigram3 = word trigramFrequency = self.trigramCountList[(unigram1, unigram2, unigram3)] bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)] #Used laplace smoothing #NOTE score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2))) unigram1 = unigram2 unigram2 = word unigram3 = ProcessCorpus.stopSymbol trigramFrequency = self.trigramCountList[(unigram1, unigram2, unigram3)] bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)] score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2))) return score """ Scores the MLE probability of a given trigram from the trained data """ def score_mle_probability(self, trigram): score = 0.0 unigram1, unigram2, unigram3 = trigram trigramFrequency = self.trigramCountList[trigram] bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)] #Used laplace smoothing here #NOTE score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2))) return score
import { JsonObject, JsonProperty } from 'json2typescript';
import { TemperatureUnit } from '../enums/temperature-unit.enum';

@JsonObject('Temperature')
export class Temperature {
  @JsonProperty('value', Number)
  value: number;

  constructor(value: number) {
    this.value = Math.round(value);
  }

  getFahrenheit(): number {
    return Math.round(this.value);
  }

  getCelsius() {
    return Math.round(((this.value - 32) * 5) / 9);
  }

  toString(type?: TemperatureUnit): string {
    const temperature = type && type === TemperatureUnit.Metric ? this.getCelsius() : this.getFahrenheit();
    if (isNaN(temperature)) {
      return '';
    }
    return temperature.toString();
  }
}
#pragma once

#include "Jank/Application.h"
#include "Jank/Log.h"
#include "Jank/Layer.h"
#include "Jank/ImGui/ImGuiLayer.h"

#include "Jank/Core/Timestep.h"
#include "Jank/Core/Colour.h"

#include "Jank/KeyCodes.h"
#include "Jank/MouseButtonCodes.h"
#include "Jank/Input.h"

//--Renderer---------------------
#include "Jank/Renderer/Renderer.h"
#include "Jank/Renderer/RenderCommand.h"
#include "Jank/Renderer/VertexArray.h"
#include "Jank/Renderer/Shader.h"
#include "Jank/Renderer/Buffer.h"
#include "Jank/Renderer/Texture.h"
#include "Jank/Renderer/OrthographicCamera.h"
//-------------------------------

//--Entry Point------------------
#include "Jank//EntryPoint.h"
//-------------------------------
// throws Exception if queue is empty
int dequeue() throws Exception {
    if (isEmpty())
        throw new Exception("Queue empty");

    int value = queue.get(front);
    front = (front + 1) % capacity;
    elemCount--;
    return value;
}
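The dequeue above advances a front index modulo the capacity, which is the classic array-backed circular queue. For reference, a self-contained Python sketch of the same structure; the method names mirror the snippet, but the class itself is illustrative rather than taken from the original code:

class CircularQueue:
    def __init__(self, capacity):
        self.capacity = capacity
        self.queue = [None] * capacity
        self.front = 0
        self.count = 0

    def is_empty(self):
        return self.count == 0

    def enqueue(self, value):
        if self.count == self.capacity:
            raise OverflowError("Queue full")
        # write position wraps around the fixed-size backing array
        self.queue[(self.front + self.count) % self.capacity] = value
        self.count += 1

    def dequeue(self):
        if self.is_empty():
            raise IndexError("Queue empty")
        value = self.queue[self.front]
        self.front = (self.front + 1) % self.capacity
        self.count -= 1
        return value

q = CircularQueue(3)
q.enqueue(1); q.enqueue(2)
assert q.dequeue() == 1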
/**
 * Subclassing `Error` in TypeScript:
 * https://stackoverflow.com/a/41102306/376773
 */
interface LambdaErrorPayload {
  errorMessage?: string;
  errorType?: string;
  stackTrace?: string | string[];
}

export class LambdaError extends Error {
  constructor(data: LambdaErrorPayload = {}) {
    super(data.errorMessage || 'Unspecified runtime initialization error');
    Object.setPrototypeOf(this, new.target.prototype);
    Object.defineProperty(this, 'name', {
      value: data.errorType || this.constructor.name
    });
    if (Array.isArray(data.stackTrace)) {
      this.stack = [
        `${this.name}: ${this.message}`,
        ...data.stackTrace
      ].join('\n');
    } else if (typeof data.stackTrace === 'string') {
      this.stack = data.stackTrace;
    }
  }
}
In a few years, or even in a few months, we’ll probably see psychologists examine and study the unprecedented level of gaming addiction we’ve seen from Pokemon Go players over the past few weeks. If you thought Candy Crush was bad, well, Pokemon Go takes things to an entirely new level. In recent weeks, we’ve seen stories of Pokemon Go players crashing cars, getting shot at, falling off cliffs, aimlessly wandering into traffic and even creating Walking Dead-style stampedes in cities across the globe. Apparently the search for virtual monsters is so strong that players can’t help but throw all of their common sense out the window once they fire up the game.

In the most recent instance of a Pokemon Go addiction gone too far, a distracted driver playing the popular game rammed his Toyota Rav4 straight into a parked police car. You see, in the Pokemon Go dominated world we now live in, the criminally dumb reveal themselves in droves.

Word of the somewhat hilarious crash was originally made public by the Baltimore Police Department, which tweeted about the incident along with some accompanying video from a policeman’s body cam. Thankfully, no officers were inside the patrol car at the time of the incident. Further, the driver of the Rav4 escaped uninjured as well.

When officers confronted the driver, he admitted that he had his head down while playing Pokemon Go. Later in the video, the driver can be heard saying, “That’s what I get for playing this dumbass game.” Touche my friend, touche.
/**
 * Saves an image as a single page of a PDF file.
 * @param imageData
 * @param dpi
 * @throws Exception
 */
public static final byte[] writeAsPdf(byte[] imageData, int dpi) throws Exception {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Document document = new Document();
    PdfWriter.getInstance(document, bos);
    document.open();
    Image image = Image.getInstance(imageData);
    image.setAbsolutePosition(0, 0);
    image.scaleAbsolute(cmToPt(21), cmToPt(29.7));
    document.add(image);
    document.close();
    return bos.toByteArray();
}
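The Java helper above scales the image onto an A4-sized page using iText. For comparison only, a much simpler (and less controlled) route exists in Python via Pillow, which can write an image directly as a one-page PDF; the file names below are placeholders, not paths from the original project:

from PIL import Image

# Placeholder paths, for illustration only.
with Image.open("scan.png") as img:
    # Pillow writes a single-page PDF sized to the image; page size and DPI
    # handling are much coarser than in the iText version above.
    img.convert("RGB").save("scan.pdf", "PDF", resolution=300.0)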
// Copyright 2021 bilibili-base // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package apimanager import ( "context" "errors" "fmt" "net" "net/http" "net/url" "sort" "strings" "sync" "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/pflag" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/bilibili-base/powermock/apis/v1alpha1" "github.com/bilibili-base/powermock/pkg/interact" "github.com/bilibili-base/powermock/pkg/pluginregistry" "github.com/bilibili-base/powermock/pkg/pluginregistry/storage/memory" "github.com/bilibili-base/powermock/pkg/util" "github.com/bilibili-base/powermock/pkg/util/logger" ) // Provider defines the APIManager interface // It is used to manage MockAPI, plug-ins, and generate MockResponse type Provider interface { v1alpha1.MockServer MockResponse(ctx context.Context, request *interact.Request) (*interact.Response, error) Start(ctx context.Context, cancelFunc context.CancelFunc) error } // Manager is the implement of APIManager type Manager struct { cfg *Config storage pluginregistry.StoragePlugin pluginRegistry pluginregistry.Registry // does not support deletion // https://github.com/gorilla/mux/issues/82 // readonly mux *mux.Router // map[uniqueKey]*v1alpha1.MockAPI // readonly apis map[string]*v1alpha1.MockAPI // used to protect the pointer of mux, apis lock sync.RWMutex v1alpha1.UnimplementedMockServer registerer prometheus.Registerer logger.Logger } // Config defines the config structure type Config struct { GRPCAddress string HTTPAddress string } // NewConfig is used to init config with default values func NewConfig() *Config { return &Config{ GRPCAddress: "0.0.0.0:30000", HTTPAddress: "0.0.0.0:30001", } } // RegisterFlagsWithPrefix is used to register flags func (c *Config) RegisterFlagsWithPrefix(prefix string, f *pflag.FlagSet) { f.StringVar(&c.GRPCAddress, prefix+"apiManager.grpcAddress", c.GRPCAddress, "gRPC service listener address") f.StringVar(&c.HTTPAddress, prefix+"apiManager.httpAddress", c.HTTPAddress, "http service listener address") } // Validate is used to validate config and returns error on failure func (c *Config) Validate() error { if c.HTTPAddress == "" && c.GRPCAddress == "" { return errors.New("[apiManager] grpcAddress and httpAddress cannot be empty at the same time") } return nil } // New is used to init service func New(cfg *Config, pluginRegistry pluginregistry.Registry, logger logger.Logger, registerer prometheus.Registerer) (Provider, error) { service := &Manager{ cfg: cfg, registerer: registerer, pluginRegistry: pluginRegistry, mux: mux.NewRouter(), apis: map[string]*v1alpha1.MockAPI{}, Logger: logger.NewLogger("apiManager"), } return service, nil } // Start is used to start the service func (s *Manager) Start(ctx context.Context, cancelFunc context.CancelFunc) error { if err := s.setupStorage(); err != nil { return err } if err := s.loadAPIs(ctx); err != nil 
{ return err } if err := s.setupGRPCServer(ctx, cancelFunc); err != nil { return err } if err := s.setupHTTPServer(ctx, cancelFunc); err != nil { return err } s.setupAnnouncementReceiver(ctx, cancelFunc) return nil } // SaveMockAPI is used to create or update MockAPI func (s *Manager) SaveMockAPI(ctx context.Context, request *v1alpha1.SaveMockAPIRequest) (*v1alpha1.SaveMockAPIResponse, error) { api := request.GetData() if api == nil { return nil, errors.New("api is nil") } var encoder jsonpb.Marshaler data, err := encoder.MarshalToString(api) if err != nil { return nil, err } if err := s.storage.Set(ctx, api.GetUniqueKey(), data); err != nil { return nil, err } return &v1alpha1.SaveMockAPIResponse{}, nil } // DeleteMockAPI is used to delete MockAPI func (s *Manager) DeleteMockAPI(ctx context.Context, request *v1alpha1.DeleteMockAPIRequest) (*v1alpha1.DeleteMockAPIResponse, error) { uniqueKey := request.GetUniqueKey() if err := s.storage.Delete(ctx, uniqueKey); err != nil { return nil, err } return &v1alpha1.DeleteMockAPIResponse{}, nil } // ListMockAPI is used to list MockAPIs func (s *Manager) ListMockAPI(ctx context.Context, request *v1alpha1.ListMockAPIRequest) (*v1alpha1.ListMockAPIResponse, error) { s.lock.RLock() apis := s.apis s.lock.RUnlock() var total uint64 var uniqueKeys []string keywords := request.GetKeywords() for _, mockAPI := range apis { total++ if keywords != "" && !strings.Contains(mockAPI.GetUniqueKey(), keywords) { continue } uniqueKeys = append(uniqueKeys, mockAPI.GetUniqueKey()) } sort.Strings(uniqueKeys) pagination := util.GetPagination(request.GetPagination()) if err := util.PaginateSlice(pagination, &uniqueKeys); err != nil { return nil, err } data := make([]*v1alpha1.MockAPI, 0, len(uniqueKeys)) for _, key := range uniqueKeys { mockAPI, ok := apis[key] if ok { data = append(data, mockAPI) } } return &v1alpha1.ListMockAPIResponse{ Data: data, }, nil } // MatchAPI is used to match MockAPI func (s *Manager) MatchAPI(host, path, method string) (*v1alpha1.MockAPI, bool) { s.lock.RLock() m := s.mux apis := s.apis s.lock.RUnlock() var match mux.RouteMatch matched := m.Match(&http.Request{ Method: method, URL: &url.URL{Path: path}, Host: host, }, &match) if !matched { return nil, false } api := apis[match.Route.GetName()] if api != nil { return api, true } return nil, false } // MockResponse is used to mock response func (s *Manager) MockResponse(ctx context.Context, request *interact.Request) (*interact.Response, error) { api, ok := s.MatchAPI(request.Host, request.Path, request.Method) if !ok { return nil, fmt.Errorf("unable to find mock config of %s", request.Path) } mockCase, err := s.getMatchedCase(ctx, request, api) if err != nil { return nil, err } response := interact.NewDefaultResponse(request) for _, plugin := range s.pluginRegistry.MockPlugins() { abort, err := plugin.MockResponse(ctx, mockCase.GetResponse(), request, response) if err != nil { return nil, newPluginError(codes.Internal, plugin.Name(), err) } if abort { return response, nil } } return response, nil } func (s *Manager) setupStorage() error { storagePlugin := s.pluginRegistry.StoragePlugin() if storagePlugin != nil { s.storage = storagePlugin return nil } storage, err := memory.New(memory.NewConfig(), s.NewLogger(".memory"), s.registerer) if err != nil { return err } s.storage = storage return nil } func (s *Manager) setupHTTPServer(ctx context.Context, cancelFunc func()) error { addr := s.cfg.HTTPAddress if addr == "" { return nil } s.LogInfo(nil, "starting api manager on http address: %s", addr) 
serverMux := runtime.NewServeMux() err := v1alpha1.RegisterMockHandlerFromEndpoint(context.TODO(), serverMux, s.cfg.GRPCAddress, []grpc.DialOption{grpc.WithInsecure()}) if err != nil { return err } server := &http.Server{ Addr: s.cfg.HTTPAddress, Handler: serverMux, } util.StartServiceAsync(ctx, cancelFunc, s.Logger.NewLogger("http"), func() error { return server.ListenAndServe() }, func() error { return server.Shutdown(context.TODO()) }) return nil } func (s *Manager) setupGRPCServer(ctx context.Context, cancelFunc func()) error { addr := s.cfg.GRPCAddress if addr == "" { return nil } s.LogInfo(nil, "starting api manager on gRPC address: %s", addr) listener, err := net.Listen("tcp", addr) if err != nil { return err } server := grpc.NewServer(grpc.UnaryInterceptor(util.GRPCLoggingMiddleware(s.Logger))) v1alpha1.RegisterMockServer(server, s) util.StartServiceAsync(ctx, cancelFunc, s.Logger.NewLogger("gRPC"), func() error { return server.Serve(listener) }, func() error { server.GracefulStop() return nil }) return nil } func (s *Manager) setupAnnouncementReceiver(ctx context.Context, cancelFunc func()) { util.StartServiceAsync(ctx, cancelFunc, s.Logger, func() error { for { select { case _, ok := <-s.storage.GetAnnouncement(): if !ok { s.LogWarn(nil, "storage announcement closed") return nil } s.LogInfo(nil, "storage announcement received") if err := s.loadAPIs(ctx); err != nil { s.LogError(nil, "failed to load apis: %s", err) } case <-ctx.Done(): s.LogWarn(nil, "apiManager stops watching announcements") return nil } } }, func() error { return nil }) } func (s *Manager) loadAPIs(ctx context.Context) error { pairs, err := s.storage.List(ctx) if err != nil { return err } apis := map[string]*v1alpha1.MockAPI{} s.LogInfo(nil, "load apis from storage, total %d", len(pairs)) for key, val := range pairs { var api v1alpha1.MockAPI if err := jsonpb.UnmarshalString(val, &api); err != nil { return fmt.Errorf("failed to load(%s): %s", key, err) } apis[key] = &api s.LogInfo(map[string]interface{}{ "uniqueKey": api.GetUniqueKey(), "path": api.GetPath(), }, "apis is loaded") } s.lock.Lock() s.apis = apis s.mux = buildMux(apis, s.Logger) s.lock.Unlock() return nil } func (s *Manager) getMatchedCase(ctx context.Context, request *interact.Request, api *v1alpha1.MockAPI) (*v1alpha1.MockAPI_Case, error) { for _, mockCase := range api.Cases { for _, plugin := range s.pluginRegistry.MatchPlugins() { condition := mockCase.GetCondition() if condition == nil { return mockCase, nil } matched, err := plugin.Match(ctx, request, condition) if err != nil { return nil, newPluginError(codes.Internal, plugin.Name(), err) } if matched { return mockCase, nil } } } return nil, status.Error(codes.NotFound, "no case matched") } func buildMux(apis map[string]*v1alpha1.MockAPI, log logger.Logger) *mux.Router { router := mux.NewRouter() for _, mockAPI := range apis { if err := addAPI(router, mockAPI); err != nil { log.LogWarn(map[string]interface{}{ "uniqueKey": mockAPI.GetUniqueKey(), }, "failed to add api when buildMux: %s", err) } } return router } func addAPI(router *mux.Router, api *v1alpha1.MockAPI) error { if api.GetUniqueKey() == "" { return errors.New("unique key is required") } if api.Path == "" { return errors.New("path is required") } route := router.Path(api.Path) if api.Host != "" { route = route.Host(api.Host) } if api.Method != "" { route = route.Methods(api.Method) } route.Name(api.UniqueKey) return nil } func newPluginError(code codes.Code, name string, err error) error { return status.Error(code, 
fmt.Sprintf("plugin(%s): %s", name, err)) }
import dotenv from 'dotenv'; import { VoiceText } from '../../src/index'; dotenv.config(); describe('VoiceText#fetchBuffer', () => { test('case valid', async () => { const target = new VoiceText({ apiKey: process.env['API_KEY'] ?? '' }); expect(Buffer.isBuffer(await target.fetchBuffer())).toBe(true); }); test('case invalid', async () => { const target = new VoiceText({ apiKey: 'invalid' }); await expect(() => target.fetchBuffer()).rejects.toThrow(); }); });
<reponame>fossabot/FReD<gh_stars>0 package main import ( "fmt" "time" ) type ReplicaSuite struct { c *Config } func (t *ReplicaSuite) Name() string { return "Replication" } func (t *ReplicaSuite) RunTests() { // Fun with replicas logNodeAction(t.c.nodeA, "Create keygroup KGRep") t.c.nodeA.CreateKeygroup("KGRep", true, 0, false) logNodeAction(t.c.nodeA, "Adding nodeB as Replica node for KGRep") t.c.nodeA.AddKeygroupReplica("KGRep", t.c.nodeB.ID, 0, false) logNodeAction(t.c.nodeB, "Putting a valuein KGRep") t.c.nodeB.PutItem("KGRep", "KGRepItem", "val", false) logNodeAction(t.c.nodeB, "Deleting the value from KGRep") t.c.nodeB.DeleteItem("KGRep", "KGRepItem", false) logNodeAction(t.c.nodeB, "Getting the deleted value in KGRep") _ = t.c.nodeB.GetItem("KGRep", "KGRepItem", true) // Test sending data between nodes logNodeAction(t.c.nodeB, "Creating a new Keygroup (KGN) in nodeB, setting nodeA as Replica node") t.c.nodeB.CreateKeygroup("KGN", true, 0, false) t.c.nodeB.AddKeygroupReplica("KGN", t.c.nodeA.ID, 0, false) logNodeAction(t.c.nodeB, "Putting something in KGN on nodeB, testing whether nodeA gets Replica (sleep 1.5s in between)") t.c.nodeB.PutItem("KGN", "Item", "Value", false) time.Sleep(1500 * time.Millisecond) resp := t.c.nodeA.GetItem("KGN", "Item", false) if resp != "Value" { logNodeFailure(t.c.nodeA, "resp is \"Value\"", resp) } logNodeAction(t.c.nodeA, "Putting something in KGN on nodeA, testing whether nodeB gets Replica (sleep 1.5s in between)") t.c.nodeA.PutItem("KGN", "Item2", "Value2", false) time.Sleep(1500 * time.Millisecond) resp = t.c.nodeB.GetItem("KGN", "Item2", false) if resp != "Value2" { logNodeFailure(t.c.nodeA, "resp is \"Value2\"", resp) } logNodeAction(t.c.nodeA, "Adding a replica for a nonexisting Keygroup") t.c.nodeA.AddKeygroupReplica("trololololo", t.c.nodeB.ID, 0, true) logNodeAction(t.c.nodeC, "Creating an already existing keygroup with another node") t.c.nodeC.CreateKeygroup("KGN", true, 0, true) logNodeAction(t.c.nodeC, "Telling a node that is not part of the keygroup that it is now part of that keygroup") t.c.nodeC.AddKeygroupReplica("KGN", t.c.nodeC.ID, 0, false) logNodeAction(t.c.nodeA, "Creating a new Keygroup (kgall) with all three nodes as replica") t.c.nodeA.CreateKeygroup("kgall", true, 0, false) t.c.nodeA.AddKeygroupReplica("kgall", t.c.nodeB.ID, 0, false) t.c.nodeB.AddKeygroupReplica("kgall", t.c.nodeC.ID, 0, false) logNodeAction(t.c.nodeC, "... 
sending data to one node, checking whether all nodes get the replica (sleep 1.5s)") t.c.nodeC.PutItem("kgall", "item", "value", false) time.Sleep(1500 * time.Millisecond) respA := t.c.nodeA.GetItem("kgall", "item", false) respB := t.c.nodeB.GetItem("kgall", "item", false) if respA != "value" || respB != "value" { logNodeFailure(t.c.nodeA, "both nodes respond with 'value'", fmt.Sprintf("NodeA: %s, NodeB: %s", respA, respB)) } logNodeAction(t.c.nodeB, "...removing node from the keygroup all and checking whether it still has the data (sleep 1.5s)") t.c.nodeB.DeleteKeygroupReplica("kgall", t.c.nodeB.ID, false) time.Sleep(1500 * time.Millisecond) respB = t.c.nodeB.GetItem("kgall", "item", true) logNodeAction(t.c.nodeB, fmt.Sprintf("Got Response %s", respB)) logNodeAction(t.c.nodeB, "...re-adding the node to the keygroup all and checking whether it now gets the data (sleep 1.5s)") t.c.nodeA.AddKeygroupReplica("kgall", t.c.nodeB.ID, 0, false) time.Sleep(1500 * time.Millisecond) respA = t.c.nodeA.GetItem("kgall", "item", false) if respA != "value" { logNodeFailure(t.c.nodeA, "resp is \"value\"", resp) } respB = t.c.nodeB.GetItem("kgall", "item", false) if respB != "value" { logNodeFailure(t.c.nodeB, "resp is \"value\"", resp) } // delete the last node from a keygroup logNodeAction(t.c.nodeA, "Preparing to delete all members from a keygroup...") t.c.nodeA.CreateKeygroup("deletetest", true, 0, false) t.c.nodeA.PutItem("deletetest", "item", "value", false) t.c.nodeA.AddKeygroupReplica("deletetest", t.c.nodeB.ID, 0, false) t.c.nodeA.DeleteKeygroupReplica("deletetest", t.c.nodeA.ID, false) // NodeB is the only replica left logNodeAction(t.c.nodeB, "Removing last member of a keygroup delete-test") t.c.nodeB.DeleteKeygroupReplica("deletetest", t.c.nodeB.ID, true) } func NewReplicaSuite(c *Config) *ReplicaSuite { return &ReplicaSuite{ c: c, } }
import classNames from 'classnames';
import React from 'react';
import { useTranslation } from 'react-i18next';
import { NavLink } from 'react-router-dom';

import styles from './style.module.css';

interface Route {
  name: string;
  path: string;
}

export interface NavigationProps {
  inline?: boolean;
  routes: Route[];
}

function Navigation(props: NavigationProps): JSX.Element {
  const { inline, routes } = props;
  const { t } = useTranslation();

  return (
    <nav className={styles.navigation}>
      <ul
        className={classNames(styles.navigation__list, {
          [styles['navigation__list--inline']]: inline,
        })}
      >
        {routes.map((route) => {
          const { name, path } = route;
          return (
            <li key={path}>
              <NavLink to={path} activeClassName="is-active" exact>
                {t(name)}
              </NavLink>
            </li>
          );
        })}
      </ul>
    </nav>
  );
}

export default Navigation;
import React, { useState } from 'react'; import { makeStyles, Theme } from '@material-ui/core/styles'; import { Button, Chip, IconButton, Popover } from '@material-ui/core'; import KeyboardArrowLeftIcon from '@material-ui/icons/KeyboardArrowLeft'; import KeyboardArrowDownIcon from '@material-ui/icons/KeyboardArrowDown'; import { KeyboardDatePicker } from '@material-ui/pickers'; import moment, { Moment } from 'moment'; const useStyles = makeStyles((theme: Theme) => ({ main: { display: "flex" }, quickPickItem: { marginRight: theme.spacing(1), }, timeRangeChip: { marginRight: theme.spacing(1), }, expandContent: { padding: theme.spacing(4), maxWidth: theme.spacing(35), }, betweenText: { marginLeft: theme.spacing(4), marginRight: theme.spacing(4), color: theme.palette.text.primary, }, actionContainer: { marginTop: theme.spacing(2), textAlign: 'end', }, timeRangeChipContainer: { display: 'flex', justifyContent: 'center', alignItems: 'center', }, })); export interface TimeRangeFilterPropsType { onApplyTimeRange: (timeRange?:string,startTime?: Moment, endTime?: Moment) => void timeRange?:string startTime?:string endTime?:string } const quickPick = [ { title: '7天', timeKey: '7d', getTime: () => ({ start: moment().endOf('day').subtract(7, 'days'), end: moment(), }), }, { title: '30天', timeKey: '30d', getTime: () => ({ start: moment().endOf('day').subtract(30, 'days'), end: moment(), }), }, { title: '90天', timeKey: '90d', getTime: () => ({ start: moment().endOf('day').subtract(90, 'days'), end: moment(), }), }, ]; export default function TimeRangeFilter({ onApplyTimeRange,timeRange }: TimeRangeFilterPropsType) { const classes = useStyles(); const [anchorEl, setAnchorEL] = useState(null); const [inputStartTime,setInputStartTime] = useState<Moment | undefined>(moment()) const [inputEndTime,setInputEndTime] = useState<Moment | undefined>(moment()) const setStartTime = (time : Moment | null) => { if (time === null || time === undefined){ setInputStartTime(undefined) }else{ setInputStartTime(time) } } const setEndTime = (time : Moment | null) => { if (time === null || time === undefined){ setInputEndTime(undefined) }else{ setInputEndTime(time) } } const onExpandButtonClick = (e: any) => { setAnchorEL(e.currentTarget); }; const onPopoverCancel = () => { setAnchorEL(null); }; const quickPickChips = quickPick.map(item => { const onQuickPickChipClick = () => { const { start, end } = item.getTime(); if (timeRange === item.timeKey){ onApplyTimeRange(undefined,undefined, undefined); }else{ onApplyTimeRange(item.timeKey,start, end); } }; return ( <Chip key={item.timeKey} label={item.title} onClick={onQuickPickChipClick} clickable={true} className={classes.quickPickItem} color={'primary'} variant={(timeRange && timeRange === item.timeKey) ? 'default' : 'outlined'} /> ); }); const onApplyClick = () => { onApplyTimeRange("custom",inputStartTime,inputEndTime) }; const renderTimeRangeChip = () => { const onTimeRangeClick = () => { if (timeRange === "custom"){ onApplyTimeRange(undefined,undefined, undefined); }else{ onApplyTimeRange("custom",inputStartTime,inputEndTime) } }; const text = `${inputStartTime ? inputStartTime.format('YYYY-MM-DD') : '任何'} 至 ${inputEndTime ? inputEndTime.format('YYYY-MM-DD') : '任何'}`; return ( <Chip label={text} color={'primary'} variant={timeRange === "custom" ? 'default' : 'outlined'} onClick={onTimeRangeClick} clickable={true} /> ); }; return ( <div className={classes.main}> {timeRange && timeRange === "custom" ? 
renderTimeRangeChip() : quickPickChips} <IconButton onClick={onExpandButtonClick} size={'small'} color={'primary'}> {Boolean(anchorEl) ? <KeyboardArrowDownIcon/> : <KeyboardArrowLeftIcon/>} </IconButton> <Popover anchorEl={anchorEl} open={Boolean(anchorEl)} onClose={onPopoverCancel} anchorOrigin={{ vertical: 'bottom', horizontal: 'right', }} transformOrigin={{ vertical: 'top', horizontal: 'right', }} style={{ marginTop: 16 }} elevation={1} > <div className={classes.expandContent}> {timeRange && timeRange === "custom" && quickPickChips} <KeyboardDatePicker variant="inline" format="YYYY-MM-DD" margin="normal" id="date-picker-inline" label="开始时间" KeyboardButtonProps={{ 'aria-label': 'change date', }} value={inputStartTime} onChange={setStartTime} /> <KeyboardDatePicker variant="inline" format="YYYY-MM-DD" margin="normal" id="date-picker-inline" label="结束时间" KeyboardButtonProps={{ 'aria-label': 'change date', }} value={inputEndTime} onChange={setEndTime} /> <div className={classes.actionContainer}> <Button color={'primary'} variant={timeRange && timeRange === "custom" ? 'contained' : 'outlined'} disableElevation={true} onClick={onApplyClick} >设置 </Button> </div> </div> </Popover> </div> ); }
#include <bits/stdc++.h> #include <iostream> #include <cstdio> #include <math.h> #define ll long long #define MAXN 500+5 using namespace std; int memo[MAXN][2]; int A[MAXN][MAXN]; int S[MAXN]; int main() { int x,y,r; cin >> x >> y >> r; for (int i=0; i<x; i++){ S[i] = 0; for (int j=0; j<y; j++){ cin >> A[i][j]; } } for (int i=0; i<x; i++){ int s = 0; int maxn =0; for (int j=0; j<y; j++){ if (A[i][j]==1) s++; else s=0; S[i] = maxn = max(maxn,s); } } int tx, ty; for (int i=0; i<r; i++){ cin >> tx >> ty; tx--;ty--; A[tx][ty] = 1 - A[tx][ty]; int s =0; int maxn = 0; for (int j=0; j<y; j++){ if (A[tx][j]==1) s++; else s=0; S[tx] = maxn = max(maxn,s); } maxn = S[0]; for (int j=1; j<x; j++) maxn = max(maxn, S[j]); cout << maxn << endl; } return 0; }
package com.hantsylabs.example.spring.test.webdriver; import org.hamcrest.core.IsEqual; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.openqa.selenium.WebDriver; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.web.WebAppConfiguration; import org.springframework.test.web.servlet.htmlunit.webdriver.MockMvcHtmlUnitDriverBuilder; import org.springframework.test.web.servlet.htmlunit.webdriver.WebConnectionHtmlUnitDriver; import org.springframework.web.context.WebApplicationContext; import com.gargoylesoftware.htmlunit.BrowserVersion; import com.hantsylabs.example.spring.config.AppConfig; import com.hantsylabs.example.spring.config.WebConfig; import com.hantsylabs.example.spring.test.Assertions; import com.hantsylabs.example.spring.test.MockDataConfig; import com.hantsylabs.example.spring.test.webdriver.pages.CreateTaskPage; import com.hantsylabs.example.spring.test.webdriver.pages.TaskListPage; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(classes={AppConfig.class, MockDataConfig.class, WebConfig.class}) @WebAppConfiguration public class MockMvcHtmlUnitWebDriverCreateTaskTests { @Autowired WebApplicationContext context; WebDriver driver; @Before public void setUp() throws Exception { driver = MockMvcHtmlUnitDriverBuilder .webAppContextSetup(context) .withDelegate(new WebConnectionHtmlUnitDriver(BrowserVersion.CHROME)) .contextPath("") .build(); } @After public void tearDown() throws Exception { if(driver !=null){ driver.close(); } } @Test public void testCreateTasks() { CreateTaskPage createTask = CreateTaskPage.to(driver); TaskListPage taskList = createTask.newTask( "first task", "description of first task"); Assertions.assertThat(taskList.getErrors()).isEqualTo("Task is created sucessfully!"); Assertions.assertThat(taskList.getPageTitle()).isEqualTo("TASK LIST"); } @Test public void testCreateTaskWithEmptyFields() { CreateTaskPage createTask = CreateTaskPage.to(driver); CreateTaskPage createTaskPage = createTask.newTaskWithEmptyFields(); Assertions.assertThat(createTaskPage.getTitleError()).isEqualTo("may not be empty"); Assertions.assertThat(createTaskPage.getDescriptionError()).isEqualTo("size must be between 10 and 200"); // } }
import java.util.Scanner; public class WrongSubtraction { private static int subtract(int input) { if (input >= 10) { return input%10 == 0 ? input / 10 : input - 1; } else if (input > 1) { return input - 1; } return input; } public static void main(String[] args) { Scanner scanner = new Scanner(System.in); int input = scanner.nextInt(); int k = scanner.nextInt(); for (int i = 0; i < k; i++) { input = subtract(input); } System.out.println(input); } }
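A quick worked trace of the core rule implemented above (if the last digit is zero, divide by ten; otherwise subtract one). The starting value 512 and the four steps are arbitrary illustration values, not part of the original problem input.

public class WrongSubtractionTrace {
    public static void main(String[] args) {
        int n = 512; // arbitrary example value
        for (int i = 0; i < 4; i++) {
            // same core rule as subtract(): strip a trailing zero, otherwise decrement
            n = (n % 10 == 0) ? n / 10 : n - 1;
            System.out.print(n + " "); // prints: 511 510 51 50
        }
    }
}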
def pop(self, head=False, blocking=False): if head and blocking: return self.deserialize(self._client.blpop(self.list_key)[1]) elif head: return self.deserialize(self._client.lpop(self.list_key)) elif blocking: return self.deserialize(self._client.brpop(self.list_key)[1]) else: return self.deserialize(self._client.rpop(self.list_key))
/** * A base class for scenario specific HUDs. */ public abstract class ScenarioHUD extends BWindow { public ScenarioHUD (BStyleSheet style, BLayoutManager layout) { super(style, layout); } public abstract void pieceWasAffected (Piece piece, String effect); }
Did I mention what a weasel-y character RNC chairman Reince Priebus is? The RNC’s resident Baghdad Bob continues to insult our intelligence with asinine statements about GOP nominee Trump’s worth as a candidate. The latest cringe-worthy narrative to come out of Mr. Priebus was in regards to Trump’s vile suggestion of Senator Ted Cruz’s dad being tied to the assassination of President John F. Kennedy. Priebus couldn’t avoid responding on this one, due in no small part to pointed questioning on Twitter by RedStaters. Dear @Reince, I understand from your nominee that Ted Cruz's dad helped kill Kennedy. Will your party push for an investigation? — Caleb Howe (@CalebHowe) July 22, 2016 Speaking on Sunday, Priebus, with a straight face, uttered this explanation: “He’s got a right to talk about whatever he wants to talk about, however, I don’t think he was ever saying this was some sort of factual information,” Priebus said at a press conference on the eve of the Democratic National Convention. “It was something he referred to. He’s talked about it, he’s gotten off from it. As far as I’m concerned, we can move on from it.” Let me shorten that up: Trump says stupid stuff. Can we pretend we don’t hear it? No, Mr. Priebus. You can’t blow something so incendiary and insulting off as just casual conversation. This is your candidate, and he’s running for the highest office in the land. Those of us who are actually civilized and reasonably sane expect our representatives to hold higher standards of conduct. “I think he mentioned it in passing and everybody glommed onto it and it became a controversy, but I think as far as the overall picture of Donald Trump —” Priebus said, “I mean, that is one rhetorical issue that you can all debate until the cows come home, but it doesn’t identify the Donald Trump campaign.” He publicly linked a sitting senator’s father to the assassination of a U.S. president. I’ve been saying this a lot, lately: Are you high?! Telling us, in essence, that your candidate is prone to lying isn’t a confidence builder. The alleged Russian mob fixer who runs Trump’s campaign suggested that it was Cruz, himself, who reignited the controversy, after refusing to endorse Trump at the convention and then stating later that he didn’t tend to endorse those who attack his family. The facts are, Trump’s entire campaign, from the day he announced to this very moment has been built on insulting, abusive, and erratic behavior. Most of it has been directed towards other Republicans – that is, when he’s giving minorities, vets, and handicapped citizens a break. Priebus may think whitewashing these incidents will make it all go away, but we’re 3 months away from the general election and Trump is as big of a public relations nightmare now as he was in the beginning. You can’t shine a turd, Mr. Priebus.
#include <stdio.h>
#include <stdlib.h>

long int max(long int n, long int a[]);

int main(void)
{
    long int num;
    while (scanf("%ld", &num) != EOF) {
        /* use calloc so the memo table starts zeroed; max() relies on
           a[n] == 0 meaning "not computed yet" */
        long int *arr = (long int *) calloc(num + 1, sizeof(long int));
        printf("%ld\n", max(num, arr));
        free(arr);
    }
    return 0;
}

/* Maximum value obtainable from a coin of value n: either keep n, or
   exchange it for coins worth n/2 + n/3 + n/4 (memoized in a[]). */
long int max(long int n, long int a[])
{
    if (a[n] != 0)
        return a[n];
    if (n == 0 || n == 1) {
        a[n] = n;
        return a[n];
    }
    long int m = max(n / 2, a) + max(n / 3, a) + max(n / 4, a);
    a[n] = n > m ? n : m;
    return a[n];
}
Anyone who scans my Reddit history knows that losing my father last year was a very hard experience to navigate and that it has had a lasting impression on me. When my gift arrived today, something so heartfelt and close to me was unexpected; I was expecting a t-shirt, or a dvd from my Amazon wish list. When I opened the box to find a wonderful canvas print of one of my favorite photos of my father, I was speechless. The original photo was taken in the late 70s as an homage to a famous Burt Reynolds pose at the time. Dad worked in the grocery business and had struck a pose in the back room. I cannot possibly put into words how much this gift means to me and how much I will cherish it. Thank you so very much. This is a tremendous gift!
package com.company;

import java.math.BigInteger;
import java.security.SecureRandom;
import java.util.Random;

public class RSA {

    static BigInteger p;
    static BigInteger q;
    static BigInteger n;
    static BigInteger phi;
    static BigInteger e;
    static BigInteger d;

    // Generates a fresh key pair; the msg parameter is unused here and is kept
    // only to preserve the original signature.
    static void RSA(int msg) {
        int BIT_LENGTH = 200;

        // Generate random primes
        Random rand = new SecureRandom();
        p = BigInteger.probablePrime(BIT_LENGTH / 2, rand);
        q = BigInteger.probablePrime(BIT_LENGTH / 2, rand);

        // Calculate products
        n = p.multiply(q);
        phi = p.subtract(BigInteger.ONE)
               .multiply(q.subtract(BigInteger.ONE));
        System.out.println("The value of phi = " + phi);

        // Generate public and private exponents
        do {
            e = new BigInteger(phi.bitLength(), rand);
        } while (e.compareTo(BigInteger.ONE) <= 0
                || e.compareTo(phi) >= 0
                || !e.gcd(phi).equals(BigInteger.ONE));
        System.out.println("The value of e = " + e);

        d = e.modInverse(phi);
        System.out.println("The value of d = " + d);
    }

    public static BigInteger getE() {
        return e;
    }

    public static BigInteger getD() {
        return d;
    }

    public static BigInteger enc(int msg, BigInteger e, BigInteger n) {
        return BigInteger.valueOf(msg).modPow(e, n);
    }

    public static BigInteger edec(BigInteger enc, BigInteger d, BigInteger n) {
        return enc.modPow(d, n);
    }
}
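A minimal, hypothetical driver for the class above, assuming it sits in the same com.company package so the package-private static field n is reachable; the message value 42 is arbitrary.

package com.company;

import java.math.BigInteger;

public class RSADemo {
    public static void main(String[] args) {
        int message = 42;                       // arbitrary small test message
        RSA.RSA(message);                       // generate p, q, n, phi, e, d
        BigInteger cipher = RSA.enc(message, RSA.getE(), RSA.n);
        BigInteger plain  = RSA.edec(cipher, RSA.getD(), RSA.n);
        System.out.println("cipher = " + cipher);
        System.out.println("plain  = " + plain); // should print 42
    }
}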
import { Geog, GeographyType } from '../../types';
import { FC } from 'react';
import { BreadcrumbItemLinkProps } from '../Breadcrumbs';

export interface GeographySectionProps {
  geog?: Geog;
  geogIsLoading?: boolean;
  headingLevel?: 1 | 2 | 3 | 4 | 5 | 6;
  LinkComponent?: FC<BreadcrumbItemLinkProps>;
}

export interface ConnectedGeographySectionProps
  extends Omit<GeographySectionProps, 'geog'> {
  geogType?: GeographyType;
  geogID?: string;
}
/* This function checks if the connection request initiated with
 * kmo_sock_connect() has been completed.
 * This function sets the KMO error string. It returns -1 on failure.
 */
int kmo_sock_connect_check(int fd, char *host) {
    int error;
    socklen_t len = sizeof(error);  /* getsockopt() expects a socklen_t, not an int */

    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *) &error, &len)) {
        kmo_seterror("cannot get socket option: %s", kmo_sock_err());
        return -1;
    }

    if (error != 0) {
        errno = error;
        kmo_seterror("cannot connect to %s: %s", host, kmo_sock_err());
        return -1;
    }

    return 0;
}
Impulsive effects on stochastic bidirectional associative memory neural networks with reaction-diffusion and leakage delays

In this paper, the problem of global asymptotic stability analysis for stochastic reaction-diffusion bidirectional associative memory neural networks (BAMNNs) with mixed delays and impulsive effects is investigated. The mixed delays consist of time delays in the leakage terms and continuously distributed delays. Based on the Lyapunov–Krasovskii functional (LKF), Itô's differential formula and the linear matrix inequality (LMI) method, some sufficient conditions for global asymptotic stability in mean square of the equilibrium point of the systems are derived. The feasibility of the conditions is verified using the MATLAB LMI toolbox. Finally, two examples are provided to illustrate the effectiveness and validity of the derived main results.
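For orientation only, here is a generic sketch of the kind of objects such an analysis manipulates: a stochastic system with a leakage delay and a distributed delay, a candidate Lyapunov–Krasovskii functional, and an LMI feasibility requirement. The symbols below ($A$, $B$, $C$, $K$, $P$, $Q$, $\Xi$) are illustrative placeholders, not the paper's actual model, functional, or criterion.

\[
\mathrm{d}x(t) = \Bigl[-A\,x(t-\delta) + B\,f\bigl(x(t)\bigr) + C\!\int_{-\infty}^{t} K(t-s)\,g\bigl(x(s)\bigr)\,\mathrm{d}s\Bigr]\mathrm{d}t + \sigma\bigl(t, x(t)\bigr)\,\mathrm{d}w(t),
\]
\[
V(t, x_t) = x(t)^{\top} P\, x(t) + \int_{t-\delta}^{t} x(s)^{\top} Q\, x(s)\,\mathrm{d}s, \qquad P,\ Q \succ 0,
\]

and global asymptotic stability in mean square follows once an associated LMI, say $\Xi < 0$, is feasible, which is exactly the kind of condition the MATLAB LMI toolbox checks numerically.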
// FirstPage navigates to the first page func (ct *Cointop) FirstPage() error { ct.debuglog("firstPage()") if ct.IsFirstPage() { return nil } ct.State.page = 0 ct.UpdateTable() ct.RowChanged() return nil }
/* * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package hydra.gemfirexd; import hydra.BasePrms; /** * A class used to store keys for fabric server configuration settings. * The settings are used to create instances of {@link FabricServerDescription}. * <p> * The number of description instances is gated by {@link #names}. For other * parameters, if fewer values than names are given, the remaining instances * will use the last value in the list. See $JTESTS/hydra/hydra.txt for more * details. * <p> * Unused parameters default to null, except where noted. This uses the * product default, except where noted. * <p> * Values and fields of a parameter can be set to {@link #DEFAULT}, * except where noted. This uses the product default, except where noted. * <p> * Values, fields, and subfields can be set to {@link #NONE} where noted, with * the documented effect. * <p> * Values and fields of a parameter can use oneof, range, or robing * except where noted, but each description created will use a fixed value * chosen at test configuration time. Use as a task attribute is illegal. */ public class FabricServerPrms extends BasePrms { static { setValues(FabricServerPrms.class); } public static final String DEFAULT_DISTRIBUTED_SYSTEM_NAME = "ds"; /** * (String(s)) * Logical names of the fabric server descriptions. Each name must be unique. * Defaults to null. Not for use with oneof, range, or robing. */ public static Long names; /** * (int(s)) * The "ack-severe-alert-threshold" property, in seconds. */ public static Long ackSevereAlertThreshold; /** * (int(s)) * The "ack-wait-threshold" property, in seconds. */ public static Long ackWaitThreshold; /** * (int(s)) * The "archive-disk-space-limit" property, in megabytes. */ public static Long archiveDiskSpaceLimit; /** * (int(s)) * The "archive-file-size-limit" property, in megabytes. */ public static Long archiveFileSizeLimit; /** * (int(s)) * The "async-distribution-timeout" property, in milliseconds. */ public static Long asyncDistributionTimeout; /** * (int(s)) * The "async-max-queue-size" property, in megabytes. */ public static Long asyncMaxQueueSize; /** * (int(s)) * The "async-queue-timeout" property, in milliseconds. */ public static Long asyncQueueTimeout; /** * (boolean(s)) * Whether automatic reconnect after a forced disconnect is disabled. */ public static Long disableAutoReconnect; /** * (Comma-separated Lists of String(s)) * Names of logical hydra client configurations, as found in {@link * hydra.ClientPrms#names}. No client name can be listed more than once. * Can be specified as {@link #NONE} (default). * <p> * This parameter is used to wire each logical fabric server description * to specific hydra clients. For example, it is used in the topology * include files in $JTESTS/hydraconfig/gemfirexd for p2p, hct, and wan, * which is then used by certain methods in {@link FabricServerHelper} * to complete the wiring. 
*/ public static Long clientNames; /** * (boolean(s)) * The "conserve-sockets" property. */ public static Long conserveSockets; /** * (boolean(s)) * The "disable-tcp" property. */ public static Long disableTcp; /** * (String(s)) * Logical name of the distributed system associated with each of the * {@link names}. Defaults to {@link #DEFAULT_DISTRIBUTED_SYSTEM_NAME}. * <p> * To create a loner distributed system, use {@link hydra.gemfirexd.LonerPrms}. * Loners can be used to connect thin clients to a distributed system * for statistics collection. */ public static Long distributedSystem; /** * (boolean(s)) * The "enable-network-partition-detection" property. */ public static Long enableNetworkPartitionDetection; /** * (boolean(s)) * The "enable-stats" property. Defaults to false. * This turns on GemFireXD statement-level statistics globally * using a system property. To turn them on per-connection, use the * connection property enable-stats in the client (note: peers only). */ public static Long enableStatsGlobally; /** * (boolean(s)) * The "enable-time-statistics" property. Defaults to true, which overrides * the product default. This turns on GemFire-level time statistics. */ public static Long enableTimeStatistics; /** * (boolean(s)) * The "enable-timestats" property. Defaults to false. * This turns on GemFireXD statement-level time statistics globally * using a system property. To turn them on per-connection, use the * connection property enable-timestats in the client (note: peers only). */ public static Long enableTimeStatsGlobally; /** * (Boolean(s)) * The enforce-unique-host property. */ public static Long enforceUniqueHost; /** * (String(s)) * Name of logical fabric security configuration, as found in {@link * FabricSecurityPrms#names}. Can be specified as {@link #NONE} (default). */ public static Long fabricSecurityName; /** * (boolean(s)) * The "host-data" property. Note that the product overrides this setting * for stand-alone locators, which never host data. */ public static Long hostData; /** * (boolean(s)) * The "lock-memory" property. Locks both heap and off-heap memory. * <p> * When set true, required jna-3.5.1.jar in the product library to be added to * the classpath. * <p> * The heap memory locked is the amount the JVM is started with. Set -Xms the * same as -Xmx to lock all heap. */ public static Long lockMemory; /** * (int(s)) * The "log-disk-space-limit" property, in megabytes. */ public static Long logDiskSpaceLimit; /** * (int(s)) * The "log-file-size-limit" property, in megabytes. */ public static Long logFileSizeLimit; /** * (String(s)) * The "log-level" property. */ public static Long logLevel; /** * (int(s)) * The "max-num-reconnect-tries" property. */ public static Long maxNumReconnectTries; /** * (int(s)) * The "max-wait-time-reconnect" property, in milliseconds. */ public static Long maxWaitTimeForReconnect; /** * (String(s)) * The "mcast-address" property. Defaults to a random address chosen * by hydra and based on the IP protocol such that all descriptions in * the same distributed system use the same address. */ public static Long mcastAddress; /** * (boolean(s)) * Whether multicast distribution is enabled. Defaults to false. * <p> * All logical fabric server descriptions in the same {@link * #distributedSystem} must use the same value for this parameter. * If set true, hydra autogenerates a multicast port if one is not * specified in {@link #mcastPort}. 
*/ public static Long mcastDistributionEnabled; /** * (int(s)) * The byte allowance portion of the "mcast-flow-control" * property, which is used by both multicast and UDP. * Used with {@link #mcastFlowControlRechargeBlockMs} and * {@link #mcastFlowControlRechargeThreshold}. */ public static Long mcastFlowControlByteAllowance; /** * (int(s)) * The recharge block milliseconds portion of the "mcast-flow-control" * property, which is used by both multicast and UDP. * Used with {@link #mcastFlowControlByteAllowance} and * {@link #mcastFlowControlRechargeThreshold}. */ public static Long mcastFlowControlRechargeBlockMs; /** * (float(s)) * The recharge threshold portion of the "mcast-flow-control" * property, which is used by both multicast and UDP. * Used with {@link #mcastFlowControlByteAllowance} and * {@link #mcastFlowControlRechargeBlockMs}. */ public static Long mcastFlowControlRechargeThreshold; /** * (int(s)) * The "mcast-port" property. Defaults to a random available port chosen * by hydra such that all descriptions in the same distributed system use * the same port. It is set to "0" when {@link #mcastDistributionEnabled} * is false. */ public static Long mcastPort; /** * (int(s)) * The "mcast-recv-buffer-size" property, in bytes. */ public static Long mcastRecvBufferSize; /** * (int(s)) * The "mcast-send-buffer-size" property, in bytes. */ public static Long mcastSendBufferSize; /** * (int(s)) * The "mcast-ttl" property. Defaults to "0", which overrides the product * default. */ public static Long mcastTtl; /** * (Pairs of hyphen-separated int(s)) * The "membership-port-range" property. * <p> * For example: * hydra.gemfirexd.FabricServerPrms-membershipPortRange = 60000-61000; */ public static Long membershipPortRange; /** * (int(s)) * The "member-timeout" property, in milliseconds. */ public static Long memberTimeout; /** * (String(s)) * The total size of off-heap memory in the form <n>[g|m]. * <n> is the size. [g|m] indicates whether the size should be interpreted as * gigabytes or megabytes. */ public static Long offHeapMemorySize; /** * (boolean(s)) * The "persist-dd" property. Defaults to true. * <p> * If true, hydra will automatically set the "sys-disk-dir" property to a * directory named for the logical VM ID, such as vm_3_client2_disk, with * the same path as the system disk directory, which defaults to the same * path as the system directory. */ public static Long persistDD; public static boolean persistDD() { Long key = persistDD; return tasktab().booleanAt(key, tab().booleanAt(key, true)); } /** * (boolean(s)) * Whether to make indexes persistent. Defaults to true. */ public static Long persistIndexes; /** * (boolean(s)) * Whether the test intends to use persistence for queues. Defaults to false. * <p> * If true, hydra will automatically set the "sys-disk-dir" property to a * directory named for the logical VM ID, such as vm_3_client2_disk, with * the same path as the system disk directory, which defaults to the same * path as the system directory. */ public static Long persistQueues; /** * (boolean(s)) * Whether the test intends to use persistence for tables. Defaults to false. * <p> * If true, hydra will automatically set the "sys-disk-dir" property to a * directory named for the logical VM ID, such as vm_3_client2_disk, with * the same path as the system disk directory, which defaults to the same * path as the system directory. */ public static Long persistTables; /** * (boolean(s)) * Whether the server should initiate rebalance on startup. Defaults to false. 
*/ public static Long rebalance; /** * (String(s)) * The redundancy-zone property. Defaults to null. */ public static Long redundancyZone; /** * (Comma-separated list of String(s)) * Remote distributed system names to use when connecting as a locator with * {@link FabricServerHelper} in a WAN configuration. Defaults to {@link * #NONE}. This is used to set the <code>remote-locators</code> property to * the locators that have been created for each remote distributed system * with {@link FabricServerHelper#createLocator}. * <p> * Suitable test configuration functions to use with the WAN topology include * files in $JTESTS/hydraconfig/gemfirexd are: * <pre> * <code> * // for a ring-connected locator topology * fcn "hydra.TestConfigFcns.generateNamesRepeatedlyShift * (\"ds_\", ${wanSites}, ${locatorHostsPerSite}, false, true)" ncf, * none; * * // for a fully-connected locator topology * fcn "hydra.TestConfigFcns.generateNameListsRepeatedlyShift * (\"ds_\", ${wanSites}, ${locatorHostsPerSite})" ncf, * none; * </code> * </pre> */ public static Long remoteDistributedSystems; /** * (boolean(s)) * Whether to save the sys-disk-dirs in place regardless of whether * <code>moveRemoteDirs</code> is set true in BatteryTest by omitting them * from the <code>movedirs.sh</code> script. This also circumvents <code> * hydra.Prms-removeDiskFilesAfterTest</code>. Defaults to false. * <p> * This is typically used with {@link #sysDiskDirBaseMapFileName} to * generate disk files that can be used in another test. */ public static Long saveSysDiskDir; /** * (Comma-separated Lists of String(s)) * The "server-groups" property. Each logical fabric server description * can belong to multiple server groups. Can be specified as {@link #NONE}. */ public static Long serverGroups; /** * (int(s)) * The "socket-buffer-size" property, in bytes. */ public static Long socketBufferSize; /** * (int(s)) * The "socket-lease-time" property, in milliseconds. */ public static Long socketLeaseTime; /** * (int(s)) * The "statistic-sample-rate" property, in milliseconds. */ public static Long statisticSampleRate; /** * (boolean(s)) * The "statistic-sampling-enabled" property. Defaults to true, which * overrides the product default. */ public static Long statisticSamplingEnabled; /** * (String(s)) * Absolute name of a file containing a mapping of physical host names * to lists of absolute base paths to use for each sys-disk-dir. Defaults * to null (no map file is used), which uses the same path as the system * directory. * <p> * Example: * <code> * hydra.gemfirexd.FabricServerPrms-names = locatorConfig serverConfig; * hydra.gemfirexd.FabricServerPrms-sysDiskDirBaseMapFileName = * none $PWD/../diskmap.txt; * </code> * where <code>diskmap.txt</code> contains: * <code> * millet /export/millet1/users/$USER/scratchdisk * /export/millet2/users/$USER/scratchdisk * /export/millet3/users/$USER/scratchdisk * wheat /export/wheat1/users/$USER/scratchdisk * /export/wheat2/users/$USER/scratchdisk * </code> * <p> * In the example, fabric servers using locatorConfig will not configure * disk directory bases (they will default to the system directory), while * fabric servers using serverConfig and running on millet or wheat will * use one of the configured base directories. These are assigned round * robin by their logical hydra client VMID. * <p> * For example, if vm_3_server* and vm_4_server* use serverConfig and * both run on host millet, the first server will use the path for millet1 * and the second will use the path for millet2. 
* <p> * So, for example, if the test requires that each fabric server on a given * host use a different disk, the map file must at least as many paths (to * different disks) for that host as there will be servers running on that * host. In addition, the logical hydra client VMIDs need to be consecutive * to ensure each maps to a unique base path. */ public static Long sysDiskDirBaseMapFileName; /** * (boolean(s)) * Whether to expect the sys-disk-dirs to already exist. Defaults to false. * <p> * This is typically used with {@link #sysDiskDirBaseMapFileName} to use * disk files generated elsewhere. */ public static Long useExistingSysDiskDir; /** * (boolean(s)) * Whether to omit the test result directory name from the sys-disk-dir * path. Defaults to false. * <p> * This is typically used with {@link #sysDiskDirBaseMapFileName}, * {@link #saveSysDiskDirs} and/or {@link #useExistingSysDiskDirs} to * share disk files between tests. */ public static Long useGenericSysDiskDir; /** * (boolean(s)) * The "table-default-partitioned" property. Defaults to true. */ public static Long tableDefaultPartitioned; /** * (int(s)) * The "tcp-port" property. Defaults to an available port selected by * the operating system. */ public static Long tcpPort; /** * (int(s)) * The "udp-fragment-size" property, in bytes. */ public static Long udpFragmentSize; /** * (int(s)) * The "udp-recv-buffer-size" property, in bytes. */ public static Long udpRecvBufferSize; /** * (int(s)) * The "udp-send-buffer-size" property, in bytes. */ public static Long udpSendBufferSize; }
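As a usage note, tests typically read these keys through small static accessors like the persistDD() method shown above. The following rebalance() accessor is a hypothetical illustration written in the same style (it would live inside the class); it is not part of the original code.

  /** Hypothetical accessor, mirroring persistDD(): the task table is consulted
   *  first, then the configuration table, with false as the explicit default. */
  public static boolean rebalance() {
    Long key = rebalance;
    return tasktab().booleanAt(key, tab().booleanAt(key, false));
  }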
/** Applies the most recent undone commit command. */ public void redo() { if (undoneCommands.size() == 0) { return; } final Settings settings = Settings.getInstance(); final CommitCommand commitCommand = undoneCommands.remove(undoneCommands.size() - 1); final ChangeCommand[] changeCommands = commitCommand.getChangeCommands(); for (int i = changeCommands.length - 1 ; i >= 0 ; i--) { settings.setSetting(changeCommands[i].getKey(), changeCommands[i].getValue()); } appliedCommands.add(commitCommand); }
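For context, a plausible undo() counterpart is sketched below. It mirrors redo() but assumes ChangeCommand also exposes the prior value through a getOldValue() method; that method, and the order in which changes are reverted, are illustration-only assumptions that do not appear in the original code.

  /** Hypothetical mirror of redo(): reverts the most recent applied commit command. */
  public void undo() {
    if (appliedCommands.size() == 0) {
      return;
    }
    final Settings settings = Settings.getInstance();
    final CommitCommand commitCommand = appliedCommands.remove(appliedCommands.size() - 1);
    final ChangeCommand[] changeCommands = commitCommand.getChangeCommands();
    for (final ChangeCommand changeCommand : changeCommands) {
      // getOldValue() is assumed here purely for illustration
      settings.setSetting(changeCommand.getKey(), changeCommand.getOldValue());
    }
    undoneCommands.add(commitCommand);
  }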
#include "bitmap.h" #include <string> Bitmap::~Bitmap() { al_destroy_bitmap( bitmap ); bitmap = nullptr; } void Bitmap::render() { if( bitmap != nullptr ) { int w = al_get_bitmap_width( bitmap ); int h = al_get_bitmap_height( bitmap ); al_draw_scaled_bitmap( bitmap, 0, 0, w, h, x, y, scale * w, scale * h, 0 ); } } // Creates a blank transparent bitmap. void Bitmap::create( int w, int h, int flags ) { int old_flags = al_get_new_bitmap_flags(); al_set_new_bitmap_flags( old_flags | flags ); bitmap = al_create_bitmap( w, h ); ALLEGRO_BITMAP* prev = al_get_target_bitmap(); al_set_target_bitmap( bitmap ); al_clear_to_color( al_map_rgba( 0, 0, 0, 0 ) ); al_set_target_bitmap( prev ); al_set_new_bitmap_flags( old_flags ); } void Bitmap::blit( const BitmapPtr& other, float x, float y, float scale ) { ALLEGRO_BITMAP* prev = al_get_target_bitmap(); al_set_target_bitmap( bitmap ); int w = al_get_bitmap_width( other->bitmap ); int h = al_get_bitmap_height( other->bitmap ); al_draw_scaled_bitmap( other->bitmap, 0, 0, w, h, x, y, w * scale, h * scale, 0 ); al_set_target_bitmap( prev ); } bool Bitmap::loadFromFile( const std::string& filename, int flags ) { int old_flags = al_get_new_bitmap_flags(); al_set_new_bitmap_flags( old_flags | flags ); bitmap = al_load_bitmap( filename.c_str() ); al_set_new_bitmap_flags( flags ); return bitmap != nullptr; } bool Bitmap::saveToFile( const std::string& filename ) { return al_save_bitmap( filename.c_str(), bitmap ); } BitmapPtr Bitmap::getSubBitmap( int _x, int _y, int _w, int _h ) { ALLEGRO_BITMAP *sub = al_create_sub_bitmap( bitmap, _x, _y, _w, _h ); return std::make_shared<Bitmap>( sub, 0, 0 ); } void Bitmap::setBitmap( ALLEGRO_BITMAP* nb ) { bitmap = nb; } void Bitmap::setBitmap( BitmapPtr& nb ) { // Copy but don't clone. // I.e. get a new pointer nb's bitmap // that can be free'd with out causing // nb to seg fault when destructing. // We don't want to set nb's pointer to // nullptr either. al_destroy_bitmap( bitmap ); bitmap = al_create_sub_bitmap( nb->bitmap, 0, 0, al_get_bitmap_width( nb->bitmap ), al_get_bitmap_height( nb->bitmap ) ); }
<reponame>epsilonhalbe/bricolage #![feature(proc_macro, wasm_custom_section, wasm_import_module)] extern crate wasm_bindgen; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern { #[wasm_bindgen(js_namespace = console)] fn time(name: &str); #[wasm_bindgen(js_namespace = console)] fn timeEnd(name: &str); } pub struct Timer<'a> { name: &'a str, } impl<'a> Timer<'a> { pub fn new(name: &'a str) -> Timer<'a> { time(name); Timer { name } } } impl<'a> Drop for Timer<'a> { fn drop(&mut self) { timeEnd(self.name); } } #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Cell { Dead = 0, Alive = 1, } impl Cell { fn toggle(&mut self) { *self = match *self { Cell::Dead => Cell::Alive, Cell::Alive => Cell::Dead, }; } } #[wasm_bindgen] pub struct Universe { width: u32, height: u32, cells: Vec<Cell>, } /// Public methods, exported to JavaScript. #[wasm_bindgen] impl Universe { pub fn new(w:u32, h:u32) -> Universe { let width = w; let height = h; let cells = (0..height * width).map(|i| { if i < width || i % width == 0 || i % width == width-1 || width * (height-1) <= i { Cell::Alive} else {Cell::Dead} }).collect(); Universe { width, height, cells, } } pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } pub fn tick(&mut self) { // let _timer = Timer::new("Universe::tick"); let mut next = self.cells.clone(); for x in 0..self.width { for y in 0..self.height { let k = 3 * self.lvl1_neighbour_count(x, y) + self.lvl2_neighbour_count(x, y); next[self.get_index(x, y)] = match self.get(x,y) { Cell::Alive => if 6 <= k && k <= 10 { Cell::Alive } else { Cell::Dead }, Cell::Dead => if 7 <= k && k <= 9 { Cell::Alive } else { Cell::Dead }, }; } } self.cells = next; } } /// Private methods. #[wasm_bindgen] impl Universe { fn get_index(&self, x: u32, y: u32) -> usize { let x_ = if x < self.width { x } else { x - self.width }; let y_ = if y < self.height { y } else { y - self.height }; (x_ + y_ * self.width) as usize } pub fn toggle_cell(&mut self, x: u32, y: u32) { let idx = self.get_index(x, y); self.cells[idx].toggle(); } fn get(&self, x: u32, y: u32) -> Cell { self.cells[self.get_index(x,y)] } fn lvl1_neighbour_count(&self, x: u32, y: u32) -> u8 { let xx = x+self.width; let yy = y+self.height; [ (x,y+1),(x+1,y+1) ,(xx-1,y)/*(x,y)*/,(x+1,y) , (x,yy-1),(x+1,yy-1) ].iter().map(|(x_,y_)| { self.get(*x_, *y_) as u8 }).sum() } fn lvl2_neighbour_count(&self, x: u32, y: u32) -> u8 { let xx = x+self.width; let yy = y+self.height; [ (x,y+2), (xx-1,y+1), (x+2,y+1), /*(x,y)*/ (xx-1,yy-1), (x+2,yy-1), (x,yy-2) ].iter() .map(|(x_,y_)| { self.get(*x_, *y_) as u8 }).sum() } }
/** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * @param httpClient The {@link HttpClient} to use for requests. * * @return The updated {@link TableClientBuilder}. */ public TableClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.warning("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; }
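A minimal sketch of how the setter above is typically used while building a client. HttpClient.createDefault() comes from azure-core; connectionString() and buildClient() are assumed to exist on this builder (the usual shape for this builder style), and the connection string value is a placeholder.

// Illustrative wiring only; the connection string is a placeholder and the
// other builder methods are assumptions about this builder's surface.
HttpClient client = HttpClient.createDefault();
TableClient tableClient = new TableClientBuilder()
        .connectionString("<connection-string>")
        .httpClient(client)   // the setter defined above
        .buildClient();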
//BogoSort
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

bool check_sorted(int *a, int n)
{
    while ( --n >= 1 ) {
        if ( a[n] < a[n-1] )
            return false;
    }
    return true;
}

void shuffle(int *a, int n)
{
    int i, t, r;
    for (i = 0; i < n; i++) {
        t = a[i];
        r = rand() % n;
        a[i] = a[r];
        a[r] = t;
    }
}

void sort(int *a, int n)
{
    while ( !check_sorted(a, n) )
        shuffle(a, n);
}

int main()
{
    int numbers[6];
    int i;

    printf("Enter 6 unsorted numbers\n\n");
    for (i = 0; i < 6; i++) {
        scanf("%d", &numbers[i]);
    }

    sort(numbers, 6);

    for (i = 0; i < 6; i++)
        printf("%d ", numbers[i]);
    printf("\n");

    return 0;
}
/** * Creates a new {@link MdcAccessor} for interacting with the MDC represented by the given class and adapter. The * accessor's function and consumers for accessing the MDC will be represented by generated classes which are injected * into the MDC's class loader. * * @param mdcAdapter the adapter specifying the MDC's GET, PUT and REMOVE methods * @param mdcClass the class of the target MDC * @return a new {@link MdcAccessor} instance * @throws Exception in case the accessor could not be created */ private MdcAccessor createAccessor(MdcAdapter mdcAdapter, Class<?> mdcClass) throws Exception { Method getMethod = mdcAdapter.getGetMethod(mdcClass); Method putMethod = mdcAdapter.getPutMethod(mdcClass); Method removeMethod = mdcAdapter.getRemoveMethod(mdcClass); ClassInjector.ByteCodeProvider bcpPutMethod = createByteCodeProvide(BiConsumer.class, "accept", putMethod); Class<? extends BiConsumer<String, Object>> putConsumerClass = injectClass("mdc_bi_consumer", mdcClass, bcpPutMethod); BiConsumer<String, Object> putConsumer = putConsumerClass.newInstance(); ClassInjector.ByteCodeProvider bcpGetMethod = createByteCodeProvide(Function.class, "apply", getMethod); Class<? extends Function<String, Object>> getFunctionClass = injectClass("mdc_function", mdcClass, bcpGetMethod); Function<String, Object> getFunction = getFunctionClass.newInstance(); ClassInjector.ByteCodeProvider bcpRemoveMethod = createByteCodeProvide(Consumer.class, "accept", removeMethod); Class<? extends Consumer<String>> removeConsumerClass = injectClass("mdc_consumer", mdcClass, bcpRemoveMethod); Consumer<String> removeConsumer = removeConsumerClass.newInstance(); return mdcAdapter.createAccessor(new WeakReference<>(mdcClass), putConsumer, getFunction, removeConsumer); }
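To make the generated classes more concrete, here is a hand-written class that is conceptually equivalent to the injected "mdc_bi_consumer": its accept() simply forwards to the resolved static put method of the target MDC. SomeMdc is a stand-in name for illustration, not a real class from the code above.

import java.util.function.BiConsumer;

// Conceptual equivalent of the generated put consumer; the real class is
// emitted as bytecode and injected into the MDC's class loader.
public final class GeneratedPutConsumer implements BiConsumer<String, Object> {
    @Override
    public void accept(String key, Object value) {
        SomeMdc.put(key, String.valueOf(value)); // stand-in for the resolved putMethod
    }
}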
//
//  BinomialLatticeStrrategy.hpp
//  VI.6 Lattice Methods
//
//  Created by <NAME> on 2020/5/19.
//  Copyright © 2020 <NAME>. All rights reserved.
//

// Strategy pattern for creating a binomial lattice. Note that there is a Template
// Method pattern built in here. For convenience all data is public and all code is
// inline. Furthermore, classes have minimal functionality. We can make production
// code from this at a later stage.

#ifndef BinomialLatticeStrrategy_hpp
#define BinomialLatticeStrrategy_hpp

#include "Lattice.hpp"
#include <cmath>

// An enumeration class
enum BinomialType {Additive, Multiplicative};

class BinomialLatticeStrategy{
protected:
    double u;   // upward move
    double d;   // downward move
    double p;   // probability of an upward move
    double r;   // interest rate
    double v;   // volatility
    double dt;  // time step

    BinomialType bType;  // additive or multiplicative lattice

    // Constructor
    BinomialLatticeStrategy(double interest, double vol, double deltaT);

public:
    // Modifiers
    virtual void updateLattice(Lattice<double, int, 2> & source, double rootValue) const;

    // Accessors
    // Public inline functions for normal clients
    double upValue() const {return u;}
    double downValue() const {return d;}
    double probValue() const {return p;}

    BinomialType binomialType () const {return bType;}
};

// Method 1
class CoxRossRubinStrategy: public BinomialLatticeStrategy{
public:
    CoxRossRubinStrategy(double interest, double vol, double deltaT);
};

// Method 2
class PadeCRRStrategy: public BinomialLatticeStrategy{
public:
    PadeCRRStrategy(double interest, double vol, double deltaT);
};

// Method 3
class ModCRRStrategy: public BinomialLatticeStrategy{
public:
    ModCRRStrategy(double interest, double vol, double deltaT, double S, double K, int N);
};

// Method 4
class JarrowRuddStrategy: public BinomialLatticeStrategy{
public:
    JarrowRuddStrategy(double interest, double vol, double deltaT);
};

// Method 5
class PadeJRRStrategy: public BinomialLatticeStrategy{
public:
    PadeJRRStrategy(double interest, double vol, double deltaT);
};

// Method 6
class EQPStrategy: public BinomialLatticeStrategy{
public:
    EQPStrategy(double interest, double vol, double deltaT);
};

// Method 7
class TRGStrategy: public BinomialLatticeStrategy{
public:
    TRGStrategy(double interest, double vol, double deltaT);
};

#endif /* BinomialLatticeStrrategy_hpp */
//Get a env variable with key, "" will return if not exist func (e *Environment) Get(key string) string { e.RLock() defer e.RUnlock() if value, ok := e.data[key]; ok { return value } return "" }
/* * Code to run REPEATEDLY after the driver hits PLAY but before they hit STOP */ @Override public void loop() { float left; float right; float arm; float leftservo; float rightservo; float front_right; float front_left; left = gamepad1.left_stick_y; right = -gamepad1.right_stick_y; arm = -gamepad2.left_stick_x; front_left = gamepad2.left_stick_y; front_right = gamepad2.right_stick_y; robot.leftDrive.setPower(left); robot.rightDrive.setPower(right); robot.arm.setPower(arm); robot.front_left.setPower(front_left); robot.front_right.setPower(front_right); telemetry.addData("left", "%.2f", left); }
I was just about to leave for my first big tango festival when a message popped up on my phone. It was from a skilled dancer raving over the amazing followers and how every dance was the “best one ever”. Then he said how excited he was to dance with me that afternoon. My excitement shifted – without a clutch – to anxiety. Was I going to be totally out of my league? I had been working on my tango but I still had much to learn. I didn’t want to disappoint any kind-hearted, unsuspecting leads who took a chance on an unknown girl. First impressions matter – especially in tango. That weekend, I felt this anxiety pulsing through my body with every guy who asked me to dance. Sure, the dances were amazing for me, but were they amazing for my partners? Because I truly cared about that. My focus when dancing is on giving and creating. I seek to give perfect balance, timing and responsiveness. I seek to create a moment with my partner that leaves them feeling awe over what just transpired between us and the music. That level of giving and creating takes time and work to master. Meanwhile, I wrestle with the insecurity of knowing that I’m not there yet. When I began tango I discovered something terribly awkward. With it’s complex technique, requisite intimacy and demand for total vulnerability, tango makes people insecure. Could I get truly comfortable with being raw, vulnerable and (gasp) – imperfect in this unforgiving dance? I could handle being raw and vulnerable – hey, I was once naked on stage. But the idea of people politely suffering through dances with me while making mental notes to avoid me for the next decade was unacceptable. I am secretly obsessed with how I feel to my partner. I never want a lead to feel burdened by a lack of balance, or thrown off by bad timing or wonder how to control something that doesn’t listen and moves on auto-pilot. The insecurity is a result of how much I care about how I affect my partner and what we are collaboratively seeking to do. Insecurity drove me to action. Therefore, I work regularly with a pro. I insist he is brutally honest when training me. I attend weekly practicas (and probably annoy the leads with how much I ask how something felt or what would make it feel better). I ask for specific feedback. I assume nothing because I’ve been surprised in the past. Insecurity has kept me open to growing. It drives me to root out and fix everything that doesn’t feel good to a partner. Insecurity drives me to take an experience and seek ways to make it better. This all served to help build greater confidence. However…. Confidence carries an ugly risk – assumptions. Sometimes we get so comfortable or confident that we get sloppy over time without realizing it. Or we think we know more than we truly do. A “good” embrace isn’t the same as a “phenomenal” embrace. Everything can be done better with new layers of technique. As I develop confidence in an area, I keep it on my radar to check regularly with practice partners and my pro. I’ve grown sloppy two weeks later on something I thought I had nailed down. So perhaps a dash of insecurity is a good thing after all… something to keep me humble and driven to stay on top of my game. A few weeks ago, I travelled to a festival out of state and had a drastically different experience. I felt confident. I felt humble. And even though I felt that tinge of insecurity, this time I knew what to do with it. I embraced it. Related: Tango isn’t for Everyone & The 9th Way to Get Asked to Dance.
/** * This method always returns immediately for the raw consumer because it just streams. * * @param timeout The amount of time in milliseconds to block until returning even if the stream * has not started. * @return <i>true</i> if the consumer is currently streaming. */ public boolean isStreaming(long timeout) { try { int segments = 5; long increment = timeout / segments; if (increment < 1000) { Thread.sleep(timeout); } else { while (segments-- > 0 && getBytesStreamed() > 1048576) { Thread.sleep(increment); } } } catch (InterruptedException e) { logger.debug("Interrupted while waiting for streaming."); } return true; }
/*-
 * Copyright (c) 2003-2017 Lev Walkin <[email protected]>. All rights reserved.
 * Redistribution and modifications are permitted subject to BSD license.
 */
#ifndef	_CONSTR_SEQUENCE_OF_H_
#define	_CONSTR_SEQUENCE_OF_H_

#include <asn_application.h>
#include <constr_SET_OF.h>	/* Implemented using SET OF */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * A set of specialized functions dealing with the SEQUENCE OF type.
 * Generally implemented using SET OF.
 */
asn_struct_compare_f SEQUENCE_OF_compare;

der_type_encoder_f SEQUENCE_OF_encode_der;
xer_type_encoder_f SEQUENCE_OF_encode_xer;
per_type_encoder_f SEQUENCE_OF_encode_uper;
per_type_encoder_f SEQUENCE_OF_encode_aper;

extern asn_TYPE_operation_t asn_OP_SEQUENCE_OF;

#define SEQUENCE_OF_free         SET_OF_free
#define SEQUENCE_OF_print        SET_OF_print
#define SEQUENCE_OF_constraint   SET_OF_constraint
#define SEQUENCE_OF_decode_ber   SET_OF_decode_ber
#define SEQUENCE_OF_decode_xer   SET_OF_decode_xer
#define SEQUENCE_OF_decode_oer   SET_OF_decode_oer
#define SEQUENCE_OF_encode_oer   SET_OF_encode_oer
#define SEQUENCE_OF_decode_uper  SET_OF_decode_uper
#define SEQUENCE_OF_decode_aper  SET_OF_decode_aper
#define SEQUENCE_OF_random_fill  SET_OF_random_fill

#ifdef __cplusplus
}
#endif

#endif	/* _CONSTR_SEQUENCE_OF_H_ */
<gh_stars>0 /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.queries.intervals; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchesIterator; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; import org.junit.AfterClass; import org.junit.BeforeClass; public class TestIntervals extends LuceneTestCase { // 0 1 2 3 4 5 6 7 8 9 // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 private static String field1_docs[] = { "Nothing of interest to anyone here", "Pease porridge hot, pease porridge cold, pease porridge in the pot nine days old. Some like it hot, some like it cold, some like it in the pot nine days old", "Pease porridge cold, pease porridge hot, pease porridge in the pot twelve days old. Some like it cold, some like it hot, some like it in the fraggle", "Nor here, nowt hot going on in pease this one", "Pease porridge hot, pease porridge cold, pease porridge in the pot nine years old. 
Some like it hot, some like it twelve", "Porridge is great" }; // 0 1 2 3 4 5 6 7 8 9 // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 private static String field2_docs[] = { "In Xanadu did Kubla Khan a stately pleasure dome decree", "Where Alph the sacred river ran through caverns measureless to man", "a b a c b a b c", "So thrice five miles of fertile ground", "Pease hot porridge porridge", "w1 w2 w3 w4 w1 w6 w3 w8 w4 w7 w1 w6" }; private static Directory directory; private static IndexSearcher searcher; private static Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET); private static final FieldType FIELD_TYPE = new FieldType(TextField.TYPE_STORED); static { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); } @BeforeClass public static void setupIndex() throws IOException { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < field1_docs.length; i++) { Document doc = new Document(); doc.add(new Field("field1", field1_docs[i], FIELD_TYPE)); doc.add(new Field("field2", field2_docs[i], FIELD_TYPE)); doc.add(new StringField("id", Integer.toString(i), Field.Store.NO)); doc.add(new NumericDocValuesField("id", i)); writer.addDocument(doc); } writer.close(); searcher = new IndexSearcher(DirectoryReader.open(directory)); } @AfterClass public static void teardownIndex() throws IOException { IOUtils.close(searcher.getIndexReader(), directory); } private void checkIntervals(IntervalsSource source, String field, int expectedMatchCount, int[][] expected) throws IOException { int matchedDocs = 0; for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { assertNull(source.intervals(field + "fake", ctx)); NumericDocValues ids = DocValues.getNumeric(ctx.reader(), "id"); IntervalIterator intervals = source.intervals(field, ctx); if (intervals == null) continue; for (int doc = 0; doc < ctx.reader().maxDoc(); doc++) { ids.advance(doc); MatchesIterator mi = source.matches(field, ctx, doc); int id = (int) ids.longValue(); if (intervals.docID() == doc || (intervals.docID() < doc && intervals.advance(doc) == doc)) { int i = 0, pos; assertEquals(-1, intervals.start()); assertEquals(-1, intervals.end()); while ((pos = intervals.nextInterval()) != IntervalIterator.NO_MORE_INTERVALS) { if (i >= expected[id].length) { fail("Unexpected match in doc " + id + ": " + intervals); } assertEquals(source + ": wrong start value in doc " + id, expected[id][i], pos); assertEquals("start() != pos returned from nextInterval()", expected[id][i], intervals.start()); assertEquals("Wrong end value in doc " + id, expected[id][i + 1], intervals.end()); i += 2; assertTrue(mi.next()); assertEquals(source + ": wrong start value in match in doc " + id, intervals.start(), mi.startPosition()); assertEquals(source + ": wrong end value in match in doc " + id, intervals.end(), mi.endPosition()); } assertEquals(source + ": wrong number of endpoints in doc " + id, expected[id].length, i); assertEquals(IntervalIterator.NO_MORE_INTERVALS, intervals.start()); assertEquals(IntervalIterator.NO_MORE_INTERVALS, intervals.end()); if (i > 0) { matchedDocs++; assertFalse(mi.next()); } else { assertNull("Expected null matches iterator on doc " + id, mi); } } else { assertEquals(0, expected[id].length); assertNull(mi); } } } assertEquals(expectedMatchCount, matchedDocs); } private void 
checkVisits(IntervalsSource source, int expectedVisitCount, String... expectedTerms) { Set<String> actualTerms = new HashSet<>(); int[] visitedSources = new int[1]; source.visit("field", new QueryVisitor() { @Override public void consumeTerms(Query query, Term... terms) { visitedSources[0]++; actualTerms.addAll(Arrays.stream(terms).map(Term::text).collect(Collectors.toList())); } @Override public void visitLeaf(Query query) { visitedSources[0]++; super.visitLeaf(query); } @Override public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) { visitedSources[0]++; return super.getSubVisitor(occur, parent); } }); Set<String> expectedSet = new HashSet<>(Arrays.asList(expectedTerms)); expectedSet.removeAll(actualTerms); actualTerms.removeAll(Arrays.asList(expectedTerms)); assertEquals(expectedVisitCount, visitedSources[0]); assertTrue("Unexpected terms collected: " + actualTerms, actualTerms.isEmpty()); assertTrue("Missing expected terms: " + expectedSet, expectedSet.isEmpty()); } private MatchesIterator getMatches(IntervalsSource source, int doc, String field) throws IOException { int ord = ReaderUtil.subIndex(doc, searcher.getIndexReader().leaves()); LeafReaderContext ctx = searcher.getIndexReader().leaves().get(ord); return source.matches(field, ctx, doc - ctx.docBase); } private void assertMatch(MatchesIterator mi, int start, int end, int startOffset, int endOffset) throws IOException { assertTrue(mi.next()); assertEquals(start, mi.startPosition()); assertEquals(end, mi.endPosition()); assertEquals(startOffset, mi.startOffset()); assertEquals(endOffset, mi.endOffset()); } private void assertGaps(IntervalsSource source, int doc, String field, int[] expectedGaps) throws IOException { int ord = ReaderUtil.subIndex(doc, searcher.getIndexReader().leaves()); LeafReaderContext ctx = searcher.getIndexReader().leaves().get(ord); IntervalIterator it = source.intervals(field, ctx); doc = doc - ctx.docBase; assertEquals(doc, it.advance(doc)); for (int expectedGap : expectedGaps) { if (it.nextInterval() == IntervalIterator.NO_MORE_INTERVALS) { fail("Unexpected interval " + it); } assertEquals(expectedGap, it.gaps()); } } public void testIntervalsOnFieldWithNoPositions() throws IOException { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { Intervals.term("wibble").intervals("id", searcher.getIndexReader().leaves().get(0)); }); assertEquals("Cannot create an IntervalIterator over field id because it has no indexed positions", e.getMessage()); } public void testTermQueryIntervals() throws IOException { IntervalsSource source = Intervals.term("porridge"); checkIntervals(source, "field1", 4, new int[][]{ {}, { 1, 1, 4, 4, 7, 7 }, { 1, 1, 4, 4, 7, 7 }, {}, { 1, 1, 4, 4, 7, 7 }, { 0, 0 } }); assertNull(getMatches(source, 0, "field1")); assertNull(getMatches(source, 2, "no_such_field")); MatchesIterator mi = getMatches(source, 2, "field1"); assertMatch(mi, 1, 1, 6, 14); final TermQuery porridge = new TermQuery(new Term("field1","porridge")); assertEquals(porridge, mi.getQuery()); assertMatch(mi, 4, 4, 27, 35); assertEquals(porridge, mi.getQuery()); assertMatch(mi, 7, 7, 47, 55); assertEquals(porridge, mi.getQuery()); assertFalse(mi.next()); assertEquals(1, source.minExtent()); checkVisits(source, 1, "porridge"); } public void testOrderedNearIntervals() throws IOException { IntervalsSource source = Intervals.ordered(Intervals.term("pease"), Intervals.term("hot")); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 2, 6, 17 }, { 3, 5, 6, 21 }, {}, { 0, 2, 6, 
17 }, { } }); assertNull(getMatches(source, 3, "field1")); MatchesIterator mi = getMatches(source, 4, "field1"); assertMatch(mi, 0, 2, 0, 18); MatchesIterator sub = mi.getSubMatches(); assertMatch(sub, 0, 0, 0, 5); assertMatch(sub, 2, 2, 15, 18); assertFalse(sub.next()); assertMatch(mi, 6, 17, 41, 100); sub = mi.getSubMatches(); assertMatch(sub, 6, 6, 41, 46); assertMatch(sub, 17, 17, 97, 100); assertFalse(sub.next()); assertFalse(mi.next()); assertEquals(2, source.minExtent()); checkVisits(source, 3, "pease", "hot"); } public void testOrderedNearWithDuplicates() throws IOException { IntervalsSource source = Intervals.ordered(Intervals.term("pease"), Intervals.term("pease"), Intervals.term("porridge")); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 4, 3, 7 }, { 0, 4, 3, 7 }, {}, { 0, 4, 3, 7 }, {} }); assertGaps(source, 1, "field1", new int[]{ 2, 2 }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 4, 0, 34); MatchesIterator sub = mi.getSubMatches(); assertNotNull(sub); assertMatch(sub, 0, 0, 0, 5); assertMatch(sub, 3, 3, 20, 25); assertMatch(sub, 4, 4, 26, 34); assertMatch(mi, 3, 7, 20, 55); assertFalse(mi.next()); } public void testPhraseIntervals() throws IOException { IntervalsSource source = Intervals.phrase("pease", "porridge"); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 1, 3, 4, 6, 7 }, { 0, 1, 3, 4, 6, 7 }, {}, { 0, 1, 3, 4, 6, 7 }, {} }); assertNull(getMatches(source, 0, "field1")); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 1, 0, 14); assertMatch(mi, 3, 4, 20, 34); MatchesIterator sub = mi.getSubMatches(); assertMatch(sub, 3, 3, 20, 25); assertEquals(new TermQuery( new Term("field1", "pease")), sub.getQuery()); assertMatch(sub, 4, 4, 26, 34); assertEquals(new TermQuery( new Term("field1", "porridge")), sub.getQuery()); assertFalse(sub.next()); assertMatch(mi, 6, 7, 41, 55); sub = mi.getSubMatches(); assertTrue(sub.next()); assertEquals(new TermQuery( new Term("field1", "pease")), sub.getQuery()); assertTrue(sub.next()); assertEquals(new TermQuery( new Term("field1", "porridge")), sub.getQuery()); assertFalse(sub.next()); assertEquals(2, source.minExtent()); checkVisits(source, 3, "pease", "porridge"); } public void testUnorderedNearIntervals() throws IOException { IntervalsSource source = Intervals.unordered(Intervals.term("pease"), Intervals.term("hot")); checkIntervals(source, "field1", 4, new int[][]{ {}, { 0, 2, 2, 3, 6, 17 }, { 3, 5, 5, 6, 6, 21 }, { 3, 7 }, { 0, 2, 2, 3, 6, 17 }, {} }); assertNull(getMatches(source, 0, "field1")); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 2, 0, 18); assertMatch(mi, 2, 3, 15, 25); assertMatch(mi, 6, 17, 41, 99); MatchesIterator sub = mi.getSubMatches(); assertMatch(sub, 6, 6, 41, 46); assertMatch(sub, 17, 17, 96, 99); assertFalse(sub.next()); assertFalse(mi.next()); assertGaps(source, 1, "field1", new int[]{ 1, 0, 10 }); assertEquals(2, source.minExtent()); checkVisits(source, 3, "pease", "hot"); } public void testUnorderedWithRepeats() throws IOException { IntervalsSource source = Intervals.unordered(Intervals.term("pease"), Intervals.term("pease"), Intervals.term("hot")); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 3, 2, 6, 3, 17 }, { 0, 5, 3, 6 }, {}, { 0, 3, 2, 6, 3, 17 }, {} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 3, 0, 25); MatchesIterator sub = mi.getSubMatches(); assertNotNull(sub); assertMatch(sub, 0, 0, 0, 5); assertMatch(sub, 2, 2, 15, 18); assertMatch(sub, 3, 3, 20, 
25); } public void testUnorderedWithRepeatsAndMaxGaps() throws IOException { IntervalsSource source = Intervals.maxgaps(2, Intervals.unordered(Intervals.term("pease"), Intervals.term("pease"), Intervals.term("hot"))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 3, 2, 6 }, { 3, 6 }, {}, { 0, 3, 2, 6 }, {} }); } public void testIntervalDisjunctionToStringStability() { /* Sanity check that the subclauses of a disjunction are presented in sorted order via the toString() method. The exact order is irrelevant, but ensuring stability of output makes the output more useful; e.g., for external comparison across different JVMs, etc... */ final int size = random().nextInt(22) + 4; // ensure a reasonably large minimum number of clauses final String[] terms = new String[size]; for (int i = 0; i < size; i++) { terms[i] = Character.toString((char) ('a' + i)); } final String expected = Arrays.stream(terms).collect(Collectors.joining(",", "or(", ")")); /* NOTE: shuffling below shouldn't matter at the moment (because the disjunction subSources are destined for a HashMap, so will be reordered anyway); but it might matter if the internal implementation of DisjunctionIntervalsSource changes. */ Collections.shuffle(Arrays.asList(terms), random()); IntervalsSource source = Intervals.or( Arrays.stream(terms) .map((term) -> Intervals.term(term)) .toArray((sz) -> new IntervalsSource[sz])); assertEquals(expected, source.toString()); } public void testIntervalDisjunction() throws IOException { IntervalsSource source = Intervals.or(Intervals.term("pease"), Intervals.term("hot"), Intervals.term("notMatching")); checkIntervals(source, "field1", 4, new int[][]{ {}, { 0, 0, 2, 2, 3, 3, 6, 6, 17, 17}, { 0, 0, 3, 3, 5, 5, 6, 6, 21, 21}, { 3, 3, 7, 7 }, { 0, 0, 2, 2, 3, 3, 6, 6, 17, 17}, {} }); assertNull(getMatches(source, 0, "field1")); MatchesIterator mi = getMatches(source, 3, "field1"); assertMatch(mi, 3, 3, 15, 18); assertEquals(new TermQuery(new Term("field1","hot")), mi.getQuery()); assertNull(mi.getSubMatches()); assertMatch(mi, 7, 7, 31, 36); assertEquals(new TermQuery(new Term("field1","pease")), mi.getQuery()); assertNull(mi.getSubMatches()); assertFalse(mi.next()); assertEquals(1, source.minExtent()); checkVisits(source, 4, "pease", "hot", "notMatching"); } public void testCombinationDisjunction() throws IOException { IntervalsSource source = Intervals.ordered( Intervals.or(Intervals.term("alph"), Intervals.term("sacred")), Intervals.term("measureless") ); checkIntervals(source, "field2", 1, new int[][]{ {}, { 3, 8 }, {}, {}, {}, {} }); assertEquals(2, source.minExtent()); checkVisits(source, 5, "alph", "sacred", "measureless"); } public void testNesting() throws IOException { IntervalsSource source = Intervals.unordered( Intervals.term("pease"), Intervals.term("porridge"), Intervals.or(Intervals.term("hot"), Intervals.term("cold"))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 17 }, { 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 17 }, {}, { 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 17 }, {} }); assertEquals(3, source.minExtent()); assertNull(getMatches(source, 0, "field1")); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 2, 0, 18); assertMatch(mi, 1, 3, 6, 25); assertMatch(mi, 2, 4, 15, 34); assertMatch(mi, 3, 5, 20, 39); MatchesIterator sub = mi.getSubMatches(); assertMatch(sub, 3, 3, 20, 25); assertMatch(sub, 4, 4, 26, 34); assertMatch(sub, 5, 5, 35, 39); assertFalse(sub.next()); assertMatch(mi, 4, 6, 26, 46); assertMatch(mi, 5, 
7, 35, 55); assertMatch(mi, 6, 17, 41, 99); assertFalse(mi.next()); assertGaps(source, 1, "field1", new int[]{ 0, 0, 0, 0, 0, 0, 9 }); } public void testOffsetIntervals() throws IOException { IntervalsSource source = Intervals.unordered( Intervals.term("pease"), Intervals.term("porridge"), Intervals.or(Intervals.term("hot"), Intervals.term("cold"))); IntervalsSource before = new OffsetIntervalsSource(source, true); checkIntervals(before, "field1", 3, new int[][]{ {}, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5 }, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5 }, {}, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5 }, {} }); IntervalsSource after = new OffsetIntervalsSource(source, false); checkIntervals(after, "field1", 3, new int[][]{ {}, { 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 18, 18 }, { 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 18, 18 }, {}, { 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 18, 18 }, {} }); checkVisits(before, 7, "pease", "porridge", "hot", "cold"); } public void testNesting2() throws IOException { IntervalsSource source = Intervals.unordered( Intervals.ordered( Intervals.term("like"), Intervals.term("it"), Intervals.term("cold") ), Intervals.term("pease") ); checkIntervals(source, "field1", 2, new int[][]{ {}, {6, 21}, {6, 17}, {}, {}, {} }); assertNull(getMatches(source, 0, "field1")); MatchesIterator it = getMatches(source, 1, "field1"); assertMatch(it, 6, 21, 41, 118); MatchesIterator sub = it.getSubMatches(); assertMatch(sub, 6, 6, 41, 46); assertEquals(new TermQuery(new Term("field1", "pease")), sub.getQuery()); assertMatch(sub, 19, 19, 106, 110); assertEquals(new TermQuery(new Term("field1", "like")), sub.getQuery()); assertMatch(sub, 20, 20, 111, 113); assertEquals(new TermQuery(new Term("field1", "it")),sub.getQuery()); assertMatch(sub, 21, 21, 114, 118); assertEquals(new TermQuery(new Term("field1", "cold")),sub.getQuery()); assertFalse(sub.next()); assertFalse(it.next()); assertEquals(4, source.minExtent()); } public void testInterleavedOrdered() throws IOException { IntervalsSource source = Intervals.ordered(Intervals.term("a"), Intervals.term("b"), Intervals.term("c")); checkIntervals(source, "field2", 1, new int[][]{ {}, {}, { 0, 3, 5, 7 }, {}, {}, {} }); assertGaps(source, 2, "field2", new int[]{ 1, 0 }); } public void testUnorderedDistinct() throws IOException { checkIntervals(Intervals.unorderedNoOverlaps(Intervals.term("pease"), Intervals.term("pease")), "field1", 3, new int[][]{ {}, { 0, 3, 3, 6 }, { 0, 3, 3, 6 }, {}, { 0, 3, 3, 6 }, {} }); checkIntervals(Intervals.unorderedNoOverlaps( Intervals.unordered(Intervals.term("pease"), Intervals.term("porridge"), Intervals.term("hot")), Intervals.term("porridge")), "field1", 3, new int[][]{ {}, { 1, 4, 2, 7, 4, 17 }, { 1, 5, 4, 7 }, {}, { 1, 4, 2, 7, 4, 17 }, {} }); checkIntervals(Intervals.unorderedNoOverlaps( Intervals.unordered(Intervals.term("pease"), Intervals.term("porridge"), Intervals.term("hot")), Intervals.term("porridge")), "field2", 1, new int[][]{ {}, {}, {}, {}, { 0, 3 }, {} }); IntervalsSource source = Intervals.unorderedNoOverlaps( Intervals.term("porridge"), Intervals.unordered(Intervals.term("pease"), Intervals.term("porridge"))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 1, 4, 4, 7 }, { 1, 4, 4, 7 }, {}, { 1, 4, 4, 7 }, {} }); // automatic rewrites mean that we end up with 11 sources to visit checkVisits(source, 11, "porridge", "pease"); } public void testContainedBy() throws IOException { IntervalsSource source = Intervals.containedBy( Intervals.term("porridge"), 
Intervals.ordered(Intervals.term("pease"), Intervals.term("cold")) ); checkIntervals(source, "field1", 3, new int[][]{ {}, { 4, 4, 7, 7 }, { 1, 1, 7, 7 }, {}, { 4, 4 }, {} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 4, 4, 20, 39); MatchesIterator subs = mi.getSubMatches(); assertMatch(subs, 3, 3, 20, 25); assertMatch(subs, 4, 4, 26, 34); assertMatch(subs, 5, 5, 35, 39); assertFalse(subs.next()); assertMatch(mi, 7, 7, 41, 118); subs = mi.getSubMatches(); assertMatch(subs, 6, 6, 41, 46); assertMatch(subs, 7, 7, 47, 55); assertMatch(subs, 21, 21, 114, 118); assertFalse(subs.next()); assertFalse(mi.next()); assertEquals(1, source.minExtent()); checkVisits(source, 5, "porridge", "pease", "cold"); } public void testContaining() throws IOException { IntervalsSource source = Intervals.containing( Intervals.ordered(Intervals.term("pease"), Intervals.term("cold")), Intervals.term("porridge") ); checkIntervals(source, "field1", 3, new int[][]{ {}, { 3, 5, 6, 21 }, { 0, 2, 6, 17 }, {}, { 3, 5 }, {} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 3, 5, 20, 39); MatchesIterator subs = mi.getSubMatches(); assertMatch(subs, 3, 3, 20, 25); assertMatch(subs, 4, 4, 26, 34); assertMatch(subs, 5, 5, 35, 39); assertFalse(subs.next()); assertMatch(mi, 6, 21, 41, 118); subs = mi.getSubMatches(); assertMatch(subs, 6, 6, 41, 46); assertMatch(subs, 7, 7, 47, 55); assertMatch(subs, 21, 21, 114, 118); assertFalse(subs.next()); assertFalse(mi.next()); assertEquals(2, source.minExtent()); } public void testNotContaining() throws IOException { IntervalsSource source = Intervals.notContaining( Intervals.ordered(Intervals.term("porridge"), Intervals.term("pease")), Intervals.term("hot") ); checkIntervals(source, "field1", 3, new int[][]{ {}, { 4, 6 }, { 1, 3 }, {}, { 4, 6 }, {} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 4, 6, 26, 46); MatchesIterator subs = mi.getSubMatches(); assertMatch(subs, 4, 4, 26, 34); assertMatch(subs, 6, 6, 41, 46); assertFalse(subs.next()); assertFalse(mi.next()); assertEquals(2, source.minExtent()); } public void testMaxGaps() throws IOException { IntervalsSource source = Intervals.maxgaps(1, Intervals.unordered(Intervals.term("w1"), Intervals.term("w3"), Intervals.term("w4"))); checkIntervals(source, "field2", 1, new int[][]{ {}, {}, {}, {}, {}, { 0, 3, 2, 4, 3, 6 } }); MatchesIterator mi = getMatches(source, 5, "field2"); assertMatch(mi, 0, 3, 0, 11); assertEquals(3, source.minExtent()); assertEquals(source, source); assertEquals(source, Intervals.maxgaps(1, Intervals.unordered(Intervals.term("w1"), Intervals.term("w3"), Intervals.term("w4")))); assertNotEquals(source, Intervals.maxgaps(2, Intervals.unordered(Intervals.term("w1"), Intervals.term("w3"), Intervals.term("w4")))); } public void testMaxGapsWithRepeats() throws IOException { IntervalsSource source = Intervals.maxgaps(11, Intervals.ordered(Intervals.term("pease"), Intervals.term("pease"), Intervals.term("hot"))); checkIntervals(source, "field1", 1, new int[][]{ {}, {}, { 0, 5 }, {}, {}, {} }); assertGaps(source, 2, "field1", new int[]{ 3 }); } public void testMaxGapsWithOnlyRepeats() throws IOException { IntervalsSource source = Intervals.maxgaps(1, Intervals.ordered( Intervals.or(Intervals.term("pease"), Intervals.term("hot")), Intervals.or(Intervals.term("pease"), Intervals.term("hot")) )); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 2, 2, 3 }, { 3, 5, 5, 6 }, {}, { 0, 2, 2, 3 }, {} }); assertGaps(source, 1, "field1", new int[]{ 1, 
0 }); } public void testNestedMaxGaps() throws IOException { IntervalsSource source = Intervals.maxgaps(1, Intervals.unordered( Intervals.ordered(Intervals.term("w1"), Intervals.term("w3")), Intervals.term("w4") )); checkIntervals(source, "field2", 1, new int[][]{ {}, {}, {}, {}, {}, { 0, 3, 3, 6, 4, 8 } }); assertGaps(source, 5, "field2", new int[]{ 0, 0, 1 }); MatchesIterator mi = getMatches(source, 5, "field2"); assertMatch(mi, 0, 3, 0, 11); assertMatch(mi, 3, 6, 9, 20); assertMatch(mi, 4, 8, 12, 26); assertEquals(3, source.minExtent()); } public void testMinimumShouldMatch() throws IOException { IntervalsSource source = Intervals.atLeast(3, Intervals.term("porridge"), Intervals.term("hot"), Intervals.term("twelve"), Intervals.term("nine"), Intervals.term("pease")); checkIntervals(source, "field1", 3, new int[][]{ {}, {0, 2, 1, 3, 2, 4, 6, 11, 7, 17}, {3, 5, 4, 6, 5, 7, 6, 11, 7, 21}, {}, {0, 2, 1, 3, 2, 4, 6, 11, 7, 17, 11, 21}, {} }); assertGaps(source, 1, "field1", new int[]{0, 0, 0, 3, 8}); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 0, 2, 0, 18); MatchesIterator subs = mi.getSubMatches(); assertNotNull(subs); assertMatch(subs, 0, 0, 0, 5); assertMatch(subs, 1, 1, 6, 14); assertMatch(subs, 2, 2, 15, 18); assertFalse(subs.next()); assertTrue(mi.next()); assertTrue(mi.next()); assertMatch(mi, 6, 11, 41, 71); subs = mi.getSubMatches(); assertMatch(subs, 6, 6, 41, 46); assertMatch(subs, 7, 7, 47, 55); assertMatch(subs, 11, 11, 67, 71); assertEquals(3, source.minExtent()); } public void testDegenerateMinShouldMatch() throws IOException { IntervalsSource source = Intervals.ordered( Intervals.atLeast(1, Intervals.term("interest")), Intervals.atLeast(1, Intervals.term("anyone"))); MatchesIterator mi = getMatches(source, 0, "field1"); assertMatch(mi, 2, 4, 11, 29); MatchesIterator subs = mi.getSubMatches(); assertNotNull(subs); assertMatch(subs, 2, 2, 11, 19); assertMatch(subs, 4, 4, 23, 29); assertFalse(subs.next()); assertFalse(mi.next()); } public void testNoMatchMinShouldMatch() throws IOException { IntervalsSource source = Intervals.atLeast(4, Intervals.term("a"), Intervals.term("b")); checkIntervals(source, "field", 0, new int[][] {}); } public void testDefinedGaps() throws IOException { IntervalsSource source = Intervals.phrase( Intervals.term("pease"), Intervals.extend(Intervals.term("cold"), 1, 1), Intervals.term("porridge") ); checkIntervals(source, "field1", 3, new int[][]{ {}, { 3, 7 }, { 0, 4 }, {}, { 3, 7 }, {} }); assertEquals(5, source.minExtent()); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 3, 7, 20, 55); MatchesIterator sub = mi.getSubMatches(); assertNotNull(sub); assertMatch(sub, 3, 3, 20, 25); assertMatch(sub, 4, 6, 35, 39); assertMatch(sub, 7, 7, 47, 55); source = Intervals.extend(Intervals.term("w1"), 5, Integer.MAX_VALUE); checkIntervals(source, "field2", 1, new int[][]{ {}, {}, {}, {}, {}, { 0, Integer.MAX_VALUE - 1, 0, Integer.MAX_VALUE - 1, 5, Integer.MAX_VALUE - 1 } }); assertEquals(Integer.MAX_VALUE, source.minExtent()); } public void testAfter() throws IOException { IntervalsSource source = Intervals.after(Intervals.term("porridge"), Intervals.ordered(Intervals.term("pease"), Intervals.term("cold"))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 7, 7 }, { 4, 4, 7, 7 }, {}, { 7, 7 }, {} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertMatch(mi, 7, 7, 20, 55); MatchesIterator sub = mi.getSubMatches(); assertNotNull(sub); assertMatch(sub, 3, 3, 20, 25); assertMatch(sub, 5, 5, 35, 39); 
assertMatch(sub, 7, 7, 47, 55); assertFalse(sub.next()); assertEquals(1, source.minExtent()); } public void testBefore() throws IOException { IntervalsSource source = Intervals.before(Intervals.term("cold"), Intervals.term("twelve")); checkIntervals(source, "field1", 2, new int[][]{ {}, {}, { 2, 2 }, {}, { 5, 5 }, {} }); assertEquals(1, source.minExtent()); } public void testWithin() throws IOException { IntervalsSource source = Intervals.within(Intervals.term("hot"), 6, Intervals.or(Intervals.term("porridge"), Intervals.term("fraggle"))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 2, 2 }, { 5, 5, 21, 21 }, {}, { 2, 2 }, {} }); assertEquals(1, source.minExtent()); } public void testOverlapping() throws IOException { IntervalsSource source = Intervals.overlapping( Intervals.unordered(Intervals.term("hot"), Intervals.term("porridge")), Intervals.unordered(Intervals.term("cold"), Intervals.term("pease")) ); checkIntervals(source, "field1", 3, new int[][]{ {}, { 2, 4, 7, 17 }, { 5, 7, 7, 21 }, {}, { 2, 4 }, {} }); assertGaps(source, 2, "field1", new int[]{ 1, 13 }); MatchesIterator mi = getMatches(source, 1, "field1"); assertNotNull(mi); assertMatch(mi, 2, 4, 15, 39); MatchesIterator sub = mi.getSubMatches(); assertNotNull(sub); assertMatch(sub, 2, 2, 15, 18); assertMatch(sub, 3, 3, 20, 25); assertMatch(sub, 4, 4, 26, 34); assertMatch(sub, 5, 5, 35, 39); assertFalse(sub.next()); assertMatch(mi, 7, 17, 41, 118); assertEquals(2, source.minExtent()); } public void testFixedField() throws IOException { IntervalsSource source = Intervals.phrase( Intervals.term("alph"), Intervals.fixField("field1", Intervals.term("hot"))); // We search in field2, but 'hot' will report intervals from field1 checkIntervals(source, "field2", 1, new int[][]{ {}, { 1, 2 }, {}, {}, {}, {} }); MatchesIterator mi = getMatches(source, 1, "field2"); assertNotNull(mi); assertMatch(mi, 1, 2, 6, 18); } public void testPrefix() throws IOException { IntervalsSource source = Intervals.prefix(new BytesRef("p")); checkIntervals(source, "field1", 5, new int[][]{ {}, {0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7, 10, 10, 27, 27}, {0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7, 10, 10}, {7, 7}, {0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7, 10, 10}, {0, 0} }); MatchesIterator mi = getMatches(source, 1, "field1"); assertNotNull(mi); assertMatch(mi, 0, 0, 0, 5); assertMatch(mi, 1, 1, 6, 14); IntervalsSource noSuch = Intervals.prefix(new BytesRef("qqq")); checkIntervals(noSuch, "field1", 0, new int[][]{}); IntervalsSource s = Intervals.prefix(new BytesRef("p"), 1); IllegalStateException e = expectThrows(IllegalStateException.class, () -> { for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { s.intervals("field1", ctx); } }); assertEquals("Automaton [p*] expanded to too many terms (limit 1)", e.getMessage()); checkVisits(Intervals.prefix(new BytesRef("p")), 1); } public void testWildcard() throws IOException { IntervalsSource source = Intervals.wildcard(new BytesRef("?ot")); checkIntervals(source, "field1", 4, new int[][]{ {}, { 2, 2, 10, 10, 17, 17, 27, 27 }, { 5, 5, 10, 10, 21, 21 }, { 3, 3 }, { 2, 2, 10, 10, 17, 17 }, {} }); MatchesIterator mi = getMatches(source, 4, "field1"); assertNotNull(mi); assertMatch(mi, 2, 2, 15, 18); assertMatch(mi, 10, 10, 63, 66); assertMatch(mi, 17, 17, 97, 100); IllegalStateException e = expectThrows(IllegalStateException.class, () -> { IntervalsSource s = Intervals.wildcard(new BytesRef("?ot"), 1); for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { s.intervals("field1", ctx); } }); 
assertEquals("Automaton [?ot] expanded to too many terms (limit 1)", e.getMessage()); checkVisits(Intervals.wildcard(new BytesRef("p??")), 1); } public void testWrappedFilters() throws IOException { IntervalsSource source = Intervals.or( Intervals.term("nine"), Intervals.maxgaps(1, Intervals.or( Intervals.ordered(Intervals.term("pease"), Intervals.term("hot")), Intervals.ordered(Intervals.term("pease"), Intervals.term("cold"))))); checkIntervals(source, "field1", 3, new int[][]{ {}, { 0, 2, 3, 5, 11, 11, 28, 28 }, { 0, 2, 3, 5 }, {}, { 0, 2, 3, 5, 11, 11 }, {} }); } public void testMultiTerm() throws IOException { RegExp re = new RegExp("p.*e"); IntervalsSource source = Intervals.multiterm(new CompiledAutomaton(re.toAutomaton()), re.toString()); checkIntervals(source, "field1", 5, new int[][]{ {}, { 0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7 }, { 0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7 }, { 7, 7 }, { 0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7 }, { 0, 0 } }); IllegalStateException e = expectThrows(IllegalStateException.class, () -> { IntervalsSource s = Intervals.multiterm(new CompiledAutomaton(re.toAutomaton()), 1, re.toString()); for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) { s.intervals("field1", ctx); } }); assertEquals("Automaton [\\p(.)*\\e] expanded to too many terms (limit 1)", e.getMessage()); checkVisits(source, 1); } }
package main import ( "encoding/json" "fmt" "log" "path" "regexp" "strconv" "strings" "time" "github.com/grafana/grafana_plugin_model/go/datasource" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-plugin" "golang.org/x/net/context" ) // ConsulDatasource implements a datasource which connects to a Consul instance type ConsulDatasource struct { plugin.NetRPCUnsupportedPlugin } // Query returns responses to req based on data in Consul func (t *ConsulDatasource) Query(ctx context.Context, req *datasource.DatasourceRequest) (*datasource.DatasourceResponse, error) { log.Printf("called consul plugin with: \n%v", req) consul, consulToken, err := newConsulFromReq(req) if err != nil { return generateErrorResponse(err, ""), nil } queries, err := parseQueries(req) if err != nil { return generateErrorResponse(fmt.Errorf("error parsing queries: %v", err), ""), nil } return handleQueries(consul, consulToken, queries), nil } func handleQueries(consul *api.Client, consulToken string, queries []query) *datasource.DatasourceResponse { if len(queries) == 0 { return generateErrorResponse(fmt.Errorf("no queries found in request"), "") } if len(queries) == 1 && queries[0].Type == "test" { return handleTest(consul, consulToken, queries[0].RefID) } switch queries[0].Format { case "timeseries": return handleTimeseries(consul, queries) case "table": return handleTable(consul, queries) } return generateErrorResponse(fmt.Errorf("unknown format, nothing to handle"), "") } func handleTest(consul *api.Client, consulToken, refID string) *datasource.DatasourceResponse { e, _, err := consul.ACL().Info(consulToken, &api.QueryOptions{}) if err != nil { return generateErrorResponse(fmt.Errorf("error retrieving acl info for token: %v", err), refID) } if e != nil && e.ID == consulToken { return &datasource.DatasourceResponse{} } return generateErrorResponse(fmt.Errorf("consulToken is not valid"), refID) } func handleTimeseries(consul *api.Client, qs []query) *datasource.DatasourceResponse { var qrs []*datasource.QueryResult for _, q := range qs { target := cleanTarget(q.Target) var qr *datasource.QueryResult var err error switch q.Type { case "get": qr, err = handleGet(consul, target) case "keys": qr, err = handleKeys(consul, target) case "tags": qr, err = handleTags(consul, target, false) case "tagsrec": qr, err = handleTags(consul, target, true) } if err != nil { return generateErrorResponse(err, q.RefID) } if qr == nil { return generateErrorResponse(fmt.Errorf("unknown type %q for format timeseries", q.Type), q.RefID) } qr.RefId = q.RefID qrs = append(qrs, qr) } return &datasource.DatasourceResponse{Results: qrs} } func cleanTarget(target string) string { return strings.Replace(target, "\\.", ".", -1) } func handleGet(consul *api.Client, target string) (*datasource.QueryResult, error) { if strings.HasSuffix(target, "/") { target = target[:len(target)-1] } var kvs []*api.KVPair kv, _, err := consul.KV().Get(target, &api.QueryOptions{RequireConsistent: true}) if err != nil { return nil, fmt.Errorf("error consul get %s: %v", target, err) } if kv != nil { kvs = append(kvs, kv) } qr, err := generateQueryResultFromKVPairs(kvs) if err != nil { return nil, fmt.Errorf("error generating time series: %v", err) } return qr, nil } func handleKeys(consul *api.Client, target string) (*datasource.QueryResult, error) { if !strings.HasSuffix(target, "/") { target = target + "/" } keys, _, err := consul.KV().Keys(target, "/", &api.QueryOptions{RequireConsistent: true}) if err != nil { return nil, fmt.Errorf("error consul list %s: 
%v", target, err) } return generateQueryResultFromKeys(keys), nil } func handleTags(consul *api.Client, target string, recursive bool) (*datasource.QueryResult, error) { if !strings.HasSuffix(target, "/") { target = target + "/" } separator := "/" if recursive { separator = "" } keys, _, err := consul.KV().Keys(target, separator, &api.QueryOptions{RequireConsistent: true}) if err != nil { return nil, fmt.Errorf("error consul get %s: %v", target, err) } var tagKVs []*api.KVPair for _, key := range keys { tagKV, _, err := consul.KV().Get(key, &api.QueryOptions{RequireConsistent: true}) if err != nil { return nil, fmt.Errorf("error consul get %s: %v", key, err) } if tagKV != nil { tagKVs = append(tagKVs, tagKV) } } qr, err := generateQueryResultWithTags(target, tagKVs) if err != nil { return nil, fmt.Errorf("error generating time series: %v", err) } return qr, nil } func handleTable(consul *api.Client, qs []query) *datasource.DatasourceResponse { var qrs []*datasource.QueryResult for _, q := range qs { targetRegex := strings.Replace(q.Target, "*", ".*", -1) regex, err := regexp.Compile(targetRegex) if err != nil { return generateErrorResponse(fmt.Errorf("error compiling regex: %v", err), q.RefID) } firstStar := strings.Index(q.Target, "*") prefix := q.Target if firstStar > 0 { prefix = q.Target[:firstStar] } columns := strings.Split(q.Columns, ",") keys, _, err := consul.KV().Keys(prefix, "", &api.QueryOptions{}) if err != nil { return generateErrorResponse(fmt.Errorf("error gettings keys %s from consul: %v", prefix, err), q.RefID) } var matchingKeys []string for _, key := range keys { if regex.Match([]byte(key)) { matchingKeys = append(matchingKeys, key) } } var tableCols []*datasource.TableColumn var tableRows []*datasource.TableRow for i := 0; i < len(matchingKeys); i++ { firstRow := i == 0 var tableRowValues []*datasource.RowValue for _, col := range columns { key := matchingKeys[i] colKey := calculateColumnKey(key, col) if firstRow { tableCols = append(tableCols, &datasource.TableColumn{Name: path.Base(colKey)}) } kv, _, err := consul.KV().Get(colKey, &api.QueryOptions{}) var kvValue string if err != nil || kv == nil { tableRowValues = append(tableRowValues, &datasource.RowValue{Kind: datasource.RowValue_TYPE_STRING, StringValue: "Not Found"}) } else { kvValue = string(kv.Value) if i, err := strconv.ParseInt(kvValue, 10, 64); err != nil { tableRowValues = append(tableRowValues, &datasource.RowValue{Kind: datasource.RowValue_TYPE_STRING, StringValue: kvValue}) } else { tableRowValues = append(tableRowValues, &datasource.RowValue{Kind: datasource.RowValue_TYPE_INT64, Int64Value: i}) } } } tableRows = append(tableRows, &datasource.TableRow{Values: tableRowValues}) } qrs = append(qrs, &datasource.QueryResult{ RefId: q.RefID, Tables: []*datasource.Table{ { Columns: tableCols, Rows: tableRows, }, }, }) } return &datasource.DatasourceResponse{Results: qrs} } func calculateColumnKey(key string, col string) string { for strings.HasPrefix(col, "../") { lastSlash := strings.LastIndex(key, "/") key = key[:lastSlash] col = strings.TrimPrefix(col, "../") } return path.Join(key, col) } func generateQueryResultFromKVPairs(kvs []*api.KVPair) (*datasource.QueryResult, error) { var series []*datasource.TimeSeries for _, kv := range kvs { value, err := strconv.ParseFloat(string(kv.Value), 64) if err != nil { return nil, err } series = append(series, &datasource.TimeSeries{ Name: kv.Key, Points: []*datasource.Point{ { Timestamp: time.Now().UnixNano() / int64(time.Millisecond), Value: value, }, }, }) } return 
&datasource.QueryResult{ Series: series, }, nil } func generateQueryResultFromKeys(keys []string) *datasource.QueryResult { var series []*datasource.TimeSeries for _, key := range keys { series = append(series, &datasource.TimeSeries{ Name: key, Points: []*datasource.Point{ { Timestamp: time.Now().UnixNano() / int64(time.Millisecond), Value: 1, }, }, }) } return &datasource.QueryResult{ Series: series, } } func generateQueryResultWithTags(target string, tagKVs []*api.KVPair) (*datasource.QueryResult, error) { var series []*datasource.TimeSeries tags := map[string]string{} for _, tagKV := range tagKVs { tagName := strings.TrimPrefix(tagKV.Key, target) tagName = strings.Replace(tagName, "/", ".", -1) tags[tagName] = string(tagKV.Value) } series = append(series, &datasource.TimeSeries{ Name: target, Tags: tags, Points: []*datasource.Point{ { Timestamp: time.Now().UnixNano() / int64(time.Millisecond), Value: 1, }, }, }) return &datasource.QueryResult{ Series: series, }, nil } func generateErrorResponse(err error, refID string) *datasource.DatasourceResponse { return &datasource.DatasourceResponse{ Results: []*datasource.QueryResult{ { RefId: refID, Error: err.Error(), }, }, } } func parseQueries(req *datasource.DatasourceRequest) ([]query, error) { var qs []query for _, rawQuery := range req.Queries { var q query err := json.Unmarshal([]byte(rawQuery.ModelJson), &q) if err != nil { return nil, fmt.Errorf("error parsing query %s: %v", rawQuery.ModelJson, err) } qs = append(qs, q) } return qs, nil } type query struct { Target string `json:"target"` Format string `json:"format"` Type string `json:"type"` RefID string `json:"refId"` DatasourceId int `json:"datasourceId"` Columns string `json:"columns"` } func newConsulFromReq(req *datasource.DatasourceRequest) (*api.Client, string, error) { consulToken := req.Datasource.DecryptedSecureJsonData["consulToken"] if consulToken == "" { // see https://www.consul.io/docs/acl/acl-system.html#acl-tokens consulToken = "<PASSWORD>" } var jsonData map[string]interface{} err := json.Unmarshal([]byte(req.Datasource.JsonData), &jsonData) if err != nil { return nil, "", fmt.Errorf("unable to get consulAddr: %v", err) } consulAddr := jsonData["consulAddr"].(string) if consulAddr == "" { return nil, "", fmt.Errorf("unable to get consulAddr") } consul, err := newConsul(req.Datasource.Id, consulAddr, consulToken) if err != nil { return nil, "", fmt.Errorf("creating consul client failed: %v", err) } return consul, consulToken, nil } type consulClientEntry struct { consulAddr string consulToken string client *api.Client } var consulClientCache = map[int64]consulClientEntry{} func newConsul(datasourceId int64, consulAddr, consulToken string) (*api.Client, error) { if client, ok := clientInCache(datasourceId, consulAddr, consulToken); ok { return client, nil } conf := api.DefaultConfig() conf.Address = consulAddr conf.Token = consulToken conf.TLSConfig.InsecureSkipVerify = true client, err := api.NewClient(conf) if err != nil { return nil, fmt.Errorf("error creating consul client: %v", err) } consulClientCache[datasourceId] = consulClientEntry{consulAddr, consulToken, client} return client, nil } func clientInCache(datasourceId int64, consulAddr, consulToken string) (*api.Client, bool) { entry, ok := consulClientCache[datasourceId] if !ok { return nil, false } if entry.consulAddr != consulAddr || entry.consulToken != consulToken { return nil, false } return entry.client, true }
/**
 * Abstract DAO class that provides basic CRUD operations.
 *
 * @param <T>
 */
public abstract class CommonDaoJpa<T> implements CommonDao<T> {

    /**
     * The class type of the entity the DAO instance is against.
     */
    private final Class<T> typeParameterClass;

    /**
     * Entity manager used for persistence.
     */
    private final EntityManager entityManager;

    /**
     * Public constructor for the CommonDaoJpa JPA implementation.
     *
     * @param em        Entity manager required for persistence
     * @param classType the type of the class representing the entity T
     */
    @NotNull
    public CommonDaoJpa(final EntityManager em, final Class<T> classType) {
        entityManager = em;
        typeParameterClass = classType;
    }

    @Override
    public final void create(final T newEntity) {
        entityManager.getTransaction().begin();
        entityManager.persist(newEntity);
        entityManager.getTransaction().commit();
    }

    @Override
    public final T getOne(final long entityId) {
        TypedQuery<T> q = entityManager.createQuery(
                "SELECT e FROM " + typeParameterClass.getSimpleName() + " e WHERE e.id=:id", typeParameterClass)
                .setParameter("id", entityId);
        return runGetEntityQuery(q);
    }

    @Override
    public final T getOne(final String name) {
        TypedQuery<T> q = entityManager.createQuery(
                "SELECT e FROM " + typeParameterClass.getSimpleName() + " e WHERE e.name=:name", typeParameterClass)
                .setParameter("name", name);
        return runGetEntityQuery(q);
    }

    @Override
    public final List<T> getAll() {
        TypedQuery<T> q = entityManager.createQuery(
                "SELECT e FROM " + typeParameterClass.getSimpleName() + " e", typeParameterClass);
        return q.getResultList();
    }

    @Override
    public final void update(final T details) {
        entityManager.merge(details);
    }

    @Override
    public final void deleteOne(final long entityId) {
        entityManager.getTransaction().begin();
        entityManager.createQuery("DELETE FROM " + typeParameterClass.getSimpleName() + " e WHERE e.id=:id")
                .setParameter("id", entityId)
                .executeUpdate();
        entityManager.getTransaction().commit();
    }

    @Override
    public final void deleteAll() {
        entityManager.getTransaction().begin();
        entityManager.createQuery("DELETE FROM " + typeParameterClass.getSimpleName())
                .executeUpdate();
        entityManager.getTransaction().commit();
    }

    protected final T runGetEntityQuery(final TypedQuery<T> q) {
        T result;
        try {
            result = q.getSingleResult();
        } catch (NoResultException ex) {
            result = null;
        }
        return result;
    }
}
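// Hedged usage sketch (the Ship entity and ShipDaoJpa names below are assumptions for
// illustration, not part of the original code): a concrete DAO only needs to hand the
// EntityManager and the entity class to the CommonDaoJpa constructor.
//
// public class ShipDaoJpa extends CommonDaoJpa<Ship> {
//     public ShipDaoJpa(final EntityManager em) {
//         super(em, Ship.class);
//     }
// }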
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"strings"

	"github.com/conejoninja/bundle"
)

func main() {
	b, e := bundle.LoadBundle("./web.bundle", []byte(""))
	if e != nil {
		log.Fatalf("Error loading bundle: %s", e)
	}
	fmt.Println("Change the files in the assets folder without re-creating the bundle.")
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path[1:] == "gopherize.me.png" {
			gopherImg, err := b.Asset("gopherize.me.png")
			if err != nil {
				fmt.Fprintf(w, "Asset gopherize.me.png does not exist: %s!", err)
				return
			}
			fmt.Fprint(w, string(gopherImg))
			return
		}
		htmlByte, err := b.Asset("index.html")
		if err != nil {
			fmt.Fprintf(w, "Asset index.html does not exist: %s!", err)
			return
		}
		fmt.Fprint(w, strings.Replace(string(htmlByte), "{SERVER}", "http://"+ln.Addr().String(), -1))
	})
	fmt.Println("Open your browser at http://" + ln.Addr().String())
	log.Fatal(http.Serve(ln, nil))
}
'''
Usage:
python make_igv_batchfile.py -s ~{sample_id} -v ~{var_id} -r ~{ref_fasta} -b ~{write_lines(minibam_array)} -o batch.txt

Purpose:
write batch.txt file for IGV, given sample id, variant id, reference fasta, and newline-separated file containing minibams
'''
import sys
from optparse import OptionParser

parser = OptionParser()
parser.add_option('-s', '--sid', dest='sample_id', help='sample id')
parser.add_option('-v', '--vid', dest='var_id', help='variant id in chr:pos:ref:alt format')
parser.add_option('-r', '--ref', dest='ref_fasta', help='reference fasta path')
parser.add_option('-b', '--bams', dest='bams', help='comma-separated string of mini-bam files')
parser.add_option('-o', '--out', dest='outf', help='output filename')
(options, args) = parser.parse_args()

## check that all arguments are present
if None in vars(options).values():
    print('\n' + '## ERROR: missing arguments')
    parser.print_help()
    print('\n')
    sys.exit()

## open output batch file for writing
outfile = open(options.outf, 'w')

## bash header
outfile.write('#!/bin/bash' + '\n')

## set reference genome
outfile.write('genome %s' % (options.ref_fasta) + '\n')

## load each minibam in bam list
'''
with open(options.bamf, 'r') as bamlist:
    for line in bamlist:
        tmp = line.strip()
        outfile.write('load %s'%(tmp) + '\n')
'''
bamlist = options.bams.split(',')
for b in bamlist:
    outfile.write('load %s' % (b) + '\n')

## set output snapshot directory
outfile.write('snapshotDirectory ./' + '\n')

## set navigation to chr:pos-pos
chr = options.var_id.split(':')[0]
pos = options.var_id.split(':')[1]
outfile.write('goto %s:%s-%s' % (chr, pos, pos) + '\n')

## sort base
outfile.write('sort base' + '\n')

## set expand
outfile.write('expand' + '\n')

## set max panel height
outfile.write('maxPanelHeight 1000' + '\n')

## sort base again for good measure
outfile.write('sort base' + '\n')

## snapshot command
outfname = options.sample_id + '.' + '_'.join(options.var_id.split(':'))
#outfile.write('snapshot %s.%s.png'%(options.sample_id, options.var_id))
outfile.write('snapshot %s.png' % (outfname) + '\n')
outfile.write('exit')
outfile.close()
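## For illustration only (sample id, variant id and file names below are made up): with
## -s SAMPLE1 -v chr1:12345:A:T and two mini-bams, the generated batch.txt would look like:
##   #!/bin/bash
##   genome /path/to/ref.fasta
##   load SAMPLE1.a.bam
##   load SAMPLE1.b.bam
##   snapshotDirectory ./
##   goto chr1:12345-12345
##   sort base
##   expand
##   maxPanelHeight 1000
##   sort base
##   snapshot SAMPLE1.chr1_12345_A_T.png
##   exit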
def avg9x(self, matrix, percentage=0.05):
    # Flatten the matrix, keep the top `percentage` fraction of values,
    # and return the median of that subset.
    xs = matrix.flatten()
    srt = sorted(xs, reverse=True)
    length = int(math.floor(percentage * len(srt)))
    matrix_subset = srt[:length]
    return numpy.median(matrix_subset)
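# Minimal usage sketch for avg9x (the SomeDetector wrapper class is hypothetical; avg9x is
# assumed to be a method on a class as above, with `import math` and `import numpy` at
# module level). With percentage=0.5 on [[1, 2], [3, 4]], the top half is [4, 3], so the
# result is numpy.median([4, 3]) == 3.5.
#
# m = numpy.array([[1.0, 2.0], [3.0, 4.0]])
# score = SomeDetector().avg9x(m, percentage=0.5)  # -> 3.5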
package controller import ( "bytes" "context" "io/ioutil" "net/http" "testing" "time" "github.com/sirupsen/logrus" api "github.com/objectrocket/sensu-operator/pkg/apis/objectrocket/v1beta1" "github.com/objectrocket/sensu-operator/pkg/cluster" fakesensu "github.com/objectrocket/sensu-operator/pkg/generated/clientset/versioned/fake" sensuscheme "github.com/objectrocket/sensu-operator/pkg/generated/clientset/versioned/scheme" "github.com/objectrocket/sensu-operator/pkg/util/probe" "github.com/stretchr/testify/suite" fakeapiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" testclient "k8s.io/client-go/kubernetes/fake" fakerest "k8s.io/client-go/rest/fake" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) type fakeIndexer struct{} type fakeController struct{} type fakeQueue struct{} func (f fakeIndexer) GetByKey(string) (interface{}, bool, error) { return nil, false, nil } func (f fakeController) Run(stopCh <-chan struct{}) {} func (f fakeController) HasSynced() bool { return true } func (f fakeQueue) Add(item interface{}) {} func (f fakeQueue) Done(item interface{}) {} func (f fakeQueue) ShutDown() {} func (f fakeQueue) Get() (item interface{}, shutdown bool) { return nil, true } func (f fakeQueue) Forget(interface{}) {} func (f fakeQueue) NumRequeues(item interface{}) int { return 0 } func (f fakeQueue) AddRateLimited(item interface{}) {} type InformerTestSuite struct { suite.Suite ctx context.Context cancelCtx context.CancelFunc } func (s *InformerTestSuite) SetupSuite() { } func (s *InformerTestSuite) SetupTest() { s.ctx, s.cancelCtx = context.WithCancel(context.Background()) } func (s *InformerTestSuite) TearDownTest() { s.cancelCtx() select { case <-s.ctx.Done(): return case <-time.After(2 * time.Minute): s.Fail("Timed out waiting for test to tear down") } } func TestRunSuite(t *testing.T) { suiteTester := new(InformerTestSuite) suite.Run(t, suiteTester) } func (s *InformerTestSuite) TestInformerWithNoEvents() { var ( source *cache.ListWatch clusterInformer Informer assetInformer Informer checkInformer Informer handlerInformer Informer eventFilterInformer Informer nodeInformer Informer ) controller := New(Config{ Namespace: "testns", ClusterWide: true, ServiceAccount: "testsa", KubeCli: testclient.NewSimpleClientset(), KubeExtCli: fakeapiextensionsapiserver.NewSimpleClientset(), SensuCRCli: fakesensu.NewSimpleClientset(), CreateCRD: false, WorkerThreads: 1, ProcessingRetries: 0, }) assetInformer.indexer = fakeIndexer{} assetInformer.controller = fakeController{} assetInformer.queue = fakeQueue{} checkInformer.indexer = fakeIndexer{} checkInformer.controller = fakeController{} checkInformer.queue = fakeQueue{} handlerInformer.indexer = fakeIndexer{} handlerInformer.controller = fakeController{} handlerInformer.queue = fakeQueue{} eventFilterInformer.indexer = fakeIndexer{} eventFilterInformer.controller = fakeController{} eventFilterInformer.queue = fakeQueue{} nodeInformer.indexer = fakeIndexer{} nodeInformer.controller = fakeController{} nodeInformer.queue = fakeQueue{} controller.informers[api.SensuAssetResourcePlural] = &assetInformer controller.informers[api.SensuCheckConfigResourcePlural] = &checkInformer controller.informers[api.SensuHandlerResourcePlural] = &handlerInformer controller.informers[api.SensuEventFilterResourcePlural] = &eventFilterInformer controller.informers[CoreV1NodesPlural] = &nodeInformer 
err := controller.initResource() s.Require().NoErrorf(err, "Failed to init resources: %v", err) probe.SetReady() clusterInformer.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) roundTripper := func(req *http.Request) (*http.Response, error) { response := &http.Response{ Body: ioutil.NopCloser(bytes.NewBufferString(` { "apiVersion": "objectrocket.com/v1beta1", "items": [], "kind": "SensuClusterList", "metadata": { "continue": "", "resourceVersion": "3570", "selfLink": "/apis/objectrocket.com/v1beta1/namespaces/default/sensuclusters" } } `)), StatusCode: 200, } response.Header = http.Header{"Content-Type": []string{"application/json"}} return response, nil } controller.Config.SensuCRCli.ObjectrocketV1beta1() source = cache.NewListWatchFromClient( &fakerest.RESTClient{ Client: fakerest.CreateHTTPClient(roundTripper), NegotiatedSerializer: serializer.DirectCodecFactory{ CodecFactory: serializer.NewCodecFactory(sensuscheme.Scheme), }, GroupVersion: schema.GroupVersion{}, VersionedAPIPath: "/not/a/real/path", }, api.SensuClusterResourcePlural, controller.Config.Namespace, fields.Everything()) clusterInformer.indexer, clusterInformer.controller = cache.NewIndexerInformer(source, &api.SensuCluster{}, 0, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { clusterInformer.queue.Add(key) } }, UpdateFunc: func(old interface{}, new interface{}) { key, err := cache.MetaNamespaceKeyFunc(new) if err == nil { clusterInformer.queue.Add(key) } }, DeleteFunc: func(obj interface{}) { // IndexerInformer uses a delta queue, therefore for deletes we have to use this // key function. key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { clusterInformer.queue.Add(key) } }, }, cache.Indexers{}) controller.informers[api.SensuClusterResourcePlural] = &clusterInformer ctx, cancelFunc := context.WithCancel(context.Background()) go controller.startProcessing(ctx) time.Sleep(2 * time.Second) cancelFunc() } func (s *InformerTestSuite) TestInformerWithOneCluster() { var ( source *cache.ListWatch clusterInformer Informer assetInformer Informer checkInformer Informer handlerInformer Informer eventFilterInformer Informer nodeInformer Informer ) controller := New(Config{ Namespace: "testns", ClusterWide: true, ServiceAccount: "testsa", KubeCli: testclient.NewSimpleClientset(), KubeExtCli: fakeapiextensionsapiserver.NewSimpleClientset(), SensuCRCli: fakesensu.NewSimpleClientset(), CreateCRD: false, WorkerThreads: 1, ProcessingRetries: 0, }) assetInformer.indexer = fakeIndexer{} assetInformer.controller = fakeController{} assetInformer.queue = fakeQueue{} checkInformer.indexer = fakeIndexer{} checkInformer.controller = fakeController{} checkInformer.queue = fakeQueue{} handlerInformer.indexer = fakeIndexer{} handlerInformer.controller = fakeController{} handlerInformer.queue = fakeQueue{} eventFilterInformer.indexer = fakeIndexer{} eventFilterInformer.controller = fakeController{} eventFilterInformer.queue = fakeQueue{} nodeInformer.indexer = fakeIndexer{} nodeInformer.controller = fakeController{} nodeInformer.queue = fakeQueue{} controller.informers[api.SensuAssetResourcePlural] = &assetInformer controller.informers[api.SensuCheckConfigResourcePlural] = &checkInformer controller.informers[api.SensuHandlerResourcePlural] = &handlerInformer controller.informers[api.SensuEventFilterResourcePlural] = &eventFilterInformer controller.informers[CoreV1NodesPlural] = &nodeInformer err := 
controller.initResource() s.Require().NoErrorf(err, "Failed to init resources: %v", err) probe.SetReady() clusterInformer.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) roundTripper := func(req *http.Request) (*http.Response, error) { response := &http.Response{ Body: ioutil.NopCloser(bytes.NewBufferString(` { "apiVersion": "objectrocket.com/v1beta1", "items": [ { "apiVersion": "objectrocket.com/v1beta1", "kind": "SensuCluster", "metadata": { "annotations": { "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"objectrocket.com/v1beta1\",\"kind\":\"SensuCluster\",\"metadata\":{\"annotations\":{},\"name\":\"example-sensu-cluster\",\"namespace\":\"default\"},\"spec\":{\"size\":3,\"version\":\"2.0.0-beta.8\"}}\n" }, "clusterName": "", "creationTimestamp": "2019-01-02T23:14:52Z", "generation": 1, "name": "example-sensu-cluster", "namespace": "default", "resourceVersion": "3570", "selfLink": "/apis/objectrocket.com/v1beta1/namespaces/default/sensuclusters/example-sensu-cluster", "uid": "358db0b6-0ee4-11e9-a33b-0800272dcccb" }, "spec": { "repository": "sensu/sensu", "size": 3, "version": "2.0.0-beta.8" }, "status": { "agentPort": 8081, "agentServiceName": "example-sensu-cluster-agent", "apiPort": 8080, "apiServiceName": "example-sensu-cluster-api", "conditions": [ { "lastTransitionTime": "2019-01-02T23:15:48Z", "lastUpdateTime": "2019-01-02T23:15:48Z", "reason": "Cluster available", "status": "True", "type": "Available" } ], "currentVersion": "2.0.0-beta.8", "dashboardPort": 3000, "dashboardServiceName": "example-sensu-cluster-dashboard", "members": { "ready": [ "example-sensu-cluster-6h5wp5t264", "example-sensu-cluster-8ldr4vhlz5", "example-sensu-cluster-b4cf6wcnpc" ] }, "phase": "Running", "size": 3, "targetVersion": "" } } ], "kind": "SensuClusterList", "metadata": { "continue": "", "resourceVersion": "3570", "selfLink": "/apis/objectrocket.com/v1beta1/namespaces/default/sensuclusters" } } `)), StatusCode: 200, } response.Header = http.Header{"Content-Type": []string{"application/json"}} return response, nil } controller.Config.SensuCRCli.ObjectrocketV1beta1() source = cache.NewListWatchFromClient( &fakerest.RESTClient{ Client: fakerest.CreateHTTPClient(roundTripper), NegotiatedSerializer: serializer.DirectCodecFactory{ CodecFactory: serializer.NewCodecFactory(sensuscheme.Scheme), }, GroupVersion: schema.GroupVersion{}, VersionedAPIPath: "/not/a/real/path", }, api.SensuClusterResourcePlural, controller.Config.Namespace, fields.Everything()) clusterInformer.indexer, clusterInformer.controller = cache.NewIndexerInformer(source, &api.SensuCluster{}, 0, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { //s.Failf("Failed with obj:", " %v", obj) key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { clusterInformer.queue.Add(key) } }, UpdateFunc: func(old interface{}, new interface{}) { key, err := cache.MetaNamespaceKeyFunc(new) if err == nil { clusterInformer.queue.Add(key) } }, DeleteFunc: func(obj interface{}) { // IndexerInformer uses a delta queue, therefore for deletes we have to use this // key function. 
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { clusterInformer.queue.Add(key) } }, }, cache.Indexers{}) ctx, cancelFunc := context.WithCancel(context.Background()) controller.informers[api.SensuClusterResourcePlural] = &clusterInformer go controller.startProcessing(ctx) time.Sleep(2 * time.Second) cancelFunc() } func initInformers() (Informer, Informer, Informer, Informer, Informer) { var ( assetInformer Informer checkInformer Informer handlerInformer Informer eventFilterInformer Informer nodeInformer Informer ) assetInformer.indexer = fakeIndexer{} assetInformer.controller = fakeController{} assetInformer.queue = fakeQueue{} checkInformer.indexer = fakeIndexer{} checkInformer.controller = fakeController{} checkInformer.queue = fakeQueue{} handlerInformer.indexer = fakeIndexer{} handlerInformer.controller = fakeController{} handlerInformer.queue = fakeQueue{} eventFilterInformer.indexer = fakeIndexer{} eventFilterInformer.controller = fakeController{} eventFilterInformer.queue = fakeQueue{} nodeInformer.indexer = fakeIndexer{} nodeInformer.controller = fakeController{} nodeInformer.queue = fakeQueue{} return assetInformer, checkInformer, handlerInformer, eventFilterInformer, nodeInformer } func TestController_initCRD(t *testing.T) { assetInformer, checkInformer, handlerInformer, eventFilterInformer, nodeInformer := initInformers() type fields struct { logger *logrus.Entry Config Config informers map[string]*Informer finalizers map[string]cache.Indexer clusters map[string]*cluster.Cluster } tests := []struct { name string fields fields wantErr bool }{ { "test both cluster, and checkconfig crds are created, and become valid", fields{ logrus.WithField("pkg", "test"), Config{ Namespace: "testns", ClusterWide: true, ServiceAccount: "testsa", KubeCli: testclient.NewSimpleClientset(), KubeExtCli: fakeapiextensionsapiserver.NewSimpleClientset(), SensuCRCli: fakesensu.NewSimpleClientset(), CreateCRD: false, WorkerThreads: 1, ProcessingRetries: 0, }, map[string]*Informer{}, map[string]cache.Indexer{}, map[string]*cluster.Cluster{}, }, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Controller{ logger: tt.fields.logger, Config: tt.fields.Config, informers: tt.fields.informers, finalizers: tt.fields.finalizers, clusters: tt.fields.clusters, } c.informers[api.SensuAssetResourcePlural] = &assetInformer c.informers[api.SensuCheckConfigResourcePlural] = &checkInformer c.informers[api.SensuHandlerResourcePlural] = &handlerInformer c.informers[api.SensuEventFilterResourcePlural] = &eventFilterInformer c.informers[CoreV1NodesPlural] = &nodeInformer if err := c.initCRD(); (err != nil) != tt.wantErr { t.Errorf("Controller.initCRD() error = %v, wantErr %v", err, tt.wantErr) } }) } }
/* * PppoeCtrlReadEvent() * * Receive an incoming control message from the PPPoE node */ static void PppoeCtrlReadEvent(int type, void *arg) { union { #ifdef NGM_PPPOE_SETMAXP_COOKIE u_char buf[sizeof(struct ng_mesg) + sizeof(struct ngpppoe_maxp)]; #else u_char buf[sizeof(struct ng_mesg) + sizeof(struct ngpppoe_sts)]; #endif struct ng_mesg resp; } u; char path[NG_PATHSIZ]; Link l = NULL; PppoeInfo pi = NULL; struct PppoeIf *PIf = (struct PppoeIf*)arg; (void)type; if (NgRecvMsg(PIf->csock, &u.resp, sizeof(u), path) < 0) { Perror("PPPoE: error reading message from \"%s\"", path); return; } if (u.resp.header.typecookie != NGM_PPPOE_COOKIE) { Log(LG_ERR, ("PPPoE: rec'd cookie %lu from \"%s\"", (u_long)u.resp.header.typecookie, path)); return; } switch (u.resp.header.cmd) { case NGM_PPPOE_SUCCESS: case NGM_PPPOE_FAIL: case NGM_PPPOE_CLOSE: #ifdef NGM_PPPOE_SETMAXP_COOKIE case NGM_PPPOE_SETMAXP: #endif { char ppphook[NG_HOOKSIZ]; char *linkname, *rest; int id; linkname = ((struct ngpppoe_sts *)u.resp.data)->hook; if (strncmp(linkname, "listen-", 7) == 0) return; snprintf(ppphook, NG_HOOKSIZ, "mpd%d-", gPid); if (strncmp(linkname, ppphook, strlen(ppphook))) { Log(LG_ERR, ("PPPoE: message %d from unknown hook \"%s\"", u.resp.header.cmd, ((struct ngpppoe_sts *)u.resp.data)->hook)); return; } linkname += strlen(ppphook); id = strtol(linkname, &rest, 10); if (rest[0] != 0 || !gLinks[id] || gLinks[id]->type != &gPppoePhysType || PIf != ((PppoeInfo)gLinks[id]->info)->PIf) { Log((u.resp.header.cmd == NGM_PPPOE_SUCCESS)?LG_ERR:LG_PHYS, ("PPPoE: message %d from unexisting link \"%s\"", u.resp.header.cmd, linkname)); return; } l = gLinks[id]; pi = (PppoeInfo)l->info; if (l->state == PHYS_STATE_DOWN) { if (u.resp.header.cmd != NGM_PPPOE_CLOSE) Log(LG_PHYS, ("[%s] PPPoE: message %d in DOWN state", l->name, u.resp.header.cmd)); return; } } } switch (u.resp.header.cmd) { case NGM_PPPOE_SESSIONID: Log(LG_PHYS3, ("PPPoE: rec'd SESSIONID %u from \"%s\"", ntohs((uint16_t)u.resp.data), path)); break; case NGM_PPPOE_SUCCESS: Log(LG_PHYS, ("[%s] PPPoE: connection successful", l->name)); if (pi->opened) { TimerStop(&pi->connectTimer); l->state = PHYS_STATE_UP; PhysUp(l); } else { l->state = PHYS_STATE_READY; } break; case NGM_PPPOE_FAIL: Log(LG_PHYS, ("[%s] PPPoE: connection failed", l->name)); PppoeDoClose(l); PhysDown(l, STR_CON_FAILED0, NULL); break; case NGM_PPPOE_CLOSE: Log(LG_PHYS, ("[%s] PPPoE: connection closed", l->name)); PppoeDoClose(l); PhysDown(l, STR_DROPPED, NULL); break; case NGM_PPPOE_ACNAME: Log(LG_PHYS, ("PPPoE: rec'd ACNAME \"%s\"", ((struct ngpppoe_sts *)u.resp.data)->hook)); break; #ifdef NGM_PPPOE_SETMAXP_COOKIE case NGM_PPPOE_SETMAXP: { struct ngpppoe_maxp *maxp; maxp = ((struct ngpppoe_maxp *)(void *)u.resp.data); Log(LG_PHYS, ("[%s] PPPoE: rec'd PPP-Max-Payload '%u'", l->name, maxp->data)); if (pi->max_payload > 0) { if (pi->max_payload == maxp->data) pi->mp_reply = 1; else Log(LG_PHYS, ("[%s] PPPoE: sent and returned values are not equal", l->name)); } else Log(LG_PHYS, ("[%s] PPPoE: server sent tag PPP-Max-Payload" " without request from the client", l->name)); break; } #endif #ifdef NGM_PPPOE_PADM_COOKIE case NGM_PPPOE_HURL: Log(LG_PHYS, ("PPPoE: rec'd HURL \"%s\"", ((struct ngpppoe_padm *)u.resp.data)->msg)); break; case NGM_PPPOE_MOTM: Log(LG_PHYS, ("PPPoE: rec'd MOTM \"%s\"", ((struct ngpppoe_padm *)u.resp.data)->msg)); break; #endif default: Log(LG_PHYS, ("PPPoE: rec'd command %lu from \"%s\"", (u_long)u.resp.header.cmd, path)); break; } }
from bs4 import BeautifulSoup as bs from selenium import webdriver import time from langdetect import detect from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait def get_comments(driver, address, line, doc_num): posts = open('../data/comments/' + line, 'w') driver.get(address) plain_html = click_load(driver) soup = bs(plain_html, 'html.parser') comments = soup.find_all('li', {'class':'gElp9'}) encount = 0 en_comments = [] for i in range(len(comments)): if i == 0: continue res = comments[i] txt = res.find("span", {'class': ''}).text print(txt) try: if detect(txt) == 'en': en_comments.append(txt) posts.write(txt) posts.write('\n') encount = encount + 1 except: pass # print(*en_comments, sep = '\n') print("%d --- English Comments in %s:" % (doc_num, line), len(en_comments)) time.sleep(1) # driver.quit() posts.close() def click_load(driver): # The XPATH for the button that loads more comments of the post. Copy from Inspect web page page_xpath = '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/div[1]/ul/li/div/button/span' html = driver.page_source while True: try: driver.find_element(By.XPATH, page_xpath).click() except: pass try: WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, page_xpath))) except: break html = driver.page_source return html if __name__ == "__main__": shortcode_file = open('../data/shortcodes.txt','r') doc_num = 0 driver = webdriver.Chrome(executable_path='/home/ak/chromedriver/chromedriver') time.sleep(30) for line in shortcode_file: address = 'https://www.instagram.com' + line get_comments(driver, address, line[3:-2], doc_num) doc_num = doc_num + 1 shortcode_file.close()
use crate::io::file::create_file; use crate::io::fs::read_file_to_string; use crate::utils::error::to_eyre_error; use csv::{ReaderBuilder as CsvReaderBuilder, Writer as CsvWriterImpl, WriterBuilder as CsvWriterBuilder}; use eyre::Report; use serde::{Deserialize, Serialize}; use std::io::Write; use std::path::{Path, PathBuf}; /// Writes CSV. Each row is a serde-annotated struct. pub struct CsvStructWriter<W: Write + Send> { pub writer: CsvWriterImpl<W>, } impl<W: Write + Send> CsvStructWriter<W> { pub fn new(writer: W, delimiter: u8) -> Result<Self, Report> { let writer = CsvWriterBuilder::new().delimiter(delimiter).from_writer(writer); Ok(Self { writer }) } pub fn write<T: Serialize>(&mut self, record: &T) -> Result<(), Report> { self.writer.serialize(record)?; Ok(()) } } /// Writes CSV files. Each row is a serde-annotated struct. pub struct CsvStructFileWriter { pub filepath: PathBuf, pub writer: CsvStructWriter<Box<dyn Write + Send>>, } impl CsvStructFileWriter { pub fn new(filepath: impl AsRef<Path>, delimiter: u8) -> Result<Self, Report> { let filepath = filepath.as_ref(); let file = create_file(filepath)?; let writer = CsvStructWriter::new(file, delimiter)?; Ok(Self { filepath: filepath.to_owned(), writer, }) } pub fn write<T: Serialize>(&mut self, record: &T) -> Result<(), Report> { self.writer.write(record)?; Ok(()) } } pub trait VecWriter { fn write<I: IntoIterator<Item = T>, T: AsRef<[u8]>>(&mut self, values: I) -> Result<(), Report>; } /// Writes CSV. Each row is a vec of strings. pub struct CsvVecWriter<W: Write + Send> { pub headers: Vec<String>, pub writer: CsvWriterImpl<W>, } impl<W: Write + Send> CsvVecWriter<W> { pub fn new(writer: W, delimiter: u8, headers: &[String]) -> Result<Self, Report> { let mut writer = CsvWriterBuilder::new().delimiter(delimiter).from_writer(writer); writer.write_record(headers)?; Ok(Self { headers: headers.to_owned(), writer, }) } } impl<W: Write + Send> VecWriter for CsvVecWriter<W> { fn write<I: IntoIterator<Item = T>, T: AsRef<[u8]>>(&mut self, values: I) -> Result<(), Report> { self.writer.write_record(values)?; Ok(()) } } /// Writes CSV files. Each row is a vec of strings. pub struct CsvVecFileWriter { pub filepath: PathBuf, pub headers: Vec<String>, pub writer: CsvVecWriter<Box<dyn Write + Send>>, } impl CsvVecFileWriter { pub fn new(filepath: impl AsRef<Path>, delimiter: u8, headers: &[String]) -> Result<Self, Report> { let filepath = filepath.as_ref(); let file = create_file(filepath)?; let writer = CsvVecWriter::new(file, delimiter, headers)?; Ok(Self { filepath: filepath.to_owned(), headers: headers.to_owned(), writer, }) } } impl VecWriter for CsvVecFileWriter { fn write<I: IntoIterator<Item = T>, T: AsRef<[u8]>>(&mut self, values: I) -> Result<(), Report> { self.writer.write(values)?; Ok(()) } } /// Parses CSV data from string. pub fn parse_csv<T: for<'de> Deserialize<'de>, S: AsRef<str>>(data: S) -> Result<Vec<T>, Report> { let reader = CsvReaderBuilder::new() .has_headers(true) .from_reader(data.as_ref().as_bytes()); reader .into_deserialize::<T>() .into_iter() .map(to_eyre_error) .collect::<Result<Vec<T>, Report>>() } /// Parses CSV file. pub fn read_csv_file<T: for<'de> Deserialize<'de>>(filepath: impl AsRef<Path>) -> Result<Vec<T>, Report> { let filepath = filepath.as_ref(); let data = read_file_to_string(filepath)?; parse_csv(data) }
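// Hedged usage sketch (the Row struct and the output path are assumptions, not from the
// original code): CsvStructFileWriter serializes one serde-annotated struct per row.
//
// #[derive(Serialize)]
// struct Row {
//     name: String,
//     score: f64,
// }
//
// let mut writer = CsvStructFileWriter::new("rows.csv", b',')?;
// writer.write(&Row { name: "a".to_owned(), score: 1.0 })?;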
package dev.jshfx.base.jshell.commands;

import java.nio.file.Path;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import dev.jshfx.base.jshell.CommandProcessor;
import dev.jshfx.base.sys.PreferenceManager;
import dev.jshfx.base.sys.RepositoryManager;
import dev.jshfx.jfx.util.FXResourceBundle;
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;

@Command(name = "/resolve")
public class ResolveCommand extends BaseCommand {

    public static final String NAME = "/resolve";

    @Parameters(paramLabel = "<artifacts>", descriptionKey = "/resolve.artifacts", completionCandidates = RepoCoordinates.class)
    private List<String> coords;

    public ResolveCommand(CommandProcessor commandProcessor) {
        super(commandProcessor);
    }

    @Override
    public void run() {

        if (coords != null && !coords.isEmpty()) {
            commandProcessor.getSession().getFeedback()
                    .commandSuccess(FXResourceBundle.getBundle().getString("msg.resolution.started")).flush();
            try {
                Set<String> classPaths = new HashSet<>();
                Set<String> sourcePaths = new HashSet<>();

                for (String coord : coords) {
                    if (coord.endsWith(".xml")) {
                        Path path = Path.of(coord);
                        if (!path.isAbsolute()) {
                            path = PreferenceManager.get().getLatestDir().resolve(path);
                        }
                        RepositoryManager.get().resolvePom(path.toString(), classPaths, sourcePaths);
                    } else {
                        RepositoryManager.get().resolve(coord, classPaths, sourcePaths);
                    }
                }

                commandProcessor.getSession().getFeedback()
                        .commandSuccess(FXResourceBundle.getBundle().getString("msg.resolution.success")).flush();

                commandProcessor.getSession().setSourcepath(sourcePaths);

                if (commandProcessor.getSession().setClasspath(classPaths)) {
                    commandProcessor.getSession().getFeedback()
                            .commandSuccess(FXResourceBundle.getBundle().getString("msg.reload")).flush();
                    commandProcessor.getSession().reload(true);
                    commandProcessor.getSession().getFeedback()
                            .commandSuccess(FXResourceBundle.getBundle().getString("msg.reload.done")).flush();
                }

            } catch (Exception e) {
                commandProcessor
                        .getSession().getFeedback().commandFailure(FXResourceBundle.getBundle()
                                .getString("msg.resolution.failure", e.getMessage() != null ? e.getMessage() : coords))
                        .flush();
            }
        }
    }
}
use bevy::prelude::*; use uuid::Uuid; pub enum LevelKind { Stock(usize), Custom(Uuid), } pub enum ButtonKind { Play, Editor, Options, Quit, Levels, Level(LevelKind), } #[derive(Component)] pub struct ButtonMarker { pub kind: ButtonKind, } impl ButtonMarker { pub fn new(kind: ButtonKind) -> ButtonMarker { ButtonMarker { kind } } pub fn play() -> ButtonMarker { ButtonMarker::new(ButtonKind::Play) } pub fn editor() -> ButtonMarker { ButtonMarker::new(ButtonKind::Editor) } pub fn options() -> ButtonMarker { ButtonMarker::new(ButtonKind::Options) } pub fn quit() -> ButtonMarker { ButtonMarker::new(ButtonKind::Quit) } pub fn levels() -> ButtonMarker { ButtonMarker::new(ButtonKind::Levels) } pub fn custom_level(uuid: Uuid) -> ButtonMarker { ButtonMarker::new(ButtonKind::Level(LevelKind::Custom(uuid))) } pub fn stock_level(index: usize) -> ButtonMarker { ButtonMarker::new(ButtonKind::Level(LevelKind::Stock(index))) } }
import { Direction } from '@microsoft/fast-web-utilities';

export const bodyFontValue = 'aktiv-grotesk, "Segoe UI", Arial, Helvetica, sans-serif';
export const directionValue = Direction.ltr;
export const disabledOpacityValue = 0.3;
export const strokeWidthValue = 1;
export const focusStrokeWidthValue = 2;
#include "hdf.h" #define FILE_NAME "Image_with_Palette.hdf" #define IMAGE_NAME "Image with Palette" #define N_ENTRIES 256 /* number of elements of each color */ int main( ) { /************************* Variable declaration **************************/ intn status, /* status for functions returning an intn */ i, j; int32 file_id, gr_id, ri_id, pal_id, ri_index; int32 data_type, n_comps, n_entries, interlace_mode; uint8 palette_data[N_ENTRIES][3]; /* static because of fixed size */ /************************* Variable declaration **************************/ /* * Open the file. */ file_id = Hopen (FILE_NAME, DFACC_READ, 0); /* * Initiate the GR interface. */ gr_id = GRstart (file_id); /* * Get the index of the image IMAGR_NAME. */ ri_index = GRnametoindex (gr_id, IMAGE_NAME); /* * Get image identifier. */ ri_id = GRselect (gr_id, ri_index); /* * Get the identifier of the palette attached to the image. */ pal_id = GRgetlutid (ri_id, ri_index); /* * Obtain and display information about the palette. */ status = GRgetlutinfo (pal_id, &n_comps, &data_type, &interlace_mode, &n_entries); printf ("Palette: %d components; %d entries\n", n_comps, n_entries); /* * Read the palette data. */ status = GRreadlut (pal_id, (VOIDP)palette_data); /* * Display the palette data. Recall that HDF supports only 256 colors. * Each color is defined by its 3 components. Therefore, * verifying the value of n_entries and n_comps is not necessary and * the buffer to hold the palette data can be static. However, * if more values or colors are added to the model, these parameters * must be checked to allocate sufficient space when reading a palette. */ printf (" Palette Data: \n"); for (i=0; i< n_entries; i++) { for (j = 0; j < n_comps; j++) printf ("%i ", palette_data[i][j]); printf ("\n"); } printf ("\n"); /* * Terminate access to the image and to the GR interface, and * close the HDF file. */ status = GRendaccess (ri_id); status = GRend (gr_id); status = Hclose (file_id); return 0; }
def parse_object(response, infotype): if infotype in ('idletime', 'refcount'): return int_or_none(response) return response
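A quick illustration of how the helper above behaves. `int_or_none` is not defined in the snippet, so the stub below is an assumption about what it does (convert the response to `int`, pass `None` through).

def int_or_none(response):
    # assumed helper: the snippet above relies on it but does not define it
    return int(response) if response is not None else None


print(parse_object("2", "refcount"))        # -> 2 (numeric info types are converted)
print(parse_object("embstr", "encoding"))   # -> 'embstr' (everything else passes through)
print(parse_object(None, "idletime"))       # -> None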
Bangladesh Installed 3 Million+ New Residential Solar Systems
November 19th, 2014 by James Ayre

To be exact, the recorded number was 3.1 million new systems — with more than 15 million people now benefiting from these new systems, according to coverage from the Bangladeshi newspaper The Daily Star. The systems were installed as part of the country’s Infrastructure Development Company Ltd (Idcol) program for off-grid areas. The total capacity of the new systems is somewhere around 135 MW.

The new systems will help the country towards its 2021 energy target — which calls for doubling electricity generation to 24 GW, 10% of which is set to come from renewable energy. The state-owned Idcol reportedly has a goal of financing six million residential solar systems by 2017. The Bangladeshi Prime Minister Sheikh Hasina commented, as quoted by Bangladeshi news company UNB: “We’ve set a target to provide solar energy facility to three million more families over the next three years through Idcol.”

The new capacity represents a significant addition relative to the country’s total power generation capacity — which, as of August 2014, stood at 10,618 MW, as per the Bangladesh Power Development Board. At current rates, Idcol is installing around 60,000 new residential solar systems a month.

For more information on the country’s “solar energy revolution,” I highly recommend checking out some of our previous coverage, especially: Solar Energy Revolution That Everyone’s Ignoring In Bangladesh. While the country isn’t exactly a major global player, it’s still interesting to see the growth there. Given the country’s likely eventual fate at the hands of sea level rise and other global warming effects, it’s interesting in a different way as well though….

Image Credit: Solar for Bangladesh
Variability of Mineral Density in Coralline Hydroxyapatite Spheres: Study by Quantitative Computed Tomography

Quantitative computed tomography (qCT) can be employed to determine the mineral density (MD) of bone or similar mineralized alloplastic materials with high precision. Porous spheres made from coralline hydroxyapatite are currently used for reconstruction after enucleation procedures. The long-term fate of these implants is unknown. Using qCT, MD was determined in hydroxyapatite spheres prior to implantation. Intersphere MD varied by up to 200% with a near-Gaussian distribution. Intrasphere MD did not vary significantly when comparing central to peripheral sites. The density of coralline hydroxyapatite spheres was ~400% greater than the density of newly formed endochondral bone. This study demonstrates that qCT is an invaluable tool for MD determination, here detecting a marked variability in hydroxyapatite MDs. Because the long-term fate of these implants is unknown and fibrovascular ingrowth is an important event in their integration, monitoring of the MDs of coralline hydroxyapatite implants is of interest.
def rate_per_z(z):
    # Differential event rate per unit redshift. Assumes that `rate(z)` (a volumetric
    # rate), `cosmo` (an astropy cosmology object) and `nu_bright_fraction` are defined
    # at module level, with numpy imported as `np` and astropy.units as `u`.
    return rate(z) * cosmo.differential_comoving_volume(z) * \
        nu_bright_fraction * (4 * np.pi * u.sr) / (1+z)
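A small usage sketch for the function above. `rate`, `cosmo` and `nu_bright_fraction` are not defined in the snippet, so the stand-ins below are illustrative only; Planck15 and the toy rate are assumptions, not values from the original code.

import numpy as np
from astropy import units as u
from astropy.cosmology import Planck15 as cosmo

nu_bright_fraction = 0.1  # hypothetical bright fraction
rate = lambda z: 1e-6 * (1 + z) / (u.Mpc ** 3 * u.yr)  # hypothetical volumetric rate

zs = np.linspace(0.01, 2.0, 50)
per_z = [rate_per_z(z).to(1 / u.yr).value for z in zs]  # events / yr / unit redshift
print("~%.2f events per year out to z = 2 (toy numbers)" % np.trapz(per_z, zs))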
def main(): no_of_strings = int(input()) for _ in range(no_of_strings): current = input() length = len(current) if length > 10: current = current[0] + str(length - 2) + current[-1] print(current) main()
def parse(datetime_string: str, timezone: str = "Europe/Berlin") -> Union[datetime.datetime, None]: parser_result = Parser(datetime_string).parse() if parser_result is None: return None evaluator_result = Evaluator(parser_result, tz=timezone).evaluate() if evaluator_result is None: return None return evaluator_result
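A minimal usage sketch for the wrapper above; the input strings are only illustrative, since the grammar accepted by `Parser` is specific to this project.

for text in ("tomorrow at 10:00", "in 2 hours", "definitely not a date"):
    result = parse(text, timezone="Europe/Berlin")
    if result is None:
        print(f"{text!r} could not be parsed")
    else:
        print(f"{text!r} -> {result.isoformat()}")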
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #include "stdafx.h" using namespace std; using namespace Common; using namespace ServiceModel; ResourceGovernancePolicyDescription::ResourceGovernancePolicyDescription() : CodePackageRef(), MemoryInMB(0), MemorySwapInMB(0), MemoryReservationInMB(0), CpuShares(0), CpuPercent(0), MaximumIOps(0), MaximumIOBytesps(0), BlockIOWeight(0), CpusetCpus(), NanoCpus(0), CpuQuota(0) { } ResourceGovernancePolicyDescription::ResourceGovernancePolicyDescription(ResourceGovernancePolicyDescription const & other) : CodePackageRef(other.CodePackageRef), MemoryInMB(other.MemoryInMB), MemorySwapInMB(other.MemorySwapInMB), MemoryReservationInMB(other.MemoryReservationInMB), CpuShares(other.CpuShares), CpuPercent(other.CpuPercent), MaximumIOps(other.MaximumIOps), MaximumIOBytesps(other.MaximumIOBytesps), BlockIOWeight(other.BlockIOWeight), CpusetCpus(other.CpusetCpus), NanoCpus(other.NanoCpus), CpuQuota(other.CpuQuota) { } ResourceGovernancePolicyDescription::ResourceGovernancePolicyDescription(ResourceGovernancePolicyDescription && other) : CodePackageRef(move(other.CodePackageRef)), MemoryInMB(other.MemoryInMB), MemorySwapInMB(other.MemorySwapInMB), MemoryReservationInMB(other.MemoryReservationInMB), CpuShares(other.CpuShares), CpuPercent(other.CpuPercent), MaximumIOps(other.MaximumIOps), MaximumIOBytesps(other.MaximumIOBytesps), BlockIOWeight(other.BlockIOWeight), CpusetCpus(move(other.CpusetCpus)), NanoCpus(other.NanoCpus), CpuQuota(other.CpuQuota) { } ResourceGovernancePolicyDescription const & ResourceGovernancePolicyDescription::operator = (ResourceGovernancePolicyDescription const & other) { if (this != &other) { this->CodePackageRef = other.CodePackageRef; this->MemoryInMB = other.MemoryInMB; this->MemorySwapInMB = other.MemorySwapInMB; this->MemoryReservationInMB = other.MemoryReservationInMB; this->CpuShares = other.CpuShares; this->CpuPercent = other.CpuPercent; this->MaximumIOps = other.MaximumIOps; this->MaximumIOBytesps = other.MaximumIOBytesps; this->BlockIOWeight = other.BlockIOWeight; this->CpusetCpus = other.CpusetCpus; this->NanoCpus = other.NanoCpus; this->CpuQuota = other.CpuQuota; } return *this; } ResourceGovernancePolicyDescription const & ResourceGovernancePolicyDescription::operator = (ResourceGovernancePolicyDescription && other) { if (this != &other) { this->CodePackageRef = move(other.CodePackageRef); this->MemoryInMB = other.MemoryInMB; this->MemorySwapInMB = other.MemorySwapInMB; this->MemoryReservationInMB = other.MemoryReservationInMB; this->CpuShares = other.CpuShares; this->CpuPercent = other.CpuPercent; this->MaximumIOps = other.MaximumIOps; this->MaximumIOBytesps = other.MaximumIOBytesps; this->BlockIOWeight = other.BlockIOWeight; this->CpusetCpus = move(other.CpusetCpus); this->NanoCpus = other.NanoCpus; this->CpuQuota = other.CpuQuota; } return *this; } bool ResourceGovernancePolicyDescription::operator == (ResourceGovernancePolicyDescription const & other) const { return StringUtility::AreEqualCaseInsensitive(this->CodePackageRef, other.CodePackageRef) && (this->MemoryInMB == other.MemoryInMB) && (this->MemorySwapInMB == other.MemorySwapInMB) && (this->MemoryReservationInMB == other.MemoryReservationInMB) && (this->CpuShares == other.CpuShares) && (this->CpuPercent == other.CpuPercent) && 
(this->MaximumIOps == other.MaximumIOps) && (this->MaximumIOBytesps == other.MaximumIOBytesps) && (this->BlockIOWeight == other.BlockIOWeight) && (StringUtility::AreEqualCaseInsensitive(this->CpusetCpus, other.CpusetCpus)) && (this->NanoCpus == other.NanoCpus) && (this->CpuQuota == other.CpuQuota); } bool ResourceGovernancePolicyDescription::operator != (ResourceGovernancePolicyDescription const & other) const { return !(*this == other); } void ResourceGovernancePolicyDescription::WriteTo(TextWriter & w, FormatOptions const &) const { w.Write("ResourceGovernancePolicyDescription { "); w.Write("CodePackageRef = {0}, ", CodePackageRef); w.Write("MemoryInMB = {0}, ", MemoryInMB); w.Write("MemorySwapInMB = {0} ", MemorySwapInMB); w.Write("MemoryReservationInMB = {0} ", MemoryReservationInMB); w.Write("CpuShares = {0} ", CpuShares); w.Write("CpuPercent = {0} ", CpuPercent); w.Write("MaximumIOps = {0} ", MaximumIOps); w.Write("MaximumIOBytesps = {0} ", MaximumIOBytesps); w.Write("BlockIOWeight = {0} ", BlockIOWeight); w.Write("CpusetCpus = {0} ", CpusetCpus); w.Write("NanoCpus = {0} ", NanoCpus); w.Write("CpuQuota = {0} ", CpuQuota); w.Write("}"); } wstring ResourceGovernancePolicyDescription::ToString() const { return wformatString("ResourceGovernancePolicyDescription[ CpuShares = {0} , MemoryInMB = {1} ]", CpuShares, MemoryInMB); } void ResourceGovernancePolicyDescription::ReadFromXml( XmlReaderUPtr const & xmlReader) { // <ResourceGovernancePolicy CodePackageRef="" MemoryInMB="" MemorySwapInMB="" MemoryReservationInMB="" CpuShares=""/> xmlReader->StartElement( *SchemaNames::Element_ResourceGovernancePolicy, *SchemaNames::Namespace); this->CodePackageRef = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_CodePackageRef); if(xmlReader->HasAttribute(*SchemaNames::Attribute_MemoryInMB)) { auto memoryInMB = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_MemoryInMB); if (!StringUtility::TryFromWString<uint>( memoryInMB, this->MemoryInMB)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", memoryInMB); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_MemorySwapInMB)) { auto memorySwap = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_MemorySwapInMB); if (!StringUtility::TryFromWString<uint>( memorySwap, this->MemorySwapInMB)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", memorySwap); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_MemoryReservationInMB)) { auto memoryReservation = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_MemoryReservationInMB); if (!StringUtility::TryFromWString<uint>( memoryReservation, this->MemoryReservationInMB)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", memoryReservation); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_CpuShares)) { auto cpuShares = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_CpuShares); if (!StringUtility::TryFromWString<uint>( cpuShares, this->CpuShares)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", cpuShares); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_CpuPercent)) { auto cpuPercent = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_CpuPercent); if (!StringUtility::TryFromWString<uint>( cpuPercent, this->CpuPercent)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", cpuPercent); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_MaximumIOps)) { auto maximumIOps = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_MaximumIOps); if (!StringUtility::TryFromWString<uint>( maximumIOps, 
this->MaximumIOps)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", maximumIOps); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_MaximumIOBytesps)) { auto maximumIOBps = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_MaximumIOBytesps); if (!StringUtility::TryFromWString<uint>( maximumIOBps, this->MaximumIOBytesps)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", maximumIOBps); } } if (xmlReader->HasAttribute(*SchemaNames::Attribute_BlockIOWeight)) { auto blkioweight = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_BlockIOWeight); if (!StringUtility::TryFromWString<uint>( blkioweight, this->BlockIOWeight)) { Parser::ThrowInvalidContent(xmlReader, L"positive integer", blkioweight); } } // Read the rest of the empty element xmlReader->ReadElement(); } Common::ErrorCode ResourceGovernancePolicyDescription::WriteToXml(XmlWriterUPtr const & xmlWriter) { //<ResourceGovernancePolicy> ErrorCode er = xmlWriter->WriteStartElement(*SchemaNames::Element_ResourceGovernancePolicy, L"", *SchemaNames::Namespace); if (!er.IsSuccess()) { return er; } //All the attributes er = xmlWriter->WriteAttribute(*SchemaNames::Attribute_CodePackageRef, this->CodePackageRef); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_MemoryInMB, this->MemoryInMB); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_MemorySwapInMB, this->MemorySwapInMB); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_MemoryReservationInMB, this->MemoryReservationInMB); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_CpuShares, this->CpuShares); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_CpuPercent, this->CpuPercent); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_MaximumIOps, this->MaximumIOps); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_MaximumIOBytesps, this->MaximumIOBytesps); if (!er.IsSuccess()) { return er; } er = xmlWriter->WriteNumericAttribute(*SchemaNames::Attribute_BlockIOWeight, this->BlockIOWeight); if (!er.IsSuccess()) { return er; } //</ResourceGovernancePolicy> return xmlWriter->WriteEndElement(); } void ResourceGovernancePolicyDescription::clear() { this->CodePackageRef.clear(); this->MemoryInMB = 0; this->MemorySwapInMB = 0; this->MemoryReservationInMB = 0; this->CpuShares = 0; this->CpuPercent = 0; this->MaximumIOps = 0; this->MaximumIOBytesps = 0; this->BlockIOWeight = 0; this->CpusetCpus.clear(); this->NanoCpus = 0; this->CpuQuota = 0; } bool ResourceGovernancePolicyDescription::ShouldSetupCgroup() const { return this->CpuQuota > 0 || this->MemoryInMB > 0; } ErrorCode ResourceGovernancePolicyDescription::ToPublicApi( __in ScopedHeap & heap, __out FABRIC_RESOURCE_GOVERNANCE_POLICY_DESCRIPTION & fabricResourceGovernancePolicyDesc) const { fabricResourceGovernancePolicyDesc.CodePackageRef = heap.AddString(this->CodePackageRef); fabricResourceGovernancePolicyDesc.MemoryInMB = static_cast<ULONG>(this->MemoryInMB); fabricResourceGovernancePolicyDesc.MemorySwapInMB = static_cast<ULONG>(this->MemorySwapInMB); fabricResourceGovernancePolicyDesc.MemoryReservationInMB = static_cast<ULONG>(this->MemoryReservationInMB); fabricResourceGovernancePolicyDesc.CpuShares = static_cast<ULONG>(this->CpuShares); 
fabricResourceGovernancePolicyDesc.CpuPercent = static_cast<ULONG>(this->CpuPercent); fabricResourceGovernancePolicyDesc.MaximumIOps = static_cast<ULONG>(this->MaximumIOps); fabricResourceGovernancePolicyDesc.MaximumIOBytesps = static_cast<ULONG>(this->MaximumIOBytesps); fabricResourceGovernancePolicyDesc.BlockIOWeight = static_cast<ULONG>(this->BlockIOWeight); fabricResourceGovernancePolicyDesc.CpusetCpus = heap.AddString(this->CpusetCpus); fabricResourceGovernancePolicyDesc.NanoCpus = static_cast<ULONGLONG>(this->NanoCpus); fabricResourceGovernancePolicyDesc.CpuQuota = static_cast<ULONG>(this->CpuQuota); fabricResourceGovernancePolicyDesc.Reserved = nullptr; return ErrorCode::Success(); }
import { AskCode } from '../../askcode';
import { asyncMap } from '../../utils';
import { Options as RunOptions, runUntyped } from './run';
import * as types from './type';
import { any, Type } from './type';
import { typed, untyped } from './typed';

type ResourceOptions<T, A extends any[]> = Partial<Resource<T, A>>;

/**
 * Resource is the basic value wrapper in AskCode.
 */
export class Resource<T, A extends any[]> {
  constructor(...options: ResourceOptions<T, A>[]) {
    Object.assign(
      this,
      {
        argsType: new Type(...options.map((o) => o.argsType)),
        type: new Type(...options.map((o) => o.type)),
      },
      ...options.map(({ argsType, type, ...rest }) => rest)
    );
  }

  // extends TypedValue ?
  readonly name: string = 'resource';
  readonly type: Type<T> = any;
  readonly argsType: Type<A> = any; // empty list

  async resolver(...argsOrParams: A): Promise<T> {
    throw new Error('This resource requires resolver to be defined');
  }

  async compute(options: RunOptions, code: AskCode, args?: A): Promise<T> {
    const resolverArgs = await asyncMap(code.params ?? args ?? [], (param) =>
      runUntyped(options, param)
    );
    return this.resolver(...untyped(typed(resolverArgs, this.argsType)));
  }
}

export function resource<T, A extends any[]>(
  ...options: ResourceOptions<T, A>[]
) {
  return new Resource(...options);
}

export type Resources = Record<string, Resource<any, any>>;
//BUG: Spectre Dimension data is held over between two worlds if minecraft instance is not fully closed before making another world public class SpectreDimensionHandler extends SavedData { private final HashMap<UUID, SpectreCube> cubes; private int cubeNumber; public SpectreDimensionHandler(int i) { cubes = new HashMap<>(); cubeNumber = i; } public void teleportPlayerToSpectreCube(Level level, Player player) { // tries to get the spectre dimension for the world MinecraftServer minecraftServer = level.getServer(); ServerLevel serverSpectreLevel = null; if (minecraftServer != null) { serverSpectreLevel = minecraftServer.getLevel(RandomThingsMod.SPECTRE_DIMENSION); } if (serverSpectreLevel != null) { super.setDirty(); // Save Old Position / Dimension // CompoundTag compoundTag = player.getPersistentData(); // compoundTag.putDouble("spectrePosX", player.getX()); // compoundTag.putDouble("spectrePosY", player.getY()); // compoundTag.putDouble("spectrePosZ", player.getZ()); // compoundTag.putInt("spectreDimension", 1); if (cubes.containsKey(player.getUUID())) { System.out.println("old player found"); SpectreCube cube = cubes.get(player.getUUID()); player.changeDimension(serverSpectreLevel, new SimpleTeleporter(cube.playerSpawnPosition)); } else { System.out.println("new player found"); SpectreCube cube = new SpectreCube(player.getUUID(), cubeNumber); cube.createBaseCube(serverSpectreLevel); cubes.put(player.getUUID(), cube); cubeNumber++; player.changeDimension(serverSpectreLevel, new SimpleTeleporter(cube.playerSpawnPosition)); } } } @Override public CompoundTag save(CompoundTag compoundTag) { compoundTag.putInt("cubeNumber", cubeNumber); for (SpectreCube cube : cubes.values()) { compoundTag.put(cube.owner.toString(), cube.save(new CompoundTag())); } return compoundTag; } public static SpectreDimensionHandler load(CompoundTag compoundTag) { SpectreDimensionHandler newHandler = new SpectreDimensionHandler(compoundTag.getInt("cubeNumber")); for (String key : compoundTag.getAllKeys()) { if (key.equals("cubeNumber")) { continue; } newHandler.cubes.put(UUID.fromString(key), SpectreCube.load(compoundTag.getCompound(key))); } return newHandler; } public SpectreCube getCube(UUID uuid) { return cubes.getOrDefault(uuid, null); } }
/*===========================================================================
 *
 * File:	SReditView.H
 * Author:	<NAME> (<EMAIL>)
 * Created On:	26 November 2011
 *
 * Interface of the CSrEditView class.
 *
 *=========================================================================*/
#ifndef __SREDITVIEW_H
#define __SREDITVIEW_H


/*===========================================================================
 *
 * Begin Required Includes
 *
 *=========================================================================*/
#include "windows/srrecordlistctrl.h"
#include "windows/srrecordvirtuallistctrl.h"
#include "windows/srrecordtreectrl.h"
#include "modfile/srexport.h"
#include "dialogs/sreditdlghandler.h"
#include "srresourceview.h"
#include "il/il.h"
#include "il/ilu.h"
#include "il/ilut.h"
#include "afxwin.h"
/*===========================================================================
 *		End of Required Includes
 *=========================================================================*/


/*===========================================================================
 *
 * Begin Definitions
 *
 *=========================================================================*/

	/* File filters for the open/save dialog */
#define SREDIT_CSV_FILTER _T("CSV Files (*.csv)|*.csv|All Files (*.*)|*.*||")

	/* Default file extensions */
#define SREDIT_CSV_EXT _T("csv")

	/* Number of operations required for a progress dlg to be shown */
#define SREDIT_MINIMUM_PROGRESS_COUNT 100

#define SREDIT_FILTERUPDATE_TIMERID 0x1234

/*===========================================================================
 *		End of Definitions
 *=========================================================================*/


/*===========================================================================
 *
 * Begin Class CSrEditView Definition
 *
 *=========================================================================*/
class CSrEditView : public CFormView, public ISrListener {

  /*---------- Begin Protected Class Members ----------------------*/
protected:
	CSrEditDlgHandler	m_DlgHandler;		/* Handles all child windows */

	bool			m_IsInitialized;	/* Has the view been initialized yet?
*/ CSrRecordFilter* m_pCurrentFilter; /* Currently displayed record filter */ CSString m_LastFilterID; bool m_UpdateFilterCounts; DWORD m_hFilterUpdateThreadID; HANDLE m_hFilterUpdateThread; HANDLE m_ThreadCloseEvent; /*---------- Begin Protected Class Methods ----------------------*/ protected: /* Create from serialization only */ CSrEditView(); DECLARE_DYNCREATE(CSrEditView) public: enum { IDD = IDD_SREDIT_FORM }; CStatic m_VertEdge; CSrRecordVirtualListCtrl m_RecordList; CSrRecordTreeCtrl m_RecordTree; /*---------- Begin Public Class Methods --------------------------*/ public: /* Class destructor */ virtual ~CSrEditView(); void ThreadUpdateFilterProc (void); /* Get the parent document */ CSrEditDoc* GetDocument(); CSrEditDlgHandler& GetDlgHandler (void) { return m_DlgHandler; } /* Helper export methods */ bool OnCsvExport (srexportinfo_t& ExportInfo, const TCHAR* pDialogTitle); /* Update events */ void OnEditRecord (CSrRecord* pRecord); /* Listener events */ virtual int OnListenAddRecord (CSrListenEvent* pEvent); virtual int OnListenCleanRecord (CSrListenEvent* pEvent); virtual int OnListenUpdateRecord (CSrListenEvent* pEvent); virtual int OnListenPreUpdateRecord (CSrListenEvent* pEvent); virtual int GetListenEvents (void) { return (SR_EVENTMASK_ALL); } bool OnToggleDeleteRecord (CSrRecord* pRecord); bool OnToggleIgnoreRecord (CSrRecord* pRecord); bool OnToggleQuestRecord (CSrRecord* pRecord); /* Updates the main view content display */ void UpdateContents (void); /* Diagnostics */ #ifdef _DEBUG virtual void AssertValid() const; virtual void Dump(CDumpContext& dc) const; #endif public: virtual BOOL PreCreateWindow(CREATESTRUCT& cs); protected: virtual void DoDataExchange(CDataExchange* pDX); virtual void OnInitialUpdate(); virtual void OnUpdate(CView* pSender, LPARAM lHint, CObject* pHint); virtual void OnActivateView(BOOL bActivate, CView* pActivateView, CView* pDeactiveView); afx_msg void OnSize(UINT nType, int cx, int cy); afx_msg void OnSelchangedRecordtree(NMHDR* pNMHDR, LRESULT* pResult); afx_msg void OnFileImportCsv(); afx_msg void OnFileExportcsvSelecteditems(); afx_msg void OnRecordtreeReload(); afx_msg void OnContextMenu(CWnd* pWnd, CPoint point); afx_msg void OnRecordtreeExporttocsv(); afx_msg void OnViewFilestructure(); afx_msg void OnFileExportCsvexteriorlocations(); afx_msg void OnFileExportCsvExtMapMarkers(); afx_msg void OnHelpTestfieldlookup(); afx_msg void OnFileExportCsvexportexteriorplants(); afx_msg void OnEditFindbinarydata(); afx_msg void OnHelpTestfindformid(); afx_msg void OnTestMemoryspeed(); afx_msg void OnTestTokenscripts(); afx_msg void OnRecordClean(); afx_msg void OnUpdateHasSelectedRecords(CCmdUI* pCmdUI); afx_msg void OnRecordMoveactive(); afx_msg LRESULT OnEditRecordMsg (WPARAM wParam, LPARAM lParam); afx_msg void OnDestroy (void); afx_msg void OnMenuEditrecord(); afx_msg void OnEditNewrecord(); afx_msg void OnRecordTogglequest(); afx_msg void OnRecordToggleignore(); afx_msg void OnRecordToggledelete(); afx_msg void OnEditUndo(); afx_msg void OnUpdateEditUndo(CCmdUI* pCmdUI); afx_msg void OnRecordRename(); afx_msg void OnRecordCreatecopy(); afx_msg void OnRecordBatchedit(); afx_msg void OnEditFind(); afx_msg void OnHelpTestoutputeffects(); afx_msg void OnHelpTestrecordsize(); afx_msg void OnTestCleanrecompileall(); afx_msg void OnTestRecompileall(); afx_msg void OnHelpTestcomparescripts(); afx_msg void OnEditSelectall(); DECLARE_MESSAGE_MAP(); public: afx_msg void OnMenuViewrawdata(); afx_msg void OnHelpTestoutputperks(); CEdit m_FilterText; CButton 
m_ActiveCheck; afx_msg void OnBnClickedActivecheck(); afx_msg void OnEnChangeFiltertext(); afx_msg void OnTimer(UINT_PTR nIDEvent); afx_msg void OnClose(); afx_msg void OnEditUselocalstrings(); afx_msg void OnUpdateEditUselocalstrings(CCmdUI *pCmdUI); afx_msg void OnEditSetmodauthor(); afx_msg void OnEditSetmoddescription(); afx_msg void OnMenuViewsummary(); afx_msg void OnUpdateMenuEditrecord(CCmdUI *pCmdUI); afx_msg void OnInitMenuPopup(CMenu* pPopupMenu, UINT nIndex, BOOL bSysMenu); afx_msg void OnViewActiveonly(); afx_msg void OnUpdateViewActiveonly(CCmdUI *pCmdUI); afx_msg void OnMenuChangemodindex(); afx_msg void OnMenuChangeformid(); afx_msg void OnMenuAssignnewformid(); }; /*=========================================================================== * End of Class CSrEditView Definition *=========================================================================*/ /*=========================================================================== * * Begin Inline Class Methods * *=========================================================================*/ #ifndef _DEBUG inline CSrEditDoc* CSrEditView::GetDocument() { return (CSrEditDoc*)m_pDocument; } #endif /*=========================================================================== * End of Inline Class Methods *=========================================================================*/ #endif /*=========================================================================== * End of File SrEditView.H *=========================================================================*/
/** * Top of a stack of parsed elements, that represent the current position in the aligned document. */ public class AlignmentContext { private AlignmentContext parent; private String namespaceUri; private String localName; private String qName; private Attributes attributes; private XSTypeDefinition typeDefinition; private int indentLevel; private Set<String> multipleOccurringChildElements=null; private boolean parentOfSingleMultipleOccurringChildElement=false; public AlignmentContext(AlignmentContext parent, String namespaceUri, String localName, String qName, Attributes attributes, XSTypeDefinition typeDefinition, int indentLevel, Set<String> multipleOccurringChildElements, boolean parentOfSingleMultipleOccurringChildElement) { super(); this.parent = parent; this.namespaceUri = namespaceUri; this.localName = localName; this.qName = qName; this.attributes = attributes; this.typeDefinition = typeDefinition; this.indentLevel = indentLevel; this.multipleOccurringChildElements = multipleOccurringChildElements; this.parentOfSingleMultipleOccurringChildElement = parentOfSingleMultipleOccurringChildElement; } public AlignmentContext getParent() { return parent; } public String getNamespaceUri() { return namespaceUri; } public String getLocalName() { return localName; } public String getqName() { return qName; } public Attributes getAttributes() { return attributes; } public XSTypeDefinition getTypeDefinition() { return typeDefinition; } public int getIndentLevel() { return indentLevel; } public Set<String> getMultipleOccurringChildElements() { return multipleOccurringChildElements; } public boolean isParentOfSingleMultipleOccurringChildElement() { return parentOfSingleMultipleOccurringChildElement; } }
/* * This function writes a packet into the Tx FIFO associated with the Host * Channel. For a channel associated with a non-periodic EP, the non-periodic * Tx FIFO is written. For a channel associated with a periodic EP, the * periodic Tx FIFO is written. This function should only be called in Slave * mode. * * Upon return the xfer_buff and xfer_count fields in _hc are incremented by * then number of bytes written to the Tx FIFO. */ void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { uint32_t i; uint32_t remaining_count; uint32_t byte_count; uint32_t dword_count; uint32_t *data_buff = (uint32_t *)(_hc->xfer_buff); uint32_t *data_fifo = _core_if->data_fifo[_hc->hc_num]; remaining_count = _hc->xfer_len - _hc->xfer_count; if (remaining_count > _hc->max_packet) { byte_count = _hc->max_packet; } else { byte_count = remaining_count; } dword_count = (byte_count + 3) / 4; if ((((unsigned long)data_buff) & 0x3) == 0) { for (i = 0; i < dword_count; i++, data_buff++) { dwc_write_reg32(data_fifo, *data_buff); } } else { for (i = 0; i < dword_count; i++, data_buff++) { dwc_write_reg32(data_fifo, get_unaligned(data_buff)); } } _hc->xfer_count += byte_count; _hc->xfer_buff += byte_count; }
Specialists training in a technical university in the transition to a robotic society

The prerequisites for this research arise from the tasks of modernizing society and of the social training of the professional staff needed for these purposes. Such training has traditionally been based on sociology as the science appropriate to the present moment, studying societal problems at the level of group and individual behavior. However, during the transition to the Bologna model of education, sociology was withdrawn from the Federal State Educational Standard for training future engineers and dramatically downgraded in status, which had a negative impact on graduates' personal potential as professionals. This was stated by employers who had been given the task of using these graduates for the purposes of Russia's transition to an information, robotic society. The purpose of the research is to draw the attention of society and the authorities to the importance of strengthening the social training of groups capable of keeping up with the scientific and technological progress of society, drawing on the base of sociological knowledge. The methodological basis of the topic comprised the sociology of higher education, theories of personal role behavior (Moreno, Parsons, Linton, Yadov), and the concept of social humanism. Three sets of sources and literature on the topic have been studied, revealing the main problem of society, which centers on the situation with "artificial sociality" as a condition of the socialization of student youth, the conjunction of their behavior with "digital risks", and the impact of these risks on both natural and social environments. It is argued that the students who are already taking these risks today are the potential creators of new "digital" and other technologies, yet they have not mastered the skills of forecasting social and professional behavior and have not formed themselves as whole persons. The hypothesis formulated in the article, about the tendency to reduce the hours of social training in universities for engineering personnel, has been confirmed. The reason for this trend is the neoliberal policy of austerity on education.

Introduction
The topic of the article is driven by the task of training engineering personnel capable of making a technological breakthrough in the transition to the information society as a necessary stage of the Knowledge Society. In an unstable economy, setting such an ambitious goal, to be achieved in a fairly short time, shows its mobilizing nature and requires the joint efforts of the authorities, production specialists, employers and the educational community, as has happened more than once in the history of our country. In this situation, it is obvious that such problems can only be solved by strengthening both the natural-technical and the social bases of future engineers' training. The latter are traditionally grounded in sociology as a science studying the fundamental social forms of human life in their adaptation to natural and artificial environments. This is due to the peculiarities of its subject and methods, directly addressed to the study of both the internal and the boundary problems of society and revealing the mechanisms of group and individual role behavior. The research covers not only the theoretical but also the empirical level and corresponds to the current moment.
Thus, within the sociology of higher technical education, social technologies are developed to adapt the personality of university graduates to the conditions of artificial sociality and to the economic and social challenges of society. This prepares them for an active role in professional activity and for positioning themselves in the hierarchy of groups at the cutting edge of scientific and technological progress. However, the situation with the teaching of sociology at technical universities is not satisfactory. It stems from the arbitrary exclusion of sociology from the Federal State Educational Standard (FSES), which destroyed the educational approach to the holistic training of the engineer's personality, weakened graduates' ability to master special disciplines and disoriented them in their professional growth. If we add to this the educational underachievement of the new generations of students (a legacy of the school reforms), it is clear that technical universities, in order to increase the competitiveness of their graduates, need to strengthen the subjects that ensure their social qualities. In this regard, the topic is relevant.

Purpose of the research: to draw the attention of society and the authorities to the increasing role of the sociological training of engineers as a necessary tool for ensuring their social qualities as professionals.

Research objectives: 1. to outline the general situation with the social training of engineers in technical universities; 2. to show the connection between the lack of social training of future engineers and the problems of comprehensive modernization of society; 3. to formulate proposals for improving the social training of specialists in a technical university.

The bibliography on the subject can be divided into three groups: 1) works reflecting the current situation in society with "artificial sociality" as a condition of socialization of modern student youth in their natural social environment, and the risks that lie in wait for them along the way; 2) works revealing the content and composition of the "universal competencies" required to form the personality of an engineer demanded by robotic production, as well as the characteristics of students' personalities and their compliance (or non-compliance) with the role expectations of the modernizing society; 3) works focusing on the policy of institutional reform of education and its positive and negative consequences for the transformation of Russia into a Knowledge Society based on science and robotic technologies.

Hypothesis: there is a tendency to reduce the hours of social training in higher engineering education, underestimating the importance of this training for the modernization of Russian society.

Methods
The study of the social training of specialists in a technical university under the conditions of a mobilization transition to a robotic society rests on the methodological principles of the sociology of higher education, theories of the role behavior of personality (Moreno, Parsons, Linton, Yadov), and the concept of social humanism. In selecting sources and literature within these theories, the relative autonomy of sociological knowledge intended for the pedagogical setting of a technical university was taken into account, as were the specifics of the university's educational environment, characterized by a spirit of technocratism and social skepticism.
The selection was based on the principle of comparing conceptual approaches to the study of "artificial sociality" and of the "natural" social environment, of which students themselves are active social agents. The selection criteria within the concept of social humanism were the various social risks and the related economic, environmental and even political risks, which were compared with possible "digital risks". The latter were viewed hypothetically as "gains and losses" from an approaching robotic society. It is also of interest to find out why "digital risks" exist at all. They are traced to the unequal right of groups or individuals to access information-robotic tools, the unequal right to own them, and the natural division of people into those who can and those who cannot handle these tools. It should be noted that students are taking these "digital risks" today as the potential creators of new "digital" and other technologies, yet they have not mastered the social knowledge needed to predict the consequences of their professional activities and lack the personal characteristics required for this.

The sources that focus on the personal qualities of the students being trained were singled out into a separate group. Here the universal competencies were taken as the criterion, together with the possibility of comparing them with students' actual social behavior. What did this comparison show? Practically all of the terminology and content of the proposed competencies relate to the science of sociology. However, a number of technical universities do not include such a subject in their curricula, and in those that do, its position does not meet the objectives of quality social training of specialists. The result of this educational policy is the extreme dissatisfaction of employers with the social characteristics of graduates of these universities: they lack business and interpersonal communication skills, notions of work discipline, responsibility for assigned tasks, the habit of self-control and self-analysis of their behavior, and the motivation to work in a team for the overall result. The percentage of such graduates is quite high; only 5-10% of graduates are motivated, hardworking and diligent, develop their talents, and raise their ambitions.

Results
The results of the research confirmed our hypothesis about the underestimation of the role of the social sciences in the training of engineering personnel at universities. This inevitably reduces graduates' ability to adapt to the requirements of modern production and causes employers' dissatisfaction with the social quality of this personnel's training.
To remedy this situation, it is recommended: 1) to change the vector of neoliberal reforms in higher technical education and turn it in favor of the best national traditions of engineering education; 2) to overcome the tendency toward an excessive technocratic bias in engineering training and to increase the weight of social training in it; 3) to return sociology to a place in the curriculum adequate to the tasks of competency-based training of engineers; 4) to restore the conditions of the educational environment inherent in the former technical universities, which were conducive to the academic success of students; 5) to ensure that all social agents in the educational environment of these universities trust each other. Meeting even part of these conditions will allow us to preserve young people, our invaluable educational and human capital, who still continue to believe in social justice. But young people can no longer make up their minds about their future, because that future is not obvious, and instead of a successful professional career they risk ending up in the precariat.

Discussion
The following questions are presented for discussion: 1) Is the situation with the social training of engineering personnel in the country critical? Are there prerequisites for reversing this situation in the interests of a comprehensive modernization of society? The analysis of the literature and the authors' own reflections do not allow an unambiguous answer to this question. The reason lies in the policies of neoliberal reform, criticized by much of society for their austerity toward public investment in education and for the low actualization of student potential. 2) Toward what social type of behavior should the personality of a future engineer be oriented: toward a member of the intelligentsia (the Russian educational tradition) or toward an "intellectual" (the European school)? In our opinion, in the current situation of uncertainty in students' goal-setting, the first type seems more effective, as it corresponds to the national traditions of engineers' training and their service to the Motherland; the second type is characterized rather by a spirit of technocratism (and even snobbery) and an underestimation of everything related to the social activity of the engineer, bringing him (in the spirit of "technical romance") closer to the "technical man". 3) How is the mobilization of society possible under conditions of insufficient social training of engineering personnel and the existing educational underachievement of students? We believe that it is impossible without the sovereignty of national educational policy.

Conclusion
In conclusion, we would like to note that the topic of the social training of students in technical universities on the basis of sociology has been studied very little. It seems to us that this is precisely because its status is in each case determined by the leadership of the university, owing to its exclusion from the federal standard. Most of the leaders of these institutions understand the importance of sociology for students' success as professionals, especially since the latest set of general cultural competencies is more universal than the previous ones and terminologically reflects the language of sociology almost one hundred percent. However, they are forced to cut back on something, doubly so at the undergraduate level. At the same time, employers' criticism of the poor quality of the social (sociological) training of engineers cannot but worry them.
They are forced to respond to it, but they cannot change the situation themselves. At the same time, the tasks of modernizing society in the direction of its robotization, as well as the presence of problems (as negative consequences of the "modernization" of school education) relating to the underperformance of students, increasingly require them to address the issue of their social (sociological) training at the ministerial level.
/** * The response corresponding to ServiceRegisterRequest * * @since 2.0 */ @XmlRootElement public class ServiceRegisterResponse { private String id; private long reregisterTimeMillis; public String getId() { return id; } public void setId(String id) { this.id = id; } public long getReregisterTimeMillis() { return reregisterTimeMillis; } public void setReregisterTimeMillis(long reregisterTimeMillis) { this.reregisterTimeMillis = reregisterTimeMillis; } }
def glossary_entity_decorator(props): return DOM.create_element('span', { 'data-term': props['term'], 'class': 'term', }, props['children'])
import re

import numpy as np


def spectrum_misc(f):
    # Wavelength and flux arrays must start out empty; the original snippet used
    # them before assignment, which raised NameError on the first append.
    wav = np.array([])
    flux = np.array([])
    end = False
    while not end:
        try:
            line = f.readline().split()
            wavnew = [float(w) for w in line]
            wav = np.append(wav, wavnew)
            prevwav = wavnew[-1]  # last wavelength read (kept from the original)
        except BaseException:
            # A non-numeric or empty line marks the end of the wavelength block.
            end = True
    aflux = f.readlines()
    for line in aflux:
        # Work around missing 'e' in very small Fortran-style exponents (e.g. '1.23-100').
        line = re.sub(r'-10\d', 'e-100', line)
        flux = np.append(flux, line.rstrip().split())
    wav, flux = np.array(wav), np.array(flux)
    return wav, flux
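A short usage sketch for the reader above, assuming the file begins with whitespace-separated wavelength rows followed by flux rows (the layout the function implies); the file name is a placeholder.

import numpy as np

with open("spectrum.dat") as f:  # placeholder path
    wav, flux = spectrum_misc(f)

flux = flux.astype(float)  # flux values come back as strings
print(f"{wav.size} wavelength points, flux range {flux.min():.3g} .. {flux.max():.3g}")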
/** * Contains factory methods for Mqtt sources. * Alternatively you can use {@link MqttSourceBuilder} * * @since 4.3 */ public final class MqttSources { private MqttSources() { } /** * Returns a builder object which offers a step-by-step fluent API to build * a custom Mqtt {@link StreamSource source} for the Pipeline API. * <p> * The source is not distributed, it creates a * <a href="https://www.eclipse.org/paho/clients/java/">Paho</a> client on * one of the members with memory persistence {@link ConcurrentMemoryPersistence}. * <p> * The source is not re-playable from a certain offset thus it cannot * participate in snapshotting and not fault tolerant. But if you set * {@link MqttConnectOptions#setCleanSession(boolean)} to {@code false} * The broker will keep the published messages with quality service above * `0` and deliver them to the source after restart. * <p> * The source emits items of type {@code byte[]}, the payload of the * message, if {@link MqttSourceBuilder#mapToItemFn(BiFunctionEx)} is not * set. */ @Nonnull public static MqttSourceBuilder<byte[]> builder() { return new MqttSourceBuilder<>(); } /** * Creates a streaming source which connects to the local broker and * subscribes to the given topic with {@link QualityOfService#AT_LEAST_ONCE}. * <p> * Useful for quick prototyping. See other methods * {@link #mqtt(String, Subscription, BiFunctionEx)} and * {@link #builder()} * <p> * For example: * <pre>{@code * pipeline.readFrom(MqttSources.subscribe("topic")); * }</pre> * * @param topic the topic which the source subscribes, may include * wildcards. */ @Nonnull public static StreamSource<byte[]> mqtt(@Nonnull String topic) { return builder().topic(topic).build(); } /** * Creates a streaming source which connects to the given broker and * subscribes to the given topic with {@link QualityOfService#AT_LEAST_ONCE}. * <p> * For example: * <pre>{@code * pipeline.readFrom(MqttSources.subscribe("tcp://localhost:1883", "topic")); * }</pre> * * @param broker the address of the server to connect to, specified as a URI. * @param topic the topic which the source subscribes, may include wildcards. */ @Nonnull public static StreamSource<byte[]> mqtt(@Nonnull String broker, @Nonnull String topic) { return builder().broker(broker).topic(topic).build(); } /** * Creates a streaming source which connects to the given broker and * subscribes using given {@link Subscription}. The source converts * messages to the desired output object using given {@code mapToItemFn}. * <p> * For example, to subscribe to the `topic` with `EXACTLY_ONCE` quality of * service and convert each message to a string: * <pre>{@code * pipeline.readFrom( * MqttSources.subscribe( * "tcp://localhost:1883", * Subscription.of("topic", EXACTLY_ONCE), * (t, m) -> new String(m.getPayload()) * ) * ) * }</pre> * * @param broker the address of the server to connect to, specified * as a URI. * @param subscription the topic which the source subscribes and its * quality of service value, the topic may include * wildcards. * @param mapToItemFn the function which converts the messages to pipeline * items. * @param <T> type of the pipeline items emitted to downstream. */ public static <T> StreamSource<T> mqtt( @Nonnull String broker, @Nonnull Subscription subscription, @Nonnull BiFunctionEx<String, MqttMessage, T> mapToItemFn ) { return builder() .broker(broker) .topic(subscription.getTopic()) .qualityOfService(subscription.getQualityOfService()) .mapToItemFn(mapToItemFn) .build(); } }
// MimeType returns the attachment mime type. func (p *PeerLink) MimeType() string { if p.Type != "" { return p.Type } return "application/octet-stream" }
/** * Hibernate implementation of the JPA {@link javax.persistence.metamodel.Metamodel} contract. * * @author Steve Ebersole * @author Emmanuel Bernard */ public class MetamodelImpl implements MetamodelImplementor, Serializable { // todo : Integrate EntityManagerLogger into CoreMessageLogger private static final EntityManagerMessageLogger log = HEMLogging.messageLogger( MetamodelImpl.class ); private static final Object ENTITY_NAME_RESOLVER_MAP_VALUE = new Object(); private static final String INVALID_IMPORT = ""; private static final String[] EMPTY_IMPLEMENTORS = new String[0]; private final SessionFactoryImplementor sessionFactory; private final Map<String,String> imports = new ConcurrentHashMap<>(); private final Map<String,EntityPersister> entityPersisterMap = new ConcurrentHashMap<>(); private final Map<Class,String> entityProxyInterfaceMap = new ConcurrentHashMap<>(); private final Map<String,CollectionPersister> collectionPersisterMap = new ConcurrentHashMap<>(); private final Map<String,Set<String>> collectionRolesByEntityParticipant = new ConcurrentHashMap<>(); private final ConcurrentMap<EntityNameResolver,Object> entityNameResolvers = new ConcurrentHashMap<>(); private final Map<Class<?>, EntityTypeImpl<?>> jpaEntityTypeMap = new ConcurrentHashMap<>(); /** * There can be multiple instances of an Embeddable type, each one being relative to its parent entity. */ private final Set<EmbeddableTypeImpl<?>> jpaEmbeddableTypes = new CopyOnWriteArraySet<>(); /** * That's not strictly correct in the JPA standard since for a given Java type we could have * multiple instances of an embeddable type. Some embeddable might override attributes, but we * can only return a single EmbeddableTypeImpl for a given Java object class. * * A better approach would be if the parent class and attribute name would be included as well * when trying to locate the embeddable type. */ private final Map<Class<?>, EmbeddableTypeImpl<?>> jpaEmbeddableTypeMap = new ConcurrentHashMap<>(); private final Map<Class<?>, MappedSuperclassType<?>> jpaMappedSuperclassTypeMap = new ConcurrentHashMap<>(); private final Map<String, EntityTypeImpl<?>> jpaEntityTypesByEntityName = new ConcurrentHashMap<>(); private final transient Map<String,EntityGraph> entityGraphMap = new ConcurrentHashMap<>(); private final TypeConfiguration typeConfiguration; private final Map<String, String[]> implementorsCache = new ConcurrentHashMap<>(); public MetamodelImpl(SessionFactoryImplementor sessionFactory, TypeConfiguration typeConfiguration) { this.sessionFactory = sessionFactory; this.typeConfiguration = typeConfiguration; } /** * Prepare the metamodel using the information from the collection of Hibernate * {@link PersistentClass} models * * @param mappingMetadata The mapping information * @param jpaMetaModelPopulationSetting Should the JPA Metamodel be built as well? 
*/ public void initialize(MetadataImplementor mappingMetadata, JpaMetaModelPopulationSetting jpaMetaModelPopulationSetting) { this.imports.putAll( mappingMetadata.getImports() ); primeSecondLevelCacheRegions( mappingMetadata ); final PersisterCreationContext persisterCreationContext = new PersisterCreationContext() { @Override public SessionFactoryImplementor getSessionFactory() { return sessionFactory; } @Override public MetadataImplementor getMetadata() { return mappingMetadata; } }; final PersisterFactory persisterFactory = sessionFactory.getServiceRegistry().getService( PersisterFactory.class ); for ( final PersistentClass model : mappingMetadata.getEntityBindings() ) { final NavigableRole rootEntityRole = new NavigableRole( model.getRootClass().getEntityName() ); final EntityDataAccess accessStrategy = sessionFactory.getCache().getEntityRegionAccess( rootEntityRole ); final NaturalIdDataAccess naturalIdAccessStrategy = sessionFactory.getCache().getNaturalIdCacheRegionAccessStrategy( rootEntityRole ); final EntityPersister cp = persisterFactory.createEntityPersister( model, accessStrategy, naturalIdAccessStrategy, persisterCreationContext ); entityPersisterMap.put( model.getEntityName(), cp ); if ( cp.getConcreteProxyClass() != null && cp.getConcreteProxyClass().isInterface() && !Map.class.isAssignableFrom( cp.getConcreteProxyClass() ) && cp.getMappedClass() != cp.getConcreteProxyClass() ) { // IMPL NOTE : we exclude Map based proxy interfaces here because that should // indicate MAP entity mode.0 if ( cp.getMappedClass().equals( cp.getConcreteProxyClass() ) ) { // this part handles an odd case in the Hibernate test suite where we map an interface // as the class and the proxy. I cannot think of a real life use case for that // specific test, but.. 
log.debugf( "Entity [%s] mapped same interface [%s] as class and proxy", cp.getEntityName(), cp.getMappedClass() ); } else { final String old = entityProxyInterfaceMap.put( cp.getConcreteProxyClass(), cp.getEntityName() ); if ( old != null ) { throw new HibernateException( String.format( Locale.ENGLISH, "Multiple entities [%s, %s] named the same interface [%s] as their proxy which is not supported", old, cp.getEntityName(), cp.getConcreteProxyClass().getName() ) ); } } } } for ( final Collection model : mappingMetadata.getCollectionBindings() ) { final NavigableRole navigableRole = new NavigableRole( model.getRole() ); final CollectionDataAccess accessStrategy = sessionFactory.getCache().getCollectionRegionAccess( navigableRole ); final CollectionPersister persister = persisterFactory.createCollectionPersister( model, accessStrategy, persisterCreationContext ); collectionPersisterMap.put( model.getRole(), persister ); Type indexType = persister.getIndexType(); if ( indexType != null && indexType.isAssociationType() && !indexType.isAnyType() ) { String entityName = ( (AssociationType) indexType ).getAssociatedEntityName( sessionFactory ); Set<String> roles = collectionRolesByEntityParticipant.get( entityName ); if ( roles == null ) { roles = new HashSet<>(); collectionRolesByEntityParticipant.put( entityName, roles ); } roles.add( persister.getRole() ); } Type elementType = persister.getElementType(); if ( elementType.isAssociationType() && !elementType.isAnyType() ) { String entityName = ( ( AssociationType ) elementType ).getAssociatedEntityName( sessionFactory ); Set<String> roles = collectionRolesByEntityParticipant.get( entityName ); if ( roles == null ) { roles = new HashSet<>(); collectionRolesByEntityParticipant.put( entityName, roles ); } roles.add( persister.getRole() ); } } // after *all* persisters and named queries are registered entityPersisterMap.values().forEach( EntityPersister::generateEntityDefinition ); for ( EntityPersister persister : entityPersisterMap.values() ) { persister.postInstantiate(); registerEntityNameResolvers( persister, entityNameResolvers ); } collectionPersisterMap.values().forEach( CollectionPersister::postInstantiate ); if ( jpaMetaModelPopulationSetting != JpaMetaModelPopulationSetting.DISABLED ) { MetadataContext context = new MetadataContext( sessionFactory, mappingMetadata.getMappedSuperclassMappingsCopy(), jpaMetaModelPopulationSetting ); for ( PersistentClass entityBinding : mappingMetadata.getEntityBindings() ) { locateOrBuildEntityType( entityBinding, context ); } handleUnusedMappedSuperclasses( context ); context.wrapUp(); this.jpaEntityTypeMap.putAll( context.getEntityTypeMap() ); this.jpaEmbeddableTypes.addAll( context.getEmbeddableTypeMap() ); for ( EmbeddableTypeImpl<?> embeddable: jpaEmbeddableTypes ) { this.jpaEmbeddableTypeMap.put( embeddable.getJavaType(), embeddable ); } this.jpaMappedSuperclassTypeMap.putAll( context.getMappedSuperclassTypeMap() ); this.jpaEntityTypesByEntityName.putAll( context.getEntityTypesByEntityName() ); applyNamedEntityGraphs( mappingMetadata.getNamedEntityGraphs().values() ); } } private void primeSecondLevelCacheRegions(MetadataImplementor mappingMetadata) { final Map<String, DomainDataRegionConfigImpl.Builder> regionConfigBuilders = new ConcurrentHashMap<>(); // todo : ultimately this code can be made more efficient when we have a better intrinsic understanding of the hierarchy as a whole for ( PersistentClass bootEntityDescriptor : mappingMetadata.getEntityBindings() ) { final AccessType accessType = 
AccessType.fromExternalName( bootEntityDescriptor.getCacheConcurrencyStrategy() ); if ( accessType != null ) { if ( bootEntityDescriptor.isCached() ) { regionConfigBuilders.computeIfAbsent( bootEntityDescriptor.getRootClass().getCacheRegionName(), DomainDataRegionConfigImpl.Builder::new ) .addEntityConfig( bootEntityDescriptor, accessType ); } if ( RootClass.class.isInstance( bootEntityDescriptor ) && bootEntityDescriptor.hasNaturalId() && bootEntityDescriptor.getNaturalIdCacheRegionName() != null ) { regionConfigBuilders.computeIfAbsent( bootEntityDescriptor.getNaturalIdCacheRegionName(), DomainDataRegionConfigImpl.Builder::new ) .addNaturalIdConfig( (RootClass) bootEntityDescriptor, accessType ); } } } for ( Collection collection : mappingMetadata.getCollectionBindings() ) { final AccessType accessType = AccessType.fromExternalName( collection.getCacheConcurrencyStrategy() ); if ( accessType != null ) { regionConfigBuilders.computeIfAbsent( collection.getCacheRegionName(), DomainDataRegionConfigImpl.Builder::new ) .addCollectionConfig( collection, accessType ); } } final Set<DomainDataRegionConfig> regionConfigs; if ( regionConfigBuilders.isEmpty() ) { regionConfigs = Collections.emptySet(); } else { regionConfigs = new HashSet<>(); for ( DomainDataRegionConfigImpl.Builder builder : regionConfigBuilders.values() ) { regionConfigs.add( builder.build() ); } } getSessionFactory().getCache().prime( regionConfigs ); } @SuppressWarnings("unchecked") private void applyNamedEntityGraphs(java.util.Collection<NamedEntityGraphDefinition> namedEntityGraphs) { for ( NamedEntityGraphDefinition definition : namedEntityGraphs ) { log.debugf( "Applying named entity graph [name=%s, entity-name=%s, jpa-entity-name=%s", definition.getRegisteredName(), definition.getEntityName(), definition.getJpaEntityName() ); final EntityType entityType = entity( definition.getEntityName() ); if ( entityType == null ) { throw new IllegalArgumentException( "Attempted to register named entity graph [" + definition.getRegisteredName() + "] for unknown entity ["+ definition.getEntityName() + "]" ); } final EntityGraphImpl entityGraph = new EntityGraphImpl( definition.getRegisteredName(), entityType, this.getSessionFactory() ); final NamedEntityGraph namedEntityGraph = definition.getAnnotation(); if ( namedEntityGraph.includeAllAttributes() ) { for ( Object attributeObject : entityType.getAttributes() ) { entityGraph.addAttributeNodes( (Attribute) attributeObject ); } } if ( namedEntityGraph.attributeNodes() != null ) { applyNamedAttributeNodes( namedEntityGraph.attributeNodes(), namedEntityGraph, entityGraph ); } entityGraphMap.put( definition.getRegisteredName(), entityGraph ); } } private void applyNamedAttributeNodes( NamedAttributeNode[] namedAttributeNodes, NamedEntityGraph namedEntityGraph, AbstractGraphNode graphNode) { for ( NamedAttributeNode namedAttributeNode : namedAttributeNodes ) { final String value = namedAttributeNode.value(); AttributeNodeImpl attributeNode = graphNode.addAttribute( value ); if ( StringHelper.isNotEmpty( namedAttributeNode.subgraph() ) ) { final SubgraphImpl subgraph = attributeNode.makeSubgraph(); applyNamedSubgraphs( namedEntityGraph, namedAttributeNode.subgraph(), subgraph ); } if ( StringHelper.isNotEmpty( namedAttributeNode.keySubgraph() ) ) { final SubgraphImpl subgraph = attributeNode.makeKeySubgraph(); applyNamedSubgraphs( namedEntityGraph, namedAttributeNode.keySubgraph(), subgraph ); } } } private void applyNamedSubgraphs(NamedEntityGraph namedEntityGraph, String subgraphName, 
SubgraphImpl subgraph) { for ( NamedSubgraph namedSubgraph : namedEntityGraph.subgraphs() ) { if ( subgraphName.equals( namedSubgraph.name() ) ) { applyNamedAttributeNodes( namedSubgraph.attributeNodes(), namedEntityGraph, subgraph ); } } } @Override public java.util.Collection<EntityNameResolver> getEntityNameResolvers() { return entityNameResolvers.keySet(); } private static void registerEntityNameResolvers(EntityPersister persister, Map<EntityNameResolver,Object> entityNameResolvers) { if ( persister.getEntityMetamodel() == null || persister.getEntityMetamodel().getTuplizer() == null ) { return; } registerEntityNameResolvers( persister.getEntityMetamodel().getTuplizer(), entityNameResolvers ); } private static void registerEntityNameResolvers(EntityTuplizer tuplizer, Map<EntityNameResolver,Object> entityNameResolvers) { EntityNameResolver[] resolvers = tuplizer.getEntityNameResolvers(); if ( resolvers == null ) { return; } for ( EntityNameResolver resolver : resolvers ) { entityNameResolvers.put( resolver, ENTITY_NAME_RESOLVER_MAP_VALUE ); } } private static void handleUnusedMappedSuperclasses(MetadataContext context) { final Set<MappedSuperclass> unusedMappedSuperclasses = context.getUnusedMappedSuperclasses(); if ( !unusedMappedSuperclasses.isEmpty() ) { for ( MappedSuperclass mappedSuperclass : unusedMappedSuperclasses ) { log.unusedMappedSuperclass( mappedSuperclass.getMappedClass().getName() ); locateOrBuildMappedsuperclassType( mappedSuperclass, context ); } } } private static EntityTypeImpl<?> locateOrBuildEntityType(PersistentClass persistentClass, MetadataContext context) { EntityTypeImpl<?> entityType = context.locateEntityType( persistentClass ); if ( entityType == null ) { entityType = buildEntityType( persistentClass, context ); } return entityType; } //TODO remove / reduce @SW scope @SuppressWarnings("unchecked") private static EntityTypeImpl<?> buildEntityType(PersistentClass persistentClass, MetadataContext context) { final Class javaType = persistentClass.getMappedClass(); context.pushEntityWorkedOn( persistentClass ); final MappedSuperclass superMappedSuperclass = persistentClass.getSuperMappedSuperclass(); AbstractIdentifiableType<?> superType = superMappedSuperclass == null ? null : locateOrBuildMappedsuperclassType( superMappedSuperclass, context ); //no mappedSuperclass, check for a super entity if ( superType == null ) { final PersistentClass superPersistentClass = persistentClass.getSuperclass(); superType = superPersistentClass == null ? null : locateOrBuildEntityType( superPersistentClass, context ); } EntityTypeImpl entityType = new EntityTypeImpl( javaType, superType, persistentClass ); context.registerEntityType( persistentClass, entityType ); context.popEntityWorkedOn( persistentClass ); return entityType; } private static MappedSuperclassTypeImpl<?> locateOrBuildMappedsuperclassType( MappedSuperclass mappedSuperclass, MetadataContext context) { MappedSuperclassTypeImpl<?> mappedSuperclassType = context.locateMappedSuperclassType( mappedSuperclass ); if ( mappedSuperclassType == null ) { mappedSuperclassType = buildMappedSuperclassType( mappedSuperclass, context ); } return mappedSuperclassType; } //TODO remove / reduce @SW scope @SuppressWarnings("unchecked") private static MappedSuperclassTypeImpl<?> buildMappedSuperclassType( MappedSuperclass mappedSuperclass, MetadataContext context) { final MappedSuperclass superMappedSuperclass = mappedSuperclass.getSuperMappedSuperclass(); AbstractIdentifiableType<?> superType = superMappedSuperclass == null ? 
null : locateOrBuildMappedsuperclassType( superMappedSuperclass, context ); //no mappedSuperclass, check for a super entity if ( superType == null ) { final PersistentClass superPersistentClass = mappedSuperclass.getSuperPersistentClass(); superType = superPersistentClass == null ? null : locateOrBuildEntityType( superPersistentClass, context ); } final Class javaType = mappedSuperclass.getMappedClass(); MappedSuperclassTypeImpl mappedSuperclassType = new MappedSuperclassTypeImpl( javaType, mappedSuperclass, superType ); context.registerMappedSuperclassType( mappedSuperclass, mappedSuperclassType ); return mappedSuperclassType; } // /** // * Instantiate the metamodel. // * // * @param entityNameResolvers // * @param entities The entity mappings. // * @param embeddables The embeddable (component) mappings. // * @param mappedSuperclassTypeMap The {@link javax.persistence.MappedSuperclass} mappings // */ // private MetamodelImpl( // SessionFactoryImplementor sessionFactory, // Map<String, String> imports, // Map<String, EntityPersister> entityPersisterMap, // Map<Class, String> entityProxyInterfaceMap, // ConcurrentHashMap<EntityNameResolver, Object> entityNameResolvers, // Map<String, CollectionPersister> collectionPersisterMap, // Map<String, Set<String>> collectionRolesByEntityParticipant, // Map<Class<?>, EntityTypeImpl<?>> entities, // Map<Class<?>, EmbeddableTypeImpl<?>> embeddables, // Map<Class<?>, MappedSuperclassType<?>> mappedSuperclassTypeMap, // Map<String, EntityTypeImpl<?>> entityTypesByEntityName) { // this.sessionFactory = sessionFactory; // this.imports = imports; // this.entityPersisterMap = entityPersisterMap; // this.entityProxyInterfaceMap = entityProxyInterfaceMap; // this.entityNameResolvers = entityNameResolvers; // this.collectionPersisterMap = collectionPersisterMap; // this.collectionRolesByEntityParticipant = collectionRolesByEntityParticipant; // this.entities = entities; // this.embeddables = embeddables; // this.mappedSuperclassTypeMap = mappedSuperclassTypeMap; // this.entityTypesByEntityName = entityTypesByEntityName; // } @Override public TypeConfiguration getTypeConfiguration() { return typeConfiguration; } @Override public SessionFactoryImplementor getSessionFactory() { return sessionFactory; } @Override @SuppressWarnings({"unchecked"}) public <X> EntityType<X> entity(Class<X> cls) { final EntityType<?> entityType = jpaEntityTypeMap.get( cls ); if ( entityType == null ) { throw new IllegalArgumentException( "Not an entity: " + cls ); } return (EntityType<X>) entityType; } @Override @SuppressWarnings({"unchecked"}) public <X> ManagedType<X> managedType(Class<X> cls) { ManagedType<?> type = jpaEntityTypeMap.get( cls ); if ( type == null ) { type = jpaMappedSuperclassTypeMap.get( cls ); } if ( type == null ) { type = jpaEmbeddableTypeMap.get( cls ); } if ( type == null ) { throw new IllegalArgumentException( "Not a managed type: " + cls ); } return (ManagedType<X>) type; } @Override @SuppressWarnings({"unchecked"}) public <X> EmbeddableType<X> embeddable(Class<X> cls) { final EmbeddableType<?> embeddableType = jpaEmbeddableTypeMap.get( cls ); if ( embeddableType == null ) { throw new IllegalArgumentException( "Not an embeddable: " + cls ); } return (EmbeddableType<X>) embeddableType; } @Override public Set<ManagedType<?>> getManagedTypes() { final int setSize = CollectionHelper.determineProperSizing( jpaEntityTypeMap.size() + jpaMappedSuperclassTypeMap.size() + jpaEmbeddableTypes.size() ); final Set<ManagedType<?>> managedTypes = new HashSet<ManagedType<?>>( 
setSize ); managedTypes.addAll( jpaEntityTypesByEntityName.values() ); managedTypes.addAll( jpaMappedSuperclassTypeMap.values() ); managedTypes.addAll( jpaEmbeddableTypes ); return managedTypes; } @Override public Set<EntityType<?>> getEntities() { return new HashSet<>( jpaEntityTypesByEntityName.values() ); } @Override public Set<EmbeddableType<?>> getEmbeddables() { return new HashSet<>( jpaEmbeddableTypes ); } @Override @SuppressWarnings("unchecked") public <X> EntityType<X> entity(String entityName) { return (EntityType<X>) jpaEntityTypesByEntityName.get( entityName ); } @Override public String getImportedClassName(String className) { String result = imports.get( className ); if ( result == null ) { try { sessionFactory.getServiceRegistry().getService( ClassLoaderService.class ).classForName( className ); imports.put( className, className ); return className; } catch ( ClassLoadingException cnfe ) { imports.put( className, INVALID_IMPORT ); return null; } } else if ( result == INVALID_IMPORT ) { return null; } else { return result; } } @Override public String[] getImplementors(String className) throws MappingException { // computeIfAbsent() can be a contention point and we expect all the values to be in the map at some point so // let's do an optimistic check first String[] implementors = implementorsCache.get( className ); if ( implementors != null ) { return Arrays.copyOf( implementors, implementors.length ); } try { final Class<?> clazz = getSessionFactory().getServiceRegistry().getService( ClassLoaderService.class ).classForName( className ); implementors = doGetImplementors( clazz ); if ( implementors.length > 0 ) { implementorsCache.putIfAbsent( className, implementors ); return Arrays.copyOf( implementors, implementors.length ); } else { return EMPTY_IMPLEMENTORS; } } catch (ClassLoadingException e) { return new String[]{ className }; // we don't cache anything for dynamic classes } } @Override public Map<String, EntityPersister> entityPersisters() { return entityPersisterMap; } @Override public CollectionPersister collectionPersister(String role) { final CollectionPersister persister = collectionPersisterMap.get( role ); if ( persister == null ) { throw new MappingException( "Could not locate CollectionPersister for role : " + role ); } return persister; } @Override public Map<String, CollectionPersister> collectionPersisters() { return collectionPersisterMap; } @Override public EntityPersister entityPersister(Class entityClass) { return entityPersister( entityClass.getName() ); } @Override public EntityPersister entityPersister(String entityName) throws MappingException { EntityPersister result = entityPersisterMap.get( entityName ); if ( result == null ) { throw new MappingException( "Unknown entity: " + entityName ); } return result; } @Override public EntityPersister locateEntityPersister(Class byClass) { EntityPersister entityPersister = entityPersisterMap.get( byClass.getName() ); if ( entityPersister == null ) { String mappedEntityName = entityProxyInterfaceMap.get( byClass ); if ( mappedEntityName != null ) { entityPersister = entityPersisterMap.get( mappedEntityName ); } } if ( entityPersister == null ) { throw new UnknownEntityTypeException( "Unable to locate persister: " + byClass.getName() ); } return entityPersister; } @Override public EntityPersister locateEntityPersister(String byName) { final EntityPersister entityPersister = entityPersisterMap.get( byName ); if ( entityPersister == null ) { throw new UnknownEntityTypeException( "Unable to locate persister: " + 
byName ); } return entityPersister; } @Override public Set<String> getCollectionRolesByEntityParticipant(String entityName) { return collectionRolesByEntityParticipant.get( entityName ); } @Override public String[] getAllEntityNames() { return ArrayHelper.toStringArray( entityPersisterMap.keySet() ); } @Override public String[] getAllCollectionRoles() { return ArrayHelper.toStringArray( collectionPersisterMap.keySet() ); } @Override public <T> void addNamedEntityGraph(String graphName, EntityGraph<T> entityGraph) { if ( entityGraph instanceof EntityGraphImplementor ) { entityGraph = ( (EntityGraphImplementor<T>) entityGraph ).makeImmutableCopy( graphName ); } final EntityGraph old = entityGraphMap.put( graphName, entityGraph ); if ( old != null ) { log.debugf( "EntityGraph being replaced on EntityManagerFactory for name %s", graphName ); } } @Override @SuppressWarnings("unchecked") public <T> EntityGraph<T> findEntityGraphByName(String name) { return entityGraphMap.get( name ); } @Override @SuppressWarnings("unchecked") public <T> List<EntityGraph<? super T>> findEntityGraphsByType(Class<T> entityClass) { final EntityType<T> entityType = entity( entityClass ); if ( entityType == null ) { throw new IllegalArgumentException( "Given class is not an entity : " + entityClass.getName() ); } final List<EntityGraph<? super T>> results = new ArrayList<>(); for ( EntityGraph entityGraph : entityGraphMap.values() ) { if ( !EntityGraphImplementor.class.isInstance( entityGraph ) ) { continue; } final EntityGraphImplementor egi = (EntityGraphImplementor) entityGraph; if ( egi.appliesTo( entityType ) ) { results.add( egi ); } } return results; } @Override public void close() { // anything to do ? } private String[] doGetImplementors(Class<?> clazz) throws MappingException { ArrayList<String> results = new ArrayList<>(); for ( EntityPersister checkPersister : entityPersisters().values() ) { if ( !Queryable.class.isInstance( checkPersister ) ) { continue; } final Queryable checkQueryable = Queryable.class.cast( checkPersister ); final String checkQueryableEntityName = checkQueryable.getEntityName(); final boolean isMappedClass = clazz.getName().equals( checkQueryableEntityName ); if ( checkQueryable.isExplicitPolymorphism() ) { if ( isMappedClass ) { return new String[]{ clazz.getName() }; // NOTE EARLY EXIT } } else { if ( isMappedClass ) { results.add( checkQueryableEntityName ); } else { final Class<?> mappedClass = checkQueryable.getMappedClass(); if ( mappedClass != null && clazz.isAssignableFrom( mappedClass ) ) { final boolean assignableSuperclass; if ( checkQueryable.isInherited() ) { Class<?> mappedSuperclass = entityPersister( checkQueryable.getMappedSuperclass() ).getMappedClass(); assignableSuperclass = clazz.isAssignableFrom( mappedSuperclass ); } else { assignableSuperclass = false; } if ( !assignableSuperclass ) { results.add( checkQueryableEntityName ); } } } } } return results.toArray( new String[results.size()] ); } }
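A short usage sketch of the JPA-facing side of the metamodel implemented above. The Person entity and the way the EntityManagerFactory is obtained are illustrative assumptions; only the entity(Class), managedType-style lookups and getManagedTypes() calls come from the implementation itself.

import javax.persistence.Entity;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Id;
import javax.persistence.metamodel.EntityType;
import javax.persistence.metamodel.Metamodel;

public class MetamodelUsageSketch {

    // Hypothetical entity, used only for illustration.
    @Entity
    public static class Person {
        @Id
        Long id;
    }

    static void describe(EntityManagerFactory emf) {
        Metamodel metamodel = emf.getMetamodel();

        // Backed by jpaEntityTypeMap; throws IllegalArgumentException for non-entities.
        EntityType<Person> personType = metamodel.entity(Person.class);
        System.out.println("Entity name: " + personType.getName());

        // Entities, mapped superclasses and embeddables combined.
        metamodel.getManagedTypes()
                .forEach(managedType -> System.out.println(managedType.getJavaType()));
    }
}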
import React, { FC, useCallback, useEffect, useMemo, useState } from 'react'
import { useDispatch, useSelector } from 'react-redux'
import { omit, pick } from 'ramda'

import { NewReceiptItem, Receipt, ReceiptUpdateFields } from '../../../receipt.types'
import {
  addReceiptItem,
  DeleteReceiptItem,
  deleteReceiptItem,
  updateReceipt,
  UpdateReceiptItem,
  updateReceiptItem,
} from '../../../expenses.actions'
import { ReceiptControls } from './receipt-controls'
import { Expense } from './expense'
import { createReceiptItemsSelector } from '../../../expenses.selectors'
import { ExpenseFields, FocusableExpenseFields, ReceiptFields } from '../expense.types'
import { createItem } from '../expense.helpers'

type SavedExpenseProps = {
  receipt: Receipt
}

export const SavedExpense: FC<SavedExpenseProps> = ({ receipt }) => {
  const dispatch = useDispatch()
  const itemsSelector = useMemo(() => createReceiptItemsSelector(receipt.id), [receipt.id])

  const [fields, setFields] = useState<Record<FocusableExpenseFields, HTMLInputElement | null>>({
    day: null,
    category: null,
  })
  const addField = useCallback((field: FocusableExpenseFields, input: HTMLInputElement | null) => {
    setFields(fields => ({ ...fields, [field]: input }))
  }, [])

  const onSave = useCallback(
    (values: ReceiptUpdateFields) =>
      dispatch(updateReceipt({ ...omit(['items'], receipt), ...pick(['day', 'shop'], values) })),
    [dispatch, receipt],
  )
  const addItem = useCallback(
    (item: NewReceiptItem) =>
      dispatch(addReceiptItem({
        id: receipt.id,
        value: createItem(receipt.id, item),
      })),
    [dispatch, receipt.id],
  )
  const updateItem = useCallback((item: UpdateReceiptItem) => dispatch(updateReceiptItem(item)), [dispatch])
  const deleteItem = useCallback((item: DeleteReceiptItem) => dispatch(deleteReceiptItem(item)), [dispatch])

  const items = useSelector(itemsSelector)

  const onBlur = useCallback((field: ReceiptFields, newValue: any) => {
    dispatch(updateReceipt({
      ...receipt,
      [field]: newValue,
    }))
  }, [dispatch, receipt])

  const onKeyDown = useCallback((field: ExpenseFields, event: React.KeyboardEvent) => {
    if (event.key === 'Enter') {
      switch (field) {
        case 'categoryId':
        case 'value':
        case 'description':
          setTimeout(() => fields.category !== null && fields.category.focus(), 0)
          break
      }
    }
  }, [fields])

  const [expanded, setExpanded] = useState(receipt.expanded || false)

  const renderControls = useCallback((day?: number, shop?: string) => (
    <ReceiptControls
      item={{ id: receipt.id, day: day || new Date().getDate(), shop: shop || '' }}
      expanded={expanded}
      processing={receipt.processing || false}
      setExpanded={setExpanded}
    />
  ), [receipt.id, receipt.processing, expanded])

  useEffect(() => {
    setExpanded(receipt.expanded || false)
  }, [receipt.expanded])

  return (
    <Expense
      addField={addField}
      addItem={addItem}
      deleteItem={deleteItem}
      expanded={expanded}
      items={items}
      receipt={receipt}
      onBlur={onBlur}
      onKeyDown={onKeyDown}
      onSave={onSave}
      updateItem={updateItem}
    >
      {renderControls}
    </Expense>
  )
}
/* Prune eliminates unnecessary repositories and groups from the database. */
func (db *Database) Prune() (int, int) {
	var repos, groups = db.PruneTargets()
	for _, repo := range repos {
		db.DeleteRepository(repo.ID)
	}
	for _, group := range groups {
		db.DeleteGroup(group.Name)
	}
	return len(repos), len(groups)
}
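A minimal sketch of how Prune might be called and reported on, assuming fmt is imported. Obtaining the *Database handle is package-specific and not shown in the snippet above; only the Prune call itself comes from it.

// pruneAndReport assumes an already-initialised *Database from this package;
// how that handle is created is not part of this sketch.
func pruneAndReport(db *Database) (removedRepos, removedGroups int) {
	removedRepos, removedGroups = db.Prune()
	fmt.Printf("pruned %d repositories and %d groups\n", removedRepos, removedGroups)
	return
}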
// GetFunc gets the function defined by the given fully-qualified name. The
// outFuncPtr parameter should be a pointer to a function with the appropriate
// type (e.g. the address of a local variable), and is set to a new function
// value that calls the specified function. If the specified function does not
// exist, outFuncPtr is not set and an error is returned.
func GetFunc(outFuncPtr interface{}, name string) (err error) {
	switch GoVersion {
	case "go1.14", "go1.15":
		err = go114.GetFunc(outFuncPtr, name)
	case "go1.16":
		err = go116.GetFunc(outFuncPtr, name)
	default:
		panic("Not suitable for " + GoVersion)
	}
	return
}
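A usage sketch for GetFunc, assuming fmt is imported and GetFunc is in scope. The target symbol runtime.nanotime is purely illustrative, and whether the linker keeps a given unexported symbol reachable depends on the binary; the local function variable must match the target's signature exactly.

// nanotime is resolved at runtime to the unexported runtime.nanotime function
// (an illustrative target; any fully-qualified name with a matching signature works).
var nanotime func() int64

func printMonotonicClock() {
	if err := GetFunc(&nanotime, "runtime.nanotime"); err != nil {
		fmt.Println("could not resolve symbol:", err)
		return
	}
	fmt.Println("monotonic clock:", nanotime())
}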
def volume_panel(name, quiet=1, _self=cmd, _noqt=0):
    from pymol import gui

    qt_window = not int(_noqt) and gui.get_qtwindow()
    app = gui.get_pmgapp()

    if qt_window:
        from pmg_qt import volume as volume_qt

        @app.execute
        def _():
            try:
                panel = _volume_windows_qt[name]
            except LookupError:
                panel = volume_qt.VolumePanel(qt_window, name, _self=_self)
                _volume_windows_qt[name] = panel
            panel.show()
            panel.raise_()
        return

    from pmg_tk import volume
    import tkinter as Tkinter

    def func():
        try:
            window = _volume_windows[name]
            window.lift()
        except (LookupError, Tkinter.TclError):
            window = Tkinter.Toplevel(app.root)
            window.title('Volume Panel for "%s"' % name)
            window.panel = volume.VolumePanel(window, name, _self=_self)
            window.panel.pack()
            _volume_windows[name] = window

    app.execute(func)
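A hedged example of opening this panel from a running PyMOL session. The map file and object names are placeholders; cmd.load and cmd.volume are standard PyMOL commands, while calling volume_panel directly assumes the module above is importable in that session.

from pymol import cmd

# Placeholders: any density map format supported by PyMOL will do.
cmd.load("emd_1234.ccp4", "my_map")   # load a density map
cmd.volume("my_volume", "my_map")     # create a volume object from it
volume_panel("my_volume")             # open the color/opacity panel defined above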
//
//  main.cpp
//  irinasolves
//
//  Created by Irina Korneeva on 13/07/16.
//  Copyright © 2016 Irina. All rights reserved.
//
#include <cstdio>
#include <iostream>
#include <vector>
#include <set>
using namespace std;

int used[1000001];
set<int> d[1000001];
multiset<int> val;  // multiset so duplicate values within a component are preserved
set<int> pos;
int a[1000001];

// Collect the values and vertex indices of one connected component.
void dfs(int v) {
    used[v] = 1;
    val.insert(a[v]);
    pos.insert(v);
    for (auto i = d[v].begin(); i != d[v].end(); i++) {
        if (!used[*i]) {
            dfs(*i);
        }
    }
}

int main() {
    int n;
    int m;
    cin >> n >> m;
    for (int i = 0; i < n; i++) {
        cin >> a[i];
    }
    int x, y;
    for (int i = 0; i < m; i++) {
        cin >> x >> y;
        d[x - 1].insert(y - 1);
        d[y - 1].insert(x - 1);
    }
    for (int i = 0; i < n; i++) {
        if (!used[i]) {
            dfs(i);
            // Assign the largest collected value to the smallest index,
            // the second largest to the next index, and so on.
            auto mean = val.rbegin();
            for (auto t = pos.begin(); t != pos.end(); t++) {
                a[*t] = *mean;
                mean++;
            }
            val.clear();
            pos.clear();
        }
    }
    for (int i = 0; i < n; i++) {
        printf("%d ", a[i]);
    }
}
Dousing the flames of fear

Remembering a dangerous and thus frightful situation can be a lifesaver, but unchecked fearful memories can seriously affect the quality of life. Both the formation of fear memories and their extinction depend on the amygdala, a set of nuclei located deep within the temporal lobe. The way in which we and other mammals acquire and extinguish fear memories has been extensively studied over the last few years (Ehrlich et al. 2009). Importantly, instead of deleting the old trace, fear extinction forms a new memory that keeps the old, fearful association in check.

What are the circuits that provide this control? Good evidence points to the involvement of inhibitory GABAergic neurons (Harris & Westbrook, 1998; Chhatwal et al. 2005; Heldt & Ressler, 2007). In the current issue of The Journal of Physiology, Manko et al. (2011) significantly add to our understanding of this process. They investigated intercalated neurons – a special class of projection GABAergic neuron that is found in several compact clusters around the basolateral nucleus of the amygdala and whose selective destruction causes a deficit in fear extinction (Likhtik et al. 2008; Amano et al. 2010). Manko and colleagues have studied the ‘main intercalated nucleus’ (Im), the largest, most ventrally located cluster of intercalated neurons – and curiously the least studied so far.

On the one hand, they provide evidence that these intercalated neurons of the Im are less homogeneous than previously thought. They form three distinct groups of cells according to their intrinsic physiological properties. Studying their dendritic and axonal aspects and, physiologically, their synaptic inputs, Manko and colleagues mapped their local connectivity. Their results show that Im neurons receive synaptic inputs within the Im from diverse sources, including the basolateral nucleus. An important observation is that the influence of the intercalated network is more widespread than previously thought. Cells with the soma in the Im send functional projections to the central nucleus and basolateral amygdala as well as to extra-amygdaloid areas. On the other hand, they confirm findings that have been obtained studying other intercalated clusters (Marowsky et al. 2005) – in particular a prominent modulation of their activity by dopamine and the NMDA receptor-mediated component of their synaptic responses. Taken together, these results suggest that Im cells are subjected to activity-dependent changes with major behavioural consequences.

A lot of work remains to be done to describe the circuitry that controls fear memory in detail. We know that intercalated neurons are crucial for fear extinction, but we do not know whether this function is mediated by all of them or only by specific subtypes. Characterising their baseline activity in vivo, its changes during emotional behaviour, and the underlying mechanisms will be key to our understanding of how they fulfill their role. Finally, and perhaps more challenging, will be a complete mapping of their connectivity. The amygdala and the intercalated neurons receive many and diverse long-range inputs and project to the basal forebrain. These cannot be fully preserved in slice preparations, and thus other methods, such as optogenetics or in vivo single-cell labelling, may have to provide the necessary specificity.

Overall, in spite of recent breakthroughs (Ciocchi et al. 2010; Haubensak et al. 2010), and of the potential behavioural and clinical importance of inhibition in the amygdala, our knowledge of its GABAergic circuitry remains surprisingly limited, and more work is necessary. We do not want to forget that something is frightening; rather, we learn that it may – depending on the circumstances – actually be harmless. Change the context and the fear may be back. This intriguing and fragile regulation may save our lives, but it may also cripple us with unnecessary fear. Miroslawa Manko et al. have just provided us with an important piece in the puzzle that describes this mechanism.
"""Adapted from: https://github.com/https-deeplearning-ai/GANs-Public."""
from argparse import ArgumentParser
from pathlib import Path
from typing import Any, List, Optional, Tuple
from warnings import warn

import pytorch_lightning as pl
import torch
import torch.nn.functional as F

from pl_bolts.callbacks import SRImageLoggerCallback
from pl_bolts.datamodules import TVTDataModule
from pl_bolts.datasets.utils import prepare_sr_datasets
from pl_bolts.models.gans.srgan.components import SRGANDiscriminator, SRGANGenerator, VGG19FeatureExtractor


class SRGAN(pl.LightningModule):
    """SRGAN implementation from the paper `Photo-Realistic Single Image Super-Resolution Using a Generative
    Adversarial Network <https://arxiv.org/abs/1609.04802>`__. It uses a pretrained SRResNet model as the generator
    if available.

    Code adapted from `https-deeplearning-ai/GANs-Public <https://github.com/https-deeplearning-ai/GANs-Public>`_ to
    Lightning by:

        - `<NAME> <https://github.com/chris-clem>`_

    You can pretrain a SRResNet model with :code:`srresnet_module.py`.

    Example::

        from pl_bolts.models.gan import SRGAN

        m = SRGAN()
        Trainer(gpus=1).fit(m)

    Example CLI::

        # CelebA dataset, scale_factor 4
        python srgan_module.py --dataset=celeba --scale_factor=4 --gpus=1

        # MNIST dataset, scale_factor 4
        python srgan_module.py --dataset=mnist --scale_factor=4 --gpus=1

        # STL10 dataset, scale_factor 4
        python srgan_module.py --dataset=stl10 --scale_factor=4 --gpus=1
    """

    def __init__(
        self,
        image_channels: int = 3,
        feature_maps_gen: int = 64,
        feature_maps_disc: int = 64,
        num_res_blocks: int = 16,
        scale_factor: int = 4,
        generator_checkpoint: Optional[str] = None,
        learning_rate: float = 1e-4,
        scheduler_step: int = 100,
        **kwargs: Any,
    ) -> None:
        """
        Args:
            image_channels: Number of channels of the images from the dataset
            feature_maps_gen: Number of feature maps to use for the generator
            feature_maps_disc: Number of feature maps to use for the discriminator
            num_res_blocks: Number of res blocks to use in the generator
            scale_factor: Scale factor for the images (either 2 or 4)
            generator_checkpoint: Generator checkpoint created with SRResNet module
            learning_rate: Learning rate
            scheduler_step: Number of epochs after which the learning rate gets decayed
        """
        super().__init__()
        self.save_hyperparameters()

        if generator_checkpoint:
            self.generator = torch.load(generator_checkpoint)
        else:
            assert scale_factor in [2, 4]
            num_ps_blocks = scale_factor // 2
            self.generator = SRGANGenerator(image_channels, feature_maps_gen, num_res_blocks, num_ps_blocks)

        self.discriminator = SRGANDiscriminator(image_channels, feature_maps_disc)
        self.vgg_feature_extractor = VGG19FeatureExtractor(image_channels)

    def configure_optimizers(self) -> Tuple[List[torch.optim.Adam], List[torch.optim.lr_scheduler.MultiStepLR]]:
        opt_disc = torch.optim.Adam(self.discriminator.parameters(), lr=self.hparams.learning_rate)
        opt_gen = torch.optim.Adam(self.generator.parameters(), lr=self.hparams.learning_rate)

        sched_disc = torch.optim.lr_scheduler.MultiStepLR(opt_disc, milestones=[self.hparams.scheduler_step], gamma=0.1)
        sched_gen = torch.optim.lr_scheduler.MultiStepLR(opt_gen, milestones=[self.hparams.scheduler_step], gamma=0.1)

        return [opt_disc, opt_gen], [sched_disc, sched_gen]

    def forward(self, lr_image: torch.Tensor) -> torch.Tensor:
        """Generates a high resolution image given a low resolution image.

        Example::

            srgan = SRGAN.load_from_checkpoint(PATH)
            hr_image = srgan(lr_image)
        """
        return self.generator(lr_image)

    def training_step(
        self,
        batch: Tuple[torch.Tensor, torch.Tensor],
        batch_idx: int,
        optimizer_idx: int,
    ) -> torch.Tensor:
        hr_image, lr_image = batch

        # Train discriminator
        result = None
        if optimizer_idx == 0:
            result = self._disc_step(hr_image, lr_image)

        # Train generator
        if optimizer_idx == 1:
            result = self._gen_step(hr_image, lr_image)

        return result

    def _disc_step(self, hr_image: torch.Tensor, lr_image: torch.Tensor) -> torch.Tensor:
        disc_loss = self._disc_loss(hr_image, lr_image)
        self.log("loss/disc", disc_loss, on_step=True, on_epoch=True)
        return disc_loss

    def _gen_step(self, hr_image: torch.Tensor, lr_image: torch.Tensor) -> torch.Tensor:
        gen_loss = self._gen_loss(hr_image, lr_image)
        self.log("loss/gen", gen_loss, on_step=True, on_epoch=True)
        return gen_loss

    def _disc_loss(self, hr_image: torch.Tensor, lr_image: torch.Tensor) -> torch.Tensor:
        real_pred = self.discriminator(hr_image)
        real_loss = self._adv_loss(real_pred, ones=True)

        _, fake_pred = self._fake_pred(lr_image)
        fake_loss = self._adv_loss(fake_pred, ones=False)

        disc_loss = 0.5 * (real_loss + fake_loss)

        return disc_loss

    def _gen_loss(self, hr_image: torch.Tensor, lr_image: torch.Tensor) -> torch.Tensor:
        fake, fake_pred = self._fake_pred(lr_image)

        perceptual_loss = self._perceptual_loss(hr_image, fake)
        adv_loss = self._adv_loss(fake_pred, ones=True)
        content_loss = self._content_loss(hr_image, fake)

        gen_loss = 0.006 * perceptual_loss + 0.001 * adv_loss + content_loss

        return gen_loss

    def _fake_pred(self, lr_image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        fake = self(lr_image)
        fake_pred = self.discriminator(fake)
        return fake, fake_pred

    @staticmethod
    def _adv_loss(pred: torch.Tensor, ones: bool) -> torch.Tensor:
        target = torch.ones_like(pred) if ones else torch.zeros_like(pred)
        adv_loss = F.binary_cross_entropy_with_logits(pred, target)
        return adv_loss

    def _perceptual_loss(self, hr_image: torch.Tensor, fake: torch.Tensor) -> torch.Tensor:
        real_features = self.vgg_feature_extractor(hr_image)
        fake_features = self.vgg_feature_extractor(fake)
        perceptual_loss = self._content_loss(real_features, fake_features)
        return perceptual_loss

    @staticmethod
    def _content_loss(hr_image: torch.Tensor, fake: torch.Tensor) -> torch.Tensor:
        return F.mse_loss(hr_image, fake)

    @staticmethod
    def add_model_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--feature_maps_gen", default=64, type=int)
        parser.add_argument("--feature_maps_disc", default=64, type=int)
        parser.add_argument("--learning_rate", default=1e-4, type=float)
        parser.add_argument("--scheduler_step", default=100, type=float)
        return parser


def cli_main(args=None):
    pl.seed_everything(1234)

    parser = ArgumentParser()
    parser.add_argument("--dataset", default="mnist", type=str, choices=["celeba", "mnist", "stl10"])
    parser.add_argument("--data_dir", default="./", type=str)
    parser.add_argument("--log_interval", default=1000, type=int)
    parser.add_argument("--scale_factor", default=4, type=int)
    parser.add_argument("--save_model_checkpoint", dest="save_model_checkpoint", action="store_true")

    parser = TVTDataModule.add_argparse_args(parser)
    parser = SRGAN.add_model_specific_args(parser)
    parser = pl.Trainer.add_argparse_args(parser)
    args = parser.parse_args(args)

    datasets = prepare_sr_datasets(args.dataset, args.scale_factor, args.data_dir)
    dm = TVTDataModule(*datasets, **vars(args))

    generator_checkpoint = Path(f"model_checkpoints/srresnet-{args.dataset}-scale_factor={args.scale_factor}.pt")
    if not generator_checkpoint.exists():
        warn(
            "No generator checkpoint found. Training generator from scratch. \
            Use srresnet_module.py to pretrain the generator."
        )
        generator_checkpoint = None

    model = SRGAN(
        **vars(args), image_channels=dm.dataset_test.image_channels, generator_checkpoint=generator_checkpoint
    )

    trainer = pl.Trainer.from_argparse_args(
        args,
        callbacks=[SRImageLoggerCallback(log_interval=args.log_interval, scale_factor=args.scale_factor)],
        logger=pl.loggers.TensorBoardLogger(
            save_dir="lightning_logs",
            name="srgan",
            version=f"{args.dataset}-scale_factor={args.scale_factor}",
            default_hp_metric=False,
        ),
    )

    trainer.fit(model, dm)


if __name__ == "__main__":
    cli_main()
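A small inference sketch based on the docstrings above. The checkpoint path is a placeholder, and random noise stands in for a real low-resolution batch; with the default scale_factor of 4 the generator should return images four times larger in each spatial dimension.

import torch

from pl_bolts.models.gans.srgan.srgan_module import SRGAN

CKPT_PATH = "path/to/srgan.ckpt"  # placeholder checkpoint produced by trainer.fit above

model = SRGAN.load_from_checkpoint(CKPT_PATH)
model.eval()

lr_batch = torch.randn(1, 3, 32, 32)  # stand-in for a real low-resolution image batch
with torch.no_grad():
    hr_batch = model(lr_batch)

print(hr_batch.shape)  # expected: torch.Size([1, 3, 128, 128]) for scale_factor=4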
import Cookie from "js-cookie"; import { setContext } from "apollo-link-context"; import { ApolloClient, HttpLink, InMemoryCache } from "apollo-boost"; const httpLink = new HttpLink({ uri: `${process.env.REACT_APP_GRAPHQL_URL}` }); const authLink = setContext(async (_: any, { headers }) => { if (Cookie.get("access_token")) { return ({ headers: { ...headers, authorization: `Bearer ${Cookie.get("access_token")}`, }, }); } return { headers }; }); export const client = new ApolloClient({ link: authLink.concat(httpLink), cache: new InMemoryCache(), connectToDevTools: process.env.NODE_ENV !== "production", });
def store_and_reset(self, replication):
    plan_key = self.add_plan(self.plans, replication)
    if self.plan_length < len(plan_key):
        self.plan_length = len(plan_key)
    self.add_to_csvrows(self.hardening_plans[plan_key])
    self.reset()
Trend-spotters and futurologists have become the evangelists of the modern business world. Spend more than 10 minutes listening to their breezy uplift about what is around the corner, however, and two questions begin to well up inside you - how come they know this stuff, and how does one go about separating the wheat from the chaff? Built into the discipline, after all, is a tendency to exaggerate the shock of the new: it helps to drum up business. And by the time their prognostications have failed to materialise, it is safe to predict that most of them will have scarpered. No matter. The business of short-range futurology - that hybrid of science and intuition that reads the runes of business and consumer trends in an effort to predict what will whistle its way into the mainstream within the next 12 to 18 months - is now in high demand. So what do the crystal-ball gazers reckon will be the top 10 trends of 2008? The rise of N11 If 2007 was the year in which chatter about the possibilities posed by China and India (bludgeoned into the portmanteau "Chindia" by one ambitious analyst) reached a crescendo, 2008 may be the year in which N11 arrives on the lips of the cognoscenti. The term N11 was coined in a recent report by Goldman Sachs, and refers to the up-and-coming "Next 11" countries who are snapping at the heels of India, China, Russia and Brazil as investment opportunities - Bangladesh, Egypt, Indonesia, Iran, South Korea, Mexico, Nigeria, Pakistan, the Philippines, Turkey and Vietnam. Over the past three years, economic growth across Goldman Sachs's N11 has averaged 5.9%, the strongest in 15 years and more than double the 2.3% average growth of Old Europe. Marian Salzman, an energetic New York trend-spotter at the ad agency JWT, who was one of the first to talk up "Chindia" in 2007, now says that she is "watching N11 with interest". And where the money goes, there is a good chance that cultural fascination will follow. Peer-to-peer lending Jeremy Gutsche, a Toronto-based trend-spotter who runs the website Trendhunter.com, is a good example of a new breed of trend-spotter who collects insights by cultivating an online community of trend-watchers around the world. In keeping with the egalitarian ethos of the web, Gutsche argues that one of the key financial trends of 2008 will be the growth of person-to-person lending exchanges in which borrowers and lenders come together directly on the web and cut out the banks. Some of these lending operations use an eBay-like auction in which the lender who is willing to provide the lowest interest rate gets the borrower's loan; others are for people who already know one another but who want someone to help formalise the loan arrangement. A good example of the latter is CircleLending, an American firm in which Richard Branson's Virgin USA recently acquired a majority stake. Much the same model of "peer-to-peer" lending is slowly working its way into the charitable sector. Outfits such as Kiva.org, for example, put potential "social investors" together with small businesses in the developing world, who promise to send back regular email updates on how the business is doing. Social networking grows up Next year will see online social networking cease to be the preserve of the young. According to data recently released by the European Interactive Advertising Association, 18% of European over-55s now visit a social networking site at least once a month, not far behind the "digital generation" of 16- to 34-year-olds, where 28% access such sites. 
The past year has seen a 12% jump in these so-called silver surfers, and new social networking forums such as MyChumsClub and Saga Zone are appearing to accommodate older users. Online social networking is maturing in other ways, too. Following concerns that office-bound staff were spending too much of their day on social networking sites such as Facebook - a recent survey concluded that office workers in the UK spend company time worth £130m each day browsing online - many British firms are now mounting crackdowns against their use. As people get used to juggling a multiplicity of new roles and new rules, says London-based trend-spotter Tamar Kasriel, the result will be to fuel confusion about how personal the personal computer is when it's at work. Twitchiness among employers, says Kasriel, is largely responsible for the latest internet abbreviation, NSFW, or "not suitable for work", which bored office workers are increasingly using to preface any material they forward to their friends. Reverse knowledge migration Another solid basis for futurological speculation is to follow the flow of people. Paul Saffo, a respected California-based forecaster, argues that the next few years will see the beginnings of a "reverse knowledge migration" in which, as well as bright and well-educated workers coming from the developing world to the west, people will start to move in the opposite direction. This new global class of "cyber-gypsies", says Saffo, will not only include American and European Asians returning "home", but also highly educated, non-Asian Americans and Europeans going off to make their fortunes in places such as China. The trend, he argues, will soon move from a source of sociological curiosity to a source of alarm for governments and businesses. Companies, universities and thinktanks in Europe and America, he warns, who often smugly assumed that they would be a magnet for the world's talent, are going to discover that this is no longer the case. Handmade on the net For some years now, one staple of futurological speculation has been the quest for authenticity in what seems like an anonymous and artificial world. Witness, for example, the young fashionistas who self-consciously reclaim dreary leisure activities - everything from bingo to choir practice, from quiz nights to knitting - that would have bored even their parents rigid. A new twist on all this, says Reinier Evers of the Amsterdam firm Trendwatching.com, is the sprouting of internet-based ventures that purvey handmade and highly traditional fare. In Switzerland, for example, Netgranny (netgranny.ch) is a collective comprised of 15 cheerful-looking grannies who knit socks on demand and sell them online. Customers can choose their favourite granny by picture, pick the colour of their socks, or opt for a granny "surprise" design. It takes two weeks for a granny to knit a pair of socks; at €26 (£19) apiece, including delivery, they make an excellent idea for a Christmas gift. Something very similar is being touted by the Danish company Mormor.nu, which sells traditional handmade baby and children's wear online. Mormor.nu is Danish for "Grandma.now". All its products are handmade from pure wool, alpaca or cotton. Old knitting and crochet techniques and patterns have been revived, while the colours and materials have been updated. For a dash of extra authenticity, the company's workers are as steeped in tradition as its products; the youngest member of staff is 68. 
Likewise, Etsy (etsy.com) is an online marketplace for handmade goods that features more than 26,000 vendors from across the world and sells everything from scented soy candles to a tennis-ball chair. It is only two years old, but so far more than 1m items have been sold and 300,000 people have joined as members. Clubbing together If at first his or her predictions don't come to pass, the seasoned futurologist can simply hunker down and wait. Nearly a decade ago, for example, the American futurologist Jeremy Rifkin argued that we were all moving into "the age of access" - from an economy in which it was good to own stuff into one in which people would prefer to rent it. Nothing much happened, and people quickly moved on, but just recently the idea is beginning to look more plausible. For those who want to take out a time-share in a dog, for example, 2007 saw the launch in California of a "shared pet ownership" company, FlexPetz; the firm is fast expanding, and is now opening a branch in London. Meanwhile, a new Germany company called Lütte-Leihen is renting out baby clothes to parents of fast-growing young children; several companies, such as Bag, Borrow or Steal in America and Be a Fashionista in the UK, are renting out designer handbags; and the Dutch company Rent-a-Garden is leasing out sculptures and potted plants to those who want to give their back gardens a much needed summertime makeover. Sharing the costs, Salzman points out, is becoming increasingly popular in many different retail sectors. The inexorable rise of "fractional luxury", for example, is giving not-quite-wealthy-enough people the opportunity to buy a time-share in anything from a racehorse to a jumbo jet. Outfits such as PartialOwner.com and Fractionallife.com are extending the partial-ownership model to everything from homes to luxury cars and restaurants. Art lovers, too, can now buy into syndicates to purchase artwork. ARTvest in Glasgow, for example, was set up last year to enable people to pool their funds and get a foothold in the expensive market for contemporary art. Sharing the costs can be fun, too. Young women in Argentina and elsewhere, Salzman says, are holding clothes-swapping parties in order to share out the costs of getting hold of the latest fashion gear. The new vicarious consumption Ambitious futurologists need credible buzzwords, but good ones are in perilously short supply. One option is to take an old trend off the peg, dust it down a bit and give it a whole new twist. The idea of "vicarious consumption" was first coined 100 years ago by the economist Thorstein Veblen to describe the thrill rich people get when they buy their butler a lovely new uniform. Nowadays, reckons Evers, it is making a comeback in a whole new form. Just as book reviews have become a substitute among many of us for reading books, Evers says, our enthusiasm for endless product reviews is becoming a way through which we can vicariously experience almost anything through the eyes (and sometimes ears) of people who have already been there. Sites dedicated to reading reviews on other people's experiences, such as iliketotallyloveit.com and ballofdirt.com, offer a heady mixture of entertainment, voyeurism and exhibitionism, and are already quietly attracting millions on the web. DIY education The rise of blogging and self-broadcasting sites such as YouTube as an alternative to TV, says Gutsche, has brought with it a burgeoning demand among people to learn new skills, not from professional educators but from their peers. 
A good example, he says, is the proliferation of virtual cooking classes on YouTube, in which people persuade each other to experiment with their favourite recipes. Then there are the home videos that take people through the easiest way to unlock or otherwise manipulate their iPhones or other gadgets. The "education" on offer at such sites does not have to be improving. Willitblend.comb, for example, spoofs the growth of DIY education videos by showing viewers how to granulate their iPhones or iPods in a blender. Then there is the popularity of bizarre videos demonstrating that if, for example, you shove a packet of Mentos mints into a bottle of Diet Coke, the whole thing explodes - the YouTube equivalent, it would appear, of the school chemistry experiment. Digital housecleaning One consequence of the matrix of social networking sites such as Facebook and Second Life that are colonising the web is that our private selves are soon going to be on display as never before. In an age of "digital individualism", in which many of us work so hard to create and customise our identities on the net, it is ironic that all this information ends up stored on an anonymous bank of computer servers, to be cooled by some bored warehouse caretaker. While there is a much greater acceptance among young people of living life in the glare of the net, says Salzman, the lust for digital exhibitionism will soon wither when they come to start looking for a job. With admissions offices and human resources departments increasingly using the web to vet prospective candidates - two-thirds of companies readily admit to keeping tabs on employees by checking social networking sites, according to the British recruitment agency Poolio - a good chunk of the incriminating material whirling around the cybersphere will somehow have to be expunged. MySpace pages will be cleaned up and mass "Facebook suicides" will soon become the norm as young people try to agree a bond of forgetting by deactivating their profiles in unison. Companies will spring up like Reputation Defender, an American firm that promises to search out and destroy all inaccurate, inappropriate, hurtful and slanderous information that exists on its clients. Virtual identity managers Another consequence of the public display of ourselves on the net is that many of us are going to end up hiring professional stand-ins. By 2011, reckon the researchers at technology advice firm Gartner, 80% of internet users and major companies will have avatars, or digital replicas of themselves, for online work and play. Kasriel predicts that this will give rise to a new cadre of independent advisers - what she calls "holistic identity managers" - whose job it will be to garden the internet profiles of business people and keep them on the straight and narrow. Very soon, she believes, it will come as no surprise at all when we learn that high-ranking executives are not writing and updating their own profiles but paying someone else to do it for them. Already, sites such as FakeWebcam.com allow paying people to pre-record videos of themselves and play them on a loop as if they were visible on their webcam. The aesthetically challenged might even think about hiring more attractive stand-ins as well as scriptwriters. For those who can afford it, smile - it's not you on candid camera. · James Harkin's book Big Ideas: The Essential Guide to the Latest Thinking will be published in February by Atlantic Books.