# Splits n into exactly k powers of two (prints NO if impossible).
def no():
    print('NO')
    exit(0)

n, k = map(int, input().split())
bina = list(map(int, bin(n)[2:]))
cur = bina.count(1)
if k > n or cur > k:
    no()
while cur < k:
    # Take the most significant non-zero digit and break it into smaller powers.
    index = bina.index(next(x for x in bina if x > 0))
    to = index
    while to < len(bina) - 1 and pow(2, to - index + 1) - 1 <= k - cur:
        to += 1
    if to == index:
        no()
    bina[index] -= 1
    bina[to] += pow(2, to - index)
    cur += pow(2, to - index) - 1
print('YES')
for i in range(len(bina)):
    num = pow(2, len(bina) - i - 1)
    for j in range(bina[i]):
        print(num, end=' ')
/* Adds a message to the document's external message queue. */
void enqueue_message(Document *document, const Message *message)
{
    if ((document->flags & DOCFLAG_EXTERNAL_MESSAGES) != 0) {
        /* NOTE: as written this recurses into itself while passing a
         * MessageQueue* where a Document* is expected; the intended callee
         * is presumably a queue-level enqueue helper. */
        enqueue_message(&document->message_queue, message);
    }
}
// newHashTable creates a new hashtable
func newHashTable(size uint) *hashTable {
	return &hashTable{
		buckets: make([]*Bucket, size),
		nSize:   0,
	}
}
Fermi-liquid and non-Fermi-liquid phases of an extended Hubbard model in infinite dimensions. We study an extended Hubbard model in the limit of infinite dimensions. The local correlation functions of this model are those of a generalised asymmetric Anderson model. The impurity model displays a Fermi liquid phase, a phase with neither the spin nor the charge of the impurity quenched, and an intermediate phase with the spin but not the charge of the impurity quenched. This analysis establishes the existence of metallic non-Fermi-liquid phases of the lattice model over a wide range of parameters and electron densities. The non-Fermi-liquid phases describe metals with incoherent spin and/or charge excitations and self-similar local correlation functions.
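For orientation, a minimal sketch (not taken from the paper) of the kind of lattice Hamiltonian such a study typically starts from; the standard extended Hubbard form is shown, with the hopping rescaled as t ∝ 1/√d so the kinetic energy stays finite as d → ∞, while the specific extension and couplings used by the authors may differ:

H = -t \sum_{\langle ij\rangle,\sigma} \bigl(c^{\dagger}_{i\sigma} c_{j\sigma} + \mathrm{h.c.}\bigr) + U \sum_{i} n_{i\uparrow} n_{i\downarrow} + V \sum_{\langle ij\rangle} n_{i} n_{j}

Here U is the on-site repulsion and V a nearest-neighbour interaction; in infinite dimensions the lattice problem maps onto a single correlated site coupled to a self-consistent bath, which is why the local correlation functions are those of a (generalised) Anderson impurity model.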
Osama bin Laden issued a handy guide about masturbation – assuring jihadists that he approved of self-love in extreme cases.

The memo — among the documents and sizable porn stash seized in 2011, when Navy SEALs killed him in Pakistan — has now been released by the Office of the Director of National Intelligence. The documents expose the 9/11 mastermind as a hands-on leader, eager to dispense any and all advice to his sex-starved fighters.

“Another very special and top secret matters (eyes only you, my brother Abu Muhammad Salah and Samir): it pertains to the problem of the brothers who are with you in their unfortunate celibacy and lack of availability of wives for them in the conditions that have been imposed on them,” he wrote. “We pray to God to release them. … God is not ashamed of the truth,” he continued. “As we see it, we have no objection to clarifying to the brothers that they may, in such conditions, masturbate, since this is an extreme case. The ancestors approved this for the community.”

Before issuing the call to arms to his minions suffering through dry spells, Al Qaeda’s former top dog acknowledged that some jihadis might be “ashamed” by servicing themselves, News.com.au reported. “We wanted to reflect some more and research it, and we want to get back to you on it before you say anything about it. Give us your opinion — is it appropriate? “Can it be suggested to the brothers? If so, how?” he wrote. “Do you think it can help solve the problem? Or do you think we should avoid proposing it, and rather stick to the recommendation of total patience and seek help through fasting?” he added.

In 2015, US officials refused to release details on the “extensive” smut collection – including sex videos — found in bin Laden’s Abbottabad lair.
from sys import stdin


# Counts how many times the running maximum or minimum is broken while scanning the list.
def main():
    stdin.readline()
    l = list(map(int, stdin.readline().split()))
    mi = ma = l[0]
    cnt = 0
    for x in l:
        if ma < x:
            ma = x
            cnt += 1
        elif mi > x:
            mi = x
            cnt += 1
    print(cnt)


main()
// Creates a new instance of the IntegrationAccountMapListResultPage type.
func NewIntegrationAccountMapListResultPage(cur IntegrationAccountMapListResult, getNextPage func(context.Context, IntegrationAccountMapListResult) (IntegrationAccountMapListResult, error)) IntegrationAccountMapListResultPage {
	return IntegrationAccountMapListResultPage{
		fn:    getNextPage,
		iamlr: cur,
	}
}
<gh_stars>10-100 #pragma once #include <map> #include <iostream> #include <glm/glm.hpp> #include <glm/gtx/transform.hpp> #include <glm/gtx/quaternion.hpp> #include <SDL2/SDL.h> #include "../core/display.h" #include "../editor/components/viewport.h" class Camera { public: Camera(Display* windown, float fov, float aspect, float near, float far); virtual ~Camera(); enum Mode { ORTHOGRAPHIC, PERSPECTIVE }; enum Direction { UP, DOWN, LEFT, RIGHT, FORWARD, BACKWARD }; inline float& getFov() { return m_fov; } inline void setFov(float value) { m_fov = value; } inline void setViewport(Viewport* viewport) { m_viewport = viewport; } inline Viewport* getViewport() { return m_viewport; } inline float getAspect() { return m_aspect; } inline void setAspect(float value) { m_aspect = value; } inline float getNear() { return m_near; } inline void setNear(float value) { m_near = value; } inline float getFar() { return m_far; } inline void setFar(float value) { m_far = value; } inline Mode getMode() { return m_mode; } inline void setMode(Mode mode) { m_mode = mode; } virtual inline const glm::mat4& getProjectionMatrix() { return m_projMatrix; } virtual inline void setProjectionMatrix(const glm::mat4& matrix) { m_projMatrix = matrix; } virtual inline const glm::mat4 getViewMatrix() { return m_viewMatrix; }; virtual inline void setViewMatrix(const glm::mat4& matrix) { m_viewMatrix = matrix; } virtual inline glm::vec3& getPosition() { return m_position; }; virtual inline void setPosition(const glm::vec3& position) { m_position = position; }; virtual inline bool& isActive() { return m_active; } virtual inline void setActive(bool value) { m_active = value; } virtual inline void onMouseMove(const glm::vec2& mouse) {} virtual inline void onMouseDown(Uint8 button) {} virtual inline void onMouseUp(Uint8 button) {} virtual inline void onKeyDown(const SDL_Keycode& keycode) {} virtual inline void onKeyUp(const SDL_Keycode& keycode) {} virtual inline void onMouseWheel(const SDL_Event& event) {} virtual inline void onWindowResized(const SDL_Event& event); virtual inline void onWindowSizeChanged(const SDL_Event& event); virtual inline Display* getWindow() { return m_window; } virtual inline void update(double delta) {} private: bool m_active; Display* m_window; Viewport* m_viewport; glm::vec3 m_position; Mode m_mode; float m_fov; float m_aspect; float m_near; float m_far; glm::mat4 m_projMatrix; glm::mat4 m_viewMatrix; };
GIGABYTE’s New Products Pictures posted on GIGABYTE’s Facebook pages this week point to at least four new models covering gaming, overclocking and connectivity. Part of GIGABYTE’s new range is its Black Editions (BK), reducing the color of the heatsinks and components to as black as possible. It is unclear if some models will be offered solely as BK versions or if both will be offered.

First up is the next Gaming motherboard, called the G1 WIFI-BK: At first glance users should notice the integrated air and water cooling power delivery heatsink, designed for system builders to use their own fittings. The extended heatsink seems to be masking a PLX8747 chip, which would explain the four PCIe slots and suggest x8/x8/x8/x8 operation for GPUs. Voltage check points are in the top right, along with a SATA power connector for PCIe power. The SATA ports are split with SATA Express ports, and the audio subsystem uses GIGABYTE’s OP-AMP, AMP-UP and gain switches, paired with a Creative audio codec.

For overclocking, the SOC Force: No PLX chip this time, giving the four PCIe layout an x8/x4/x4 + x4 similar to the GIGABYTE Z87X-OC motherboard we reviewed last year. The overclocking buttons and switches at the top right are back, along with the USB ports next to the SATA ports. For PCIe power there is a 6-pin PCIe connector above the PCIe slots. For Z87 this level of motherboard was in the $200 range, forgoing some of the exotic features (particularly on audio) to provide a less expensive overclocking-oriented platform.

For connectivity, the UD7 TH: We did not get a chance to review the Z87 version of the UD7, but judging by the markings on the PCB here it is safe to say that it is being updated for Thunderbolt 2. While we cannot see the rear IO panel, it should be safe to assume that the TB2 ports are on the rear IO rather than an add-in card due to the lack of a TB header. The UD7 TH seems a bit toned down this time, with fewer SATA ports (making room for the SATA Express) and no obvious IR355x ICs. This might potentially leave room for a UD9 in the product stack for the future.

For the mainstream, the UD5H-BK: The Black Edition version of the UD5H lives up to its name, with only a few streaks of yellow on the heatsinks showing. Rather than equipping TB2 like the UD7 TH, the UD5H-BK uses more substantial power delivery and offers a similar SATA port/PCIe arrangement. In fact these two motherboards look rather alike, but with money being spent in different places. I would not be surprised if they end up in the same price bracket.

Additional: We have just been given the go-ahead to post these un-doctored images from a GIGABYTE media event, showing most of the motherboards off in more detail including the name of the chipset. The newer one from this list is the UD3H: This looks a lot like the UD5H and UD7 in terms of color scheme, and offers M.2 with an Intel NIC. This board seems to be equipped with SATA Express (as some of the images show, four SATA ports with two being modified for SATA Express) and one USB 3.0 header. This motherboard looks a lot less busy around the socket area as well, in terms of extra resistors.
// SSHCreateFile creates a file on a remote machine func SSHCreateFile(ctx context.Context, user string, host string, dest string, f io.Reader) error { err := SSHRunCommand(ctx, user, host, fmt.Sprintf("sudo mkdir -p %s", filepath.Dir(dest))) if err != nil { return errors.Wrapf(err, "Error creating %s", filepath.Dir(dest)) } client, err := sshClient(ctx, user, host) if err != nil { return errors.Wrap(err, "Error creating client") } defer func() { _ = client.Close() }() session, err := client.NewSession() if err != nil { return errors.Wrap(err, "Error creating session") } defer func() { _ = session.Close() }() p, err := session.StdinPipe() if err != nil { return errors.Wrap(err, "Error getting STDIN pipe") } errCh := make(chan error, 1) go func() { buf := make([]byte, 1<<20) _, err := io.CopyBuffer(p, f, buf) errCh <- err err = p.Close() errCh <- err }() err = session.Run(fmt.Sprintf("sudo tee %s", dest)) if err != nil { return errors.Wrap(err, "Error running tee command") } err = <-errCh if err != nil { <-errCh return errors.Wrap(err, "Error copying data to target") } err = <-errCh if err != nil { return errors.Wrap(err, "Error closing SSH pipe to target") } return nil }
<filename>examples/gRPC/area_calculator/consumer-jvm/src/test/java/io/pact/example/grpc/consumer/PactConsumerTest.java package io.pact.example.grpc.consumer; import au.com.dius.pact.consumer.MockServer; import au.com.dius.pact.consumer.dsl.PactBuilder; import au.com.dius.pact.consumer.junit.MockServerConfig; import au.com.dius.pact.consumer.junit5.PactConsumerTestExt; import au.com.dius.pact.consumer.junit5.PactTestFor; import au.com.dius.pact.consumer.junit5.ProviderType; import au.com.dius.pact.consumer.model.MockServerImplementation; import au.com.dius.pact.core.model.PactSpecVersion; import au.com.dius.pact.core.model.V4Pact; import au.com.dius.pact.core.model.annotations.Pact; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import java.util.List; import java.util.Map; import static au.com.dius.pact.consumer.dsl.PactBuilder.filePath; @ExtendWith(PactConsumerTestExt.class) @PactTestFor(providerName = "area-calculator-provider", providerType = ProviderType.SYNCH_MESSAGE, pactVersion = PactSpecVersion.V4) public class PactConsumerTest { @Pact(consumer = "protobuf-consumer") V4Pact calculateRectangleArea(PactBuilder builder) { return builder .usingPlugin("protobuf") .expectsToReceive("calculate rectangle area request", "core/interaction/synchronous-message") .with(Map.of( "pact:proto", filePath("../../../proto/plugin.proto"), "pact:content-type", "application/protobuf", "pact:proto-service", "Calculator/calculate", "request", Map.of( "rectangle", Map.of( "length", "matching(number, 3)", "width", "matching(number, 4)" ), "response", List.of( Map.of( "value", "matching(number, 12)" ) ) ) )) .toPact(); } @Test @PactTestFor(pactMethod = "calculateRectangleArea") @MockServerConfig(implementation = MockServerImplementation.Plugin, registryEntry = "protobuf/mock-server/grpc") void consumeInitPluginMessage(MockServer mockServer) { // Plugin.InitPluginRequest request = Plugin.InitPluginRequest.parseFrom(message.getContents().getContents().getValue()); // assertThat(request.getImplementation(), is("pact-jvm-driver")); // assertThat(request.getVersion(), is("0.0.0")); } }
The Problem of Persistence with Rotating Displays Motion-to-photon latency causes images to sway from side to side in a VR/AR system, while display persistence causes smearing; both of these are undesirable artifacts. We show that once latency is reduced or eliminated, smearing due to display persistence becomes the dominant visual artifact, even with accurate tracker prediction. We investigate the human perceptual mechanisms responsible for this and we demonstrate a modified 3D rotation display controller architecture for driving a high-speed digital display which minimizes latency and persistence. We simulate it in software and build a testbench based on a very high frame rate (2880 fps, 1-bit images) display system mounted on a mechanical rotation gantry that emulates display rotation during head rotation in an HMD.
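To make the persistence artifact concrete, here is a small back-of-the-envelope sketch (illustrative assumptions only, not numbers from the paper): a world-fixed point smears across the retina by roughly the head's angular velocity times the time each frame stays lit.

def persistence_smear_deg(head_rate_deg_s, persistence_ms):
    """Angular smear of a world-fixed point while one frame stays lit."""
    return head_rate_deg_s * (persistence_ms / 1000.0)

def smear_in_pixels(smear_deg, hfov_deg=90.0, h_pixels=2000):
    """Convert angular smear to display pixels, assuming uniform pixels per degree."""
    return smear_deg * (h_pixels / hfov_deg)

# Example: a 200 deg/s head turn on a full-persistence 60 Hz panel (~16.7 ms lit)
# versus a low-persistence mode (~2 ms lit).
for persistence in (16.7, 2.0):
    s = persistence_smear_deg(200.0, persistence)
    print(f"{persistence:5.1f} ms persistence -> {s:.2f} deg smear (~{smear_in_pixels(s):.0f} px)")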
/** * Package is containing highlighter classes. **/ package com.gentics.cr.lucene.search.highlight;
# Counts pairs (i, j) with 1 <= i <= n, 1 <= j <= m and (i + j) divisible by 5.
n, m = map(int, input().split())
res = 0
for i in range(1, n + 1):
    req = 5 - i % 5              # smallest j in [1, 5] that makes i + j a multiple of 5
    res += (m - req) // 5 + 1    # count of valid j up to m (0 if req > m)
print(res)
// Call this method *ONLY* to add initial rules from file
public void addInitialRuleFromFile(long id, String operation, double amount, String coin, String direction,
        double target, String currency, String comment, String orderResponse, boolean active, boolean executed,
        String market, String executedTimestamp, boolean seen, boolean visualize) {
    Rule newRule = new Rule(id, operation, amount, coin, direction, target, currency, comment, orderResponse,
            active, executed, market, executedTimestamp, seen, visualize);
    Response resp = RuleValidator.validate(newRule);
    if (resp.isPositive()) {
        getRulesList().add(newRule);
        Utils.log("Initial Rule added:" + newRule.toString(), Utils.LOG_MID);
    } else {
        Utils.log("Error(s) while trying to add an initial rule : " + resp.getMessage()
                + "\nRule = " + newRule.toString(), Utils.LOG_ERR);
    }
}
<gh_stars>0 import { Component } from '@angular/core'; import { FaIconLibrary } from '@fortawesome/angular-fontawesome'; import { faCalendar, faClipboard, faCommentDots, faEye, faEyeSlash, faHeart, faUser, faArrowAltCircleDown, faPauseCircle, faKeyboard } from '@fortawesome/free-regular-svg-icons'; import { State as RootState } from './reducers'; import { Store, select } from '@ngrx/store'; import * as UserSelectors from './reducers/user/user.selectors'; import * as MemorySelectors from './reducers/memory/memory.selectors'; import { faCheck, faCommentMedical, faCaretDown, faCommentSlash, faGlobe, faLink, faKey, faMapMarkerAlt, faPhotoVideo, faUsers, faSignOutAlt, } from '@fortawesome/free-solid-svg-icons'; import { NavigationCancel, NavigationEnd, NavigationStart, Router } from '@angular/router'; import { filter, map } from 'rxjs/operators'; import { combineLatest, Observable } from 'rxjs'; import { selectNavbarIsVisibleForRoute } from './reducers/router'; @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.scss'] }) export class AppComponent { navbarIsVisible$ = this.store.pipe(select(selectNavbarIsVisibleForRoute)); userPublicKey$ = this.store.pipe(select(UserSelectors.selectUserPublicKey)); isLoading$: Observable<boolean>; constructor(library: FaIconLibrary, private store: Store<RootState>, router: Router) { library.addIcons( faEye, faEyeSlash, faUser, faCommentDots, faCommentMedical, faLink, faKey, faKeyboard, faCaretDown, faPauseCircle, faCommentSlash, faGlobe, faMapMarkerAlt, faCalendar, faClipboard, faHeart, faPhotoVideo, faCheck, faUsers, faArrowAltCircleDown, faSignOutAlt, ); const routeLoading$ = router.events.pipe( filter(event => event instanceof NavigationStart || event instanceof NavigationEnd || event instanceof NavigationCancel), map(event => event instanceof NavigationStart) ); this.isLoading$ = combineLatest([ routeLoading$, this.store.select(UserSelectors.selectIsLoading), this.store.select(MemorySelectors.selectIsLoading), ]).pipe( map(([route, user, memory]) => route || user || memory), ); } }
package main import ( "fmt" "github.com/zofan/go-country" "github.com/zofan/go-fwrite" "github.com/zofan/go-language" "github.com/zofan/go-req" "github.com/zofan/go-xmlre" "html" "path/filepath" "regexp" "runtime" "strings" "time" ) func main() { fmt.Println(Update()) } func Update() error { var ( httpClient = req.New(req.DefaultConfig) list = make(map[string]*language.Language) ) resp := httpClient.Get(`http://www.loc.gov/standards/iso639-2/php/code_list.php`) if resp.Error() != nil { return resp.Error() } body := string(resp.ReadAll()) body = html.UnescapeString(body) rowRe := xmlre.Compile(`<td scope="row">(\w+)</td><td>(\w+)</td><td>(\w+)</td>`) for _, row := range rowRe.FindAllStringSubmatch(body, -1) { l := &language.Language{ Alpha3: strings.ToUpper(strings.TrimSpace(row[1])), Alpha2: strings.ToUpper(strings.TrimSpace(row[2])), Name: strings.TrimSpace(row[3]), } list[l.Alpha3] = l } // --- for _, c := range country.List { for _, cl := range c.Languages { if _, ok := list[cl]; ok { list[cl].Users = append(list[cl].Users, c.Alpha3) } } } // --- updateTags(list) var tpl []string tpl = append(tpl, `package language`) tpl = append(tpl, ``) tpl = append(tpl, `// Updated at: `+time.Now().String()) tpl = append(tpl, `var List = []Language{`) for _, l := range list { s := fmt.Sprintf(`%#v`, *l) + `,` s = strings.ReplaceAll(s, `language.Language`, ``) tpl = append(tpl, s) } tpl = append(tpl, `}`) tpl = append(tpl, ``) _, file, _, _ := runtime.Caller(0) dir := filepath.Dir(file) return fwrite.WriteRaw(dir+`/../db.go`, []byte(strings.Join(tpl, "\n"))) } func updateTags(list map[string]*language.Language) { wordSplitRe := regexp.MustCompile(`[^\p{L}\p{N}]+`) wordMap := map[string][]*language.Language{} for _, l := range list { name := strings.ToLower(l.Name + ` ` + strings.Join(l.AltNames, ` `)) words := wordSplitRe.Split(name, -1) for _, w := range words { if len(w) > 0 { wordMap[w] = append(wordMap[w], l) } } l.Tags = []string{} } for w, ls := range wordMap { if len(ls) == 1 { ls[0].Tags = append(ls[0].Tags, w) } } }
Lithium-ion energy storage battery in PV-smart building application Photovoltaic (PV) panels with energy storage batteries represent a feasible solution for powering domestic loads. The service life of the batteries and the power management are the main challenges in developing the energy supply system of smart homes. Therefore, in this paper an efficient algorithm is developed to power a house by extracting the maximum power from the PV panels, enhancing the battery service life and minimizing the power supply from the smart grid. Smart metering, an advanced inverter and a rule-based energy management strategy ensure safe and optimal operation of the energy supply system. Matlab/Simulink models of a typical PV array, lithium-ion battery and inverter with the associated controllers are developed to evaluate the performance of the proposed system. Starting from actual solar irradiance data, the maximum power of the PV array is extracted as a function of the available irradiance and ambient temperature. The daily battery state of charge (SOC) and its internal temperature are calculated depending on the load, the PV power and the battery charge/discharge modes. Simulation results show that the proposed algorithm can enhance the performance of the energy supply system, extract the maximum solar power and minimize the power drawn from the smart grid.
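As a rough illustration of the kind of rule-based dispatch described above, the following minimal sketch (not the paper's model; the capacity, efficiency and SOC limits are made-up assumptions) updates the battery state of charge from PV and load power and reports the resulting grid exchange.

def step_soc(soc, p_pv_kw, p_load_kw, dt_h=1.0,
             capacity_kwh=10.0, eff=0.95, soc_min=0.2, soc_max=0.9):
    """One time step of a rule-based energy balance.

    Surplus PV charges the battery, deficits discharge it, and anything the
    battery cannot absorb or supply is exchanged with the grid.
    Returns (new_soc, grid_kw); grid_kw > 0 means power drawn from the grid.
    """
    surplus_kw = p_pv_kw - p_load_kw
    if surplus_kw >= 0:  # charge within the SOC window, export the rest
        headroom_kwh = (soc_max - soc) * capacity_kwh
        charged_kwh = min(surplus_kw * dt_h * eff, headroom_kwh)
        soc += charged_kwh / capacity_kwh
        grid_kw = -(surplus_kw - charged_kwh / (eff * dt_h))
    else:                # discharge within the SOC window, import the rest
        available_kwh = (soc - soc_min) * capacity_kwh
        discharged_kwh = min(-surplus_kw * dt_h / eff, available_kwh)
        soc -= discharged_kwh / capacity_kwh
        grid_kw = -surplus_kw - discharged_kwh * eff / dt_h
    return soc, grid_kw

# Example: a midday surplus followed by an evening deficit.
soc = 0.5
for pv, load in [(3.0, 1.0), (0.0, 2.0)]:
    soc, grid = step_soc(soc, pv, load)
    print(f"PV={pv} kW load={load} kW -> SOC={soc:.2f}, grid={grid:+.2f} kW")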
from .disklist import *
#include <bits/stdc++.h>
using namespace std;

const int MAX = 20 + 5;
const int MAXN = 1e7 + 5;

int n, k, tot = 0;
int x[MAX];
bool ispri[MAXN];
int prime[MAXN], top = 0;

// Linear (Euler) sieve up to MAXN.
void check_prime()
{
    memset(ispri, 0x3f, sizeof(ispri)); // any non-zero byte marks "prime" initially
    ispri[0] = ispri[1] = 0;
    for (int i = 2; i < MAXN; ++i)
    {
        if (ispri[i])
        {
            prime[top++] = i;
        }
        for (int j = 0; j < top; ++j)
        {
            if (prime[j] * i >= MAXN) break; // >= keeps the write inside the array
            ispri[prime[j] * i] = 0;
            if (i % prime[j] == 0) break;
        }
    }
}

int ans = 0;
bool vis[MAX];
map<int, int> counted;

// Remove `left` of the n numbers from the running sum; once done, the remaining
// sum is that of k chosen numbers — count it if it is a prime not seen before.
void trying(int now, int left)
{
    if (!left)
    {
        if (ispri[now] && counted.find(now) == counted.end())
        {
#ifdef TEWILOCAL
            printf("%d is a prime and hasn't been counted, ans++\n", now);
#endif
            ans++;
            counted[now] = 1;
        }
        return;
    }
    for (int i = 0; i < n; i++)
    {
        if (!vis[i])
        {
            vis[i] = 1;
            trying(now - x[i], left - 1);
            vis[i] = 0;
        }
    }
}

int main(int argc, char const *argv[])
{
    scanf("%d%d", &n, &k);
    for (int i = 0; i < n; i++)
    {
        scanf("%d", &x[i]);
        tot += x[i];
    }
    check_prime();
    memset(vis, 0, sizeof(vis));
    trying(tot, n - k);
    printf("%d\n", ans);
    return 0;
}
The IRS intensified its war against the Tea Party with more delays and an unheard of move. It has stalled processing non-profit status requests for Tea Party groups yet again despite admitting in 2013 it targeted these groups unfairly. Instead of approving the applications, the IRS sent back a new round of questions. This time, they released the questions to the public. The move put confidential tax information in the public eye, a low blow in the ongoing legal saga. "The IRS has taken the unprecedented step of publicly filing actual return information," Edward Greim, a lawyer who is defending more than 400 groups targeted by the IRS, told the Washington Times. Although the release of private tax information would usually be considered illegal, tax experts say the IRS acted according to a loophole in their privacy laws. Conservative lawmakers and leaders have scrutinized the IRS for years for refusing to approve requests by various Tea Party groups for non-profit status. Jay Sekulow, chief counsel of the American Center for Law and Justice, represents some of the targeted groups. "We again demanded that they review their applications and process them in a fair and expeditious manner," he said in a statement. Sekulow says this is just another example of how the IRS does not fairly handle cases.
/** * Metadata for a single KML layer, in JSON format. <br> * <br> * See <a href="https://developers.google.com/maps/documentation/javascript/reference#KmlLayerMetadata">KmlLayerMetadata * API Doc</a> */ public class KmlLayerMetadata extends JavaScriptObject { /** * use newInstance(); */ protected KmlLayerMetadata() { } /** * Metadata for a single KML layer, in JSON format. */ public static final KmlLayerMetadata newInstance() { return JavaScriptObject.createObject().cast(); } /** * The layer's <atom:author>, extracted from the layer markup. * * @param author */ public final native void setAuthor(KmlAuthor author) /*-{ this.author = author; }-*/; /** * The layer's <atom:author>, extracted from the layer markup. */ public final native KmlAuthor getAuthor() /*-{ return this.author; }-*/; /** * The layer's <description>, extracted from the layer markup. * * @param description */ public final native void setDescription(String description) /*-{ this.description = description; }-*/; /** * The layer's <description>, extracted from the layer markup. */ public final native String getDescription() /*-{ return this.description; }-*/; /** * The layer's <name>, extracted from the layer markup. * * @param name */ public final native void setName(String name) /*-{ this.name = name; }-*/; /** * The layer's <name>, extracted from the layer markup. */ public final native String getName() /*-{ return this.name; }-*/; /** * The layer's <Snippet>, extracted from the layer markup * * @param snippet */ public final native void setSnippet(String snippet) /*-{ this.snippet = snippet; }-*/; /** * The layer's <Snippet>, extracted from the layer markup */ public final native String getSnippet() /*-{ return this.snippet; }-*/; }
# Total cost is k*(1 + 2 + ... + w) = k*w*(w+1)/2; print how much is still
# needed beyond the n already available (0 if nothing is missing).
k, n, w = map(int, input().split())
need = k * w * (w + 1) // 2
required = max(need - n, 0)
print(required)
/** * Created by rroeser on 3/8/16. */ public class FailureAwareReactiveSocketClientTest { @Test public void testError() throws InterruptedException { AtomicInteger count = new AtomicInteger(0); FailureAwareReactiveSocketClient client = new FailureAwareReactiveSocketClient(new ReactiveSocketClient() { @Override public Publisher<Payload> requestResponse(Payload payload) { return s -> { if (count.get() < 1) { s.onNext(new Payload() { @Override public ByteBuffer getData() { return null; } @Override public ByteBuffer getMetadata() { return null; } }); count.incrementAndGet(); s.onComplete(); } else { s.onError(new RuntimeException()); } }; } @Override public void close() throws Exception { } }, 1, TimeUnit.SECONDS); double availability = client.availability(); Assert.assertTrue(1.0 == availability); Publisher<Payload> payloadPublisher = client.requestResponse(new Payload() { @Override public ByteBuffer getData() { return null; } @Override public ByteBuffer getMetadata() { return null; } }); TestSubscriber subscriber = new TestSubscriber(); RxReactiveStreams.toObservable(payloadPublisher).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertCompleted(); double good = client.availability(); subscriber = new TestSubscriber(); RxReactiveStreams.toObservable(payloadPublisher).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertError(RuntimeException.class); double bad = client.availability(); Assert.assertTrue(good > bad); } @Test public void testWidowReset() throws InterruptedException { AtomicInteger count = new AtomicInteger(0); FailureAwareReactiveSocketClient client = new FailureAwareReactiveSocketClient(new ReactiveSocketClient() { @Override public Publisher<Payload> requestResponse(Payload payload) { return s -> { if (count.get() < 1) { s.onNext(new Payload() { @Override public ByteBuffer getData() { return null; } @Override public ByteBuffer getMetadata() { return null; } }); count.incrementAndGet(); s.onComplete(); } else { s.onError(new RuntimeException()); } }; } @Override public void close() throws Exception { } }, 1, TimeUnit.SECONDS); double availability = client.availability(); Assert.assertTrue(1.0 == availability); Publisher<Payload> payloadPublisher = client.requestResponse(new Payload() { @Override public ByteBuffer getData() { return null; } @Override public ByteBuffer getMetadata() { return null; } }); TestSubscriber subscriber = new TestSubscriber(); RxReactiveStreams.toObservable(payloadPublisher).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertCompleted(); double good = client.availability(); subscriber = new TestSubscriber(); RxReactiveStreams.toObservable(payloadPublisher).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertError(RuntimeException.class); double bad = client.availability(); Assert.assertTrue(good > bad); Thread.sleep(1_001); double reset = client.availability(); Assert.assertTrue(reset > bad); } }
import pytest

from matcher.score.logic import calculate_match_score
from matcher.score.errors import EmptyInputRecord


@pytest.mark.parametrize(
    'input_record, db_record',
    [
        (
            {'col1': 'val1', 'col2': 'val2'},
            None
        )
    ]
)
def test_calculate_match_score_with_empty_db_record(input_record, db_record):
    expected = 0.00
    result = calculate_match_score(input_record, db_record)
    assert expected == result


@pytest.mark.parametrize(
    'input_record, db_record',
    [
        (
            None,
            None
        )
    ]
)
def test_calculate_match_score_empty_input_record(input_record, db_record):
    with pytest.raises(EmptyInputRecord):
        calculate_match_score(input_record, db_record)
#import "QuadPayCardholder.h" #import "QuadPayCard.h" #import "QuadPayCustomer.h" @class QuadPayVirtualCheckoutViewController; NS_ASSUME_NONNULL_BEGIN @protocol QuadPayVirtualCheckoutDelegate <NSObject> - (void)checkoutSuccessful:(QuadPayVirtualCheckoutViewController*)viewController card:(QuadPayCard *)card cardholder:(QuadPayCardholder *)cardholder customer:(QuadPayCustomer *)customer; - (void)checkoutCancelled:(QuadPayVirtualCheckoutViewController*)viewController reason:(NSString *)reason; - (void)didFailWithError:(QuadPayVirtualCheckoutViewController*)viewController error:(NSString *)error; @end NS_ASSUME_NONNULL_END
/** removes zero time Diff, Integrate terms **/
ex zero_order_rem(const ex& expr_)
{
    ex _y = expr_;
    exmap expr;
    expr[Integrate(wild(0), wild(1), 0)] = wild(0);
    expr[Diff(wild(0), wild(1), 0)] = wild(0);
    ex xprev;
    do {
        xprev = _y;
        _y = _y.subs(expr, subs_options::algebraic);
    } while (xprev != _y);
    return _y;
}
Risk, variability, and decision-making in goal-directed movements
This is the first study to assess the optimality of whole-body movements and to compare optimal planning performance across movement tasks. We have shown that:
1. Whole-body movements are less optimal than arm-reaching movements.
2. Optimality in arm-reaching is reduced by increased variability, whereas this effect is not observed in whole-body movements.
[Figure residue: condition labels NULL, NOISE, CLIFF, CLIFF+NOISE and axis values 90-120.]
package exohcl import ( "fmt" "github.com/deref/exo/internal/manifest/exohcl/hclgen" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" ) type ComponentSet struct { // Analysis inputs. Blocks hcl.Blocks // Analysis outputs. Components []*Component } func NewComponentSet(m *Manifest) *ComponentSet { return &ComponentSet{ Blocks: m.Components, } } func (cs *ComponentSet) Analyze(ctx *AnalysisContext) { if len(cs.Blocks) > 1 { ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagWarning, Summary: "Expected at most one components block", Detail: fmt.Sprintf("Only one components block may appear in a manifest, but found %d", len(cs.Blocks)), Subject: cs.Blocks[1].DefRange.Ptr(), }) } for _, block := range cs.Blocks { if len(block.Labels) > 0 { ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unexpected label on components block", Detail: fmt.Sprintf("A components block expects no labels, but has %d", len(block.Labels)), Subject: &block.LabelRanges[0], }) } body, ok := block.Body.(*hclsyntax.Body) if !ok { ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Malformed components block", Detail: fmt.Sprintf("Expected components block to be an *hclsyntax.Body, but got %T", block.Body), Subject: &block.DefRange, }) continue } if len(body.Attributes) > 0 { ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unexpected attributes in components block", Detail: fmt.Sprintf("A components block expects no attributes, but has %d", len(body.Attributes)), Subject: body.Attributes.Range().Ptr(), }) } for _, componentBlock := range body.Blocks { component := NewComponent(componentBlock) component.Analyze(ctx) cs.Components = append(cs.Components, component) } } } type Component struct { Source *hclsyntax.Block Expansion *hclsyntax.Block Type string Name string Spec string DependsOn []string } func NewComponent(block *hclsyntax.Block) *Component { return &Component{ Source: block, } } func (c *Component) Analyze(ctx *AnalysisContext) { c.Expansion = expandComponent(ctx, c.Source) block := c.Expansion if block == nil { return } content, diags := block.Body.Content(&hcl.BodySchema{ Attributes: []hcl.AttributeSchema{ {Name: "type", Required: true}, {Name: "spec"}, {Name: "depends_on"}, }, Blocks: []hcl.BlockHeaderSchema{ {Type: "spec"}, }, }) ctx.AppendDiags(diags...) 
switch len(block.Labels) { case 0: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Expected component name", Detail: `A component block must have exactly one label, which is the name of the component.`, Subject: block.DefRange().Ptr(), }) case 1: c.Name = block.Labels[0] default: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unexpected label.", Detail: `A component block must have exactly one label, which is the name of the component.`, Subject: block.LabelRanges[1].Ptr(), }) } typeAttr := content.Attributes["type"] if typeAttr != nil { var diag *hcl.Diagnostic c.Type, diag = parseLiteralString(typeAttr.Expr) if diag != nil { ctx.AppendDiags(diag) } } specAttr := content.Attributes["spec"] if specAttr == nil { specBlocks := content.Blocks.OfType("spec") switch len(specBlocks) { case 0: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Expected component spec", Detail: `A component block must have either a spec attribute or a nested spec block, but neither was found.`, Subject: block.DefRange().Ptr(), }) case 1: c.Spec = string(hclgen.FormatBlock(specBlocks[0])) default: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Expected at most one spec block", Detail: fmt.Sprintf("Only one spec block may appear in a component, but found %d", len(specBlocks)), Subject: specBlocks[1].DefRange.Ptr(), }) } } else { c.Spec, _ = AnalyzeString(ctx, specAttr.Expr) } depsAttr := content.Attributes["depends_on"] if depsAttr != nil { depsExpr := depsAttr.Expr tup, ok := depsExpr.(*hclsyntax.TupleConsExpr) if !ok { ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Expected array of strings", Detail: fmt.Sprintf("Expected literal array of strings, got %T", depsExpr), Subject: depsExpr.Range().Ptr(), }) } c.DependsOn = make([]string, 0, len(tup.Exprs)) for _, elem := range tup.Exprs { dep, diag := parseLiteralString(elem) if diag != nil { ctx.AppendDiags(diag) continue } c.DependsOn = append(c.DependsOn, dep) } } } func expandComponent(ctx *AnalysisContext, block *hclsyntax.Block) *hclsyntax.Block { body := block.Body var encodefunc string switch block.Type { case "component": return block case "process": encodefunc = "jsonencode" case "container", "volume", "network": encodefunc = "yamlencode" default: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unsupported component type", Detail: fmt.Sprintf(`The component type %q is not recognized.`, block.Type), Subject: block.DefRange().Ptr(), }) return nil } for _, subblock := range body.Blocks { switch subblock.Type { case "_": // TODO: copy content of "meta" blocks in to the expanded output. default: ctx.AppendDiags(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Unexpected block", Detail: fmt.Sprintf(`Unexpected %q block in %q component.`, subblock.Type, block.Type), Subject: body.Blocks[0].DefRange().Ptr(), }) } } attrs := body.Attributes specItems := make([]hclsyntax.ObjectConsItem, 0, len(attrs)) for _, attr := range attrs { specItems = append(specItems, hclsyntax.ObjectConsItem{ KeyExpr: hclgen.NewObjStringKey(attr.Name, attr.Range()), ValueExpr: attr.Expr, }) } // sort.Sort(specItemsSorter{specItems}) // XXX sort specItems by attr range? // XXX search for "_" blocks with depends_on, etc. and other meta properties. 
return &hclsyntax.Block{ Type: "component", Labels: block.Labels, Body: &hclsyntax.Body{ Attributes: hclsyntax.Attributes{ "type": &hclsyntax.Attribute{ Name: "type", Expr: hclgen.NewStringLiteral(block.Type, block.TypeRange), SrcRange: block.TypeRange, NameRange: block.TypeRange, EqualsRange: block.TypeRange, }, "spec": &hclsyntax.Attribute{ Name: "spec", Expr: &hclsyntax.FunctionCallExpr{ Name: encodefunc, Args: []hclsyntax.Expression{ &hclsyntax.ObjectConsExpr{ Items: specItems, SrcRange: body.SrcRange, OpenRange: block.OpenBraceRange, }, }, }, SrcRange: body.SrcRange, NameRange: block.TypeRange, EqualsRange: block.TypeRange, }, }, }, TypeRange: block.TypeRange, LabelRanges: block.LabelRanges, OpenBraceRange: block.OpenBraceRange, CloseBraceRange: block.CloseBraceRange, } }
import logging
import warnings
from datetime import datetime

from src import *

logger_spacy = logging.getLogger("spacy")
logger_spacy.setLevel(logging.ERROR)

warnings.filterwarnings("ignore", message=r"\[W108\]", category=UserWarning)
<filename>tests/benchmarks/bench_all.cpp // this file runs all the benchmarks #include <benchmark/benchmark.h> #include <benchutils.hpp> #include <cellular_automata.hpp> #include <iostream> // run 10 iterations on multiple grid sizes using the default number of workers static void BM_SimulationStepSequential(benchmark::State &state) { size_t nrows = state.range(0); size_t ncols = nrows; auto nsteps = 10; auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::seq::CellularAutomaton<int>(grid, update_fn); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_SimulationStepParallel(benchmark::State &state) { size_t nrows = state.range(0); size_t ncols = nrows; auto nsteps = 10; auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::par::CellularAutomaton<int>(grid, update_fn); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_SimulationStepParallelBW(benchmark::State &state) { size_t nrows = state.range(0); size_t ncols = nrows; auto nsteps = 10; auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::par::CellularAutomaton<int>(grid, update_fn); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_SimulationStepFF(benchmark::State &state) { size_t nrows = state.range(0); size_t ncols = nrows; auto nsteps = 10; auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::ffl::CellularAutomaton<int>(grid, update_fn); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_SimulationStepOMP(benchmark::State &state) { size_t nrows = state.range(0); size_t ncols = nrows; auto nsteps = 10; auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::omp::CellularAutomaton<int>(grid, update_fn); for (auto _ : state) { cellaut.simulate(nsteps); } } BENCHMARK(BM_SimulationStepSequential)->Arg(1e2)->Arg(1e3)->Arg(1e4); BENCHMARK(BM_SimulationStepParallel)->Arg(1e2)->Arg(1e3)->Arg(1e4); BENCHMARK(BM_SimulationStepParallelBW)->Arg(1e2)->Arg(1e3)->Arg(1e4); BENCHMARK(BM_SimulationStepFF)->Arg(1e2)->Arg(1e3)->Arg(1e4); BENCHMARK(BM_SimulationStepOMP)->Arg(1e2)->Arg(1e3)->Arg(1e4); // run 10 iterations on a medium sized grid varying the number of workers static void BM_VaryingWorkersGrid1000Parallel(benchmark::State &state) { size_t nrows = 1000; size_t ncols = nrows; auto nsteps = 10; unsigned workers = state.range(0); auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::par::CellularAutomaton<int>(grid, update_fn, workers); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_VaryingWorkersGrid1000ParallelBW(benchmark::State &state) { size_t nrows = 1000; size_t ncols = nrows; auto nsteps = 10; unsigned workers = state.range(0); auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::par::CellularAutomaton<int>(grid, update_fn, workers); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_VaryingWorkersGrid1000FF(benchmark::State &state) { size_t nrows = 1000; size_t ncols = nrows; auto nsteps = 10; unsigned workers = state.range(0); auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = 
ca::ffl::CellularAutomaton<int>(grid, update_fn, workers); for (auto _ : state) { cellaut.simulate(nsteps); } } static void BM_VaryingWorkersGrid1000OMP(benchmark::State &state) { size_t nrows = 1000; size_t ncols = nrows; auto nsteps = 10; unsigned workers = state.range(0); auto grid = ca::Grid<int>(nrows, ncols); auto update_fn = conways_game_of_life_update_function<int>; auto cellaut = ca::omp::CellularAutomaton<int>(grid, update_fn, workers); for (auto _ : state) { cellaut.simulate(nsteps); } } BENCHMARK(BM_VaryingWorkersGrid1000Parallel)->DenseRange(0, 17, 1); BENCHMARK(BM_VaryingWorkersGrid1000ParallelBW)->DenseRange(0, 17, 1); BENCHMARK(BM_VaryingWorkersGrid1000FF)->DenseRange(0, 17, 1); BENCHMARK(BM_VaryingWorkersGrid1000OMP)->DenseRange(0, 17, 1); BENCHMARK_MAIN();
import numpy as np import matplotlib.pyplot as plt import h5py as py from BarAxialVibration import BarAxialVibration out_time = [] pcl_var = [] # numerical solution hdf5_file = py.File("../Build/Tests/t2d_me_s_1d_compression.h5", "r") th_grp = hdf5_file['TimeHistory']['compression'] output_num = th_grp.attrs['output_num'] is_init = False init_y = 0.0 for t_id in range(output_num): # frame frame_grp = th_grp['frame_%d' % t_id] frame_time = frame_grp.attrs['total_time'] out_time.append(frame_time) # particle pcl_dset = frame_grp['ParticleData']['field'] pcl_fld = pcl_dset[123] var = pcl_fld['y'] if not is_init: init_y = var is_init = True var = init_y - var pcl_var.append(var) hdf5_file.close() # analytical solution H = 1.0 p0 = 0.1 bf = 0.0 E = 1000.0 density = 10.0 t_len = 1.0 # time length data_num = 200 # cal data bav = BarAxialVibration(H, p0, bf, E, density) data_num += 1 t_ana = np.zeros(data_num) u_ana = np.zeros(data_num) t_inv = t_len / float(data_num) for i in range(data_num): t_ana[i] = t_inv * i u_ana[i] = bav.displacement(H, t_ana[i]) # plot var - time curve fig = plt.figure() plot1 = fig.subplots(1, 1) plot1.set_xlabel("time") plot1.set_ylabel("displacement") line1, = plot1.plot(out_time, pcl_var) line2, = plot1.plot(t_ana, u_ana) plt.legend(handles=[line1, line2], labels=['MPM', 'Analytical Solution']) plt.show() # output to csv file csv_file = open('t2d_1d_compression_disp.csv', 'w') csv_file.write("MPM,\n") # time csv_file.write("time, ") for ot in out_time: csv_file.write("%f, " % ot) csv_file.write("\n") # pcl vars csv_file.write("disp, ") for pv in pcl_var: csv_file.write("%f, " % pv) csv_file.write("\n") csv_file.write("Analytical solution,\n") # time csv_file.write("time, ") for ot in t_ana: csv_file.write("%f, " % ot) csv_file.write("\n") # pcl vars csv_file.write("disp, ") for pv in u_ana: csv_file.write("%f, " % pv) csv_file.write("\n") csv_file.close()
#include <stdio.h>

/* Prints the first number in [a, b] whose decimal digits are all distinct,
 * or -1 if no such number exists. */
int main(void)
{
    int a, b, c, d[10], e = 0, f, g, h, flag;
    scanf("%d%d", &a, &b);
    for (c = a; c <= b; c++) {
        h = c; flag = 1; e = 0;
        /* split c into its decimal digits */
        while (h >= 10) {
            d[e] = h % 10;
            h = h / 10;
            e++;
        }
        d[e] = h;
        /* check every pair of digits for a repeat */
        for (f = 0; f <= e; f++)
            for (g = f + 1; g <= e; g++)
                if (d[f] == d[g])
                    flag = 0;
        if (flag == 1) {
            printf("%d", c);
            return 0;
        }
    }
    /* every candidate was rejected (or the range was empty) */
    printf("-1");
    return 0;
}
/*
 * To be called by the forked process to load the generated links and Hadoop
 * configuration properties to automatically inject.
 *
 * @param props The Azkaban properties
 */
public static void injectResources(Props props) {
  if (props.getBoolean("azkaban.inject.hadoop-site.configs", true)) {
    Configuration.addDefaultResource("mapred-default.xml");
    Configuration.addDefaultResource("mapred-site.xml");
    Configuration.addDefaultResource("yarn-default.xml");
    Configuration.addDefaultResource("yarn-site.xml");
    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");
  }
  Configuration.addDefaultResource(INJECT_FILE);
}
package frontEnd.CustomJavafxNodes;

import java.util.List;
import java.util.Optional;

import javafx.scene.control.TextInputDialog;

public class SingleFieldPrompt {

	private List<String> myDialogTitles;
	private String myPromptText;
	private String myPromptLabel;

	public SingleFieldPrompt(List<String> dialogTitles, String promptLabel, String promptText) {
		this.myDialogTitles = dialogTitles;
		this.myPromptText = promptText;
		this.myPromptLabel = promptLabel;
	}

	public String getUserInputString() {
		TextInputDialog dialog = new TextInputDialog(myPromptText);
		dialog.setTitle(myDialogTitles.get(0));
		dialog.setHeaderText(myDialogTitles.get(1));
		dialog.setContentText(myPromptLabel);

		// Traditional way to get the response value.
		Optional<String> result = dialog.showAndWait();
		if (result.isPresent()) {
			return result.get();
		}
		return null;
	}

	public Double getUserInputDouble() {
		String userInput = getUserInputString();
		if (userInput == null) {
			return null;
		}
		Double value = Double.valueOf(userInput);
		return value;
	}
}
import { IDotnetify } from "../dist/typings";

declare const dotnetify: IDotnetify;

export default dotnetify;
/**
 * Global imports
 */
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';

/**
 * Local imports
 */
import { TournamentServicesModule } from '@tournament/services';

/**
 * Import guards
 */
import { AdministratorGuard } from './administrator.guard';
import { CreatorGuard } from './creator.guard';
import { ReaderGuard } from './reader.guard';

/**
 * Module description
 * - This module contains all the guards that will be used in the application
 */
@NgModule({
  imports: [
    CommonModule,
    TournamentServicesModule
  ],
  providers: [
    AdministratorGuard,
    CreatorGuard,
    ReaderGuard
  ]
})
export class GuardsModule { }
package com.waes.jgu.service; import java.util.ArrayList; import java.util.List; import com.waes.jgu.domain.EntryData; import com.waes.jgu.dto.DiffResponse; import com.waes.jgu.enums.Side; import com.waes.jgu.exception.EntryIncompleteException; import com.waes.jgu.exception.EntryNotFoundException; import com.waes.jgu.exception.InmutableDataException; import com.waes.jgu.exception.InvalidDataException; import com.waes.jgu.util.Utils; import lombok.extern.slf4j.Slf4j; /** * Implementation of the {@code DiffService} interface. * * Executes operations to store and compare left and right attributes of an {@code EntryData} against a dummy {@code java.util.List} * * @author <NAME> jdgutierrezj */ @Slf4j public class DiffDummyServiceImpl implements DiffService { /** * Dummy DataBase * */ private List<EntryData> dummyDB = new ArrayList<EntryData>(); /** * @inheritDoc * * Execute the operation against a dummy Java Collection * */ @Override public EntryData saveData(String id, Side side, String base64Data) throws InmutableDataException, InvalidDataException { Utils.checkBase64Data(base64Data); log.info("Size " + dummyDB.size()); EntryData entry = dummyDB .stream() .filter(data -> id.equals(data.getId())) .findAny() .orElse(null); if(null != entry) { if(Side.LEFT.equals(side)) { if(null != entry.getLeft() && !"".equals(entry.getLeft().trim())) { throw new InmutableDataException(String.format("The %s side of the comparison was already received", side)); } entry.setLeft(base64Data); } if(Side.RIGHT.equals(side)) { if(null != entry.getRight() && !"".equals(entry.getRight().trim())) { throw new InmutableDataException(String.format("The %s side of the comparison was already received", side)); } entry.setRight(base64Data); } return entry; } else { entry = new EntryData(id); if(Side.LEFT.equals(side)) { entry.setLeft(base64Data); } if(Side.RIGHT.equals(side)) { entry.setRight(base64Data); } dummyDB.add(entry); return entry; } } /** * @inheritDoc * * Execute the operation against a dummy Java Collection * */ @Override public DiffResponse getDiff(String id) throws EntryNotFoundException, EntryIncompleteException { EntryData entry = dummyDB .stream() .filter(data -> id.equals(data.getId())) .findAny() .orElse(null); return Utils.compare(entry, id); } }
/**
 * Return true if this link has expired, ie. a different one exists at a later time than this that is in the past.
 */
boolean expired(DB db) throws SQLException {
    db.executeQuery("SELECT count(*) AS count FROM nodelinks WHERE nlid<>" + nlid
            + " AND nid=" + nid
            + " AND starttime>'" + starttime + "'"
            + " AND starttime<" + db.now());
    db.rs.next();
    return (db.rs.getInt("count") > 0);
}
package mylib2

import "strings"

func repeatString(s string, n int) string {
	var pieces = make([]string, n)
	for i := range pieces {
		pieces[i] = s
	}
	const sep = ""
	return strings.Join(pieces, sep)
}
Now I know we didn’t have the best relationship between our two sites, but I’d like to thank you for your service to the community nevertheless. I know not only myself, but many others have benefited from your site at one time or another. We used to use it as our primary streaming mirror (much to your chagrin) after all. It was great. I never received any complaints. It was always fast and reliable, regardless of your location. Raising money for a cause is hard, and at times you may feel as though you’re alone in making an effort to keep it afloat. A time eventually comes when the benefits no longer outweigh the costs, and it appears that time has finally come for Arkvid. It is with a heavy heart that I bid the site adieu. Thank you, Zach and the rest of your team, for all your hard work these two years. Justin
/* delete node "wn" from the block (which may be NULL). * adjust the next, previous, and block first and last pointers */ extern void LWN_Delete_From_Block(WN *block, WN* wn) { WN *node; WN* parent_wn; Is_True (wn, ("LWN_DeleteFromBlock: deleting a NULL node")); Is_True (((!block) || (WN_opcode(block) == OPC_BLOCK)), ("LWN_DeleteFromBlock: Expecting a BLOCK node")); parent_wn = LWN_Get_Parent(wn); Is_True (((!block) || (block == parent_wn)), ("LWN_DeleteFromBlock: block is not the parent")); Is_True (OPCODE_is_stmt(WN_opcode(wn)) || OPCODE_is_scf(WN_opcode(wn)), ("LWN_DeleteFromBlock: Expecting a SCF or a stmt node")); node = WN_first(parent_wn); while (node) { if (node == wn) break; node = WN_next (node); } FmtAssert (node != NULL, ("LWN_DeleteFromBlock: could not find node to delete")); if (WN_first(parent_wn) == wn && WN_last(parent_wn) == wn) { WN_first(parent_wn)=WN_last(parent_wn)=NULL; } else if (WN_first(parent_wn) == wn) { WN_first(parent_wn) = WN_next(wn); WN_prev(WN_first(parent_wn)) = NULL; } else if (WN_last(parent_wn) == wn) { WN_last(parent_wn) = WN_prev(wn); WN_next(WN_last(parent_wn)) = NULL; } else { WN_next(WN_prev(wn)) = WN_next(wn); WN_prev(WN_next(wn)) = WN_prev(wn); } WN_Delete(wn); }
/** * JUnit4 {@link org.junit.Rule} which executes test with real (in-memory) compiler and custom immutables processor. * It gives access to {@link Elements} and {@link Types} as well as {@link ValueType} (for a given class). * * <p>A much better alternative to mocking {@code javax.lang.model} classes. * * <h3>Usage example</h3> * <p> * <pre> * {@code * @Rule * public final ProcessorRule rule = new ProcessorRule(); * * @ProcessorRule.TestImmutable * interface MyClass { * String attr(); * } * * @Test * public void basic() { * ValueType type = rule.value(MyClass.class); * check(type.attributes.get(0).name()).is("attr"); * } * } * </pre> * </p> * <p>TODO: This rule has to be migrated to JUnit5 as <a href="https://junit.org/junit5/docs/current/user-guide/#extensions">extension</a>. * For some examples see: * <ol> * <li><a href="https://github.com/Kiskae/compile-testing-extension">compile testing extension</a></li> * <li><a href="https://github.com/google/compile-testing/pull/155">Add JUnit5 implementation of CompilationRule PR</a></li> * </ol> * </p> */ public class ProcessorRule implements TestRule { private static final Class<?> DEFAULT_ANNOTATION_CLASS = TestImmutable.class; /** * Simple "file" to trigger compilation. */ private static final JavaFileObject EMPTY = JavaFileObjects.forSourceLines("Empty", "final class Empty {}"); private final ValueTypeComposer composer = new ValueTypeComposer(); private Elements elements; private Types types; private Round round; /** * Annotation to be used instead of {@literal @}{@code Value.Immutable}. Avoids generating * unnecessary classes. */ public @interface TestImmutable {} /** * Returns {@link Elements} instance associated with the current execution. * @throws IllegalStateException if invoked outside the rule. */ public Elements elements() { Preconditions.checkState(elements != null, "not running as part of %s", ProcessorRule.class.getSimpleName()); return elements; } /** * Returns {@link Types} instance associated with the current execution. * @throws IllegalStateException if invoked outside the rule. */ public Types types() { Preconditions.checkState(types != null, "not running as part of %s", ProcessorRule.class.getSimpleName()); return types; } /** * Return single {@link ValueType} instance associated with immutable class {@code type}. * @throws IllegalArgumentException if multiple {@link ValueType}s are associated with {@code type} */ public ValueType value(Class<?> type) { final List<ValueType> values = values(type); if (values.size() != 1) { throw new IllegalArgumentException(String.format("Expected 1 values but got %d for %s", values.size(), type)); } return values.get(0); } /** * Return multiple {@link ValueType} instances associated with immutable class {@code type}. Multiple * instances can be due to several nested immutable classes. 
*/ public List<ValueType> values(Class<?> type) { Preconditions.checkNotNull(type, "type"); Preconditions.checkState(round != null, "not running as part of %s", ProcessorRule.class.getSimpleName()); final TypeElement element = elements().getTypeElement(type.getCanonicalName()); final ImmutableList<Proto.Protoclass> protos = round.protoclassesFrom(Collections.singleton(element)); final List<ValueType> values = new ArrayList<>(); for (Proto.Protoclass proto: protos) { final ValueType value = new ValueType(); composer.compose(value, proto); values.add(value); } return ImmutableList.copyOf(values); } @Override public Statement apply(final Statement base, final Description description) { return new Statement() { @Override public void evaluate() throws Throwable { final LocalProcessor processor = new LocalProcessor(base); final Compilation compilation = Compiler.javac().withProcessors(processor).compile(EMPTY); if (!compilation.status().equals(Compilation.Status.SUCCESS)) { throw new AssertionError(String.format("Compilation failed (status:%s): %s", compilation.status(), compilation.diagnostics())); } if (!processor.wasEvaluated) { throw new AssertionError(String.format("%s was not evaluated. Check that annotation processor %s was triggered " + "(eg. %s annotation is correctly registered)", description.getDisplayName(), processor.getClass().getSimpleName(), DEFAULT_ANNOTATION_CLASS.getCanonicalName())); } processor.rethrowIfError(); } }; } /** * Simple annotation processor which saves environment information. It is then used by this rule * to instantiate internal classes like {@link Round} which gives access to {@link ValueType}. */ private class LocalProcessor extends AbstractGenerator { private final Statement statement; private final Class<?> annotation = DEFAULT_ANNOTATION_CLASS; // saved exception which is potentially rethrown after compilation phase private Throwable thrown; /** * Flag to track if test statement was executed or not. Fail fast if annotation processor * was not triggered. IE detected false (no-op) test executions */ private boolean wasEvaluated; private LocalProcessor(Statement statement) { this.statement = statement; } @Override public synchronized void init(ProcessingEnvironment processingEnv) { super.init(processingEnv); elements = processingEnv.getElementUtils(); types = processingEnv.getTypeUtils(); } @Override public Set<String> getSupportedAnnotationTypes() { // for some reason annotation.getCanonicalName() is not found // using wildcard here return Collections.singleton("*"); } @Override protected void process() { round = ImmutableRound.builder() .processing(processing()) .round(round()) .addCustomImmutableAnnotations(annotation.getCanonicalName()) .build(); try { statement.evaluate(); } catch (Throwable e) { // means test failed thrown = e; } // mark that statement was executed wasEvaluated = true; } void rethrowIfError() throws Throwable { if (thrown != null) { throw thrown; } } } }
/**
 * Check whether the given armor is leggings of any material.
 *
 * @param type
 *            The material describing the armor.
 * @return <code>true</code>, if the armor is leggings.
 */
public static boolean isLeggings(final Material type) {
    if (type == null) {
        return false;
    }
    switch (type) {
        case LEATHER_LEGGINGS:
        case CHAINMAIL_LEGGINGS:
        case IRON_LEGGINGS:
        case GOLD_LEGGINGS:
        case DIAMOND_LEGGINGS:
            return true;
        default:
            return false;
    }
}
/**
 * @author <a href="mailto:[email protected]">Stian Thorgersen</a>
 */
abstract class ClientRepresentationMixIn {

    @JsonIgnore
    String registrationAccessToken;

}
<gh_stars>0 import java.awt.*; public class CardClass extends ShapeClass { //Declare encapsulated data private int suitValue = 1; private int faceValue = 1; private Color suitColor = Color.red; private boolean faceUp = true; //Constructor method public CardClass () { setCentre (320, 250); setHeight (100); setWidth (70); } //Sets the card's suit value to the specified value (set method) //Also sets the color accordingly //ie. If the user sets the suit to a heart, then the suit color is automatically set to red public void setSuitValue (int input) { suitValue = input; //If the suit is a heart or diamond, set the suit's color to red if (suitValue == 1 || suitValue == 2) { suitColor = Color.red; } //If the suit is a spade or a club, set the suit's color to black else if (suitValue == 3 || suitValue == 4) { suitColor = Color.black; } } //Returns the card's suit value public int getSuitValue () { return suitValue; } //Sets the card's face value to the specified value public void setFaceValue (int input) { faceValue = input; } //Returns the card's face value public int getFaceValue () { return faceValue; } //Sets the card's size accordingly to one of 4 inputs public void setCardSize (int input) { if (input == 1) { setHeight (60); setWidth (42); } else if (input == 2) { setHeight (80); setWidth (56); } else if (input == 3) { setHeight (100); setWidth (70); } else //4,etc. { setHeight (120); setWidth (84); } } //Set the card face up or face down public void setFaceUp (boolean input) { faceUp = input; } //Returns the card's faceUp value public boolean getFaceUp () { return faceUp; } //Converts the card's suit value to a string and returns it private String SuitValueToString () { if (faceValue == 1) { return "A"; } else if (faceValue == 11) { return "J"; } else if (faceValue == 12) { return "Q"; } else if (faceValue == 13) { return "K"; } else if (faceValue >= 2 && faceValue <= 10) { return Integer.toString (faceValue); } else { return ""; } } //Card's draw method public void draw (Graphics g) { if (faceUp == true) //If the card's faceUp value is set to true, the program will draw the card { g.setColor (Color.white); g.fillRect (getCentreX () - getWidth () / 2, getCentreY () - getHeight () / 2, getWidth () + 1, getHeight () + 1); g.setColor (Color.black); g.drawRect (getCentreX () - getWidth () / 2, getCentreY () - getHeight () / 2, getWidth (), getHeight ()); g.setColor (suitColor); //Determine's what suit to draw based off of the encapsulated data suitValue if (suitValue == 1) { HeartClass heart = new HeartClass (); heart.setHeight ((int) getHeight () / 4); heart.setCentre (getCentreX (), getCentreY ()); heart.setColor (suitColor); heart.draw (g); } else if (suitValue == 2) { DiamondClass diamond = new DiamondClass (); diamond.setHeight ((int) getHeight () / 4); diamond.setCentre (getCentreX (), getCentreY ()); diamond.setColor (suitColor); diamond.draw (g); } else if (suitValue == 3) { SpadeClass spade = new SpadeClass (); spade.setHeight ((int) getHeight () / 4); spade.setCentre (getCentreX (), getCentreY ()); spade.setColor (suitColor); spade.draw (g); } else if (suitValue == 4) { ClubClass club = new ClubClass (); club.setHeight ((int) getHeight () / 4); club.setCentre (getCentreX (), getCentreY ()); club.setColor (suitColor); club.draw (g); } //Draws the face value of the card in the top left corner g.setFont (new Font ("SanSerif", Font.BOLD, (getHeight () / 4))); g.drawString (SuitValueToString (), getCentreX () - (int) (getWidth () * 0.4), getCentreY () - (int) (getHeight () / 4)); } else 
//If the card's faceUp value is set to false, the program will draw a blue face down card { g.setColor (Color.blue); g.fillRect (getCentreX () - getWidth () / 2, getCentreY () - getHeight () / 2, getWidth (), getHeight ()); g.setColor (Color.black); g.drawRect (getCentreX () - getWidth () / 2, getCentreY () - getHeight () / 2, getWidth (), getHeight ()); } } //Overrides the erase method in ShapeClass //Erases the card by drawing a white rectangle over displayed card public void erase (Graphics g) { g.setColor (Color.white); g.fillRect (getCentreX () - getWidth () / 2, getCentreY () - getHeight () / 2, getWidth () + 1, getHeight () + 1); } }
<gh_stars>1-10 #ifndef DTSegment_DTRecSegment2DAlgoFactory_h #define DTSegment_DTRecSegment2DAlgoFactory_h /** \class DTRecSegment2DAlgoFactory * * Factory of seal plugins for DT 2D segments reconstruction algorithms. * The plugins are concrete implementations of DTRecSegment2DBaseAlgo base class. * * \author <NAME> - INFN Legnaro <<EMAIL>> * */ /* Base Class Headers */ #include "FWCore/PluginManager/interface/PluginFactory.h" #include "RecoLocalMuon/DTSegment/src/DTRecSegment2DBaseAlgo.h" /* Collaborating Class Declarations */ /* C++ Headers */ /* ====================================================================== */ /* Class DTRecSegment2DAlgoFactory Interface */ typedef edmplugin::PluginFactory<DTRecSegment2DBaseAlgo *(const edm::ParameterSet &)> DTRecSegment2DAlgoFactory; #endif // DTSegment_DTRecSegment2DAlgoFactory_h
/* * Copyright (c) 2018-2020, <NAME> <<EMAIL>> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "Devices/PATADiskDevice.h" #include "KSyms.h" #include "Process.h" #include "RTC.h" #include "Scheduler.h" #include <AK/Types.h> #include <Kernel/ACPI/ACPIDynamicParser.h> #include <Kernel/ACPI/ACPIStaticParser.h> #include <Kernel/ACPI/DMIDecoder.h> #include <Kernel/ACPI/MultiProcessorParser.h> #include <Kernel/Arch/i386/CPU.h> #include <Kernel/CMOS.h> #include <Kernel/Devices/BXVGADevice.h> #include <Kernel/Devices/DebugLogDevice.h> #include <Kernel/Devices/DiskPartition.h> #include <Kernel/Devices/EBRPartitionTable.h> #include <Kernel/Devices/FloppyDiskDevice.h> #include <Kernel/Devices/FullDevice.h> #include <Kernel/Devices/GPTPartitionTable.h> #include <Kernel/Devices/KeyboardDevice.h> #include <Kernel/Devices/MBRPartitionTable.h> #include <Kernel/Devices/MBVGADevice.h> #include <Kernel/Devices/NullDevice.h> #include <Kernel/Devices/PATAChannel.h> #include <Kernel/Devices/PIT.h> #include <Kernel/Devices/PS2MouseDevice.h> #include <Kernel/Devices/RandomDevice.h> #include <Kernel/Devices/SB16.h> #include <Kernel/Devices/SerialDevice.h> #include <Kernel/Devices/VMWareBackdoor.h> #include <Kernel/Devices/ZeroDevice.h> #include <Kernel/FileSystem/Ext2FileSystem.h> #include <Kernel/FileSystem/VirtualFileSystem.h> #include <Kernel/Heap/SlabAllocator.h> #include <Kernel/Heap/kmalloc.h> #include <Kernel/Interrupts/APIC.h> #include <Kernel/Interrupts/InterruptManagement.h> #include <Kernel/Interrupts/PIC.h> #include <Kernel/KParams.h> #include <Kernel/Multiboot.h> #include <Kernel/Net/LoopbackAdapter.h> #include <Kernel/Net/NetworkTask.h> #include <Kernel/PCI/Access.h> #include <Kernel/PCI/Initializer.h> #include <Kernel/Random.h> #include <Kernel/TTY/PTYMultiplexer.h> #include <Kernel/TTY/VirtualConsole.h> #include <Kernel/VM/MemoryManager.h> // Defined in the linker script typedef void (*ctor_func_t)(); extern ctor_func_t start_ctors; extern ctor_func_t end_ctors; extern u32 __stack_chk_guard; u32 __stack_chk_guard; namespace Kernel { [[noreturn]] static void init_stage2(); static void setup_serial_debug(); static void setup_acpi(); static void setup_vmmouse(); static void setup_pci(); static void setup_interrupts(); VirtualConsole* tty0; extern "C" [[noreturn]] 
void init() { setup_serial_debug(); cpu_setup(); kmalloc_init(); slab_alloc_init(); new KParams(String(reinterpret_cast<const char*>(low_physical_to_virtual(multiboot_info_ptr->cmdline)))); MemoryManager::initialize(); bool text_debug = KParams::the().has("text_debug"); gdt_init(); idt_init(); setup_interrupts(); setup_acpi(); new VFS; new DebugLogDevice; new Console; klog() << "Starting SerenityOS..."; __stack_chk_guard = get_good_random<u32>(); PIT::initialize(); RTC::initialize(); // call global constructors after gtd and itd init for (ctor_func_t* ctor = &start_ctors; ctor < &end_ctors; ctor++) (*ctor)(); new KeyboardDevice; new PS2MouseDevice; setup_vmmouse(); new SB16; new NullDevice; if (!get_serial_debug()) new SerialDevice(SERIAL_COM1_ADDR, 64); new SerialDevice(SERIAL_COM2_ADDR, 65); new SerialDevice(SERIAL_COM3_ADDR, 66); new SerialDevice(SERIAL_COM4_ADDR, 67); VirtualConsole::initialize(); tty0 = new VirtualConsole(0, VirtualConsole::AdoptCurrentVGABuffer); new VirtualConsole(1); VirtualConsole::switch_to(0); // Sample test to see if the ACPI parser is working... klog() << "ACPI: HPET table @ " << ACPI::Parser::the().find_table("HPET"); setup_pci(); PIT::initialize(); if (text_debug) { dbg() << "Text mode enabled"; } else { if (multiboot_info_ptr->framebuffer_type == 1 || multiboot_info_ptr->framebuffer_type == 2) { new MBVGADevice( PhysicalAddress((u32)(multiboot_info_ptr->framebuffer_addr)), multiboot_info_ptr->framebuffer_pitch, multiboot_info_ptr->framebuffer_width, multiboot_info_ptr->framebuffer_height); } else { new BXVGADevice; } } LoopbackAdapter::the(); Process::initialize(); Thread::initialize(); Thread* init_stage2_thread = nullptr; Process::create_kernel_process(init_stage2_thread, "init_stage2", init_stage2); Thread* syncd_thread = nullptr; Process::create_kernel_process(syncd_thread, "syncd", [] { for (;;) { VFS::the().sync(); Thread::current->sleep(1 * TICKS_PER_SECOND); } }); Process::create_kernel_process(g_finalizer, "Finalizer", [] { Thread::current->set_priority(THREAD_PRIORITY_LOW); for (;;) { { InterruptDisabler disabler; if (!g_finalizer_has_work) Thread::current->wait_on(*g_finalizer_wait_queue); ASSERT(g_finalizer_has_work); g_finalizer_has_work = false; } Thread::finalize_dying_threads(); } }); Scheduler::pick_next(); sti(); Scheduler::idle_loop(); ASSERT_NOT_REACHED(); } void init_stage2() { Syscall::initialize(); new ZeroDevice; new FullDevice; new RandomDevice; new PTYMultiplexer; bool dmi_unreliable = KParams::the().has("dmi_unreliable"); if (dmi_unreliable) { DMIDecoder::initialize_untrusted(); } else { DMIDecoder::initialize(); } bool text_debug = KParams::the().has("text_debug"); bool force_pio = KParams::the().has("force_pio"); auto root = KParams::the().get("root"); if (root.is_empty()) { root = "/dev/hda"; } if (!root.starts_with("/dev/hda")) { klog() << "init_stage2: root filesystem must be on the first IDE hard drive (/dev/hda)"; hang(); } auto pata0 = PATAChannel::create(PATAChannel::ChannelType::Primary, force_pio); NonnullRefPtr<BlockDevice> root_dev = *pata0->master_device(); root = root.substring(strlen("/dev/hda"), root.length() - strlen("/dev/hda")); if (root.length()) { bool ok; unsigned partition_number = root.to_uint(ok); if (!ok) { klog() << "init_stage2: couldn't parse partition number from root kernel parameter"; hang(); } MBRPartitionTable mbr(root_dev); if (!mbr.initialize()) { klog() << "init_stage2: couldn't read MBR from disk"; hang(); } if (mbr.is_protective_mbr()) { dbg() << "GPT Partitioned Storage Detected!"; 
GPTPartitionTable gpt(root_dev); if (!gpt.initialize()) { klog() << "init_stage2: couldn't read GPT from disk"; hang(); } auto partition = gpt.partition(partition_number); if (!partition) { klog() << "init_stage2: couldn't get partition " << partition_number; hang(); } root_dev = *partition; } else { dbg() << "MBR Partitioned Storage Detected!"; if (mbr.contains_ebr()) { EBRPartitionTable ebr(root_dev); if (!ebr.initialize()) { klog() << "init_stage2: couldn't read EBR from disk"; hang(); } auto partition = ebr.partition(partition_number); if (!partition) { klog() << "init_stage2: couldn't get partition " << partition_number; hang(); } root_dev = *partition; } else { if (partition_number < 1 || partition_number > 4) { klog() << "init_stage2: invalid partition number " << partition_number << "; expected 1 to 4"; hang(); } auto partition = mbr.partition(partition_number); if (!partition) { klog() << "init_stage2: couldn't get partition " << partition_number; hang(); } root_dev = *partition; } } } auto e2fs = Ext2FS::create(root_dev); if (!e2fs->initialize()) { klog() << "init_stage2: couldn't open root filesystem"; hang(); } if (!VFS::the().mount_root(e2fs)) { klog() << "VFS::mount_root failed"; hang(); } Process::current->set_root_directory(VFS::the().root_custody()); dbg() << "Load ksyms"; load_ksyms(); dbg() << "Loaded ksyms"; // Now, detect whether or not there are actually any floppy disks attached to the system u8 detect = CMOS::read(0x10); RefPtr<FloppyDiskDevice> fd0; RefPtr<FloppyDiskDevice> fd1; if ((detect >> 4) & 0x4) { fd0 = FloppyDiskDevice::create(FloppyDiskDevice::DriveType::Master); klog() << "fd0 is 1.44MB floppy drive"; } else { klog() << "fd0 type unsupported! Type == 0x", String::format("%x", detect >> 4); } if (detect & 0x0f) { fd1 = FloppyDiskDevice::create(FloppyDiskDevice::DriveType::Slave); klog() << "fd1 is 1.44MB floppy drive"; } else { klog() << "fd1 type unsupported! Type == 0x", String::format("%x", detect & 0x0f); } int error; // SystemServer will start WindowServer, which will be doing graphics. // From this point on we don't want to touch the VGA text terminal or // accept keyboard input. if (text_debug) { tty0->set_graphical(false); Thread* thread = nullptr; Process::create_user_process(thread, "/bin/Shell", (uid_t)0, (gid_t)0, (pid_t)0, error, {}, {}, tty0); if (error != 0) { klog() << "init_stage2: error spawning Shell: " << error; hang(); } thread->set_priority(THREAD_PRIORITY_HIGH); } else { tty0->set_graphical(true); Thread* thread = nullptr; Process::create_user_process(thread, "/bin/SystemServer", (uid_t)0, (gid_t)0, (pid_t)0, error, {}, {}, tty0); if (error != 0) { klog() << "init_stage2: error spawning SystemServer: " << error; hang(); } thread->set_priority(THREAD_PRIORITY_HIGH); } { Thread* thread = nullptr; Process::create_kernel_process(thread, "NetworkTask", NetworkTask_main); } Process::current->sys$exit(0); ASSERT_NOT_REACHED(); } void setup_serial_debug() { // this is only used one time, directly below here. we can't use this part // of libc at this point in the boot process, or we'd just pull strstr in // from <string.h>. auto bad_prefix_check = [](const char* str, const char* search) -> bool { while (*search) if (*search++ != *str++) return false; return true; }; // serial_debug will output all the klog() and dbg() data to COM1 at // 8-N-1 57600 baud. this is particularly useful for debugging the boot // process on live hardware. // // note: it must be the first option in the boot cmdline. 
u32 cmdline = low_physical_to_virtual(multiboot_info_ptr->cmdline); if (cmdline && bad_prefix_check(reinterpret_cast<const char*>(cmdline), "serial_debug")) set_serial_debug(true); } extern "C" { multiboot_info_t* multiboot_info_ptr; } // Define some Itanium C++ ABI methods to stop the linker from complaining // If we actually call these something has gone horribly wrong void* __dso_handle __attribute__((visibility("hidden"))); extern "C" int __cxa_atexit(void (*)(void*), void*, void*) { ASSERT_NOT_REACHED(); return 0; } void setup_acpi() { if (!KParams::the().has("acpi")) { ACPI::DynamicParser::initialize_without_rsdp(); return; } auto acpi = KParams::the().get("acpi"); if (acpi == "off") { ACPI::Parser::initialize_limited(); return; } if (acpi == "on") { ACPI::DynamicParser::initialize_without_rsdp(); return; } if (acpi == "limited") { ACPI::StaticParser::initialize_without_rsdp(); return; } klog() << "acpi boot argmuent has an invalid value."; hang(); } void setup_vmmouse() { VMWareBackdoor::initialize(); if (!KParams::the().has("vmmouse")) { VMWareBackdoor::the().enable_absolute_vmmouse(); return; } auto vmmouse = KParams::the().get("vmmouse"); if (vmmouse == "off") return; if (vmmouse == "on") { VMWareBackdoor::the().enable_absolute_vmmouse(); return; } klog() << "vmmouse boot argmuent has an invalid value."; hang(); } void setup_pci() { if (!KParams::the().has("pci_mmio")) { PCI::Initializer::the().test_and_initialize(false); PCI::Initializer::the().dismiss(); return; } auto pci_mmio = KParams::the().get("pci_mmio"); if (pci_mmio == "on") { PCI::Initializer::the().test_and_initialize(false); } else if (pci_mmio == "off") { PCI::Initializer::the().test_and_initialize(true); } else { klog() << "pci_mmio boot argmuent has an invalid value."; hang(); } PCI::Initializer::the().dismiss(); } void setup_interrupts() { InterruptManagement::initialize(); if (!KParams::the().has("smp")) { InterruptManagement::the().switch_to_pic_mode(); return; } auto smp = KParams::the().get("smp"); if (smp == "off") { InterruptManagement::the().switch_to_pic_mode(); return; } if (smp == "on") { ASSERT_NOT_REACHED(); // FIXME: The IOAPIC mode is not stable yet so we can't use it now. InterruptManagement::the().switch_to_ioapic_mode(); APIC::init(); APIC::enable_bsp(); return; } klog() << "smp boot argmuent has an invalid value."; hang(); } }
Glenn Jones is a graphic designer and illustrator from Auckland, New Zealand. ‘GLENN’ from ‘NZ’ = GLENNZ; which was his user name on the T-shirt design site Threadless.com. What started as a hobby quickly became a booming success, eventually leading to the launch of his own t-shirt site GLENNZ.com, where he now sells all of his designs. Below are 25 of my favourite illustrations, although he has 75+ designs currently on his site. As a t-shirt some of these would be fantastic but even as a standalone illustration or poster they are equally hilarious. Enjoy!

[Gallery: 25 illustrations and designs by Glennz]

SOURCES: – Glenn Jones @ Glennz.com – First Spotted on Behance
<filename>nerts-bot/src/lobbyinfo.rs use steamworks::{LobbyId, SteamId}; use crate::Bot; #[derive(Debug, Clone, Copy)] pub enum LobbyInfo { SteamLobby(LobbyId), FriendLobby(SteamId, LobbyId), } impl LobbyInfo { pub fn lobby_id(&self) -> LobbyId { match self { LobbyInfo::SteamLobby(id) => *id, LobbyInfo::FriendLobby(_, id) => *id, } } pub fn member_count(&self, bot: &Bot) -> usize { bot.client.matchmaking().lobby_member_count(self.lobby_id()) } pub fn member_limit(&self, bot: &Bot) -> Option<usize> { bot.client.matchmaking().lobby_member_limit(self.lobby_id()) } }
Robust Multivariable Estimation of the Relevant Information Coming from a Wheel Speed Sensor and an Accelerometer Embedded in a Car under Performance Tests

In the present paper, in order to estimate the response of both a wheel speed sensor and an accelerometer placed in a car under performance tests, robust and optimal multivariable estimation techniques are used. In this case, the disturbances and noises corrupting the relevant information coming from the sensors' outputs are so dangerous that their negative influence on the electrical systems impoverishes the general performance of the car. In short, the solution to this problem is a safety-related problem that deserves our full attention. Therefore, in order to diminish the negative effects of the disturbances and noises on the car's electrical and electromechanical systems, an optimum observer is used. The experimental results show a satisfactory improvement in the signal-to-noise ratio of the relevant signals and demonstrate the importance of the fusion of several intelligent sensor design techniques when designing the intelligent sensors that today's cars need.

Introduction
Due to the continually growing need for better comfort and safety in today's cars, a revolutionary way of designing sensors, actuators, and electrical and electromechanical systems for the automotive industry has been established. As a matter of fact, researchers all around the world have been encouraged to work not only on the fabrication of sensors but also on the fabrication of intelligent systems consisting of sensors, signal conditioners and microprocessors, among other devices, that incorporate a certain amount of intelligence into the sensors themselves. What is more, these signal conditioning and signal processing stages added to the sensors transform such sensors into robust and optimal measuring systems that are able to work satisfactorily in polluted, corrupted environments where we have little or no prior knowledge of either the relevant signal coming from the sensors or the noise corrupting their electrical information. In short, one of the worst environments for sensors is that of the automotive industry. In the automotive industry, sensors and electrical systems have to work under severe conditions such as high temperatures, high humidity, dangerous chemical attacks, undesirably strong vibrations, electromagnetic interference, pollution, and so on. Therefore, with this scenario in mind, no one would dispute that the robustness, optimality, efficiency and reliability of a system have been, and will continue to be, crucial. The aim of the present paper is to design a robust and optimal multi-input multi-output system able to cancel noise and disturbances that corrupt the relevant information coming from both a wheel speed sensor and an accelerometer placed in a car under performance tests. In this paper, our system is linear and its equations are in state space notation, where the system state vector consists of the speed and the acceleration of the car. Section 2 concerns a general description of the principles of the sensors used in this paper. Section 3 concerns the design of the robust and optimal multi-input multi-output system. Section 4 concerns the results of the experiment. Section 5 concerns the conclusions.

Principles
In the industrial world, the most common accelerometer design is based on a combination of Newton's law of mass acceleration and Hooke's law of spring action (Fig. 1).
According to Johnson, if the seismic mass m is undergoing an acceleration a, then there must be a force F acting on the mass and given by F_1 = m⋅a. In addition, the spring of spring constant k is extended (or stretched) from its equilibrium position for a distance ∆x with a force F_2 (opposite to F_1) acting on the spring and given by F_2 = k⋅∆x. This condition is described by equating Newton's and Hooke's laws. Thus, under steady-state accelerations, the measurement of acceleration is reduced to a measurement of spring extension (linear displacement), ∆x = (m/k)⋅a (Eq. (1)). In analyzing the transient response, we should take into account the friction associated with the seismic mass. Furthermore, if the system exhibits oscillations, its frequency of oscillation is calculated as in Eq. (2), f = f_N⋅√(1 − ζ²), where f_N = (1/2π)⋅√(k/m) is the natural frequency (Eq. (3)) and ζ is the dimensionless damping ratio. In this paper, it is considered that the movement of the vehicle's center of gravity can be discarded. Thus, the accelerometer is situated at the vehicle's center of gravity.

Types of accelerometers
There is a wide variety of accelerometers that could be used in different applications depending on the requirements of range, natural frequency, damping, temperature, size, weight, hysteresis, low noise, and so on. Piezoelectric accelerometers, piezoresistive accelerometers, variable capacitance accelerometers, linear variable differential transformers (LVDT), variable reluctance accelerometers, potentiometric accelerometers, gyroscopes used for sensing acceleration, and strain gauge accelerometers, among others, are part of this variety. In this work, a variable capacitance accelerometer with range ±2g (g = 9.81 m/s²) and sensitivity 985.6 mV/g at 5 Hz is used. Fig. 2 shows the frequency response of such a sensor (temperature 75 ºF and humidity 57%).

Wheel speed sensors
The speed of rotation of the wheels is one of the most important inputs to the optimal braking system of the car. In addition, other uses of the information from the rotational speed of the car's wheels include: traction control, vehicle stability control, transmission control, engine management, chassis control, hill-holder brakes, rollback detection or electronic parking brakes, brake-force distribution and roll-over protection, among others. At this point, it is important to point out that in spite of the fact that many rotational speed sensors share similar characteristics, not all of them are suitable for use in the automotive industry. The reality is that angular motion sensors based on magnetic field sensing principles stand out because of their many inherent advantages and sensing benefits. In fact, angular motion sensors are the perfect choice across the whole automotive applications spectrum. In this paper, a proximity sensor held in a protective casing and mounted in a fixed position close to one of the wheels of the car was used as the wheel speed sensor. The proximity sensor is of the variable reluctance type and its coil consists of a thin wire wound around an insulating form and coupled to a permanent magnet. For the kind of tests carried out in this paper, this device was only used to measure the rotation of the wheels of the car; however, in the process-control industry this kind of sensor has many applications in measuring rotation, position and location.

Principles
When the proximity sensor detects the presence of any of the ferrous teeth of a toothed wheel, an output voltage is obtained (Fig. 3) because the ferrous teeth cross the magnetic field that is created in front of the sensor, causing a change in the resulting flux and producing an electromotive force in the coil. Thus, the output is an alternating signal whose frequency and amplitude are both proportional to the speed of rotation. A block diagram representing the measurement system is shown in Fig. 4.

Considerations
Due to the fact that proximity sensors are widely used in many industrial applications, it is important to say something about their advantages and disadvantages. On the one hand, they can be very small and we can embed them in places where other sensors may not fit. In addition, they are often sealed in protective cases and can be resistant to high temperatures and high pressures as well as chemical attacks. Other advantages are their reliability, the low maintenance required, and their low cost. On the other hand, they have to be placed very close to a suitable ferrous metal to produce an adequate output voltage. They also suffer from undesirable signals or noise. In fact, these sensors have a very low signal-to-noise ratio at automobile speeds lower than 5 km/h and give very corrupted and misleading information about the real speed of the car at speeds equal to or lower than the one previously mentioned. Such behavior is not appropriate for the braking performance. For this reason the anti-lock braking system (ABS) of most of today's cars is disconnected at the end of the braking process, and the car is finally braked but without the help of the electronic braking system. In summary, despite the fact that there are disadvantages, variable reluctance proximity sensors seem to be the most suitable choice to measure the speed of rotation of motor car wheels in the ABS of today's automobiles. In this sense, these sensors play an important role in the optimal braking system of today's cars, which is a safety-related problem where effective and reliable performance has the highest priority.

Introduction
In the previous section both the accelerometer's and the wheel speed sensor's principles were given. However, such sensors as shown in the aforementioned section are not prepared to reject the undesirable effects of noise and disturbances corrupting their measurements. The reality is that the designer needs to deal with the consequences of the inevitable disturbances, noise and errors that cause sensor operations to deviate from their true value, which causes an undesirable degree of uncertainty in the measurements carried out by the sensors. In short, due to the fact that some parameters of the structure of the models are uncertain, the designer is faced with structured or parametric uncertainties. This type of uncertainty negatively affects parameters that define the dynamics of the process such as mass, damping, natural frequency, and so on. What is more, owing to the fact that the models are also in error because of missing dynamics, usually at high frequency, the designer is faced with neglected and unmodeled dynamics uncertainties, which are present in every model of a real system. This section focuses on the application of linear quadratic Gaussian control and loop transfer recovery (LQG/LTR) control techniques to shape the multi-input multi-output loop transfer function of the system, so that both the accelerometer and the wheel speed sensor can succeed in dealing with the inevitable sources of the above mentioned uncertainties.
At this point, it is important to point out that Kalman filtering and LQG/LTR control techniques are useful in applications in which we need to carry out the optimal observation of the state variables of feedback-controlled systems . In addition, the optimal and robust controller obtained as a result of applying the LQG/LTR control technique does not have a high computational burden. What is more, such a controller has good numerical properties, and good transient and tracking performance. Furthermore, the LQG/LTR controller has good disturbance rejection and robustness. On the other hand, LTR procedures have several disadvantages as well. For instance, their main disadvantage is that they are limited to minimum phase systems. In short, they should not be used to designing the controllers for non-minimum phase systems because the recovery procedures work by canceling the system zeros, which could lead to instability . However, the multi-input multi-output sensor presented in this paper is a minimum phase system, and the results of the application of the LQG/LTR control technique were satisfactory. Modeling of the sensors as a multi-input multi-output dynamic system The multi-input multi-output dynamic system is given by Eq. (4) and Eq. (5) In these equations, the plant under consideration is time-invariant and A, B 1 , C and D are constant matrices and B 2 is a constant vector. x(t) is the state vector, which consists of three components: the first component (that is, x 1 (t)) is the displacement; and the second and third components (that is, x 2 (t) and x 3 (t)) are the velocity and the acceleration, respectively. In this paper, the velocity and the acceleration are estimated by using a Kalman filter (optimum observer). In addition, it is assumed that the acceleration is a Wiener process . Furthermore, u(t) is the plant input and y(t) is the plant output. What is more, both u(t) and y(t) are vectors. w 1 (t) is white noise and represents process noise, and w 2 (t) is also white noise and represents extra process noise added directly to the control input. v(t) is white noise as well and represents measurement noise . Moreover, the process noise intensities have the same properties and are equal to W, which is in general a positive definite symmetric matrix. The measurement noise intensity is V, which is a positive definite symmetric matrix. A, B 1 , B 2 , C, D and E are given below (see Eq. (6) -Eq. (11)). The LQG/LTR method At this point, it is important to highlight that between the modern and post-modern control methods, the LQG/LTR control method is an outward one. It is focused on shaping the target feedback loop. However, there are others like the H ∞ optimal control method where the designer of the controller shapes both the sensitivity function and the complementary sensitivity function . According to Doyle and Stein , the idea behind the LQG/LTR method is a complex one and it could be summarized as follows: First of all, the attributes that the controller must have should be clearly established. To that end, the true design objectives should be expressed in mathematical terms as clear as possible. In short, a tradeoff should be established between what the designer wants and how the designer formulates it in mathematical terms. 
In this method either the optimum observer is adjusted so that the loop transfer function can have a gain margin of +∞ dB and a minimum phase margin of 60° for single-input single-output systems; or the optimum observer is adjusted so that the sensitivity function satisfies the condition that its maximum singular value is lower than or equal to 1. Secondly, a loop transfer function should be designed by using linear-quadratic optimal control methods so that the loop can have the desired margins mentioned above. In this step, the choice of the state-weighting matrix Q and the control-weighting matrix R depends on the kind of cost of the control that the designer wants to achieve and how the deviations of the state variables from the origin are going to be penalized. Consequently, a linear-quadratic optimal regulator (LQR) is obtained. Thirdly, a Kalman filter should be designed so that the characteristics of the loop transfer function obtained in this step are similar to the ones obtained with the LQR designed in the previous step. In short, according to an adjusting procedure the Kalman filter is designed and shaped. The intensity of the extra process noise added directly to the control input of the plant is gradually increased, and the more the intensity is increased, the better the robustness properties of the loop transfer function. In other words, the more the intensity of such a fictitious noise approaches infinity, the more the loop transfer function of the system recovers its robustness properties. On the other hand, this ad-hoc design procedure of shaping the singular values of the loop transfer function could cause problems with unmodeled dynamics, because while it is carrying out the recovery of the robustness of the system, the gains get bigger step by step. This is why the recovery procedures are rarely carried out to their limits. Basically, the designers stop the process of recovering the robustness of the loop at the point where they consider that an acceptable design has been achieved.

Design of the loop transfer function
In accordance with the approach adopted in the previous subsections, Eq. (12) and Eq. (13) show the metasystem, where K_C is the controller matrix gain and K_O is the observer matrix gain. Furthermore, the procedure for the calculation of the previously mentioned matrices depends on solving the following equations, where the matrices A_g and B_g are given by Eq. (18) and Eq. (19). At this point, it is important to point out that K_C is a two-by-three matrix consisting of the optimum deterministic controller K_LQR given by Eq. (15), which is obtained after solving for Π in the algebraic Riccati equation (ARE) Eq. (14), and a unitary gain vector. In addition, K_O is given by Eq. (17) and is obtained after solving for Θ in the ARE Eq. (16), where the parameter q is a scalar variable. Also, as q approaches infinity, the loop transfer function asymptotically recovers its robustness properties. Furthermore, in accordance with the separation theorem, the optimization of performance in the presence of disturbances was carried out ignoring the noise, achieving an optimum deterministic controller whose gain matrix is given by Eq. (24). Moreover, the optimization of the observer for estimating the state vector in the presence of white noise on the observation and white noise disturbances was carried out satisfactorily, achieving the Kalman filter gains given by Eq. (25) for q equal to 100.
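The display equations Eq. (12)–(25) did not survive extraction, so the following is only a sketch of the two Riccati solves the text describes, not the paper's actual matrices or gains. It uses a reduced two-state model in the spirit of the abstract (state = [speed, acceleration], with the acceleration driven by white noise, i.e. a Wiener process); the weights Q and R, the noise intensities W and V, and the control-input matrix B2 are placeholder values chosen only so the example runs.

import numpy as np
from scipy.linalg import solve_continuous_are

# Reduced illustrative model (NOT the paper's Eq. (6)-(11)):
# state x = [speed, acceleration]; the acceleration is a Wiener process,
# so its derivative is white noise entering through B1.
A  = np.array([[0.0, 1.0],
               [0.0, 0.0]])
B1 = np.array([[0.0],
               [1.0]])          # process noise w1 drives the acceleration
B2 = np.array([[0.0],
               [1.0]])          # assumed control-input matrix (placeholder)
C  = np.eye(2)                  # wheel speed sensor -> speed, accelerometer -> acceleration
W  = np.array([[1.0]])          # assumed process noise intensity
V  = np.diag([0.05, 0.02])      # assumed measurement noise intensities

# Step 1: deterministic LQR (the role of Eq. (14)-(15)):
# solve A'Pi + Pi A - Pi B2 R^-1 B2' Pi + Q = 0, then K_lqr = R^-1 B2' Pi.
Q = np.eye(2)                   # assumed state weighting
R = np.array([[0.1]])           # assumed control weighting
Pi = solve_continuous_are(A, B2, Q, R)
K_lqr = np.linalg.solve(R, B2.T @ Pi)

# Step 2: Kalman filter with loop transfer recovery (the role of Eq. (16)-(17)).
# Fictitious noise of intensity q^2 is added at the control input; increasing q
# makes the loop transfer function approach the LQR loop's robustness.
def observer_gain(q):
    W_eff = B1 @ W @ B1.T + (q ** 2) * (B2 @ B2.T)
    Theta = solve_continuous_are(A.T, C.T, W_eff, V)   # filter Riccati equation
    return Theta @ C.T @ np.linalg.inv(V)

K_o = observer_gain(q=100.0)    # the paper stops the recovery at q = 100

In a real design one would sweep q upward step by step and stop, as the authors do, once an acceptable compromise between noise rejection and robustness has been reached.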
Basically, a trade-off was established between the noise rejection performance of the system and its robustness, resulting in the achievement of a satisfactory compromise between them by adjusting the parameter q. Such an adjustment process was carried out by increasing q step-by-step from zero up to the value given above. The results of the experiment The system designed in this research was implemented as an intelligent system embedded in a car under performance tests. To that end, a laptop computer and the National Instruments Data Acquisition Card DAQCard-700 were used along with an analogue signal conditioning circuit consisting of a general-purpose operational amplifier TL084 and a few resistors and capacitors reasonably situated. In addition, the sampling frequency was 500 Hz. Fig. 6 shows the information coming from the wheel speed sensor, after being conditioned, and Fig. 7 shows the information coming from the accelerometer during one of the performance tests. During this test, the car was at approximately 84 km/h when the driver hit on the brakes. Note that there is a high quantity of noise corrupting such information. The reality is that working with the information shown in Fig. 6 and Fig. 7 can lead to misleading conclusions. Such a corruption in the signals coming from both sensors justified the use of the robust estimator designed in this paper. Fig. 8 shows the power spectrum magnitude of the information coming from the sensors. Consequently, Fig. 9 and Fig. 10 show the result of the robust estimation process of the true signal from the corrupted information shown in Fig. 6 and Fig. 7, respectively. Note that the system based on the LQG/LTR regulator has considerably diminished the noise corrupting the relevant signals while leaving them practically unchanged from the engineering standpoint. Fig. 11 shows the power spectrum magnitude of the estimated speed and acceleration. It is important to point out the satisfactory reduction of the noise corrupting the important information. In Fig. 7 and Fig. 10 the ringing (or damped oscillation) shown at the end of both figures is the effect of the vertical movement, the pitch and the roll, yielded by the car when the driver hit on the brakes. The eigenfrequencies of this ringing lay between 1 Hz and 2 Hz. Their effects do not alter the experiment. Furthermore, the final acceleration read from the sensor should be zero because the car is stopped, but the accelerometer has a small offset that we usually cancel by using software techniques. Before moving on to the conclusions, it is important to point out that the satisfactory results obtained in this paper made a comparison between this estimator and any other unnecessary. If the random processes are white and Gaussian, the optimum observer will be optimum under any reasonable performance criterion. However, according to Friedland , the theoretical framework hardly exists for the treatment of anything but white noise. Even though the uncertainty in the parameters that define the dynamics of our process is not white noise, one practical way of dealing with this uncertainty is to assume that it is white noise, and the justification for this is the satisfactory improvement of the robustness of the resulting feedback controlled system. Furthermore, if we had used another estimator, the results of the experiment might not have been as good as the ones achieved in this paper. 
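Since the experiment logs both sensors at 500 Hz, in practice the continuous-time observer has to be discretized before it can run over the recorded samples. The fragment below only illustrates that step; the matrices, the observer gain and the braking profile are placeholder values (the gain stands in for the paper's Eq. (25), and the speed trace roughly mimics the 84 km/h braking run described), not the authors' implementation.

import numpy as np
from scipy.signal import cont2discrete

dt = 1.0 / 500.0                       # the experiment samples at 500 Hz

# Continuous-time observer:  x_hat_dot = (A - K_o C) x_hat + K_o y,
# using the placeholder two-state model from the sketch above.
A   = np.array([[0.0, 1.0], [0.0, 0.0]])
C   = np.eye(2)
K_o = np.array([[20.0, 0.0], [0.0, 50.0]])   # assumed gain, stand-in for Eq. (25)

A_obs = A - K_o @ C
Ad, Bd, _, _, _ = cont2discrete((A_obs, K_o, np.eye(2), np.zeros((2, 2))), dt)

# Run the discretized observer over noisy speed/acceleration samples.
rng = np.random.default_rng(0)
t = np.arange(0.0, 2.0, dt)
true_speed = 23.3 - 8.0 * t                  # made-up braking profile (~84 km/h start)
true_accel = np.full_like(t, -8.0)
y = np.stack([true_speed, true_accel], axis=1) + rng.normal(0.0, [1.5, 0.8], (t.size, 2))

x_hat = np.zeros(2)
estimates = np.empty_like(y)
for k, y_k in enumerate(y):
    x_hat = Ad @ x_hat + Bd @ y_k            # one observer update per sample
    estimates[k] = x_hat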
However, another way to solve the estimation problem presented here is to carry out the estimation of the acceleration and the speed of the car under performance tests by using two independent single-input single-output optimal filters. Nevertheless, this solution implies using more electronic devices, which increases the cost of the system and the number of operations that the microprocessor embedded in the car has to carry out. To sum up, despite there being several ways to carry out the estimation of the speed and the acceleration of a car under performance tests, the method presented here seems to be one of the most reliable, practical, inexpensive and efficient methods to estimate the above dynamic variables in today's cars.

Conclusions
To conclude, in this paper a robust and optimal estimator for a multi-input multi-output sensor system was designed and tested under laboratory conditions. Here, the noise corrupting the electrical information coming from both the wheel speed sensor and the accelerometer of a car under performance tests was diminished by using robust estimation techniques. In addition, the two-input two-output dynamic system and the optimum observer were shaped in the sense that the system was robust against modeled and unmodeled uncertainties. For this reason, the robust estimator designed here can work satisfactorily in environments where the working conditions are severe, for example, in the automotive industry. The results show that even though the signals coming from real physical systems are corrupted by noise and interference, and the behavior of such systems is negatively affected by undesirable disturbances, the use of robust and optimal control techniques can bring satisfactory results. Furthermore, it is important to stress that the system designed in this paper is easy to implement and was built by using low-cost components, which makes the use of this technology affordable for car manufacturers. Moreover, as the loop transfer function resulting from the method presented was shaped to deal with models whose parameters have a certain degree of uncertainty, the system does not need expensive sensors to work satisfactorily. Actually, the cost of the electronic devices is a very important factor to be taken into consideration when building non-luxury cars. Last but not least, the use of this paper's design method can bridge the gap between intelligent control methods and the design of sensors and actuators for a wide range of applications. The reality is that only by the fusion of these concepts can the designer see the way clear to building the intelligent sensors that today's cars need.
def flare_ensemble(config, simulation_period, N, out_dir, file_suffix=""):
    # Run N independent simulation instances by calling flare_local,
    # writing each instance's output to its own numbered subdirectory of out_dir.
    for i in range(0, int(N)):
        instance_out_dir = "%s/%s" % (out_dir, i)
        flare_local(config, simulation_period, instance_out_dir, file_suffix=file_suffix)
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2017 Google, Inc */ #include <common.h> #include <dm.h> #include <errno.h> #include <log.h> #include <wdt.h> #include <asm/io.h> #include <asm/arch/wdt.h> #include <linux/err.h> #define WDT_AST2500 2500 #define WDT_AST2400 2400 struct ast_wdt_priv { struct ast_wdt *regs; }; static int ast_wdt_start(struct udevice *dev, u64 timeout, ulong flags) { struct ast_wdt_priv *priv = dev_get_priv(dev); ulong driver_data = dev_get_driver_data(dev); u32 reset_mode = ast_reset_mode_from_flags(flags); /* 32 bits at 1MHz is 4294967ms */ timeout = min_t(u64, timeout, 4294967); /* WDT counts in ticks of 1MHz clock. 1ms / 1e3 * 1e6 */ timeout *= 1000; clrsetbits_le32(&priv->regs->ctrl, WDT_CTRL_RESET_MASK << WDT_CTRL_RESET_MODE_SHIFT, reset_mode << WDT_CTRL_RESET_MODE_SHIFT); if (driver_data >= WDT_AST2500 && reset_mode == WDT_CTRL_RESET_SOC) writel(ast_reset_mask_from_flags(flags), &priv->regs->reset_mask); writel((u32) timeout, &priv->regs->counter_reload_val); writel(WDT_COUNTER_RESTART_VAL, &priv->regs->counter_restart); /* * Setting CLK1MHZ bit is just for compatibility with ast2400 part. * On ast2500 watchdog timer clock is fixed at 1MHz and the bit is * read-only */ setbits_le32(&priv->regs->ctrl, WDT_CTRL_EN | WDT_CTRL_RESET | WDT_CTRL_CLK1MHZ); return 0; } static int ast_wdt_stop(struct udevice *dev) { struct ast_wdt_priv *priv = dev_get_priv(dev); clrbits_le32(&priv->regs->ctrl, WDT_CTRL_EN); writel(WDT_RESET_DEFAULT, &priv->regs->reset_mask); return 0; } static int ast_wdt_reset(struct udevice *dev) { struct ast_wdt_priv *priv = dev_get_priv(dev); writel(WDT_COUNTER_RESTART_VAL, &priv->regs->counter_restart); return 0; } static int ast_wdt_expire_now(struct udevice *dev, ulong flags) { struct ast_wdt_priv *priv = dev_get_priv(dev); int ret; ret = ast_wdt_start(dev, 1, flags); if (ret) return ret; while (readl(&priv->regs->ctrl) & WDT_CTRL_EN) ; return ast_wdt_stop(dev); } static int ast_wdt_of_to_plat(struct udevice *dev) { struct ast_wdt_priv *priv = dev_get_priv(dev); priv->regs = dev_read_addr_ptr(dev); if (!priv->regs) return -EINVAL; return 0; } static const struct wdt_ops ast_wdt_ops = { .start = ast_wdt_start, .reset = ast_wdt_reset, .stop = ast_wdt_stop, .expire_now = ast_wdt_expire_now, }; static const struct udevice_id ast_wdt_ids[] = { { .compatible = "aspeed,wdt", .data = WDT_AST2500 }, { .compatible = "aspeed,ast2500-wdt", .data = WDT_AST2500 }, { .compatible = "aspeed,ast2400-wdt", .data = WDT_AST2400 }, {} }; static int ast_wdt_probe(struct udevice *dev) { debug("%s() wdt%u\n", __func__, dev_seq(dev)); ast_wdt_stop(dev); return 0; } U_BOOT_DRIVER(ast_wdt) = { .name = "ast_wdt", .id = UCLASS_WDT, .of_match = ast_wdt_ids, .probe = ast_wdt_probe, .priv_auto = sizeof(struct ast_wdt_priv), .of_to_plat = ast_wdt_of_to_plat, .ops = &ast_wdt_ops, };
/** * Generate the packing instruction suitable for modifying the zone associated * with the given user. * * @param userName * {@code String} with the iRODS user name. * @param zone * {@code String} with the user's zone. * @return {@link GeneralAdminInp} * @throws JargonException * as iRODS exception */ public static GeneralAdminInp instanceForModifyUserZone(final String userName, final String zone) throws JargonException { if (userName == null || userName.isEmpty()) { throw new JargonException("user name is null or empty"); } if (zone == null) { throw new JargonException("zone is null"); } return new GeneralAdminInp("modify", "user", userName, "zone", zone, BLANK, BLANK, BLANK, BLANK, BLANK, GEN_ADMIN_INP_API_NBR); }
#include <bits/stdc++.h> #define ff first #define ss second #define endl '\n' #define INF 1e9 using namespace std; using ll = long long; using pii = pair<int,int>; using vi = vector<int>; int main() { ios::sync_with_stdio(false); cin.tie(NULL); double x,y; cin >> x >> y; double k = floor(sqrt(x*x+y*y)); int d = ceil(sqrt(x*x+y*y)); if(d == k) cout << "black" << endl; else if(x >= 0 && y >= 0){ //cout << d << endl; if(d%2) cout << "black" << endl; else cout << "white" << endl; } else if(x >= 0 && y <= 0){ if(d%2){ cout << "white" << endl; }else{ cout << "black" << endl; } } else if(x <= 0 && y <= 0){ if(d%2) cout << "black" << endl; else cout << "white" << endl; } else{ if(d%2) cout << "white" << endl; else cout << "black" << endl; } }
#include<iostream> #include<cstdio> using namespace std; int main() { long long s, m, p, co=0, ex=0; // cin>>s>>m>>p; scanf("%lld %lld %lld", &s, &m, &p); co+=s/p; s=s%p; co+=m/p; m=m%p; if((m+s)/p>=1) { co+=(m+s)/p; if(s>m) ex=((s+m)-((s+m)%p))-s; else ex=((m+s)-((m+s)%p))-m ; } printf("%lld %lld\n", co, ex); return 0; }
<gh_stars>0 // // Created by <NAME> on 28.02.17. // #include <llvm/IR/DerivedTypes.h> #include <llvm/Support/raw_ostream.h> #include "type.h" #include "function-type.h" #include "pointer-type.h" #include "array-type.h" #include "struct-type.h" NAN_MODULE_INIT(TypeWrapper::Init) { auto type = Nan::GetFunction(Nan::New(typeTemplate())).ToLocalChecked(); auto typeIds = Nan::New<v8::Object>(); Nan::Set(typeIds, Nan::New("VoidTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::VoidTyID)); Nan::Set(typeIds, Nan::New("HalfTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::HalfTyID)); Nan::Set(typeIds, Nan::New("FloatTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::FloatTyID)); Nan::Set(typeIds, Nan::New("DoubleTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::DoubleTyID)); Nan::Set(typeIds, Nan::New("X86_FP80TyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::X86_FP80TyID)); Nan::Set(typeIds, Nan::New("FP128TyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::FP128TyID)); Nan::Set(typeIds, Nan::New("PPC_FP128TyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::PPC_FP128TyID)); Nan::Set(typeIds, Nan::New("LabelTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::LabelTyID)); Nan::Set(typeIds, Nan::New("MetadataTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::MetadataTyID)); Nan::Set(typeIds, Nan::New("X86_MMXTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::X86_MMXTyID)); Nan::Set(typeIds, Nan::New("TokenTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::TokenTyID)); Nan::Set(typeIds, Nan::New("IntegerTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::IntegerTyID)); Nan::Set(typeIds, Nan::New("FunctionTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::FunctionTyID)); Nan::Set(typeIds, Nan::New("StructTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::StructTyID)); Nan::Set(typeIds, Nan::New("ArrayTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::ArrayTyID)); Nan::Set(typeIds, Nan::New("PointerTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::PointerTyID)); Nan::Set(typeIds, Nan::New("VectorTyID").ToLocalChecked(), Nan::New(llvm::Type::TypeID::VectorTyID)); Nan::Set(type, Nan::New("TypeID").ToLocalChecked(), typeIds); Nan::Set(target, Nan::New("Type").ToLocalChecked(), type); } NAN_METHOD(TypeWrapper::New) { if (!info.IsConstructCall()) { return Nan::ThrowTypeError("Constructor needs to be called with new"); } if (info.Length() < 1 || !info[0]->IsExternal()) { return Nan::ThrowTypeError("Expected type pointer"); } auto* type = static_cast<llvm::Type*>(v8::External::Cast(*info[0])->Value()); auto* wrapper = new TypeWrapper { type }; wrapper->Wrap(info.This()); info.GetReturnValue().Set(info.This()); } NAN_METHOD(TypeWrapper::equals) { if (info.Length() != 1 || !TypeWrapper::isInstance(info[0])) { return Nan::ThrowTypeError("equals needs to be called with: other: Type"); } llvm::Type* that = TypeWrapper::FromValue(info.Holder())->getType(); llvm::Type* other = TypeWrapper::FromValue(info[0])->getType(); info.GetReturnValue().Set(Nan::New(that == other)); } NAN_METHOD(TypeWrapper::getPointerTo) { if ((info.Length() == 1 && !info[0]->IsUint32()) || info.Length() > 1) { return Nan::ThrowTypeError("getPointer needs to called with: addrSpace?: uint32"); } uint32_t addressSpace {}; if (info.Length() == 1) { addressSpace = Nan::To<uint32_t>(info[0]).FromJust(); } auto* pointerType = TypeWrapper::FromValue(info.Holder())->getType()->getPointerTo(addressSpace); info.GetReturnValue().Set(PointerTypeWrapper::of(pointerType)); } NAN_METHOD(TypeWrapper::toString) { 
auto* type = TypeWrapper::FromValue(info.Holder())->getType(); std::string name {}; llvm::raw_string_ostream output { name }; type->print(output); info.GetReturnValue().Set(Nan::New(output.str()).ToLocalChecked()); } typedef llvm::Type* (getTypeFn)(llvm::LLVMContext&); template<getTypeFn method> NAN_METHOD(getTypeFactory) { if (info.Length() < 1 || !LLVMContextWrapper::isInstance(info[0])) { return Nan::ThrowTypeError("getType needs to be called with the context"); } auto context = LLVMContextWrapper::FromValue(info[0]); auto* type = method(context->getContext()); auto wrapped = TypeWrapper::of(type); info.GetReturnValue().Set(wrapped); } typedef llvm::IntegerType* (getIntTypeFn)(llvm::LLVMContext&); template<getIntTypeFn method> NAN_METHOD(getIntType) { if (info.Length() < 1 || !LLVMContextWrapper::isInstance(info[0])) { return Nan::ThrowTypeError("getIntTy needs to be called with the context"); } auto context = LLVMContextWrapper::FromValue(info[0]); auto* type = method(context->getContext()); auto wrapped = TypeWrapper::of(type); info.GetReturnValue().Set(wrapped); } typedef llvm::PointerType* (getPointerTypeFn)(llvm::LLVMContext&, unsigned AS); template<getPointerTypeFn method> NAN_METHOD(getPointerType) { if (info.Length() < 1 || !LLVMContextWrapper::isInstance(info[0]) || (info.Length() == 2 && !info[1]->IsUint32())) { return Nan::ThrowTypeError("getPointerTy needs to be called with: context: LLVMContext, AS=0: uint32"); } auto context = LLVMContextWrapper::FromValue(info[0]); unsigned AS = 0; if (info.Length() == 2) { AS = Nan::To<unsigned>(info[1]).FromJust(); } auto* type = method(context->getContext(), AS); auto wrapped = PointerTypeWrapper::of(type); info.GetReturnValue().Set(wrapped); } v8::Local<v8::Object> TypeWrapper::of(llvm::Type *type) { v8::Local<v8::Object> result {}; if (type->isFunctionTy()) { result = FunctionTypeWrapper::Create(static_cast<llvm::FunctionType*>(type)); } else if (type->isPointerTy()) { result = PointerTypeWrapper::of(static_cast<llvm::PointerType*>(type)); } else if (type->isArrayTy()) { result = ArrayTypeWrapper::of(static_cast<llvm::ArrayType*>(type)); } else if (type->isStructTy()) { result = StructTypeWrapper::of(static_cast<llvm::StructType*>(type)); } else { v8::Local<v8::FunctionTemplate> functionTemplate = Nan::New(typeTemplate()); auto constructorFunction = Nan::GetFunction(functionTemplate).ToLocalChecked(); v8::Local<v8::Value> argv[1] = { Nan::New<v8::External>(type) }; result = Nan::NewInstance(constructorFunction, 1, argv).ToLocalChecked(); } Nan::EscapableHandleScope escapeScope {}; return escapeScope.Escape(result); } typedef bool (llvm::Type::*isTy)() const; template<isTy method> NAN_METHOD(isOfType) { auto* type = TypeWrapper::FromValue(info.Holder())->getType(); auto result = Nan::New((type->*method)()); info.GetReturnValue().Set(result); } NAN_METHOD(isIntegerTy) { if (info.Length() > 1 && info[0]->IsUint32()) { return Nan::ThrowTypeError("isIntegerTy needs to be called with: bitwidth?: uint32"); } llvm::Type* type = TypeWrapper::FromValue(info.Holder())->getType(); bool result = info.Length() == 0 ? 
type->isIntegerTy() : type->isIntegerTy(Nan::To<uint32_t>(info[0]).FromJust()); info.GetReturnValue().Set(Nan::New(result)); } NAN_GETTER(TypeWrapper::getTypeID) { auto* wrapper = TypeWrapper::FromValue(info.Holder()); auto result = Nan::New(wrapper->type->getTypeID()); info.GetReturnValue().Set(result); } NAN_METHOD(TypeWrapper::getIntNTy) { if (info.Length() != 2 || !LLVMContextWrapper::isInstance(info[0]) || !info[1]->IsUint32()) { return Nan::ThrowTypeError("getIntNTy needs to be called with: context: LLVMContext, N: uint32"); } auto& context = LLVMContextWrapper::FromValue(info[0])->getContext(); auto N = Nan::To<uint32_t>(info[1]).FromJust(); auto* type = llvm::Type::getIntNTy(context, N); info.GetReturnValue().Set(TypeWrapper::of(type)); } NAN_METHOD(TypeWrapper::getPrimitiveSizeInBits) { auto* type = TypeWrapper::FromValue(info.Holder())->getType(); info.GetReturnValue().Set(Nan::New(type->getPrimitiveSizeInBits())); } Nan::Persistent<v8::FunctionTemplate>& TypeWrapper::typeTemplate() { static Nan::Persistent<v8::FunctionTemplate> persistentTemplate {}; if (persistentTemplate.IsEmpty()) { v8::Local<v8::FunctionTemplate> typeTemplate = Nan::New<v8::FunctionTemplate>(TypeWrapper::New); typeTemplate->SetClassName(Nan::New("Type").ToLocalChecked()); typeTemplate->InstanceTemplate()->SetInternalFieldCount(1); Nan::SetMethod(typeTemplate, "getDoubleTy", &getTypeFactory<&llvm::Type::getDoubleTy>); Nan::SetMethod(typeTemplate, "getVoidTy", &getTypeFactory<&llvm::Type::getVoidTy>); Nan::SetMethod(typeTemplate, "getFloatTy", &getTypeFactory<&llvm::Type::getFloatTy>); Nan::SetMethod(typeTemplate, "getLabelTy", &getTypeFactory<&llvm::Type::getLabelTy>); Nan::SetMethod(typeTemplate, "getInt1Ty", &getIntType<&llvm::Type::getInt1Ty>); Nan::SetMethod(typeTemplate, "getInt8Ty", &getIntType<&llvm::Type::getInt8Ty>); Nan::SetMethod(typeTemplate, "getInt16Ty", &getIntType<&llvm::Type::getInt16Ty>); Nan::SetMethod(typeTemplate, "getInt32Ty", &getIntType<&llvm::Type::getInt32Ty>); Nan::SetMethod(typeTemplate, "getInt64Ty", &getIntType<&llvm::Type::getInt64Ty>); Nan::SetMethod(typeTemplate, "getInt128Ty", &getIntType<&llvm::Type::getInt128Ty>); Nan::SetMethod(typeTemplate, "getIntNTy", getIntNTy); Nan::SetMethod(typeTemplate, "getInt1PtrTy", &getPointerType<&llvm::Type::getInt1PtrTy>); Nan::SetMethod(typeTemplate, "getInt8PtrTy", &getPointerType<&llvm::Type::getInt8PtrTy>); Nan::SetMethod(typeTemplate, "getInt32PtrTy", &getPointerType<&llvm::Type::getInt32PtrTy>); Nan::SetPrototypeMethod(typeTemplate, "equals", &equals); Nan::SetPrototypeMethod(typeTemplate, "isVoidTy", &isOfType<&llvm::Type::isVoidTy>); Nan::SetPrototypeMethod(typeTemplate, "isFloatTy", &isOfType<&llvm::Type::isFloatTy>); Nan::SetPrototypeMethod(typeTemplate, "isDoubleTy", &isOfType<&llvm::Type::isDoubleTy>); Nan::SetPrototypeMethod(typeTemplate, "isLabelTy", &isOfType<&llvm::Type::isLabelTy>); Nan::SetPrototypeMethod(typeTemplate, "isIntegerTy", &isIntegerTy); Nan::SetPrototypeMethod(typeTemplate, "isFunctionTy", &isOfType<&llvm::Type::isFunctionTy>); Nan::SetPrototypeMethod(typeTemplate, "isStructTy", &isOfType<&llvm::Type::isStructTy>); Nan::SetPrototypeMethod(typeTemplate, "isArrayTy", &isOfType<&llvm::Type::isArrayTy>); Nan::SetPrototypeMethod(typeTemplate, "isPointerTy", &isOfType<&llvm::Type::isPointerTy>); Nan::SetAccessor(typeTemplate->InstanceTemplate(), Nan::New("typeID").ToLocalChecked(), TypeWrapper::getTypeID); Nan::SetPrototypeMethod(typeTemplate, "getPointerTo", TypeWrapper::getPointerTo); 
Nan::SetPrototypeMethod(typeTemplate, "getPrimitiveSizeInBits", TypeWrapper::getPrimitiveSizeInBits); Nan::SetPrototypeMethod(typeTemplate, "toString", TypeWrapper::toString); persistentTemplate.Reset(typeTemplate); } return persistentTemplate; } bool TypeWrapper::isInstance(v8::Local<v8::Value> object) { return Nan::New(typeTemplate())->HasInstance(object); } llvm::Type *TypeWrapper::getType() { return type; }
package com.yiran.paychannel.exception;

import com.yiran.paychannel.enums.ErrorCode;

/**
 * <p>Base class for checked exceptions</p>
 */
public class AppCheckedException extends Exception {

    private static final long serialVersionUID = 7240166912823355206L;

    ErrorCode errorCode = ErrorCode.EXCEPTION;

    public AppCheckedException() {
        super();
    }

    public AppCheckedException(String message) {
        super(message);
    }

    public AppCheckedException(ErrorCode errorCode, String message) {
        super(message);
        this.errorCode = errorCode;
    }

    public AppCheckedException(ErrorCode errorCode) {
        super(errorCode.getErrorMessage());
        this.errorCode = errorCode;
    }

    public AppCheckedException(ErrorCode errorCode, String message, Throwable cause) {
        super(message, cause);
        this.errorCode = errorCode;
    }

    public AppCheckedException(String message, Throwable cause) {
        super(message, cause);
    }

    public AppCheckedException(Throwable cause) {
        super(cause);
    }

    public String getCode() {
        return this.errorCode.getErrorCode();
    }
}
Source or target first? Comparison of two post-editing strategies with translation students We conducted an experiment with translation students to assess the influence of two different post-editing (PE) strategies (reading the source segment or the target segment first) on three aspects: PE time, ratio of corrected errors and number of optional modifications per word. Our results showed that the strategy that is adopted has no influence on the PE time or ratio of corrected errors. However, it does have an influence on the number of optional modifications per word. Two other thought-provoking observations emerged from this study: first, the ratio of corrected errors showed that, on average, students correct only half of the MT errors, which underlines the need for PE practice. Second, the time logs of the experiment showed that when students are not forced to read the source segment first, they tend to neglect the source segment and almost do monolingual PE. This experiment provides new insight relevant to PE teaching as well as the designing of PE environments.
<reponame>danilosalve/tir try: from setuptools import setup except ImportError: from distutils.core import setup config = { 'description': 'TOTVS Interface Robot', 'author': 'TOTVS Automation Team', 'url': 'https://github.com/totvs/tir', 'download_url': 'https://github.com/totvs/tir', 'author_email': '<EMAIL>', 'version': '1.9.1', 'install_requires': [ 'beautifulsoup4==4.6.0', 'bs4==0.0.1', 'numpy', 'pandas==0.23.4', 'python-dateutil==2.6.1', 'pytz==2017.3', 'selenium==3.8.0', 'six==1.11.0', 'enum34', 'requests' ], 'packages': ['tir'], 'scripts': [], 'name': 'tir' } setup(**config, include_package_data=True)
# -*- coding: utf-8 -*-
"""
scipy.io.loadmat notes:
v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
You will need an HDF5 python library to read matlab 7.3 format mat files. Because scipy does not supply one, we do not implement the HDF5 / 7.3 interface here.
h5py and hdf5 are both related to importing and reading matlab files
This file isn't actually that useful in the end as I'm interested in the 'data6' key in the dictionary
I'm currently working on how to efficiently manipulate that key, which contains roughly 10,000 rows.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as io
import h5py  # only needed for MATLAB 7.3 (HDF5) files, see the notes above


# Python does not support overloading, so the two original definitions of
# loadMatlabFile (one taking only a filename, one taking a filename and a key)
# are merged here; otherwise the second definition silently shadows the first.
def loadMatlabFile(filename, singlekey=None):
    matlab = io.loadmat(filename)
    dataX = np.array([])
    dataY = np.array([])
    if singlekey is None:
        # print(matlab)
        dataXRaw = np.array(matlab['data_x']).astype(float)
        dataYRaw = np.array(matlab['data_y']).astype(float)
        # Loops are required to flatten the raw data arrays
        # You also have to use array1 = numpy.append(array1, array2) to add to an existing array
        for x in dataXRaw:
            dataX = np.append(dataX, x)
        for y in dataYRaw:
            dataY = np.append(dataY, y)
    else:
        dataRaw = np.array(matlab[singlekey])
        # TODO Work on getting the right x y data out for graphing
    return dataX, dataY


#dataX, dataY = loadMatlabFile(filename = '0708 Trial1 TE.mat')
dataX, dataY = loadMatlabFile(filename = '0708 Trial1 TE.mat', singlekey = 'data6')
plt.plot(dataX, dataY) # Give it the x and y axis values
plt.axis([0,10,0,10]) # What is the range of each axis?
plt.xlabel("Pressure on the X axis")
plt.ylabel("Pressure on the Y axis")
plt.title("Centre of pressure")
plt.show()
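Since the docstring above says the interesting content lives under the 'data6' key, a quick way to decide how to manipulate it is to inspect the loaded dictionary directly. The file name and key below are simply the ones already used in the script; the rest is an illustrative sketch, not part of the original project:

import scipy.io as io

mat = io.loadmat('0708 Trial1 TE.mat')

# List the variables stored in the .mat file; keys starting with '__' are loadmat metadata.
for key, value in mat.items():
    if not key.startswith('__'):
        print(key, getattr(value, 'shape', type(value)))

# Peek at the first few of the ~10,000 rows stored under 'data6'.
data6 = mat['data6']
print(data6.shape)
print(data6[:5])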
// Loads and starts the CLR (if necessary) and creates a new appdomain to run // managed code. // // (We don't want to use the default appdomain because we want to be able to unload // stuff, which you can only do by unloading an appdomain, and you can't unload the // default appdomain without shutting down the CLR, and we don't want to shut down the // CLR, because then it can't be restarted.) HRESULT Initialize( bool createNewAppDomain ) { HRESULT hr = S_OK; SetEnvironmentVariable( L"COREHOST_TRACE", L"1" ); SetEnvironmentVariable( L"COREHOST_TRACE_VERBOSITY", L"4" ); hr = load_hostfxr(); if( FAILED( hr ) ) { wprintf( L"load_hostfxr() failed: %#x\n", hr ); } return hr; }
import { DmvService, TeaDispenserService } from '../data/ChatService'; import ChatServiceContext from '../data/ChatServiceContext'; import Command from '../data/Command'; import FleetLoot from '../data/FleetLoot'; import InvalidCommand from '../data/InvalidCommand'; import ItemStack from '../data/ItemStack'; import MessageContext from '../data/MessageContext'; import WebServerContext from '../data/WebServerContext'; type Event = | PingedEvent | TeaDispenserImagePostedEvent | TeaDispenserHandsUpButtonPressedEvent | TeaDispenserCommandIssuedEvent | TeaDispenserKiwiButtonPressedEvent | DmvInstallCommandIssuedEvent | DmvCryButtonPressedEvent | WebFleetLootEditorRequestedEvent | WebFleetLootEditorPostedEvent | WebNeederChooserRequestedEvent | WebNeedsEditorRequestedEvent | WebNeedsEditorPostedEvent | WebIndexRequested; export interface PingedEvent extends ChatServiceContext { readonly type: '[Chat] Pinged'; } export interface TeaDispenserChatServiceEventCommon extends ChatServiceContext { readonly chatService: TeaDispenserService; } export interface TeaDispenserImagePostedEvent extends TeaDispenserChatServiceEventCommon { readonly type: '[TeaDispenser] ImagePosted'; readonly urls: readonly string[]; readonly username: string; } export interface TeaDispenserHandsUpButtonPressedEvent extends TeaDispenserChatServiceEventCommon { readonly type: '[TeaDispenser] HandsUpButtonPressed'; readonly messageId: string; } export interface TeaDispenserCommandIssuedEvent extends TeaDispenserChatServiceEventCommon { readonly type: '[TeaDispenser] CommandIssued'; readonly command: Command | InvalidCommand; readonly triggeringUserId: string; } export interface TeaDispenserKiwiButtonPressedEvent extends TeaDispenserChatServiceEventCommon { readonly type: '[TeaDispenser] KiwiButtonPressed'; readonly messageId: string; readonly triggeringUserId: string; } export interface DmvChatServiceEventCommon extends ChatServiceContext { readonly chatService: DmvService; } export interface DmvInstallCommandIssuedEvent extends DmvChatServiceEventCommon { readonly type: '[Dmv] InstallCommandIssued'; readonly mentionedRoles: readonly number[]; } export interface DmvCryButtonPressedEvent extends DmvChatServiceEventCommon { readonly type: '[Dmv] CryButtonPressed'; readonly messageId: string; readonly emojiId: string; readonly triggeringUserId: string; } interface TeaDispenserWebEventCommon extends MessageContext, WebServerContext { readonly chatService: TeaDispenserService; } export interface WebFleetLootEditorRequestedEvent extends TeaDispenserWebEventCommon { readonly type: '[Web] FleetLootEditorRequested'; readonly ie10OrBelow: boolean; } export interface WebFleetLootEditorPostedEvent extends TeaDispenserWebEventCommon { readonly type: '[Web] FleetLootEditorPosted'; readonly fleetLoot: FleetLoot | null; } export interface WebNeederChooserRequestedEvent extends TeaDispenserWebEventCommon { readonly type: '[Web] NeederChooserRequested'; } export interface WebNeedsEditorRequestedEvent extends TeaDispenserWebEventCommon { readonly type: '[Web] NeedsEditorRequested'; readonly needer: string; } export interface WebNeedsEditorPostedEvent extends TeaDispenserWebEventCommon { readonly type: '[Web] NeedsEditorPosted'; readonly needer: string; readonly itemStacks: readonly ItemStack[]; } export interface WebIndexRequested extends WebServerContext { readonly type: '[Web] IndexRequested'; } export default Event;
// Invocation line: g++ -std=c++17 -O3 #include <tuple> extern "C" void f(int *out); int c() { int r; f(&r); return r; } auto cpp() { std::tuple<int> r; f(&std::get<0>(r)); return r; }
15 years ago today, the Sega Dreamcast made its debut in Japan (the US release date was famously 9/9/99). This bizarre beige machine didn’t even last three years on the market, but it left a lasting mark on the world of video games. From second screen gameplay to online multiplayer to DLC, the Dreamcast was well ahead of its time. Let’s take this opportunity to examine what made the Dreamcast special, and pay respects to one of the most bizarre moments in gaming history.

Compared to the PS4 and Xbox One, it’s startling just how little horsepower the Dreamcast had to work with back in 1998. It shipped with a 200 MHz CPU, 16MB of RAM, 8MB of VRAM, and had a max resolution of 480p. That said, the Dreamcast’s hardware was miles ahead of the other consoles on the market at the time. The Nintendo 64, released two years before, had a 93.75MHz CPU, 4MB of unified RAM (expandable to 8MB), and maxed out at 480i. At that point, the original PlayStation had been out for nearly four years in Japan, and its specs were even worse. Without a doubt, the Dreamcast blew existing hardware out of the water from a technical perspective.

The Dreamcast’s performance superiority didn’t last long, though. The PS2 launched less than two years later, and was capable of rendering substantially better looking games. Frankly, the PS2 was an unstoppable juggernaut, and that led to Sega’s untimely exit from hardware production. Even so, the Dreamcast had a number of quirky features that set it apart from everything else at the time, and ended up influencing the future of console gaming in a major way.

Second screen gaming

Whenever anyone brings up the Dreamcast, the VMU (Visual Memory Unit) is bound to be discussed. Long before tablets and smartphones became common tools for interacting with games, the Dreamcast’s memory card could serve as a second screen. The VMU’s primary function was that of a standard memory card, but it could also be used as a tiny handheld gaming system. Games like Sonic Adventure could load a mini-game on the VMU, and gamers could impact their game saves on the go. If you played a mini-game on the VMU, the game save would reflect that the next time you plugged it back into the Dreamcast. Now that so many games are tied into social networks and mobile apps, it’s easy to see where Sega was headed with this incredibly novel concept.

Online connectivity

The Dreamcast came with a dial-up modem right out of the box in most markets, and it could be easily upgraded to take advantage of broadband connections as well. With this built-in internet functionality, developers were able to take the plunge into full-fledged online gaming on a console for the very first time. While separate accessories, like the oddball Satellaview, had been sold for previous consoles, the included modem meant that everyone could take advantage of online features. I would argue that Sega’s Phantasy Star Online was the very first game to make a compelling case for the utility of online consoles. Today, internet connectivity is a given for any gaming device, and we definitely have the Dreamcast to thank for spearheading that movement.

The Dreamcast’s connectivity didn’t stop at online multiplayer, though. It also ushered in the concept of DLC for console games. In Sega’s colorful rhythm game Samba de Amigo, players could connect to the internet and unlock a number of additional songs.
While the tracks themselves weren’t downloaded over the internet (the unlock was simply stored on a tiny memory card), this is surprisingly similar to the way on-disc DLC is handled in modern games.

Windows CE

One of the strangest aspects of the Dreamcast era was Sega’s prominent partnership with Microsoft. The Dreamcast itself was compatible with a specialized version of Windows CE, and some games included a version of Redmond’s OS to take advantage of the DirectX compatibility. The goal was to provide more options for developers, and theoretically make the Dreamcast more appealing for a number of big-name ports. Unfortunately, not many games ended up using Windows CE on the Dreamcast thanks to the licensing and performance issues associated with it. Even so, it’s safe to say that this partnership helped pave the way for Microsoft to launch the original Xbox just a few years later.

The Dreamcast’s legacy

The Dreamcast only ended up selling a few million units over its short lifespan, and its failure was a huge hit to Sega. It was far from a perfect console, but it was innovative and forward-facing in a way unlike everything else that came before. 15 years later, we can now look back on the Dreamcast’s legacy, and see that it’s the foundation that everything since has been built upon.

[Image credit: Evan-Amos]
<reponame>agunde406/consensource use database::{DbConn, PgPool}; use database_manager::models::Block; use database_manager::tables_schema::blocks; use diesel::prelude::*; use errors::ApiError; use hyper_sse::Server; use paging::*; use rocket_contrib::{Json, Value}; use std::{thread, time}; const DEFAULT_CHANNEL: u8 = 0; lazy_static! { static ref PUSH_SERVER: Server<u8> = Server::new(); } pub struct BlockWatcher { db_pool: PgPool, block_queue: Vec<Block>, last_block_height: i64, } impl Clone for BlockWatcher { fn clone(&self) -> Self { BlockWatcher { db_pool: self.db_pool.clone(), block_queue: vec![], last_block_height: -1, } } } impl BlockWatcher { /// Constructs a new BlockWatcher pub fn new(db_pool: PgPool) -> Self { BlockWatcher { db_pool, block_queue: vec![], last_block_height: -1, } } /// Returns the next block, if there is one. pub fn take(&mut self) -> Option<Block> { if self.block_queue.is_empty() { if let Err(err) = self.load_block_queue() { error!("Unable to load blocks: {:?}", err); } } self.block_queue.pop() } fn load_block_queue(&mut self) -> Result<(), WatchError> { let db_conn = self .db_pool .get() .map_err(|err| WatchError::ConnectionError(format!("{:?}", err)))?; if self.last_block_height < 0 { let block: Option<Block> = blocks::table .order(blocks::block_num.desc()) .first(&*db_conn) .optional()?; if let Some(block) = block { self.block_queue.push(block); } } else { let mut blocks: Vec<Block> = blocks::table .filter(blocks::block_num.gt(self.last_block_height)) .order(blocks::block_num.asc()) .load(&*db_conn)?; if !blocks.is_empty() { self.block_queue.append(&mut blocks); } } if let Some(block) = self.block_queue.last() { self.last_block_height = block.block_num; } Ok(()) } } #[derive(Debug)] enum WatchError { ConnectionError(String), QueryError(String), } impl From<::diesel::result::Error> for WatchError { fn from(err: ::diesel::result::Error) -> Self { WatchError::QueryError(format!("{:?}", err)) } } pub struct WatcherThread { join_handle: thread::JoinHandle<()>, } impl WatcherThread { pub fn run(block_watcher: BlockWatcher, interval: u64, host: &str, port: u16) -> Self { let interval = time::Duration::from_millis(interval); let mut watcher = block_watcher.clone(); thread::spawn(move || loop { if let Some(block) = watcher.take() { debug!("Sending {:?}", block); if let Err(err) = PUSH_SERVER.push(DEFAULT_CHANNEL, "block-event", &block) { warn!("Unable to push block-event: {:?}", err); }; } else { thread::sleep(interval); } }); error!("Starting SSE server on {}:{}", host, port); WatcherThread { join_handle: start_sse_server(host, port), } } pub fn join(self) -> thread::Result<()> { self.join_handle.join() } } fn start_sse_server(host: &str, port: u16) -> thread::JoinHandle<()> { PUSH_SERVER.spawn( format!("{}:{}", host, port) .parse() .expect("Should have been a valid address"), ) } #[get("/blocks/<block_id>")] pub fn fetch_block(block_id: String, conn: DbConn) -> Result<Json<Value>, ApiError> { fetch_block_with_head_param(block_id, Default::default(), conn) } #[get("/blocks/<block_id>?<head_param>")] pub fn fetch_block_with_head_param( block_id: String, head_param: BlockParams, conn: DbConn, ) -> Result<Json<Value>, ApiError> { let head_block_num: i64 = get_head_block_num(head_param.head, &conn)?; let block = blocks::table .filter(blocks::block_id.eq(block_id.to_string())) .filter(blocks::block_num.le(head_block_num)) .first::<Block>(&*conn) .optional() .map_err(|err| ApiError::InternalError(err.to_string()))?; let link = format!("/api/blocks/{}", block_id); match block 
{ Some(block) => Ok(Json(json!({ "data": block, "link": link, "head": head_block_num, }))), None => Err(ApiError::NotFound(format!( "No block with the ID {} exists", block_id ))), } } #[derive(Default, FromForm, Clone)] pub struct BlockParams { limit: Option<i64>, offset: Option<i64>, head: Option<i64>, } #[get("/blocks")] pub fn list_blocks(conn: DbConn) -> Result<Json<Value>, ApiError> { list_blocks_with_params(Default::default(), conn) } #[get("/blocks?<params>")] pub fn list_blocks_with_params(params: BlockParams, conn: DbConn) -> Result<Json<Value>, ApiError> { let head_block_num: i64 = get_head_block_num(params.head, &conn)?; let mut blocks_query = blocks::table .filter(blocks::block_num.le(head_block_num)) .into_boxed(); let total_count = blocks::table .filter(blocks::block_num.le(head_block_num)) .into_boxed() .count() .get_result(&*conn) .map_err(|err| ApiError::InternalError(err.to_string()))?; let link_params = params.clone(); let paging_info = apply_paging(link_params, head_block_num, total_count)?; blocks_query = blocks_query.limit(params.limit.unwrap_or(DEFAULT_LIMIT)); blocks_query = blocks_query.offset(params.offset.unwrap_or(DEFAULT_OFFSET)); let blocks = blocks_query .load::<Block>(&*conn) .map_err(|err| ApiError::InternalError(err.to_string()))?; Ok(Json(json!({ "data": blocks, "link": paging_info.get("link"), "head": head_block_num, "paging": paging_info.get("paging") }))) } fn apply_paging(params: BlockParams, head: i64, total_count: i64) -> Result<Json<Value>, ApiError> { let link = format!("/api/blocks?head={}&", head); get_response_paging_info( params.limit, params.offset, link.to_string().clone(), total_count, ) }
/** * Set element indices of an n-dimensional array to value.indices is assumed to have the right number of elements * for the dimension of array. */ void sidlx_rmi_SimCall__array_set( struct sidlx_rmi_SimCall__array* array, const int32_t indices[], sidlx_rmi_SimCall const value) { sidl_interface__array_set((struct sidl_interface__array *)array, indices, ( struct sidl_BaseInterface__object *)value); }
# Reads the day of December D and prints "Christmas" followed by (25 - D) copies of "Eve",
# e.g. an input of 23 produces "Christmas Eve Eve".
d = 25 - int(input())
s = ['Eve'] * d
print(' '.join(['Christmas'] + s))
class ActivitySearch: """``ActivitySearch`` defines the interface for specifying activity search options.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def search_among_activities(self, activity_ids): """Execute this search among the given list of activities. :param activity_ids: list of activities :type activity_ids: ``osid.id.IdList`` :raise: ``NullArgument`` -- ``activity_ids`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def order_activity_results(self, activitiesearch_order): """Specify an ordering to the search results. :param activitiesearch_order: activity search order :type activitiesearch_order: ``osid.learning.ActivitySearchOrder`` :raise: ``NullArgument`` -- ``activitiesearch_order`` is ``null`` :raise: ``Unsupported`` -- ``activitiesearch_order`` is not of this service *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def get_activity_search_record(self, activitiesearch_record_type): """Gets the activity record corresponding to the given activity search record ``Type``. This method is used to retrieve an object implementing the requested record. :param activitiesearch_record_type: an activity search record type :type activitiesearch_record_type: ``osid.type.Type`` :return: the activity search record :rtype: ``osid.learning.records.ActivitySearchRecord`` :raise: ``NullArgument`` -- ``activitiesearch_record_type`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unsupported`` -- ``has_search_record_type(activitiesearch_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ return
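The abstract methods above only define the contract. A minimal, purely illustrative subclass might satisfy it as sketched below; the in-memory storage strategy and the stand-in error classes are assumptions for the sake of the example and are not part of the OSID specification:

class NullArgument(Exception):
    """Stand-in for the NullArgument error named in the docstrings above."""

class Unsupported(Exception):
    """Stand-in for the Unsupported error named in the docstrings above."""

class InMemoryActivitySearch(ActivitySearch):
    """Toy implementation that simply records the search options it is given."""

    def __init__(self):
        self._activity_ids = None
        self._search_order = None

    def search_among_activities(self, activity_ids):
        if activity_ids is None:
            raise NullArgument('activity_ids')
        self._activity_ids = list(activity_ids)

    def order_activity_results(self, activitiesearch_order):
        if activitiesearch_order is None:
            raise NullArgument('activitiesearch_order')
        self._search_order = activitiesearch_order

    def get_activity_search_record(self, activitiesearch_record_type):
        raise Unsupported('no search record types are supported by this toy implementation')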
Beyonce performs the National Anthem during the inauguration ceremony on Monday Photo by JEWEL SAMAD/AFP/Getty Images I’m hoping for a flurry of retractions. A Marine spokesperson said yesterday that she couldn’t confirm or deny that Beyoncé wasn’t lip-syncing, and pretty much every media outlet assumed that was an admission. On NBC Nightly News, Brian Williams said that Beyoncé wasn’t lip-syncing, but, “in effect, lip-syncing”; Jon Stewart’s jokes took it as a given that she faked it; NPR is wringing its hands NPR-ily. It’s bunk. That lady was singing live. She sang to a prerecorded track—a canned band—and perhaps there was a guide vocal in her earpiece, audible only to her, but that was absolutely a genuine performance. Kelly Clarkson performed to a prerecorded track, too. So did the choir. I’ve done a bunch of lip-syncing, in music videos, and it’s very easy to spot. Anyone who performs in, shoots, or edits music videos can see the tiny, observable latency endemic to lip-syncing. Beyoncé either sang live, or she’s the most gifted lip-syncer in the history of humanity. Below is the video for my cover of “Take Me Home, Country Roads.” My lips lag pitifully behind the words. It’s because—and this is extremely common—the way I sing a song changes very slightly every time, and, six months later, the phrasing can be significantly different. Video directors have asked me to spend three hours listening to my own song, lip-syncing all the while, but I’ve never done it. The result is an eminently mediocre lip-sync. Ignore the mountaineer in Cazals, and watch my mouth: Now here’s Beyoncé at the inauguration. Again, don’t look at anything but her mouth: For comparison, here’s Beyoncé’s video for “Work It Out,” in which she’s lip-syncing. It’s difficult to ignore her thighs, but, please, focus solely on the lips: If she was indeed lip-syncing at the inauguration, give her the Nobel Prize in mime. A soldier can differentiate one type of gun from another by the sound of it; a bird-watcher can hear the difference between warblers. If your job is predicated on microphones—as an engineer or a singer—it’s not that hard to tell the difference between a live vocal and a prerecorded one. The easiest way to say it would be that a recorded vocal sounds perfect, in the way that a live vocal can’t, and, to those who spend time meticulously mixing imperfect vocals to bring them closer to perfection, it’s as plain as day. In a recording studio, troublesome variables can be smoothed out. The reverb on the vocal can be exactingly calibrated. You can use a much more expensive, sophisticated, delicate microphone; a hand-held, onstage mic needs to be rugged. You can put a “pop screen” in front of the mic—in a live vocal you’ll hear Bs and Ps go pmpp!; you’ll hear a little more breath; Fs and Ss will make a slight whssh! sound. The national anthem is a bitch to sing—it’s the K2 of national anthems. The low notes are really low; the high notes are super high. The tune was an 18th-century drinking song, and I’m sure that half the fun of it was that it turned a room of drunks into blissful Biz Markies. Even Beyoncé seemingly had to decide which notes were worth the risk of flubbing, when choosing a key to sing it in. She chose the lows, at the beginning of the tune. “Oh say can you see” is barely audible; that’s probably because if the sound engineer mixed the vocal expressly to make her shakier, lower range louder, the big dramatic notes at the end would shriek. 
A prerecorded vocal would be mixed such that those low notes would be just as audible as the high notes. A singer with a big voice learns to pull the mic slightly further from her mouth on big notes, because it gets louder, and she doesn’t want to kill people. Rewind that video, and note the words “twilight” and “ramparts.” They vary slightly in volume—the low notes are louder than the high notes. Most dramatically, sound waves actually blow around in the wind. Sometimes, when I do a big outdoor festival, I sound-check in calm weather, but the wind picks up when the actual show begins, taking my voice and throwing it someplace other than where I’m expecting it. It’s easy to get confused. A politician might choke, like, “I’m not speaking right! Or the sound’s not right! I better be super loud! Or use the mic differently!” That would be a Howard Dean moment. If you’re the sound engineer at the inauguration, a big part of your gig is preventing Howard Dean moments. Beyoncé, being a samurai, clearly came expecting that possibility. So she compensates: She sings the word “bursting” a little too close to the mic, causing a little bit of discernible distortion—it’s like a subtler version of when you’re talking into the mic on your phone, and you suddenly get loud, or too close, and for a moment the voice gets kind of larger and fuzzier. When she pulls out her left earpiece—more on that in a moment—she’s adjusting how she sounds to herself, and she subsequently pulls the mic further from her face. Notice how the echo suddenly gets more obvious—for a split second, the vocal sounds like it’s going through a tin can. Right after that, you can tell that the sound person is scrambling to adjust the sound, because she’s adjusted her mic position. It sounds noticeably different until “Oh say does that star-spangled banner still wave,” when the sound is dialed in again. So: about the in-ear monitors. The sight of her earpiece begat the conspiracy theories, but an earpiece is not, by any means, a sign of lip-syncing. In-ears are worn by almost all singers who can afford them. Everybody who sings in arenas does. It may sound surprising, but, even for fantastic singers, it can be difficult to sing in tune if you’re only hearing yourself on an enormous sound system—overhead, flanking you, and facing not you but the audience. Anybody who would sing outdoors, in the wind, in front of hundreds of thousands of people (and millions on TV), without in-ears would be gambling absurdly. The choir was probably too large for everybody to have in-ears, but I bet the soloist did; if Kelly Clarkson didn’t use them, I’d be stunned. Probably she popped it out because the sound was weird—see above. Possibly, she usually performs with just one in, and used both at the inauguration to be extremely cautious—upon beginning to sing, she might’ve thought, Oh, wait, I don’t need this. (When I use in-ears on longer tours, when I can afford to bring along a sound engineer, I always keep one popped out so I don’t feel insulated.) Look, lip-syncing irritates me. It’s everywhere. I was stunned that, after the Ashlee Simpson debacle, SNL continued to have musical guests who lip-sync. And even more grating to me is the use of canned backing tracks when you could just put a real band there. Every single performance at the inauguration was done to prerecorded tracks—as was every performance in 2009, including Yo-Yo Ma’s. 
(He actually did fake playing his cello, because cold weather makes the wood and the strings of delicate instruments freak out.) I wasn’t an enthused viewer of the Bush inaugurations, and I was high during Clinton’s, but I’d bet you any sum that performances on those occasions were largely to canned music. That sucks! America, the richest country in the world, can’t afford to hire an orchestra and put microphones on them? Is it, like, hard, or something? What is this, pregame at the Gator Bowl? But that’s not the scandal—supposed lip-syncing is. It’s weird that nobody in a TV news department, where remote reporters are always wearing an earpiece to hear the anchor back in New York, would explain why singers would use them. For me, the most compelling evidence that Beyoncé was doing it for real is the HELL YES smile on Joe Biden’s face. Now, that is, clearly, a dude standing two feet from an electrifying lady singing like a motherfucker.
<reponame>rgianassi/learning_go package shorten import ( "encoding/json" "fmt" "io" "net/http" "strings" "sync" ) // URLShortener URL shortener server data structure type URLShortener struct { expanderRoute string shortenRoute string statisticsRoute string mappings map[string]string statistics StatsJSON mux sync.Mutex } // NewURLShortener a URLShortener constructor func NewURLShortener() *URLShortener { urlShortener := URLShortener{} urlShortener.expanderRoute = "/" urlShortener.shortenRoute = "/shorten" urlShortener.statisticsRoute = "/statistics" urlShortener.mappings = make(map[string]string) urlShortener.statistics = NewStatsJSON() return &urlShortener } // UnpersistFrom function reads and decodes a JSON from the reader passed in // and then updates the URL mappings func (c *URLShortener) UnpersistFrom(r io.Reader) error { decoder := json.NewDecoder(r) if err := decoder.Decode(&c.mappings); err != nil { return err } c.statistics.updateTotalURL(int64(len(c.mappings))) return nil } // PersistTo function encodes the URL mappings in a JSON written to the writer // passed in func (c *URLShortener) PersistTo(w io.Writer) error { encoder := json.NewEncoder(w) if err := encoder.Encode(c.mappings); err != nil { return err } return nil } // SetupHandlerFunctions setups handler functions func (c *URLShortener) SetupHandlerFunctions() { http.HandleFunc(c.shortenRoute, c.shortenHandler) http.HandleFunc(c.statisticsRoute, c.statisticsHandler) http.HandleFunc(c.expanderRoute, c.expanderHandler) } func (c *URLShortener) addURL(longURL, shortURL string) { c.mux.Lock() defer c.mux.Unlock() c.mappings[shortURL] = longURL c.statistics.updateTotalURL(int64(len(c.mappings))) } // GetURL returns the complete URL corresponding to the shortened URL func (c *URLShortener) GetURL(shortURL string) (string, error) { c.mux.Lock() defer c.mux.Unlock() longURL, ok := c.mappings[shortURL] if !ok { return "", fmt.Errorf("short URL not found: %s", shortURL) } return longURL, nil } func (c *URLShortener) shortenHandler(w http.ResponseWriter, r *http.Request) { serverAddress := r.Host url := r.URL query := url.Query() longURL := query.Get("url") shortURL := Shorten(longURL) c.addURL(longURL, shortURL) linkAddress := fmt.Sprintf("http://%s", serverAddress) hrefAddress := fmt.Sprintf("%s/%s", linkAddress, shortURL) hrefText := fmt.Sprintf("%s -> %s", shortURL, longURL) fmt.Fprintf(w, "<a href=\"%s\">%s</a>", hrefAddress, hrefText) c.statistics.incrementHandlerCounter(ShortenHandlerIndex, true) } func (c *URLShortener) statisticsHandler(w http.ResponseWriter, r *http.Request) { url := r.URL query := url.Query() format := query.Get("format") if f := strings.ToLower(format); f == "json" { jsonCandidate, err := json.Marshal(&c.statistics) if err != nil { w.WriteHeader(http.StatusNoContent) c.statistics.incrementHandlerCounter(StatisticsHandlerIndex, false) return } fmt.Fprintf(w, "%s", jsonCandidate) c.statistics.incrementHandlerCounter(StatisticsHandlerIndex, true) return } fmt.Fprintf(w, "%s", &c.statistics) c.statistics.incrementHandlerCounter(StatisticsHandlerIndex, true) } func (c *URLShortener) expanderHandler(w http.ResponseWriter, r *http.Request) { shortURLCandidate := r.URL.Path[len(c.expanderRoute):] redirectURL, err := c.GetURL(shortURLCandidate) if err != nil { w.WriteHeader(http.StatusNotFound) c.statistics.incrementHandlerCounter(ExpanderHandlerIndex, false) return } http.Redirect(w, r, redirectURL, http.StatusSeeOther) c.statistics.incrementHandlerCounter(ExpanderHandlerIndex, true) }
package com.webcheckers.model; import java.util.logging.Logger; /** * Represents a player * * @author <a href='mailto:<EMAIL>'><NAME></a> */ public class Player implements Comparable<Player> { private static final Logger LOG = Logger.getLogger(Player.class.getName()); private int wins; private int losses; /** * This player's username */ private String name; /** * Returns true if it's the player's turn */ private boolean isTurn; /** * Creates a new player with a username * @param name * The username for this player. MUST be not be null, empty, or contain non alphanumeric characters * besides spaces */ public Player(String name) { this.name = name; LOG.fine(String.format("Created a new player with username \"%s\"", name)); } /** * Returns the username for this player * @return * The username for this player as a string */ public String getName() { return name; } /** * Checks if it is the current player's turn * @return True if it is */ public boolean getIsTurn() {return isTurn;} /** * Sets whether or not it's the player's turn * @param turn The value to set it to */ public void setIsTurn(boolean turn) { this.isTurn = turn; } /** * Get the ranking of the player * @return The win ration of the player */ public double score() { if (wins + losses > 0) { return ((double) wins) / ((double) (wins + losses)); } else { return 0; } } /** * Indicate that the player has won a game */ public void winGame() { wins++; } /** * Indicate that a player has lost a game */ public void loseGame() { losses++; } /** * Compares two players by their score * @param player The player to compare to * @return A positive number is the score is greater, a negative number if less, and a zero if equal */ public int compareTo(Player player) { return (int) Math.round((this.score() - player.score()) * Integer.MAX_VALUE); } }
/// <reference types="node" /> import { Socket } from 'net'; import { TLSSocket } from 'tls'; interface Listeners { connect?: () => void; secureConnect?: () => void; close?: (hadError: boolean) => void; } declare const deferToConnect: (socket: TLSSocket | Socket, fn: Listeners | (() => void)) => void; export default deferToConnect;
/** * If You change the Nuxeo-System. The UUIDs will be different than the used ones here! You need to change those accordingly. * @author cstrobel */ public class NuxeoUtilityTest { private static final transient Logger LOG = LoggerFactory.getLogger(NuxeoUtilityTest.class); private static final String URL = "http://127.0.0.1:8080/nuxeo/"; public static final String TEST_SECTION = "d002f289-bf07-4881-be5a-cb2429ec3f58"; public static final String TEST_DOCUMENT_UUID = "88cdccd0-ab0c-439d-ac2f-5c1cf65396a2"; public static final String TEST_FOLDER_UUID = "dceba628-4e7d-4fdd-b7ed-09e0ac39695a"; public static final String TEST_FOLDER2_UUID = "e5594598-a2df-4c24-b947-c9db46d0e36e"; private static NuxeoUtility nuxeo; @BeforeClass public static void setUpBeforeClass() throws Exception { nuxeo = new NuxeoUtility(URL, "demo", "secret"); } @AfterClass public static void tearDownAfterClass() throws Exception { nuxeo.shutdown(); } @Before public void setUp() throws Exception { } @After public void tearDown() throws Exception { } @Test public void testGetDocuments() throws Exception { // LOG.debug(nuxeo.getAllDocumentIds() + ""); // LOG.debug(nuxeo.getSession() + ""); Document doc = nuxeo.getDocument(TEST_DOCUMENT_UUID); assertNotNull(doc); } @Test public void testGetCollections() throws Exception { List<Document> collections = nuxeo.getCollections(""); assertEquals("currently 1", true, collections.size() >= 1); // LOG.debug("Docs:" + collections); // for (Document doc : collections) { // LOG.debug(doc.getTitle()); // } // ----- List<Document> collections2 = nuxeo.getCollections("testCollection"); assertEquals("currently one Colletions is named like that", true, collections2.size() >= 1); // LOG.debug("Docs:" + collections2); // for (Document doc : collections2) { // LOG.debug(doc.getTitle()); // } } @Test public void testCreateCollection() throws Exception { // TODO find a way to check if the description was set String name = "tempCollection"; Document document = nuxeo.createCollection(name, "description", null); assertNotNull(document); assertEquals("", name, document.getTitle()); // assertEquals("", "description", document.getString("dc:description")); nuxeo.deleteDocument(document); // ----Doc2 Document document2 = nuxeo.createCollection(name, null, null); assertEquals("", name, document2.getTitle()); assertNotNull(document2); nuxeo.deleteDocument(document2); // ----Doc3 Exception exc = null; try { Document document3 = nuxeo.createCollection(name, "", document); nuxeo.deleteDocument(document3); } catch (Exception e) { // not a folder exc exc = e; } assertNotNull(exc); // ----Doc4 Document document4 = nuxeo.createCollection(name, "", null); assertEquals("", name, document4.getTitle()); nuxeo.deleteDocument(document4); } @Test public void testAddDocumentsToCollectionAndGetDocsFromCollection() throws Exception { // get a Folder // // Default domain> Workspaces> cstrobel-test> testFolder Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); Document testDocument = createTestDocumentInFolder(folder); assertEquals("", "Note", testDocument.getType()); String path = "/default-domain/workspaces/java-junit-testfolders/testFolder/testDoc"; assertEquals("", path, testDocument.getPath().substring(0, path.length()));// To cut the UUID // finally create Collection and add the Documents to the collection List<Document> list = new ArrayList<>(); list.add(testDocument); Document collection = nuxeo.createCollection("someTestCollection", "", null); nuxeo.addDocumentsToCollection(collection, list); // check if the 
documents are realy added to the collection List<Document> documentsFromCollection = nuxeo.getDocumentsFromCollection(collection); boolean isTheDocumentInIt = false; for (Document doc : documentsFromCollection) { if (doc.getId().equals(testDocument.getId())) { isTheDocumentInIt = true; break; } } assertEquals("The Doc should be in the Collection", true, isTheDocumentInIt); // finally delete the test suff for (Document doc : list) { nuxeo.deleteDocument(doc); } nuxeo.deleteDocument(collection); LOG.debug("" + collection); } private Document createTestDocumentInFolder(Document folder) throws Exception { // Create a Document // Map<String, Object> params = new HashMap<String, Object>(); // params.put("name", "testDoc"); // params.put("type", "Note"); Properties properties = new Properties(); properties.setProperty("dc:title", "testDocument"); properties.setProperty("dc:description", "some Description"); Document document = nuxeo.createDocument(folder, "Note", properties); return document; } @Test public void testMoveDocumentToFolder() throws Exception { // test Doc Document document = null; try { // get target Folder Document folder = nuxeo.getDocument(TEST_FOLDER2_UUID); document = createTestDocumentInFolder(nuxeo.getDocument(TEST_FOLDER_UUID)); //check path String path = "/default-domain/workspaces/java-junit-testfolders/testFolder/testDocument"; LOG.debug(document.getPath()); assertEquals("", path, document.getPath().substring(0, path.length())); // move it move it Document newDoc = nuxeo.moveDocument(document, folder); //check path path = "/default-domain/workspaces/java-junit-testfolders/testFolder2/testDocument"; assertEquals("", path, newDoc.getPath().substring(0, path.length())); } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testRenderDocument() throws Exception { // get a Folder // // Default domain> Workspaces> cstrobel-test> testFolder Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); // render the Document final String name = "testOutput.ftl"; Blob renderDocument = nuxeo.renderDocument(document, "templates:Customer Reference using ODT Template", name, "text/xml", "ftl, mvel"); assertEquals("", name, renderDocument.getFileName()); assertNotNull("", renderDocument); // render again Blob renderDocument2 = nuxeo.renderDocument(document, "templates:SpecNux", null, null, null); assertNotNull("", renderDocument2); assertEquals("", "output.ftl", renderDocument2.getFileName()); // delete doc nuxeo.deleteDocument(document); } @Test public void testAddToWorklist() throws Exception { // create TestDoc Document document = null; try { // get a Folder // // Default domain> Workspaces> cstrobel-test> testFolder Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); document = createTestDocumentInFolder(folder); assertEquals("", "Note", document.getType()); // check current Worklist List<Document> preDocuments = nuxeo.getDocumentsFromWorkList(); int sizePre = preDocuments.size(); nuxeo.addCurrentDocumentToWorklist(document); // check current Worklist List<Document> postDocuments = nuxeo.getDocumentsFromWorkList(); int sizePost = postDocuments.size(); assertEquals("", sizePre + 1, sizePost); // delete doc nuxeo.deleteDocument(document); // check current Worklist List<Document> postPostDocuments = nuxeo.getDocumentsFromWorkList(); int sizePostPost = postPostDocuments.size(); assertEquals("Should now be the size before our operation", sizePre, sizePostPost); } 
catch (Exception e) { throw e; } finally { } } @Test public void testAddAndRemovePermissionsFile() throws Exception { fail("Nuxeo crashes if I try to get a Document and have no access"); // get a Folder // // Default domain> Workspaces> cstrobel-test> testFolder Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); try { // Blob queriedUsers = nuxeo.queryUsers("*", null); // doesnt work // get current Permissions checkIfUserHasAccessForPermission("anton", "aktie", "Read", document); // set Permission nuxeo.addPermissionToDocument(document, "READ", "powerusers", null, true); // group Document docWithPermission = nuxeo.addPermissionToDocument(document, "READ", "anton", null, true); // check Permissions again // FIXME need mechanism to get the Permissions of a Document. GUI says "Rights modified" but nothing more. Need API Method... Document docc = nuxeo.getUsersAndGroupsForDocument(document, "READ", "", false, false, false); // LOG.debug(docc.getState()); // LOG.debug(docc.getType()); // LOG.debug("ContextParameters:" + docc.getContextParameters()); // LOG.debug("Dirties:" + docc.getDirties()); // remove Permission nuxeo.removePermissionFromDocument(document, "anton", null); // check Permissions again // FIXME } catch (Exception e) { throw e; } finally { // delete File nuxeo.deleteDocument(document); } } @Test public void testAddAndRemovePermissionsFolder() throws Exception { fail("Nuxeo crashes if I try to get a Document and have no access"); // get a Folder // // Default domain> Workspaces> cstrobel-test> testFolder Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); try { assertFalse(checkIfUserHasAccessForPermission("anton", "aktie", "Read", folder)); // set Permission nuxeo.addPermissionToDocument(folder, "READ", "powerusers", null, true); // group Document folderWithPermission = nuxeo.addPermissionToDocument(folder, "READ", "anton", null, true); // check Permissions again assertTrue(checkIfUserHasAccessForPermission("anton", "aktie", "Read", folder)); // FIXME need mechanism to get the Permissions of a Document. GUI says "Rights modified" but nothing more. Need API Method... 
Document folder2 = nuxeo.getUsersAndGroupsForDocument(folder, "READ", "", false, false, false); // remove Permission nuxeo.removePermissionFromDocument(document, "anton", null); // check Permissions again // FIXME } catch (Exception e) { throw e; } finally { // delete File nuxeo.deleteDocument(document); } } private boolean checkIfUserHasAccessForPermission(final String user, final String password, final String permission, final Document document) throws Exception { // connect to Nuxeo final NuxeoUtility tempNuxeo = new NuxeoUtility(URL, user, password); // check if the user has access Document docc = tempNuxeo.getDocument(document.getId()); // LOG.debug(docc.getState()); // LOG.debug(docc.getType()); // LOG.debug(docc.getContextParameters() + ""); // LOG.debug(docc.getDirties() + ""); // LOG.debug(docc.getProperties() + ""); // LOG.debug(docc.getFacets() + ""); // shutdown tempNuxeo.shutdown(); return docc != null; } @Test public void testPublishDocument() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); // get a Section to publish to final String sectionUUID = TEST_SECTION; Document section = nuxeo.getDocument(sectionUUID);// Default domain> Sections> testSectionForUnitTest // check number of Docs in Section final int preSize = nuxeo.getChildrenOfDocument(section).size(); try { // publish nuxeo.publishDocumentToSection(document, section, false); // check publication final int postSize = nuxeo.getChildrenOfDocument(section).size(); assertEquals("Should be one more than the before check", preSize + 1, postSize); } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testApproveDocument() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); assertEquals("Should be Project", document.getState(), "project"); try { document = nuxeo.approveDocument(document); assertEquals("Should be Project", document.getState(), "project"); } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testWorkflowStatus() throws Exception { fail("This Nuxeo Instance has currently no Workflows"); // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); try { // start workflow Document workflow = nuxeo.startWorkflow(document, "Process_1:1:b358ea4d-c72a-11e4-b8b0-22b5c971964e", false, new Properties()); // cancel nuxeo.cancelWorkflow(workflow); // resume nuxeo.resumeWorkflow(workflow.getId()); } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testGetVersionsOfDocument() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); // get Versions List<Document> documentVersions = nuxeo.getDocumentVersions(document); int versionAmount = documentVersions.size(); assertEquals("", 0, versionAmount); //add some versions nuxeo.addPermissionToDocument(document, "READ", "powerusers", null, true); // group nuxeo.addPermissionToDocument(document, "READ", "anton", null, true); document = nuxeo.createVersion(document, "Major", true); //get Versions documentVersions = nuxeo.getDocumentVersions(document); document = documentVersions.get(0); 
assertEquals("", 1, documentVersions.size()); // 4 document = nuxeo.createVersion(document, "Major", true); documentVersions = nuxeo.getDocumentVersions(document); assertEquals("", 2, documentVersions.size()); // 2 document = nuxeo.createVersion(document, "Minor", true); documentVersions = nuxeo.getDocumentVersions(document); document = documentVersions.get(0); assertEquals("", 2, documentVersions.size()); // 3 document = nuxeo.createVersion(document, "None", true); documentVersions = nuxeo.getDocumentVersions(document); document = documentVersions.get(0); assertEquals("", 3, documentVersions.size()); try { } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testLockDocument() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); // lock document = nuxeo.lockDocument(document); // check assertEquals("", true, document.isLocked()); // unlock document = nuxeo.unlockDocument(document); // check assertEquals("", false, document.isLocked()); try { } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } @Test public void testCreateFolder() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // FIXME error from nuxeo ..? Document createdFolder = nuxeo.createFolder("folderForTesting", folder.getId()); // check assertEquals("", "Folder", createdFolder.getType()); try { } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(createdFolder); } } @Test public void testTagDocument() throws Exception { // get a Document Document folder = nuxeo.getDocument(TEST_FOLDER_UUID); // create TestDoc Document document = createTestDocumentInFolder(folder); nuxeo.tagDocument(document, "oneTag"); nuxeo.tagDocument(document, "Tag1, Tag2, Tag3"); try { } catch (Exception e) { throw e; } finally { // delete nuxeo.deleteDocument(document); } } }
def plot_periodic_voro(points, box, colorfill="shape", plot_points=False, **kw): if isinstance(points, tuple): naxs = len(points) fig, axs= plt.subplots(ncols=naxs, figsize=(4*naxs, 4) ) else: naxs = 1 fig, axs= plt.subplots(ncols=1, figsize=(4, 4) ) axs = [axs] points = (points, ) line_colors = kw.get('line_colors', 'k') line_width = kw.get('line_width', 1.0) line_alpha = kw.get('line_alpha', 1.0) for colindex, p in enumerate(points): periodic_points = _create_images(p, box) points_in_box = [] for point in periodic_points: if point_in_box(point, box): points_in_box.append(point) points_in_box = np.array(points_in_box) if plot_points: axs[colindex].plot(points_in_box[:,0], points_in_box[:,1], 'k.') vor = Voronoi(periodic_points) added_points_region = [] segments_in_box = [] polygons = [] for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices): simplex = np.asarray(simplex) pts = vor.points[pointidx] if np.all(simplex >= 0): if np.any(np.all(pts >= 0, axis=1)) and np.any(np.all(pts <= box, axis=1)): segment = vor.vertices[simplex] for i, point in enumerate(pts): if point in added_points_region: continue if point_in_box(point, box): poly = vor.vertices[vor.regions[vor.point_region[pointidx[i]]]] polygons.append(poly) segments_in_box.append(segment) axs[colindex].add_collection(LineCollection(segments_in_box, colors=line_colors, lw=line_width, alpha=line_alpha, linestyle='solid')) axs[colindex].grid(b=False) axs[colindex].axis("off") plt.tight_layout() if colorfill: ax = _colorize(polygons, axs[colindex], fillparameter=colorfill) return fig, axs
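plot_periodic_voro relies on a few helpers that are not shown in this snippet (_create_images, point_in_box and _colorize). Assuming box is the rectangle [Lx, Ly] with the origin at the lower-left corner (consistent with the pts >= 0 and pts <= box checks above), minimal sketches of the first two could look like this; the _colorize fill routine is omitted:

import numpy as np

def point_in_box(point, box):
    """Return True if a 2D point lies inside the rectangle [0, Lx] x [0, Ly]."""
    return (0.0 <= point[0] <= box[0]) and (0.0 <= point[1] <= box[1])

def _create_images(points, box):
    """Tile the points into the 8 neighbouring periodic images plus the original box."""
    points = np.asarray(points, dtype=float)
    shifts = [np.array([i * box[0], j * box[1]])
              for i in (-1, 0, 1) for j in (-1, 0, 1)]
    return np.concatenate([points + s for s in shifts])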
<gh_stars>1-10 // general #defines #ifndef TRUE #define TRUE 1L #define FALSE 0L #endif #ifndef NULL #define NULL 0L #endif // generalized C/C++ types // note: BYTE, WORD, and BOOL are defined in Windows header files. typedef long LONG; typedef unsigned long ULONG; typedef short SHORT; typedef unsigned short USHORT; typedef unsigned char UBYTE; typedef unsigned int UINT; typedef unsigned char *STRPTR; #if 1 typedef void *APTR; #else typedef STRPTR *APTR; #endif // list & node types/functions // list & node structure typedef struct MinNode { struct MinNode *mln_Succ; struct MinNode *mln_Pred; } MNODE; typedef struct MinList { MNODE *mlh_Head; MNODE *mlh_Tail; MNODE *mlh_TailPred; } MLIST; #define TXTF_RED 1 // unused #define TXTF_GRN 2 // unused #define TXTF_BLU 4 // unused #define TXTF_SCL 8 // TXTF_UNUSED is used to turn off the string gad display for unused data #define TXTF_UNUSED 16 // unused // same as TXTF_UNUSED plus txtrdata is left justified #define TXTF_LUNUSED 32 // unused // TXTF_AUTOSCALE is used to scale the texture's axis with respect to // the object's bounding box as follows: // if set in pflags[0], autoscale texture in x // if set in pflags[1], autoscale texture in y // if set in pflags[2], autoscale texture in z #define TXTF_AUTOSCALE 128 // unused #define Fix(X) ((LONG)((X)*65536.0)) #ifdef _MSC_VER // Microsoft compiler version #define RAND15() rand() #define SRAND15(X) srand(X) #else #define RAND15() (rand()>>16) #define SRAND15(X) (srand(X)) #endif typedef struct _vector { float x; float y; float z; } IM_VECTOR; typedef struct _matrix { IM_VECTOR I; IM_VECTOR J; IM_VECTOR K; } IM_MATRIX; typedef struct _axes { IM_VECTOR r; IM_MATRIX m; } AXES; typedef struct _tform { IM_VECTOR r; IM_VECTOR a; IM_VECTOR b; IM_VECTOR c; IM_VECTOR s; } TFORM; struct _coords { LONG wx; LONG wy; LONG wz; LONG px; LONG py; LONG pz; LONG pyx; LONG pyz; }; typedef struct _coords COORDS; #define FSIZE ((LONG)sizeof(float)) #define VSIZE ((LONG)sizeof(IM_VECTOR)) #define MSIZE ((LONG)sizeof(IM_MATRIX)) #define TSIZE ((LONG)sizeof(TFORM)) #define CSIZE ((LONG)sizeof(COORDS)) typedef struct _gfxdata { SHORT width; SHORT height; SHORT xaspect; SHORT yaspect; UBYTE *r; UBYTE *g; UBYTE *b; UBYTE *gen; MLIST *objlist; void *(*allocmem)(ULONG, ULONG); void (*freemem)(void *, ULONG); void (*copymem)(APTR, APTR, ULONG); void (*notify)(const char *,...); } GFXDATA; // Object flags (OBJECT.Flags) used in Imagine #define OBJF_PERSPVIEW 0x0001 #define OBJF_FRONTVIEW 0x0002 #define OBJF_RIGHTVIEW 0x0004 #define OBJF_TOPVIEW 0x0008 #define OBJF_ALLVIEWS 0x000f #define OBJF_PICKED 0x0010 #define OBJF_SELECTED 0x0020 #define OBJF_QUICKED 0x0040 // "quick" edge flag - for obj_eflags #define OBJF_TEMP 0x0080 // temporary flag - for objects #define OBJF_SHARP 0x0080 #define OBJF_POINTS 0x0100 #define OBJF_QUICK 0x0200 #define OBJF_CUBED 0x0400 #define OBJF_REBUILD 0x0800 #define OBJF_BBOXED 0x1000 #define OBJF_BLUE 0x0080 // Object grouping (joint) flags #define OBJGF_NORMAL 0 #define OBJGF_HINGE 1 #define OBJGF_BALL 2 #define OBJGF_LEGAL3_0 0x0003 // SUBOBJ.sbj_shape #defines ... this should be in object.h // objects #define SHAP_SPHERE 0 #define SHAP_AXIS 2 #define SHAP_FACETS 3 // not legal in TDDD files #define SHAP_GROUND 5 // internal use only #define SHAP_IGNORE 1 // not legal in TDDD files #define SHAP_PFACET 4 // not legal in TDDD files #define SHAPM_SUBTYPE 7 // used in 'hitobj()', 'spot()', etc. // camera & "staging" light sources -- also used in staging file // ... 
old (2.0 -- only used internally) // #define SHAP_SPHERICAL 0x0100 // #define SHAP_CYLINDRICAL 0x0200 // #define SHAP_CONICAL 0x0400 // #define SHAP_SPHERICAL 0x0100 // unshaped, point light source #define SHAP_CONICAL 0x0200 // round, point light source #define SHAP_PYRAMID 0x0300 // rectangular, point light source #define SHAP_PLANAR 0x0500 // unshaped, parallel light source #define SHAP_CYLINDRICAL 0x0600 // round, parallel light source #define SHAP_BOX 0x0700 // rect., parallel light source #define SHAP_CAMERA 0x0800 // masks to identify (staging) lights or camera & (staging) lights #define SHAPM_LITES 0x0700 #define SHAPM_CAMLITES 0x0f00 // "shape type" mask to ignore path flags & "reserved" flags #define SHAPM_TYPE 0x0fff // paths #define SHAPF_PATH 0x1000 #define SHAPF_CLOSED 0x2000 // flags used in raytrace code #define SHAPF_AGAIN 0x4000 #define SHAPF_CHECKED 0x8000 // SUBOBJ.sbj_lamp #defines #define LITEF_POINTSRC 0x0001 // point source -- one of these #define LITEF_PARALLEL 0x0002 // parallel rays -- must be set #define LITEF_ISLAMP 0x0003 #define LITEF_NOSHAPE 0x0000 // no shape #define LITEF_ROUND 0x0004 // round shape #define LITEF_RECTANGULAR 0x0008 // rectangular shape #define LITEF_SHAPEMASK 0x000c // mask for above #define LITEF_NOFLARE 0x0010 // flag for lens flare global F/X #define LITEF_NOFALLOFF 0x0000 // no falloff #define LITEF_DIMINISH 0x0020 // (1/R) diminishing #define LITEF_CONTROLLED 0x0040 // controlled falloff #define LITEF_RESERVED 0x0060 // reserved falloff type #define LITEF_FALLOFFMASK 0x0060 // mask for above #define LITEF_SHADOWS 0x0080 // casts shadows #define LITEF_BRIGHTOBJ 0x8000 // bright object // forms "flags" #define FORM_VFORM 1 #define FORM_TWOF 0 #define FORM_ONEF 2 #define FORM_ONES 4 #define FORM_TYPE 6 // OBJECT macros ... #define OBJAXES(O) ((AXES *)&(O)->obj_object.sbj_r) #define OBJPOSN(O) (&(O)->obj_object.sbj_r) #define OBJMTRX(O) ((IM_MATRIX *)&(O)->obj_object.sbj_a) #define PDAXES(PD) ((AXES *)&(PD)->r) #define PDPOSN(PD) (&(PD)->r) #define PDMTRX(PD) ((IM_MATRIX *)&(PD)->a) #define ADAXES(AD) ((AXES *)&(AD)->r) #define ADPOSN(AD) (&(AD)->r) #define ADMTRX(AD) ((IM_MATRIX *)&(AD)->a) #define BRAXES(BR) ((AXES *)&(BR)->tform.r) #define TXAXES(TX) ((AXES *)&(TX)->tform.r) // Zero check macros - look a little nicer in "if" conditions #define ISZERO(EXPR) ((EXPR)==0) #define ISNULL(EXPR) ((EXPR)==0) #define ISNONZ(EXPR) ((EXPR)!=0) // this disables the warning about "possible loss of data" // generated whenever a double is stored in a float variable // without an explicit type cast to float. #ifdef _MSC_VER // Microsoft compiler version #pragma warning(disable:4244) #endif // structures used in rendering typedef struct _rgbfp { float r; float g; float b; } RGBFP; typedef struct _ray { IM_VECTOR m_base; // ray origin IM_VECTOR m_unit; // ray direction } RAY; typedef struct _patch { IM_VECTOR ptc_pos; // position - read only IM_VECTOR ptc_nor; // normal vector RGBFP ptc_col; // color (RGB) RGBFP ptc_ref; // reflect (RGB) RGBFP ptc_tra; // filter (RGB) RGBFP ptc_spc; // specular (RGB) USHORT ptc_shp; // copy 'sbj_shape' & flags - read only USHORT ptc_shd; // flag - object can shadow itself - read only float ptc_c1; // 1st barycentric coord. - read only float ptc_c2; // 2nd barycentric coord. 
- read only RAY *ptc_rayptr; // - read only float ptc_raydst; // - read only float ptc_foglen; float ptc_shiny; // new 3.1 float ptc_hard; float ptc_index; float ptc_bright; void *ptc_txdata; // points to 16 bytes for use by textures float ptc_rough; // new 3.3 RGBFP ptc_brlite; // new 3.3 } PATCH; typedef struct _subobj { USHORT sbj_shape; USHORT sbj_lamp; struct _object *sbj_parent; ULONG sbj_number; IM_VECTOR sbj_r; IM_VECTOR sbj_a; IM_VECTOR sbj_b; IM_VECTOR sbj_c; IM_VECTOR sbj_s; } SUBOBJ; typedef struct _ogfx { COORDS *ogfx_points; COORDS ogfx_parent; COORDS ogfx_object; COORDS ogfx_axisa; COORDS ogfx_axisb; COORDS ogfx_axisc; COORDS ogfx_cube[8]; COORDS ogfx_texta; COORDS ogfx_textb; COORDS ogfx_textc; } OGFX; #define NUM_OCS 15 #define NUM_IOBJ_PROPS 8 #define IPRP_DITHER 0 // for old (pre 3.3) TDDD files (PRP1 chunk) #define IPRP_BRIGHT 0 // for 3.3+ #define IPRP_HARD 1 #define IPRP_ROUGH 2 #define IPRP_SHINY 3 #define IPRP_INDEX 4 #define IPRP_QUICK 5 #define IPRP_PHONG 6 #define IPRP_GENLOCK 7 #define IPRPF_PHONG 1 #define IPRPF_PARTICLES 2 typedef struct _pthd { IM_VECTOR r; IM_VECTOR a; IM_VECTOR b; IM_VECTOR c; IM_VECTOR s; USHORT infrom; USHORT outto; USHORT flags; USHORT extracnt; } PTHD; #define ASIZE ((LONG)sizeof(PTHD)) #define PTHF_NEWPATH 0x01 #define PTHF_CONNECTIN 0x02 #define PTHF_CONNECTOUT 0x04 #define PTHF_REVERSE 0x40 #define PTHF_SHARP 0x80 typedef struct _ford { SHORT numc; SHORT numf; SHORT flags; SHORT nums; IM_MATRIX tform; IM_VECTOR xlate; IM_VECTOR *points; USHORT *sections; } FORD; typedef struct _auxf { IM_MATRIX tform; IM_VECTOR xlate; } AUXF; typedef struct _subgrp { MNODE node; char name[18]; USHORT count; USHORT *list; USHORT ptype; USHORT pseed; float psize; char *pfname; } SUBGRP; typedef struct _object { MNODE obj_node; struct _object *obj_parent; MLIST obj_children; MNODE obj_link; SUBOBJ obj_object; UBYTE obj_props[NUM_IOBJ_PROPS]; char obj_name[18]; USHORT obj_flags; USHORT obj_acount; USHORT obj_pcount; USHORT obj_ecount; USHORT obj_fcount; PTHD *obj_pathdata; IM_VECTOR *obj_points; USHORT *obj_edges; USHORT *obj_faces; UBYTE *obj_pflags; UBYTE *obj_eflags; UBYTE *obj_fflags; UBYTE obj_diffuse[4]; // 4 bytes (0,R,G,B) UBYTE obj_reflect[4]; // 4 bytes (0,R,G,B) UBYTE obj_transmit[4]; // 4 bytes (0,R,G,B) UBYTE obj_specular[4]; // 4 bytes (0,R,G,B) IM_VECTOR obj_intensity; UBYTE *obj_dlist; UBYTE *obj_rlist; UBYTE *obj_tlist; SUBOBJ *obj_subjects; IM_VECTOR *obj_phongs; MLIST obj_txbrobj; MLIST obj_txbrtree; UBYTE obj_txbrpad[16]; MLIST obj_subgrps; float obj_foglen; float obj_bounds[6]; OGFX *obj_ogfx; struct _pface *obj_pgfx; FORD *obj_ford; IM_VECTOR *obj_auxpoints; TFORM *obj_auxtform; AUXF *obj_auxf; PTHD *obj_auxpthd1; PTHD *obj_auxpthd2; UBYTE obj_txbrpad2[32]; struct _stgobj *obj_stgobj; SHORT obj_animpad; SHORT obj_animflags; MLIST obj_animdata; USHORT obj_ptype; USHORT obj_pseed; float obj_psize; ULONG obj_sbjcnt; MLIST obj_statedata; USHORT obj_grpflags; USHORT obj_frzflags; UBYTE obj_txbrpad3[4]; IM_VECTOR *obj_pts0; TFORM *obj_axes0; IM_MATRIX *obj_alt0; IM_VECTOR *obj_phong0; char *obj_pfname; USHORT obj_quality; USHORT obj_quality2; USHORT obj_quality3; USHORT obj_quality4; float obj_ang1; float obj_ang2; float obj_ang3; float obj_ang4; AXES obj_jbase; char obj_bbsg[18]; char obj_sbsg[18]; } OBJECT; #define MAXPOINTS 0x7fffL #define MAXEDGES 0x7fffL #define MAXFACES 0x7fffL #define MAXKNOTS 0x7fffL #define LINKOBJ(LINKPTR) ((OBJECT *)((UBYTE *)(LINKPTR)-24)) #define OSIZE ((LONG)sizeof(OBJECT)) // #include "objstate.h" // 
"particle" flags // type values - bits 0-3 of 'ptype's #define PTF_FCE 0x0000 #define PTF_TET 0x0001 #define PTF_PYR 0x0002 #define PTF_OCT 0x0003 #define PTF_CUB 0x0004 #define PTF_BLK 0x0005 #define PTF_DOD 0x0006 #define PTF_SPH 0x0007 #define PTF_RND 0x0008 #define PTF_FILE 0x0009 #define PTF_MASK 0x000f // size values - bits 8-11 of 'ptype's #define PSF_SMALL 0x0000 #define PSF_LARGE 0x0100 #define PSF_RANDOM 0x0200 #define PSF_SPECIFY 0x0300 #define PSF_MASK 0x0f00 // centering values - bits 4-7 of 'ptype's #define PCF_INSC 0x0000 #define PCF_CIRC 0x0010 #define PCF_INTR 0x0020 #define PCF_HOLO 0x0030 #define PCF_MASK 0x00f0 // alignment values - bits 12-15 of 'ptype's #define PAF_OBJ 0x0000 #define PAF_FACE 0x1000 #define PAF_RANDOM 0x2000 #define PAF_MASK 0xf000 // lighting stuff ... typedef struct _litedata { RGBFP intensity; // light source intensities float falloff; // distance falloff factor TFORM *tf; // world coords of texture axes IM_VECTOR basepoint; // local (TXTR axis) coords IM_VECTOR direction; // local (TXTR axis) coords float distance; // distance to target IM_VECTOR target; // local (TXTR axis) coords // ... more ... SUBOBJ *lightsource; // light source SUBOBJ PATCH *spatch; // pointer to surface patch float rdotp; // dotvec(viewint way, surf. normal) float ldotp; // dotvec(lighting way, surf. normal) // RAY lray; // lighting ray } LTDATA;
# orion/modules/active/twitter.py
"""
Allows users to send tweets via voice command

Requires:
    - IFTTT configuration

Usage Examples:
    - "Tweet What's up guys?"
    - "Post What's up everyone? to twitter"
"""

from orion.classes.module import Module
from orion.classes.task import ActiveTask

from orion.api_library import ifttt_api as ifttt


class SendTweetTask(ActiveTask):

    def __init__(self):
        super(SendTweetTask, self).__init__(patterns=[r'.*?\btweet (.+)',
                                                      r'.*\bpost (.+)\bto twitter\b',
                                                      r'.*\bpost to twitter\b(.+)'])

    def match(self, text):
        return self.match_and_save_groups(text, {1: 'tweet'})

    def action(self, text):
        self.tweet += ' - from orion'
        print('\n~ Tweet: ' + self.tweet)
        self.speak('Sending tweet... ', show_text=True)
        ifttt.trigger('voice_tweet', self.tweet)


class Twitter(Module):

    def __init__(self):
        tasks = [SendTweetTask()]
        super(Twitter, self).__init__('twitter', tasks, priority=3)
My World Of Flops is Nathan Rabin's survey of books, television shows, musical releases, or other forms of entertainment that were financial flops, critical failures, or lack a substantial cult following.

There's something inherently compelling about world-class athletes. We're fascinated, on a superficial level, by their perfect young bodies, sculpted and engineered to compete in the highest echelons of sports. These are bodies created through sacrifice and self-discipline, by foregoing personal lives for the pursuit of greatness.

But there are also the human-interest stories behind these impossibly perfect-looking athletes, the tear-jerking accounts of dying grandmas and single mothers risking it all for a child's dream, and all the other sweat-soaked melodramas that fill up airtime between the games themselves. The Olympics are about sports, but they're also about stories, and television is shameless in how it chooses to tell those stories. Yet our fascination with Olympians tends to be on the fleeting and fickle side. One moment we're strangely obsessed with the U.S. women's beach volleyball team, the next we've forgotten the sport's entire existence. For every Michael Phelps who will undoubtedly remain famous for decades, there are hundreds of Olympians who thrilled the world, then were quickly forgotten.

The world is fascinated by these athletes' bodies, but it's less interested in what's inside their heads. That's partially because many world-class athletes have more reason to develop their bodies than develop their minds. Their young lives are so focused on achieving a goal that many are doomed to be naive or, in some cases, developmentally stunted in other aspects of their day-to-day existence. We fall in love with Olympic athletes during the games, but do we really want to know more about them? Is our fascination just a summer (or winter) crush or something more permanent and sustainable?

In 2013, E! took a gamble that the public did want to know more about multiple-gold-medal-winning swimmer Ryan Lochte, or at least it wanted to spend more time ogling his perfect abs and model-handsome face. So E! green-lit the reality show What Would Ryan Lochte Do?

The show was a crushing commercial and creative failure, although if a reality show were to follow Lochte around today as he deals with the simmering controversy of fleeing Brazil one step ahead of the law after possibly lying to the police about being robbed at gunpoint, it likely would be fascinating and a huge hit. What Would Ryan Lochte Do? establishes that its protagonist is as boring as he is beautiful, but this crazy international scandal has made him a whole lot more interesting, pretty much overnight.

At times What Would Ryan Lochte Do? seems less like a television show than a fiendishly effective delivery system for people yelling, "Jeah!" with an enthusiasm that Lochte clearly hoped, and thought, would sell millions of T-shirts with "Jeah!" spelled out in green fake rhinestones on the front. People yell "Jeah!" so often in What Would Ryan Lochte Do? that if you were to do a shot every time that inane catchphrase comes up, you'd die of alcohol poisoning halfway through the first season. Yet you would still, somehow, not be drinking as much as Lochte and his "Lochterage" do. Yes, Lochterage, which the show helpfully defines as an "Inner Circle Dedicated To Turning It Up At All Times." As the Lochterage label clumsily conveys, What Would Ryan Lochte Do?
functions on one level as Entourage-style lifestyle porn: A preposterously good-looking intellectual lightweight whose life is one endless frat party with no hangover the next morning, and the worshipful bros on hand to keep Lochte from ever having to gaze into the abyss and face the meaninglessness of his existence.

His family is also on hand, to ground him. But his sisters, younger brother, and mother never really come into focus as anything more than fuzzy abstractions useful only as foils to the obnoxious star. Lochte's friends are similarly forgettable; their greatest strength is their ability to briefly take some of the focus off of the raging black hole of narcissism the series is named for.

Lochte is astonishingly dumb and naive in a way the show unsuccessfully tries to make seem adorable, in a Jessica Simpson-on-Newlyweds way. One of the show's limp running jokes involves zooming in on Lochte's deer-in-the-headlights expression when he's asked to do anything outside his comfort zone of swimming real fast, partying with his bros, and being handsome.

What Would Ryan Lochte Do? follows Lochte as he parties, drinks, gets awards, chases women, meets with his sponsors at Speedo, and eats tons of fast food when not training. In one of the many details that make Lochte so deliciously relatable and sympathetic, Lochte exercises so much and has such a great metabolism that he can eat all the garbage food he wants and still have a perfect body.

What Would Ryan Lochte Do? is an obnoxious valentine to the emotionally stunted party lifestyle of its titular sex bomb (the star is an executive producer), but it has to pretend to be about something more than a world-class bro bro-ing it out in the company of his best bros. So the show intermittently pretends that its subject is a true romantic in search of the perfect woman to settle down with. Yet it's difficult, if not impossible, to buy into the show's cynical attempt to pass Lochte off as a man eager to settle down when he's clearly having a ball having casual sex with the many women who throw themselves at him.

There's nothing inherently wrong about being preposterously hot, dense, and superficial, but Lochte is too calculating in his dumbness for it to be much fun. In What Would Ryan Lochte Do?, the protagonist often comes across like a misanthropic parody of an oblivious, entitled millennial convinced the world owes him everything, and best not be leisurely in delivering on that promise. The show would seemingly be ideal for hate-watching. Perhaps E! green-lit the show because, as the Kardashians have illustrated, there's an awful lot of money to be made in being hated. Even if audiences didn't want to laugh with Lochte, maybe they'd laugh at him instead.

The problem is that Lochte isn't even an entertaining idiot; he's just an idiot. His life is so impossibly perfect, a Maxim fantasy of wealth, power, and sex, that only people like George Clooney, Leonardo DiCaprio, and Justin Timberlake could possibly relate to his travails as a stunningly handsome, world-famous millionaire playboy at the peak of his athletic and professional perfection.
As if to really rub the no-stakes awesomeness of its protagonist's life in the audience's face, the eight half-hours of substance-free fluff that constitute What Would Ryan Lochte Do?'s first and only season conclude with the overgrown frat boy at its center taking his entire Lochterage to spring break for even more binge-drinking, casual sex, and partying. But if Lochte's life is essentially one long Olympian frat party, it's a frat party audiences can only watch from a jealous, joyless distance, and never participate in. Lochte is so single-mindedly devoted to pursuing his own hedonistic fun that he ignores the audience. Lochte is arrogant enough to imagine that because he swims so fast and looks so good in a hot pink Speedo, he can pass off what resembles an undiscriminating Florida fraternity's collection of unedited home movies as an actual television show.

The complete failure of What Would Ryan Lochte Do? suggests that we only think we want to know about the everyday lives of our favorite Olympic athletes, when in reality, they'd either bore us or fill us with jealousy. What Would Ryan Lochte Do? does both. It confirms that the beefcake "charm" of Ryan Lochte is best processed through still imagery and watching him swim, both of which, crucially, do not call for Lochte to open his pretty mouth and attempt to string together words in a coherent fashion, something he fails at regularly. Talking is Lochte's fatal weakness, and it turns out that if you have your own reality show, you have to do a lot of talking. That's where What Would Ryan Lochte Do? goes awry. Lochte's catchphrase "Jeah!" is the show in a microcosm: He desperately, pathetically tried to make it happen, but the public just wasn't having it, and for good reason.

Though clearly designed to promote Lochte's synergistic ventures, What Would Ryan Lochte Do? ended up irrevocably harming the athlete's career by confirming the public's conception of him as a ditsy, over-sexed, hard-partying bimbo who is a phenomenal physical specimen with the intelligence, depth, and attention span of a golden retriever puppy. Then again, I suspect that Lochte would much prefer his old image as a beautiful half-wit coasting breezily through the lush life to the new reputation he is rapidly developing as a beautiful half-wit so stupid and naive that his partying and bad judgment have the potential to cause a bona fide international incident. It turns out there are actually huge downsides to letting people get away with everything as long as they're rich, powerful, attractive, and successful enough. One of those dangers is that they're so used to getting away with everything short of murder, they won't realize that they're in serious trouble until it's too late.

Failure, Fiasco, or Secret Success: Failure
// client_module/source/common/net/message/storage/attribs/SetXAttrRespMsg.h
#ifndef SETXATTRRESPMSG_H_
#define SETXATTRRESPMSG_H_

#include <common/net/message/SimpleIntMsg.h>


struct SetXAttrRespMsg;
typedef struct SetXAttrRespMsg SetXAttrRespMsg;

static inline void SetXAttrRespMsg_init(SetXAttrRespMsg* this);

struct SetXAttrRespMsg
{
   SimpleIntMsg simpleIntMsg;
};

void SetXAttrRespMsg_init(SetXAttrRespMsg* this)
{
   SimpleIntMsg_init( (SimpleIntMsg*)this, NETMSGTYPE_SetXAttrResp);
}

// getters & setters

static inline int SetXAttrRespMsg_getValue(SetXAttrRespMsg* this)
{
   return SimpleIntMsg_getValue( (SimpleIntMsg*)this);
}

#endif /*SETXATTRRESPMSG_H_*/
#include <pcl/recognition/ransac_based/bvh.h> #error "Using pcl/recognition/bvh.h is deprecated, please use pcl/recognition/ransac_based/bvh.h instead."
def refresh_image(self): self.vb.autoRange() self.image.update() return
<reponame>gwsch/unitime /* * Licensed to The Apereo Foundation under one or more contributor license * agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * The Apereo Foundation licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. * */ package org.unitime.timetable.gwt.client.admin; import java.util.Date; import com.google.gwt.user.client.Cookies; /** * @author <NAME> */ public class AdminCookie { private static AdminCookie sInstance = null; private int iSortTasksBy = 0, iSortTaskExecutionsBy = 0, iSortBuildingsBy = 0; private AdminCookie() { try { String cookie = Cookies.getCookie("UniTime:Admin"); if (cookie != null) { String[] params = cookie.split("\\|"); int idx = 0; iSortTasksBy = Integer.valueOf(params[idx++]); iSortTaskExecutionsBy = Integer.valueOf(params[idx++]); iSortBuildingsBy = Integer.valueOf(params[idx++]); } } catch (Exception e) { } } private void save() { String cookie = iSortTasksBy + "|" + iSortTaskExecutionsBy + "|" + iSortBuildingsBy; Date expires = new Date(new Date().getTime() + 604800000l); // expires in 7 days Cookies.setCookie("UniTime:Admin", cookie, expires); } public static AdminCookie getInstance() { if (sInstance == null) sInstance = new AdminCookie(); return sInstance; } public int getSortTasksBy() { return iSortTasksBy; } public void setSortTasksBy(int sortTasksBy) { iSortTasksBy = sortTasksBy; save(); } public int getSortTaskExecutionsBy() { return iSortTaskExecutionsBy; } public void setSortTaskExecutionsBy(int sortTakExecutionsBy) { iSortTaskExecutionsBy = sortTakExecutionsBy; save(); } public int getSortBuildingsBy() { return iSortBuildingsBy; } public void setSortBuildingsBy(int sortBuildingsBy) { iSortBuildingsBy = sortBuildingsBy; save(); } }
In a rare tale of technology, bioterrorism and chocolate, scientists are racing to sequence the cacao tree genome. They fear that without the genome in hand they will be unable to stop the spread of two virulent pathogens that threaten to devastate the world's cocoa crop.

Cacao trees were first domesticated more than 1,500 years ago by Mayans living in what is now Central America, but fungal diseases such as witch's broom and frosty pod have largely chased the bean out of its native habitat. The great worry is that one of these diseases will cross the Atlantic Ocean to West Africa, where 70 percent of the crop is now produced. Cacao trees in West Africa have no resistance to the pathogens, which form spores and spread via the wind, careless farmers and, in at least one case, bioterrorists. Scientists say that just a few infected pods would lead to the loss of one third of total global production.

One way to forestall such a crash is to breed plants that are resistant to infection. Scientists identify naturally resistant plants, artificially pollinate them, then test their offspring. This is a slow process, and having the cacao genome in hand would speed things up. Scientists would be able to identify the sections of DNA that confer increased resistance and select the best trees to breed. "It's expensive work," says Randy C. Ploetz, a plant pathologist at the University of Florida, "but once you have a genetic sequence, it makes that work a lot easier." Scientists expect to release a first draft of the cacao genome by the end of the year; identifying the genetic sites responsible for resistance will take a few years more. In the meantime, producers in Côte d'Ivoire and Ghana have instituted strict quarantines to help protect their crops.
// ODBC SHORTANSI -- the actual MPLoc is encoded in the schName // using underscore delimiters, i.e. "systemName_volumeName_subvolName". // We make a real MPLoc out of that. NABoolean QualifiedName::applyShortAnsiDefault(NAString& catName, NAString& schName) const { ComMPLoc loc; loc.parse(schName, ComMPLoc::SUBVOL, TRUE); if (loc.isValid(ComMPLoc::SUBVOL)) { catName = loc.getSysDotVol(); schName = loc.getSubvolName(); return TRUE; } return FALSE; }
/// Optionally prepares a table of response times. /// /// This function is invoked by `GooseMetrics::print()` and /// `GooseMetrics::print_running()`. pub(crate) fn fmt_response_times(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { // If there's nothing to display, exit immediately. if self.requests.is_empty() { return Ok(()); } let mut aggregate_raw_times: BTreeMap<usize, usize> = BTreeMap::new(); let mut aggregate_raw_total_time: usize = 0; let mut aggregate_raw_counter: usize = 0; let mut aggregate_raw_min_time: usize = 0; let mut aggregate_raw_max_time: usize = 0; writeln!( fmt, " ------------------------------------------------------------------------------" )?; writeln!( fmt, " {:<24} | {:>11} | {:>10} | {:>11} | {:>10}", "Name", "Avg (ms)", "Min", "Max", "Median" )?; writeln!( fmt, " ------------------------------------------------------------------------------" )?; // First display the raw data, as it always exists. let mut co_data = false; for (request_key, request) in self.requests.iter().sorted() { if !co_data && request.coordinated_omission_data.is_some() { co_data = true; } let raw_average = match request.raw_data.counter { 0 => 0.0, _ => request.raw_data.total_time as f32 / request.raw_data.counter as f32, }; let raw_average_precision = determine_precision(raw_average); // Merge in all times from this request into an aggregate. aggregate_raw_times = merge_times(aggregate_raw_times, request.raw_data.times.clone()); // Increment total response time counter. aggregate_raw_total_time += &request.raw_data.total_time; // Increment counter tracking individual response times seen. aggregate_raw_counter += &request.raw_data.counter; // If user had new fastest response time, update global fastest response time. aggregate_raw_min_time = update_min_time(aggregate_raw_min_time, request.raw_data.minimum_time); // If user had new slowest response time, update global slowest response time. aggregate_raw_max_time = update_max_time(aggregate_raw_max_time, request.raw_data.maximum_time); writeln!( fmt, " {:<24} | {:>11.raw_avg_precision$} | {:>10} | {:>11} | {:>10}", util::truncate_string(request_key, 24), raw_average, format_number(request.raw_data.minimum_time), format_number(request.raw_data.maximum_time), format_number(util::median( &request.raw_data.times, request.raw_data.counter, request.raw_data.minimum_time, request.raw_data.maximum_time, )), raw_avg_precision = raw_average_precision, )?; } let raw_average = match aggregate_raw_counter { 0 => 0.0, _ => aggregate_raw_total_time as f32 / aggregate_raw_counter as f32, }; let raw_average_precision = determine_precision(raw_average); // Display aggregated data if there was more than one request. if self.requests.len() > 1 { writeln!( fmt, " -------------------------+-------------+------------+-------------+-----------" )?; writeln!( fmt, " {:<24} | {:>11.avg_precision$} | {:>10} | {:>11} | {:>10}", "Aggregated", raw_average, format_number(aggregate_raw_min_time), format_number(aggregate_raw_max_time), format_number(util::median( &aggregate_raw_times, aggregate_raw_counter, aggregate_raw_min_time, aggregate_raw_max_time )), avg_precision = raw_average_precision, )?; } // Nothing more to display if there was no Coordinated Omission data collected. 
if !co_data { return Ok(()); } writeln!( fmt, " ------------------------------------------------------------------------------" )?; writeln!(fmt, " Adjusted for Coordinated Omission:")?; let mut aggregate_co_times: BTreeMap<usize, usize> = BTreeMap::new(); let mut aggregate_co_total_time: usize = 0; let mut aggregate_co_counter: usize = 0; let mut aggregate_co_min_time: usize = 0; let mut aggregate_co_max_time: usize = 0; writeln!( fmt, " ------------------------------------------------------------------------------" )?; writeln!( fmt, " {:<24} | {:>11} | {:>10} | {:>11} | {:>10}", "Name", "Avg (ms)", "Std Dev", "Max", "Median" )?; writeln!( fmt, " ------------------------------------------------------------------------------" )?; // Now display Coordinated Omission data. for (request_key, request) in self.requests.iter().sorted() { let co_average; let standard_deviation; let co_minimum; let co_maximum; if let Some(co_data) = request.coordinated_omission_data.as_ref() { let raw_average = match request.raw_data.counter { 0 => 0.0, _ => request.raw_data.total_time as f32 / request.raw_data.counter as f32, }; co_average = match co_data.counter { 0 => 0.0, _ => co_data.total_time as f32 / co_data.counter as f32, }; standard_deviation = util::standard_deviation(raw_average, co_average); aggregate_co_times = merge_times(aggregate_co_times, co_data.times.clone()); aggregate_co_counter += co_data.counter; // If user had new fastest response time, update global fastest response time. aggregate_co_min_time = update_min_time(aggregate_co_min_time, co_data.minimum_time); // If user had new slowest response time, update global slowest response time. aggregate_co_max_time = update_max_time(aggregate_raw_max_time, co_data.maximum_time); aggregate_co_total_time += co_data.total_time; co_minimum = co_data.minimum_time; co_maximum = co_data.maximum_time; } else { co_average = 0.0; standard_deviation = 0.0; co_minimum = 0; co_maximum = 0; } let co_average_precision = determine_precision(co_average); let standard_deviation_precision = determine_precision(standard_deviation); // Coordinated Omission Mitigation was enabled for this request, display the extra data: if let Some(co_data) = request.coordinated_omission_data.as_ref() { writeln!( fmt, " {:<24} | {:>11.co_avg_precision$} | {:>10.sd_precision$} | {:>11} | {:>10}", util::truncate_string(request_key, 24), co_average, standard_deviation, format_number(co_maximum), format_number(util::median( &co_data.times, co_data.counter, co_minimum, co_maximum, )), co_avg_precision = co_average_precision, sd_precision = standard_deviation_precision, )?; } else { writeln!( fmt, " {:<24} | {:>11} | {:>10} | {:>11} | {:>10}", util::truncate_string(request_key, 24), "-", "-", "-", "-", )?; } } // Display aggregated Coordinate Omission data if there was more than one request. 
if self.requests.len() > 1 { let co_average = match aggregate_co_counter { 0 => 0.0, _ => aggregate_co_total_time as f32 / aggregate_co_counter as f32, }; let co_average_precision = determine_precision(co_average); let standard_deviation = util::standard_deviation(raw_average, co_average); let standard_deviation_precision = determine_precision(standard_deviation); writeln!( fmt, " -------------------------+-------------+------------+-------------+-----------" )?; writeln!( fmt, " {:<24} | {:>11.avg_precision$} | {:>10.sd_precision$} | {:>11} | {:>10}", "Aggregated", co_average, standard_deviation, format_number(aggregate_co_max_time), format_number(util::median( &aggregate_co_times, aggregate_co_counter, aggregate_co_min_time, aggregate_co_max_time )), avg_precision = co_average_precision, sd_precision = standard_deviation_precision, )?; } Ok(()) }
// clang/test/CodeGen/sanitize-coverage-old-pm.c
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize=address -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK,ASAN
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize=bounds -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK,BOUNDS
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize=memory -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK,MSAN
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize=thread -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK,TSAN
// RUN: %clang %s -target x86_64-unknown-linux-gnu -emit-llvm -S -fsanitize=undefined -fsanitize-coverage=trace-pc,trace-cmp -o - -flegacy-pass-manager | FileCheck %s --check-prefixes=CHECK,UBSAN
//
// Host armv7 is currently unsupported: https://bugs.llvm.org/show_bug.cgi?id=46117
// UNSUPPORTED: armv7, armv7l, thumbv7, armv8l
// The same issue also occurs on a riscv32 host.
// XFAIL: riscv32

int x[10];

// CHECK-LABEL: define dso_local void @foo(
void foo(int n) {
  // CHECK-DAG: call void @__sanitizer_cov_trace_pc
  // CHECK-DAG: call void @__sanitizer_cov_trace_const_cmp
  // ASAN-DAG: call void @__asan_report_store
  // MSAN-DAG: call void @__msan_warning
  // BOUNDS-DAG: call void @__ubsan_handle_out_of_bounds
  // TSAN-DAG: call void @__tsan_func_entry
  // UBSAN-DAG: call void @__ubsan_handle
  if (n)
    x[n] = 42;
}

// CHECK-LABEL: declare void
/* * Until the connector entity allows querying for the status, we have to go through all connections and * see if we can find our connector host in there. */ private RouterConnections collectConnectionInfo(List<List<?>> response) { int hostIdx = connection.getAttributeIndex("host"); int openedIdx = connection.getAttributeIndex("opened"); int operStatusIdx = connection.getAttributeIndex("operStatus"); List<String> hosts = filterOnAttribute(String.class, hostIdx, response); List<Boolean> opened = filterOnAttribute(Boolean.class, openedIdx, response); List<String> operStatus = filterOnAttribute(String.class, operStatusIdx, response); return new RouterConnections(hosts, opened, operStatus); }
The most depressed tweets from people at work after Glastonbury

'It's difficult not to have a little cry on the train'

Glastonbury is all over for another year, and for the 175,000-ish lucky people who went, it's back to reality. Sorry. It was always going to be tricky setting your alarm for 7am and boarding a packed commuter train full of intensely miserable people, after five days of living it up in a field packed full of weird and wonderful things. Especially on a Tuesday (that's why you should probably have booked the Tuesday off work, too).

Here are the most depressing tweets from Glastonbury-goers who are back at their desks instead of huffing laughing gas at the Stone Circle. Now, cut that dirty wristband off.

First day back at work after Glastonbury is not the one #GlastonburyBlues — Benjamin Ashley (@BenAshley4) July 1, 2014

Should have booked Tuesday off work as well #glastonbury — Verity Cowley (@veritycowley) July 1, 2014

Listening to Elbow on the walk to work & trying not cry a bit. Maybe it was that emotional,maybe I'm very tired. Probably both. #glastonbury — Alice Elizabeth (@_aliceelizabeth) July 1, 2014

Feels weird not waking up in a field this morning!! Back to work! #Glastonbury — Thomas Odlum (@TomOdlum10) July 1, 2014

it's difficult not to have a little cry on the train to work. I miss Glastonbury. — ch (@chloelisabeth) July 1, 2014

I just deleted my Glastonbury app and now I'm getting ready for work. Life sucks right now 👎 — Stacey O'Connor (@staceysoc) July 1, 2014

Very real and very difficult Glastonbury blues this morning. Why do I find myself at work, didn't think this through.. — Terry Andersön (@terryaanderson) July 1, 2014

I cannot deal with the fact that this time yesterday I had just watched the sunrise over Glastonbury & now I'm getting ready for work. — BG (@BethGilligan) July 1, 2014

First day back at work after Glastonbury, 4.30am start. It hurts. — Beth Evans (@theBethEvans) July 1, 2014
def phrase_matcher(terms: list, attribute: str = "LOWER") -> spacy.matcher.PhraseMatcher: matcher = spacy.matcher.PhraseMatcher(nlp.vocab, attr=attribute) if attribute != "LOWER": patterns = [phrase_nlp(term) for term in terms] else: patterns = [nlp(term) for term in terms] matcher.add("TerminologyList", None, *patterns) return matcher
package org.elder.sourcerer; import com.google.common.collect.ImmutableList; public class EventReadResult<T> { private final ImmutableList<EventRecord<T>> events; private final int fromVersion; private final int lastVersion; private final int nextVersion; private final boolean isEndOfStream; public EventReadResult( final ImmutableList<EventRecord<T>> events, final int fromVersion, final int lastVersion, final int nextVersion, final boolean isEndOfStream) { this.events = events; this.fromVersion = fromVersion; this.lastVersion = lastVersion; this.nextVersion = nextVersion; this.isEndOfStream = isEndOfStream; } public ImmutableList<EventRecord<T>> getEvents() { return events; } public int getFromVersion() { return fromVersion; } public int getLastVersion() { return lastVersion; } public int getNextVersion() { return nextVersion; } public boolean isEndOfStream() { return isEndOfStream; } }
// shadows/supportv4/src/test/java/org/robolectric/shadows/support/v4/NotificationCompatBuilderTest.java
package org.robolectric.shadows.support.v4;

import static com.google.common.truth.Truth.assertThat;

import android.app.Notification;
import android.support.v4.app.NotificationCompat;
import com.android.internal.R;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RuntimeEnvironment;
import org.robolectric.util.TestRunnerWithManifest;

@RunWith(TestRunnerWithManifest.class)
public class NotificationCompatBuilderTest {

  @Test
  public void addAction__shouldAddActionToNotification() {
    NotificationCompat.Action action =
        new NotificationCompat.Action.Builder(R.drawable.ic_corp_icon, "a title", null).build();

    Notification notification =
        new NotificationCompat.Builder(RuntimeEnvironment.application)
            .addAction(action)
            .build();

    assertThat(notification.actions).asList().hasSize(1);
  }
}
// vim: set filetype=go: /* BSD 3-Clause License Copyright (c) 2019, iXo All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // pcap replayer (with gui if needed, with step by step functionality) package main import ( "os" "net" "github.com/urfave/cli" "github.com/andlabs/ui" _ "github.com/andlabs/ui/winmanifest" "pcapreplay/gui" "pcapreplay/pcap" "pcapreplay/commons" ) func createGui() { commons.MainWin, commons.MainPane = gui.CreateMainWindow("PCAP Replay") commons.InterfacesPane = gui.CreateHPanel(commons.MainPane, "Net interfaces", false) commons.Interfaces = gui.CreateComboBox(commons.InterfacesPane, true) commons.Interfaces.OnSelected(func(*ui.Combobox) { intfs, _ := net.Interfaces() commons.IntfId = intfs[commons.Interfaces.Selected()].Name }) commons.ReplayPane = gui.CreateVPanel(commons.MainPane, "Replay", true) filePane := ui.NewHorizontalBox() commons.FileField = ui.NewEntry() filePane.Append(commons.FileField, true) fileSearchBtn := ui.NewButton("…") fileSearchBtn.OnClicked(func(*ui.Button) { commons.PcapFile = ui.OpenFile(commons.MainWin) commons.FileField.SetText(commons.PcapFile) pcap.Infos(commons.PcapFile) }) filePane.Append(fileSearchBtn, false) commons.ReplayPane.Append(filePane, false) commons.ReplayPane.Append(ui.NewLabel(" "), true) commons.Stats1 = gui.CreateLabeledField(commons.ReplayPane, "Avg packet rate :", false, true) commons.Stats2 = gui.CreateLabeledField(commons.ReplayPane, "Stats :", false, true) commons.StatPBar = ui.NewProgressBar() commons.StatPBar.SetValue(-1) commons.ReplayPane.Append(commons.StatPBar, true) commons.ReplayPane.Append(ui.NewLabel(" "), true) commons.ControlsPane = gui.CreateHPanel(commons.MainPane, "Controls", false) commons.ControlsPane.Append(ui.NewLabel(" "), true) commons.PlayBtn = ui.NewButton("▶") commons.FastPlayBtn = ui.NewButton("▶▶") commons.StepPlayBtn = ui.NewButton("▮▶") commons.StepOnePlayBtn = ui.NewButton("▮▶¹") commons.ResetBtn = ui.NewButton("⟲") commons.PlayBtn.OnClicked(func(*ui.Button) { commons.ReplayFast = false gui.DisableControls() go pcap.Replay() }) commons.FastPlayBtn.OnClicked(func(*ui.Button) { commons.ReplayFast = true gui.DisableControls() 
go pcap.Replay() }) commons.StepPlayBtn.OnClicked(func(*ui.Button) { commons.ReplayFast = false gui.DisableControls() go pcap.ReplayStep(commons.StepSpinBox.Value()) }) commons.StepOnePlayBtn.OnClicked(func(*ui.Button) { commons.ReplayFast = false gui.DisableControls() go pcap.ReplayStep(1) }) commons.ResetBtn.OnClicked(func(*ui.Button) { commons.Stats2.SetText("Resetted") go pcap.EndReplay() }) commons.StepSpinBox = ui.NewSpinbox(1, 5000) commons.ControlsPane.Append(commons.PlayBtn, false) commons.ControlsPane.Append(commons.FastPlayBtn, false) commons.ControlsPane.Append(ui.NewLabel(" "), true) commons.ControlsPane.Append(commons.StepSpinBox, false) commons.ControlsPane.Append(commons.StepPlayBtn, false) commons.ControlsPane.Append(commons.StepOnePlayBtn, false) commons.ControlsPane.Append(commons.ResetBtn, false) commons.ControlsPane.Append(ui.NewLabel(" "), true) commons.MainWin.Show() go populateGui() } func populateGui() { intfs, _ := net.Interfaces() for _, intf := range intfs { commons.Interfaces.Append(intf.Name) } if commons.PcapFile != "" && commons.FileField != nil { commons.FileField.SetText(commons.PcapFile) } } func main() { app := cli.NewApp() app.Name = "PCAP Replay" app.Version = "1.0.0" app.Usage = "pcapreplay" app.UsageText = "pcapreplay --intf <interface> [--gui] [--fast] --pcap <pcap file>" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "intf", Usage: "system interface id", Destination: &commons.IntfId, }, cli.StringFlag{ Name: "pcap", Usage: "pcap file to replay", Destination: &commons.PcapFile, }, cli.BoolFlag{ Name: "fast", Usage: "replay without the real time between each packets", Destination: &commons.ReplayFast, }, cli.BoolFlag{ Name: "gui", Usage: "start the helper gui", Destination: &commons.WithGui, }, } app.Action = func(c *cli.Context) error { if commons.WithGui { ui.Main(createGui) } else { pcap.Replay() } return nil } app.Run(os.Args) }
/** * A description in the Interface Repository of a member of an IDL union. */ public final class UnionMember implements org.omg.CORBA.portable.IDLEntity { // instance variables /** * The name of the union member described by this * <code>UnionMember</code> object. * @serial */ public String name; /** * The label of the union member described by this * <code>UnionMember</code> object. * @serial */ public org.omg.CORBA.Any label; /** * The type of the union member described by this * <code>UnionMember</code> object. * @serial */ public org.omg.CORBA.TypeCode type; /** * The typedef that represents the IDL type of the union member described by this * <code>UnionMember</code> object. * @serial */ public org.omg.CORBA.IDLType type_def; // constructors /** * Constructs a new <code>UnionMember</code> object with its fields initialized * to null. */ public UnionMember() { } /** * Constructs a new <code>UnionMember</code> object with its fields initialized * to the given values. * * @param __name a <code>String</code> object with the name of this * <code>UnionMember</code> object * @param __label an <code>Any</code> object with the label of this * <code>UnionMember</code> object * @param __type a <code>TypeCode</code> object describing the type of this * <code>UnionMember</code> object * @param __type_def an <code>IDLType</code> object that represents the * IDL type of this <code>UnionMember</code> object */ public UnionMember(String __name, org.omg.CORBA.Any __label, org.omg.CORBA.TypeCode __type, org.omg.CORBA.IDLType __type_def) { name = __name; label = __label; type = __type; type_def = __type_def; } }
def classify_region(self, D: BorelSet, n: int)->Tuple[torch.Tensor,torch.Tensor,torch.Tensor]: mean, lcb, ucb = self.get_ucb_lcb(D, n) above = lcb > self.level_set below = ucb < self.level_set not_known = above * False + True not_known = not_known * (~ above) * (~ below) return (above.view(-1), not_known.view(-1), below.view(-1))
import re from django.shortcuts import render from elasticsearch import Elasticsearch from rest_framework.views import APIView from rest_framework.response import Response def index(req): return render(req, 'index.html') class Geomovement(APIView): es = Elasticsearch() indexes = ['news', 'tweets', 'science'] search_types = ['geostatement', 'geobin', 'suggest'] negations = ['movement', 'nomovement'] scales = ['states', 'continents', 'countries', 'bins1', 'bins2'] def get(self, request, format=None): results = self.esearch(request.query_params) return Response(results) def validate(self, v, check_type): if check_type == 'searchtype': # it should be one of the predefined search types if v in self.search_types: return v if check_type == 'binorprob': # check for numbers separated by commas tester = re.compile(r'^[0-9]*$') vs = v.split(',') output = [] for e in vs: if len(e) > 0 and tester.match(e): output.append(int(e)) return output if check_type == 'content': # return only letters and numbers with a max of 30 characters return (''.join(e for e in v if (e.isalnum() or e == ' ')))[:30] if check_type == 'yearmonth': # format should be 202005 for May, 2020 year = int(v[0:4]) month = int(v[4:6]) if year >= 2019 and 12 >= month >= 1: return '%s-%s-%s' % (v[0:4], v[4:6], '01') else: return None if check_type == 'iname': # they should be one of the predefined index names vs = v.split(',') output = [] for e in vs: if e in self.indexes: output.append('geomovement_%s' % e) return output if check_type == 'negation': # they should be one of the predefined index names vs = v.split(',') output = [] for e in vs: if e in self.negations: output.append(e != 'movement') return output if check_type == 'scale': # they should be one of the predefined index names if v in self.scales: return v else: return None def build_geostatement_query(self, content, bins, time_start, time_end, movement_probability, negations, scale): size = 10000 source = ["statementId", "content", "published", scale, "other%s" % scale.capitalize(), "url"] qs = [] if content is not None and content != ' ': cq = {"match": {"content": content}} qs.append(cq) if bins is not None and len(bins) > 0: if len(bins) > 1: bq = {"bool": {"should": [{"term": {scale: int(bin)}} for bin in bins]}} qs.append(bq) if len(bins) == 1: bq = {"term": {scale: bins[0]}} qs.append(bq) if time_start is not None and time_end is not None: tq = {"range": {"published": {"gte": '%sT00:00:00' % time_start, "lte": '%sT00:00:00' % time_end}}} qs.append(tq) if len(movement_probability) > 1: mq = {"bool": {"should": [{"term": {"predClass": p}} for p in movement_probability]}} qs.append(mq) if len(movement_probability) == 1: mq = {"term": {"predClass": movement_probability[0]}} qs.append(mq) if len(negations) == 1: mq = {"term": {"negated": negations[0]}} qs.append(mq) if len(negations) == 0: mq = {"bool": {"must": [{"term": {"negated": n}} for n in negations]}} qs.append(mq) q1 = {"bool": {"must": qs}} sq = {"_source": source, "size": size, "query": q1} if bins is not None: sq['aggs'] = { "l_f": {"filter": q1, "aggs": {"a_bi": {"terms": {"field": scale}, "aggs": {"l_bi": {"terms": {"field": "contentBigrams", "size": 20}}}} }}} return sq def build_geobin_query(self, content, time_start, time_end, movement_probability, negations, scale): size = 10000 qs = [] if content is not None and content != ' ': cq = {"match": {"content": content}} qs.append(cq) if time_start is not None and time_end is not None: tq = {"range": {"published": {"gte": '%sT00:00:00' % time_start, "lte": 
'%sT00:00:00' % time_end}}} qs.append(tq) if len(movement_probability) > 1: mq = {"bool": {"should": [{"term": {"predClass": p}} for p in movement_probability]}} qs.append(mq) if len(movement_probability) == 1: mq = {"term": {"predClass": movement_probability[0]}} qs.append(mq) if len(negations) == 1: mq = {"term": {"negated": negations[0]}} qs.append(mq) if len(negations) == 0: temp_negations = [True, False] mq = {"bool": {"must": [{"term": {"negated": n}} for n in temp_negations]}} qs.append(mq) q1 = {"bool": {"must": qs}} sq = {"size": 0, "aggs": {"l_f": {"aggs": {"l_b": {"terms": {"field": scale, "size": size}}, "n": {"terms": {"field": "negated"}, "aggs": {"n_d": {"terms": {"field": "yearMonth", "size": size}}}}}}, "l_t": { "aggs": {"t_n": {"terms": {"field": "negated"}, "aggs": {"t_n_d": {"terms": {"field": "yearMonth", "size": size}}}}} }}} if len(q1) > 0: sq['aggs']['l_f']['filter'] = q1 if len(movement_probability) > 1: mq = {"bool": {"must": [{"range": {"published": {"gte": '2019-08-01T00:00:00', "lte": '2025-01-01T00:00:00'}}}, {"bool": {"should": [{"term": {"predClass": p}} for p in movement_probability]}}]}} sq['aggs']['l_t']['filter'] = mq return sq def build_suggest_query(self, content): size = 5 sq = {"suggest": {"autosuggest": { "text": content, "term": {"field": "content", "size": size, "suggest_mode": "always" }}}} return sq def get_geostatement_results(self, responses, scale, bins): results = {'s': [], 'bi': {}, 'b': bins} bi = {} for response in responses: for hit in response['hits']['hits']: doc = hit['_source'] result = { 'id': doc['statementId'], 'c': doc['content'], 'p': doc['published'], 'b': doc[scale], 'ob': doc['other%s' % scale.capitalize()], 'u': doc['url'] } results['s'].append(result) if 'aggregations' in response: for bucket in response['aggregations']['l_f']['a_bi']['buckets']: key1 = str(bucket['key']) for bucket2 in bucket['l_bi']['buckets']: key = bucket2['key'] if key1 not in bi: bi[key1] = {} if key in bi[key1]: bi[key1][key] += bucket2['doc_count'] else: bi[key1][key] = bucket2['doc_count'] # for b in bi.items(): # bi[b[0]] = {k: v for k, v in sorted(b[1].items(), key=lambda item: item[1], reverse=True)} results['bi'] = bi return results def get_geobin_results(self, responses): results = {'l': {}, 'n': {}} for response in responses: for bucket in response['aggregations']['l_f']['l_b']['buckets']: key = bucket['key'] if key in results['l']: results['l'][key] += bucket['doc_count'] else: results['l'][key] = bucket['doc_count'] for bucket1 in response['aggregations']['l_t']['t_n']['buckets']: key1 = bucket1['key'] for bucket2 in bucket1['t_n_d']['buckets']: key2 = bucket2['key'] key2 = '%s-%s' % (key2[0:4], key2[4:6]) neg = 'nm' if key1 == 1 else 'm' o_neg = 'm' if key1 == 1 else 'nm' if key2 in results['n']: results['n'][key2]['t_%s' % neg] += bucket2['doc_count'] else: results['n'][key2] = {'t_%s' % neg: bucket2['doc_count'], 't_%s' % o_neg: 0} for bucket1 in response['aggregations']['l_f']['n']['buckets']: key1 = bucket1['key'] for bucket2 in bucket1['n_d']['buckets']: key2 = bucket2['key'] key2 = '%s-%s' % (key2[0:4], key2[4:6]) neg = 'nm' if key1 == 1 else 'm' o_neg = 'm' if key1 == 1 else 'nm' if key2 in results['n']: if 'f_%s' % neg in results['n'][key2]: results['n'][key2]['f_%s' % neg] += bucket2['doc_count'] else: results['n'][key2]['f_%s' % neg] = bucket2['doc_count'] results['n'][key2]['f_%s' % o_neg] = 0 # results['l'] = {k: v for k, v in sorted(results['l'].items(), key=lambda item: item[1])} return results def get_suggest_results(self, 
responses): results = [] for response in responses: for ele in response['suggest']['autosuggest'][0]['options']: suggest = ele['text'] if suggest not in results: results.append(suggest) return results def esearch(self, params): content = params.get('c', None) if content is not None: content = self.validate(content, 'content') bins = params.get('b', None) if bins is not None: bins = self.validate(bins, 'binorprob') time_start = params.get('ts', None) if time_start is not None: time_start = self.validate(time_start, 'yearmonth') time_end = params.get('te', None) if time_end is not None: time_end = self.validate(time_end, 'yearmonth') movement_probability = params.get('p', None) if movement_probability is not None: movement_probability = self.validate(movement_probability, 'binorprob') index_names = params.get('i', None) if index_names is not None: index_names = self.validate(index_names, 'iname') search_type = params.get('t', None) if search_type is not None: search_type = self.validate(search_type, 'searchtype') negations = params.get('n', None) if negations is not None: negations = self.validate(negations, 'negation') scale = params.get('s', None) if scale is not None: scale = self.validate(scale, 'scale') if search_type == 'geostatement': sq = self.build_geostatement_query(content, bins, time_start, time_end, movement_probability, negations, scale) responses = [] for index_name in index_names: responses.append(self.es.search(index=index_name, body=sq, request_timeout=30)) results = self.get_geostatement_results(responses, scale, bins) if search_type == 'geobin': sq = self.build_geobin_query(content, time_start, time_end, movement_probability, negations, scale) responses = [] for index_name in index_names: responses.append(self.es.search(index=index_name, body=sq, request_timeout=30)) results = self.get_geobin_results(responses) if search_type == 'suggest': sq = self.build_suggest_query(content) responses = [] for index_name in index_names: responses.append(self.es.search(index=index_name, body=sq)) results = self.get_suggest_results(responses) return results
import { Injectable } from '@angular/core'; import { Observable } from 'rxjs/Observable'; import { Subject } from 'rxjs/subject'; // http://blog.angular-university.io/how-to-build-angular2-apps-using-rxjs-observable-data-services-pitfalls-to-avoid/ export interface Todo { id: number; name: string; isSync: boolean; } @Injectable() export class TodoService { private _todos$: Subject<Todo[]>; private dataStore: Todo[] = []; private counter: number; constructor() { this.dataStore = this.generateTodos(); this.counter = this.dataStore.length; this._todos$ = <Subject<Todo[]>>new Subject(); } get todos$(): Observable<Todo[]> { return this._todos$.asObservable(); } public loadAll(): void { console.debug('loadAll()...'); this._todos$.next(this.dataStore); } public addTodo(): void { this.counter++; // Do some async stuff to add the TODO to backend service. When this is finished update client with its value let newItemName = 'added item ' + this.counter; let newTodo: Todo = { id: this.counter, name: newItemName, isSync: false }; // Add it to local cache this.dataStore.push(newTodo); // this._todos$.next(this.dataStore); // Update all Observers, uncomment to see what happens // Now add it to backend this.addTODObackend(newTodo).subscribe((resultItem: Todo) => { let updatedTodo = this.findById(resultItem.id); if(updatedTodo) { updatedTodo.isSync = true; this._todos$.next(this.dataStore); } }); } /** * @returns undefined when not found */ private findById(id: number): Todo { let retval: Todo = undefined; this.dataStore.forEach((item, index) => { if (item.id === id) { retval = item; } }); return retval; } private addTODObackend(item: Todo): Observable<Todo> { return Observable.create((observer: any) => { console.debug('Simulate adding item to backend, will finish after 2 seconds'); setTimeout(() => { observer.next(item); observer.complete(); }, 1500); }); } private generateTodos(): Todo[] { let todoList: Todo[] = []; for (let n of [1, 2, 3]) { let t: Todo = { id: n, name: 'item ' + n, isSync: true }; todoList.push(t); } return todoList; } }
// Close closes the connection. // Any blocked Read or Write operations will be unblocked and return errors. func (conn *Conn) Close() error { conn.closing <- 0 conn.closing <- 0 conn.closed.Wait() return conn.rawConn.Close() }
import React, { FC, ReactElement } from 'react'; import { EMail, EBody, ESection, EColumn } from '../../src/browser'; import { ISectionProps } from '../../src/eSection'; import { IColumnProps } from '../../src/eColumn'; export type EmailProps = { sectionProps?: ISectionProps; columnProps?: IColumnProps; children: ReactElement; }; export const Email: FC<EmailProps> = ({ sectionProps, columnProps, children }) => ( <EMail> <EBody width={600}> <ESection {...sectionProps} backgroundColor='#ffffff' borderRadius={4} padding='20px' > <EColumn width={600} {...columnProps}> {children} </EColumn> </ESection> </EBody> </EMail> );
Sex differences in 10-year ischemic cardiovascular disease risk prediction in Chinese patients with prediabetes and type 2 diabetes

Background
Cardiovascular disease has become a serious public health problem in recent years in China. The aim of the study was to examine sex differences in cardiovascular risk factors and 10-year ischemic cardiovascular disease (ICVD) risk in Chinese patients with prediabetes (PreDM) and type 2 diabetes mellitus (T2DM).

Methods
This was a multi-site retrospective case-control study conducted from April–November 2016 using an electronic medical record database, involving 217 PreDM and 900 T2DM patients admitted to endocrinology units in four hospitals in China. CVD risk was estimated using the Chinese 10-year ICVD risk model. The differences in 10-year absolute ICVD risk according to PreDM, T2DM < 1 year, T2DM 1–5 years or T2DM ≥5 years and sex were analyzed using ANOVA.

Results
When compared to PreDM females, males with PreDM had significantly higher 10-year ICVD risk. In contrast, the opposite pattern of 10-year ICVD risk was observed in T2DM; males had significantly lower 10-year ICVD risk. Moreover, compared to T2DM females, males with T2DM had a lower proportion with moderate or greater ICVD risk (p < 0.001). When compared to PreDM males, males with T2DM < 1 year and with T2DM 1–5 years had no difference in 10-year ICVD risk, but had higher ICVD risk with T2DM ≥5 years (p < 0.05). Compared to PreDM females, females with T2DM in all subgroups had higher ICVD risk (p < 0.05). Among those with T2DM, hypertension rates of awareness, treatment and control were 78.60%, 65.38% and 31.10%, respectively; hyperlipidemia rates of awareness, treatment and control were lower (29.15%, 8.30% and 3.47%, respectively). Females with T2DM had higher prevalence, awareness and treatment of hypertension and hyperlipidemia than males with T2DM (p < 0.001).

Conclusions
There is a greater need for cardiovascular risk reduction programs for females with T2DM at diagnosis. Given the low numbers for awareness, treatment and control of hypertension and hyperlipidemia in both males and females, significant resources must be directed at them, specifically at improving regular assessment of blood pressure and blood lipids. Strengthening the management of chronic diseases through adherence to evidence-based guidelines to enhance clinical treatment may reduce 10-year ICVD risk in patients with T2DM in China.

Background
In 2017, the International Diabetes Federation reported that China had the highest number of adults with diabetes mellitus. Its Diabetes Society refers to China as the global epicenter of the diabetes epidemic. About 114.4 million Chinese adults have diabetes, and this number is expected to rise to 119.8 million by 2045. Overall, the prevalence of type 2 diabetes mellitus (T2DM) is similar in women and men in China: 12.1% among men and 11.0% among women. The prevalence of prediabetes (PreDM) has dramatically increased in China, rising from 15.5% in 2007 to 50.1% in 2010. According to the 20-year China Da Qing Diabetes Prevention Study, 93% of persons with PreDM will progress to overt T2DM in 20 years, imposing a large health and economic burden on China. In addition, PreDM is also an important risk factor for the development of cardiovascular disease. Ischemic cardiovascular disease (ICVD) has recently been increasing in China, although it appears to have fallen over the past 2 decades in the US and Europe.
In 2013, the age-standardized mortality rate for ischemic heart disease increased by 2.6% and for stroke increased by 28.8%. About 2 out of every 5 deaths are from cardiovascular disease, the leading cause of death in China. Some evidence suggests that T2DM confers a stronger excess risk of cardiovascular diseases in women than in men, yet a National Health and Nutrition Examination Survey (NHANES) in the US showed that women in the midlife years have historically been at a lower risk for overall vascular events than similarly aged men. Therefore, examining sex differences in ICVD risk and preventing ICVD in China in those already diagnosed with T2DM are important goals in diabetes care.

A Chinese task group of the national Fifteen Project developed sex-specific optimal 10-year risk prediction models in the China MUCA study cohort II. Distinct from models developed in Europe, the Chinese 10-year ICVD risk model estimates total ICVD risk (both coronary heart disease and stroke). It is appropriate for Chinese racial and ethnic groups and has been incorporated into current Chinese guidelines for cardiovascular disease prevention. Although several studies have used the 10-year ICVD risk in Chinese adults, its prediction of sex differences in those with PreDM and T2DM has not been reported. Hence, in the current study, we examined CVD risk factors and used an established ICVD tool to examine the 10-year ICVD risk in male and female Chinese adults with PreDM and T2DM. This study may identify groups at higher risk, the consequence of which is that health care providers could use this information to deliver lifestyle modification programs or pharmacological interventions within diabetes care that target the most vulnerable groups.

Design and sample selection
A retrospective case-control study design was used. Electronic medical records were obtained from all patients admitted to endocrinology units in four hospitals, one in each province (Jiangsu, Henan, Shanxi and Sichuan), from April 2016 to November 2016. The majority of the inpatients in the study were admitted for blood glucose adjustment or for annual chronic complication screening, not for illness. This practice is very different from that in western countries because insurance provides more generous reimbursement for inpatient care in China, which incentivizes patients to get hospitalized for even minor health conditions. Hospital discharge data obtained from electronic medical records included demographics, medical history (diagnoses), diabetes status, laboratory tests, drug treatments and blood glucose records. The Scientific Research Committees and Ethics Committees of the four hospitals approved the study.

There were 217 patients with PreDM in the database. Age- and sex-matched T2DM patient records (n = 900) were obtained from the database of the four centers for the calculation of 10-year ICVD risk. We included in the analysis those who were ≥ 35 years of age and had a diagnosis of PreDM or T2DM confirmed using a screening 2-h oral glucose tolerance test (OGTT). Participants were diagnosed with PreDM according to American Diabetes Association criteria: impaired fasting glucose (5.6 to 6.9 mmol/L) and/or impaired oral glucose tolerance (7.8 to 11.1 mmol/L).
Exclusion criteria were pregnancy or lactation, liver or kidney dysfunction, malignant tumor, mental illness, or a diagnosis of coronary heart disease or ischemic stroke.

Collection of clinical and laboratory parameters
Participants underwent anthropometric measurements, questionnaires and blood drawing at baseline. Smoking was defined as consumption of more than one cigarette per day for > 1 year. Weight (kg) and height (cm) were determined using a standard hospital balance scale and a metal ruler; participants wore light clothing and no shoes. Waist circumference was measured twice at the level of the umbilicus, with the patient standing and breathing normally, and the two measurements were averaged. BMI was calculated as body weight (kg) divided by height squared (m²). Overweight was defined using the Asian standard as a BMI of 24 kg/m² or greater. Systolic and diastolic blood pressure (SBP, DBP) were taken on the dominant arm in a sitting position using a standard manual sphygmomanometer and an adult-size cuff. Hypertension was defined as SBP ≥ 140 mmHg and/or DBP ≥ 90 mmHg on three different days in the absence of antihypertensive drugs, or as a previous diagnosis of hypertension with current antihypertensive medication. Awareness of hypertension was defined as a participant's self-report of diagnosed high BP. Treated hypertension was defined as current use of antihypertensive drugs, and controlled hypertension was defined as a participant's report of antihypertensive treatment together with a clinical measurement of SBP < 140 mmHg and DBP < 90 mmHg. Total cholesterol (TC), triglycerides (TG), high-density lipoprotein cholesterol (HDL) and low-density lipoprotein cholesterol (LDL) were measured after a 10-h fast using a standardized and reliable method on an automatic biochemical analyzer. Hyperlipidemia was defined as TC > 5.18 mmol/L and/or TG > 1.70 mmol/L. Awareness of hyperlipidemia was defined as a self-report of any prior diagnosis of hyperlipidemia by a medical doctor. Treatment of hyperlipidemia was defined as use of pharmacological treatment to manage hyperlipidemia. Participants were considered to have controlled hyperlipidemia if TC was < 5.18 mmol/L and TG was < 1.70 mmol/L after treatment.

Calculation of absolute ICVD risk based on clinical and biochemical characteristics
The 10-year ICVD risk scores of participants were evaluated using the ICVD risk engine. The items included sex (male or female), age (≥ 35 years), BMI, current smoking status (no or yes), SBP, TC and diabetes. A risk value was calculated from these seven indices to obtain an absolute 10-year ICVD risk (Appendix). According to the risk value, participants were divided into extremely high risk (≥ 40%), high risk (20–40%), moderate risk (10–20%), low risk (5–10%) and extremely low risk (< 5%). Therefore, the commonly used cut-off value of > 10% indicates moderate or greater risk.
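A minimal sketch of the five-level categorization above is given below for clarity. The risk engine itself (the seven-variable model in the Appendix) is not reproduced here; risk_percent is assumed to be its output, the absolute 10-year ICVD risk expressed as a percentage, and the function names are hypothetical.

# Hypothetical helpers mapping an absolute 10-year ICVD risk (in %) onto the
# categories used in the study. Category boundaries follow the text as written.

def categorize_icvd_risk(risk_percent):
    """Return the study's risk category for an absolute 10-year ICVD risk (%)."""
    if risk_percent >= 40:
        return "extremely high"   # >= 40%
    if risk_percent >= 20:
        return "high"             # 20-40%
    if risk_percent >= 10:
        return "moderate"         # 10-20%
    if risk_percent >= 5:
        return "low"              # 5-10%
    return "extremely low"        # < 5%


def moderate_or_greater(risk_percent):
    """Commonly used cut-off: > 10% indicates moderate or greater risk."""
    return risk_percent > 10


# Example: a participant with an estimated 12% absolute risk.
print(categorize_icvd_risk(12.0), moderate_or_greater(12.0))  # -> moderate True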
Statistical analysis
Epidata 3.1 and SPSS 22.0 were used. Continuous variables are presented as the mean ± SD, whereas categorical variables are presented as frequencies and percentages. Data were tested for normality of distribution with the Kolmogorov-Smirnov test. If data were not normally distributed, the corresponding nonparametric test was used. Sex differences in baseline characteristics of those with PreDM and T2DM were analyzed using independent-groups t-tests or a non-parametric test for continuous variables, and chi-square tests for categorical variables (smoking and > 10% ICVD risk). We calculated the 10-year ICVD risk of each individual using the risk engine. Differences in 10-year absolute ICVD risk according to diabetes status (PreDM, T2DM < 1 year, T2DM 1–5 years or T2DM ≥ 5 years) and sex were analyzed using ANOVA. Differences in prevalence, awareness, treatment and control of hypertension and hyperlipidemia between males and females with T2DM were analyzed using chi-square tests. A p value ≤ 0.05 was considered statistically significant.

Sample characteristics
Baseline clinical and biochemical characteristics are presented in Table 1. PreDM patients included 77 males and 140 females, and those with T2DM included 300 males and 600 females. When compared to PreDM females, males with PreDM had significantly higher 10-year ICVD risk. In contrast, the opposite pattern was observed in T2DM: males had significantly lower 10-year ICVD risk. Also, compared to T2DM females, males with T2DM had a lower proportion with moderate or greater ICVD risk (p < 0.001).

10-year ICVD risk in male and female patients with PreDM or T2DM
We divided the T2DM patients into three subgroups according to diabetes duration: T2DM < 1 year, T2DM 1–5 years or T2DM ≥ 5 years. The pattern of 10-year ICVD risk across PreDM and T2DM differed between males and females (Fig. 1 and Fig. 2). When compared to PreDM males, males with T2DM < 1 year and with T2DM 1–5 years showed no difference in 10-year ICVD risk, whereas males with T2DM ≥ 5 years had higher risk (p < 0.05). Compared to PreDM females, females with T2DM in all duration subgroups had higher ICVD risk (p < 0.05). These findings indicate that 10-year ICVD risk rises at different points of diabetes duration in male and female patients.

Prevalence, awareness, treatment and control of hypertension and hyperlipidemia in male and female Chinese with T2DM
In the overall T2DM sample (n = 900), the rates of awareness, treatment and control of hypertension were 78.60%, 65.38% and 31.10%, respectively, whereas the corresponding rates for hyperlipidemia were lower (29.15%, 8.30% and 3.47%, respectively). These outcomes are presented according to sex in Table 2. Females with T2DM had a significantly higher prevalence, awareness, treatment and control of hypertension, as well as higher prevalence, awareness and treatment of hyperlipidemia, than males with T2DM. The rates of awareness, treatment and control of hypertension and hyperlipidemia were very low in both males and females.
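To make the awareness/treatment/control definitions concrete, the following is a minimal sketch of how such rates could be tabulated from individual records. The record field names (has_hypertension, aware, on_antihypertensives, sbp, dbp) are assumptions for illustration, not the study's actual variables.

# Hypothetical sketch of a hypertension care cascade, following the
# definitions in the Methods. Awareness, treatment and control rates are
# computed among patients with hypertension; prevalence is among all patients.

def hypertension_care_cascade(patients):
    """Return prevalence, awareness, treatment and control rates (as fractions)."""
    n = len(patients) or 1  # assume a non-empty sample; guard against division by zero
    hypertensive = [p for p in patients if p["has_hypertension"]]
    aware = [p for p in hypertensive if p["aware"]]
    treated = [p for p in hypertensive if p["on_antihypertensives"]]
    # Controlled: on treatment with SBP < 140 mmHg and DBP < 90 mmHg.
    controlled = [p for p in treated if p["sbp"] < 140 and p["dbp"] < 90]
    denom = len(hypertensive) or 1
    return {
        "prevalence": len(hypertensive) / n,
        "awareness": len(aware) / denom,
        "treatment": len(treated) / denom,
        "control": len(controlled) / denom,
    }


# Small worked example with three hypothetical records.
example = [
    {"has_hypertension": True, "aware": True, "on_antihypertensives": True, "sbp": 132, "dbp": 84},
    {"has_hypertension": True, "aware": False, "on_antihypertensives": False, "sbp": 150, "dbp": 95},
    {"has_hypertension": False, "aware": False, "on_antihypertensives": False, "sbp": 118, "dbp": 76},
]
print(hypertension_care_cascade(example))
# -> prevalence ~0.67, awareness 0.5, treatment 0.5, control 0.5

Sex differences in such rates could then be compared with a chi-square test (for example, scipy.stats.chi2_contingency on the corresponding counts), in line with the statistical analysis described above.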
Discussion
An interesting aspect of our study is that the 10-year ICVD risk was higher in males with PreDM, whereas the difference was reversed in T2DM. A similar finding was observed in a previous study, which showed that, at a given glycemic state, the absolute CVD risk was higher in males than in females. However, this difference narrowed when progressing from normal glycemia to newly diagnosed diabetes, and the relative risk of CVD was higher in females with DM than in their male counterparts. Additionally, males with T2DM had a significantly lower proportion in the moderate or greater risk category than females. This is inconsistent with a Caribbean study, which reported a relatively higher proportion of male patients at > 15% 10-year ICVD risk compared to females (60.4% vs 30.6%). This difference might be due to the use of different risk models and algorithms, with different risk categories, in different racial and ethnic groups. Our study suggests that Chinese female T2DM patients are at higher 10-year ICVD risk, and great attention should be paid to this finding.

Compared to PreDM, significantly higher 10-year ICVD risk occurred in females with T2DM of < 1 year duration, whereas in males this was observed only with T2DM of ≥ 5 years duration. The implication is that earlier intervention programs should be implemented for female patients with T2DM. Two reasons may account for the 10-year ICVD pattern being worse in females. First, accumulating evidence demonstrates that diabetes alters estrogen-related protective mechanisms and causes pronounced adverse changes in cardiovascular risk factors, leading to enhanced atherogenesis in females. In our study, the average age of females with T2DM was about 61 years; gradually decreasing estrogen levels may indirectly result in higher ICVD risk. Second, in a cross-sectional analysis, females were less likely than males to meet the goals for LDL cholesterol, which suggests the need for sex-specific approaches. A similar pattern was found in our study population: females with T2DM had a higher prevalence of hypertension and hyperlipidemia. Additionally, females with T2DM had significantly less optimal HOMA-IR values than males. Insulin resistance deserves attention because it has been shown to be an independent risk factor for cardiovascular events.

Hypertension and hyperlipidemia are well-established causal risk factors for ischemic cardiovascular disease. In the current study, the prevalence, awareness, treatment and control of hypertension overall were 66.44%, 78.60%, 65.38% and 31.10%, respectively; more than half of the T2DM patients had hypertension. A similar result was obtained in a US study that found the frequency of hypertension to be 63% in 2012 among both prevalent and newly diagnosed T2DM cohorts. Although the awareness and treatment rates of hypertension in our study signal a need for more interventions in both males and females, it is the control rate on which researchers and clinicians in China must first focus. In addition, primary care physicians and other public health professionals should be encouraged to continue expanding their efforts to control high blood pressure in Chinese patients.

We also observed that the prevalence, awareness, treatment and control of hyperlipidemia were 57.56%, 29.15%, 8.30% and 3.47%, respectively. A similar prevalence was found in a US study, which reported that the frequency of hyperlipidemia in T2DM was 56.9%. The awareness, treatment and control of hyperlipidemia were low, possibly suggesting that people do not pay attention to their health or that people with hyperlipidemia do not adhere to prescribed treatments even after becoming aware of the condition. Therefore, a national hyperlipidemia education program to promote community- and clinic-based serum lipid screening is urgently needed in China.
A strength of the current study is that it is the first to report absolute 10-year ICVD risk in Chinese patients with PreDM or T2DM. The study also has several limitations. First, it lacked data on the awareness, treatment and control of hypertension and hyperlipidemia in those with PreDM, as well as data on physical activity, sedentary time and specific medical treatments, which are strongly associated with the development of metabolic syndrome, T2DM and CVD. Second, data on smoking, hypertension and hyperlipidemia were self-reported.

Conclusions
The current study demonstrated that the windows for focusing on ischemic cardiovascular disease differ by sex: within the first 5 years of T2DM for males and within the first year for females. There is a greater need for cardiovascular risk reduction programs for females with T2DM as soon as they are diagnosed. Also, given the higher prevalence of hypertension and hyperlipidemia in females, significant resources must be directed toward them. Diabetes education for males is also needed within the first 5 years of T2DM, in particular to improve awareness, treatment and control of hypertension and hyperlipidemia. A key to reducing 10-year risk is continued intensive follow-up, which may or may not be feasible in China given the large number of people affected.
// Flags is the command flags func (cmd *CommandCreate) Flags() []flags.Flag { return []flags.Flag{ remoteAppFlag(&cmd.inputs.RemoteApp), flags.StringFlag{ Value: &cmd.inputs.LocalPath, Meta: flags.Meta{ Name: flagLocalPathCreate, Usage: flags.Usage{ Description: "Specify the local filepath of a Realm app to be created", }, }, }, nameFlag(&cmd.inputs.Name), locationFlag(&cmd.inputs.Location), deploymentModelFlag(&cmd.inputs.DeploymentModel), environmentFlag(&cmd.inputs.Environment), flags.StringSliceFlag{ Value: &cmd.inputs.Clusters, Meta: flags.Meta{ Name: flagCluster, Usage: flags.Usage{ Description: "Link Atlas cluster(s) to your Realm app", Note: "Only one cluster can be linked during app creation if creating a template app", }, }, }, flags.StringSliceFlag{ Value: &cmd.inputs.ClusterServiceNames, Meta: flags.Meta{ Name: flagClusterServiceName, Usage: flags.Usage{ Description: "Specify the Realm app Service name to reference your Atlas cluster", Note: "Service names will be overwritten when creating a template app", }, }, }, flags.StringSliceFlag{ Value: &cmd.inputs.Datalakes, Meta: flags.Meta{ Name: flagDatalake, Usage: flags.Usage{ Description: "Link Atlas data lake(s) to your Realm app", Note: "Data lakes cannot be used to create template apps", }, }, }, flags.StringSliceFlag{ Value: &cmd.inputs.DatalakeServiceNames, Meta: flags.Meta{ Name: flagDatalakeServiceName, Usage: flags.Usage{ Description: "Specify the Realm app Service name to reference your Atlas data lake", }, }, }, flags.StringFlag{ Value: &cmd.inputs.Template, Meta: flags.Meta{ Name: flagTemplate, Usage: flags.Usage{ Description: "Create your Realm app from an available template", AllowedValues: realm.AllowedTemplates, }, }, }, flags.BoolFlag{ Value: &cmd.inputs.DryRun, Meta: flags.Meta{ Name: flagDryRun, Shorthand: "x", Usage: flags.Usage{ Description: "Run without writing any changes to the local filepath or pushing any changes to the Realm server", }, }, }, cli.ProjectFlag(&cmd.inputs.Project), cli.ConfigVersionFlag(&cmd.inputs.ConfigVersion, flagConfigVersionDescription), } }