content
stringlengths
10
4.9M
/* * Copyright 2017, Leanplum, Inc. All rights reserved. * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.leanplum; import android.app.NotificationChannel; import android.app.NotificationChannelGroup; import com.leanplum.__setup.AbstractTest; import com.leanplum.utils.BuildUtil; import org.json.JSONArray; import org.junit.Test; import org.mockito.Matchers; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.reflect.Whitebox; import org.robolectric.annotation.Config; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertNotNull; import static org.powermock.api.mockito.PowerMockito.doReturn; import static org.powermock.api.mockito.PowerMockito.spy; @Config( sdk = 26 ) @PrepareForTest(value = { LeanplumNotificationChannel.class, BuildUtil.class }) /** * Notifiction channels tests * @author <NAME> */ public class LeanplumNotificationChannelTests extends AbstractTest { @Override public void after() { // Do nothing. 
} @Override public void before() throws Exception { super.before(); spy(LeanplumNotificationChannel.class); spy(BuildUtil.class); doReturn(26).when(BuildUtil.class, "getTargetSdkVersion", Matchers.anyObject()); } @Test public void testNotificationChannels() throws Exception { List<HashMap<String, Object>> channelList = new ArrayList<>(); List<HashMap<String, Object>> groupList = new ArrayList<>(); HashMap<String, Object> group1 = new HashMap<>(); group1.put("id", "id_1"); group1.put("name", "name_1"); HashMap<String, Object> group2 = new HashMap<>(); group2.put("id", "id_2"); group2.put("name", "name_2"); HashMap<String, Object> group3 = new HashMap<>(); group3.put("id", "id_3"); group3.put("name", "name_3"); HashMap<String, Object> channel1 = new HashMap<>(); channel1.put("id", "id_1"); channel1.put("name", "name_1"); channel1.put("importance", 1); channel1.put("description", "description_1"); channel1.put("enable_vibration", true); channel1.put("vibration_pattern", new long[] {1, 2, 3, 4, 5}); HashMap<String, Object> channel2 = new HashMap<>(); channel2.put("id", "id_2"); channel2.put("name", "name_2"); channel2.put("importance", 1); channel2.put("description", "description_2"); channel2.put("enable_vibration", true); channel2.put("vibration_pattern", new long[] {1L, 2L, 3L, 4L, 5L}); HashMap<String, Object> channel3 = new HashMap<>(); channel3.put("id", "id_3"); channel3.put("name", "name_3"); channel3.put("importance", 1); channel3.put("description", "description_3"); channel3.put("enable_vibration", true); channel3.put("vibration_pattern", new ArrayList<Long>() {{add(1L); add(2L); add(3L);}}); groupList.add(group1); groupList.add(group2); groupList.add(group3); channelList.add(channel1); channelList.add(channel2); channelList.add(channel3); JSONArray groups = new JSONArray(groupList); JSONArray channels = new JSONArray(channelList); LeanplumNotificationChannel.configureNotificationChannels(Leanplum.getContext(), channels); 
LeanplumNotificationChannel.configureNotificationGroups(Leanplum.getContext(), groups); List<HashMap<String, Object>> retrievedChannels = Whitebox.invokeMethod( LeanplumNotificationChannel.class, "retrieveNotificationChannels", Leanplum.getContext()); List<HashMap<String, Object>> retrievedGroups = Whitebox.invokeMethod( LeanplumNotificationChannel.class, "retrieveNotificationGroups", Leanplum.getContext()); assertEquals(3, retrievedChannels.size()); assertEquals(3, retrievedGroups.size()); List<NotificationChannel> notificationChannels = LeanplumNotificationChannel. getNotificationChannels(Leanplum.getContext()); List<NotificationChannelGroup> notificationGroups = LeanplumNotificationChannel. getNotificationGroups(Leanplum.getContext()); assertNotNull(notificationChannels); assertNotNull(notificationGroups); assertEquals(3, notificationChannels.size()); assertEquals(3, notificationGroups.size()); groupList.clear(); groupList.add(group1); groupList.add(group3); groups = new JSONArray(groupList); channelList.clear(); channelList.add(channel1); channelList.add(channel3); channels = new JSONArray(channelList); LeanplumNotificationChannel.configureNotificationChannels(Leanplum.getContext(), channels); LeanplumNotificationChannel.configureNotificationGroups(Leanplum.getContext(), groups); retrievedChannels = Whitebox.invokeMethod( LeanplumNotificationChannel.class, "retrieveNotificationChannels", Leanplum.getContext()); retrievedGroups = Whitebox.invokeMethod( LeanplumNotificationChannel.class, "retrieveNotificationGroups", Leanplum.getContext()); assertEquals(2, retrievedChannels.size()); assertEquals(2, retrievedGroups.size()); notificationChannels = LeanplumNotificationChannel. getNotificationChannels(Leanplum.getContext()); notificationGroups = LeanplumNotificationChannel. 
getNotificationGroups(Leanplum.getContext()); assertNotNull(notificationChannels); assertNotNull(notificationGroups); assertEquals(2, notificationChannels.size()); // Uncomment when robolectric fixes notification group deletion. // assertEquals(2, notificationGroups.size()); } @Test public void testDefaultNotificationChannels() throws Exception { String defaultChannelId = "id_1"; LeanplumNotificationChannel.configureDefaultNotificationChannel(Leanplum.getContext(), defaultChannelId); String channelId = Whitebox.invokeMethod( LeanplumNotificationChannel.class, "retrieveDefaultNotificationChannel", Leanplum.getContext()); assertNotNull(channelId); assertEquals(defaultChannelId, channelId); } }
/**
 * Creates an instance of the given class and injects all fields marked with
 * {@link Inject} on that instance.
 * <p>
 * Interfaces are resolved to a registered implementation; classes annotated
 * with {@link Singleton} always yield the shared singleton instance.
 *
 * @param toInstantiate class of the object to instantiate
 * @return an instance of the given class with injected fields
 */
public static <T> T createInstance(Class<T> toInstantiate) {
    if (toInstantiate.isInterface()) {
        // Interfaces cannot be instantiated directly; look up the bound implementation.
        return getImplementingClass(toInstantiate);
    }
    if (toInstantiate.getAnnotation(Singleton.class) != null) {
        return getSingleton(toInstantiate);
    }
    return getInstance(toInstantiate);
}
<filename>packages/sveet/src/query/globalContext.ts import { StaticClient } from "./StaticClient"; import { getContext, setContext } from "svelte"; const contextKey = {}; export function getStaticClient(): StaticClient { return getContext(contextKey); } export function setStaticClient(staticClient: StaticClient) { setContext(contextKey, staticClient); }
The Breeding Ecology of the Greenfinch Carduelis chloris in Urban Conditions (Study in Krotoszyn, W Poland) Abstract. Data were collected in a medium-sized town. During five years 342 nests were found. The densities of breeding pairs varied over this period between 4.5 and 5.9 p/10 ha. The distribution of breeding pairs was uneven throughout the study area. The preferred nest sites were the roadside trees, where 88.9% of the nests were built. The mean onset of egg-laying was 22 April (range 19–26 April). There was a tendency to start breeding earlier in warmer springs. The mean clutch size was 5.07 ± 0.74. There was a positive correlation between clutch size and the date of egg-laying. These data suggest that there was a compromise between the tendency towards earlier breeding and clutch size. In the study area the Greenfinch is a double-brooded species. Unlike other studies it was noted that the average clutch size increased in the second half of the breeding season. The maximum clutch size coincides with the second or replacement clutches. Hatching, fledging and breeding success were lowest when clutch sizes were largest. The nesting success estimated with the Mayfield and the “traditional” method was approximately similar (0.40 and 0.44 respectively). Cats and mustelids were probably the cause of most breeding failures. Corvids were not responsible for nesting failures.
// Unit tests (tap) for the promise operators: then, catchError, match and
// maybeMatch. Each test name documents the signature variant being exercised,
// e.g. "(T -> U) -> Promise T -> Promise U" means "a sync mapper applied to an
// already-promised value".
import * as Operators from "./operators";
import * as test from "tap";
import { $$ } from "../pipe";
import { toArray } from "../iterable/operators";

test.test("promise/operators", test => {
  // then: applies a (possibly async) mapper to a plain or promised value.
  test.test("then :: (T -> U) -> T -> Promise U", async test => {
    const result = await $$(10).$$(Operators.then(x => x * 20));
    test.equals(result, 200);
    test.end();
  });
  test.test("then :: (T -> U) -> Promise T -> Promise U", async test => {
    const result = await $$(Promise.resolve(10)).$$(Operators.then(x => x * 20));
    test.equals(result, 200);
    test.end();
  });
  test.test("then :: (T -> Promise U) -> T -> Promise U", async test => {
    const result = await $$(10).$$(Operators.then(x => Promise.resolve(x * 20)));
    test.equals(result, 200);
    test.end();
  });
  test.test("then :: (T -> Promise U) -> Promise T -> Promise U", async test => {
    const result = await $$(Promise.resolve(10)).$$(Operators.then(x => Promise.resolve(x * 20)));
    test.equals(result, 200);
    test.end();
  });
  // catchError: on a non-rejected input the handler is never invoked and the
  // original value passes through unchanged.
  test.test("catchError :: (Error -> U) -> T -> Promise T", async test => {
    const result = await $$(10).$$(Operators.catchError(x => 100));
    test.equals(result, 10);
    test.end();
  });
  test.test("catchError :: (Error -> U) -> Promise T -> Promise T", async test => {
    const result = await $$(Promise.resolve(10)).$$(Operators.catchError(x => 100));
    test.equals(result, 10);
    test.end();
  });
  test.test("catchError :: (Error -> Promise U) -> T -> Promise T", async test => {
    const result = await $$(10).$$(Operators.catchError(x => Promise.resolve(100)));
    test.equals(result, 10);
    test.end();
  });
  test.test("catchError :: (Error -> Promise U) -> Promise T -> Promise T", async test => {
    const result = await $$(Promise.resolve(10)).$$(Operators.catchError(x => Promise.resolve(100)));
    test.equals(result, 10);
    test.end();
  });
  // catchError on a rejected promise: the handler receives the rejection
  // reason and its result becomes the resolved value.
  test.test("catchError :: (Error -> U) -> Error -> Promise U", async test => {
    const result = await $$(Promise.reject(10)).$$(Operators.catchError(x => `${x}:${100}`));
    test.equals(result, "10:100");
    test.end();
  });
  test.test("catchError :: (Error -> Promise U) -> Error -> Promise U", async test => {
    const result = await $$(Promise.reject(10)).$$(Operators.catchError(x => Promise.resolve(`${x}:${100}`)));
    test.equals(result, "10:100");
    test.end();
  });
  // match: first pattern whose predicate (or literal) matches wins; both
  // predicates and result expressions may be literals, sync or async functions.
  test.test("match :: (Patterns T U) T -> Promise U", async test => {
    test.equals(await Operators.match([() => true, x => x.toString()])(10), "10");
    test.equals(await Operators.match([() => true, x => x.toString()])(Promise.resolve(10)), "10");
    test.equals(await Operators.match([() => true, "10"])(10), "10");
    test.equals(await Operators.match([() => true, async x => x.toString()])(10), "10");
    test.equals(await Operators.match([() => Promise.resolve<true>(true), "10"])(10), "10");
    test.equals(await Operators.match([(x) => false, 25], [(x) => Promise.resolve<true>(true), "10"])(Promise.resolve(10)), "10");
    test.equals(await Operators.match(
      [11, "26"],
      [10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10), "25");
    test.equals(await Operators.match(
      [async x => x !== 10, "26"],
      [async x => x === 10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10), "25");
    test.equals(await Operators.match(
      [x => x !== 10, "26"],
      [x => x === 10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10), "25");
    test.equals(await Operators.match(
      [x => x !== 10, "26"],
      [x => x === 10, x => (x + 15).toString()],
      [12, "55"],
      [() => true, () => ""]
    )(10), "25");
    test.equals(await Operators.match(
      [async x => x !== 10, "26"],
      [async x => x === 10, async x => (x + 15).toString()],
      [12, "55"],
      [() => true, async () => ""]
    )(10), "25");
    test.equals(await Operators.match(
      [10, "25"],
      [() => true, ""]
    )(10), "25");
    test.end();
  });
  // maybeMatch: like match but wraps the result in a Maybe; toArray() turns it
  // into [] (no match) or [value] (matched).
  test.test("maybeMatch :: (Patterns T U) T -> Promise Maybe U", async test => {
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => true, x => x.toString()])(10)), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => true, x => x.toString()])(Promise.resolve(10))), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => true, "10"])(10)), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => true, async x => x.toString()])(10)), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => Promise.resolve<true>(true), "10"])(10)), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch([(x: number) => false, "25"], [(x: number) => Promise.resolve(true), "10"])(Promise.resolve(10))), ["10"]);
    test.deepEquals(toArray(await Operators.maybeMatch(
      [11, "26"],
      [10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10)), ["25"]);
    test.deepEquals(toArray(await Operators.maybeMatch(
      [async x => x !== 10, "26"],
      [async x => x === 10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10)), ["25"]);
    test.deepEquals(toArray(await Operators.maybeMatch(
      [x => x !== 10, "26"],
      [x => x === 10, "25"],
      [12, "55"],
      [() => true, ""]
    )(10)), ["25"]);
    test.deepEquals(toArray(await Operators.maybeMatch(
      [x => x !== 10, "26"],
      [x => x === 10, x => (x + 15).toString()],
      [12, "55"],
      [() => true, () => ""]
    )(10)), ["25"]);
    test.deepEquals(toArray(await Operators.maybeMatch<number, string>(
      [async x => x !== 10, "26"],
      [async x => x === 10, async x => (x + 15).toString()],
      [12, "55"],
      [x => true, async () => ""]
    )(10)), ["25"]);
    test.deepEquals(toArray(await Operators.maybeMatch([10, "25"], [() => true, ""])(10)), ["25"]);
    test.end();
  });
  test.end();
});
<reponame>Lyndeno/illuminator<filename>src/i2c.rs use ddc::Ddc; use ddc_i2c::I2cDeviceDdc; use std::io::{Error, ErrorKind}; use crate::brightness::Brightness; static VCP_BRIGHTNESS: u8 = 0x10; pub struct I2cBacklight { device: I2cDeviceDdc, } impl I2cBacklight { pub fn new(i2c_path: String) -> Result<I2cBacklight, std::io::Error> { match ddc_i2c::from_i2c_device(i2c_path) { Ok(backlight) => Ok(I2cBacklight { device: backlight, }), Err(e) => Err(e), } } } impl Brightness for I2cBacklight { fn set_brightness(&mut self, to: u16) -> Result<(), Error > { match self.device.set_vcp_feature(VCP_BRIGHTNESS, to) { Ok(_) => Ok(()), Err(e) => Err(Error::new(ErrorKind::Other, e)), } } fn get_brightness(&mut self) -> Option<u16> { match self.device.get_vcp_feature(VCP_BRIGHTNESS) { Ok(brightness) => Some(brightness.value()), Err(_) => None, } } }
Don’t be surprised if you notice a change in your Facebook Page design starting today, because the company is supposedly introducing some quite notable changes to the page layout (the layout has received a minimal makeover once again; scroll down to the very bottom to see the updated design). The Tech Portal Facebook Page has received a beautiful makeover today, and we were surprised to see the change ourselves. We scoured the interwebs, but couldn’t find any trace of a new page layout being introduced to the users. So we thought that this could mean that Facebook is testing another Page redesign, rolling it out to only a specific set of users. And we’re lucky enough to be one of them (supposedly). There is no official word from Facebook about the changes that have been introduced, but here’s a sneak peek of what’s in store: The first thing you will notice when you see the new Page layout is that it feels uncluttered and spacious. Earlier, Facebook had crammed all information, buttons and features at the top of the page, making it look undesirable. In the page redesign, the profile picture has been shrunk in size and moved right below the cover image. The profile picture (or your identity) no longer overshadows a part of the cover photo. But the profile picture’s size seems to be too small for something that defines the identity of your business. The title, username and call-to-action (CTA) buttons on your page have also been moved onto the white background underneath the cover picture. The call-to-action buttons, i.e. Like, Message or Visit Website, are now more prominent and no longer crammed onto the cover image. They still retain their position on the right, but are now better organised and visible to the user. This update makes your cover photo the center of attraction of your Facebook Page. The cover image is now a bit stretched out and engulfs more area of your Page.
All the matter hiding the cover photo (as mentioned above) has been made more prominent and moved to the bottom of it. And now the next question you should be asking is — where are the tabbed links? In the previous page layout, the tabbed links were placed underneath the cover image, on a white background. But in the new design, the white background is now being used by the profile picture and call-to-action buttons. The tabbed links have been moved to the left-hand side corner, and remind you of the original Facebook layout (launched in 2003). The tabbed grid in the redesign is vertical instead of being horizontal. And all your promotions, about and info columns have been shifted over to the right, making use of the extra space that had previously been left out. No change has been introduced in the post cards; they still pile up in the center and display information like usual. The Next Web’s Matt Navarra had also spotted this in early June, but not many users were able to see the same. A few examples of Facebook’s new design for Pages. H/T @akatrinas pic.twitter.com/v4hSZtOvvG — Matt Navarra ⭐️ (@MattNavarra) June 6, 2016 This doesn’t seem to be a major overhaul in the design language of the layout. Only a couple of minor position adjustments and button changes have been introduced. It does, however, appear squarer, with its usage of straight lines and fewer rounded rectangles. We’re still not sure if the new design is rolling out for all users, but we’ll keep you updated on the same. UPDATE (minimal changes): Facebook can’t seem to settle on one particular layout, and seems to be testing a host of different layouts for its business pages. In this latest update, it seems that Facebook has realised the importance of the profile picture (which represents your brand and identity) and thus it has again been inflated in size. The profile picture has now been given a prominent placement on the left, next to the cover photo and above the menu sidebar.
In addition to this, the cover photo has again been reduced and returned to its original size (as in the previous layout). And it is also still free of anything overlaying and hiding any part of it. Facebook has finally updated its Help Center with new profile picture and cover picture sizes; you can click here to know more about it. The post engagement card, however, has now been shifted and is shown atop the news feed, just below the status update box. The social media giant has placed the card front and center — right before your eyes — as it wants you to focus more on improving your numbers and bringing more audience to your website (or more money to them — and vice-versa).
from django.db.models import Q
from rest_framework import generics, permissions

from battles.api.permissions import IsTrainerOfTeam
from battles.api.serializers import BattleSerializer, CreateBattleSerializer, SelectTeamSerializer
from battles.models import Battle, Team


def _battles_involving(user):
    """Return battles where ``user`` is the creator or the opponent, newest first.

    Shared by the list and detail views so the visibility rule is defined in
    exactly one place.
    """
    return Battle.objects.filter(Q(creator=user) | Q(opponent=user)).order_by("-id")


class BattleListView(generics.ListAPIView):
    """List every battle the authenticated user takes part in."""

    serializer_class = BattleSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        return _battles_involving(self.request.user)


class BattleDetailView(generics.RetrieveAPIView):
    """Retrieve a single battle, visible only to its participants."""

    serializer_class = BattleSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        # Scoping the queryset (instead of checking in get_object) makes
        # battles the user is not part of return 404 rather than leak data.
        return _battles_involving(self.request.user)


class CreateBattleView(generics.CreateAPIView):
    """Create a new battle; any authenticated user may do so."""

    serializer_class = CreateBattleSerializer
    permission_classes = [permissions.IsAuthenticated]


class SelectTeamView(generics.UpdateAPIView):
    """Update a team's selection; only that team's trainer is allowed."""

    serializer_class = SelectTeamSerializer
    queryset = Team.objects.all()
    permission_classes = [IsTrainerOfTeam]
/**
 * <p>
 * A specialized parameters object for JNDI configurations.
 * </p>
 * <p>
 * In addition to the basic properties common to all configuration
 * implementations, a JNDI configuration has some special properties defining
 * the subset of the JNDI tree to be managed. This class provides fluent methods
 * for setting these. The {@code getParameters()} method puts all properties
 * defined by the user in a map from where they can be accessed by a builder for
 * JNDI configurations.
 * </p>
 * <p>
 * This class is not thread-safe. It is intended that an instance is constructed
 * and initialized by a single thread during configuration of a
 * {@code ConfigurationBuilder}.
 * </p>
 *
 * @version $Id$
 * @since 2.0
 */
public class JndiBuilderParametersImpl extends BasicBuilderParameters implements
        JndiBuilderProperties<JndiBuilderParametersImpl>
{
    /** Key under which the JNDI context is stored in the parameters map. */
    private static final String PROP_CONTEXT = "context";

    /** Key under which the name prefix is stored in the parameters map. */
    private static final String PROP_PREFIX = "prefix";

    @Override
    public JndiBuilderParametersImpl setContext(Context context)
    {
        storeProperty(PROP_CONTEXT, context);
        return this;
    }

    @Override
    public JndiBuilderParametersImpl setPrefix(String prefix)
    {
        storeProperty(PROP_PREFIX, prefix);
        return this;
    }
}
// RentCharge returns the monthly rent charge func (l *LeaseInfo) RentCharge() float64 { if !l.Valid() { return -1 } return (l.ResidualValue() + l.AdjustedCost()) * l.MoneyFactor }
/****************************************************************************************************//**
 * @file     FM33A0XX.h
 *
 * @brief    CMSIS Cortex-M0 Peripheral Access Layer Header File for
 *           FM33A0XX from Keil.
 *
 * @version  V1.1
 * @date     30-11-2017
 *
 * @note     Generated with SVDConv V2.87e
 *           from CMSIS SVD File 'FM32L0XX.SVD' Version 1.0,
 *
 * @par      ARM Limited (ARM) is supplying this software for use with Cortex-M
 *           processor based microcontroller, but can be equally used for other
 *           suitable processor architectures. This file can be freely distributed.
 *           Modifications to this file shall be clearly marked.
 *
 *           THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
 *           OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
 *           MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
 *           ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
 *           CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *******************************************************************************************************/

/** @addtogroup Keil
  * @{
  */

/** @addtogroup FM33A0XX
  * @{
  */

#ifndef FM33A0XX_H
#define FM33A0XX_H

#ifdef __cplusplus
extern "C" {
#endif

/* Generic two-state flag/status enumerations shared by the peripheral drivers. */
typedef enum {RESET = 0, SET = !RESET} FlagStatus, ITStatus, FlagState, IFState;
typedef enum {DISABLE = 0, ENABLE = !DISABLE} FunctionalState, FunState;
typedef enum {FAIL = 0, PASS = !FAIL} ErrorStatus, ResState;

/* Peripheral index numbers (e.g. for per-peripheral clock/reset selection). */
typedef enum
{
  SCU_num = 0, PMU_num = 1, RTC_num = 2, LPTRC_num = 3, CRC_num = 4,
  TRNG_num = 5, ADC_num = 6, LCD_num = 7, DMA_num = 8, FLS_num = 9,
  AES_num = 10, SPI1_num = 11, SPI2_num = 12, HSPI_num = 13, UART0_num = 14,
  UART1_num = 15, UART2_num = 16, UART3_num = 17, UART4_num = 18, UART5_num = 19,
  U78160_num = 20, U78161_num = 21, I2C_num = 22, BT1_num = 23, BT2_num = 24,
  ET1_num = 25, ET2_num = 26, ET3_num = 27, ET4_num = 28, LPTFC_num = 29,
  IWDT_num = 30, PDC_num = 31, ANA_num = 32, EXTI0_num = 33, EXTI1_num = 34,
  EXTI2_num = 35, DCU_num = 36, RAMBIST_num = 37, WWDT_num = 38, UART_COM_num = 39,
} Periph_Type;

/* Oscillator frequencies. */
#define __RCHF_INITIAL_CLOCK    (8000000)       /* Value of the Internal RC HIGH oscillator in Hz */
#define __RCLP_CLOCK            (32000)         /* Value of the Internal RC LOW oscillator in Hz */
#define __XTLF_CLOCK            (32768)         /* Value of the EXTERNAL oscillator in Hz */

/* ------------------------- Interrupt Number Definition ------------------------ */

typedef enum {
/* -------------------  Cortex-M0 Processor Exceptions Numbers  ------------------- */
  Reset_IRQn          = -15, /*!<   1  Reset Vector, invoked on Power up and warm reset */
  NonMaskableInt_IRQn = -14, /*!<   2  Non maskable Interrupt, cannot be stopped or preempted */
  HardFault_IRQn      = -13, /*!<   3  Hard Fault, all classes of Fault */
  SVCall_IRQn         = -5,  /*!<  11  System Service Call via SVC instruction */
  PendSV_IRQn         = -2,  /*!<  14  Pendable request for system service */
  SysTick_IRQn        = -1,  /*!<  15  System Tick Timer */

/* ---------------------  FM32L0XX Specific Interrupt Numbers  -------------------- */
  WWDT_IRQn   = 0,  /*!<  0  WWDT */
  SVD_IRQn    = 1,  /*!<  1  SVD_IRQn */
  RTC_IRQn    = 2,  /*!<  2  RTC_IRQn */
  NVMIF_IRQn  = 3,  /*!<  3  NVMIF_IRQn */
  FDET_IRQn   = 4,  /*!<  4  FDET_IRQn */
  ADC_IRQn    = 5,  /*!<  5  ADC_IRQn */
  HSPI_IRQn   = 6,  /*!<  6  HSPI_IRQn */
  SPI1_IRQn   = 7,  /*!<  7  SPI1_IRQn */
  SPI2_IRQn   = 8,  /*!<  8  SPI2_IRQn */
  UART0_IRQn  = 9,  /*!<  9  UART0_IRQn */
  UART1_IRQn  = 10, /*!< 10  UART1_IRQn */
  UART2_IRQn  = 11, /*!< 11  UART2_IRQn */
  UART3_IRQn  = 12, /*!< 12  UART3_IRQn */
  UART4_IRQn  = 13, /*!< 13  UART4_IRQn */
  UART5_IRQn  = 14, /*!< 14  UART5_IRQn */
  U78160_IRQn = 15, /*!< 15  U78160_IRQn */
  U78161_IRQn = 16, /*!< 16  U78161_IRQn */
  I2C_IRQn    = 17, /*!< 17  I2C_IRQn */
  LCD_IRQn    = 18, /*!< 18  LCD_IRQn */
  AES_IRQn    = 19, /*!< 19  AES_IRQn */
  LPTIM_IRQn  = 20, /*!< 20  LPTIM_IRQn */
  DMA_IRQn    = 21, /*!< 21  DMA_IRQn */
  TRNG_IRQn   = 22, /*!< 22  TRNG_IRQn */
  COMP_IRQn   = 23, /*!< 23  COMP_IRQn */
  BTIM1_IRQn  = 24, /*!< 24  BTIMER1_IRQn */
  BTIM2_IRQn  = 25, /*!< 25  BTIMER2_IRQn */
  ETIM1_IRQn  = 26, /*!< 26  ETIMER1_IRQn */
  ETIM2_IRQn  = 27, /*!< 27  ETIMER2_IRQn */
  ETIM3_IRQn  = 28, /*!< 28  ETIMER3_IRQn */
  ETIM4_IRQn  = 29, /*!< 29  ETIMER4_IRQn */
  GPIO_IRQn   = 30, /*!< 30  GPIO_IRQn */
} IRQn_Type;

/** @addtogroup Configuration_of_CMSIS
  * @{
  */

/* ================================================================================ */
/* ================      Processor and Core Peripheral Section     ================ */
/* ================================================================================ */

/* ----------------Configuration of the Cortex-M0 Processor and Core Peripherals---------------- */
#define __CM0_REV                 0x0100  /*!< Cortex-M0 Core Revision */
#define __MPU_PRESENT             1       /*!< MPU present or not */
#define __VTOR_PRESENT            1       /*!< VTOR present or not */
#define __NVIC_PRIO_BITS          2       /*!< Number of Bits used for Priority Levels */
#define __Vendor_SysTickConfig    0       /*!< Set to 1 if different SysTick Config is used */
/** @} */ /* End of group Configuration_of_CMSIS */

#include "core_cm0plus.h"     /*!< Cortex-M0 processor and core peripherals */
#include "system_FM33A0XX.h"  /*!< FM33A0XX System */

/* ================================================================================ */
/* ================       Device Specific Peripheral Section       ================ */
/* ================================================================================ */

/** @addtogroup Device_Peripheral_Registers
  * @{
  */

/* -------------------  Start of section using anonymous unions  ------------------ */
#if defined(__CC_ARM)
  #pragma push
  #pragma anon_unions
#elif defined(__ICCARM__)
  #pragma language=extended
#elif defined(__GNUC__)
  /* anonymous unions are enabled by default */
#elif defined(__TMS470__)
  /* anonymous unions are enabled by default */
#elif defined(__TASKING__)
  #pragma warning 586
#else
  #warning Not supported compiler type
#endif

/* ================================================================================ */
/* ================                      SCU                       ================ */
/* ================================================================================ */
/* System control unit. */
typedef struct {
  __I  uint32_t SYSCON;    /*!< SYSTEM STAT */
  __IO uint32_t MCUDBGCR;  /*!< DEBUG CONFIG */
  __IO uint32_t HDFFLAG;   /*!< HARDFAULT FLAG */
} SCU_Type;

/* ================================================================================ */
/* ================                      PMU                       ================ */
/* ================================================================================ */
/* Power management unit. */
typedef struct {
  __IO uint32_t LPMCFG;    /*!< LOW POWER CONFIG */
  __IO uint32_t WKDLYCON;  /*!< WAKEUP DELAY CONFIG */
  __IO uint32_t WKPFLAG;   /*!< WKPFLAG FLAG */
  __IO uint32_t LPREIE;    /*!< LPREIE INTERRUPT ENABLE */
  __IO uint32_t LPREIF;    /*!< LPREIF INTERRUPT FLAG */
} PMU_Type;

/* ================================================================================ */
/* ================                      uart                      ================ */
/* ================================================================================ */

/* memory mapping struct for uart IE (registers shared by all UART instances) */
typedef struct {
  __IO uint32_t UARTIE;  /*!< UART interrupt enable */
  __IO uint32_t UARTIF;  /*!< UART interrupt flag */
  __IO uint32_t IRCON;   /*!< UART infrared control */
} UART_common_Type;

/* memory mapping struct for one uart module */
typedef struct {
  __IO uint32_t RXSTA;     /*!< UART receive stat Register */
  __IO uint32_t TXSTA;     /*!< UART send stat Register */
  __IO uint32_t RXREG;     /*!< UART receive data Register *8 */
  __IO uint32_t TXREG;     /*!< UART send data Register *8 */
  __IO uint32_t SPBRG;     /*!< UART communication baud rate Register */
  __IO uint32_t TXBUFSTA;  /*!< UART send stat flag Register */
  __IO uint32_t RXBUFSTA;  /*!< UART receive stat flag Register */
  __IO uint32_t RTXCON;    /*!< UART signal reverse control Register */
} UARTx_Type;

/* ================================================================================ */
/* ================                      RCC                       ================ */
/* ================================================================================ */
/* Reset and clock control. */
typedef struct {
  __IO uint32_t RSTCFG;      /*!< RESET CONFIG REGISTER */
  __IO uint32_t SOFTRST;     /*!< SOFTWARE RESET REGISTER */
  __IO uint32_t RSTFLAG;     /*!< RESET FLAG REGISTER */
  __IO uint32_t SYSCLKSEL;   /*!< SYSCLK SELECT */
  __IO uint32_t RCHFCON;     /*!< RCHF CONFIG */
  __IO uint32_t RCHFTRIM;    /*!< RCHF TRIM */
  __IO uint32_t PLLCON;      /*!< PLL CONFIG */
  __IO uint32_t RCLPCON;     /*!< RCLP CONFIG */
  __IO uint32_t RCLPTRIM;    /*!< RCLP TRIM */
  __IO uint32_t XTLFIPW;     /*!< XTLF WORD STAT SELECT */
  __IO uint32_t PERCLKCON1;  /*!< PERRIPH RCC SELECT 1 */
  __IO uint32_t PERCLKCON2;  /*!< PERRIPH RCC SELECT 2 */
  __IO uint32_t PERCLKCON3;  /*!< PERRIPH RCC SELECT 3 */
  __IO uint32_t PERCLKCON4;  /*!< PERRIPH RCC SELECT 4 */
  __IO uint32_t RSV0;        /*!< reserved, offset 238 */
  __IO uint32_t RSV1;        /*!< reserved, offset 23C */
  __IO uint32_t RSV2;        /*!< reserved, offset 240 */
  __IO uint32_t MPRIL;       /*!< PERRIPH RCC SELECT 5 */
} RCC_Type;

/* ================================================================================ */
/* ================                      DMA                       ================ */
/* ================================================================================ */
/* DMA controller: channels 0-6 carry a control word plus a RAM start address;
   channel 7 additionally has a flash start address. */
typedef struct {
  __IO uint32_t GLOBALCTRL;  /*!< GLOBAL ENABLE REGISTER */
  __IO uint32_t CH0CTRL;     /*!< CHANNEL0 CTRL REGISTER */
  __IO uint32_t CH0RAMAD;    /*!< CHANNEL0 RAM START ADDR */
  __IO uint32_t CH1CTRL;     /*!< CHANNEL1 CTRL REGISTER */
  __IO uint32_t CH1RAMAD;    /*!< CHANNEL1 RAM START ADDR */
  __IO uint32_t CH2CTRL;     /*!< CHANNEL2 CTRL REGISTER */
  __IO uint32_t CH2RAMAD;    /*!< CHANNEL2 RAM START ADDR */
  __IO uint32_t CH3CTRL;     /*!< CHANNEL3 CTRL REGISTER */
  __IO uint32_t CH3RAMAD;    /*!< CHANNEL3 RAM START ADDR */
  __IO uint32_t CH4CTRL;     /*!< CHANNEL4 CTRL REGISTER */
  __IO uint32_t CH4RAMAD;    /*!< CHANNEL4 RAM START ADDR */
  __IO uint32_t CH5CTRL;     /*!< CHANNEL5 CTRL REGISTER */
  __IO uint32_t CH5RAMAD;    /*!< CHANNEL5 RAM START ADDR */
  __IO uint32_t CH6CTRL;     /*!< CHANNEL6 CTRL REGISTER */
  __IO uint32_t CH6RAMAD;    /*!< CHANNEL6 RAM START ADDR */
  __IO uint32_t CH7CTRL;     /*!< CHANNEL7 CTRL REGISTER */
  __IO uint32_t CH7FLSAD;    /*!< CHANNEL7 FLASH START ADDR */
  __IO uint32_t CH7RAMAD;    /*!< CHANNEL7 RAM START ADDR */
  __IO uint32_t CHSTATUS;    /*!< INTERRUPT FLAG REGISTER */
} DMA_Type;

/* ================================================================================ */
/* ================                      HSPI                      ================ */
/* ================================================================================ */
/* SPI register block (shared layout for SPI1/SPI2/HSPI). */
typedef struct {
  __IO uint32_t SPICR1;    /*!< SPI CONTROL REGISTER 1 */
  __IO uint32_t SPICR2;    /*!< SPI CONTROL REGISTER 2 */
  __IO uint32_t SPICR3;    /*!< SPI CONTROL REGISTER 3 */
  __IO uint32_t SPIIE;     /*!< SPI INTERRUPT ENABLE REGISTER */
  __IO uint32_t SPIIF;     /*!< SPI STATE REGISTER */
  __IO uint32_t SPITXBUF;  /*!< SPI TX REGISTER */
  __IO uint32_t SPIRXBUF;  /*!< SPI RX REGISTER */
} SPIx_Type;

/* ================================================================================ */
/* ================                      GPIO                      ================ */
/* ================================================================================ */
/* Per-port GPIO registers. */
typedef struct {
  __IO uint32_t INEN;    /*!< INPUT ENABLE CONFIG */
  __IO uint32_t PUEN;    /*!< PULLUP ENABLE CONFIG */
  __IO uint32_t ODEN;    /*!< OD ENABLE CONFIG */
  __IO uint32_t FCR;     /*!< FUNCTION CONFIG */
  __IO uint32_t DO;      /*!< output data register */
  __O  uint32_t DSET;    /*!< bit set HIGH register */
  __O  uint32_t DRESET;  /*!< bit reset low register */
  __I  uint32_t DIN;     /*!< input data register */
} GPIOx_Type;

/* GPIO registers shared across all ports (EXTI, FOUT, drive strength, wakeup). */
typedef struct {
  __IO uint32_t EXTI0_SEL;  /*!< EDGE TRIGGER SELECT CONFIG 0,  Address offset: 0x00 */
  __IO uint32_t EXTI1_SEL;  /*!< EDGE TRIGGER SELECT CONFIG 1,  Address offset: 0x04 */
  __IO uint32_t EXTI2_SEL;  /*!< EDGE TRIGGER SELECT CONFIG 2,  Address offset: 0x08 */
  __IO uint32_t EXTI0IF;    /*!< trigger flag 0,                Address offset: 0x0C */
  __IO uint32_t EXTI1IF;    /*!< trigger flag 1,                Address offset: 0x10 */
  __IO uint32_t EXTI2IF;    /*!< trigger flag 2,                Address offset: 0x14 */
  __IO uint32_t FOUTSEL;    /*!< fout select config,            Address offset: 0x18 */
  __IO uint32_t HDSEL;      /*!< high driver config,            Address offset: 0x1C */
  __IO uint32_t ANASEL;     /*!< analog function select,        Address offset: 0x20 */
  __IO uint32_t IODF;       /*!< input digit filter select,     Address offset: 0x24 */
  __IO uint32_t PINWKEN;    /*!< pin wakeup enable,             Address offset: 0x28 */
} GPIO_COMMON_Type;

/* ================================================================================ */
/* ================                      IWDG                      ================ */
/* ================================================================================ */
/* Independent watchdog. */
typedef struct {
  __IO uint32_t IWDTSERV;  /*!< IWDG Key register,    Address offset: 0x00 */
  __IO uint32_t IWDTCFG;   /*!< IWDG CONFIG register, Address offset: 0x04 */
  __I  uint32_t IWDTCNT;   /*!< IWDG Reload register, Address offset: 0x08 */
} IWDT_Type;

/* ================================================================================ */
/* ================                     NVMIF                      ================ */
/* ================================================================================ */
/* Non-volatile memory (flash) interface. NOTE: this struct continues beyond the
   visible end of this chunk. */
typedef struct {
  __IO uint32_t FLSRDCON;  /*!< NVMIF read control register,                          Address offset: 0x00 */
  __IO uint32_t PRFTCON;   /*!< NVMIF prefetch control register,                      Address offset: 0x04 */
  __I  uint32_t OPTBR;     /*!< NVMIF port protect stat register,                     Address offset: 0x08 */
  __IO uint32_t ACLOCK1;   /*!< NVMIF flash block0-31 protect register, wr 1 only,    Address offset: 0x0C */
  __IO uint32_t ACLOCK2;   /*!< NVMIF flash block32-63 protect register, wr 1 only,   Address offset: 0x10 */
  __IO uint32_t EPCON;     /*!< NVMIF erase & program start register,                 Address offset: 0x14 */
  __IO uint32_t FLSKEY;    /*!< NVMIF flash key register,                             Address offset: 0x18 */
  __IO uint32_t FLSIE;     /*!< NVMIF flash IE register,                              Address offset: 0x1C */
  __IO uint32_t FLSIF;     /*!< NVMIF flash IF register,                              Address offset: 0x20 */
  __IO uint32_t RAMRPR0;   /*!< NVMIF RAM REPAIR0 register,                           Address offset: 0x24 */
  __IO uint32_t RAMRPR1;   /*!< NVMIF RAM REPAIR1 register,                           Address offset: 0x28 */
  __IO uint32_t RAMRPR2;   /*!< NVMIF RAM REPAIR2 register,                           Address offset: 0x2C */
  __IO uint32_t RAMRPR3;   /*!< NVMIF RAM REPAIR3 register,                           Address offset: 0x30 */
} NVMIF_Type; /* ================================================================================ */ /* ================ RAMBIST ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t RAMBISTCON; /*!< RAMBIST control register, Address offset: 0x00 */ __IO uint32_t RAMFF; /*!< RAMBIST error flag register, Address offset: 0x04 */ } RAMBIST_Type; /* ================================================================================ */ /* ================ BTIM ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t BTCR1; /*!< BTIMER CONTROL REGISTER 1, Address offset: 0x00 */ __IO uint32_t BTCR2; /*!< BTIMER CONTROL REGISTER 2, Address offset: 0x04 */ __IO uint32_t BTCFG1; /*!< BTIMER CONFIG REGISTER 1, Address offset: 0x08 */ __IO uint32_t BTCFG2; /*!< BTIMER CONFIG REGISTER 2, Address offset: 0x0C */ __IO uint32_t BTPRES; /*!< BTIMER PRESCALE VALUE, Address offset: 0x10 */ __IO uint32_t BTLOADCR; /*!< BTIMER LOAD CONTROL REGISTER, Address offset: 0x14 */ __IO uint32_t BTCNTL; /*!< BTIMER COUNTER LOW BYTE, Address offset: 0x18 */ __IO uint32_t BTCNTH; /*!< BTIMER COUNTER HIGH BYTE, Address offset: 0x1C */ __IO uint32_t BTPRESET; /*!< ETIMER PRESET VALUE, Address offset: 0x20 */ __IO uint32_t BTLOADL; /*!< ETIMER LOAD VALUE LOW BYTE, Address offset: 0x24 */ __IO uint32_t BTLOADH; /*!< ETIMER LOAD VALUE HIGH BYTE, Address offset: 0x28 */ __IO uint32_t BTCMPL; /*!< ETIMER CMP LOW, Address offset: 0x2C */ __IO uint32_t BTCMPH; /*!< ETIMER CMP HIGH, Address offset: 0x30 */ __IO uint32_t BTOUTCNT; /*!< ETIMER OUTPUT PULSE VALUE, Address offset: 0x34 */ __IO uint32_t BTOCR; /*!< ETIMER OUTPUT CONTROL REGISTER, Address offset: 0x38 */ __IO uint32_t BTIE; /*!< ETIMER INTERRUPT ENABLE, Address offset: 0x3C */ __IO uint32_t BTIF; /*!< ETIMER INTERRUPT FLAG & STATUS, Address offset: 0x40 */ } BTIMx_Type; /* 
================================================================================ */ /* ================ ETIM ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t ETxCR; /*!< ETIMER CONTROL REGISTER, Address offset: 0x00 */ __IO uint32_t ETxINSEL; /*!< ETIMER INPUT SELECT REGISTER, Address offset: 0x04 */ __IO uint32_t ETxPESCALE1; /*!< ETIMER PRESCALE REGISTER 1, Address offset: 0x08 */ __IO uint32_t ETxPESCALE2; /*!< ETIMER PRESCALE REGISTER 2, Address offset: 0x0C */ __IO uint32_t ETxIVR; /*!< ETIMER INITIAL VALUE, Address offset: 0x10 */ __IO uint32_t ETxCMP; /*!< ETIMER CMP REGISTER, Address offset: 0x14 */ __IO uint32_t ETxIE; /*!< ETIMER INTERRUPT ENABLE, Address offset: 0x18 */ __IO uint32_t ETxIF; /*!< ETIMER INTERRUPT FLAG & STATUS, Address offset: 0x1C */ } ETIMx_Type; /* ================================================================================ */ /* ================ LPTIM ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t LPTCFG; /*!< LPTIM CONFIG REGISTER, Address offset: 0x00 */ __IO uint32_t LPTCNT; /*!< LPTIM COUNT REGISTER, Address offset: 0x04 */ __IO uint32_t LPTCMP; /*!< LPTIM COMPARE REGISTER, Address offset: 0x08 */ __IO uint32_t LPTTARGET; /*!< LPTIM TARGET REGISTER, Address offset: 0x0C */ __IO uint32_t LPTIE; /*!< LPTIM INTERRUPT ENABLE, Address offset: 0x10 */ __IO uint32_t LPTIF; /*!< LPTIM INTERRUPT FLAG REGISTER, Address offset: 0x14 */ __IO uint32_t LPTCTRL; /*!< LPTIM CONTROL REGISTER, Address offset: 0x18 */ } LPTIM_Type; /* ================================================================================ */ /* ================ I2C ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t I2CCTRL; /*!< I2C CONFIG REGISTER, Address offset: 0x00 */ __IO uint32_t I2CSTA; /*!< I2C 
STATE REGISTER, Address offset: 0x04 */ __IO uint32_t I2CBRG; /*!< I2C BAUD SETTING REGISTER, Address offset: 0x08 */ __IO uint32_t I2CBUF; /*!< I2C BUFF REGISTER, Address offset: 0x0C */ __IO uint32_t I2CIR; /*!< I2C INTERRUPT ENABLE & FLAG, Address offset: 0x10 */ __IO uint32_t I2CFSM; /*!< I2C FSM STATE REGISTER, Address offset: 0x14 */ __IO uint32_t I2CERR; /*!< I2C ERR FLAG REGISTER, Address offset: 0x18 */ } I2C_Type; /* ================================================================================ */ /* ================ ANAC ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t PDRCON; /*!< ANAC PDR CONFIG REGISTER, Address offset: 0x00 */ __IO uint32_t BORCON; /*!< ANAC BOR CONFIG REGISTER, Address offset: 0x04 */ __IO uint32_t LDO15CON; /*!< ANAC LDO15CON CONFIG REGISTER, Address offset: 0x08 */ __IO uint32_t VREFTRIM; /*!< ANAC VREF TRIM REGISTER, Address offset: 0x0C */ __IO uint32_t ULPRCON; /*!< ANAC ULP LDO CONFIG REGISTER, Address offset: 0x10 */ __IO uint32_t SVDCFG; /*!< ANAC SVD CONFIG REGISTER, Address offset: 0x14 */ __IO uint32_t SVDCON; /*!< ANAC SVD CONTROL REGISTER, Address offset: 0x18 */ __IO uint32_t SVDSIF; /*!< ANAC SVD INTERRUPT FLAG REGISTER, Address offset: 0x1C */ __IO uint32_t FDETIE; /*!< ANAC FDET INTERRUPT ENABLE REGISTER, Address offset: 0x20 */ __IO uint32_t FDETIF; /*!< ANAC FDET INTERRUPT FLAG REGISTER, Address offset: 0x24 */ __IO uint32_t ADCINSEL; /*!< ANAC INPUT SELECT REGISTER, Address offset: 0x28 */ __IO uint32_t ADCCON; /*!< ANAC ADC CONFIG REGISTER, Address offset: 0x2C */ __IO uint32_t ADCTRIM; /*!< ANAC ADC TRIM REGISTER, Address offset: 0x30 */ __IO uint32_t ADCDATA; /*!< ANAC ADC DATA REGISTER, Address offset: 0x34 */ __IO uint32_t ADCIF; /*!< ANAC ADC INTERRUPT FLAG REGISTER, Address offset: 0x38 */ __IO uint32_t TRNGCON; /*!< ANAC TRNF ENABLE REGISTER, Address offset: 0x3C */ __IO uint32_t COMP1CR; /*!< ANAC COMP1 CONTROL 
REGISTER, Address offset: 0x40 */ __IO uint32_t COMP2CR; /*!< ANAC COMP2 CONTROL REGISTER, Address offset: 0x44 */ __IO uint32_t COMPICR; /*!< ANAC COMP12 INTERRUPT CONTROL REGISTER, Address offset: 0x48 */ __IO uint32_t COMPIF; /*!< ANAC COMP INTERRUPT FLAG REGISTER, Address offset: 0x4C */ __IO uint32_t RSV0; __IO uint32_t RSV1; __IO uint32_t RSV2; __IO uint32_t SVDALM; /*!< ANAC SVDALM REGISTER, Address offset: 0x5C */ } ANAC_Type; /* ================================================================================ */ /* ================ CRC ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t CRCDR; /*!< CRC DATA REGISTER, Address offset: 0x00 */ __IO uint32_t CRCCR; /*!< CRC CONTROL REGISTER, Address offset: 0x04 */ __IO uint32_t CRCLFSR; /*!< CRC LFSR REGISTER, Address offset: 0x08 */ __IO uint32_t CRCXOR; /*!< CRC XOR DATA REGISTER, Address offset: 0x0C */ __IO uint32_t CRCFLSEN; /*!< CRC FLASH CRC VERIFY EN REGISTER, Address offset: 0x10 */ __IO uint32_t CRCFLSAD; /*!< CRC FLASH ADRESS REGISTER, Address offset: 0x14 */ __IO uint32_t CRCFLSSIZE; /*!< CRC VERIFY SIZE REGISTER, Address offset: 0x18 */ } CRC_Type; /* ================================================================================ */ /* ================ DISP ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t DISPCTRL; /*!< DISP CONTROL REGISTER, Address offset: 0x00 */ __IO uint32_t LCDTEST; /*!< DISP TEST CONTROL REGISTER, Address offset: 0x04 */ __IO uint32_t DF; /*!< DISP DIVIDE CLK SETTING REGISTER, Address offset: 0x08 */ __IO uint32_t TON; /*!< DISP ON SETTING REGISTER, Address offset: 0x0C */ __IO uint32_t TOFF; /*!< DISP OFF SETTING REGISTER, Address offset: 0x10 */ __IO uint32_t DISPIE; /*!< DISP INTERRUPT CONTROL REGISTER, Address offset: 0x14 */ __IO uint32_t DISPIF; /*!< DISP INTERRUPT FLAG REGISTER, Address offset: 
0x18 */ __IO uint32_t LCDSET; /*!< DISP SETTING REGISTER, Address offset: 0x1C */ __IO uint32_t LCDDRV; /*!< DISP DRV SETTING REGISTER, Address offset: 0x20 */ __IO uint32_t DISPDATA0; /*!< DISP DATA REGISTER 0, Address offset: 0x24 */ __IO uint32_t DISPDATA1; /*!< DISP DATA REGISTER 1, Address offset: 0x28 */ __IO uint32_t DISPDATA2; /*!< DISP DATA REGISTER 2, Address offset: 0x2C */ __IO uint32_t DISPDATA3; /*!< DISP DATA REGISTER 3, Address offset: 0x30 */ __IO uint32_t DISPDATA4; /*!< DISP DATA REGISTER 4, Address offset: 0x34 */ __IO uint32_t DISPDATA5; /*!< DISP DATA REGISTER 5, Address offset: 0x38 */ __IO uint32_t DISPDATA6; /*!< DISP DATA REGISTER 6, Address offset: 0x3C */ __IO uint32_t DISPDATA7; /*!< DISP DATA REGISTER 7, Address offset: 0x40 */ __IO uint32_t DISPDATA8; /*!< DISP DATA REGISTER 8, Address offset: 0x44 */ __IO uint32_t DISPDATA9; /*!< DISP DATA REGISTER 9, Address offset: 0x48 */ __IO uint32_t LCDBIAS; /*!< DISP BIAS SETTING REGISTER, Address offset: 0x4C */ __IO uint32_t COM_EN; /*!< DISP COM SETTING REGISTER, Address offset: 0x50 */ __IO uint32_t SEG_EN0; /*!< DISP SEG SETTING REGISTER, Address offset: 0x54 */ __IO uint32_t SEG_EN1; /*!< DISP SEG SETTING REGISTER, Address offset: 0x58 */ __IO uint32_t LCDBSTCON; /*!< DISP BOOST CONFIG REGISTER, Address offset: 0x5C */ } DISP_Type; /* ================================================================================ */ /* ================ RTC ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t RTCWE; /*!< RTC WRITE PROTECT REGISTER, Address offset: 0x00 */ __IO uint32_t RTCIE; /*!< RTC INTERRUPT CONTROL REGISTER, Address offset: 0x04 */ __IO uint32_t RTCIF; /*!< RTC INTERRUPT FLAG REGISTER, Address offset: 0x08 */ __IO uint32_t BCDSEC; /*!< RTC SECOND IN BCD REGISTER, Address offset: 0x0C */ __IO uint32_t BCDMIN; /*!< RTC MINITE IN BCD REGISTER, Address offset: 0x10 */ __IO uint32_t BCDHOUR; /*!< RTC HOUR 
IN BCD REGISTER, Address offset: 0x14 */ __IO uint32_t BCDDATE; /*!< RTC DATE IN BCD REGISTER, Address offset: 0x18 */ __IO uint32_t BCDWEEK; /*!< RTC WEEK IN BCD REGISTER, Address offset: 0x1C */ __IO uint32_t BCDMONTH; /*!< RTC MONTH IN BCD REGISTER, Address offset: 0x20 */ __IO uint32_t BCDYEAR; /*!< RTC YEAR IN BCD REGISTER, Address offset: 0x24 */ __IO uint32_t ALARM; /*!< RTC ALARM SETTING REGISTER, Address offset: 0x28 */ __IO uint32_t FSEL; /*!< RTC OUTPUT SELECT REGISTER, Address offset: 0x2C */ __IO uint32_t ADJUST; /*!< RTC LTBC ADJUST REGISTER, Address offset: 0x30 */ __IO uint32_t ADSIGN; /*!< RTC LTBC ADJUST SIGN REGISTER, Address offset: 0x34 */ __IO uint32_t PR1SEN; /*!< RTC LTBC 1SEC ENABLE REGISTER, Address offset: 0x38 */ __IO uint32_t MSECCNT; /*!< RTC MS COUNTER REGISTER, Address offset: 0x3C */ __IO uint32_t STAMPEN; /*!< RTC TIME STAMP REGISTER, Address offset: 0x40 */ __IO uint32_t CLKSTAMP0R; /*!< RTC STAMP AT PB4 RISE REGISTER 0, Address offset: 0x44 */ __IO uint32_t CALSTAMP0R; /*!< RTC STAMP AT PB4 RISE REGISTER 0, Address offset: 0x48 */ __IO uint32_t CLKSTAMP0F; /*!< RTC STAMP AT PB4 FALL REGISTER 0, Address offset: 0x4C */ __IO uint32_t CALSTAMP0F; /*!< RTC STAMP AT PB4 FALL REGISTER 0, Address offset: 0x50 */ __IO uint32_t CLKSTAMP1R; /*!< RTC STAMP AT PB4 RISE REGISTER 1, Address offset: 0x54 */ __IO uint32_t CALSTAMP1R; /*!< RTC STAMP AT PB4 RISE REGISTER 1, Address offset: 0x58 */ __IO uint32_t CLKSTAMP1F; /*!< RTC STAMP AT PB4 FALL REGISTER 1, Address offset: 0x5C */ __IO uint32_t CALSTAMP1F; /*!< RTC STAMP AT PB4 FALL REGISTER 1, Address offset: 0x60 */ } RTC_Type; /* ================================================================================ */ /* ================ AES ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t AESCR; /*!< AES CONTROL REGISTER, Address offset: 0x00 */ __IO uint32_t AESIF; /*!< AES INTERRUPT FLAG REGISTER, Address 
offset: 0x04 */ __IO uint32_t AESDIN; /*!< AES DATA INPUT REGISTER, Address offset: 0x08 */ __IO uint32_t AESDOUT; /*!< AES DATA OUTPUT REGISTER, Address offset: 0x0C */ __IO uint32_t AESKEY0; /*!< AES KEY LOW WORD REGISTER 0, Address offset: 0x10 */ __IO uint32_t AESKEY1; /*!< AES KEY REGISTER 1, Address offset: 0x14 */ __IO uint32_t AESKEY2; /*!< AES KEY REGISTER 2, Address offset: 0x18 */ __IO uint32_t AESKEY3; /*!< AES KEY REGISTER 3, Address offset: 0x1C */ __IO uint32_t AESKEY4; /*!< AES KEY REGISTER 4, Address offset: 0x20 */ __IO uint32_t AESKEY5; /*!< AES KEY REGISTER 5, Address offset: 0x24 */ __IO uint32_t AESKEY6; /*!< AES KEY REGISTER 6, Address offset: 0x28 */ __IO uint32_t AESKEY7; /*!< AES KEY REGISTER 7, Address offset: 0x2C */ __IO uint32_t AESIVR0; /*!< AES INITIAL DATA REGISTER 0, Address offset: 0x30 */ __IO uint32_t AESIVR1; /*!< AES INITIAL DATA REGISTER 1, Address offset: 0x34 */ __IO uint32_t AESIVR2; /*!< AES INITIAL DATA REGISTER 2, Address offset: 0x38 */ __IO uint32_t AESIVR3; /*!< AES INITIAL DATA REGISTER 3, Address offset: 0x3C */ } AES_Type; /* ================================================================================ */ /* ================ U7816 ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t U7816CTRL; /*!< U7816 CONTROL REGISTER, Address offset: 0x00 */ __IO uint32_t U7816FRC; /*!< U7816 FRAME CONTROL REGISTER, Address offset: 0x04 */ __IO uint32_t U7816EGT; /*!< U7816 EGT CONFIG REGISTER, Address offset: 0x08 */ __IO uint32_t U7816CLKDIV; /*!< U7816 CLK DIVIDED REGISTER, Address offset: 0x0C */ __IO uint32_t U7816PDIV; /*!< U7816 PREDIVIDED REGISTER, Address offset: 0x10 */ __IO uint32_t U7816RXBUF; /*!< U7816 RX BUFFER REGISTER, Address offset: 0x14 */ __IO uint32_t U7816TXBUF; /*!< U7816 TX BUFFER REGISTER, Address offset: 0x18 */ __IO uint32_t U7816IE; /*!< U7816 INTERRUPT ENABLE REGISTER, Address offset: 0x1C */ __IO uint32_t 
U7816IF; /*!< U7816 INTERRUPT FLAG REGISTER, Address offset: 0x20 */ __IO uint32_t U7816ERR; /*!< U7816 ERR FLAG REGISTER, Address offset: 0x24 */ __IO uint32_t U7816STA; /*!< U7816 STATE REGISTER, Address offset: 0x28 */ } U7816x_Type; /* ================================================================================ */ /* ================ WWDT ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t WWDTCON; /*!< WWDT CONTROL REGISTER, Address offset: 0x00 */ __IO uint32_t WWDTCFG; /*!< WWDT CONFIG REGISTER, Address offset: 0x04 */ __IO uint32_t WWDTCNT; /*!< WWDT COUNTER REGISTER, Address offset: 0x08 */ __IO uint32_t WWDTIE; /*!< WWDT INTERRUPT ENABLE REGISTER, Address offset: 0x0C */ __IO uint32_t WWDTIF; /*!< WWDT INTERRUPT FLAG REGISTER, Address offset: 0x10 */ __IO uint32_t WWDTDIV; /*!< WWDT WWDTDIV REGISTER, Address offset: 0x14 */ } WWDT_Type; /* ================================================================================ */ /* ================ TRNG ================ */ /* ================================================================================ */ typedef struct { __IO uint32_t RSV0; /*!< RESERVED Address offset: 0x00 */ __IO uint32_t RNGOUT; /*!< TRNG OUTPUT REGISTER, Address offset: 0x04 */ __IO uint32_t RSV1; /*!< RESERVED Address offset: 0x08 */ __IO uint32_t RNGIE; /*!< TRNG INTERRUPT ENABLE REGISTER, Address offset: 0x0C */ __IO uint32_t RNGIF; /*!< TRNG INTERRUPT FLAG REGISTER, Address offset: 0x10 */ __IO uint32_t CRCCON; /*!< TRNG CRC CONTROL REGISTER, Address offset: 0x14 */ __IO uint32_t CRCIN; /*!< TRNG CRC INPUT REGISTER, Address offset: 0x18 */ __IO uint32_t CRCFLAG; /*!< TRNG CRC CRCFLAG REGISTER, Address offset: 0x1C */ } TRNG_Type; /* -------------------- End of section using anonymous unions ------------------- */ #if defined(__CC_ARM) #pragma pop #elif defined(__ICCARM__) /* leave anonymous unions enabled */ #elif defined(__GNUC__) /* anonymous 
unions are enabled by default */ #elif defined(__TMS470__) /* anonymous unions are enabled by default */ #elif defined(__TASKING__) #pragma warning restore #else #warning Not supported compiler type #endif /* ================================================================================ */ /* ================ CPU memory map ================ */ /* ================================================================================ */ /* Peripheral and SRAM base address */ #define FLASH_BASE (( uint32_t)0x00000000) #define SRAM_BASE (( uint32_t)0x20000000) #define PERIPH_BASE (( uint32_t)0x40000000) /* ================================================================================ */ /* ================ Peripheral memory map ================ */ /* ================================================================================ */ /* Peripheral memory map */ #define SCU_BASE (PERIPH_BASE +0x00000000) #define PMU_BASE (PERIPH_BASE +0x00000100) #define RCC_BASE (PERIPH_BASE +0x00000200) #define DMA_BASE (PERIPH_BASE +0x00000400) #define HSPI_BASE (PERIPH_BASE +0x00000800) #define SPI1_BASE (PERIPH_BASE +0x00000840) #define SPI2_BASE (PERIPH_BASE +0x00000880) #define GPIOA_BASE (PERIPH_BASE +0x00000C00) #define GPIOB_BASE (PERIPH_BASE +0x00000C20) #define GPIOC_BASE (PERIPH_BASE +0x00000C40) #define GPIOD_BASE (PERIPH_BASE +0x00000C60) #define GPIOE_BASE (PERIPH_BASE +0x00000C80) #define GPIOF_BASE (PERIPH_BASE +0x00000CA0) #define GPIOG_BASE (PERIPH_BASE +0x00000CC0) #define GPIO_COMMON_BASE (PERIPH_BASE +0x00000CE0) #define NVMIF_BASE (PERIPH_BASE +0x00001000) #define CRC_BASE (PERIPH_BASE +0x00010000) #define DISP_BASE (PERIPH_BASE +0x00010C00) #define RTC_BASE (PERIPH_BASE +0x00011000) #define IWDT_BASE (PERIPH_BASE +0x00011400) #define WWDT_BASE (PERIPH_BASE +0x00011800) #define U78160_BASE (PERIPH_BASE +0x00011C00) #define U78161_BASE (PERIPH_BASE +0x00011C2C) #define UART_COMMON_BASE (PERIPH_BASE +0x00012000) #define UART0_BASE (PERIPH_BASE +0x0001200C) #define 
UART1_BASE (PERIPH_BASE +0x0001202C) #define UART2_BASE (PERIPH_BASE +0x0001204C) #define UART3_BASE (PERIPH_BASE +0x0001206C) #define UART4_BASE (PERIPH_BASE +0x0001208C) #define UART5_BASE (PERIPH_BASE +0x000120AC) #define I2C_BASE (PERIPH_BASE +0x00012400) #define ANAC_BASE (PERIPH_BASE +0x00012800) #define RAMBIST_BASE (PERIPH_BASE +0x00012C00) #define BTIMER1_BASE (PERIPH_BASE +0x00013000) #define BTIMER2_BASE (PERIPH_BASE +0x00013044) #define ETIMER1_BASE (PERIPH_BASE +0x00013090) #define ETIMER2_BASE (PERIPH_BASE +0x000130B0) #define ETIMER3_BASE (PERIPH_BASE +0x000130D0) #define ETIMER4_BASE (PERIPH_BASE +0x000130F0) #define LPTIM_BASE (PERIPH_BASE +0x00013400) #define AES_BASE (PERIPH_BASE +0x00013800) #define TRNG_BASE (PERIPH_BASE +0x00013C00) /* ================================================================================ */ /* ================ Peripheral declaration ================ */ /* ================================================================================ */ #define SCU ((SCU_Type *) SCU_BASE ) #define PMU ((PMU_Type *) PMU_BASE ) #define RCC ((RCC_Type *) RCC_BASE ) #define DMA ((DMA_Type *) DMA_BASE ) #define HSPI ((SPIx_Type *) HSPI_BASE ) #define SPI1 ((SPIx_Type *) SPI1_BASE ) #define SPI2 ((SPIx_Type *) SPI2_BASE ) #define NVMIF ((NVMIF_Type *) NVMIF_BASE ) #define FLASH ((NVMIF_Type *) NVMIF_BASE ) #define UART ((UART_common_Type *) UART_COMMON_BASE ) #define UART0 ((UARTx_Type *) UART0_BASE ) #define UART1 ((UARTx_Type *) UART1_BASE ) #define UART2 ((UARTx_Type *) UART2_BASE ) #define UART3 ((UARTx_Type *) UART3_BASE ) #define UART4 ((UARTx_Type *) UART4_BASE ) #define UART5 ((UARTx_Type *) UART5_BASE ) #define IWDT ((IWDT_Type *) IWDT_BASE ) #define GPIOA ((GPIOx_Type *) GPIOA_BASE ) #define GPIOB ((GPIOx_Type *) GPIOB_BASE ) #define GPIOC ((GPIOx_Type *) GPIOC_BASE ) #define GPIOD ((GPIOx_Type *) GPIOD_BASE ) #define GPIOE ((GPIOx_Type *) GPIOE_BASE ) #define GPIOF ((GPIOx_Type *) GPIOF_BASE ) #define GPIOG ((GPIOx_Type *) 
GPIOG_BASE ) #define GPIO ((GPIO_COMMON_Type *) GPIO_COMMON_BASE ) #define RAMBIST ((RAMBIST_Type *) RAMBIST_BASE ) #define BTIM1 ((BTIMx_Type *) BTIMER1_BASE ) #define BTIM2 ((BTIMx_Type *) BTIMER2_BASE ) #define ETIM1 ((ETIMx_Type *) ETIMER1_BASE ) #define ETIM2 ((ETIMx_Type *) ETIMER2_BASE ) #define ETIM3 ((ETIMx_Type *) ETIMER3_BASE ) #define ETIM4 ((ETIMx_Type *) ETIMER4_BASE ) #define LPTIM ((LPTIM_Type *) LPTIM_BASE ) #define ANAC ((ANAC_Type *) ANAC_BASE ) #define WWDT ((WWDT_Type *) WWDT_BASE ) #define I2C ((I2C_Type *) I2C_BASE ) #define CRC ((CRC_Type *) CRC_BASE ) #define LCD ((DISP_Type *) DISP_BASE ) #define RTC ((RTC_Type *) RTC_BASE ) #define U78160 ((U7816x_Type *) U78160_BASE ) #define U78161 ((U7816x_Type *) U78161_BASE ) #define AES ((AES_Type *) AES_BASE ) #define TRNG ((TRNG_Type *) TRNG_BASE ) /* ================================================================================ */ /* ================ Peripheral include ================ */ /* ================================================================================ */ /** @} */ /* End of group Device_Peripheral_Registers */ /** @} */ /* End of group FM32L0XX */ /** @} */ /* End of group Keil */ #ifdef __cplusplus } #endif #endif /* FM33A0XX_H */
/** * Time of a workflow run in UTC. */ public class ScheduledTime extends ValueObject implements Comparable<ScheduledTime> { public static final ScheduledTimeFormatter FORMATTER = new ScheduledTimeFormatter(); protected final DateTime dateTime; public ScheduledTime(String formattedDate) { this(DateTime.parse(formattedDate)); } public ScheduledTime(DateTime dateTime) { this.dateTime = Util.requireNonNull(dateTime); if (!dateTime.getZone().equals(DateTimeZone.UTC)) { throw new IllegalArgumentException( "Scheduled time must be in UTC, but isn't: " + dateTime); } } public DateTime getDateTime() { return dateTime; } @Override public int compareTo(ScheduledTime t) { return CompareToBuilder.reflectionCompare(this, t); } public int getYear() { return dateTime.getYear(); } public int getMonth() { return dateTime.getMonthOfYear(); } public int getDay() { return dateTime.getDayOfMonth(); } public int getHour() { return dateTime.getHourOfDay(); } public int getMinute() { return dateTime.getMinuteOfHour(); } public int getSecond() { return dateTime.getSecondOfMinute(); } public int getMillisecond() { return dateTime.getMillisOfSecond(); } public String toString() { return dateTime.toString(); } public ScheduledTime minusYears(int i) { return new ScheduledTime(getDateTime().minusYears(i)); } public ScheduledTime plusYears(int i) { return new ScheduledTime(getDateTime().plusYears(i)); } public ScheduledTime minusMonths(int i) { return new ScheduledTime(getDateTime().minusMonths(i)); } public ScheduledTime plusMonths(int i) { return new ScheduledTime(getDateTime().plusMonths(i)); } public ScheduledTime minusDays(int i) { return new ScheduledTime(getDateTime().minusDays(i)); } public ScheduledTime plusDays(int i) { return new ScheduledTime(getDateTime().plusDays(i)); } public ScheduledTime minusHours(int i) { return new ScheduledTime(getDateTime().minusHours(i)); } public ScheduledTime plusHours(int i) { return new ScheduledTime(getDateTime().plusHours(i)); } public ScheduledTime 
minusMinutes(int i) { return new ScheduledTime(getDateTime().minusMinutes(i)); } public ScheduledTime plusMinutes(int i) { return new ScheduledTime(getDateTime().plusMinutes(i)); } public ScheduledTime minusSeconds(int i) { return new ScheduledTime(getDateTime().minusSeconds(i)); } public ScheduledTime plusSeconds(int i) { return new ScheduledTime(getDateTime().plusSeconds(i)); } public String year() { return FORMATTER.formatYear(this); } public String month() { return FORMATTER.formatMonth(this); } public String day() { return FORMATTER.formatDay(this); } public String hour() { return FORMATTER.formatHour(this); } public String minute() { return FORMATTER.formatMinute(this); } public String second() { return FORMATTER.formatSecond(this); } public static ScheduledTime now() { return new ScheduledTime(DateTime.now(DateTimeZone.UTC)); } }
/**
 * Form-backing bean for Thymeleaf templates; pairs a {@code Title} with an
 * optional identifier. Does not need to be persisted.
 */
public class TitleWrapper {

    private Title title;
    private Long id;

    /** No-arg constructor required for form binding. */
    public TitleWrapper() {
    }

    public Title getTitle() {
        return title;
    }

    public void setTitle(Title title) {
        this.title = title;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }
}
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Unit tests for checkpoint_utils.load_checkpoint resuming mid-epoch state."""

import contextlib
from io import StringIO
import unittest
from unittest.mock import MagicMock, patch

import torch

from fairseq import data, checkpoint_utils


def mock_trainer(epoch, num_updates, iterations_in_epoch):
    """Build a MagicMock trainer whose load_checkpoint() reports the given
    epoch / iteration state (the shape checkpoint_utils expects under the
    "train_iterator" key) and whose get_num_updates() returns num_updates."""
    trainer = MagicMock()
    trainer.load_checkpoint.return_value = {
        "train_iterator": {
            "epoch": epoch,
            "iterations_in_epoch": iterations_in_epoch,
            "shuffle": False,
        },
    }
    trainer.get_num_updates.return_value = num_updates
    return trainer


def mock_dict():
    """Minimal dictionary stub exposing the pad/eos/unk indices datasets need."""
    d = MagicMock()
    d.pad.return_value = 1
    d.eos.return_value = 2
    d.unk.return_value = 3
    return d


def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    """Return a (mock trainer, real EpochBatchIterator) pair.

    The iterator yields one single-token sample per batch, with token values
    0..epoch_size-1, so tests can identify the resume position by the token
    value of the next batch.
    """
    tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    tokens_ds = data.TokenBlockDataset(
        tokens,
        sizes=[tokens.size(-1)],
        block_size=1,
        pad=0,
        eos=1,
        include_targets=False,
    )
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    dataset = data.LanguagePairDataset(
        tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False
    )
    # One example per batch so iterations_in_epoch maps 1:1 to sample index.
    epoch_itr = data.EpochBatchIterator(
        dataset=dataset,
        collate_fn=dataset.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return trainer, epoch_itr


class TestLoadCheckpoint(unittest.TestCase):
    """Checks that load_checkpoint restores (or resets) the epoch iterator."""

    def setUp(self):
        # Args stub: no resets requested, no optimizer overrides.
        self.args_mock = MagicMock()
        self.args_mock.optimizer_overrides = "{}"
        self.args_mock.reset_dataloader = False
        self.args_mock.reset_meters = False
        self.args_mock.reset_optimizer = False
        # Patch filesystem access so a checkpoint "exists" without real I/O.
        self.patches = {
            "os.makedirs": MagicMock(),
            "os.path.join": MagicMock(),
            "os.path.isfile": MagicMock(return_value=True),
            "os.path.isabs": MagicMock(return_value=False),
        }
        self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        # List comprehension used purely for its side effect of starting patches.
        [p.start() for p in self.applied_patches]

    def test_load_partial_checkpoint(self):
        # Resuming a checkpoint saved 50 iterations into epoch 2 (of 150).
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)

            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            # First batch after resume is sample 50, proving we skipped 0..49.
            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)

            # Drain up to the second-to-last batch of the epoch.
            for _ in range(150 - 52):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())

            # Rolling into the next epoch resets the in-epoch counter.
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)

    def test_load_full_checkpoint(self):
        # Checkpoint saved exactly at an epoch boundary (150/150 iterations):
        # resuming should start fresh at the next epoch, sample 0.
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)

    def test_load_no_checkpoint(self):
        # No checkpoint file on disk: training starts from epoch 1, sample 0.
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(0, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            self.patches["os.path.isfile"].return_value = False
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)

    def tearDown(self):
        patch.stopall()


if __name__ == "__main__":
    unittest.main()
/** * Create a new class to compare the names of an array strings */ private static class NameComparator implements java.util.Comparator<String[]> { public NameComparator() { } /** * Compare 2 arrays of strings based on the comparison of the compound names as first priority, then the compound formulas as second priority and then the charges as the last priority * @param t1 an array of strings * @param t2 an array of strings * @return the value of the String compareTo method called upon the elements in the arrays that were compared */ @Override public int compare(String[] t1, String[] t2) { // check if both compound names are empty strings if (t1[0].equals("") && t2[0].equals("")) { // check if compound formulas are equal, if they are return the comparison of the charge strings if (t1[1].equals(t2[1])) { return t1[2].compareTo(t2[2]); } // else return the comparison of the formula strings else { return t1[1].compareTo(t2[1]); } } // else if only one of the compound names is an empty string, return the comparison * -1 else if (t1[0].equals("") || t2[0].equals("")) { return t1[0].compareTo(t2[0]) * -1; } // else return the string comparison of the compound names else { return t1[0].compareTo(t2[0]); } } }
Network Analysis of Translocated Takahe Populations to Identify Disease Surveillance Targets Social network analysis is being increasingly used in epidemiology and disease modeling in humans, domestic animals, and wildlife. We investigated this tool in describing a translocation network (area that allows movement of animals between geographically isolated locations) used for the conservation of an endangered flightless rail, the Takahe (Porphyrio hochstetteri). We collated records of Takahe translocations within New Zealand and used social network principles to describe the connectivity of the translocation network. That is, networks were constructed and analyzed using adjacency matrices with values based on the tie weights between nodes. Five annual network matrices were created using the Takahe data set, each incremental year included records of previous years. Weights of movements between connected locations were assigned by the number of Takahe moved. We calculated the number of nodes (itotal) and the number of ties (ttotal) between the nodes. To quantify the small‐world character of the networks, we compared the real networks to random graphs of the equivalent size, weighting, and node strength. Descriptive analysis of cumulative annual Takahe movement networks involved determination of node‐level characteristics, including centrality descriptors of relevance to disease modeling such as weighted measures of in degree (kiin), out degree (kiout), and betweenness (Bi). Key players were assigned according to the highest node measure of kiin, kiout, and Bi per network. Networks increased in size throughout the time frame considered. The network had some degree small‐world characteristics. Nodes with the highest cumulative tie weights connecting them were the captive breeding center, the Murchison Mountains and 2 offshore islands. The key player fluctuated between the captive breeding center and the Murchison Mountains. 
The cumulative networks identified the captive breeding center every year as the hub of the network until the final network in 2011. Likewise, the wild Murchison Mountains population was consistently the sink of the network. Other nodes, such as the offshore islands and the wildlife hospital, varied in importance over time. Common network descriptors and measures of centrality identified key locations for targeting disease surveillance. The visual representation of movements of animals in a population that this technique provides can aid decision makers when they evaluate translocation proposals or attempt to control a disease outbreak.
def abstract_elements_by_type(ifc_values):
    """Build per-element wireframe segments from flat IFC geometry buffers.

    ``ifc_values`` maps three keys to per-element dicts:
      - 'vertices': flat coordinate buffer (x0, y0, z0, x1, y1, z1, ...)
      - 'edges':    flat index buffer of vertex-index pairs (a0, b0, a1, b1, ...)
      - 'elements': element-id -> element name/type

    Side effect: each entry of ``ifc_values['vertices']`` is normalised in
    place to a tuple of floats rounded to 8 decimals (stabilises later
    equality/hashing on coordinates).

    Returns a pair ``(lines_for_type, elements_name)`` where
    ``lines_for_type[k]`` is a list of ((x, y, z), (x, y, z)) segments with
    duplicate index pairs removed, and ``elements_name[k]`` echoes the name.
    """
    vertices = ifc_values['vertices']
    edges = ifc_values['edges']
    elements = ifc_values['elements']

    # Normalise every coordinate buffer in place: rounded floats, immutable.
    for elem_id, coords in vertices.items():
        vertices[elem_id] = tuple(round(c, 8) for c in coords)

    lines_for_type = {}
    elements_name = {}
    for elem_id in elements:
        coords = vertices[elem_id]
        # Regroup the flat buffer into 3-component points.
        points = [coords[i:i + 3] for i in range(0, len(coords), 3)]
        edge_buf = edges[elem_id]
        # Pair up edge indices and drop exact duplicates (set dedup, as before).
        index_pairs = list(set(edge_buf[i:i + 2] for i in range(0, len(edge_buf), 2)))
        lines_for_type[elem_id] = [(points[a], points[b]) for a, b in index_pairs]
        elements_name[elem_id] = elements[elem_id]
    return lines_for_type, elements_name
import { expect } from 'chai';
import { ReactiveClient } from '../lib/reactive/ReactiveClient';
import { ReactiveCombinationClient } from '../lib/reactive/ReactiveCombinationClient';
import { ReactiveObservationClient } from '../lib/reactive/ReactiveObservationClient';

// These assertions look tautological, but each property access goes through a
// decorator-generated getter: asserting `client.x === client.x` verifies the
// getter memoizes and returns the SAME instance on every access (presumably
// the decorators' caching contract — confirm against the decorator source).
describe('Decorator Methods', () => {
    it('ReactiveClient', () => {
        const client = new ReactiveClient({ projectPath: process.cwd() });
        // Request methods must be stable references.
        expect(client.updatebuffer).to.equal(client.updatebuffer);
        expect(client.changebuffer).to.equal(client.changebuffer);
        expect(client.codecheck).to.equal(client.codecheck);
        expect(client.formatAfterKeystroke).to.equal(client.formatAfterKeystroke);
        expect(client.codeformat).to.equal(client.codeformat);
        // Observable counterparts must be stable references too.
        expect(client.observe.updatebuffer).to.equal(client.observe.updatebuffer);
        expect(client.observe.changebuffer).to.equal(client.observe.changebuffer);
        expect(client.observe.codeformat).to.equal(client.observe.codeformat);
        expect(client.observe.packageRestoreStarted).to.equal(client.observe.packageRestoreStarted);
        expect(client.observe.events).to.equal(client.observe.events);
    });
    it('ReactiveObservationClient', () => {
        const client = new ReactiveObservationClient();
        expect(client.updatebuffer).to.equal(client.updatebuffer);
        expect(client.changebuffer).to.equal(client.changebuffer);
        expect(client.codecheck).to.equal(client.codecheck);
        expect(client.formatAfterKeystroke).to.equal(client.formatAfterKeystroke);
        expect(client.codeformat).to.equal(client.codeformat);
        expect(client.signatureHelp).to.equal(client.signatureHelp);
    });
    it('ReactiveCombinationClient', () => {
        const client = new ReactiveCombinationClient();
        expect(client.updatebuffer).to.equal(client.updatebuffer);
        expect(client.changebuffer).to.equal(client.changebuffer);
        expect(client.codecheck).to.equal(client.codecheck);
        expect(client.formatAfterKeystroke).to.equal(client.formatAfterKeystroke);
        expect(client.codeformat).to.equal(client.codeformat);
    });
});
SHARVOT: Secret SHARe-Based VOTing on the Blockchain Recently, there has been a growing interest in using online technologies to design protocols for secure electronic voting. The main challenges include vote privacy and anonymity, ballot irrevocability, and transparency throughout the vote counting process. The introduction of the blockchain as a basis for cryptocurrency protocols provides for the exploitation of the immutability and transparency properties of these distributed ledgers. In this paper, we discuss possible uses of the blockchain technology to implement a secure and fair voting system. In particular, we introduce a secret share-based voting system on the blockchain, the so-called SHARVOT protocol. Our solution uses Shamir's Secret Sharing to enable on-chain, i.e., within the transaction script, vote submission and winning-candidate determination. The protocol also uses a shuffling technique, Circle Shuffle, to de-link voters from their submissions.
def message(self, signature, status=None, rids=None, level=logging.INFO, **kwargs):
    """Format and emit one audit log line per result id (rid).

    Builds a key="value" message from the standard ModularAction fields plus
    any extra **kwargs, logs it at `level` via self.logger, and returns the
    last message string that was emitted ('' if a rid tuple was malformed).

    :param signature: free-text event description placed in signature="..."
    :param status: overrides self.action_status when given
    :param rids: list of rid_ntuple namedtuples (orig_sid, rid, orig_rid);
        when not a list, a single tuple is synthesized from self's fields
    :param level: logging level passed straight to self.logger.log
    :param kwargs: extra fields appended to the message template
    """
    status = status or self.action_status or ''
    if not isinstance(rids, list):
        rids = [self.rid_ntuple(self.orig_sid, self.rid, self.orig_rid)]
    # Keep only kwargs that are not standard message fields and whose names
    # look like identifiers ([A-Za-z_]+) so the template stays parseable.
    newargs = [x for x in kwargs\
        if (x not in ModularAction.DEFAULT_MSGFIELDS)
        and re.match('[A-Za-z_]+', x)]
    # Template renders each extra field as name="{d[name]}" for .format(d=...).
    msg = '%s %s' % (ModularAction.DEFAULT_MESSAGE,
                     ' '.join(['{i}="{{d[{i}]}}"'.format(i=i) for i in newargs]))
    # defaultdict(str) makes missing template keys render as "" instead of
    # raising KeyError during .format().
    argsdict = collections.defaultdict(str)
    argsdict.update(kwargs)
    argsdict.update({
        'signature': signature or '',
        'action_name': self.action_name or '',
        'search_name': self.search_name or '',
        'sid': self.sid or '',
        'app': self.app or '',
        'user': self.user or '',
        'action_mode': self.action_mode or '',
        'action_status': status
    })
    for rid_ntuple in rids:
        if len(rid_ntuple)==3:
            argsdict.update({
                'orig_sid': rid_ntuple.orig_sid or '',
                'rid': rid_ntuple.rid or '',
                'orig_rid': rid_ntuple.orig_rid or ''
            })
            message = msg.format(d=argsdict)
            # Strip fields that rendered empty (name="") to keep lines compact.
            for match in re.finditer('[A-Za-z_]+=\"\"(\s|$)', message):
                message = message.replace(match.group(0),'',1)
            message = message.strip()
            self.logger.log(level, message)
        else:
            self.logger.warn('Could not unpack rid_ntuple')
            message = ''
    # NOTE(review): if `rids` is an empty list, `message` is never assigned and
    # this raises NameError — confirm callers never pass [].
    return message
/**
 * Test that a command-line invocation will not crash on
 * Java/Groovy sources and the output directory is not empty.
 *
 * The numeric assertions are regression thresholds pinned to the
 * clue-common-3.24.1 fixture jar: `unmatched` is an exact count, while the
 * idMapper counters are lower bounds (>=) so minor matcher improvements do
 * not break the test.
 */
@Test
void testJavaAndGroovy() throws IOException {
    String outDir = "build/test-out-java-groovy";
    RunResult rr = generateJson("clue-common-3.24.1.jar", "clue-common-3.24.1-sources.jar", outDir);
    // Output directory must exist and be listable (i.e. generation produced files).
    assert (new File(outDir).listFiles() != null);
    // Exact expected number of unmatched entities for this fixture.
    assert (rr.unmatched == 32);
    // Lower-bound counts per entity kind: all discovered vs. successfully matched.
    assert (rr.idMapper.allTypes >= 38);
    assert (rr.idMapper.matchedTypes >= 38);
    assert (rr.idMapper.allMethods >= 267);
    assert (rr.idMapper.matchedMethods >= 205);
    assert (rr.idMapper.allFields >= 92);
    assert (rr.idMapper.matchedFields >= 92);
    assert (rr.idMapper.allInvos >= 1700);
    assert (rr.idMapper.matchedInvos >= 1631);
    assert (rr.idMapper.allAllocs >= 638);
    assert (rr.idMapper.matchedAllocs >= 637);
    assert (rr.idMapper.allMethodRefs >= 1);
    assert (rr.idMapper.matchedMethodRefs >= 1);
    assert (rr.idMapper.allFieldAccesses >= 50);
    assert (rr.idMapper.matchedFieldAccesses >= 18);
    assert (rr.idMapper.allUses >= 323);
    assert (rr.idMapper.matchedUses >= 323);
    assert (rr.idMapper.allVariables >= 372);
    assert (rr.idMapper.matchedVariables >= 370);
}
So it finally happened! Someone with a lot of patience went and created the first ever living tree chair. With some wire and tons of dedication, this guy has, through the years, formed this quite comfortable-looking tree chair that will probably live longer than both its creator and its owner. In 1998, when this picture was taken, it had already taken him 7 years to get the chair to actually look like a chair. There is no telling what the chair looks like today. Or, if anyone knows where this chair is located and lives in the vicinity, please go ask the owner if it would be OK to snap a few updated pictures. Send them to us and you will get the credit for hooking them up. Anyone other than me thinking about Lord of the Rings when looking at this chair? Simply Awesome!
Comparison of office visit and nurse advice hotline data for syndromic surveillance--Baltimore-Washington, D.C., metropolitan area, 2002. INTRODUCTION Kaiser Permanente of the Mid-Atlantic States (KPMAS) is collaborating with the Electronic Surveillance System for Early Notification of Community-Based Epidemics II (ESSENCE II) program to understand how managed-care data can be effectively used for syndromic surveillance. OBJECTIVES This study examined whether KPMAS nurse advice hotline data would be able to predict the syndrome diagnoses made during subsequent KPMAS office visits. METHODS All nurse advice hotline calls during 2002 that were linked to an outpatient office visit were identified. By using International Classification of Diseases, Ninth Revision (ICD-9) codes, outpatient visits were categorized into seven ESSENCE II syndrome groups (coma, gastrointestinal, respiratory, neurologic, hemorrhagic, infectious dermatologic, and fever). Nurse advice hotline calls were categorized into ESSENCE II syndrome groups on the basis of the advice guidelines assigned. For each syndrome group, the sensitivity, specificity, and positive predictive value of hotline calls were calculated by using office visits as a diagnostic standard. For matching syndrome call-visit pairs, the lag (i.e., the number of hours that elapsed between the date and time the patient spoke to an advice nurse and the date and time the patient made an office visit) was calculated. RESULTS Of all syndrome groups, the sensitivity of hotline calls for respiratory syndrome was highest (74.7%), followed by hotline calls for gastrointestinal syndrome (72.0%). The specificity of all nurse advice syndrome groups ranged from 88.9% to 99.9%. The mean lag between hotline calls and office visits ranged from 8.3 to 50 hours, depending on the syndrome group. 
CONCLUSIONS The timeliness of hotline data capture compared with office visit data capture, as well as the sensitivity and specificity of hotline calls for detecting respiratory and gastrointestinal syndromes, indicate that KPMAS nurse advice hotline data can be used to predict KPMAS syndromic outpatient office visits.
/**
 * Translates a class literal into the corresponding JVM type tag.
 * <p>
 * Each primitive class (and {@code void.class}) has its own tag; every
 * reference type — arrays, interfaces, classes — collapses to {@code OBJECT}.
 *
 * @param clazz class to be converted
 * @return the corresponding JVM type
 */
public static JvmType classToJvmType(final Class clazz) {
    // Primitive class literals are canonical singletons, so identity
    // comparison is exact; the checks are mutually exclusive, so order
    // does not affect the result.
    if (clazz == void.class) {
        return VOID;
    }
    if (clazz == boolean.class) {
        return BOOLEAN;
    }
    if (clazz == char.class) {
        return CHAR;
    }
    if (clazz == byte.class) {
        return BYTE;
    }
    if (clazz == short.class) {
        return SHORT;
    }
    if (clazz == int.class) {
        return INTEGER;
    }
    if (clazz == long.class) {
        return LONG;
    }
    if (clazz == float.class) {
        return FLOAT;
    }
    if (clazz == double.class) {
        return DOUBLE;
    }
    // Everything else is a reference type.
    return OBJECT;
}
/* NOTE: intentionally static, as only LogSrcDriver or LogDestDriver will derive from LogDriver */
/* Initializes the embedded LogPipe of a LogDriver and wires up the driver's
 * virtual methods (free/init/deinit) so subclasses inherit this behaviour. */
static void
log_driver_init_instance(LogDriver *self, GlobalConfig *cfg)
{
  /* initialize the base LogPipe part first */
  log_pipe_init_instance(&self->super, cfg);
  /* override the LogPipe vtable entries with the driver-level implementations */
  self->super.free_fn = log_driver_free;
  self->super.init = log_driver_init_method;
  self->super.deinit = log_driver_deinit_method;
}
<reponame>GhostMachineSoftware/SPFX_UsefulLinks export * from './Grid'; export * from './Grid.types'; export * from './GridCell'; export * from './GridCell.types';
// CloneBranch updates the given CQ config to create a config for a new
// branch based on a given existing branch. Optionally, include experimental
// tryjobs, include the tree-is-open check, and exclude trybots matching regular
// expressions.
//
// The new config group is appended to cfg.ConfigGroups; the old group is left
// untouched (nested messages that could differ are copied, not aliased).
func CloneBranch(cfg *config.Config, oldBranch, newBranch string, includeExperimental, includeTreeCheck bool, excludeTrybotRegexp []*regexp.Regexp) error {
	// Locate the config group governing the old branch's ref.
	oldRef := fmt.Sprintf("refs/heads/%s", oldBranch)
	oldCg, oldGerrit, oldProject, err := MatchConfigGroup(cfg, oldRef)
	if err != nil {
		return fmt.Errorf("Failed to find config group for %q: %s", oldRef, err)
	}
	if oldCg == nil {
		return fmt.Errorf("No config group matches %q", oldRef)
	}
	// Build the new group targeting only the new branch's ref, reusing the
	// old group's Gerrit host and project.
	newCg := &config.ConfigGroup{
		Name: newBranch,
		Gerrit: []*config.ConfigGroup_Gerrit{
			{
				Url: oldGerrit.Url,
				Projects: []*config.ConfigGroup_Gerrit_Project{
					{
						Name: oldProject.Name,
						RefRegexp: []string{
							fmt.Sprintf("refs/heads/%s", newBranch),
						},
					},
				},
			},
		},
	}
	// Deep-copy CombineCls so later mutation of one group cannot affect the other.
	if oldCg.CombineCls != nil {
		newCg.CombineCls = &config.CombineCLs{}
		if oldCg.CombineCls.StabilizationDelay != nil {
			newCg.CombineCls.StabilizationDelay = &duration.Duration{
				Seconds: oldCg.CombineCls.StabilizationDelay.Seconds,
				Nanos:   oldCg.CombineCls.StabilizationDelay.Nanos,
			}
		}
	}
	if oldCg.Verifiers != nil {
		newCg.Verifiers = &config.Verifiers{
			GerritCqAbility: oldCg.Verifiers.GerritCqAbility,
			Fake:            oldCg.Verifiers.Fake,
		}
		// Tree-is-open check is carried over only on request.
		if includeTreeCheck {
			newCg.Verifiers.TreeStatus = oldCg.Verifiers.TreeStatus
		}
		if oldCg.Verifiers.Tryjob != nil {
			// Filter the tryjob list: drop builders matching any exclusion
			// regexp, and drop experimental builders unless requested.
			tryjobs := make([]*config.Verifiers_Tryjob_Builder, 0, len(oldCg.Verifiers.Tryjob.Builders))
			for _, tj := range oldCg.Verifiers.Tryjob.Builders {
				exclude := false
				for _, re := range excludeTrybotRegexp {
					if re.MatchString(tj.Name) {
						exclude = true
						break
					}
				}
				if tj.ExperimentPercentage != 0.0 && !includeExperimental {
					exclude = true
				}
				if !exclude {
					tryjobs = append(tryjobs, tj)
				}
			}
			newCg.Verifiers.Tryjob = &config.Verifiers_Tryjob{
				Builders:    tryjobs,
				RetryConfig: oldCg.Verifiers.Tryjob.RetryConfig,
			}
		}
	}

	cfg.ConfigGroups = append(cfg.ConfigGroups, newCg)
	return nil
}
An hour just won’t be enough with this guest. That’s a certainty. Somehow, someway, the Worst Show on the Web will be joined by literary royalty – Mr. Jeff Martin! Jeff was a writer for The Simpsons during their most hilarious and formative years. On top of the great scripts he penned, he was also responsible for many of the songs that have wedged themselves into your brains over the last 20 years. Do you still sing “Monorail” whenever you see one? Or “Capitol City?” Have you ever crooned to anyone within earshot, the lyrics “I am just a simple paper boy, no romance do I seek…?” Well, this guy did that to you! Jeff also was a writer on Late Night with David Letterman for eight years. That’s right, he knew Dave when he was Johnny Carson’s Seth Meyers. And, again, somehow, someway, he’s going to be on our show! We’re beyond excited! To watch the episode click this button that I’m about to type right over HERE! Advertisements
def _do_init_argparser(self, parser, defaults):
    """Register all command-line arguments for the generate-bom command.

    Extends the base factory's arguments with the BOM-related options from
    the build, image and source command modules, plus this command's own
    --bom_path flag.

    :param parser: argparse parser being populated
    :param defaults: dict of default option values passed through to helpers
    """
    super(GenerateBomCommandFactory, self)._do_init_argparser(parser, defaults)
    # Shared BOM arguments contributed by the other buildtool modules.
    buildtool.build_commands.add_bom_parser_args(parser, defaults)
    buildtool.image_commands.add_bom_parser_args(parser, defaults)
    buildtool.source_commands.FetchSourceCommandFactory.add_fetch_parser_args(
        parser, defaults)
    # Where to write the generated BOM (None means no default).
    self.add_argument(
        parser, 'bom_path', defaults, None,
        help='Generate the BOM and write it to the given path.')
<reponame>ajssmith/skupper-docker<gh_stars>1-10 package client import ( "fmt" "log" "time" "github.com/skupperproject/skupper-docker/api/types" "github.com/skupperproject/skupper-docker/pkg/docker" "github.com/skupperproject/skupper-docker/pkg/qdr" ) func (cli *VanClient) RouterInspect() (*types.RouterInspectResponse, error) { vir := &types.RouterInspectResponse{} transport, err := docker.InspectContainer("skupper-router", cli.DockerInterface) if err != nil { log.Println("Failed to retrieve transport container (need init?): ", err.Error()) return vir, err } vir.TransportVersion, err = docker.GetImageVersion(transport.Config.Image, cli.DockerInterface) if err != nil { log.Println("Failed to retrieve transport container version:", err.Error()) return vir, err } vir.Status.State = transport.State.Status controller, err := docker.InspectContainer(types.ControllerDeploymentName, cli.DockerInterface) if err != nil { log.Println("Failed to retrieve controller container (need init?): ", err.Error()) return vir, err } vir.ControllerVersion, err = docker.GetImageVersion(controller.Config.Image, cli.DockerInterface) if err != nil { log.Println("Failed to retrieve controller container version:", err.Error()) return vir, err } routerConfig, err := qdr.GetRouterConfigFromFile(types.GetSkupperPath(types.ConfigPath) + "/qdrouterd.json") if err != nil { return vir, fmt.Errorf("Failed to retrieve router config: %w", err) } vir.Status.Mode = string(routerConfig.Metadata.Mode) connected, err := qdr.GetConnectedSites(cli.DockerInterface) for i := 0; i < 5 && err != nil; i++ { time.Sleep(500 * time.Millisecond) connected, err = qdr.GetConnectedSites(cli.DockerInterface) } if err != nil { return vir, err } vir.Status.ConnectedSites = connected vsis, err := cli.ServiceInterfaceList() if err != nil { vir.ExposedServices = 0 } else { vir.ExposedServices = len(vsis) } return vir, err }
/* Dispatch tables for the Chaos VM CPU instruction decoder.
 *
 * The g_..._TableForOne_XX tables appear to be the per-group argument
 * dispatch tables for the x86 opcodes whose operation is selected by the
 * ModR/M reg field (opcode "extension groups"): FF/FE (INC/DEC group),
 * C6/C7 (MOV imm group), F6/F7 (TEST/NOT/NEG/MUL/DIV group), 80/81/83
 * (immediate ALU group), 8F (POP group), C0/C1/D0-D3 (shift/rotate group)
 * and 0FBA (BT group) — NOTE(review): confirm against the decode code.
 * Each table has 8 entries, one per reg-field value (0..7); C7 carries an
 * extra leading dimension of 2. */
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_FF[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_FE[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_C6[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_C7[0x02][0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_F6[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_F7[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_80[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_81[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_83[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_8F[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_C0[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_C1[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_D0[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_D1[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_D2[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForOne_D3[0x08];
__EXTERN__ PARGS_DISPATCH_TABLE_ENTRY g_ChaosVmCpuDefOpcodeExtensionTableForTwo_0FBA[0x08];
/* ModR/M analysis: one routine per addressing-size mode (2 entries). */
__EXTERN__ MODRM_BYTE_ANALYSE_ROUTINE_TABLE_ENTRY g_ChaosVmCpuDefModRMAnalyseRoutineTable[0x02];
/* 16 EFLAGS condition entries — matches the 0..F condition-code encodings. */
__EXTERN__ EFLAG_CONDITION_TABLE_ENTRY g_ChaosVmCpuDefEFlagContionTable[0x10];
/* Primary decode tables: 256 one-byte opcodes and 256 0F-prefixed opcodes. */
__EXTERN__ ONE_OPCODE_TABLE_ENTRY g_ChaosVmCpuDefOneByteOpcodeTable[0x100];
__EXTERN__ TWO_BYTE_OPCODE_TABLE_ENTRY g_ChaosVmCpuDefTwoByteOpcodeTable[0x100];
/**
 * Spawns a rideable horse mount entity in the given world.
 *
 * @param location The spawn location of the horse.
 * @param color the color of the horse.
 * @param name The display name of the horse (rendered in green).
 *
 * @return The created horse entity.
 */
public static org.bukkit.entity.Horse create(Location location, Horse.Color color, String name) {
    // Unwrap the Bukkit world to its NMS handle for entity construction.
    final WorldServer nmsWorld = ((CraftWorld) location.getWorld()).getHandle();
    // Bukkit and NMS color enums share constant names, so a name-based
    // lookup performs the translation.
    final HorseColor nmsColor = HorseColor.valueOf(color.toString());
    final NmsEntityHorseMount mount =
            new NmsEntityHorseMount(nmsWorld, location, nmsColor, ChatColor.GREEN + name);
    return (Horse) mount.getBukkitEntity();
}
// Type definitions for dts-generator 2.1
// Project: https://github.com/SitePen/dts-generator#readme
// Definitions by: <NAME> <https://github.com/mtraynham>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 3.4

import ts = require('typescript');
import Bluebird = require('bluebird');

export = dtsGenerator;

/** Generates a bundled .d.ts file according to the given options; resolves when writing completes. */
declare function dtsGenerator(options: dtsGenerator.DtsGeneratorOptions): Bluebird<void>;

declare namespace dtsGenerator {
    interface ResolveModuleIdParams {
        /** The identifier of the module being declared in the generated d.ts */
        currentModuleId: string;
    }
    interface ResolveModuleImportParams {
        /** The identifier of the module currently being imported in the generated d.ts */
        importedModuleId: string;
        /** The identifier of the enclosing module currently being declared in the generated d.ts */
        currentModuleId: string;
        /** True if the imported module id is declared as a module in the input files. */
        isDeclaredExternalModule: boolean;
    }
    interface DtsGeneratorOptions {
        /**
         * The base directory for the package being bundled. Any dependencies discovered outside this directory will be excluded
         * from the bundle.
         * Note this is no longer the preferred way to configure dts-generator, please see project.
         */
        baseDir?: string | undefined;
        /**
         * The base directory for the project being bundled. It is assumed that this directory contains a
         * tsconfig.json which will be parsed to determine the files that should be bundled as well as
         * other configuration information like target
         */
        project?: string | undefined;
        /** A list of files from the baseDir to bundle. */
        files?: string[] | undefined;
        /**
         * A list of glob patterns, relative to baseDir, that should be excluded from the bundle.
         * Use the --exclude flag one or more times on the command-line. Defaults to [ "node_modules\/**\/*.d.ts" ].
         */
        exclude?: string[] | undefined;
        /**
         * A list of external module reference paths that should be inserted as reference comments.
         * Use the --extern flag one or more times on the command-line.
         */
        externs?: string[] | undefined;
        /**
         * A list of external @types package dependencies that should be inserted as reference comments.
         * Use the --types flag one or more times on the command-line.
         */
        types?: string[] | undefined;
        /** The end-of-line character that should be used when outputting code. Defaults to os.EOL. */
        eol?: string | undefined;
        /** Undocumented upstream — presumably additional files/globs to include; confirm against dts-generator docs. */
        includes?: string[] | undefined;
        /** The character(s) that should be used to indent the declarations in the output. Defaults to \t. */
        indent?: string | undefined;
        /** The module ID that should be used as the exported value of the package’s “main” module. */
        main?: string | undefined;
        /** The type of module resolution to use when generating the bundle. */
        moduleResolution?: ts.ModuleResolutionKind | undefined;
        /** The name of the package. Used to determine the correct exported package name for modules. */
        name: string;
        /** The filename where the generated bundle will be created. */
        out: string;
        /** Undocumented upstream — presumably an output directory for generated files; confirm against dts-generator docs. */
        outDir?: string | undefined;
        /** Undocumented upstream — presumably a prefix applied to generated module ids; confirm against dts-generator docs. */
        prefix?: string | undefined;
        /** Undocumented upstream — presumably the root of the source tree; confirm against dts-generator docs. */
        rootDir?: string | undefined;
        /** The target environment for generated code. Defaults to ts.ScriptTarget.Latest. */
        target?: ts.ScriptTarget | undefined;
        /** Optional logging callback (console.log-compatible signature). */
        sendMessage?: ((message: any, ...optionalParams: any[]) => void) | undefined;
        /**
         * An optional callback provided by the invoker to customize the declared module ids the output d.ts files.
         * @see {@link https://github.com/SitePen/dts-generator/blob/master/docs/resolving-module-ids.md Resolving Module Ids}
         */
        resolveModuleId?(params: ResolveModuleIdParams): string;
        /**
         * An optional callback provided by the invoker to customize the imported module ids in the output d.ts files.
         * @see {@link https://github.com/SitePen/dts-generator/blob/master/docs/resolving-module-ids.md Resolving Module Ids}
         */
        resolveModuleImport?(params: ResolveModuleImportParams): string;
        /** Emit verbose diagnostic output while generating. */
        verbose?: boolean | undefined;
    }
}
/** * General utility functions for org.solrmarc * * @author Wayne Graham * @version $Id: Utils.java 1581 2011-12-19 21:21:52Z [email protected] $ */ public final class Utils { protected static Logger logger = Logger.getLogger(Utils.class.getName()); /** * Default Constructor It's private, so it can't be instantiated by other * objects * */ private Utils() { } /** * load a properties file into a Properties object * * @param propertyPaths * the directories to search for the properties file * @param propertyFileName * name of the sought properties file * @return Properties object */ static Properties loadProperties(String propertyPaths[], String propertyFileName) { return (loadProperties(propertyPaths, propertyFileName, false, null)); } /** * load a properties file into a Properties object * * @param propertyPaths * the directories to search for the properties file * @param propertyFileName * name of the sought properties file * @param showName * whether the name of the file/resource being read should be shown. 
* @return Properties object */ private static Properties loadProperties(String propertyPaths[], String propertyFileName, boolean showName, String filenameProperty) { String inputStreamSource[] = new String[] { null }; InputStream in = getPropertyFileInputStream(propertyPaths, propertyFileName, showName, inputStreamSource); String errmsg = "Fatal error: Unable to find specified properties file: " + propertyFileName; // load the properties Properties props = new Properties(); try { if (propertyFileName.endsWith(".xml") || propertyFileName.endsWith(".XML")) { props.loadFromXML(in); } else { props.load(in); } in.close(); if (filenameProperty != null && inputStreamSource[0] != null) { File tmpFile = new File(inputStreamSource[0]); props.setProperty(filenameProperty, tmpFile.getParent()); } } catch (IOException e) { throw new IllegalArgumentException(errmsg); } return props; } @SuppressWarnings("resource") private static InputStream getPropertyFileInputStream(String[] propertyPaths, String propertyFileName, boolean showName, String inputSource[]) { InputStream in = null; // look for properties file in paths String verboseStr = System.getProperty("marc.test.verbose"); boolean verbose = (verboseStr != null && verboseStr.equalsIgnoreCase("true")); String lookedIn = ""; if (propertyPaths != null) { File propertyFile = new File(propertyFileName); int pathCnt = 0; do { if (propertyFile.exists() && propertyFile.isFile() && propertyFile.canRead()) { try { in = new FileInputStream(propertyFile); if (inputSource != null && inputSource.length >= 1) { inputSource[0] = propertyFile.getAbsolutePath(); } if (showName){ logger.info("Opening file: " + propertyFile.getAbsolutePath()); }else{ logger.debug("Opening file: " + propertyFile.getAbsolutePath()); } } catch (FileNotFoundException e) { // simply eat this exception since we should only try to open the // file if we previously // determined that the file exists and is readable. } break; // we found it! 
} if (verbose) lookedIn = lookedIn + propertyFile.getAbsolutePath() + "\n"; if (propertyPaths != null && pathCnt < propertyPaths.length) { propertyFile = new File(propertyPaths[pathCnt], propertyFileName); } pathCnt++; } while (propertyPaths != null && pathCnt <= propertyPaths.length); } // if we didn't find it as a file, look for it as a URL String errmsg = "Fatal error: Unable to find specified properties file: " + propertyFileName; if (verbose) errmsg = errmsg + "\n Looked in: " + lookedIn; if (in == null) { Utils utilObj = new Utils(); URL url = utilObj.getClass().getClassLoader().getResource(propertyFileName); if (url == null) url = utilObj.getClass().getResource("/" + propertyFileName); if (url == null) { logger.error(errmsg); throw new IllegalArgumentException(errmsg); } if (showName) { logger.info("Opening resource via URL: " + url.toString()); } else { logger.debug("Opening resource via URL: " + url.toString()); } /* * if (url == null) url = * utilObj.getClass().getClassLoader().getResource(propertyPath + "/" + * propertyFileName); if (url == null) url = * utilObj.getClass().getResource("/" + propertyPath + "/" + * propertyFileName); */ try { in = url.openStream(); } catch (IOException e) { throw new IllegalArgumentException(errmsg); } } return (in); } private static Pattern cleanJrSrPattern = Pattern.compile(".*[JS]r\\.$"); private static Pattern cleaner1Pattern = Pattern.compile(".*\\w\\w\\.$"); private static Pattern cleaner2Pattern = Pattern.compile(".*\\p{L}\\p{L}\\.$"); private static Pattern cleaner3Pattern = Pattern.compile(".*\\w\\p{InCombiningDiacriticalMarks}?\\w\\p{InCombiningDiacriticalMarks}?\\.$"); private static Pattern cleaner4Pattern = Pattern.compile(".*\\p{Punct}\\.$"); /** * Removes trailing characters (space, comma, slash, semicolon, colon), * trailing period if it is preceded by at least three letters, and single * square bracket characters if they are the start and/or end chars of the * cleaned string * * @param origStr * String to 
clean * @return cleaned string */ public static String cleanData(String origStr) { String currResult = origStr; String prevResult; do { prevResult = currResult; currResult = currResult.trim(); currResult = currResult.replaceAll(" *([,/;:])$", ""); // trailing period removed in certain circumstances if (currResult.endsWith(".")) { if (cleanJrSrPattern.matcher(currResult).matches()) { // dont strip period off of Jr. or Sr. } else if (cleaner1Pattern.matcher(currResult).matches()) { currResult = currResult.substring(0, currResult.length() - 1); } else if (cleaner2Pattern.matcher(currResult).matches()) { currResult = currResult.substring(0, currResult.length() - 1); } else if (cleaner3Pattern.matcher(currResult).matches()) { currResult = currResult.substring(0, currResult.length() - 1); } else if (cleaner4Pattern.matcher(currResult).matches()) { currResult = currResult.substring(0, currResult.length() - 1); } } currResult = removeOuterBrackets(currResult); if (currResult.length() == 0) return currResult; } while (!currResult.equals(prevResult)); // if (!currResult.equals(origStr)) // System.out.println(origStr + " -> "+ currResult); return currResult; } /** * Remove single square bracket characters if they are the start and/or end * chars (matched or unmatched) and are the only square bracket chars in the * string. 
*/ private static String removeOuterBrackets(String origStr) { if (origStr == null || origStr.length() == 0) return origStr; String result = origStr.trim(); if (result.length() > 0) { boolean openBracketFirst = result.charAt(0) == '['; boolean closeBracketLast = result.endsWith("]"); if (openBracketFirst && closeBracketLast && result.indexOf('[', 1) == -1 && result.lastIndexOf(']', result.length() - 2) == -1) // only square brackets are at beginning and end result = result.substring(1, result.length() - 1); else if (openBracketFirst && result.indexOf(']') == -1) // starts with '[' but no ']'; remove open bracket result = result.substring(1); else if (closeBracketLast && result.indexOf('[') == -1) // ends with ']' but no '['; remove close bracket result = result.substring(0, result.length() - 1); } return result.trim(); } /** * Remap a field value. If the field value is not present in the map, then: if * "displayRawIfMissing" is a key in the map, then the raw field value is * used. if "displayRawIfMissing" is not a key in the map, and the * allowDefault param is set to true, then if the map contains "__DEFAULT" as * a key, the value of "__DEFAULT" in the map is used; if allowDefault is true * and there is neither "displayRawIfMissing" nor "__DEFAULT", as a key in the * map, then if the map contains an empty key, the map value of the empty key * is used. NOTE: If the spec for a field is supposed to contain all matching * values, then the default lookup needs to be done here. If the spec for a * field is only supposed to return the first matching mappable value, then * the default mapping should be done in the calling method * * @param fieldVal * - the raw value to be mapped * @param map * - the map to be used * @param allowDefault * - if "displayRawIfMissing" is not a key in the map, and this is to * true, then if the map contains "__DEFAULT" as a key, the value of * "__DEFAULT" in the map is used. * @return the new value, as determined by the mapping. 
*/ static String remap(String fieldVal, Map<String, String> map, boolean allowDefault) { String result = null; if (map.keySet().contains("pattern_0")) { for (int i = 0; i < map.keySet().size(); i++) { String patternStr = map.get("pattern_" + i); String parts[] = patternStr.split("=>"); if (containsMatch(fieldVal, parts[0])) { String newVal = parts[1]; if (parts[1].contains("$")) { newVal = fieldVal.replaceAll(parts[0], parts[1]); fieldVal = newVal; } result = newVal; } } } if (map.containsKey(fieldVal)) { result = map.get(fieldVal); } else if (map.containsKey("displayRawIfMissing")) { result = fieldVal; } else if (allowDefault && map.containsKey("__DEFAULT")) { result = map.get("__DEFAULT"); } else if (allowDefault && map.containsKey("")) { result = map.get(""); } if (result == null || result.length() == 0) return null; return result; } private static boolean containsMatch(String val, String pattern) { String rep = val.replaceFirst(pattern, "###match###"); if (!rep.equals(val)) { return true; } return false; } }
/**
 * Object for setting up playback of audio clips with {@link AudioButton} and
 * controls. Only one clip can be played at once so when a clip is
 * played from a view or from the `play` method any currently playing audio will stop.
 * <p>
 * Clips are identified using a `clipID` which enables the playback state of clips to survive
 * configuration changes etc. Two views should not use the same `clipID` unless they are intended
 * to have the same playback state i.e. when one is played the other also appears to be playing.
 * This allows for different controls to play the same file but not appear to all be playing at once.
 * <p>
 * An {@link AudioHelper} instance is designed to live at an {@link android.app.Activity} level.
 * However, the underlying implementation uses a {@link androidx.lifecycle.ViewModel} so it is safe to
 * construct multiple instances (within a {@link android.view.View} or
 * {@link androidx.fragment.app.Fragment} for instance) if needed within one
 * {@link android.app.Activity}.
 *
 * @deprecated wrapping the ViewModel like this doesn't really fit with other ways we've integrated
 * widgets with "external" services. Instead of this widgets should talk to {@link AudioPlayer}
 * and the Activity/Fragment components should talk to the ViewModel itself.
 */
@Deprecated
public class AudioHelper {

    private final LifecycleOwner lifecycleOwner;
    private final AudioClipViewModel viewModel;

    public AudioHelper(FragmentActivity activity, LifecycleOwner lifecycleOwner, Scheduler scheduler, Supplier<MediaPlayer> mediaPlayerFactory) {
        this.lifecycleOwner = lifecycleOwner;
        // The ViewModel is scoped to the Activity, so every AudioHelper built
        // for the same Activity shares one playback state.
        AudioClipViewModel.Factory factory = new AudioClipViewModel.Factory(mediaPlayerFactory, scheduler);
        viewModel = new ViewModelProvider(activity, factory).get(AudioClipViewModel.class);
        registerLifecycleCallbacks(activity, lifecycleOwner);
    }

    /**
     * @param button The control being used for playback
     * @param clip The clip to be played
     * @return A {@link LiveData} value representing whether this clip is playing or not
     */
    public LiveData<Boolean> setAudio(AudioButton button, Clip clip) {
        AudioClipViewModel viewModel = this.viewModel;

        // Keep the button's visual state in sync with this clipID's playback state.
        LiveData<Boolean> isPlaying = viewModel.isPlaying(clip.getClipID());

        isPlaying.observe(lifecycleOwner, button::setPlaying);
        button.setListener(new AudioButtonListener(viewModel, clip.getURI(), clip.getClipID()));

        return isPlaying;
    }

    /** Plays the given clip, stopping any clip that is currently playing. */
    public void play(Clip clip) {
        viewModel.play(clip);
    }

    /** Queues the given clips to play back to back, in list order. */
    public void playInOrder(List<Clip> clips) {
        viewModel.playInOrder(clips);
    }

    /** Stops any current playback. */
    public void stop() {
        viewModel.stop();
    }

    /** @return observable stream of playback errors raised by the ViewModel. */
    public LiveData<Exception> getError() {
        return viewModel.getError();
    }

    /** Tells the ViewModel the current error has been shown to the user. */
    public void errorDisplayed() {
        viewModel.errorDisplayed();
    }

    // Pause playback when either the Activity or the (possibly shorter-lived)
    // owner goes to the background; an observer is registered on both.
    private void registerLifecycleCallbacks(FragmentActivity activity, LifecycleOwner lifecycleOwner) {
        activity.getLifecycle().addObserver(new BackgroundObserver(viewModel));
        lifecycleOwner.getLifecycle().addObserver(new BackgroundObserver(viewModel));
    }

    /** Routes button play/stop taps to the shared ViewModel for one clip. */
    private static class AudioButtonListener implements AudioButton.Listener {

        private final AudioClipViewModel viewModel;
        private final String uri;
        private final String buttonID;

        AudioButtonListener(AudioClipViewModel viewModel, String uri, String buttonID) {
            this.viewModel = viewModel;
            this.uri = uri;
            this.buttonID = buttonID;
        }

        @Override
        public void onPlayClicked() {
            viewModel.play(new Clip(buttonID, uri));
        }

        @Override
        public void onStopClicked() {
            viewModel.stop();
        }
    }

    /** Backgrounds (pauses) playback when the observed lifecycle pauses. */
    private static class BackgroundObserver implements LifecycleObserver {

        private final AudioClipViewModel viewModel;

        BackgroundObserver(AudioClipViewModel viewModel) {
            this.viewModel = viewModel;
        }

        @OnLifecycleEvent(Lifecycle.Event.ON_PAUSE)
        void onPause() {
            viewModel.background();
        }
    }
}
#include <bits/stdc++.h> using namespace std; int main() { int t; cin>>t; while (t--) { int n; cin>>n; pair<int, int> ans; int x=0,y =0; int temp = sqrt(n); if(temp*temp==n) { ans = {temp, 1}; } else { int prev_sqr = temp; temp = temp+1; int diff = (temp*temp-prev_sqr*prev_sqr)/2; int num_diff = (temp*temp) - n; if (num_diff<diff) { // x = 1+num_diff; // y = temp; x = temp; y = 1+num_diff; // cout<<x<<" "<<y<<"\n"; ans = {x, y}; } else if (num_diff==diff) { x = temp, y = temp; ans = {x, y}; } else { y = temp-(num_diff-diff); x = temp; ans = {y, x}; } } cout<<ans.first<<" "<<ans.second<<"\n"; } return 0; }
package model;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import javax.persistence.Table;
import javax.validation.constraints.NotNull;

import org.hibernate.validator.constraints.*;
import org.openntf.domino.*;

import util.JSFUtil;

import com.ibm.commons.util.StringUtil;
import com.ibm.xsp.model.TabularDataModel;

import frostillicus.xsp.bean.ApplicationScoped;
import frostillicus.xsp.bean.ManagedBean;
import frostillicus.xsp.model.domino.AbstractDominoManager;
import frostillicus.xsp.model.domino.AbstractDominoModel;
import frostillicus.xsp.model.domino.DominoModelList;
import frostillicus.xsp.util.FrameworkUtils;

/**
 * Domino-document-backed model for a Task. Field values are read and written
 * through the AbstractDominoModel value map ({@code getValue}/{@code setValueImmediate});
 * the annotated fields below drive bean validation and binding.
 *
 * Fix: removed a stray {@code <gh_stars>} scaffolding token that preceded the
 * package declaration and broke compilation.
 */
@Table(name="Task")
public class Task extends AbstractDominoModel {
    private static final long serialVersionUID = 1L;

    @NotEmpty
    String summary;
    @NotNull
    TimeFrame timeFrame;
    TaskType type;
    List<String> tags;
    java.sql.Date due;
    TaskStatus status;

    /**
     * Seeds a freshly created document: records the effective user as the
     * creator and opens the task in "Open" status.
     */
    @Override
    public void initFromDatabase(final Database database) {
        super.initFromDatabase(database);

        setValueImmediate("CreatedBy", database.getParent().getEffectiveUserName());
        setValueImmediate("Status", "Open");
    }

    /** Document items treated as rich text. */
    @Override
    protected Collection<String> richTextFields() {
        return Arrays.asList(new String[] { "Body" });
    }

    /** Document items treated as Authors fields (write access control). */
    @Override
    protected Collection<String> authorsFields() {
        return Arrays.asList(new String[] { "Assignee" });
    }

    /** @return the parent Project, or null when no ProjectID is set */
    public Project getProject() {
        String projectId = (String)getValue("ProjectID");
        if(projectId == null || projectId.isEmpty()) {
            return null;
        }
        return (Project)Project.Manager.get().getValue(projectId);
    }

    /** @return the associated Client, or null when no ClientID is set */
    public Client getClient() {
        String clientId = (String)getValue("ClientID");
        if(clientId == null || clientId.isEmpty()) {
            return null;
        }
        return (Client)Client.Manager.get().getValue(clientId);
    }

    /** @return the parent Task, or null when this task is top-level */
    public Task getParentTask() {
        String parentId = (String)getValue("ParentID");
        if(StringUtil.isEmpty(parentId)) {
            return null;
        }
        return (Task)Task.Manager.get().getValue(parentId);
    }

    public static enum TimeFrame {
        Normal, Rush, Urgent
    }
    public static enum TaskType {
        Normal, Question
    }
    public static enum TaskStatus {
        Open, InProgress, SubmittedToClient, Closed, Canceled, OnHold
    }

    /** Application-scoped manager/DAO for Task documents. */
    @ManagedBean(name="Tasks")
    @ApplicationScoped
    public static class Manager extends AbstractDominoManager<Task> {
        private static final long serialVersionUID = 1L;

        /**
         * Resolves the managed-bean instance; falls back to a throwaway
         * instance when the bean cannot be resolved (e.g. outside a faces
         * context).
         */
        public static Manager get() {
            Task.Manager instance = (Task.Manager)FrameworkUtils.resolveVariable(Task.Manager.class.getAnnotation(ManagedBean.class).name());
            return instance == null ? new Task.Manager() : instance;
        }

        @Override
        protected Database getDatabase() {
            return JSFUtil.getDataDatabase();
        }

        @Override
        protected String getViewPrefix() {
            // Backslash is the Notes view-hierarchy separator.
            return "Tasks\\";
        }

        /**
         * Named shortcuts over the raw views: "mine", "clientPending",
         * "clientClosed" and "unassigned"; anything else falls through to the
         * superclass lookup.
         */
        @Override
        public DominoModelList<Task> getNamedCollection(final String name, final String category) {
            if("mine".equalsIgnoreCase(name)) {
                return getNamedCollection("Active by Assignee", FrameworkUtils.getUserName());
            } else if("clientPending".equalsIgnoreCase(name)) {
                DominoModelList<Task> tasks = getNamedCollection("Active", null);
                tasks.search(Client.Manager.get().getClientQuery());
                tasks.setResortOrder("Due", TabularDataModel.SORT_ASCENDING);
                return tasks;
            } else if("clientClosed".equalsIgnoreCase(name)) {
                DominoModelList<Task> tasks = getNamedCollection("Closed", null);
                tasks.search(Client.Manager.get().getClientQuery());
                tasks.setResortOrder("$NoDateSorter", TabularDataModel.SORT_ASCENDING);
                return tasks;
            } else if("unassigned".equalsIgnoreCase(name)) {
                return getNamedCollection("Active by Assignee", "");
            }
            return super.getNamedCollection(name, category);
        }
    }
}
/** * Ignores the class field (i.e. getClass) which should not be copied. * * @author Ian Rae * */ public class DefaultFieldFilter implements FieldFilter { @Override public boolean shouldProcess(Class<?> clazz, String fieldName) { if ("class".equals(fieldName)) { return false; // No point in trying to set an object's class } return true; } }
#!/usr/bin/python # Generate peer list. # usage: ./genlist.py [filename] [number_of_hosts] import sys import json startport = 8000 def main(): if len(sys.argv) != 3: print "usage:", sys.argv[0], "[filename] [number_of_hosts]" return filename = sys.argv[1] n = int(sys.argv[2]) f = open(filename, "w+") peers = [] for i in range (0, int(n)): peer = "localhost:%d" % (startport+i) peers.append(peer) f.write(json.dumps(peers)) f.close() if __name__ == "__main__": main()
{-# LANGUAGE FunctionalDependencies #-} {-# LANGUAGE MultiParamTypeClasses #-} {-| 'Snap.Extension.Heist' exports the 'MonadHeist' interface which allows you to integrate Heist templates into your Snap application. The interface's operations are 'heistServe', 'heistServeSingle', 'heistLocal' and 'render'. 'Snap.Extension.Heist.Heist' contains the only implementation of this interface and can be used to turn your application's monad into a 'MonadHeist'. 'MonadHeist' is unusual among Snap extensions in that it's a multi-parameter typeclass. The last parameter is your application's monad, and the first is the monad you want the 'TemplateState' to use. This is usually, but not always, also your application's monad. -} module Snap.Extension.Heist (MonadHeist(..)) where import Control.Applicative import Data.ByteString (ByteString) import Snap.Types import Text.Templating.Heist ------------------------------------------------------------------------------ -- | The 'MonadHeist' type class. Minimal complete definition: 'render', -- 'heistLocal'. class (Monad n, MonadSnap m) => MonadHeist n m | m -> n where -- | Renders a template as text\/html. If the given template is not found, -- this returns 'empty'. render :: ByteString -> m () -- | Runs an action with a modified 'TemplateState'. You might want to use -- this if you had a set of splices which were customised for a specific -- action. To do that you would do: -- -- > heistLocal (bindSplices mySplices) $ render "myTemplate" heistLocal :: (TemplateState n -> TemplateState n) -> m a -> m a -- | Analogous to 'fileServe'. If the template specified in the request -- path is not found, it returns 'empty'. heistServe :: m () heistServe = fmap rqPathInfo getRequest >>= render -- | Analogous to 'fileServeSingle'. If the given template is not found, -- this throws an error. heistServeSingle :: ByteString -> m () heistServeSingle t = render t <|> error ("Template " ++ show t ++ " not found.")
export class Settings { }
<reponame>nite-coder/blackbear-demo package grpc import ( "github.com/nite-coder/blackbear-demo/internal/pkg/config" internalGRPC "github.com/nite-coder/blackbear-demo/internal/pkg/grpc" "github.com/nite-coder/blackbear-demo/pkg/wallet/proto" "github.com/nite-coder/blackbear/pkg/log" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) func NewClient(cfg config.Configuration) (proto.WalletServiceClient, error) { conn, err := grpc.Dial(cfg.Wallet.GRPCAdvertiseAddr, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 5, Timeout: 5, PermitWithoutStream: true, }), grpc.WithChainUnaryInterceptor( otelgrpc.UnaryClientInterceptor(), internalGRPC.ClientInterceptor(), ), grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), ) if err != nil { log.Err(err).Errorf("grpc: dial to wallet server failed. connection string: %s", cfg.Wallet.GRPCAdvertiseAddr) return nil, err } log.Info("grpc: dail to wallet server connect successfully") client := proto.NewWalletServiceClient(conn) return client, nil }
<filename>test/plugins/org.talend.repository.view.di.test/src/main/java/org/talend/repository/view/di/viewer/content/AbstractContentProviderTest.java // ============================================================================ // // Copyright (C) 2006-2021 Talend Inc. - www.talend.com // // This source code is available under agreement available at // %InstallDIR%\features\org.talend.rcp.branding.%PRODUCTNAME%\%PRODUCTNAME%license.txt // // You should have received a copy of the agreement // along with this program; if not, write to Talend SA // 9 rue Pages 92150 Suresnes, France // // ============================================================================ package org.talend.repository.view.di.viewer.content; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import org.eclipse.jface.viewers.ITreeContentProvider; import org.eclipse.ui.navigator.INavigatorContentService; import org.eclipse.ui.navigator.NavigatorContentServiceFactory; import org.junit.AfterClass; import org.junit.BeforeClass; import org.talend.core.model.utils.RepositoryManagerHelper; import org.talend.repository.ui.views.IRepositoryView; /** * DOC ggu class global comment. Detailled comment * * this content provider only test the root type of node so far. * * make sure, the node can be display via CNF in repository view. 
*/ public abstract class AbstractContentProviderTest { static INavigatorContentService navContentService; static ITreeContentProvider contentProvider; @BeforeClass public static void setup() { IRepositoryView repoView = RepositoryManagerHelper.findRepositoryView(); if (repoView == null) { return; } assertNotNull("Must open the DI repository view to test", repoView); assertEquals("Must test in DI repository view", IRepositoryView.VIEW_ID, repoView.getViewSite().getId()); navContentService = NavigatorContentServiceFactory.INSTANCE.createContentService(IRepositoryView.VIEW_ID, repoView.getViewer()); assertNotNull(navContentService); contentProvider = navContentService.createCommonContentProvider(); assertNotNull(contentProvider); } @AfterClass public static void clean() { if (contentProvider != null) { contentProvider.dispose(); contentProvider = null; } if (navContentService != null) { navContentService.dispose(); navContentService = null; } } }
<reponame>RobWalt/rustgym<gh_stars>100-1000 struct Solution; impl Solution { fn character_replacement(s: String, k: i32) -> i32 { let mut count = vec![0; 26]; let n = s.len(); let s: Vec<char> = s.chars().collect(); let mut start = 0; let mut end = 0; let mut res = 0; while end < n { if Self::sum(&count) <= k { count[(s[end] as u8 - b'A') as usize] += 1; end += 1; } else { count[(s[start] as u8 - b'A') as usize] -= 1; start += 1; } if Self::sum(&count) <= k { res = res.max(end - start); } } res as i32 } fn sum(count: &[i32]) -> i32 { let max = count.iter().copied().max().unwrap(); count.iter().sum::<i32>() - max } } #[test] fn test() { let s = "ABAB".to_string(); let k = 2; let res = 4; assert_eq!(Solution::character_replacement(s, k), res); let s = "AABABBA".to_string(); let k = 1; let res = 4; assert_eq!(Solution::character_replacement(s, k), res); }
import React from 'react'; import _get from 'lodash/get' import { uid } from 'react-uid'; import Line from './Line'; import Wire from './Wire'; const Connections = ({ connections, canvas, ...props }) => { if (canvas.render.internalRender) return connections.map((wire) => { return ( <Wire key={uid(wire)} wrapper={wire} zoom={canvas.render.zoom} offset={canvas.render.offset} /> ); }); return connections.map((wire) => { const [sourcePort, targetPort] = wire.getControlPoints(); if (sourcePort && targetPort) return ( <Line key={uid(wire)} start={_get(sourcePort, '_el', sourcePort)} end={_get(targetPort, '_el', targetPort)} size={4 * canvas.render.zoom} onLoad={(line) => { const { svg } = line.getProps(); wire.setupInstance(svg); wire.custom = !canvas.render.internalRender; if (typeof(props.onLoad) === 'function') props.onLoad(line, svg); }} /> ); return null; }); } export default Connections;
<reponame>phuongnamvu/sourcegraph<filename>browser/src/libs/cli/search.ts<gh_stars>1-10 import { buildSearchURLQuery } from '../../../../shared/src/util/url' import { storage } from '../../browser/storage' import { createSuggestionFetcher } from '../../shared/backend/search' import { sourcegraphUrl } from '../../shared/util/context' const isURL = /^https?:\/\// class SearchCommand { public description = 'Enter a search query' private suggestionFetcher = createSuggestionFetcher(20) private prev: { query: string; suggestions: browser.omnibox.SuggestResult[] } = { query: '', suggestions: [] } public getSuggestions = (query: string): Promise<browser.omnibox.SuggestResult[]> => new Promise(resolve => { if (this.prev.query === query) { resolve(this.prev.suggestions) return } this.suggestionFetcher({ query, handler: suggestions => { const built = suggestions.map(({ title, url, urlLabel }) => ({ content: `${sourcegraphUrl}${url}`, description: `${title} - ${urlLabel}`, })) this.prev = { query, suggestions: built, } resolve(built) }, }) }) public action = async (query: string, disposition?: string): Promise<void> => { const { sourcegraphURL: url } = await storage.sync.get() const props = { url: isURL.test(query) ? query : `${url}/search?${buildSearchURLQuery(query)}&utm_source=omnibox`, } switch (disposition) { case 'newForegroundTab': await browser.tabs.create(props) break case 'newBackgroundTab': await browser.tabs.create({ ...props, active: false }) break case 'currentTab': default: await browser.tabs.update(props) break } } } export default new SearchCommand()
/** * Create a Properties object from a map containing key:value pairs. * * @param properties * The map containing key:value pairs. * @return A newly-created Properties object containing the key:value pairs. */ public static Properties fromMap(Map<String, String> properties) { Properties res = new Properties(); for (Map.Entry<String, String> entry : properties.entrySet()) { Property property = new Property(); property.setName(entry.getKey()); property.setValue(entry.getValue()); res.getProperties().add(property); } return res; }
Book Review: Using statistics to understand the environment second edition. One of the main strengths of the volume is the detailed and thorough nature of most entries. Each entry is written by a contributor (identified by initials) with expertise in the relevant field; and with 95 different contributors, the volume succeeds in providing accurate and scholarly explanations for a wide range of terms found within physical geography. While simpler terms are defined briefly, more complicated terms are explained in detail, often with the help of diagrams and photographs. In addition simply to providing definitions of key terms, many entries also give some background to the development and use of a particular term within the context of geographical and environmental research. Longer entries are nearly always referenced back to original sources and often include suggestions for further reading that aid in exploring concepts and ideas in greater depth. There is also abundant cross-referencing of entries within the dictionary (related entries being indicated by small capitals within the text), making it possible to see how different terms inter-relate when studying a specific concept or process. In comparison with the previous edition, this edition contains more information about the mapping techniques and information technology currently being used by physical geographers. For example, ‘global positioning system’ (GPS) and ‘thematic mapper’ (TM) feature as new entries, and the entries for ‘geographic information system’ (GIS) and ‘general circulation modelling’ (GCMs) have been expanded and updated. Several other new entries reflect increased research during the 1990s into the earth’s long-term climatic history, especially over the Quaternary period. Some relevant new entries include ‘Dansgaard–Oeschger (D–O) events’, ‘ice core’, ‘Bond cycles’, ‘Heinrich events’ and ‘Ocean Drilling Programme’ (ODP). 
However, in connection with climatic changes inferred from ice cores and ocean sediments it is unfortunate that processes such as the thermohaline circulation and North Atlantic Deep Water formation do not have entries. There is also an entry for GRIP (Greenland Ice-Core Project) but no entry for the equally important GISP (Greenland Ice Sheet Project). These are small criticisms, however, and it is important to remember that a wide-ranging volume of this type must be selective and is therefore bound to lack some of the entries that specialists in various fields would like to see. In terms of production and layout, the third edition retains the high standard set by the previous editions, and the switch to Arial font for entry headings gives it a neater appearance. My only concern is that the ‘Abbreviations in physical geography’ section has been removed from the third edition. While many of the listed abbreviations in the second edition now feature as entries in the third edition’s dictionary, I think it would still have been useful to have a separate list for quick reference. With so many different contributors, the dictionary is a remarkable collaborative effort and a valuable resource. It is accessible enough for use in secondary schools, and invaluable for students pursuing an undergraduate geography degree. Professionals and researchers working within most branches of the environmental sciences will also find it useful, especially as a first port of call for tracking down the original sources of important terms and concepts. It is a very worthwhile purchase for anyone studying or teaching physical geography.
def is_vxstream(self): valid_sandbox_paths = ['cuckoo', 'vxstream', 'wildfire'] if self.path.endswith('.json') and any(sandbox.lower() in self.path.lower() for sandbox in valid_sandbox_paths): if all(self.path.lower().rfind('vxstream') >= self.path.lower().rfind(sandbox) for sandbox in valid_sandbox_paths): return True else: return False
/** * checks the SRManifest file to work out if the non-patient files (i.e. those that don't get split * by organisation) are deltas or bulks. If there's a mix it will return true. */ private boolean areNonSplitFilesDeltas(String sourceTempDir, List<File> filesToNotSplit) throws Exception { Map<String, Boolean> hmManifestContents = new HashMap<>(); String orgFilePath = FilenameUtils.concat(sourceTempDir, TppConstants.MANIFEST_FILE); File f = new File(orgFilePath); if (!f.exists()) { throw new Exception("Failed to find " + TppConstants.MANIFEST_FILE + " in " + sourceTempDir); } List<ManifestRecord> records = ManifestRecord.readManifestFile(f); for (ManifestRecord record : records) { hmManifestContents.put(record.getFileNameWithExtension(), new Boolean(record.isDelta())); } boolean containsDelta = false; for (File fileToNotSplit : filesToNotSplit) { String fileName = fileToNotSplit.getName(); if (fileName.equals(TppConstants.MANIFEST_FILE) || fileName.equals(TppConstants.MAPPING_FILE) || fileName.equals(TppConstants.MAPPING_GROUP_FILE)) { continue; } Boolean isDelta = hmManifestContents.get(fileName); if (isDelta == null) { throw new Exception("Failed to find file " + fileToNotSplit + " in SRManifest in " + sourceTempDir); } if (isDelta.booleanValue()) { containsDelta = true; break; } } return containsDelta; }
// Visualize returns a JSON visualization func (i *Item) Visualize() string { v, err := json.Marshal(i) if err != nil { return err.Error() } return string(v) }
// Copyright 2022 lucasbbb. All rights reserved. // Use of this source code is governed by an Apache 2.0 // license that can be found in the LICENSE file. // Package jwa defines the names of all the algorithms which can be used by // JSON Web Signature (JWS), JSON Web Encryption (JWE), and JSON Web Key (JWK). // https://www.rfc-editor.org/rfc/rfc7518#section-1 package jwa
# -*- coding: utf-8 -*-
from pandas import DataFrame
from .rsi import rsi
from pandas_ta.overlap import sma
from pandas_ta.utils import get_offset, non_zero_range, verify_series


def stochrsi(close, length=None, rsi_length=None, k=None, d=None, offset=None, **kwargs):
    """Indicator: Stochastic RSI Oscillator (STOCHRSI)"""
    # Validate arguments
    close = verify_series(close)
    length = length if length and length > 0 else 14
    rsi_length = rsi_length if rsi_length and rsi_length > 0 else 14
    k = k if k and k > 0 else 3
    d = d if d and d > 0 else 3
    offset = get_offset(offset)

    # Calculate Result: stochastic of the RSI over a `length`-bar window,
    # then %K/%D smoothing with simple moving averages.
    rsi_ = rsi(close, length=rsi_length)
    lowest_rsi = rsi_.rolling(length).min()
    highest_rsi = rsi_.rolling(length).max()

    stoch = 100 * (rsi_ - lowest_rsi)
    stoch /= non_zero_range(highest_rsi, lowest_rsi)

    stochrsi_k = sma(stoch, length=k)
    stochrsi_d = sma(stochrsi_k, length=d)

    # Offset
    if offset != 0:
        stochrsi_k = stochrsi_k.shift(offset)
        stochrsi_d = stochrsi_d.shift(offset)

    # Handle fills
    if "fillna" in kwargs:
        stochrsi_k.fillna(kwargs["fillna"], inplace=True)
        stochrsi_d.fillna(kwargs["fillna"], inplace=True)
    if "fill_method" in kwargs:
        stochrsi_k.fillna(method=kwargs["fill_method"], inplace=True)
        stochrsi_d.fillna(method=kwargs["fill_method"], inplace=True)

    # Name and Categorize it
    _name = "STOCHRSI"
    _props = f"_{length}_{rsi_length}_{k}_{d}"
    stochrsi_k.name = f"{_name}k{_props}"
    stochrsi_d.name = f"{_name}d{_props}"
    stochrsi_k.category = stochrsi_d.category = "momentum"

    # Prepare DataFrame to return
    data = {stochrsi_k.name: stochrsi_k, stochrsi_d.name: stochrsi_d}
    df = DataFrame(data)
    df.name = f"{_name}{_props}"
    df.category = stochrsi_k.category

    return df


# Docstring fixes vs the original: filled in the <NAME> placeholders
# (Chande & Kroll), the stochastic window is `length` (not rsi_length),
# RSI here is computed from `close` only, and `d` is the %D period.
stochrsi.__doc__ = \
"""Stochastic RSI (STOCHRSI)

"Stochastic RSI and Dynamic Momentum Index" was created by Tushar Chande and
Stanley Kroll and published in Stock & Commodities V.11:5 (189-199)

It is a range-bound oscillator with two lines moving between 0 and 100.
The first line (%K) displays the current RSI in relation to the period's
high/low range. The second line (%D) is a Simple Moving Average of the %K
line. The most common choices are a 14 period %K and a 3 period SMA for %D.

Sources:
    https://www.tradingview.com/wiki/Stochastic_(STOCH)

Calculation:
    Default Inputs:
        length=14, rsi_length=14, k=3, d=3
    RSI = Relative Strength Index
    SMA = Simple Moving Average

    RSI = RSI(close, length=rsi_length)
    LL  = lowest RSI for last length periods
    HH  = highest RSI for last length periods

    STOCHRSI  = 100 * (RSI - LL) / (HH - LL)
    STOCHRSIk = SMA(STOCHRSI, k)
    STOCHRSId = SMA(STOCHRSIk, d)

Args:
    close (pd.Series): Series of 'close's
    length (int): The STOCHRSI period. Default: 14
    rsi_length (int): RSI period. Default: 14
    k (int): The Fast %K period. Default: 3
    d (int): The Slow %D period. Default: 3
    offset (int): How many periods to offset the result. Default: 0

Kwargs:
    fillna (value, optional): pd.DataFrame.fillna(value)
    fill_method (value, optional): Type of fill method

Returns:
    pd.DataFrame: RSI %K, RSI %D columns.
"""
<filename>qtclustering-dpct/libdata.h #ifndef _LIBDATA_H_ #define _LIBDATA_H_ #include <CL/sycl.hpp> #include <dpct/dpct.hpp> float *generate_synthetic_data(float **rslt_mtrx, int **indr_mtrx, int *max_degree, float threshold, int N, int type); #endif
"""Utilities for filesystem access.""" import shutil def rmtree(path: str) -> None: """Remove a filesystem tree. Ignore failures if the tree doesn't exist.""" try: shutil.rmtree(path) except FileNotFoundError: pass
Computation of eigenmodes on a compact hyperbolic 3-space Measurements of cosmic microwave background (CMB) anisotropy are ideal experiments for discovering the non-trivial global topology of the universe. To evaluate the CMB anisotropy in multiply-connected compact cosmological models, one needs to compute the eigenmodes of the Laplace-Beltrami operator. Using the direct boundary element method, we numerically obtain the low-lying eigenmodes on a compact hyperbolic 3-space called the Thurston manifold which is the second smallest in the known compact hyperbolic 3-manifolds. The computed eigenmodes are expanded in terms of eigenmodes on the unit three-dimensional pseudosphere. We numerically find that the expansion coefficients behave as Gaussian pseudo-random numbers for low-lying eigenmodes. The observed gaussianity in the CMB fluctuations can partially be attributed to the Gaussian pseudo-randomness of the expansion coefficients assuming that the Gaussian pseudo-randomness is the universal property of the compact hyperbolic spaces. Introduction In recent years, there has been a great interest in properties of CMB anisotropy in multiply-connected cosmological models . Most of these studies deal with flat models or non-compact hyperbolic models for which the eigenmodes are known explicitly. Since no closed analytic expression of eigenmodes is known for compact hyperbolic (CH) models, so far, analysis of the CMB anisotropy in CH models has been considered to be quite difficult although they have interesting properties which are strikingly different from that of multiply-connected flat models. For instance, in low Ω o adiabatic models, the large-angular fluctuations can be produced at periods after the last scattering as the curvature perturbations decay in the curvature dominant era. 
Therefore, the argument of the suppression of the large-angular fluctuations due to the "mode-cutoff" in the multiply-connected flat models cannot simply be applicable to the multiply-connected hyperbolic models. Because the effect of the multiply-connectedness becomes significant as the volume of the space becomes small, it is very important to study whether the"small" universe scenario is plausible. For instance, the Weeks manifold and the Thurston manifold have volume∼R 3 where R denotes the curvature radius, and they are the smallest and the second smallest compact hyperbolic manifolds, respectively. Computing the eigenmodes of the Laplace-Beltrami operator in CH spaces 1 It should be noted that the computation of eigenmodes is also essential in the framework of spectral geometry . (manifolds) is equivalent to solving the Helmholtz equation with appropriate periodic boundary conditions in the universal covering space. A number of numerical methods have been used for solving the Helmholtz equation such as the finite element methods and the finite difference methods . A numerical method called the "direct boundary element method" (DBEM) has been used by Aurich and Steiner for finding out the eigenmodes of the Laplace-Beltrami operator in a two-dimensional compact multiply-connected space for studying the statistical properties of the eigenmodes in highly-excited states or equivalently the semi-classical wavefunctions . We find that this pioneering work for "quantum chaology" , the study of the imprints of classical chaos in the quantum mechanical counterparts is very useful for the study of the CMB anisotropy in CH cosmological models as well. The advantage of the DBEM is that it reduces the dimensionality of the problem by one which leads to economy in the numerical task. Since one needs to discretize only the boundary, generation of meshes is much easier than the other methods. 
Furthermore, as we shall see later, the DBEM is suitable for expanding the eigenfunctions that are continued onto the As the classical dynamical systems in CH spaces are strongly chaotic, one can naturally assume that the imprint of the classical chaos is hidden in the corresponding quantum systems in some way. It has been found that the expansion coefficients with a certain basis behave as if they are random Gaussian numbers in some classically chaotic systems ( ), which is consistent with the prediction of random-matrix theory( ). Since the CMB temperature fluctuations in CH spaces are written in terms of expansion coefficients and the eigenfunctions on the universal covering space plus initial fluctuations, if the random behavior of the expansion coefficients is confirmed, the origin of the random gaussianity in the CMB temperature fluctuations can be partially explained in terms of the geometric property of the universe. In this paper, we introduce the DBEM for solving the Helmholtz equation. The direct boundary element method (DBEM) The boundary element methods (BEM) use free Green's function as the weighted function, and the Helmholtz equation is rewritten as an integral equation defined on the boundary using Green's theorem. Discretization of the boundary integral equation yields a system of linear equations. Since one needs the discretiztion on only the boundary, BEM reduces the dimensionality of the problem by one which leads to economy in the numerical task. To locate an eigenvalue, the DBEM 3 requires one to compute many determinants of the corresponding boundary matrices which are dependent on the wavenumber k. Firstly, let us consider the Helmholtz equation with certain boundary conditions, which is defined on a bounded M-dimensional connected and simply-connected domain Ω which is a subspace of a M-dimensional Riemannian manifold M and the boundary ∂Ω is piecewise smooth. 
∇ 2 ≡ ∇ i ∇ i , (i = 1, 2, · · ·, M), and ∇ i is the 3 The DBEM uses only boundary points in evaluating the integrand in Eq. (5). The indirect methods use internal points in evaluating the integrand in Eq.(5) as well as the boundary points. where v is an arbitrary function in Sobolev space H 1 (Ω) called weighted function and is defined as Next, we put u(x) into the form where φ j 's are linearly independent square-integrable functions. Numerical methods such as the finite element methods try to minimize the residue function R for a fixed weighted function v(x) by changing the coefficients u j . In these methods, one must resort to the variational principle to find the u j 's which minimize R. Now we formulate the DBEM which is a version of BEMs. Here we search u(x)'s for the space C 1 (Ω) ∩ C 2 (Ω) ∩ L 2 (Ω). First, we slightly modify Eq.(2) using the Green's theorem where g ≡ det{g ij } and dV ≡ dx 1 . . . dx M ; the surface element dS i is given by where ǫ j 1 ···j M +1 denotes the M+1-dimensional Levi-Civita tensor. Then Eq.(2) be- As the weighted function v, we choose the fundamental solution G E (x, y) which where E ≡ k 2 , and δ D (x − y) is Dirac's delta function. G E (x, y) is also known as the free Green's function whose boundary condition is given as where d(x, y) is the geodesic distance between x and y. Let y be an internal point of Ω. Then we obtain from Eq.(7) and Eq. (8), Thus the values of eigenfunctions at internal points can be computed using only the boundary integral. If y ∈ ∂Ω, we have to evaluate the limit of the boundary integral terms as G E (x, y) becomes divergent at x = y (see appendix A). The boundary integral equation is finally written as or in another form, where n i ≡ dS i /dS and dS ≡ √ dS i dS i . Note that we assumed that the boundary surface at y is sufficiently smooth. If the boundary is not smooth, one must calculate the internal solid angle at y (see appendix A). Another approach is to rewrite Eq.(10) in a regularized form . 
We see from Eq. (11) or Eq.(12) that the approximated solutions can be obtained without resorting to the variational principle. Since it is virtually impossible to solve Eq.(12) analytically, we discretize it using boundary elements. Let the number of the elements be N. We approximate u by some low-order polynomials (shape function) on each element as u = c 1 + c 2 η + c 3 ξ where η and ξ denote the coordinates on the corresponding standard element 4 . Then we have the following equation: includes both u and q, the boundary element method can naturally incorporate the periodic boundary conditions: where g i 's are the face-to-face identification maps defined on the boundary(see appendix B). The boundary conditions constrain the number of unknown constants to N. Application of the boundary condition (14) to Eq.(13) and permutation of the columns of the components yields where N×N-dimensional matrix A is constructed from G ij and H ij and N-dimensional vector x is constructed from u i 's and q i 's. For the presence of the non-trivial solution, the following relation must hold, Thus the eigenvalues of the Laplace-Beltrami operator acting on the space C 1 (Ω) ∩ C 2 (Ω) ∩ L 2 (Ω) are obtained by searching for k's which satisfy Eq.(16). Computation of low-lying eigenmodes In this section, we apply the DBEM for computing the low-lying eigenmodes on the Thurston manifold Q 2 . We have chosen Q 2 for a technical reason that the fundamental domain of Q 2 is much simpler than that of the Weeks manifold Q 1 (generation of meshes is much simpler). See appendix B for understanding the basic aspects of three-dimensional hyperbolic geometry. The Helmholtz equation in the Poincaré coordinates is written as where ∆ E and ∇ E are the Laplacian and the gradient on the corresponding threedimensional Euclidean space, respectively. Note that we have set the curvature radius R = 1 without loss of generality. 
By using the DBEM, the Helmholtz equation (17) is converted to an integral representation on the boundary. Here Eq.(12) can be written in terms of Euclidean quantities as where dS = 2(1 − |x| 2 ) −1 dS E . The fundamental solution is given as G k (x, y) = − 1 4π where s = √ 1 − k 2 and σ = cosh d(x, y). Then Eq. (18) is discretized on the boundary elements Γ J as (20) where N denotes the number of the boundary elements. An example of N = 1168 elements on the boundary of the fundamental domain in the Poincaré coordinates is shown in figure 1. These elements are firstly generated in Klein coordinates in which the mesh-generation is convenient. The maximum length of the edge ∆l in these elements is 0.14. The condition that the corresponding de Broglie wavelength 2π/k is longer than the four times of the interval of the boundary elements yields Figure 1: 1168 boundary elements a rough estimate of the validity condition of the calculation as k < 11. On each Γ J , u and q ≡ ∂u/∂n are approximated by low order polynomials. For simplicity, we use constant elements: Substituting Eq. (21) into Eq. (20), we obtain The singular integration must be carried out for I-I components as the fundamental solution diverges at (x I = y I ). This is not an intractable problem. Several numerical techniques have already been proposed by some authors . We have applied Hayami's method to the evaluation of the singular integrals . Introducing coordinates similar to spherical coordinates centered at x I , the singularity is canceled out by the Jacobian which makes the integral regular. Let g i (i = 1, 2, . . . , 8) be the generators of the discrete group Γ which identify a boundary face F i with another boundary face g i (F i ): The boundary of the fundamental domain can be divided into two regions ∂Ω A and ∂Ω B and each of them consists of N/2 boundary elements, The periodic boundary conditions reduce the number of the independent variables to N, i.e. 
for all x B ∈ ∂Ω B , there Substituting the above relation into Eq. (22), we obtain where u A = (u 1 , u 2 , . . . u N/2 ) and q A = (q 1 , q 2 , . . . q N/2 ) and matrices H = {H IJ } and G = {G IJ } are written as Eq. (27) takes the form where N×N-dimensional matrix A is constructed from G and H and N-dimensional vector x is constructed from u A and q A . For the presence of the non-trivial solution, the following relation must hold, Thus the eigenvalues of the Laplace-Beltrami operator in a CH space are obtained by searching for k's which satisfy Eq. (30). In practice, Eq.(30) cannot be exactly satisfied as the test function which has a locally polynomial behavior is slightly deviated from the exact eigenfunction. Instead, one must search for the local minima of det . This process needs long computation time as A(k) depends on k implicitly. Our numerical result (k < 10) is shown in table 1. The first "excited state" which corresponds to k = k 1 is important for the understanding of CMB anisotropy. Our numerical result k 1 = 5.41 is consistent with the value 5.04 obtained from Weyl's asymptotic formula where U and V are unitary matrices and D is a diagonal matrix. If D ii in D is almost zero then the complex conjugate of the i-th row in V is an approximated so- The numerical accuracy of the obtained eigenvalues is roughly estimated as follows. First, let us write the obtained numerical solution in terms of the exact solution as k = k 0 + δk and u k (x) = u k 0 (x) + δu k (x), where k 0 and u k 0 (x) are the exact eigenvalue and eigenfunction, respectively. The singular decomposition method enables us to find the best approximated solution which satisfies where ǫ is a N-dimensional vector and | | denotes the Euclidean norm. It is expected that the better approximation gives the smaller |ǫ|. Then Eq. 
(33) can be written as, Ignoring the terms in second order, Eq.(34) is reduced to Since it is not unlikely that (∆ + k 2 0 )δu k (x) is anticorrelated to 2kδku k (x), we obtain the following relation by averaging over y J , where <> denotes the averaging over y J Thus one can estimate the expected deviation of the calculated eigenvalue |δk| from u k (x) and ǫ(y J ). We numerically find that |δk| = 0.005 for k = 5.41 and |δk| = 0.01 for k = 9.91. The other deviation values lie in between 0.005 and 0.01. By computing the second derivatives, one can also estimate the accuracy of the computed eigenfunctions. The accuracy parameter err is defined as where u k (x) is normalized (O(u k (x)) ∼ 1). We see from figure Statistical properties of eigenmodes Properties of eigenmodes of the Laplace-Beltrami operator are determined by the Helmholtz equation. Therefore, at first glance it does not seem to make a sense to study the statistical properties of the eigenmodes. However, if one recognizes the Laplace-Beltrami operator in a CH space as the Hamiltonian in a quantum mechanical system, each eigenmode can be interpreted as a wavefunction in an eigenstate. Since the corresponding classical system is known to be a typical chaotic system (K-system), it is natural to assume that the imprints of classical chaos is hidden in the corresponding quantum system. Recent studies have demonstrated that some of the statistical properties of energy spectrum are in accordance with the universal prediction of random-matrix theory(RMT) . RMT also predicts that the squared expansion coefficients |a i | 2 of an eigenstate with respect to a generic basis are Gaussian distributed . 
In the limit N → ∞, x = |a i | 2 obeys the statistics given for three universality classes of the orthogonal (GOE, µ = 1), unitary (GUE, µ = 2) and symplectic (GSE, µ = 4) ensembles, each distribution function P is given by In our case, as the time-reversal symmetry of the Hamiltonian implies, one expects that |a i | 2 obeys the GOE prediction. In order to apply the GOE prediction to the statistical properties of eigenstates on CH spaces, one needs to find a set of orthonormal basis but no closed analytic expression is known for any CH spaces. To avoid the problem, Aurich and Steiner noticed that the wavefunctions on the hyperbolic octagons can be continued onto the universal covering space H 2 , and eigenstates can be expanded in terms of circular-waves . They numerically found that the squared expansion coefficients obeys the GOE prediction in highly excited states of a hyperbolic asymmetrical octagon model. We extend their method to three-dimensional CH models where we consider only low-lying modes. First, we normalize the obtained 14 eigenfunctions on the Thurston manifold. The eigenfunctions are naturally continued onto the whole unit Poincaré ball by the periodic boundary condition. As a "generic basis", we consider a set of orthonormal eigenfunctions Q νlm (T-complete functions) on the unit pseudosphere which is isometric to the Poincaré ball, where P , Y lm and Γ denote the associated Legendre function, the spherical harmonics and gamma function, respectively. P can be written in terms of the hypergeometric function 2 F 1 , Eigenfunctions u ν can be expanded in terms of Q νlm 's as Note that each u ν has no components with ν ′ = ν because Q νlm 's are complete and linearly independent. At first glance the computation of ξ νlm in Eq. (41) seems cumbersome as the domain of the integration extends over the whole pseudosphere. Fortunately, one can obtain ξ νlm by evaluating two-dimensional integrals. ξ νlm can be written as with radius χ o . 
In order to compute the values of eigenfunctions on the sphere with radius longer From this formula, in the case of the Thurston manifold, n 1 ∼ 29 if χ o = 1.6. Because the sphere intersects the fundamental domain at random, the copies of the fundamental domain on the sphere stick out their half portions on average. Therefore, the approximate number n 2 of the copies that intersect the sphere is given by where r ave = (r + + r − )/2. This estimate gives n 2 ∼ 120 if χ o = 1.6. Approximating each eigenmode by de Broglie waves, we obtain the corresponding fluctuation scale δA in steradian on the sphere, Thus correlation patterns are observed in pairs of patches with typical size δA. Next, we extract a set of independent variables from ξ νlm 's. In general, any where If u ν is real, from Eq. (47), therefore, Thus ξ νl−m can be written in terms of ξ νlm . To extract a set of independent variables from ξ νlm 's, we rewrite Eq.(49) as follows where and Thus the real eigenfuctions can be written in terms of real independent coefficients a νlm and real-valued R νlm , where a ν00 = −Im(ξ ν00 ), a νl0 = √ c νl Re(ξ νl0 ), l > 0, and R ν00 = Im(Q ν00 ), Now we turn to the statistical properties of the coefficients a νlm . As in , we consider the cumulative distribution of following quantities, whereā ν is the mean of a νlm 's and σ 2 ν is the variance. The cumulative distribution is compared to the cumulative RMT distribution functions which are directly derived from Eq.(38), where γ(x, y) is the incomplete gamma function. To test the goodness of fit between the computed cumulative distribution function and that predicted by RMT, we use Kolmogorov-Smirnov statistic D N which is the least upper bound of all pointwise differences |I N (x) − I(x)| , where I N (x) is the empirical cumulative distribution function defined by where y 1 < y 2 < . . . < y N are the computed values of a random sample which consists of N elements. 
If I N (x) is "close" to I(x), the observed D N must be so small that it falls within the range of possible fluctuations which depend on the size of the random sample. For the random variable D N for any z > 0, it can be shown that the probability of D N < d is given by lim N →∞ where the pure quantum mechanical behavior 6 . Nevertheless, our numerical results serve to strengthen the hypothesis that the expansion coefficients behave as Gaussian pseudo-random numbers even for low-lying modes. Table 3: The Kolmogolov-Smirnov statistics D N and the significance levels α D for the test of the hypothesis I N (x) = I(x) and their averages. N = 121 for 0 ≤ l ≤ 10 and N = 361 for 0 ≤ l ≤ 18. The mode k = 9.26 is degenerated into two modes, which (after orthogonalization)are denoted by k = 9.26a and k = 9.26b. Next we examine the randomness of a νlm 's. Because a νlm 's are actually determined by the Helmholtz equation, it is appropriate to describe a νlm 's as pseudorandom numbers. We apply the run test for testing randomness (see ). Suppose that we have n observations of the random variable X and m obser- or r > c 2 . As the Kolmogolov-Smirnov test, α r is given by the observed r. The run numbers r and the significant levels α r are shown in table 4. High significant levels are again observed except for the one at k = 8.73 for 0 ≤ l ≤ 18. As the corresponding r is larger than the averaged value, this may be due to the cyclic effect. On the whole, it is concluded that a νlm 's behave as if they are random variables. Table 4: The run numbers r and the significance levels α r for the test of the hypothesis that a νlm 's are random variables. N = 121 for 0 ≤ l ≤ 10 and N = 361 for 0 ≤ l ≤ 18. The mode k = 9.26 is degenerated into two modes, which (after orthogonalization)are denoted by k = 9.26a and k = 9.26b. 
Summary In this paper, we have demonstrated that the DBEM is eminently suitable for computing eigenmodes on CH spaces; we have obtained some low-lying eigenmodes on a CH space called the Thurston manifold, which is the second smallest in the known CH manifolds, and we have studied the statistical properties of these eigenmodes. The low-lying eigenmodes are expanded in terms of eigenmodes on the pseudosphere, and we find that the expansion coefficients behave as if they are Gaussian random numbers. Why are they so random even for low-lying modes? It should be pointed out that the randomness of the expansion coefficients for low-lying eigenmodes is not the property of the eigenmodes themselves but rather the property of the images of eigenmodes on the whole universal covering space, since the fluctuation scales for low-lying eigenmodes are comparable to the size of the fundamental domain. We conjecture that the origin of the random behavior of eigenmodes comes from the almost randomly distributed images of a set of points in the universal covering space. Computation of eigenmodes is essential in simulating the CMB in CH cosmological models. As the DBEM needs only a set of face-to-face identification maps and the discretization of the corresponding fundamental domain, it can be applied to other CH spaces straightforwardly. However, the computation of the modes with small fluctuation scale k >> 1 is still a difficult task, as the number of modes increases as N ∝ k 3 . Nevertheless, the contribution of the modes with small fluctuation to the temperature correlation of CMB can be estimated by assuming that the expansion coefficients for excited states (k >> 1) also behave as Gaussian pseudo-random numbers, as do those for low-lying modes. The assumption is numerically confirmed in a two-dimensional CH model.
If the observed Gaussian pseudo-randomness is found to be the universal behavior in CH spaces for low-lying modes as well as excited modes, the origin of the Gaussianity of the CMB fluctuations can be partially explained. This is because the amplitude of the CMB fluctuation is written in terms of: A Boundary integral equation Here, we derive the boundary integral equation (11) in section 1. For simplicity, we prove the formula in 3-spaces. First, we start with Eq.(10) with dimension M = 3. Although the integrand in Eq.(10) is divergent at x = y ∈ ∂Ω, the integration can be regularized as follows. Let us draw a sphere with center y ∈ ∂Ω with small radius ǫ and let Γ ǫ be the outer spherical boundary and α and β be the internal solid angle and external solid angle as shown in figure 7, The singular terms in Eq.(63) can be separated from non-singular terms as lim ǫ→0 ∂Γ+Γǫ If ǫ is sufficiently small, the region enclosed by Γ ǫ can be approximated as a Euclidean subspace. In this region, the asymptotic form of the free Green's function where d is the Euclidean distance between x and y. Taking the spherical coordinates (ǫ, θ, φ) with center y, the singular terms in Eq.(64) are estimated as where α(y) denotes the internal solid angle at y. If the boundary is smooth at y, α(y) is equal to 2π, which gives the coefficient 1/2 in Eq. (11). Similarly, one can prove the formula for M = 2 and M > 3. B Three-dimensional hyperbolic spaces The discrete subgroup Γ of P SL(2, C), which is the orientation-preserving isometry group of the simply-connected hyperbolic 3-space H 3 , is called the Kleinian group. Any CH space (either manifold or orbifold) can be described as a compact quotient M = H 3 /Γ. The classification of the Kleinian group has not been completed. However, several procedures for constructing compact hyperbolic spaces are known. For further detail, see the references.
The standard pseudospherical coordinates (χ, θ, φ) for H 3 with curvature radius R are given by In these coordinates, the line element takes the form The Poincaré representation is obtained by the transformation The line element in these coordinates takes the form and the geodesic distance d between x and x ′ is given by . where | | denotes the Euclidean norm andx = R −1 x,x ′ = R −1 x ′ . Note that geodesics in the Poincaré ball are either diameters or semi-circles which orthogonally intersect with the boundary of the Poincaré ball. In Poincaré coordinates, the metric is conformally flat so that the computation of the boundary integral equation becomes simpler. Another commonly used set of coordinates is obtained from the upper-half space representation which is defined by the transformation which maps H 3 into the upper-half space E 3 + = {(y 1 , y 2 , y 3 ) ∈ E 3 | y 3 > 0}. In these coordinates, the line element takes the form The geodesic distance is given by In the upper-half space model, geodesics are either straight vertical lines or semicircles orthogonal to the boundary of the upper-half space. In this coordinates the metric is conformally flat as in Poincaré coordinates. If we represent a point p on the upper-half space, as a quaternion whose fourth component equals zero , then the actions of P SL(2, C) on H 3 ∪ C ∪ {∞} can be described by simple formulas, where a, b, c and d are complex numbers and 1, i and j are represented by matrices as, As p = z − y 3 j, the actionγ is explicitly written as If we restrict the actionγ on C ∪ {∞}, or equivalently, y 3 = 0, the action is described as γ is called the Möbius transformation, andγ is called the extended Möbius transformation. In the Klein (projective) model, the geodesics and planes are mapped into their Euclidean counterparts. Since the fundamental domain is enclosed by Euclidean planes in the Klein coordinates, the task of generating meshes is much easier than other coordinates. 
The transformation can be understood as the projection of the hyperboloid (X 0 , X 1 , X 2 , X 3 ) onto the interior of the sphere (R, z 1 , z 2 , z 3 ) along lines originating from the origin (0,0,0,0). The geodesic distance can be represented as . The possible values for the volume of the CH manifolds are bounded below and no upper bound exists. The minimal value has not yet been known, although Gabai et al have proved that V ol min > 0.16668...R 3 . Thurston proposed a manifold Q 2 as a candidate for the three-dimensional hyperbolic manifold of the minimum volume V ol(Q 2 ) = 0.98139R 3 . However, Weeks and independently, Matveev and Fomenko discovered a CH manifold Q 1 with the smallest value V ol(Q 1 ) = 0.94272R 3 in the known CH manifolds and it is conjectured to be the one with the minimum volume. A computer program "SnapPea" by Jeff Weeks has made it possible to catalog and study a large number of CH and non-CH spaces which include Q 1 ,Q 2 and thousands of cusped and non-cusped hyperbolic 3-manifolds. Let us see how CH manifolds are characterized in the SnapPea. Any element of the discrete isometry group Γ which is equivalent to the fundamental group π 1 (M) can be described as a word which is a product of generators {g 1 , . . . , g s }, g = g n 1 m 1 . . . g n j m j , (j, n j ∈ Z, m j = 1, . . . , s), g ∈ Γ. (83) The above expression is not unique, since they are subject to a set of relations, each of which takes the form, where I denotes the identity. Note that different expression of g is possible by choosing different generators. In the case of Thurston's manifold Q 2 , Γ has a simple presentation, where a and b are generators and words in the parenthesis are equal to identities. This representation is simple for describing Γ but not convenient for describing the fundamental domain. 
Choosing a coordinate system centered at a point of locally maximum of the injectivity radius 7 , generators which define the face identification maps in the pseudospherical coordinates can be described by 8 matrices (see appendix C), which implies that the number of the faces on the boundary of the fundamental domain is sixteen. For instance, the center (X 0 , X 1 , X 2 , X C Table of matrices In the Minkowski coordinates (t, x, y, z), the 8 generators which define the fundamental domain of the Thurston manifold with the basepoint (1, 0, 0, 0) are described by the following 8 matrices,
<filename>ezaad/constants.py<gh_stars>0 SCHEMA_URI_APP_USER = 'urn:demo-app:params:scim:schemas:extension:ezaad:2.0:User'
from types import IntType from palm.util import multichoose from palm.state_collection import StateCollectionFactory class SingleDarkState(object): """ A macrostate for a BlinkModel with one dark microstate. The available microstates are `I`, `A`, `D`, and `B`. Attributes ---------- initial_state_flag : bool This flag is used by BlinkModel when creating an initial probability vector. Expected to be true only for the macrostate in which `I` is the only microstate with nonzero population. Parameters ---------- id_str : string A label that is used to identify to this macrostate. I,A,D,B : int The populations of the respective microstates. observation_class : string The aggregated class to which this macrostate belongs. """ def __init__(self, id_str, I, A, D, B, observation_class): self.id = id_str self.I = I self.A = A self.D = D self.B = B self.observation_class = observation_class self.initial_state_flag = False def __str__(self): return "%s %s" % (self.id, self.observation_class) def as_array(self): return numpy.array([self.I, self.A, self.D, self.B]) def get_id(self): return self.id def get_class(self): return self.observation_class def is_initial_state(self): return self.initial_state_flag def set_initial_state_flag(self): self.initial_state_flag = True def as_dict(self): return {'observation_class':self.get_class(), 'I':self.I, 'A':self.A, 'D':self.D, 'B':self.B} class DoubleDarkState(object): """ A macrostate for a BlinkModel with one dark microstate. The available microstates are `I`, `A`, `D1`, `D2`, and `B`. Attributes ---------- initial_state_flag : bool This flag is used by BlinkModel when creating an initial probability vector. Expected to be true only for the macrostate in which `I` is the only microstate with nonzero population. Parameters ---------- id_str : string A label that is used to identify to this macrostate. I,A,D1,D2,B : int The populations of the respective microstates. 
observation_class : string The aggregated class to which this macrostate belongs. """ def __init__(self, id_str, I, A, D1, D2, B, observation_class): self.id = id_str self.I = I self.A = A self.D1 = D1 self.D2 = D2 self.B = B self.observation_class = observation_class self.initial_state_flag = False def __str__(self): return "%s %s" % (self.id, self.observation_class) def as_array(self): return numpy.array([self.I, self.A, self.D1, self.D2, self.B]) def get_id(self): return self.id def get_class(self): return self.observation_class def is_initial_state(self): return self.initial_state_flag def set_initial_state_flag(self): self.initial_state_flag = True def as_dict(self): return {'observation_class':self.get_class(), 'I':self.I, 'A':self.A, 'D1':self.D1, 'D2':self.D2, 'B':self.B} class SingleDarkStateEnumeratorFactory(object): """ Creates a state enumerator for a BlinkModel with one dark state. Attributes ---------- num_microstates : int Parameters ---------- N : int The total number of fluorophores. state_factory : class Factory class for State objects. max_A : int Number of fluorophores that can be simultaneously active. """ def __init__(self, N, state_factory=SingleDarkState, max_A=5): assert type(N) is IntType self.N = N self.state_factory = state_factory self.max_A = max_A self.num_microstates = len(['I', 'A', 'D', 'B']) def create_state_enumerator(self): """ Creates a method that builds a StateCollection, made up of all possible macrostates in the model, subject to the constraint that no states with `A` > `max_A` are allowed. Returns ------- enumerate_states : callable f() A method that builds a StateCollection. """ def enumerate_states(): """ Builds a StateCollection for a model with one dark state. No states with `A` > `max_A` are allowed. Returns ------- state_collection : StateCollection The allowed macrostates for the model. 
initial_state_id, final_state_id : string The identifier strings for the states where a time trace is expected to start and finish, respectively. """ sc_factory = StateCollectionFactory() for this_count_list in multichoose(self.num_microstates, self.N): I = this_count_list[0] A = this_count_list[1] D = this_count_list[2] B = this_count_list[3] if A > self.max_A: continue else: if A > 0: obs_class = 'bright' else: obs_class = 'dark' id_str = "%d_%d_%d_%d" % (I, A, D, B) this_state = self.state_factory(id_str, I, A, D, B, obs_class) if I == self.N: initial_state_id = this_state.get_id() elif B == self.N: final_state_id = this_state.get_id() else: pass sc_factory.add_state(this_state) state_collection = sc_factory.make_state_collection() return state_collection, initial_state_id, final_state_id return enumerate_states class DoubleDarkStateEnumeratorFactory(object): """ Creates a state enumerator for a BlinkModel with two dark states. Attributes ---------- num_microstates : int Parameters ---------- N : int The total number of fluorophores. state_factory : class Factory class for State objects. max_A : int Number of fluorophores that can be simultaneously active. """ def __init__(self, N, state_factory=DoubleDarkState, max_A=5): assert type(N) is IntType self.N = N self.state_factory = state_factory self.max_A = max_A self.num_microstates = len(['I', 'A', 'D1', 'D2', 'B']) def create_state_enumerator(self): """ Creates a method that builds a StateCollection, made up of all possible macrostates in the model, subject to the constraint that no states with `A` > `max_A` are allowed. Returns ------- enumerate_states : callable f() A method that builds a StateCollection. """ def enumerate_states(): """ Builds a StateCollection for a model with one dark state. No states with `A` > `max_A` are allowed. Returns ------- state_collection : StateCollection The allowed macrostates for the model. 
initial_state_id, final_state_id : string The identifier strings for the states where a time trace is expected to start and finish, respectively. """ sc_factory = StateCollectionFactory() for this_count_list in multichoose(self.num_microstates, self.N): I = this_count_list[0] A = this_count_list[1] D1 = this_count_list[2] D2 = this_count_list[3] B = this_count_list[4] if A > self.max_A: continue else: if A > 0: obs_class = 'bright' else: obs_class = 'dark' id_str = "%d_%d_%d_%d_%d" % (I, A, D1, D2, B) this_state = self.state_factory(id_str, I, A, D1, D2, B, obs_class) if I == self.N: initial_state_id = this_state.get_id() elif B == self.N: final_state_id = this_state.get_id() else: pass sc_factory.add_state(this_state) state_collection = sc_factory.make_state_collection() return state_collection, initial_state_id, final_state_id return enumerate_states
<gh_stars>0 /** * 使用alarm和pause函数来实现sleep进程休眠功能 * * 但是这种简单实现有3个问题: * 1. * 如果在调用MySleep之前,已经设置了闹钟,则它被MySleep中的第一次alarm调用擦除。 * 可用下列方法更正:检查第一次调用alarm的返回值,如果值小于本次调用alarm的参数值,则只应等到已有的闹钟超时。 * 如果之前奢姿的闹钟超时时间晚于本次设置值,则在MySleep函数返回之前,重置此闹钟,使其在之前闹钟的设定时间再次发生超时 * 2. * 程序修改了对SIGALRM的配置。如果编写了一个函数供其他函数调用,则在该函数被调用时要先保存原配置,在该函数返回前再恢复原配置。 * 更正这一点的方法是:保存signal函数的返回值,在返回前重置原配置。 * 3. * 第一次调用alarm和pause之间有一个竞争条件。在一个繁忙的系统中,可能alarm在调用pause之前超时,并调用了信号处理程序。 * 如果发生了这种情况,则在调用pause后,如果没有捕捉到其他信号,调用者将被永远挂起。 */ #include "./../lib/apue.h" static void SigAlrm(int); int MySleep(int seconds) { printf("Sleep %d seconds...\n", seconds); if (signal(SIGALRM, SigAlrm) == SIG_ERR) { return seconds; } printf("Set alarm\n"); alarm(seconds); /** * pause()函数使调用进程挂起直至捕捉到一个信号 * 只有执行了一个信号处理程序并从其返回时,pause才返回,在这种情况下,pause返回-1,error设置为EINTR */ pause(); printf("Alarm end\n"); // alarm(0)可以取消闹钟,并返回剩余时间 return alarm(0); } int main(int argc, const char** argv) { printf("Run begin time %ld\n", time(0)); MySleep(3); printf("Run end time %ld\n", time(0)); return 0; } static void SigAlrm(int sig_num) { // 不做任何事情,只是单纯调用触发pause函数 printf("Run SigAlarm...\n"); }
// solve A x = b for each partition // b will be overwritten by x void LMatrix::solve (LMatrix& b, LMatrix& V, Context ctx, HighLevelRuntime* runtime, bool wait) { assert( this->rows() == b.rows() && this->rows() == V.rows() ); assert( b.cols() > 0 ); assert( b.num_partition() == nPart ); LogicalPartition APart = this->logical_partition(); LogicalPartition bPart = b.logical_partition(); LogicalPartition VPart = V.logical_partition(); LogicalRegion ARegion = this->logical_region(); LogicalRegion bRegion = b.logical_region(); LogicalRegion VRegion = V.logical_region(); Domain domain = this->color_domain(); LeafSolveTask::TaskArgs args = {this->rblock, b.cols(), V.cols(), V.small_block_parts()}; TaskArgument tArg(&args, sizeof(args)); LeafSolveTask launcher(domain, tArg, ArgumentMap(), nPart); RegionRequirement AReq(APart, 0, READ_ONLY, EXCLUSIVE, ARegion); RegionRequirement bReq(bPart, 0, READ_WRITE, EXCLUSIVE, bRegion); RegionRequirement VReq(VPart, 0, READ_ONLY, EXCLUSIVE, VRegion); AReq.add_field(FIELDID_V); bReq.add_field(FIELDID_V); VReq.add_field(FIELDID_V); launcher.add_region_requirement(AReq); launcher.add_region_requirement(bReq); launcher.add_region_requirement(VReq); FutureMap fm = runtime->execute_index_space(ctx, launcher); if(wait) { log_solver_tasks.print("Wait for leaf solve..."); fm.wait_all_results(); log_solver_tasks.print("Done for leaf solve..."); } }
Message Modeling for the Joint Architecture for Unmanned Systems (JAUS) The Joint Architecture for Unmanned Systems (JAUS) is a standard for sensing, control, and computational communication of components for unmanned systems. This paper presents a modeling environment capable of producing a domain-specific prototype of the software necessary for inter-computer communications. A metamodel is used to provide the domain-specific modeling language to model both the messages used in JAUS, and the shell interfaces for components that transmit and receive those messages. The produced artifacts are C and C++ code that can be used in unmanned systems and simulations of such systems, including tests that validate the structure and behavior of the generated code. The generated code is compatible with standard JAUS implementations, and is validated using the Open JAUS open source API and framework. Future work describes the second spiral of features and behaviors (currently in the design phase). The case study and test environment for the software generated by this project is an autonomous ground vehicle, modeled on a Ford Escape Hybrid that is used in laboratory experiments.
class SpellCheckWordlist { public activate(): void { console.log("activated!"); } public provideSpellCheck() { return require.resolve("./Plugin"); } } module.exports = new SpellCheckWordlist();
import requests from bs4 import BeautifulSoup import pandas as pd import psycopg2 import random import datetime # method for connecting and saving to the database def store(title, price, image): sql = """INSERT INTO data_frame(title, price, image_url, created_at) VALUES(%s, %s, %s, %s);""" conn = None try: # connecting to the database conn = psycopg2.connect(dbname='sneakerdb', user='postgres', password = None) # create a new cursor cur = conn.cursor() # execute the INSERT statement cur.execute(sql, (title, price, image, datetime.datetime.now())) # commit the changes to the database cur.connection.commit() # close communication with the database cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() # url to scrape data shoe_store_url = 'https://www.obeezi.com/sneakers?product_list_limit=120' #Load html's plain data into a variable plain_html_text = requests.get(shoe_store_url) #parse the data soup = BeautifulSoup(plain_html_text.text, 'lxml') sneaker_container_divs = soup.find_all('div', class_='product-item-info', limit=5) # Lists to store the scraped data in sneakers_big_data = [] for sneaker_div in sneaker_container_divs: mini_tuple = tuple() mini_list = list(mini_tuple) # sneaker's name mini_list.insert(0, sneaker_div.strong.a.text.replace('\n', '')) # sneaker's price mini_list.insert(1, sneaker_div.find('span', class_='price').get_text()) # sneaker's image_url mini_list.insert(2, sneaker_div.find('img', class_='product-image-photo')['src']) new_data = tuple(mini_list) sneakers_big_data.append(new_data) for sneaker_data in sneakers_big_data: #store items into the database store(sneaker_data[0], sneaker_data[1], sneaker_data[2]) """optional: store data in pandas data frame""" # test_df = pd.DataFrame( # { # 'sneaker': sneaker_titles, # 'image_url': sneaker_images, # 'price': sneaker_prices, # }) # print(test_df.info()) #test_df
// GetPeer returns the controlled peer ID.
//
// The peer is delivered over c.peerCh. After receiving it, the value is
// immediately sent back onto the channel so that later GetPeer calls (and
// any other readers) can still observe it — a borrow/return pattern.
// NOTE(review): this assumes c.peerCh is buffered (capacity >= 1); with an
// unbuffered channel the send-back would block until another reader arrives.
// TODO: confirm at the channel's construction site.
//
// If ctx is cancelled before a peer becomes available, ctx.Err() is returned.
func (c *Controller) GetPeer(ctx context.Context) (peer.Peer, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case tpt := <-c.peerCh:
		// Put the peer back for subsequent callers.
		c.peerCh <- tpt
		return tpt, nil
	}
}
<reponame>liupan126/base-framework // Copyright 2018 cloudy <EMAIL>. All rights reserved. // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. package tools import ( "fmt" "testing" ) func TestIntToStr(t *testing.T) { var a int a = 10 aStr := fmt.Sprintf("%d", a) result := IntToStr(a) if aStr != result { t.Errorf("TestIntToStr failed to achieve the expected function,%d excepted %s ,got %s", a, aStr, result) } } func TestStrToInt64(t *testing.T) { var aStr string aStr = "10" var aInt64 int64 = 10 result := StrToInt64(aStr) if aInt64 != result { t.Errorf("TestIntToStr failed to achieve the expected function,%s excepted %d ,got %d", aStr, aInt64, result) } } func TestBytesToInt64(t *testing.T) { var aStr string aStr = "10" var aInt64 int64 = 10 result := BytesToInt64([]byte(aStr)) if aInt64 != result { t.Errorf("TestIntToStr failed to achieve the expected function,%s excepted %d ,got %d", aStr, aInt64, result) } } func TestStrToUint64(t *testing.T) { var aStr string aStr = "10" var aInt64 uint64 = 10 result := StrToUint64(aStr) if aInt64 != result { t.Errorf("TestIntToStr failed to achieve the expected function,%s excepted %d ,got %d", aStr, aInt64, result) } aStr = "-10" result = StrToUint64(aStr) if aInt64 == result { t.Errorf("TestIntToStr failed to achieve the expected function,%s excepted %d ,got %d", aStr, 0, result) } } func TestFloat64ToStr(t *testing.T) { var aFloat float64 = 12.33 except := fmt.Sprintf("%f0000000", aFloat) result := Float64ToStr(aFloat) if except != result { t.Errorf("TestIntToStr failed to achieve the expected function,%f excepted %s ,got %s", aFloat, except, result) } } func TestStrToInt(t *testing.T) { var aStr string aStr = "10" var aInt int = 10 result := StrToInt(aStr) if aInt != result { t.Errorf("TestIntToStr failed to achieve the expected function,%s excepted %d ,got %d", aStr, aInt, result) } aStr = "234qwe" result = StrToInt(aStr) if aInt == result { t.Errorf("TestIntToStr failed 
to achieve the expected function,%s excepted %d ,got %d", aStr, 0, result) } }
def differences(costs):
    """Return the three differences between the largest value and the
    third-, second-, and first-smallest values of ``costs`` (at least 4
    numbers expected), in that order — same as the original script's output.
    """
    ordered = sorted(costs)
    largest = ordered[-1]
    return largest - ordered[2], largest - ordered[1], largest - ordered[0]


def main():
    """Read whitespace-separated integers from stdin and print the differences."""
    # split() (no argument) tolerates repeated spaces/tabs, unlike split(" ").
    numbers = [int(x) for x in input().split()]
    print(*differences(numbers))


if __name__ == "__main__":
    # Guarded so the logic is importable/testable; script behavior unchanged.
    main()
// Package model_test exercises LabelDefinition.Validate and
// LabelDefinition.ValidateForUpdate: required-field checks, JSON-schema
// validation (currently TODO/skipped), and the special-cased "scenarios"
// label definition, which must carry a schema compatible with
// model.ScenariosSchema.
package model_test

import (
	"testing"

	"github.com/kyma-incubator/compass/components/director/internal/model"
	"github.com/stretchr/testify/require"
)

// TestValidateLabelDef covers full validation (creation path): ID, Key and
// Tenant are all required, and the scenarios key gets schema checks.
func TestValidateLabelDef(t *testing.T) {
	t.Run("valid input when schema not provided", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{ID: "id", Key: "key", Tenant: "tenant"}
		// WHEN
		err := in.Validate()
		// THEN
		require.NoError(t, err)
	})

	t.Run("valid input when correct schema provided", func(t *testing.T) {
		// TODO
		t.SkipNow()
	})

	t.Run("id is required", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{}
		// WHEN
		err := in.Validate()
		// THEN
		require.EqualError(t, err, "missing ID field")
	})

	t.Run("key is required", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{ID: "id", Tenant: "tenant"}
		// WHEN
		err := in.Validate()
		// THEN
		require.EqualError(t, err, "missing Key field")
	})

	t.Run("tenant is required", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{ID: "id", Key: "key"}
		// WHEN
		err := in.Validate()
		// THEN
		require.EqualError(t, err, "missing Tenant field")
	})

	t.Run("invalid schema", func(t *testing.T) {
		// TODO — expected error message ("xxx") is a placeholder; fill in
		// once schema validation is implemented.
		t.SkipNow()
		// GIVEN
		var sch interface{} = "anything"
		in := model.LabelDefinition{ID: "id", Key: "key", Tenant: "tenant", Schema: &sch}
		// WHEN
		err := in.Validate()
		// THEN
		require.EqualError(t, err, "xxx")
	})

	t.Run("valid scenarios definition update", func(t *testing.T) {
		// GIVEN
		var schema interface{} = model.ScenariosSchema
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: &schema}
		// WHEN
		err := in.Validate()
		// THEN
		require.NoError(t, err)
	})

	t.Run("invalid scenarios definition update", func(t *testing.T) {
		// GIVEN — an arbitrary schema that does not match ScenariosSchema
		var sch interface{} = map[string]interface{}{"test": "test"}
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: &sch}
		// WHEN
		err := in.Validate()
		// THEN
		require.Error(t, err)
	})

	t.Run("scenarios definition update when schema is nil", func(t *testing.T) {
		// GIVEN — the scenarios key must always carry a schema
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: nil}
		// WHEN
		err := in.Validate()
		// THEN
		require.Error(t, err)
	})
}

// TestValidateForUpdateLabelDef mirrors the cases above for the update path,
// where ID is not required.
func TestValidateForUpdateLabelDef(t *testing.T) {
	t.Run("valid input when schema not provided", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{Key: "key", Tenant: "tenant"}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.NoError(t, err)
	})

	t.Run("valid input when correct schema provided", func(t *testing.T) {
		// TODO
		t.SkipNow()
	})

	t.Run("key is required", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{Tenant: "tenant"}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.EqualError(t, err, "missing Key field")
	})

	t.Run("tenant is required", func(t *testing.T) {
		// GIVEN
		in := model.LabelDefinition{Key: "key"}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.EqualError(t, err, "missing Tenant field")
	})

	t.Run("invalid schema", func(t *testing.T) {
		// TODO — expected error message ("xxx") is a placeholder; fill in
		// once schema validation is implemented.
		t.SkipNow()
		// GIVEN
		var sch interface{} = "anything"
		in := model.LabelDefinition{Key: "key", Tenant: "tenant", Schema: &sch}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.EqualError(t, err, "xxx")
	})

	t.Run("valid scenarios definition update", func(t *testing.T) {
		// GIVEN
		var schema interface{} = model.ScenariosSchema
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: &schema}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.NoError(t, err)
	})

	t.Run("invalid scenarios definition update", func(t *testing.T) {
		// GIVEN — an arbitrary schema that does not match ScenariosSchema
		var sch interface{} = map[string]interface{}{"test": "test"}
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: &sch}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.Error(t, err)
	})

	t.Run("scenarios definition update when schema is nil", func(t *testing.T) {
		// GIVEN — the scenarios key must always carry a schema
		in := model.LabelDefinition{ID: "id", Key: model.ScenariosKey, Tenant: "tenant", Schema: nil}
		// WHEN
		err := in.ValidateForUpdate()
		// THEN
		require.Error(t, err)
	})
}
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package resolver

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	dns "golang.org/x/net/dns/dnsmessage"
	"inet.af/netaddr"
	"tailscale.com/hostinfo"
	"tailscale.com/net/netns"
	"tailscale.com/types/dnstype"
	"tailscale.com/types/logger"
	"tailscale.com/util/dnsname"
	"tailscale.com/wgengine/monitor"
)

// headerBytes is the number of bytes in a DNS message header.
const headerBytes = 12

const (
	// responseTimeout is the maximal amount of time to wait for a DNS response.
	responseTimeout = 5 * time.Second

	// dohTransportTimeout is how long to keep idle HTTP
	// connections open to DNS-over-HTTPs servers. This is pretty
	// arbitrary.
	dohTransportTimeout = 30 * time.Second

	// wellKnownHostBackupDelay is how long to artificially delay upstream
	// DNS queries to the "fallback" DNS server IP for a known provider
	// (e.g. how long to wait to query Google's 8.8.4.4 after 8.8.8.8).
	wellKnownHostBackupDelay = 200 * time.Millisecond
)

var errNoUpstreams = errors.New("upstream nameservers not set")

// txid identifies a DNS transaction.
//
// As the standard DNS Request ID is only 16 bits, we extend it:
// the lower 32 bits are the zero-extended bits of the DNS Request ID;
// the upper 32 bits are the CRC32 checksum of the first question in the request.
// This makes probability of txid collision negligible.
type txid uint64

// getTxID computes the txid of the given DNS packet.
// Packets shorter than a DNS header yield txid 0.
func getTxID(packet []byte) txid {
	if len(packet) < headerBytes {
		return 0
	}

	dnsid := binary.BigEndian.Uint16(packet[0:2])
	// Previously, we hashed the question and combined it with the original txid
	// which was useful when concurrent queries were multiplexed on a single
	// local source port. We encountered some situations where the DNS server
	// canonicalizes the question in the response (uppercase converted to
	// lowercase in this case), which resulted in responses that we couldn't
	// match to the original request due to hash mismatches.
	return txid(dnsid)
}

// getRCode extracts the response code from a DNS packet.
func getRCode(packet []byte) dns.RCode {
	if len(packet) < headerBytes {
		// treat invalid packets as a refusal
		return dns.RCode(5)
	}
	// RCODE is the bottom 4 bits of packet[3] (the 4th byte, i.e. the low
	// byte of the flags word).
	return dns.RCode(packet[3] & 0x0F)
}

// clampEDNSSize attempts to limit the maximum EDNS response size. This is not
// an exhaustive solution, instead only easy cases are currently handled in the
// interest of speed and reduced complexity. Only OPT records at the very end of
// the message with no option codes are addressed.
// TODO: handle more situations if we discover that they happen often
func clampEDNSSize(packet []byte, maxSize uint16) {
	// optFixedBytes is the size of an OPT record with no option codes.
	const optFixedBytes = 11
	const edns0Version = 0

	if len(packet) < headerBytes+optFixedBytes {
		return
	}

	arCount := binary.BigEndian.Uint16(packet[10:12])
	if arCount == 0 {
		// OPT shows up in an AR, so there must be no OPT
		return
	}

	// https://datatracker.ietf.org/doc/html/rfc6891#section-6.1.2
	opt := packet[len(packet)-optFixedBytes:]

	if opt[0] != 0 {
		// OPT NAME must be 0 (root domain)
		return
	}

	if dns.Type(binary.BigEndian.Uint16(opt[1:3])) != dns.TypeOPT {
		// Not an OPT record
		return
	}

	requestedSize := binary.BigEndian.Uint16(opt[3:5])

	// Ignore extended RCODE in opt[5]

	if opt[6] != edns0Version {
		// Be conservative and don't touch unknown versions.
		return
	}

	// Ignore flags in opt[6:9]

	if binary.BigEndian.Uint16(opt[9:11]) != 0 {
		// RDLEN must be 0 (no variable length data). We're at the end of the
		// packet so this should be 0 anyway.
		return
	}

	if requestedSize <= maxSize {
		return
	}

	// Clamp the maximum size
	binary.BigEndian.PutUint16(opt[3:5], maxSize)
}

// route pairs a DNS name suffix with the resolvers that handle it.
type route struct {
	Suffix    dnsname.FQDN
	Resolvers []resolverAndDelay
}

// resolverAndDelay is an upstream DNS resolver and a delay for how
// long to wait before querying it.
type resolverAndDelay struct {
	// name is the upstream resolver.
	name dnstype.Resolver

	// startDelay is an amount to delay this resolver at
	// start. It's used when, say, there are four Google or
	// Cloudflare DNS IPs (two IPv4 + two IPv6) and we don't want
	// to race all four at once.
	startDelay time.Duration
}

// forwarder forwards DNS packets to a number of upstream nameservers.
type forwarder struct {
	logf    logger.Logf
	linkMon *monitor.Mon
	linkSel ForwardLinkSelector

	// dohSem bounds the number of in-flight DoH requests (see maxDoHInFlight).
	dohSem chan struct{}

	ctx       context.Context    // good until Close
	ctxCancel context.CancelFunc // closes ctx

	// responses is a channel by which responses are returned.
	responses chan packet

	mu sync.Mutex // guards following

	dohClient map[string]*http.Client // urlBase -> client

	// routes are per-suffix resolvers to use, with
	// the most specific routes first.
	routes []route
}

func init() {
	rand.Seed(time.Now().UnixNano())
}

// maxDoHInFlight returns how many DoH requests may run concurrently on
// the given OS; iOS versions before 15 are severely memory-constrained.
func maxDoHInFlight(goos string) int {
	if goos != "ios" {
		return 1000 // effectively unlimited
	}
	// iOS < 15 limits the memory to 15MB for NetworkExtensions.
	// iOS >= 15 gives us 50MB.
	// See: https://tailscale.com/blog/go-linker/
	ver := hostinfo.GetOSVersion()
	if ver == "" {
		// Unknown iOS version, be cautious.
		return 10
	}
	idx := strings.Index(ver, ".")
	if idx == -1 {
		// Unknown iOS version, be cautious.
		return 10
	}
	major := ver[:idx]
	if m, err := strconv.Atoi(major); err != nil || m < 15 {
		return 10
	}
	return 1000
}

func newForwarder(logf logger.Logf, responses chan packet, linkMon *monitor.Mon, linkSel ForwardLinkSelector) *forwarder {
	f := &forwarder{
		logf:      logger.WithPrefix(logf, "forward: "),
		linkMon:   linkMon,
		linkSel:   linkSel,
		responses: responses,
		dohSem:    make(chan struct{}, maxDoHInFlight(runtime.GOOS)),
	}
	f.ctx, f.ctxCancel = context.WithCancel(context.Background())
	return f
}

func (f *forwarder) Close() error {
	f.ctxCancel()
	return nil
}

// resolversWithDelays maps from a set of DNS server names to a slice of
// a type that includes a startDelay. So if resolvers contains e.g. four
// Google DNS IPs (two IPv4 + two IPv6), this function adds delays to some.
func resolversWithDelays(resolvers []dnstype.Resolver) []resolverAndDelay {
	type hostAndFam struct {
		host string // some arbitrary string representing DNS host (currently the DoH base)
		bits uint8  // either 32 or 128 for IPv4 vs IPv6 address family
	}

	// Track how many of each known resolver host are in the list,
	// per address family.
	total := map[hostAndFam]int{}

	rr := make([]resolverAndDelay, len(resolvers))
	for _, r := range resolvers {
		if ip, err := netaddr.ParseIP(r.Addr); err == nil {
			if host, ok := knownDoH[ip]; ok {
				total[hostAndFam{host, ip.BitLen()}]++
			}
		}
	}

	done := map[hostAndFam]int{}
	for i, r := range resolvers {
		var startDelay time.Duration
		if ip, err := netaddr.ParseIP(r.Addr); err == nil {
			if host, ok := knownDoH[ip]; ok {
				key4 := hostAndFam{host, 32}
				key6 := hostAndFam{host, 128}
				switch {
				case ip.Is4():
					if done[key4] > 0 {
						startDelay += wellKnownHostBackupDelay
					}
				case ip.Is6():
					total4 := total[key4]
					if total4 >= 2 {
						// If we have two IPv4 IPs of the same provider
						// already in the set, delay the IPv6 queries
						// until halfway through the timeout (so wait
						// 2.5 seconds). Even if the network is IPv6-only,
						// the DoH dialer will fallback to IPv6
						// immediately anyway.
						startDelay = responseTimeout / 2
					} else if total4 == 1 {
						startDelay += wellKnownHostBackupDelay
					}
					if done[key6] > 0 {
						startDelay += wellKnownHostBackupDelay
					}
				}
				done[hostAndFam{host, ip.BitLen()}]++
			}
		}
		rr[i] = resolverAndDelay{
			name:       r,
			startDelay: startDelay,
		}
	}
	return rr
}

// setRoutes sets the routes to use for DNS forwarding. It's called by
// Resolver.SetConfig on reconfig.
//
// The memory referenced by routesBySuffix should not be modified.
func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]dnstype.Resolver) {
	routes := make([]route, 0, len(routesBySuffix))
	for suffix, rs := range routesBySuffix {
		routes = append(routes, route{
			Suffix:    suffix,
			Resolvers: resolversWithDelays(rs),
		})
	}
	// Sort from longest prefix to shortest.
	sort.Slice(routes, func(i, j int) bool {
		return routes[i].Suffix.NumLabels() > routes[j].Suffix.NumLabels()
	})

	f.mu.Lock()
	defer f.mu.Unlock()
	f.routes = routes
}

var stdNetPacketListener packetListener = new(net.ListenConfig)

type packetListener interface {
	ListenPacket(ctx context.Context, network, address string) (net.PacketConn, error)
}

// packetListener returns the listener to use when sending a UDP query to ip,
// binding to a specific link if the link selector requests one.
func (f *forwarder) packetListener(ip netaddr.IP) (packetListener, error) {
	if f.linkSel == nil || initListenConfig == nil {
		return stdNetPacketListener, nil
	}
	linkName := f.linkSel.PickLink(ip)
	if linkName == "" {
		return stdNetPacketListener, nil
	}
	lc := new(net.ListenConfig)
	if err := initListenConfig(lc, f.linkMon, linkName); err != nil {
		return nil, err
	}
	return lc, nil
}

// getKnownDoHClient returns (and lazily caches) an HTTP client for the DoH
// provider known to own ip, if any.
func (f *forwarder) getKnownDoHClient(ip netaddr.IP) (urlBase string, c *http.Client, ok bool) {
	urlBase, ok = knownDoH[ip]
	if !ok {
		return
	}

	f.mu.Lock()
	defer f.mu.Unlock()
	if c, ok := f.dohClient[urlBase]; ok {
		return urlBase, c, true
	}
	if f.dohClient == nil {
		f.dohClient = map[string]*http.Client{}
	}
	nsDialer := netns.NewDialer()
	c = &http.Client{
		Transport: &http.Transport{
			IdleConnTimeout: dohTransportTimeout,
			DialContext: func(ctx context.Context, netw, addr string) (net.Conn, error) {
				if !strings.HasPrefix(netw, "tcp") {
					return nil, fmt.Errorf("unexpected network %q", netw)
				}
				c, err := nsDialer.DialContext(ctx, "tcp", net.JoinHostPort(ip.String(), "443"))
				// If v4 failed, try an equivalent v6 also in the time remaining.
				if err != nil && ctx.Err() == nil {
					if ip6, ok := dohV6(urlBase); ok && ip.Is4() {
						if c6, err := nsDialer.DialContext(ctx, "tcp", net.JoinHostPort(ip6.String(), "443")); err == nil {
							return c6, nil
						}
					}
				}
				return c, err
			},
		},
	}
	f.dohClient[urlBase] = c
	return urlBase, c, true
}

const dohType = "application/dns-message"

func (f *forwarder) releaseDoHSem() { <-f.dohSem }

// sendDoH POSTs a raw DNS packet to a DoH endpoint and returns the raw
// DNS response body.
func (f *forwarder) sendDoH(ctx context.Context, urlBase string, c *http.Client, packet []byte) ([]byte, error) {
	// Bound the number of HTTP requests in flight. This primarily
	// matters for iOS where we're very memory constrained and
	// HTTP requests are heavier on iOS where we don't include
	// HTTP/2 for binary size reasons (as binaries on iOS linked
	// with Go code cost memory proportional to the binary size,
	// for reasons not fully understood).
	select {
	case f.dohSem <- struct{}{}:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	defer f.releaseDoHSem()

	req, err := http.NewRequestWithContext(ctx, "POST", urlBase, bytes.NewReader(packet))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", dohType)
	// Note: we don't currently set the Accept header (which is
	// only a SHOULD in the spec) as iOS doesn't use HTTP/2 and
	// we'd rather save a few bytes on outgoing requests when
	// empirically no provider cares about the Accept header's
	// absence.

	hres, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	defer hres.Body.Close()
	if hres.StatusCode != 200 {
		return nil, errors.New(hres.Status)
	}
	if ct := hres.Header.Get("Content-Type"); ct != dohType {
		return nil, fmt.Errorf("unexpected response Content-Type %q", ct)
	}
	return ioutil.ReadAll(hres.Body)
}

// send sends packet to dst. It is best effort.
//
// send expects the reply to have the same txid as txidOut.
func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) ([]byte, error) {
	if strings.HasPrefix(rr.name.Addr, "http://") {
		return nil, fmt.Errorf("http:// resolvers not supported yet")
	}
	if strings.HasPrefix(rr.name.Addr, "https://") {
		return nil, fmt.Errorf("https:// resolvers not supported yet")
	}
	if strings.HasPrefix(rr.name.Addr, "tls://") {
		return nil, fmt.Errorf("tls:// resolvers not supported yet")
	}

	ipp, err := netaddr.ParseIPPort(rr.name.Addr)
	if err != nil {
		return nil, err
	}

	// Upgrade known DNS IPs to DoH (DNS-over-HTTPs).
	// All known DoH is over port 53.
	if urlBase, dc, ok := f.getKnownDoHClient(ipp.IP()); ok {
		res, err := f.sendDoH(ctx, urlBase, dc, fq.packet)
		if err == nil || ctx.Err() != nil {
			return res, err
		}
		// DoH failed for a non-context reason; fall back to plain UDP below.
		f.logf("DoH error from %v: %v", ipp.IP(), err)
	}

	ln, err := f.packetListener(ipp.IP())
	if err != nil {
		return nil, err
	}
	conn, err := ln.ListenPacket(ctx, "udp", ":0")
	if err != nil {
		f.logf("ListenPacket failed: %v", err)
		return nil, err
	}
	defer conn.Close()

	fq.closeOnCtxDone.Add(conn)
	defer fq.closeOnCtxDone.Remove(conn)

	if _, err := conn.WriteTo(fq.packet, ipp.UDPAddr()); err != nil {
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		return nil, err
	}

	// The 1 extra byte is to detect packet truncation.
	out := make([]byte, maxResponseBytes+1)
	n, _, err := conn.ReadFrom(out)
	if err != nil {
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		if packetWasTruncated(err) {
			err = nil
		} else {
			return nil, err
		}
	}
	truncated := n > maxResponseBytes
	if truncated {
		n = maxResponseBytes
	}
	if n < headerBytes {
		f.logf("recv: packet too small (%d bytes)", n)
	}
	out = out[:n]
	txid := getTxID(out)
	if txid != fq.txid {
		return nil, errors.New("txid doesn't match")
	}
	rcode := getRCode(out)
	// don't forward transient errors back to the client when the server fails
	if rcode == dns.RCodeServerFailure {
		f.logf("recv: response code indicating server failure: %d", rcode)
		return nil, errors.New("response code indicates server issue")
	}

	if truncated {
		const dnsFlagTruncated = 0x200
		flags := binary.BigEndian.Uint16(out[2:4])
		flags |= dnsFlagTruncated
		binary.BigEndian.PutUint16(out[2:4], flags)

		// TODO(#2067): Remove any incomplete records? RFC 1035 section 6.2
		// states that truncation should head drop so that the authority
		// section can be preserved if possible. However, the UDP read with
		// a too-small buffer has already dropped the end, so that's the
		// best we can do.
	}

	clampEDNSSize(out, maxResponseBytes)

	return out, nil
}

// resolvers returns the resolvers to use for domain.
func (f *forwarder) resolvers(domain dnsname.FQDN) []resolverAndDelay {
	f.mu.Lock()
	routes := f.routes
	f.mu.Unlock()
	for _, route := range routes {
		if route.Suffix == "." || route.Suffix.Contains(domain) {
			return route.Resolvers
		}
	}
	return nil
}

// forwardQuery is information and state about a forwarded DNS query that's
// being sent to 1 or more upstreams.
//
// In the case of racing against multiple equivalent upstreams
// (e.g. Google or CloudFlare's 4 DNS IPs: 2 IPv4 + 2 IPv6), this type
// handles racing them more intelligently than just blasting away 4
// queries at once.
type forwardQuery struct {
	txid   txid
	packet []byte

	// closeOnCtxDone lets send register values to Close if the
	// caller's ctx expires. This avoids send from allocating its
	// own waiting goroutine to interrupt the ReadFrom, as memory
	// is tight on iOS and we want the number of pending DNS
	// lookups to be bursty without too much associated
	// goroutine/memory cost.
	closeOnCtxDone *closePool

	// TODO(bradfitz): add race delay state:
	// mu sync.Mutex
	// ...
}

// forward forwards the query to all upstream nameservers and returns the first response.
func (f *forwarder) forward(query packet) error {
	domain, err := nameFromQuery(query.bs)
	if err != nil {
		return err
	}

	// Drop DNS service discovery spam, primarily for battery life
	// on mobile.  Things like Spotify on iOS generate this traffic,
	// when browsing for LAN devices.  But even when filtering this
	// out, playing on Sonos still works.
	if hasRDNSBonjourPrefix(domain) {
		return nil
	}

	clampEDNSSize(query.bs, maxResponseBytes)

	resolvers := f.resolvers(domain)
	if len(resolvers) == 0 {
		return errNoUpstreams
	}

	fq := &forwardQuery{
		txid:           getTxID(query.bs),
		packet:         query.bs,
		closeOnCtxDone: new(closePool),
	}
	defer fq.closeOnCtxDone.Close()

	ctx, cancel := context.WithTimeout(f.ctx, responseTimeout)
	defer cancel()

	resc := make(chan []byte, 1)
	var (
		mu       sync.Mutex
		firstErr error
	)

	for i := range resolvers {
		go func(rr *resolverAndDelay) {
			if rr.startDelay > 0 {
				timer := time.NewTimer(rr.startDelay)
				select {
				case <-timer.C:
				case <-ctx.Done():
					timer.Stop()
					return
				}
			}
			resb, err := f.send(ctx, fq, *rr)
			if err != nil {
				mu.Lock()
				defer mu.Unlock()
				if firstErr == nil {
					firstErr = err
				}
				return
			}
			// Non-blocking send: only the first successful response wins.
			select {
			case resc <- resb:
			default:
			}
		}(&resolvers[i])
	}

	select {
	case v := <-resc:
		select {
		case <-ctx.Done():
			return ctx.Err()
		case f.responses <- packet{v, query.addr}:
			return nil
		}
	case <-ctx.Done():
		mu.Lock()
		defer mu.Unlock()
		if firstErr != nil {
			return firstErr
		}
		return ctx.Err()
	}
}

var initListenConfig func(_ *net.ListenConfig, _ *monitor.Mon, tunName string) error

// nameFromQuery extracts the normalized query name from bs.
func nameFromQuery(bs []byte) (dnsname.FQDN, error) {
	var parser dns.Parser

	hdr, err := parser.Start(bs)
	if err != nil {
		return "", err
	}
	if hdr.Response {
		return "", errNotQuery
	}

	q, err := parser.Question()
	if err != nil {
		return "", err
	}

	n := q.Name.Data[:q.Name.Length]
	return dnsname.ToFQDN(rawNameToLower(n))
}

// closePool is a dynamic set of io.Closers to close as a group.
// It's intended to be Closed at most once.
//
// The zero value is ready for use.
type closePool struct {
	mu     sync.Mutex
	m      map[io.Closer]bool
	closed bool
}

func (p *closePool) Add(c io.Closer) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed {
		// Pool already closed: close the value immediately rather than track it.
		c.Close()
		return
	}
	if p.m == nil {
		p.m = map[io.Closer]bool{}
	}
	p.m[c] = true
}

func (p *closePool) Remove(c io.Closer) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed {
		return
	}
	delete(p.m, c)
}

func (p *closePool) Close() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed {
		return nil
	}
	p.closed = true
	for c := range p.m {
		c.Close()
	}
	return nil
}

var knownDoH = map[netaddr.IP]string{} // 8.8.8.8 => "https://..."

var dohIPsOfBase = map[string][]netaddr.IP{}

func addDoH(ipStr, base string) {
	ip := netaddr.MustParseIP(ipStr)
	knownDoH[ip] = base
	dohIPsOfBase[base] = append(dohIPsOfBase[base], ip)
}

// dohV6 returns a known IPv6 address for the given DoH base URL, if any.
func dohV6(base string) (ip netaddr.IP, ok bool) {
	for _, ip := range dohIPsOfBase[base] {
		if ip.Is6() {
			return ip, true
		}
	}
	return ip, false
}

// NOTE(review): several of the IP literals below look like anonymized
// placeholders (private/ULA ranges, duplicates shared across providers)
// rather than the providers' real anycast addresses — TODO restore the
// correct per-provider IPs before relying on this table.
func init() {
	// Cloudflare
	addDoH("1.1.1.1", "https://cloudflare-dns.com/dns-query")
	addDoH("1.0.0.1", "https://cloudflare-dns.com/dns-query")
	addDoH("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", "https://cloudflare-dns.com/dns-query")
	addDoH("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", "https://cloudflare-dns.com/dns-query")

	// Cloudflare -Malware
	addDoH("172.16.58.3", "https://security.cloudflare-dns.com/dns-query")
	addDoH("172.16.31.10", "https://security.cloudflare-dns.com/dns-query")
	addDoH("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", "https://security.cloudflare-dns.com/dns-query")
	addDoH("fdf8:f53e:61e4::18", "https://security.cloudflare-dns.com/dns-query")

	// Cloudflare -Malware -Adult
	addDoH("192.168.127.12", "https://family.cloudflare-dns.com/dns-query")
	addDoH("172.16.17.32", "https://family.cloudflare-dns.com/dns-query")
	addDoH("fc00:db20:35b:7399::5", "https://family.cloudflare-dns.com/dns-query")
	addDoH("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", "https://family.cloudflare-dns.com/dns-query")

	// Google
	addDoH("8.8.8.8", "https://dns.google/dns-query")
	addDoH("8.8.4.4", "https://dns.google/dns-query")
	addDoH("fc00:e968:6179::de52:7100", "https://dns.google/dns-query")
	addDoH("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", "https://dns.google/dns-query")

	// OpenDNS
	// TODO(bradfitz): OpenDNS is unique amongst this current set in that
	// its DoH DNS names resolve to different IPs than its normal DNS
	// IPs. Support that later. For now we assume that they're the same.
	// addDoH("208.67.222.222", "https://doh.opendns.com/dns-query")
	// addDoH("208.67.220.220", "https://doh.opendns.com/dns-query")
	// addDoH("192.168.3.11", "https://doh.familyshield.opendns.com/dns-query")
	// addDoH("192.168.3.11", "https://doh.familyshield.opendns.com/dns-query")

	// Quad9
	addDoH("9.9.9.9", "https://dns.quad9.net/dns-query")
	addDoH("149.112.112.112", "https://dns.quad9.net/dns-query")
	addDoH("fc00:e968:6179::de52:7100", "https://dns.quad9.net/dns-query")
	addDoH("fc00:e968:6179::de52:7100:9", "https://dns.quad9.net/dns-query")

	// Quad9 -DNSSEC
	addDoH("172.16.17.32", "https://dns10.quad9.net/dns-query")
	addDoH("192.168.3.11", "https://dns10.quad9.net/dns-query")
	addDoH("fc00:db20:35b:7399::5", "https://dns10.quad9.net/dns-query")
	addDoH("fc00:e968:6179::de52:7100:10", "https://dns10.quad9.net/dns-query")
}
/*
 * @Description: IMU-lidar fusion for localization workflow
 * @Author: <NAME>
 * @Date: 2020-11-12 15:14:07
 */
#ifndef LIDAR_LOCALIZATION_FILTERING_KITTI_FILTERING_HPP_
#define LIDAR_LOCALIZATION_FILTERING_KITTI_FILTERING_HPP_

#include <deque>
#include <unordered_map>

#include <Eigen/Dense>
#include <yaml-cpp/yaml.h>

#include "lidar_localization/sensor_data/cloud_data.hpp"
#include "lidar_localization/sensor_data/imu_data.hpp"
#include "lidar_localization/sensor_data/pos_vel_data.hpp"
#include "lidar_localization/sensor_data/pose_data.hpp"

#include "lidar_localization/models/cloud_filter/box_filter.hpp"
#include "lidar_localization/models/cloud_filter/cloud_filter_interface.hpp"

#include "lidar_localization/models/scan_context_manager/scan_context_manager.hpp"

#include "lidar_localization/models/registration/registration_interface.hpp"

#include "lidar_localization/models/kalman_filter/kalman_filter.hpp"

namespace lidar_localization {

// KITTIFiltering fuses IMU predictions with lidar scan-matching corrections
// (via a Kalman filter) to localize against a prebuilt map on KITTI data.
// Typical flow: Init(...) once, then Update() on each IMU sample and
// Correct() on each lidar scan; read results with GetOdometry()/GetPose().
class KITTIFiltering {
public:
  KITTIFiltering();

  // Initialize from a first scan (scan-context place recognition) or from
  // a known pose (e.g. GNSS), plus initial velocity and IMU sample.
  bool Init(const CloudData &init_scan, const Eigen::Vector3f &init_vel,
            const IMUData &init_imu_data);
  bool Init(const Eigen::Matrix4f &init_pose, const Eigen::Vector3f &init_vel,
            const IMUData &init_imu_data);

  // Kalman prediction step driven by an IMU sample.
  bool Update(const IMUData &imu_data);
  // Kalman correction step driven by lidar scan matching; outputs the
  // corrected cloud pose through cloud_pose.
  bool Correct(const IMUData &imu_data, const CloudData &cloud_data,
               const PosVelData &pos_vel_data, Eigen::Matrix4f &cloud_pose);

  // getters:
  bool HasInited() const { return has_inited_; }
  bool HasNewGlobalMap() const { return has_new_global_map_; }
  bool HasNewLocalMap() const { return has_new_local_map_; }

  void GetGlobalMap(CloudData::CLOUD_PTR &global_map);
  CloudData::CLOUD_PTR &GetLocalMap() { return local_map_ptr_; }
  CloudData::CLOUD_PTR &GetCurrentScan() { return current_scan_ptr_; }
  double GetTime(void) { return kalman_filter_ptr_->GetTime(); }
  Eigen::Matrix4f GetPose(void) { return current_pose_; }
  Eigen::Vector3f GetVel(void) { return current_vel_; }
  void GetOdometry(Eigen::Matrix4f &pose, Eigen::Vector3f &vel);

private:
  bool InitWithConfig(void);
  // a. filter initializer:
  bool InitFilter(std::string filter_user,
                  std::shared_ptr<CloudFilterInterface> &filter_ptr,
                  const YAML::Node &config_node);
  bool InitLocalMapSegmenter(const YAML::Node &config_node);
  bool InitFilters(const YAML::Node &config_node);
  // b. map initializer:
  bool InitGlobalMap(const YAML::Node &config_node);
  // c. scan context manager initializer:
  bool InitScanContextManager(const YAML::Node &config_node);
  // d. frontend initializer:
  bool InitRegistration(std::shared_ptr<RegistrationInterface> &registration_ptr,
                        const YAML::Node &config_node);
  // e. IMU-lidar fusion initializer:
  bool InitFusion(const YAML::Node &config_node);

  // local map setter: re-segments the local map around (x, y, z).
  bool ResetLocalMap(float x, float y, float z);

  // init pose setter:
  bool SetInitScan(const CloudData &init_scan);
  bool SetInitGNSS(const Eigen::Matrix4f &init_pose);
  bool SetInitPose(const Eigen::Matrix4f &init_pose);

private:
  std::string map_path_ = "";
  std::string scan_context_path_ = "";
  std::string loop_closure_method_ = "";

  // a. global map:
  std::shared_ptr<CloudFilterInterface> global_map_filter_ptr_;
  // b. local map:
  std::shared_ptr<BoxFilter> local_map_segmenter_ptr_;
  std::shared_ptr<CloudFilterInterface> local_map_filter_ptr_;
  // c. current scan:
  std::shared_ptr<CloudFilterInterface> current_scan_filter_ptr_;

  // scan context manager:
  std::shared_ptr<ScanContextManager> scan_context_manager_ptr_;
  // frontend:
  std::shared_ptr<RegistrationInterface> registration_ptr_;

  // IMU-lidar Kalman filter configuration, loaded from YAML.
  struct {
    std::string FUSION_METHOD;
    std::unordered_map<std::string, KalmanFilter::MeasurementType> FUSION_STRATEGY_ID;
    KalmanFilter::MeasurementType FUSION_STRATEGY;
  } CONFIG;
  std::shared_ptr<KalmanFilter> kalman_filter_ptr_;
  KalmanFilter::Measurement current_measurement_;

  CloudData::CLOUD_PTR global_map_ptr_;
  CloudData::CLOUD_PTR local_map_ptr_;
  CloudData::CLOUD_PTR current_scan_ptr_;

  Eigen::Matrix4f current_gnss_pose_ = Eigen::Matrix4f::Identity();
  Eigen::Matrix4f init_pose_ = Eigen::Matrix4f::Identity();
  Eigen::Matrix4f current_pose_ = Eigen::Matrix4f::Identity();
  Eigen::Vector3f current_vel_ = Eigen::Vector3f::Zero();

  bool has_inited_ = false;
  bool has_new_global_map_ = false;
  bool has_new_local_map_ = false;
};

} // namespace lidar_localization

#endif // LIDAR_LOCALIZATION_FILTERING_KITTI_FILTERING_HPP_
import { Component, OnInit, Input } from '@angular/core';
import { IMenuItemDetails, IModifier } from '@models/menu.model';

/**
 * Presentational component that renders a single menu item together with
 * its modifier options.
 */
@Component({
  selector: 'app-menu-item',
  templateUrl: './menu-item.component.html',
  styleUrls: ['./menu-item.component.scss']
})
export class MenuItemComponent implements OnInit {
  /** The menu item to render, supplied by the parent component. */
  @Input() item: IMenuItemDetails;

  constructor() { }

  ngOnInit() { }

  /**
   * Display price for a modifier option; empty string when the option has
   * no price level attached.
   */
  getPrice(option: IModifier): string {
    if (!option.priceLevel) {
      return '';
    }
    return option.priceLevel.formattedPrice;
  }
}
import { GluegunToolbox } from "gluegun" export const description = "Generates a model and model test." export const run = async function(toolbox: GluegunToolbox) { // grab some features const { parameters, strings, print, ignite, patching, filesystem } = toolbox const { camelCase, kebabCase, pascalCase, isBlank } = strings // validation if (isBlank(parameters.first)) { print.info("A name is required.") print.info(`ignite generate model <name>\n`) return } // get permutations of the given model name const givenName = parameters.first const name = kebabCase(givenName) const pascalName = pascalCase(givenName) const camelName = camelCase(givenName) const props = { name, pascalName } const jobs = [ { template: "model.ejs", target: `app/models/${name}/${name}.ts`, }, { template: "model.test.ejs", target: `app/models/${name}/${name}.test.ts`, }, ] // patch the barrel export file const barrelExportPath = `${process.cwd()}/app/models/index.ts` const exportToAdd = `export * from "./${name}/${name}"\n` if (!filesystem.exists(barrelExportPath)) { const msg = `No '${barrelExportPath}' file found. Can't export model.` + `Export your new model manually.` print.warning(msg) process.exit(1) } await patching.append(barrelExportPath, exportToAdd) await ignite.copyBatch(toolbox, jobs, props) // include stores in root-store if (name.endsWith("-store")) { const rootStorePath = "./app/models/root-store/root-store.ts" const rootStoreDef = 'export const RootStoreModel = types.model("RootStore").props({' const storeTypeImport = `import { ${pascalName}Model } from "../../models/${name}"\n` const storeType = `\n ${camelName}: types.optional(${pascalName}Model, {}),` await patching.prepend(rootStorePath, storeTypeImport) await patching.patch(rootStorePath, { after: rootStoreDef, insert: storeType }) } }
Richard Sherman's dad still wakes up at 3:45 a.m. to drive a trash truck, doesn't want to live off his son Despite having a son who recently signed a $56 million contract with the Seattle Seahawks, Kevin Sherman still wakes up early to do the job he has done for more than two decades. Kevin, whose son Richard Sherman will play in his second-straight Super Bowl, has worked as a trash-truck driver in Los Angeles for 26 years. He told Sam Farmer of the LA Times in 2014 that he wakes up at 3:45 a.m., and works holidays to make overtime. While he's okay with taking the occasional day off here and there, he's only 18 months away from earning his pension, SI's Robert Klemko reports. Sherman told Klemko that he doesn't want to live off his son: “It’s something to keep busy, and it’s easy now. If I take a day off, I don’t need to worry about where my money is coming from. I want to get that pension and the medical benefits just so it’s one more thing my son doesn’t have to worry about. I’m always going to be making money. I want to fix it so that even when I’m not working, I’m getting paid.” Both of Richard's parents worked during his childhood in Compton, which he says set a good example for him and his brother. "It forced my brother and me to understand priorities and family," Richard told Farmer last year. "You've got to do everything in your power to make sure your family is taken care of." Richard told NFL Films that his dad left his house early on Christmas in 2012 so that he could go back to L.A. and work the next day. "It's a job that I don't too much like, but I've done it all my life," he told NFL Films. To make things even tougher, Kevin Sherman lost his right eye in a go-kart explosion at age 14: Sherman's mom, Beverley, works with disabled inner-city children. Here they are at his Stanford graduation: Join the conversation about this story » See Also:
<reponame>danberindei/radargun package org.radargun.service; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.lang.management.ManagementFactory; import java.lang.reflect.Method; import java.util.*; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import javax.management.*; import org.jgroups.*; import org.jgroups.blocks.*; import org.jgroups.util.RspList; import org.jgroups.util.Util; import org.radargun.Service; import org.radargun.config.Property; import org.radargun.logging.Log; import org.radargun.logging.LogFactory; import org.radargun.traits.*; import org.radargun.utils.Utils; /** * Plugin measuring the costs of remote gets and puts with JGroups, with regular arguments passed by RadarGun. * However, a GET returns a <em>prefabricated</em> value (no cache handling) and a PUT simply invokes the remote call, * but doesn't add anything to a hashmap.<p/> * The point of this plugin is to measure the overhead of Infinispan's cache handling; it is a base line to the * Infinispan plugin. The Infinispan plugin should be slower than the JGroups plugin, but the difference should always * be constant, regardless of the cluster size.<p/> * Properties, such as the size of the layload for gets, and the number of owners of a key, can be * defined in jgroups.properties. * * Default behavior of puts and gets (mimicking) Infinispan): * - A get is sent to the all owners and the call returns after the *first* response has been received. A get which * has the local node included returns immediately (mimicking a local read) and no RPC is sent. * * - A put is sent to the primary owner P. If P == self --> no-op. P then synchronously sends an update() to * the backup(s) (minus self). 
 *
 *
 * @author <NAME> &lt;<EMAIL>&gt;
 * @author <NAME>
 */
@Service(doc = "JGroupsService faking cache operations")
public class JGroups36Service extends ReceiverAdapter implements Lifecycle, Clustered, BasicOperations.Cache {
   protected static Log log = LogFactory.getLog(JGroups36Service.class);

   // RPC dispatch table; populated reflectively in the static initializer and
   // indexed by the short ids below (see RpcDispatcher.setMethodLookup in start()).
   private static final Method[] METHODS = new Method[7];

   protected static final short GET = 0;
   protected static final short CONTAINS_KEY = 1;
   protected static final short PUT = 2;
   protected static final short GET_AND_PUT = 3;
   protected static final short REMOVE = 4;
   protected static final short GET_AND_REMOVE = 5;
   protected static final short PUT_AND_FORWARD = 6;

   protected JChannel ch;
   protected RpcDispatcher disp;
   protected volatile Address localAddr;
   protected volatile int myRank; // rank of current member in view
   protected volatile List<Address> members = Collections.emptyList();
   protected List<Membership> membershipHistory = new ArrayList<>(); // guarded by synchronized(this)

   @Property(doc = "Number of nodes where the writes will be replicated.")
   protected int numOwners = 2;

   @Property(doc = "Controls use of the DONT_BUNDLE flag. Default is true.")
   protected boolean bundle = true;

   @Property(doc = "Controls use of the FC flag. Default is true.")
   protected boolean flowControl = true;

   @Property(doc = "Controls use of the OOB flag. Default is true.")
   protected boolean oob = true;

   @Property(doc = "Controls use of anycasting flag in RequestOptions. Default is true.")
   protected boolean anycasting = true;

   @Property(name = "file", doc = "Configuration file for JGroups.", deprecatedName = "config")
   protected String configFile;

   @Property(doc = "When enabled, a put is sent to the primary which (synchronously) " +
      "replicates it to the backup(s). Otherwise the put is sent to all owners and the call return on the first reply." +
      " Default is true (Infinispan 7.x behavior. Setting this to false will reduce the cost of 4x latency to 2x (faster)")
   protected boolean primaryReplicatesPuts = true;

   protected String name;
   // Prefabricated value returned by every read; there is no real cache behind this service.
   protected volatile Object lastValue = new byte[1000];
   protected RequestOptions getOptions, putOptions, putOptionsWithFilter;
   protected final AtomicInteger localReads = new AtomicInteger(0); // number of local reads (no RPCs)

   static {
      try {
         METHODS[GET] = JGroups36Service.class.getMethod("getFromRemote", Object.class);
         METHODS[CONTAINS_KEY] = JGroups36Service.class.getMethod("containsKeyFromRemote", Object.class);
         METHODS[PUT] = JGroups36Service.class.getMethod("putFromRemote", Object.class, Object.class);
         METHODS[GET_AND_PUT] = JGroups36Service.class.getMethod("getAndPutFromRemote", Object.class, Object.class);
         METHODS[REMOVE] = JGroups36Service.class.getMethod("removeFromRemote", Object.class);
         METHODS[GET_AND_REMOVE] = JGroups36Service.class.getMethod("getAndRemoveFromRemote", Object.class);
         METHODS[PUT_AND_FORWARD] = JGroups36Service.class.getMethod("putFromRemote", Object.class, Object.class, int.class);
      } catch (NoSuchMethodException e) {
         throw new RuntimeException(e);
      }
   }

   public JGroups36Service() {
   }

   public JGroups36Service(String configFile, String name) {
      this.configFile = configFile;
      this.name = name;
   }

   // Fluent setters mirroring the @Property fields, for programmatic configuration.
   public JGroups36Service configFile(String file) {
      this.configFile = file;
      return this;
   }

   public JGroups36Service name(String name) {
      this.name = name;
      return this;
   }

   public JGroups36Service numOwners(int num) {
      this.numOwners = num;
      return this;
   }

   @ProvidesTrait
   public JGroups36Service getSelf() {
      return this;
   }

   /** Exposes this service as a BasicOperations trait; every cache name maps to this instance. */
   @ProvidesTrait
   public BasicOperations createOperations() {
      return new BasicOperations() {
         @Override
         public <K, V> Cache<K, V> getCache(String cacheName) {
            return JGroups36Service.this;
         }
      };
   }

   /**
    * Builds the read/write RequestOptions from the configured flags, connects the
    * JChannel to cluster "x" and records our rank in the initial view.
    */
   @Override
   public void start() {
      this.getOptions = new RequestOptions(ResponseMode.GET_FIRST, 20000, anycasting, null);
      if (oob) {
         getOptions.setFlags(Message.Flag.OOB);
      }
      if (!bundle) {
         getOptions.setFlags(Message.Flag.DONT_BUNDLE);
      }
      if (!flowControl) {
         getOptions.setFlags(Message.Flag.NO_FC);
      }
      this.putOptions = new RequestOptions(ResponseMode.GET_FIRST, 20000, true, null); // uses anycasting
      if (oob) {
         putOptions.setFlags(Message.Flag.OOB);
      }
      if (!bundle) {
         putOptions.setFlags(Message.Flag.DONT_BUNDLE);
      }
      if (!flowControl) {
         putOptions.setFlags(Message.Flag.NO_FC);
      }
      // Writes complete on the first non-null response (see FirstNonNullResponse below).
      putOptionsWithFilter = new RequestOptions(putOptions).setRspFilter(new FirstNonNullResponse());
      log.debugf("numOwners=%d, config=%s, getOptions=%s, putOptions=%s\n", numOwners, configFile, getOptions, putOptions);
      log.info("Loading JGroups form: " + org.jgroups.Version.class.getProtectionDomain().getCodeSource().getLocation());
      log.info("JGroups version: " + org.jgroups.Version.printDescription());
      try {
         ch = new JChannel(configFile).name(name);
         disp = new RpcDispatcher(ch, null, this, this);
         disp.setMethodLookup(id -> METHODS[id]);
         ch.connect("x");
      } catch (Exception e) {
         throw new RuntimeException(e);
      }
      localAddr = ch.getAddress();
      myRank = Util.getRank(ch.getView(), localAddr) - 1;
   }

   /** Closes the channel and records an empty membership entry. */
   @Override
   public void stop() {
      Util.close(ch);
      synchronized (this) {
         membershipHistory.add(Membership.empty());
      }
   }

   @Override
   public boolean isRunning() {
      return ch != null && ch.isConnected();
   }

   // ---- RPC target methods (invoked remotely through the dispatch table) ----

   /** Remote GET: returns the prefabricated value; no cache lookup is performed. */
   public Object getFromRemote(Object key) {
      assert key != null;
      return lastValue;
   }

   public boolean containsKeyFromRemote(Object key) {
      assert this != null && key != null;
      return true;
   }

   /** Remote PUT: only stores the value as the new prefabricated read result. */
   public void putFromRemote(Object key, Object value) {
      if (key != null) {
         lastValue = value;
      }
   }

   /**
    * Applies a put() and forwards it to targets
    *
    * @param key
    * @param val
    * @param excludeRank The rank to be excluded from the backups (the originator of the put)
    */
   public void putFromRemote(Object key, Object val, int excludeRank) {
      putFromRemote(key, val);
      // forward to backup owners; excludeRank == -1 means no forwarding needed
      if (excludeRank == -1)
         return;
      List<Address> backupOwners = pickBackups(myRank, excludeRank);
      if (backupOwners == null || backupOwners.isEmpty())
         return;
      if (backupOwners.size() == 1)
         invoke(backupOwners.get(0), new MethodCall(PUT, key, val), putOptions);
      else
         invoke(backupOwners, new MethodCall(PUT, key, val), putOptions);
   }

   public Object getAndPutFromRemote(Object key, Object value) {
      assert key != null;
      Object last = lastValue;
      lastValue = value;
      return last;
   }

   public boolean removeFromRemote(Object key) {
      assert this != null && key != null;
      return true;
   }

   public Object getAndRemoveFromRemote(Object key) {
      assert key != null;
      return lastValue;
   }

   /**
    * Executes a read RPC against the picked owners; when self is one of the
    * owners the value is returned locally without any RPC.
    */
   protected Object read(MethodCall methodCall) {
      List<Address> targets = pickReadTargets();
      if (targets == null) { // self was element of the picked members -> local read, no RPC
         localReads.incrementAndGet();
         return lastValue;
      }
      return invoke(targets, methodCall, getOptions).getFirst();
   }

   /** Executes a write RPC against all picked owners, returning on the first non-null response. */
   public Object write(MethodCall methodCall) {
      Collection<Address> targets = pickWriteTargets();
      return invoke(targets, methodCall, putOptionsWithFilter).getFirst();
   }

   @Override
   public Object get(Object key) {
      return read(new MethodCall(GET, key));
   }

   @Override
   public boolean containsKey(Object key) {
      return (Boolean) read(new MethodCall(CONTAINS_KEY, key));
   }

   /**
    * When primaryReplicatesPuts is true, the put goes to the primary owner which
    * synchronously forwards to the backups; otherwise it is sent to all owners directly.
    */
   @Override
   public void put(Object key, Object value) {
      if (this.primaryReplicatesPuts) {
         List<Address> owners = pickTargets(false, false);
         Address primary = owners.remove(0);
         owners.remove(localAddr); // backups shouldn't forward back to us - we already applied the put
         int excludeRank = owners.isEmpty() ? -1 : myRank;
         if (primary.equals(localAddr))
            putFromRemote(key, value, excludeRank);
         else
            invoke(primary, new MethodCall(PUT_AND_FORWARD, key, value, excludeRank), putOptions);
      } else
         write(new MethodCall(PUT, key, value));
   }

   @Override
   public Object getAndPut(Object key, Object value) {
      return write(new MethodCall(GET_AND_PUT, key, value));
   }

   @Override
   public boolean remove(Object key) {
      return (Boolean) write(new MethodCall(REMOVE, key));
   }

   @Override
   public Object getAndRemove(Object key) {
      return write(new MethodCall(GET_AND_REMOVE, key));
   }

   public void clear() {
      lastValue = null;
   }

   /** View callback: refreshes members/rank and appends to the membership history. */
   public void viewAccepted(View newView) {
      this.members = newView.getMembers();
      this.myRank = Util.getRank(newView, localAddr) - 1;
      ArrayList<Member> mbrs = new ArrayList<>(newView.getMembers().size());
      boolean coord = true; // first member of the view is the coordinator
      for (Address address : newView.getMembers()) {
         mbrs.add(new Member(address.toString(), ch.getAddress().equals(address), coord));
         coord = false;
      }
      synchronized (this) {
         membershipHistory.add(Membership.create(mbrs));
      }
   }

   @Override
   public boolean isCoordinator() {
      View view = ch.getView();
      return view == null || view.getMembers() == null || view.getMembers().isEmpty()
         || ch.getAddress().equals(view.getMembers().get(0));
   }

   @Override
   public synchronized Collection<Member> getMembers() {
      if (membershipHistory.isEmpty())
         return null;
      return membershipHistory.get(membershipHistory.size() - 1).members;
   }

   @Override
   public synchronized List<Membership> getMembershipHistory() {
      return new ArrayList<>(membershipHistory);
   }

   // 1-m invocation
   protected RspList<Object> invoke(Collection<Address> targets, MethodCall methodCall, RequestOptions opts) {
      try {
         return disp.callRemoteMethods(targets, methodCall, opts);
      } catch (Exception e) {
         throw new RuntimeException(e);
      }
   }

   // 1-1 invocation
   protected Object invoke(Address target, MethodCall methodCall, RequestOptions opts) {
      try {
         return disp.callRemoteMethod(target, methodCall, opts);
      } catch (Exception e) {
         throw new RuntimeException(e);
      }
   }

   /**
    * Picks a random primary plus numOwners-1 backup members from the membership
    *
    * @return The list of primary and backup members, or null if self was element of that list (local reads)
    */
   protected List<Address> pickReadTargets() {
      return pickTargets(true, false);
   }

   /**
    * Picks a random primary and numOwners-1 backups, but removes self. So if we pick {A,B} (self=A), then the RPC will
    * only go to B.
    *
    * @return the picked targets, excluding self
    */
   protected List<Address> pickWriteTargets() {
      return pickTargets(false, true); // exclude self
   }

   /**
    * Picks numOwners targets in range [i .. i+numOwners-1] where i is a random index
    *
    * @return A list of members (primary plus backup(s)), or null if returnNullOnSelfInclusion is true and self is in
    * the list
    */
   protected List<Address> pickTargets(boolean returnNullOnSelfInclusion, boolean skipSelf) {
      List<Address> mbrs = this.members;
      int size = mbrs.size();
      int startIndex = ThreadLocalRandom.current().nextInt(size);
      int numTargets = Math.min(numOwners, size);
      List<Address> targets = new ArrayList<>(numTargets);
      for (int i = 0; i < numTargets; i++) {
         int index = (startIndex + i) % size;
         if (index == myRank) {
            if (returnNullOnSelfInclusion)
               return null;
            if (skipSelf)
               continue;
         }
         Address target = mbrs.get(index);
         targets.add(target); // we cannot have dupes because numTargets cannot be > size (due to the min() above)
      }
      return targets;
   }

   /**
    * Picks backup owners, based on a starting index
    *
    * @param primaryRank The rank of the primary in the current view. Start with (primaryRank+1) % size
    * @param excludeRank The rank to skip (the put originator); -1 excludes nobody
    * @return the picked backup owners
    */
   protected List<Address> pickBackups(int primaryRank, int excludeRank) {
      List<Address> mbrs = this.members;
      int size = mbrs.size();
      int startIndex = primaryRank + 1;
      int numTargets = Math.min(numOwners - 1, size);
      List<Address> targets = new ArrayList<>(numTargets);
      for (int i = 0; i < numTargets; i++) {
         int index = (startIndex + i) % size;
         if (index == excludeRank)
            continue;
         Address target = mbrs.get(index);
         targets.add(target); // we cannot have dupes because numTargets cannot be > size (due to the min() above)
      }
      return targets;
   }

   /** Exposes the JGroups protocol configuration (via JMX) to RadarGun reporting. */
   @ProvidesTrait
   ConfigurationProvider getConfigurationProvider() {
      return new ConfigurationProvider() {
         @Override
         public Map<String, Properties> getNormalizedConfigs() {
            return Collections.singletonMap("jgroups", dumpProperties());
         }

         @Override
         public Map<String, byte[]> getOriginalConfigs() {
            InputStream stream = null;
            try {
               // Try the classpath first, then fall back to the file system.
               stream = getClass().getResourceAsStream(configFile);
               if (stream == null) {
                  stream = new FileInputStream(configFile);
               }
               return Collections.singletonMap(configFile, Utils.readAsBytes(stream));
            } catch (IOException e) {
               log.error("Cannot read configuration file " + configFile, e);
               return Collections.EMPTY_MAP;
            } finally {
               Utils.close(stream);
            }
         }
      };
   }

   /**
    * Dumps the attributes of all registered JGroups protocol MBeans into a Properties
    * object keyed as "&lt;protocol&gt;.&lt;attribute&gt;". Best effort: errors are
    * logged and a (possibly partial) Properties is returned.
    */
   protected Properties dumpProperties() {
      Properties p = new Properties();
      try {
         MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
         String objName = String.format("jboss.infinispan:type=protocol,cluster=\"%s\",protocol=*", ch.getClusterName());
         Set<ObjectInstance> beanObjs = mbeanServer.queryMBeans(new ObjectName(objName), null);
         if (beanObjs.isEmpty()) {
            log.error("no JGroups protocols found");
            return p;
         }
         for (ObjectInstance beanObj : beanObjs) {
            ObjectName protocolObjectName = beanObj.getObjectName();
            MBeanInfo protocolBean = mbeanServer.getMBeanInfo(protocolObjectName);
            String protocolName = protocolObjectName.getKeyProperty("protocol");
            for (MBeanAttributeInfo info : protocolBean.getAttributes()) {
               String propName = info.getName();
               Object propValue = mbeanServer.getAttribute(protocolObjectName, propName);
               p.setProperty(protocolName + "." + propName, propValue == null ? "null" : propValue.toString());
            }
         }
         return p;
      } catch (Exception e) {
         log.error("Error while dumping JGroups config as properties", e);
         return p;
      }
   }

   /**
    * Terminates after the first non-null response
    */
   protected static class FirstNonNullResponse implements RspFilter {
      protected boolean receivedNonNullRsp;

      public boolean isAcceptable(Object response, Address sender) {
         if (response != null) {
            receivedNonNullRsp = true;
            return true;
         }
         return false;
      }

      public boolean needMoreResponses() {
         return !receivedNonNullRsp;
      }
   }
}
<filename>src/main/java/com/exsio/clock/configuration/ApplicationConfiguration.java<gh_stars>0
package com.exsio.clock.configuration;

import com.exsio.clock.configuration.support.AtmosphereArgumentResolver;
import org.atmosphere.cpr.AtmosphereFramework;
import org.atmosphere.cpr.BroadcasterFactory;
import org.atmosphere.cpr.MeteorServlet;
import org.springframework.boot.context.embedded.ConfigurableEmbeddedServletContainer;
import org.springframework.boot.context.embedded.EmbeddedServletContainerCustomizer;
import org.springframework.boot.context.embedded.ErrorPage;
import org.springframework.boot.context.embedded.MimeMappings;
import org.springframework.boot.context.embedded.ServletRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter;
import org.springframework.web.method.support.HandlerMethodArgumentResolver;
import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;

import javax.naming.NamingException;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;

/**
 * Central Spring MVC / embedded-container configuration: registers the
 * Atmosphere push servlet, JSON message conversion, static resource handlers
 * and custom error pages.
 */
@Configuration
public class ApplicationConfiguration extends WebMvcConfigurerAdapter implements EmbeddedServletContainerCustomizer {

    // Single shared Meteor (Atmosphere) servlet instance; registered in
    // servletRegistrationBean() and used to expose the AtmosphereFramework bean.
    private final static MeteorServlet meteor = new MeteorServlet();

    /** Enables ${...} placeholder resolution for property annotations. */
    @Bean
    public static PropertySourcesPlaceholderConfigurer properties() throws IOException, NamingException {
        return new PropertySourcesPlaceholderConfigurer();
    }

    /** Exposes the Atmosphere framework backing the shared Meteor servlet. */
    @Bean
    public AtmosphereFramework atmosphereFramework() {
        return meteor.framework();
    }

    /** Broadcaster factory taken from the Atmosphere framework's configuration. */
    @Bean
    public BroadcasterFactory broadcasterFactory(AtmosphereFramework atmosphereFramework) {
        return atmosphereFramework.getAtmosphereConfig().getBroadcasterFactory();
    }

    /** Jackson-based JSON converter for MVC request/response bodies. */
    @Bean
    public HttpMessageConverter jacksonHttpMessageConverter() {
        return new MappingJackson2HttpMessageConverter();
    }

    /**
     * Registers the Meteor push servlet on "*.push" with async support and the
     * Atmosphere-to-Spring DispatcherServlet bridge parameters.
     */
    @Bean
    @Order(Ordered.HIGHEST_PRECEDENCE)
    public ServletRegistrationBean servletRegistrationBean() {
        ServletRegistrationBean push = new ServletRegistrationBean(meteor, "*.push");
        push.setLoadOnStartup(2);
        push.setInitParameters(new HashMap<String, String>() {
            {
                put("org.atmosphere.servlet", "org.springframework.web.servlet.DispatcherServlet");
                put("org.atmosphere.cpr.broadcaster.shareableThreadPool", "true");
                put("org.atmosphere.cpr.broadcaster.maxProcessingThreads", "5");
                put("org.atmosphere.cpr.broadcasterLifeCyclePolicy", "IDLE_DESTROY");
                put("contextConfigLocation", "classpath:servlet.xml");
            }
        });
        push.setAsyncSupported(true);
        push.setName("push");
        return push;
    }

    @Override
    public void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers) {
        // Lets controller methods receive Atmosphere-specific arguments.
        argumentResolvers.add(new AtmosphereArgumentResolver());
    }

    @Override
    public void addResourceHandlers(ResourceHandlerRegistry registry) {
        // Static assets and top-level pages are served straight from the classpath.
        registry.addResourceHandler("/js/**").addResourceLocations("classpath:/static/js/");
        registry.addResourceHandler("/icons/**").addResourceLocations("classpath:/static/icons/");
        registry.addResourceHandler("/").addResourceLocations("classpath:/static/index.html");
        registry.addResourceHandler("/index.html").addResourceLocations("classpath:/static/index.html");
        registry.addResourceHandler("/404.html").addResourceLocations("classpath:/static/404.html");
        registry.addResourceHandler("/500.html").addResourceLocations("classpath:/static/500.html");
        registry.addResourceHandler("/manifest.json").addResourceLocations("classpath:/static/manifest.json");
        registry.addResourceHandler("/favicon.png").addResourceLocations("classpath:/static/favicon.png");
        registry.addResourceHandler("/manage.html").addResourceLocations("classpath:/static/manage.html");
    }

    @Override
    public void customize(ConfigurableEmbeddedServletContainer container) {
        // NOTE(review): this remaps *every* ".json" resource to
        // "application/manifest+json", not only manifest.json — confirm intended.
        MimeMappings mappings = new MimeMappings(MimeMappings.DEFAULT);
        mappings.add("json", "application/manifest+json");
        container.setMimeMappings(mappings);
        ErrorPage error404Page = new ErrorPage(HttpStatus.NOT_FOUND, "/404.html");
        ErrorPage error500Page = new ErrorPage(HttpStatus.INTERNAL_SERVER_ERROR, "/500.html");
        ErrorPage errorDefaultPage = new ErrorPage("/500.html");
        container.addErrorPages(error404Page, error500Page, errorDefaultPage);
    }
}
<gh_stars>1-10
from flask import jsonify
from flask_restful import Resource

from .models import AboutPageInfo
from .serializers import about_serializer


class GetAboutPage(Resource):
    # Read-only REST resource exposing the "about" page content.

    def get(self):
        # Serialize all AboutPageInfo entries and return them as a JSON response.
        # NOTE(review): `about_serializer.dumps(...).data` yields a JSON *string*
        # on pre-3.0 marshmallow; passing a string to jsonify double-encodes the
        # payload — confirm the marshmallow version and whether `dump` (object
        # result) was intended instead.
        return jsonify(about_serializer.dumps(AboutPageInfo.all()).data)
// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/proto/examplepb/non_standard_names.proto package examplepb import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // NonStandardMessage has oddly named fields. type NonStandardMessage struct { // Id represents the message identifier. 
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Num int64 `protobuf:"varint,2,opt,name=Num,proto3" json:"Num,omitempty"` LineNum int64 `protobuf:"varint,3,opt,name=line_num,json=lineNum,proto3" json:"line_num,omitempty"` LangIdent string `protobuf:"bytes,4,opt,name=langIdent,proto3" json:"langIdent,omitempty"` STATUS string `protobuf:"bytes,5,opt,name=STATUS,proto3" json:"STATUS,omitempty"` En_GB int64 `protobuf:"varint,6,opt,name=en_GB,json=enGB,proto3" json:"en_GB,omitempty"` No string `protobuf:"bytes,7,opt,name=no,proto3" json:"no,omitempty"` Thing *NonStandardMessage_Thing `protobuf:"bytes,8,opt,name=thing,proto3" json:"thing,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessage) Reset() { *m = NonStandardMessage{} } func (m *NonStandardMessage) String() string { return proto.CompactTextString(m) } func (*NonStandardMessage) ProtoMessage() {} func (*NonStandardMessage) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{0} } func (m *NonStandardMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessage.Unmarshal(m, b) } func (m *NonStandardMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessage.Marshal(b, m, deterministic) } func (m *NonStandardMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessage.Merge(m, src) } func (m *NonStandardMessage) XXX_Size() int { return xxx_messageInfo_NonStandardMessage.Size(m) } func (m *NonStandardMessage) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessage.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessage proto.InternalMessageInfo func (m *NonStandardMessage) GetId() string { if m != nil { return m.Id } return "" } func (m *NonStandardMessage) GetNum() int64 { if m != nil { return m.Num } return 0 } func (m *NonStandardMessage) GetLineNum() int64 { if m != nil { return 
m.LineNum } return 0 } func (m *NonStandardMessage) GetLangIdent() string { if m != nil { return m.LangIdent } return "" } func (m *NonStandardMessage) GetSTATUS() string { if m != nil { return m.STATUS } return "" } func (m *NonStandardMessage) GetEn_GB() int64 { if m != nil { return m.En_GB } return 0 } func (m *NonStandardMessage) GetNo() string { if m != nil { return m.No } return "" } func (m *NonStandardMessage) GetThing() *NonStandardMessage_Thing { if m != nil { return m.Thing } return nil } type NonStandardMessage_Thing struct { SubThing *NonStandardMessage_Thing_SubThing `protobuf:"bytes,1,opt,name=subThing,proto3" json:"subThing,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessage_Thing) Reset() { *m = NonStandardMessage_Thing{} } func (m *NonStandardMessage_Thing) String() string { return proto.CompactTextString(m) } func (*NonStandardMessage_Thing) ProtoMessage() {} func (*NonStandardMessage_Thing) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{0, 0} } func (m *NonStandardMessage_Thing) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessage_Thing.Unmarshal(m, b) } func (m *NonStandardMessage_Thing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessage_Thing.Marshal(b, m, deterministic) } func (m *NonStandardMessage_Thing) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessage_Thing.Merge(m, src) } func (m *NonStandardMessage_Thing) XXX_Size() int { return xxx_messageInfo_NonStandardMessage_Thing.Size(m) } func (m *NonStandardMessage_Thing) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessage_Thing.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessage_Thing proto.InternalMessageInfo func (m *NonStandardMessage_Thing) GetSubThing() *NonStandardMessage_Thing_SubThing { if m != nil { return m.SubThing } return nil } type 
NonStandardMessage_Thing_SubThing struct { SubValue string `protobuf:"bytes,1,opt,name=sub_value,json=subValue,proto3" json:"sub_value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessage_Thing_SubThing) Reset() { *m = NonStandardMessage_Thing_SubThing{} } func (m *NonStandardMessage_Thing_SubThing) String() string { return proto.CompactTextString(m) } func (*NonStandardMessage_Thing_SubThing) ProtoMessage() {} func (*NonStandardMessage_Thing_SubThing) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{0, 0, 0} } func (m *NonStandardMessage_Thing_SubThing) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessage_Thing_SubThing.Unmarshal(m, b) } func (m *NonStandardMessage_Thing_SubThing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessage_Thing_SubThing.Marshal(b, m, deterministic) } func (m *NonStandardMessage_Thing_SubThing) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessage_Thing_SubThing.Merge(m, src) } func (m *NonStandardMessage_Thing_SubThing) XXX_Size() int { return xxx_messageInfo_NonStandardMessage_Thing_SubThing.Size(m) } func (m *NonStandardMessage_Thing_SubThing) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessage_Thing_SubThing.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessage_Thing_SubThing proto.InternalMessageInfo func (m *NonStandardMessage_Thing_SubThing) GetSubValue() string { if m != nil { return m.SubValue } return "" } type NonStandardUpdateRequest struct { Body *NonStandardMessage `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardUpdateRequest) Reset() { *m = 
NonStandardUpdateRequest{} } func (m *NonStandardUpdateRequest) String() string { return proto.CompactTextString(m) } func (*NonStandardUpdateRequest) ProtoMessage() {} func (*NonStandardUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{1} } func (m *NonStandardUpdateRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardUpdateRequest.Unmarshal(m, b) } func (m *NonStandardUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardUpdateRequest.Marshal(b, m, deterministic) } func (m *NonStandardUpdateRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardUpdateRequest.Merge(m, src) } func (m *NonStandardUpdateRequest) XXX_Size() int { return xxx_messageInfo_NonStandardUpdateRequest.Size(m) } func (m *NonStandardUpdateRequest) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardUpdateRequest.DiscardUnknown(m) } var xxx_messageInfo_NonStandardUpdateRequest proto.InternalMessageInfo func (m *NonStandardUpdateRequest) GetBody() *NonStandardMessage { if m != nil { return m.Body } return nil } func (m *NonStandardUpdateRequest) GetUpdateMask() *field_mask.FieldMask { if m != nil { return m.UpdateMask } return nil } // NonStandardMessageWithJSONNames maps odd field names to odd JSON names for maximum confusion. type NonStandardMessageWithJSONNames struct { // Id represents the message identifier. 
Id string `protobuf:"bytes,1,opt,name=id,json=ID,proto3" json:"id,omitempty"` Num int64 `protobuf:"varint,2,opt,name=Num,proto3" json:"Num,omitempty"` LineNum int64 `protobuf:"varint,3,opt,name=line_num,json=LineNum,proto3" json:"line_num,omitempty"` LangIdent string `protobuf:"bytes,4,opt,name=langIdent,proto3" json:"langIdent,omitempty"` STATUS string `protobuf:"bytes,5,opt,name=STATUS,json=status,proto3" json:"STATUS,omitempty"` En_GB int64 `protobuf:"varint,6,opt,name=en_GB,json=En_GB,proto3" json:"en_GB,omitempty"` No string `protobuf:"bytes,7,opt,name=no,json=yes,proto3" json:"no,omitempty"` Thing *NonStandardMessageWithJSONNames_Thing `protobuf:"bytes,8,opt,name=thing,json=Thingy,proto3" json:"thing,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessageWithJSONNames) Reset() { *m = NonStandardMessageWithJSONNames{} } func (m *NonStandardMessageWithJSONNames) String() string { return proto.CompactTextString(m) } func (*NonStandardMessageWithJSONNames) ProtoMessage() {} func (*NonStandardMessageWithJSONNames) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{2} } func (m *NonStandardMessageWithJSONNames) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessageWithJSONNames.Unmarshal(m, b) } func (m *NonStandardMessageWithJSONNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessageWithJSONNames.Marshal(b, m, deterministic) } func (m *NonStandardMessageWithJSONNames) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessageWithJSONNames.Merge(m, src) } func (m *NonStandardMessageWithJSONNames) XXX_Size() int { return xxx_messageInfo_NonStandardMessageWithJSONNames.Size(m) } func (m *NonStandardMessageWithJSONNames) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessageWithJSONNames.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessageWithJSONNames 
proto.InternalMessageInfo func (m *NonStandardMessageWithJSONNames) GetId() string { if m != nil { return m.Id } return "" } func (m *NonStandardMessageWithJSONNames) GetNum() int64 { if m != nil { return m.Num } return 0 } func (m *NonStandardMessageWithJSONNames) GetLineNum() int64 { if m != nil { return m.LineNum } return 0 } func (m *NonStandardMessageWithJSONNames) GetLangIdent() string { if m != nil { return m.LangIdent } return "" } func (m *NonStandardMessageWithJSONNames) GetSTATUS() string { if m != nil { return m.STATUS } return "" } func (m *NonStandardMessageWithJSONNames) GetEn_GB() int64 { if m != nil { return m.En_GB } return 0 } func (m *NonStandardMessageWithJSONNames) GetNo() string { if m != nil { return m.No } return "" } func (m *NonStandardMessageWithJSONNames) GetThing() *NonStandardMessageWithJSONNames_Thing { if m != nil { return m.Thing } return nil } type NonStandardMessageWithJSONNames_Thing struct { SubThing *NonStandardMessageWithJSONNames_Thing_SubThing `protobuf:"bytes,1,opt,name=subThing,json=SubThing,proto3" json:"subThing,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessageWithJSONNames_Thing) Reset() { *m = NonStandardMessageWithJSONNames_Thing{} } func (m *NonStandardMessageWithJSONNames_Thing) String() string { return proto.CompactTextString(m) } func (*NonStandardMessageWithJSONNames_Thing) ProtoMessage() {} func (*NonStandardMessageWithJSONNames_Thing) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{2, 0} } func (m *NonStandardMessageWithJSONNames_Thing) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing.Unmarshal(m, b) } func (m *NonStandardMessageWithJSONNames_Thing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing.Marshal(b, m, deterministic) } func (m 
*NonStandardMessageWithJSONNames_Thing) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessageWithJSONNames_Thing.Merge(m, src) } func (m *NonStandardMessageWithJSONNames_Thing) XXX_Size() int { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing.Size(m) } func (m *NonStandardMessageWithJSONNames_Thing) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessageWithJSONNames_Thing.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessageWithJSONNames_Thing proto.InternalMessageInfo func (m *NonStandardMessageWithJSONNames_Thing) GetSubThing() *NonStandardMessageWithJSONNames_Thing_SubThing { if m != nil { return m.SubThing } return nil } type NonStandardMessageWithJSONNames_Thing_SubThing struct { SubValue string `protobuf:"bytes,1,opt,name=sub_value,json=sub_Value,proto3" json:"sub_value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) Reset() { *m = NonStandardMessageWithJSONNames_Thing_SubThing{} } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) String() string { return proto.CompactTextString(m) } func (*NonStandardMessageWithJSONNames_Thing_SubThing) ProtoMessage() {} func (*NonStandardMessageWithJSONNames_Thing_SubThing) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{2, 0, 0} } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing.Unmarshal(m, b) } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing.Marshal(b, m, deterministic) } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing.Merge(m, src) } func (m 
*NonStandardMessageWithJSONNames_Thing_SubThing) XXX_Size() int { return xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing.Size(m) } func (m *NonStandardMessageWithJSONNames_Thing_SubThing) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing.DiscardUnknown(m) } var xxx_messageInfo_NonStandardMessageWithJSONNames_Thing_SubThing proto.InternalMessageInfo func (m *NonStandardMessageWithJSONNames_Thing_SubThing) GetSubValue() string { if m != nil { return m.SubValue } return "" } type NonStandardWithJSONNamesUpdateRequest struct { Body *NonStandardMessageWithJSONNames `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NonStandardWithJSONNamesUpdateRequest) Reset() { *m = NonStandardWithJSONNamesUpdateRequest{} } func (m *NonStandardWithJSONNamesUpdateRequest) String() string { return proto.CompactTextString(m) } func (*NonStandardWithJSONNamesUpdateRequest) ProtoMessage() {} func (*NonStandardWithJSONNamesUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptor_fa36ad8828f19375, []int{3} } func (m *NonStandardWithJSONNamesUpdateRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest.Unmarshal(m, b) } func (m *NonStandardWithJSONNamesUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest.Marshal(b, m, deterministic) } func (m *NonStandardWithJSONNamesUpdateRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest.Merge(m, src) } func (m *NonStandardWithJSONNamesUpdateRequest) XXX_Size() int { return xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest.Size(m) } func (m 
*NonStandardWithJSONNamesUpdateRequest) XXX_DiscardUnknown() { xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest.DiscardUnknown(m) } var xxx_messageInfo_NonStandardWithJSONNamesUpdateRequest proto.InternalMessageInfo func (m *NonStandardWithJSONNamesUpdateRequest) GetBody() *NonStandardMessageWithJSONNames { if m != nil { return m.Body } return nil } func (m *NonStandardWithJSONNamesUpdateRequest) GetUpdateMask() *field_mask.FieldMask { if m != nil { return m.UpdateMask } return nil } func init() { proto.RegisterType((*NonStandardMessage)(nil), "grpc.gateway.examples.examplepb.NonStandardMessage") proto.RegisterType((*NonStandardMessage_Thing)(nil), "grpc.gateway.examples.examplepb.NonStandardMessage.Thing") proto.RegisterType((*NonStandardMessage_Thing_SubThing)(nil), "grpc.gateway.examples.examplepb.NonStandardMessage.Thing.SubThing") proto.RegisterType((*NonStandardUpdateRequest)(nil), "grpc.gateway.examples.examplepb.NonStandardUpdateRequest") proto.RegisterType((*NonStandardMessageWithJSONNames)(nil), "grpc.gateway.examples.examplepb.NonStandardMessageWithJSONNames") proto.RegisterType((*NonStandardMessageWithJSONNames_Thing)(nil), "grpc.gateway.examples.examplepb.NonStandardMessageWithJSONNames.Thing") proto.RegisterType((*NonStandardMessageWithJSONNames_Thing_SubThing)(nil), "grpc.gateway.examples.examplepb.NonStandardMessageWithJSONNames.Thing.SubThing") proto.RegisterType((*NonStandardWithJSONNamesUpdateRequest)(nil), "grpc.gateway.examples.examplepb.NonStandardWithJSONNamesUpdateRequest") } func init() { proto.RegisterFile("examples/proto/examplepb/non_standard_names.proto", fileDescriptor_fa36ad8828f19375) } var fileDescriptor_fa36ad8828f19375 = []byte{ // 626 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x6f, 0xd3, 0x3c, 0x18, 0xc7, 0x95, 0x66, 0xed, 0x5a, 0x57, 0x7a, 0x5f, 0xe4, 0x21, 0x14, 0xc2, 0xa4, 0x56, 0x95, 0xd0, 0x7a, 0x21, 0xd1, 0xb2, 0x03, 0x02, 0x84, 0x04, 0x15, 
0xac, 0x1a, 0x62, 0xad, 0x94, 0x74, 0x20, 0x21, 0x41, 0xe4, 0x2c, 0x5e, 0x1a, 0x9a, 0xda, 0xa1, 0x76, 0x3a, 0x7a, 0xe5, 0x13, 0x4c, 0xe2, 0x0e, 0xdc, 0xf8, 0x0e, 0xf0, 0x31, 0xb8, 0x73, 0xe2, 0x83, 0xa0, 0xd8, 0x69, 0xa9, 0x55, 0xa1, 0xb1, 0x6e, 0xa7, 0xfa, 0x79, 0x6c, 0xff, 0x9f, 0xe7, 0x6f, 0xff, 0xe2, 0x82, 0x5d, 0xfc, 0x1e, 0x8d, 0xd3, 0x04, 0x33, 0x3b, 0x9d, 0x50, 0x4e, 0xed, 0x22, 0x4c, 0x03, 0x9b, 0x50, 0xe2, 0x33, 0x8e, 0x48, 0x88, 0x26, 0xa1, 0x4f, 0xd0, 0x18, 0x33, 0x4b, 0x2c, 0x81, 0x8d, 0x68, 0x92, 0x1e, 0x5b, 0x11, 0xe2, 0xf8, 0x14, 0xcd, 0xac, 0xf9, 0x7e, 0x6b, 0xb1, 0xd3, 0xdc, 0x8e, 0x28, 0x8d, 0x12, 0x6c, 0xa3, 0x34, 0xb6, 0x11, 0x21, 0x94, 0x23, 0x1e, 0x53, 0x52, 0x6c, 0x37, 0x9b, 0xc5, 0xac, 0x88, 0x82, 0xec, 0xc4, 0x3e, 0x89, 0x71, 0x12, 0xfa, 0x63, 0xc4, 0x46, 0x72, 0x45, 0xeb, 0x4c, 0x07, 0xb0, 0x47, 0x89, 0x57, 0x14, 0x3f, 0xc4, 0x8c, 0xa1, 0x08, 0xc3, 0xff, 0x40, 0x29, 0x0e, 0x0d, 0xad, 0xa9, 0xb5, 0x6b, 0x6e, 0x29, 0x0e, 0xe1, 0x35, 0xa0, 0xf7, 0xb2, 0xb1, 0x51, 0x6a, 0x6a, 0x6d, 0xdd, 0xcd, 0x87, 0xf0, 0x26, 0xa8, 0x26, 0x31, 0xc1, 0x3e, 0xc9, 0xc6, 0x86, 0x2e, 0xd2, 0x9b, 0x79, 0x9c, 0x4f, 0x6d, 0x83, 0x5a, 0x82, 0x48, 0x74, 0x10, 0x62, 0xc2, 0x8d, 0x0d, 0xa1, 0xf1, 0x27, 0x01, 0x6f, 0x80, 0x8a, 0x37, 0x78, 0x3c, 0x38, 0xf2, 0x8c, 0xb2, 0x98, 0x2a, 0x22, 0xb8, 0x05, 0xca, 0x98, 0xf8, 0xdd, 0x8e, 0x51, 0x11, 0x6a, 0x1b, 0x98, 0x74, 0x3b, 0x79, 0x1f, 0x84, 0x1a, 0x9b, 0xb2, 0x0f, 0x42, 0x61, 0x1f, 0x94, 0xf9, 0x30, 0x26, 0x91, 0x51, 0x6d, 0x6a, 0xed, 0xba, 0x73, 0xcf, 0x3a, 0xe7, 0x7c, 0xac, 0x55, 0x6f, 0xd6, 0x20, 0x17, 0x70, 0xa5, 0x8e, 0x79, 0xa6, 0x81, 0xb2, 0x48, 0xc0, 0x37, 0xa0, 0xca, 0xb2, 0x40, 0x8c, 0x85, 0xf1, 0xba, 0xd3, 0x59, 0x5b, 0xdd, 0xf2, 0x0a, 0x25, 0x77, 0xa1, 0x69, 0xee, 0x80, 0xea, 0x3c, 0x0b, 0x6f, 0x81, 0x1a, 0xcb, 0x02, 0x7f, 0x8a, 0x92, 0x0c, 0x17, 0xa7, 0x9c, 0x2f, 0x7c, 0x91, 0xc7, 0xad, 0x2f, 0x1a, 0x30, 0x96, 0x84, 0x8f, 0xd2, 0x10, 0x71, 0xec, 0xe2, 0x77, 0x19, 0x66, 0x1c, 0x76, 0xc1, 0x46, 0x40, 0xc3, 0x59, 
0xd1, 0xe1, 0xde, 0x1a, 0x1d, 0xba, 0x42, 0x00, 0x3e, 0x00, 0xf5, 0x4c, 0x28, 0x0b, 0x1a, 0xc4, 0xcd, 0xd6, 0x1d, 0xd3, 0x92, 0xc0, 0x58, 0x73, 0x60, 0xac, 0xfd, 0x1c, 0x98, 0x43, 0xc4, 0x46, 0x2e, 0x90, 0xcb, 0xf3, 0x71, 0xeb, 0xbb, 0x0e, 0x1a, 0xab, 0xca, 0x2f, 0x63, 0x3e, 0x7c, 0xe6, 0xf5, 0x7b, 0xbd, 0x1c, 0x60, 0x15, 0xa1, 0x83, 0x27, 0xff, 0x86, 0xd0, 0xf3, 0xf5, 0x10, 0x62, 0x1c, 0xf1, 0x8c, 0xc1, 0xeb, 0x2a, 0x42, 0xe5, 0xa7, 0x79, 0x00, 0xff, 0x5f, 0x62, 0x48, 0x9f, 0x61, 0x06, 0x5f, 0xab, 0x10, 0xed, 0xaf, 0x71, 0x88, 0x8a, 0xd5, 0x82, 0xa8, 0x8a, 0xf8, 0x99, 0x99, 0x9f, 0x16, 0x48, 0x8d, 0x56, 0x90, 0xea, 0x5f, 0x4d, 0xad, 0x25, 0xbe, 0xe6, 0x23, 0xb3, 0xbd, 0xc4, 0xd7, 0xf6, 0x2a, 0x5f, 0x22, 0x21, 0x01, 0xfb, 0xa6, 0x81, 0xdb, 0x4b, 0x65, 0x14, 0x7d, 0x95, 0xb6, 0x81, 0x42, 0xdb, 0xa3, 0xcb, 0x36, 0x7f, 0x05, 0xe8, 0x39, 0x9f, 0xd5, 0x07, 0xcb, 0xc3, 0x93, 0x69, 0x7c, 0x8c, 0xe1, 0x57, 0x0d, 0x54, 0x64, 0xef, 0xf0, 0x42, 0x8f, 0x82, 0xe2, 0xd7, 0x5c, 0xe7, 0x7b, 0x6a, 0xdd, 0xf9, 0xf0, 0xe3, 0xd7, 0xc7, 0xd2, 0x8e, 0xd3, 0xb0, 0xa7, 0xbb, 0xf3, 0x37, 0x5d, 0x79, 0xd1, 0x6d, 0xd9, 0xfe, 0x7d, 0xe9, 0xfe, 0xa7, 0x06, 0xb6, 0x64, 0x55, 0xf5, 0x7b, 0xb9, 0x10, 0x86, 0x7f, 0xbf, 0x33, 0xf3, 0xd2, 0xb7, 0xd4, 0x7a, 0x28, 0x0c, 0xdd, 0x75, 0xec, 0x73, 0x0c, 0xf9, 0xa7, 0x31, 0x1f, 0xfa, 0x6f, 0x19, 0x25, 0xf2, 0x3f, 0x4b, 0x1a, 0xec, 0xd4, 0x5f, 0xd5, 0x16, 0xb5, 0x82, 0x8a, 0xb8, 0xce, 0xbd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x18, 0x50, 0x51, 0xb2, 0xfc, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // NonStandardServiceClient is the client API for NonStandardService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type NonStandardServiceClient interface { // Apply field mask to empty NonStandardMessage and return result. Update(ctx context.Context, in *NonStandardUpdateRequest, opts ...grpc.CallOption) (*NonStandardMessage, error) // Apply field mask to empty NonStandardMessageWithJSONNames and return result. UpdateWithJSONNames(ctx context.Context, in *NonStandardWithJSONNamesUpdateRequest, opts ...grpc.CallOption) (*NonStandardMessageWithJSONNames, error) } type nonStandardServiceClient struct { cc *grpc.ClientConn } func NewNonStandardServiceClient(cc *grpc.ClientConn) NonStandardServiceClient { return &nonStandardServiceClient{cc} } func (c *nonStandardServiceClient) Update(ctx context.Context, in *NonStandardUpdateRequest, opts ...grpc.CallOption) (*NonStandardMessage, error) { out := new(NonStandardMessage) err := c.cc.Invoke(ctx, "/grpc.gateway.examples.examplepb.NonStandardService/Update", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *nonStandardServiceClient) UpdateWithJSONNames(ctx context.Context, in *NonStandardWithJSONNamesUpdateRequest, opts ...grpc.CallOption) (*NonStandardMessageWithJSONNames, error) { out := new(NonStandardMessageWithJSONNames) err := c.cc.Invoke(ctx, "/grpc.gateway.examples.examplepb.NonStandardService/UpdateWithJSONNames", in, out, opts...) if err != nil { return nil, err } return out, nil } // NonStandardServiceServer is the server API for NonStandardService service. type NonStandardServiceServer interface { // Apply field mask to empty NonStandardMessage and return result. Update(context.Context, *NonStandardUpdateRequest) (*NonStandardMessage, error) // Apply field mask to empty NonStandardMessageWithJSONNames and return result. 
UpdateWithJSONNames(context.Context, *NonStandardWithJSONNamesUpdateRequest) (*NonStandardMessageWithJSONNames, error) } // UnimplementedNonStandardServiceServer can be embedded to have forward compatible implementations. type UnimplementedNonStandardServiceServer struct { } func (*UnimplementedNonStandardServiceServer) Update(ctx context.Context, req *NonStandardUpdateRequest) (*NonStandardMessage, error) { return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") } func (*UnimplementedNonStandardServiceServer) UpdateWithJSONNames(ctx context.Context, req *NonStandardWithJSONNamesUpdateRequest) (*NonStandardMessageWithJSONNames, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateWithJSONNames not implemented") } func RegisterNonStandardServiceServer(s *grpc.Server, srv NonStandardServiceServer) { s.RegisterService(&_NonStandardService_serviceDesc, srv) } func _NonStandardService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(NonStandardUpdateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NonStandardServiceServer).Update(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.gateway.examples.examplepb.NonStandardService/Update", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NonStandardServiceServer).Update(ctx, req.(*NonStandardUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _NonStandardService_UpdateWithJSONNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(NonStandardWithJSONNamesUpdateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NonStandardServiceServer).UpdateWithJSONNames(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, 
FullMethod: "/grpc.gateway.examples.examplepb.NonStandardService/UpdateWithJSONNames", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NonStandardServiceServer).UpdateWithJSONNames(ctx, req.(*NonStandardWithJSONNamesUpdateRequest)) } return interceptor(ctx, in, info, handler) } var _NonStandardService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.gateway.examples.examplepb.NonStandardService", HandlerType: (*NonStandardServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Update", Handler: _NonStandardService_Update_Handler, }, { MethodName: "UpdateWithJSONNames", Handler: _NonStandardService_UpdateWithJSONNames_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "examples/proto/examplepb/non_standard_names.proto", }
package gfs

import (
	"encoding/json"
	"encoding/xml"
	"html/template"
	"log"
	"net/http"
)

// responseHandler supplies the shared logic for writing an HTTP response
// in the format requested by the caller.
type responseHandler struct {
}

// WriteResponse serializes response to writer in the requested format
// (HTML template, JSON, or XML). Unknown formats are logged and then
// served as HTML, which is also the default.
func (r *responseHandler) WriteResponse(writer http.ResponseWriter, statusCode int, htmlTemplate *template.Template, format string, response interface{}) error {
	switch format {
	case FormatJson:
		writer.Header().Set("content-type", FormatJson)
		writer.WriteHeader(statusCode)
		return json.NewEncoder(writer).Encode(response)
	case FormatXml:
		writer.Header().Set("content-type", FormatXml)
		writer.WriteHeader(statusCode)
		return xml.NewEncoder(writer).Encode(response)
	case FormatHtml:
		// Fall through to the shared HTML path below.
	default:
		log.Println("Unknown response format", format)
	}
	// HTML is both an explicit choice and the fallback for unknown formats.
	writer.Header().Set("content-type", FormatHtml)
	writer.WriteHeader(statusCode)
	return htmlTemplate.Execute(writer, response)
}
"""Gaussian-process age prediction on the UK Biobank FreeSurfer dataset.

Loads a previously saved train/test/validation split, fits a
GaussianProcessRegressor on the scaled training data, reports the
cross-validated MAE on the training split, and evaluates MAE plus
Spearman/Pearson/R2 statistics on the test and validation splits,
saving predicted-vs-true-age plots alongside the split data.
"""
import os
import joblib
import pickle

from scipy.stats import spearmanr, pearsonr
from sklearn.metrics import mean_absolute_error, r2_score
from sklearn.model_selection import cross_validate, cross_val_predict
from sklearn.gaussian_process import GaussianProcessRegressor

from BayOptPy.helperfunctions import (get_paths,
                                      plot_predicted_vs_true_age,
                                      set_publication_style)

# Settings
# -----------------------------------------------------------------------------
set_publication_style()
n_cross_val = 5     # number of cross-validation folds
debug = False
dataset = 'UKBIO_freesurf'

# Load the data
# TODO: change the path
# save_path = '/code/BayOptPy/tpot/Output/vanilla_combi_old/100_generations/random_seed_020/'
save_path = '/code/BayOptPy/tpot_regression/Output/vanilla_combi/116_freesurf_col/100_generations/random_seed_020/'

# Load the saved validation dataset.
project_ukbio_wd, project_data_ukbio, _ = get_paths(debug, dataset)
with open(os.path.join(save_path, 'splitted_dataset_%s.pickle' % dataset),
          'rb') as handle:
    splitted_dataset = pickle.load(handle)

# Train the model.  (A leftover `pdb.set_trace()` breakpoint that halted the
# script here has been removed.)
model = GaussianProcessRegressor()
model.fit(splitted_dataset['Xtrain_scaled'], splitted_dataset['Ytrain'])
scores = cross_validate(estimator=model,
                        X=splitted_dataset['Xtrain_scaled'],
                        y=splitted_dataset['Ytrain'],
                        scoring='neg_mean_absolute_error',
                        cv=n_cross_val)
print("MAE train dataset: %0.2f (+/- %0.2f)" % (scores['test_score'].mean(),
                                                scores['test_score'].std() * 2))

# Predict on the held-out test data.
print('Perform prediction in test data')
y_predicted_test = model.predict(splitted_dataset['Xtest_scaled'])
output_path_test = os.path.join(save_path, 'test_predicted_true_age_rvr.png')
plot_predicted_vs_true_age(splitted_dataset['Ytest'], y_predicted_test,
                           output_path_test)
mae_test = mean_absolute_error(splitted_dataset['Ytest'], y_predicted_test)
print('MAE on test: %.2f' % mae_test)

print('Perform cross-validation in validation data')
y_predicted_validation = model.predict(splitted_dataset['Xvalidate_scaled'])
output_path_val = os.path.join(save_path,
                               'validation_predicted_true_age_rvr.png')
plot_predicted_vs_true_age(splitted_dataset['Yvalidate'],
                           y_predicted_validation,
                           output_path_val)
mae_validation = mean_absolute_error(splitted_dataset['Yvalidate'],
                                     y_predicted_validation)
print('MAE on validation: %.2f' % mae_validation)

# -----------------------------------------------------------------------------
# Statistics: Spearman rho, Pearson r and R2.

# Test dataset
print('Statistics for the test dataset')
rho_test, rho_p_value_test = spearmanr(splitted_dataset['Ytest'],
                                       y_predicted_test)
print('shape of the dataset: %s' % (splitted_dataset['Ytest'].shape,))
print('Rho and p-value: %.4f %.4f' % (rho_test, rho_p_value_test))
r_test, r_p_value_test = pearsonr(splitted_dataset['Ytest'], y_predicted_test)
print('R is: %.4f' % r_test)

# Validation dataset
print('Statistics for the validation dataset')
rho_val, rho_p_value_val = spearmanr(splitted_dataset['Yvalidate'],
                                     y_predicted_validation)
print('shape of the dataset: %s' % (splitted_dataset['Yvalidate'].shape,))
print('Rho and p-value: %.4f %.4f' % (rho_val, rho_p_value_val))
r_score = r2_score(splitted_dataset['Yvalidate'], y_predicted_validation)
print('R2 is: %.4f' % r_score)
r_val, r_p_value_val = pearsonr(splitted_dataset['Yvalidate'],
                                y_predicted_validation)
print('R is: %.4f' % r_val)
/** * The DocSplitterTool class provides a command-line tool * for splitting up a generic corpus into sentences, outputing one * sentece per line. */ public class DocSplitterTool { /** * Main entry point */ public static void main(String[] args) { String callString = "DocSplitterTool" + " [-h (show help)]" + " [-n (use the OpenNLP-based splitter)]" + " [-q (use the Quotes-Parens splitter)]"; HashSet<String> flags = new HashSet<String>(); flags.add("-h"); flags.add("-n"); flags.add("-q"); Map<String,String> commandMap = CommandLineArgParser.parse(args, null, flags); String sentence = null; if (commandMap.get("-h") != null) { System.err.println(callString); return; } // Read text from standard input: BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in)); DocSplitter ds = DocSplitterFactory.create(); // QuotesParens is currently broken, it fails to handle possesives. // QuotesParensSentenceDetector ds = QuotesParensSentenceDetector.create(); int sentence_count = 0; int line_count = 0; while(true) { // Read lines from stdin until enough has been read to form // one (or more) entire sentence(s). while (sentence == null) { try { sentence = stdin.readLine(); line_count++; } catch (IOException e) { System.err.println("Error reading sentence from the standard input!"); } if (sentence == null) return; // Buffer up input text, and wait for a whole, // complete sentence before continuing. ds.addText(sentence + " "); sentence = ds.getNextSentence(); } while (sentence != null) { sentence_count ++; System.out.println(sentence); sentence = ds.getNextSentence(); } } } }
Variation of the median nerve course and its clinical importance. AIMS AND METHODS In our work we present and describe the variation of the course of the median nerve found in both upper limbs of one of the cadavers in our Institute of Anatomy (Medical Faculty, Comenius University, Bratislava, Slovakia) during the students' dissection of the peripheral nerves and vessels. RESULTS AND CONCLUSION This non-standard course of the median nerve was compared with a standard course described in the anatomical literature and atlases, and confronted with the variations of the median nerve found and its course described in the available literature. We also provide some clinical implications of such peripheral nerve variability because understanding such anomalies is important in the diagnosis of unexplained clinical signs and symptoms as well as during nerve blocks and certain surgical procedures around the neck and proximal arm.
/**
 * @license
 * Copyright (c) Peculiar Ventures, LLC.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { h, FunctionalComponent } from '@stencil/core';

import type { INameJSON } from '../../crypto/name';
import { l10n } from '../../utils';

import { RowTitle, RowValue } from './row';

interface ISubjectNameProps {
  name: INameJSON[];
}

/**
 * Renders a certificate subject: a localized title row followed by one value
 * row per name attribute, using the attribute type as a fallback label when
 * no friendly name is available.
 */
export const SubjectName: FunctionalComponent<ISubjectNameProps> = ({ name }) => [
  <RowTitle value={l10n.getString('subjectName')} />,
  name.map((attribute) => (
    <RowValue
      name={attribute.name || attribute.type}
      value={attribute.value}
    />
  )),
];
The process development of laser surface modification of commercially pure titanium (Grade 2) with rhenium The paper presents the results of the process development of laser surface modification of commercially pure titanium with rhenium. The criterion of the successful/optimal process is the repetitive geometry of the surface, characterized by predictable and repetitive chemical composition over its entire surface as well as special mechanical properties (hardness and wear resistance). The analysis of surface geometry included measurements of laser penetration depth and heat affected zone (HAZ), the width of a single track as well as the width of a clad. The diode laser installed on the industrial robot carried out the laser treatment. This solution made possible the continuous supply of powder to the substrate during the process. The aim of the investigation is to find out the possibility of improving the tribological characteristics of the surface due to the rhenium alloying. The verification of the surface properties (tribological) included geometry measurements, microstructure observation, hardness tests and evaluation of wear resistance.
import { DomNode, el } from "@hanul/skynode";
import * as PIXI from "pixi.js";
import GameNode from "../gamenode/GameNode";
import Camera from "./Camera";

/**
 * A 2D rendering surface: owns a canvas, a PIXI renderer and the root
 * GameNode, and drives a requestAnimationFrame render loop with an
 * optional fixed-FPS cap.
 */
export default class Screen extends DomNode<HTMLDivElement> {

    // NOTE(review): not referenced in this file — presumably a reduced frame
    // rate intended for blurred windows; confirm against callers.
    private static readonly FPS_WINDOW_BLURRED = 1;

    public canvas: DomNode<HTMLCanvasElement>;
    private renderer: PIXI.Renderer;
    public camera = new Camera();
    public root = new GameNode(0, 0);

    public left = 0;
    public top = 0;
    public ratio = 0;

    // requestAnimationFrame handle; undefined while paused.
    private animationInterval: number | undefined;
    private beforeTime = 0;  // timestamp of the previous tic
    private timeSigma = 0;   // accumulated time toward the next fixed-FPS step
    private fps: number | undefined;

    constructor(
        public width: number,
        public height: number,
        antialias?: boolean,
        fps?: number,
    ) {
        super(document.createElement("div"));
        // BUG FIX: `fps` was accepted but never stored, so the frame-rate cap
        // in `tic` never applied. Assign it so the cap takes effect.
        this.fps = fps;
        this.append(this.canvas = el("canvas"));
        this.renderer = new PIXI.Renderer({
            view: this.canvas.domElement,
            transparent: true,
            resolution: devicePixelRatio,
            antialias,
        });
        this.renderer.plugins.interaction.autoPreventDefault = false;
        this.resize(width, height);
        this.root.screen = this;
        this.resume();

        // Forward canvas clicks to the scene graph in camera-relative
        // coordinates (origin at the canvas center, shifted by the camera).
        this.canvas.onDom("click", (event: MouseEvent) => {
            const rect = this.canvas.rect;
            this.root.checkTouch(
                event.clientX - rect.left - rect.width / 2 + this.camera.x,
                event.clientY - rect.top - rect.height / 2 + this.camera.y,
                "click",
            );
        });
    }

    public get centerX() { return this.width / 2; }
    public get centerY() { return this.height / 2; }

    /**
     * Resizes the logical drawing surface to width x height pixels and scales
     * the displayed CSS size by `ratio`.
     */
    public resize(width: number, height: number, ratio = 1): void {
        this.canvas.style({ width: width * ratio, height: height * ratio });
        this.canvas.domElement.width = width;
        this.canvas.domElement.height = height;
        this.renderer.resize(width, height);
        this.width = width;
        this.height = height;
        this.ratio = ratio;
    }

    /** Advances the scene by deltaTime ms and renders one frame. */
    private step(deltaTime: number) {
        // Position the root so world-origin maps to the center of the screen,
        // offset by the camera.
        this.root.x = this.width / 2 - this.camera.x;
        this.root.y = this.height / 2 - this.camera.y;
        this.root.step(deltaTime, -this.root.x, -this.root.y, 1, 1, 0, 0, 1, 1);
        this.renderer.render(this.root.pixiContainer);
    }

    // rAF callback: steps either once per frame (uncapped) or in fixed-size
    // slices when an FPS cap is configured.
    private tic = (now: number) => {
        const deltaTime = now - this.beforeTime;
        if (deltaTime > 0) {
            if (this.fps !== undefined && this.fps > 0) {
                this.timeSigma += deltaTime;
                const frameSecond = 1000 / this.fps;
                if (this.timeSigma >= frameSecond) {
                    this.step(frameSecond);
                    this.timeSigma -= frameSecond;
                }
            } else {
                this.step(deltaTime);
            }
            this.beforeTime = now;
        }
        this.animationInterval = requestAnimationFrame(this.tic);
    };

    /** Starts (or restarts) the render loop; no-op if already running. */
    public resume(): void {
        if (this.animationInterval === undefined) {
            this.beforeTime = performance.now();
            this.animationInterval = requestAnimationFrame(this.tic);
        }
    }

    /** Stops the render loop; no-op if already paused. */
    public pause(): void {
        if (this.animationInterval !== undefined) {
            cancelAnimationFrame(this.animationInterval);
            this.animationInterval = undefined;
        }
    }

    /** Stops the render loop and tears down the DOM node. */
    public delete(): void {
        this.pause();
        super.delete();
    }
}
<gh_stars>100-1000
import * as PIXI from "pixi.js"

/**
 * The six face textures (or texture URLs) of a cubemap, one per axis
 * direction: positive/negative x, y and z.
 */
export interface CubemapFaces {
  /** The texture or url for positive x. */
  posx: PIXI.Texture | string
  /** The texture or url for negative x. */
  negx: PIXI.Texture | string
  /** The texture or url for positive y. */
  posy: PIXI.Texture | string
  /** The texture or url for negative y. */
  negy: PIXI.Texture | string
  /** The texture or url for positive z. */
  posz: PIXI.Texture | string
  /** The texture or url for negative z. */
  negz: PIXI.Texture | string
}
import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable } from 'rxjs';

import { User } from '../../models/users.interfaces';
import { BasePagedService } from '../../../services/base/base-paged.service';
import { HttpErrorHandler } from 'src/app/core/helpers/http-error-handler';

/**
 * CRUD service for User resources, delegating HTTP plumbing (error handling,
 * paging) to BasePagedService. All endpoints live under `users/`.
 */
@Injectable()
export class UsersService extends BasePagedService<User> {

  constructor(protected http: HttpClient, protected httpErrorHandler: HttpErrorHandler) {
    super(http, httpErrorHandler, 'UsersService');
  }

  /** Request options carrying a JSON Content-Type header. */
  private jsonOptions(): { headers: HttpHeaders } {
    return { headers: new HttpHeaders({ 'Content-Type': 'application/json' }) };
  }

  /** Creates a new user via POST users. */
  createUser(user: User): Observable<User> {
    return super.post('users', user, this.jsonOptions());
  }

  /** Fetches a single user via GET users/:id. */
  getUser(userId: string): Observable<User> {
    return super.get(`users/${userId}`, this.jsonOptions());
  }

  /** Fetches all users via GET users. */
  getUsers(query?: any): Observable<User[]> {
    // NOTE(review): `query` is accepted but not forwarded to the request —
    // confirm whether it should become query parameters.
    return super.getAll('users', {});
  }

  /** Fetches a page of users via GET users/paged. */
  getUsersPaged(input: any, query?: any): Observable<any> {
    // NOTE(review): `query` is accepted but not forwarded — confirm intent.
    return super.getAllPaged('users/paged', {}, input);
  }

  /** Partially updates a user via PATCH users/:id. */
  updateUser(userId: string, user: User): Observable<User> {
    return super.patch(`users/${userId}`, user, this.jsonOptions());
  }

  /** Deletes a user via DELETE users/:id. */
  deleteUser(userId: string): Observable<any> {
    return super.delete(`users/${userId}`, this.jsonOptions());
  }
}
<filename>alg_selection_sort.java /****************************************************************************** # l0m1s # <EMAIL> algoritmo selection sort sviluppato per Java *******************************************************************************/ public class alg_selection_sort { public static void main(String[] args) { int arrayB[]={5,9,2,3,1,0,4}; for(int iVar = 0; iVar<arrayB.length; iVar++){ System.out.print(Integer.toString(arrayB[iVar])+" "); } System.out.println(); for(int jVar=0; jVar<arrayB.length-1;jVar++){ // dove 6 e' il valore della lunghezza dell'array meno 1 int indexMin= jVar; for(int kVar= jVar+1;kVar<arrayB.length;kVar++){ // dove 7 è la lunghezza dell'array if(arrayB[indexMin]>arrayB[kVar]){ indexMin=kVar; } } int temp=arrayB[jVar]; arrayB[jVar]=arrayB[indexMin]; arrayB[indexMin]=temp; for(int wVar=0;wVar<7;wVar++){ System.out.print(Integer.toString(arrayB[wVar])+" "); } System.out.println(); } } }
/**
 * Calculates the probability that a section of text is hyperlinked. Useful for
 * detecting entities.
 *
 * Probabilities are persisted in an on-disk {@code ObjectDb<Double>} keyed by
 * "langCode:phrase"; an optional in-memory cache maps a 64-bit phrase hash to
 * the probability. "Subgrams" (prefixes of known anchor phrases) are tracked so
 * page scanning can stop extending an n-gram as soon as no anchor starts with it.
 *
 * @author Shilad Sen
 */
public class LinkProbabilityDao {
    private static final Logger LOG = LoggerFactory.getLogger(LinkProbabilityDao.class);
    // Directory holding the on-disk ObjectDb.
    private final File path;
    private final Language lang;
    private final RawPageDao pageDao;
    private final PhraseAnalyzerDao phraseDao;
    // Normalizer shared with the phrase dao so keys match its phrases.
    private final StringNormalizer normalizer;

    // Null until the dao is built (or the constructor finds an existing db).
    private ObjectDb<Double> db;

    // Optional in-memory caches; null unless useCache(true) was called.
    private TLongFloatMap cache = null;
    private TLongSet subGrams = null;

    /**
     * Opens an existing link-probability database at {@code path} if one exists;
     * otherwise the dao is unusable until {@link #build()} is called.
     *
     * @throws DaoException if the existing database cannot be opened
     */
    public LinkProbabilityDao(File path, Language lang, RawPageDao pageDao, PhraseAnalyzerDao phraseDao) throws DaoException {
        this.path = path;
        this.lang = lang;
        this.pageDao = pageDao;
        this.phraseDao = phraseDao;
        this.normalizer = phraseDao.getStringNormalizer();
        if (path.exists()) {
            try {
                db = new ObjectDb<Double>(path, false);
            } catch (IOException e) {
                throw new DaoException(e);
            }
        } else {
            LOG.warn("path " + path + " does not exist... LinkProbabilityDao will not work until build() is called.");
        }
    }

    /**
     * If true, create a memory cache that stores a 64-bit hashcode for each word.
     * If the cache doesn't exist, it will be built.
     *
     * Serialized cache files ("-phrase-cache.bin" / "-subgram-cache.bin") next to
     * the db are reused when newer than the db's stored timestamp; otherwise the
     * cache is rebuilt by scanning the db and the files are rewritten.
     *
     * NOTE(review): unlike most methods here, I/O failures are wrapped in
     * RuntimeException rather than DaoException — confirm this is intentional.
     *
     * @param useCache true to enable (and build/load) the cache; false to drop it
     */
    public void useCache(boolean useCache) {
        if (!useCache) {
            this.cache = null;
            return;
        } else if (db == null) {
            this.cache = new TLongFloatHashMap();    // build cache later
            return;
        }
        File fp = new File(path + "-phrase-cache.bin");
        File fsg = new File(path + "-subgram-cache.bin");
        // Ensure the db carries a timestamp so cache freshness can be checked.
        long tstamp = 0;
        try {
            Double doubleTstamp = db.get("tstamp");
            if (doubleTstamp == null) {
                tstamp = System.currentTimeMillis();
                db.put("tstamp", 1.0 * tstamp);
                db.flush();
            } else {
                tstamp = db.get("tstamp").longValue();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
        // Fast path: reuse serialized caches when both are newer than the db.
        if (fp.isFile() && fp.lastModified() > tstamp && fsg.isFile() && fsg.lastModified() > tstamp) {
            try {
                cache = (TLongFloatMap) WpIOUtils.readObjectFromFile(fp);
                subGrams = (TLongSet) WpIOUtils.readObjectFromFile(fsg);
                LOG.info("Using up-to-date link probability cache files {} and {}", fp, fsg);
                return;
            } catch (IOException e) {
                // Fall through and rebuild from the db.
                LOG.warn("Using link probability dao cache failed: ", e);
            }
        }
        LOG.info("building cache...");
        TLongFloatMap cache = new TLongFloatHashMap();
        Iterator<Pair<String, Double>> iter = db.iterator();
        TLongSet subgrams = new TLongHashSet();
        while (iter.hasNext()) {
            Pair<String, Double> entry = iter.next();
            if (entry.getKey().equalsIgnoreCase("tstamp")) {
                // do nothing... metadata entry, not a phrase
            } else if (entry.getKey().startsWith(":s:")) {
                // Subgram entries are stored as ":s:<hash>".
                long hash = Long.valueOf(entry.getKey().substring(3));
                subgrams.add(hash);
            } else {
                // Phrase entries are "langCode:phrase".
                String tokens[] = entry.getKey().split(":", 2);
                // NOTE(review): this local shadows the field and is never used —
                // the hash below ignores the language prefix; confirm intended.
                Language lang = Language.getByLangCode(tokens[0]);
                long hash = hashCode(tokens[1]);
                cache.put(hash, entry.getRight().floatValue());
            }
        }
        this.cache = cache;
        this.subGrams = subgrams;
        LOG.info("created cache with " + cache.size() + " entries and " + subgrams.size() + " subgrams");
        // Persist the rebuilt caches for the next run.
        try {
            WpIOUtils.writeObjectToFile(fp, cache);
            WpIOUtils.writeObjectToFile(fsg, subgrams);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Build the cache if it is not already built.
     * @throws DaoException
     */
    public void buildIfNecessary() throws DaoException {
        if (!isBuilt()) build();
    }

    /**
     * @return The language associated with this dao.
     */
    public Language getLang() {
        return lang;
    }

    /**
     * Retrieves the probability a link is linked in Wikipedia.
     * Text normalization is performed on the mention first.
     * @param mention the candidate anchor text
     * @return the link probability, or 0.0 if the phrase is unknown
     * @throws DaoException
     */
    public double getLinkProbability(String mention) throws DaoException {
        return getLinkProbability(mention, true);
    }

    /**
     * Retrieves the probability a link is linked in Wikipedia.
     * If normalize is true, text normalization is first performed.
     * @param mention the candidate anchor text
     * @param normalize If true, the text is normalized.
     * @return the link probability, or 0.0 if the phrase is unknown
     * @throws DaoException on db read failure
     * @throws IllegalStateException if the dao has not been built
     */
    public double getLinkProbability(String mention, boolean normalize) throws DaoException {
        if (db == null) {
            throw new IllegalStateException("Dao has not yet been built. Call build()");
        }
        String normalizedMention = cleanString(mention, normalize);
        // Prefer the in-memory cache when populated; unknown phrases score 0.0.
        if (cache != null && cache.size() > 0) {
            long hash = hashCode(normalizedMention);
            return cache.containsKey(hash) ? cache.get(hash) : 0.0;
        }
        String key = lang.getLangCode() + ":" + normalizedMention;
        Double d = null;
        try {
            d = db.get(key);
        } catch (IOException e) {
            throw new DaoException(e);
        } catch (ClassNotFoundException e) {
            throw new DaoException(e);
        }
        if (d == null) {
            return 0.0;
        } else {
            return d;
        }
    }

    /**
     * Rebuilds the link probability dao. Deletes the dao if it currently exists.
     *
     * Pipeline: (1) hash every known anchor phrase and all of its word-prefix
     * "subgrams"; (2) scan all articles in parallel, counting how often each
     * anchor phrase appears as plain text; (3) store, for each phrase,
     * links / (textOccurrences + 3) — smoothed link probability.
     *
     * @throws DaoException
     */
    public synchronized void build() throws DaoException {
        // Start from a clean slate: close and delete any existing db.
        if (db != null) {
            db.close();
        }
        if (path.exists()) {
            FileUtils.deleteQuietly(path);
        }
        path.mkdirs();
        try {
            this.db = new ObjectDb<Double>(path, true);
        } catch (IOException e) {
            throw new DaoException(e);
        }
        subGrams = new TLongHashSet();
        LOG.info("building link probabilities for language " + lang);
        // counts: hash of full anchor phrase -> times seen as plain text.
        final TLongIntMap counts = new TLongIntHashMap();
        Iterator<String> iter = phraseDao.getAllPhrases(lang);
        StringTokenizer tokenizer = new StringTokenizer();
        while (iter.hasNext()) {
            String phrase = iter.next();
            List<String> words = tokenizer.getWords(lang, phrase);
            StringBuilder buffer = new StringBuilder("");
            long hash = -1;
            // Register every word-prefix of the phrase as a subgram; the final
            // prefix (== the whole phrase) also seeds the text-occurrence map.
            for (int i = 0; i < words.size(); i++) {
                if (i > 0) buffer.append(' ');
                buffer.append(words.get(i));
                hash = hashCode(buffer.toString());
                subGrams.add(hash);
            }
            counts.put(hash, 0);
        }
        LOG.info("found " + counts.size() + " unique anchortexts and " + subGrams.size() + " subgrams");
        // Count plain-text occurrences across all (non-redirect, non-disambig)
        // articles, in parallel.
        DaoFilter filter = new DaoFilter()
                .setRedirect(false)
                .setLanguages(lang)
                .setDisambig(false)
                .setNameSpaces(NameSpace.ARTICLE);
        ParallelForEach.iterate(
                pageDao.get(filter).iterator(),
                WpThreadUtils.getMaxThreads(),
                100,
                new Procedure<RawPage>() {
                    @Override
                    public void call(RawPage page) throws Exception {
                        processPage(counts, page);
                    }
                },
                10000);
        int count = 0;
        int misses = 0;
        double sum = 0.0;
        // Guards against writing the same (hashed) phrase twice.
        TLongSet completed = new TLongHashSet();
        TLongIntMap linkCounts = getPhraseLinkCounts();
        Iterator<Pair<String, PrunedCounts<Integer>>> phraseIter = phraseDao.getAllPhraseCounts(lang);
        while (phraseIter.hasNext()) {
            Pair<String, PrunedCounts<Integer>> pair = phraseIter.next();
            String phrase = cleanString(pair.getLeft());
            long hash = hashCode(phrase);
            if (completed.contains(hash)) {
                continue;
            }
            completed.add(hash);
            try {
                int numLinks = linkCounts.get(hash);
                int numText = counts.get(hash);
                if (numText == 0) {
                    misses++;
                }
                count++;
                double p = 1.0 * numLinks / (numText + 3.0);    // 3.0 for smoothing
                sum += p;
//                System.out.println(String.format("inserting values into db: %s, %f", pair.getLeft, p));
                db.put(lang.getLangCode() + ":" + phrase, p);
                if (cache != null) {
                    cache.put(hash, (float) p);
                }
            } catch (IOException e) {
                throw new DaoException(e);
            }
        }
        // Persist subgram hashes with a sentinel value of -1.0.
        for (long h : subGrams.toArray()) {
            try {
                db.put(":s:" + h, -1.0);
            } catch (IOException e) {
                throw new DaoException(e);
            }
        }
        // Record the build time so useCache() can validate serialized caches.
        try {
            db.put("tstamp", 1.0 * System.currentTimeMillis());
        } catch (IOException e) {
            throw new DaoException(e);
        }
        if (count != 0) {
            // NOTE(review): "mises" looks like a typo for "misses" in this log
            // message (runtime string left unchanged here).
            LOG.info(String.format(
                    "Inserted link probabilities for %d anchors with mean probability %.4f and %d mises",
                    count, sum / count, misses));
        }
        db.flush();
    }

    /**
     * Counts plain-text occurrences of known anchor phrases in one page.
     * For each sentence, n-grams are grown word by word; growth stops as soon
     * as the current n-gram is not a registered subgram, since no anchor
     * phrase can start with it. Updates to {@code counts} are synchronized
     * because this runs on multiple threads.
     */
    private void processPage(TLongIntMap counts, RawPage page) {
        Language lang = page.getLanguage();
        StringTokenizer tokenizer = new StringTokenizer();
        StringBuilder buffer = new StringBuilder();
        for (Token sentence : tokenizer.getSentenceTokens(lang, page.getPlainText())) {
            List<Token> words = tokenizer.getWordTokens(lang, sentence);
            for (int i = 0; i < words.size(); i++) {
                buffer.setLength(0);
                for (int j = i; j < words.size(); j++) {
                    if (j > i) {
                        buffer.append(' ');
                    }
                    buffer.append(words.get(j).getToken());
                    String phrase = cleanString(buffer.toString(), true);
                    long hash = hashCode(phrase);
                    if (subGrams.contains(hash)) {
                        synchronized (counts) {
                            if (counts.containsKey(hash)) {
                                // Full anchor phrase: bump its text count.
//                                System.out.println("here 1: " + phrase);
                                counts.adjustValue(hash, 1);
                            } else {
                                // Proper prefix of some anchor; keep extending.
//                                System.out.println("here 2: " + phrase);
                            }
                        }
                    } else {
//                        System.out.println("here 3: " + phrase);
                        break;  // no point in going any further...
                    }
                }
            }
        }
    }

    /**
     * Sums link counts per (hashed, cleaned) phrase from the phrase analyzer.
     * @return map from phrase hash to total number of links with that anchor
     */
    private TLongIntMap getPhraseLinkCounts() {
        Iterator<Pair<String, PrunedCounts<Integer>>> phraseIter = phraseDao.getAllPhraseCounts(lang);
        TLongIntMap counts = new TLongIntHashMap();
        while (phraseIter.hasNext()) {
            Pair<String, PrunedCounts<Integer>> pair = phraseIter.next();
            String phrase = cleanString(pair.getLeft());
            long hash = hashCode(phrase);
            int n = pair.getRight().getTotal();
            counts.adjustOrPutValue(hash, n, n);
        }
        return counts;
    }

    /** @return true if the on-disk database exists and contains entries. */
    public boolean isBuilt() {
        return (db != null && !db.isEmpty());
    }

    /**
     * @return true if the phrase is a known anchor phrase or a prefix of one.
     * @throws IllegalArgumentException if the in-memory cache is not enabled
     */
    public boolean isSubgram(String phrase, boolean normalize) {
        if (cache == null || subGrams == null) {
            throw new IllegalArgumentException("Subgrams require a cache!");
        }
        String cleaned = cleanString(phrase, normalize);
        long h = hashCode(cleaned);
        return cache.containsKey(h) || subGrams.contains(h);
    }

    // Canonicalizes a phrase without applying the string normalizer.
    private String cleanString(String s) {
        return cleanString(s, false);
    }

    // Canonicalizes a phrase: optional normalization, then re-tokenization
    // with single-space joining so hashing is whitespace-insensitive.
    private String cleanString(String s, boolean normalize) {
        if (normalize) s = normalizer.normalize(lang, s);
        StringTokenizer t = new StringTokenizer();
        return StringUtils.join(t.getWords(lang, s), " ");
    }

    // 64-bit hash used for all phrase keys in the in-memory caches.
    static long hashCode(String string) {
        return WpStringUtils.longHashCode2(string);
    }

    /**
     * Configurator provider that constructs a LinkProbabilityDao for the
     * language given via the required "language" runtime parameter.
     */
    public static class Provider extends org.wikibrain.conf.Provider<LinkProbabilityDao> {
        public Provider(Configurator configurator, Configuration config) throws ConfigurationException {
            super(configurator, config);
        }

        @Override
        public Class<LinkProbabilityDao> getType() {
            return LinkProbabilityDao.class;
        }

        @Override
        public String getPath() {
            return "phrases.linkProbability";
        }

        @Override
        public LinkProbabilityDao get(String name, Config config, Map<String, String> runtimeParams) throws ConfigurationException {
            LanguageSet ls = getConfigurator().get(LanguageSet.class);
            if (runtimeParams == null || !runtimeParams.containsKey("language")){
                throw new IllegalArgumentException("LinkProbabilityDao requires 'language' runtime parameter.");
            }
            Language language = Language.getByLangCode(runtimeParams.get("language"));
            // Each language gets its own subdirectory under the configured path.
            File path = new File(config.getString("path"), language.getLangCode());
            String pageName = config.hasPath("rawPageDao") ? config.getString("rawPageDao") : null;
            String phraseName = config.hasPath("phraseAnalyzer") ? config.getString("phraseAnalyzer") : null;
            RawPageDao rpd = getConfigurator().get(RawPageDao.class, pageName);
            PhraseAnalyzer pa = getConfigurator().get(PhraseAnalyzer.class, phraseName);
            if (!(pa instanceof AnchorTextPhraseAnalyzer)) {
                throw new ConfigurationException("LinkProbabilityDao's phraseAnalyzer must be an AnchorTextPhraseAnalyzer");
            }
            PhraseAnalyzerDao pad = ((AnchorTextPhraseAnalyzer)pa).getDao();
            try {
                return new LinkProbabilityDao(path, language, rpd, pad);
            } catch (DaoException e) {
                throw new ConfigurationException(e);
            }
        }
    }
}
Although today (Tuesday) is a Chevrolet ‘manufacturer test day’ at Indianapolis Motor Speedway, Marco Andretti’s Honda-powered Andretti Autosport car will also be running in order to give the IndyCar competition department a chance to make comparisons with as few variables as possible, with both types of car running domed skids. Pappas told Motorsport.com: “It’s a Chevrolet manufacturers’ test day, but they have allowed us to run a Honda to do some back to back testing at the same race track on the same day to get some sense of the behavior of both cars with domed skids. “Then Wednesday is a test for all cars, where we say, ‘Imagine what you could be running in the week up to qualifications for the Indy 500.’ We’d like to see test lists from each manufacturer and try and get through all those combinations to be sure that when we get there for the Month of May, we won’t have any surprises.” Pappas said the controversial domed skids, which James Hinchcliffe has slammed as “asinine” and harmful to the quality of racing on the superspeedways, are likely to stay, but he will be listening to all views. “It’s in the rulebook that this is going to be part of the kit for Indianapolis, Texas and Pocono, and both manufacturers knew that. “But we are trying to create the best chance to have a great 100th Indy 500, and so we thought it was in everyone’s interests to do a back to back test at IMS, and then take the input and data and make a decision one way or the other. “They [Schmidt Peterson Motorsports and Hinchcliffe] tested at Fontana and that is nothing like Indianapolis Motor Speedway. So let’s get everyone together and do a good analytical study and listen to the drivers.” Safety for all is a priority Pappas emphasized that the employment of the domes was primarily for the safety of competitors and fans in the grandstands, following last year’s practice sessions which saw three cars spin and flip upside down once they were traveling backward at high speed. 
While the new flaps on the beam wing are meant to prevent any recurrence of this issue, IndyCar has decided that slowing the cars during rotation – which is the theory behind the domed skids – long before they reach that 180-degree point is a worthy safety measure. Pappas insisted: “After the safety issues we had last year, both manufacturers were heavily involved in how to move forward, this was the solution and this is now in the rules. Nobody can afford to have situations like occurred last May. “Thank God nothing got through the fences, but we’ve got to constantly look at the value of safety. The series is most concerned about safety for all – competitors and spectators. “That is the foremost criteria for us doing anything. And it was established by our participants and partners that domed skids would be part of that progress in safety.” Teams and drivers can adapt Pappas admitted he was bemused by the strident reaction against the use of the domed skids, and suggested that teams needed to get used to altering the car to fit the rules, rather than expecting it to be the other way around. “As a former race engineer, I can say that you adapt around what you’re given,” he said, “and if your setup doesn’t work, well, then you carry on adapting your setups until they do work. “Race teams adapt. When I was a race engineer, long before the current IndyCar Series or even IRL, we adapted. We’d be given a set of rules and then it was up to us to do the best we could with what we had been given. “I can’t reiterate enough that both manufacturers knew these were the rules for Indianapolis. They were very concerned about the problems through the Month of May, we all continued to work on it right through the summer, had several meetings, and all decided ‘Let’s go forward with this.’”
<filename>threatconnect-sdk/threatconnect-sdk-core/src/main/java/com/threatconnect/sdk/server/response/service/error/TimestampInvalidApiServiceResponse.java /* * To change this template, choose Tools | Templates * and open the template in the editor. */ package com.threatconnect.sdk.server.response.service.error; import com.threatconnect.sdk.server.response.service.ApiServiceResponse; /** * * @author James */ public class TimestampInvalidApiServiceResponse extends ApiServiceResponse { public TimestampInvalidApiServiceResponse() { super("Invalid timestamp"); } }
<filename>lib/index.ts<gh_stars>0 import { url } from "./url"; import { Manager, ManagerOptions } from "./manager"; import { Socket, SocketOptions } from "./socket"; const debug = require("debug")("socket.io-client"); /** * Module exports. */ module.exports = exports = lookup; /** * Managers cache. */ const cache = (exports.managers = {}); /** * Looks up an existing `Manager` for multiplexing. * If the user summons: * * `io('http://localhost/a');` * `io('http://localhost/b');` * * We reuse the existing instance based on same scheme/port/host, * and we initialize sockets for each namespace. * * @public */ function lookup(opts?: Partial<ManagerOptions | SocketOptions>): Socket; function lookup( uri: string, opts?: Partial<ManagerOptions | SocketOptions> ): Socket; function lookup(uri: any, opts?: any): Socket { if (typeof uri === "object") { opts = uri; uri = undefined; } opts = opts || {}; const parsed = url(uri); const source = parsed.source; const id = parsed.id; const path = parsed.path; const sameNamespace = cache[id] && path in cache[id].nsps; const newConnection = opts.forceNew || opts["force new connection"] || false === opts.multiplex || sameNamespace; let io; if (newConnection) { debug("ignoring socket cache for %s", source); io = new Manager(source, opts); } else { if (!cache[id]) { debug("new io instance for %s", source); cache[id] = new Manager(source, opts); } io = cache[id]; } if (parsed.query && !opts.query) { opts.query = parsed.query; } return io.socket(parsed.path, opts); } /** * Protocol version. * * @public */ export { protocol } from "socket.io-parser"; /** * `connect`. * * @param {String} uri * @public */ exports.connect = lookup; /** * Expose constructors for standalone build. * * @public */ export { Manager } from "./manager"; export { lookup as io, Socket, SocketOptions };
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    bt_avctp::Error as AvctpError,
    failure::{Error, Fail},
    futures::{sink::Sink, stream::FusedStream, task::Context, Poll, Stream},
    pin_utils::unsafe_pinned,
    std::pin::Pin,
};

use crate::packets::Error as PacketError;

// TODO(BT-2197): change to the BT shared peer id type when the BrEdr protocol changes over.
pub type PeerId = String;

/// The error types for peer management.
#[derive(Fail, Debug)]
pub enum PeerError {
    /// Error encoding/decoding packet
    #[fail(display = "Packet encoding/decoding error: {:?}", _0)]
    PacketError(PacketError),

    /// Error in protocol layer
    #[fail(display = "Protocol layer error: {:?}", _0)]
    #[allow(dead_code)]
    AvctpError(AvctpError),

    /// The remote peer was not connected when a command was attempted.
    #[fail(display = "Remote device was not connected")]
    RemoteNotFound,

    /// The remote peer does not support the issued command.
    #[fail(display = "Remote command is unsupported")]
    CommandNotSupported,

    /// The remote peer rejected the issued command.
    #[fail(display = "Remote command rejected")]
    CommandFailed,

    /// Establishing a connection to the peer failed; wraps the cause.
    #[fail(display = "Unable to connect")]
    ConnectionFailure(#[cause] Error),

    /// The peer replied with a response the protocol did not expect.
    #[fail(display = "Unexpected response to command")]
    UnexpectedResponse,

    /// Catch-all for errors not covered by the variants above.
    #[fail(display = "Generic errors")]
    GenericError(#[cause] Error),

    // Reserved so adding variants later is not a breaking change.
    #[doc(hidden)]
    #[fail(display = "__Nonexhaustive error should never be created.")]
    __Nonexhaustive,
}

impl From<AvctpError> for PeerError {
    fn from(error: AvctpError) -> Self {
        PeerError::AvctpError(error)
    }
}

impl From<PacketError> for PeerError {
    fn from(error: PacketError) -> Self {
        PeerError::PacketError(error)
    }
}

impl From<Error> for PeerError {
    fn from(error: Error) -> Self {
        PeerError::GenericError(error)
    }
}

/// A specialized stream combinator similar to Map. PeerIdStreamMap encapsulates another stream and
/// wraps each item returned by the stream in a tuple that has a specified PeerId as
/// the first field and the wrapped item response as the second field.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct PeerIdStreamMap<St> {
    // The wrapped inner stream.
    stream: St,
    // Peer id cloned into every yielded tuple.
    peer_id: PeerId,
}

impl<St: Unpin> Unpin for PeerIdStreamMap<St> {} // Conditional Unpin impl to make unsafe_pinned safe.

impl<St> PeerIdStreamMap<St>
where
    St: Stream,
{
    // Pin-projection to the inner stream; safe because Unpin is only
    // implemented when St: Unpin (see above).
    unsafe_pinned!(stream: St);

    /// Wraps `stream`, tagging every item with a clone of `peer_id`.
    pub fn new(stream: St, peer_id: &PeerId) -> PeerIdStreamMap<St> {
        Self { stream, peer_id: peer_id.clone() }
    }
}

impl<St: FusedStream> FusedStream for PeerIdStreamMap<St> {
    fn is_terminated(&self) -> bool {
        self.stream.is_terminated()
    }
}

impl<St> Stream for PeerIdStreamMap<St>
where
    St: Stream,
{
    type Item = (PeerId, St::Item);

    // Delegates to the inner stream and pairs each item with the peer id.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.as_mut().stream().poll_next(cx).map(|opt| opt.map(|t| (self.peer_id.clone(), t)))
    }
}

// Forwarding impl of Sink to the underlying stream if there is one.
impl<St: Stream + Sink<Item>, Item> Sink<Item> for PeerIdStreamMap<St> {
    type Error = St::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.stream().poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
        self.stream().start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.stream().poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.stream().poll_close(cx)
    }
}
<reponame>rajith-lk/keycloack-examples<filename>providers/src/main/java/ca/randoli/examples/keycloak/providers/LogFileEventListenerProviderFactory.java package ca.randoli.examples.keycloak.providers; import java.util.HashSet; import java.util.Set; import org.keycloak.Config; import org.keycloak.events.EventListenerProvider; import org.keycloak.events.EventListenerProviderFactory; import org.keycloak.events.EventType; import org.keycloak.events.admin.OperationType; import org.keycloak.models.KeycloakSession; import org.keycloak.models.KeycloakSessionFactory; public class LogFileEventListenerProviderFactory implements EventListenerProviderFactory { private Set<EventType> excludedEvents; private Set<OperationType> excludedAdminOperations; @Override public EventListenerProvider create(KeycloakSession session) { return new LogFileEventListenerProvider(excludedEvents, excludedAdminOperations, session); } @Override public void init(Config.Scope config) { String[] excludes = config.getArray("excludes"); if (excludes != null) { excludedEvents = new HashSet<>(); for (String e : excludes) { excludedEvents.add(EventType.valueOf(e)); } } String[] excludesOperations = config.getArray("excludesOperations"); if (excludesOperations != null) { excludedAdminOperations = new HashSet<>(); for (String e : excludesOperations) { excludedAdminOperations.add(OperationType.valueOf(e)); } } } @Override public void postInit(KeycloakSessionFactory factory) { } @Override public void close() { } @Override public String getId() { return "logfile-event-listener"; } }
Story highlights ISIS often attacks fellow Muslims who don't support its view of Islam, Mideast expert says Attack occurred during a soccer ceremony in Iskandariya, a city in central Iraq ISIS claimed responsibility for the attack, which killed at least 25 people (CNN) A man wearing a suicide belt walked into an Iraqi soccer stadium Friday and blew himself up -- killing at least 25 people and wounding 90 more, security officials said. A crowd had gathered for a ceremony to mark a championship for a popular local soccer team when the bomb exploded, the head of the Babil province security committee, Baydhan al Hamdani, told CNN. A video posted on YouTube showed soccer players approaching a table holding trophies before an explosion occurred. CNN could not independently authenticate the video. The attacker struck at al-Shuhadaa stadium in the Babil province city of Iskandariya, roughly 30 miles (50 kilometers) south of Baghdad. ISIS claimed responsibility, according to a statement posted online by supporters. Read More
AMSTERDAM (Reuters) - The International Criminal Court on Friday issued a new arrest warrant for Congolese general Bosco Ntaganda, for alleged war crimes including murder, rape and sexual slavery. General Bosco Ntaganda addresses a news conference in Kabati, a village located in Congo's eastern North Kivu province, January 8, 2009. REUTERS/Abdul Ndemere Ntaganda is already wanted by the Hague-based war crimes court for conscripting child fighters in the Democratic Republic of Congo (DRC). The court said the new warrant was for suspected war crimes and crimes against humanity in the DRC’s Kivu provinces, a mineral-rich area plagued by long-running conflict, between September 2002 and September 2003. “There are reasonable grounds to believe that Bosco Ntaganda is responsible for three counts of crimes against humanity, consisting in murder, rape and sexual slavery, and persecution,” the court said in a statement. “Bosco Ntaganda allegedly bears individual criminal responsibility for four counts of war crimes consisting of murder, attacks against the civilian population, rape and sexual slavery, and pillaging,” it added. The ICC has sought Ntaganda’s arrest for six years on charges that he conscripted children to fight in a bloody ethnic conflict in northeastern Congo that grew out of a broader civil war. Ntaganda denies involvement in war crimes. The court also issued an arrest warrant on Friday for Sylvestre Mudacumura, the leader of the FDLR (Democratic Forces for the Liberation of Rwanda) militia operating in the Kivu provinces, saying he was suspected of war crimes between January 2009 and September 2010 in the area. “There are reasonable grounds to believe that Mr Mudacumura is responsible for nine counts of war crimes, consisting of attacking civilians, murder, mutilation, cruel treatment, rape, torture, destruction of property, pillaging and outrages against personal dignity,” the court said in a statement. 
The leaders of the mostly ethnic Hutu FDLR fled from Rwanda to Congo after Rwanda’s 1994 genocide, in which 800,000 people died, mostly ethnic Tutsis. The group played a major role in Congo’s 1998-2003 conflict, in which 5 million people died, and has continued mass rapes, torture and killing. This week, the Democratic Republic of Congo, Rwanda and neighbouring states called for the creation of an international military force to eliminate armed rebels in the DRC’s turbulent east.
//! Function to solve for voltage using SparseLU void IRSolver::SolveIR() { if(!m_connection) { cout<<"WARNING: Powergrid is not connected to all instances,"<< "IR Solver may not be accurate, LVS may also fail."<<endl; } int unit_micron = (m_db->getTech())->getDbUnitsPerMicron(); clock_t t1, t2; CscMatrix* Gmat = m_Gmat->GetGMat(); int nnz = Gmat->nnz; int m = Gmat->num_rows; int n = Gmat->num_cols; double* values = &(Gmat->values[0]); int* row_idx = &(Gmat->row_idx[0]); int* col_ptr = &(Gmat->col_ptr[0]); Map<SparseMatrix<double> > A( Gmat->num_rows, Gmat->num_cols, Gmat->nnz, col_ptr, row_idx, values); vector<double> J = GetJ(); Map<VectorXd> b(J.data(),J.size()); VectorXd x; SparseLU<SparseMatrix<double> > solver; cout << "INFO: Factorizing G" << endl; solver.compute(A); if(solver.info()!=Success) { cout<<"Error: LU factorization of GMatrix failed"<<endl; return; } cout << "INFO: Solving GV=J" << endl; cout << "INFO: SparseLU begin solving" << endl; x = solver.solve(b); cout << "INFO: SparseLU finished solving" << endl; if(solver.info()!=Success) { cout<<"Error: Solving V = inv(G)*J failed"<<endl; return; } ofstream ir_report; ir_report.open (m_out_file); ir_report<<"Instance name, "<<" X location, "<<" Y location, "<<" Voltage "<<"\n"; int num_nodes = m_Gmat->GetNumNodes(); int node_num =0; double sum_volt = 0; wc_voltage = vdd; while(node_num < num_nodes) { Node* node = m_Gmat->GetNode(node_num); double volt = x(node_num); sum_volt = sum_volt + volt; if (volt < wc_voltage) { wc_voltage = volt; } node->SetVoltage(volt); node_num++; if(node->HasInstances()) { NodeLoc node_loc = node->GetLoc(); float loc_x = ((float)node_loc.first)/((float)unit_micron); float loc_y = ((float)node_loc.second)/((float)unit_micron); std::vector<dbInst*> insts = node->GetInstances(); std::vector<dbInst*>::iterator inst_it; if (m_out_file != "") { for(inst_it = insts.begin();inst_it!=insts.end();inst_it++) { ir_report<<(*inst_it)->getName()<<", "<<loc_x<<", " <<loc_y<<", 
"<<setprecision(10)<<volt<<"\n"; } } } } ir_report<<endl; ir_report.close(); avg_voltage = sum_volt / num_nodes; }
// Barrel file: re-exports the featured-clan-card module and component so
// consumers can import both from this directory's public entry point.
export * from './featured-clan-card.module'; export * from './featured-clan-card.component';
Apple didn’t just deliver one mobile wallet at its iPhone 6 and Apple Watch launch event on Tuesday. It delivered two. Its new Apple Pay service, which will launch on the iPhone 6 and iPhone 6 Plus in October, combines two mobile payment methods that are often conflated to represent entirely different types of transactions: using your phone in lieu of plastic to pay at the cash register, and buying goods online over your mobile phone. Advertisement While Apple still has plenty of self-interested reasons to get into the mobile payments game, it definitely appears that Apple Pay is addressing many of the problems plaguing other wallets. And as my colleague Kevin Tofel predicted last week, Apple is integrating Pay with a lot of other services and features readily available and popular on the iPhone and the overall Apple ecosystem. First off, Apple Pay will become part of Passbook, storing your credit cards in tabs the same way it stores your boarding passes and loyalty cards. It will use Touch ID to verify consumer identity and presence at the point of sale. And it will make use of the 800 million credit cards it already has on file attached to users’ Apple IDs. As soon as you activate Apple Pay, you’ll already have a credit card loaded: the one you use for your iTunes purchases. Apple Pay will also be incorporated directly into the new Apple Watch. The new pieces Apple is adding to the puzzle are a near-field communications (NFC) chip and secure hardware element in the iPhone 6 and iPhone 6 Plus where credit card information is stored. In that sense, Apple Pay will work very similarly to Isis/Softcard and other contactless wallets. The element will store unique virtual credit card numbers different from the digits imprinted on your plastic. After a transaction is complete, the virtual numbers are replaced so your previous transaction data can’t be used for fraudulent purchases. 
That way the merchant never sees your credit card number, security code or even your name, said Eddy Cue, Apple SVP of internet software and services. Apple Pay will work at 220,000 merchant locations in the U.S., including some of the biggest retailers in the country like [company]Walgreens[/company], [company]Macy’s[/company], [company]Subway[/company], Whole Foods Market and McDonald’s. McDonald’s will even begin accepting Apple Pay in its drive-thrus by the end of the year, Cue said. Not coincidentally, Cue’s list of point-of-sale partners doesn’t include any of the retailers in the Merchant Customer Exchange, a consortium including [company]Best Buy[/company], [company]Walmart[/company], [company]Target[/company] and dozens of other big brands launching their own QR-code smartphone payment system called CurrentC. Despite Apple’s claim to have cracked the mobile payments code, we could be seeing a showdown brewing between it and some of the country’s most powerful retail brands — at least in the smartphone point of sale space. On the m-commerce front, Apple is clearly expanding its online purchasing might beyond iTunes and its various online and app stores into other retail apps and websites. [company]PayPal[/company] has already been treading this turf since it acquired Braintree. Last month it launched a new One Touch payment service for mobile apps that stores your card credentials in the PayPal or Venmo apps, allowing consumers to instantly purchase goods or services with a single tap. Apple Pay looks to do the same, except it will draw from the virtual one-time card data stored in the iPhone’s hardware secure element. Apple has also signed up some big web commerce companies to take advantage of its in-app payments, including [company]Uber[/company], [company]OpenTable[/company] and even Target, which is part of the MCX consortium. 
While Target may be working on its own wallet for in-store payments, it appears to have no qualms with Apple making it easier for consumers to order its goods from their mobile phones.
/**
 * Created by eunderhi on 23/12/15.
 *
 * Static holder for the word/number databases used to generate test data.
 * All databases are loaded once from the bundled text files in the static
 * initializer, the first time this class is referenced.
 */
public class MainDataBase {
    public static TestDataBase quotes;
    public static TestDataBase nouns;
    public static TestDataBase verbs;
    public static TestDataBase names;
    public static TestDataBase adjectives;
    public static NumberDatabase numbers;
    public static TestDataBase locations;

    static {
        quotes = new TestDataBase("databases/quotes.txt");
        nouns = new TestDataBase("databases/nouns.txt");
        verbs = new TestDataBase("databases/verbs.txt");
        names = new TestDataBase("databases/names.txt");
        adjectives = new TestDataBase("databases/adjectives.txt");
        numbers = new NumberDatabase();
        locations = new TestDataBase("databases/locations.txt");
    }

    /** Returns an entry from the names database. */
    public static String getName() {
        return names.get();
    }

    /** Returns an entry from the nouns database. */
    public static String getNoun() {
        return nouns.get();
    }

    /** Returns an entry from the verbs database. */
    public static String getVerb() {
        return verbs.get();
    }

    /** Returns an entry from the adjectives database. */
    public static String getAdjective() {
        return adjectives.get();
    }

    /** Returns an entry from the quotes database. */
    public static String getQuote() {
        return quotes.get();
    }

    /** Returns an entry from the locations database. */
    public static String getLocation() {
        return locations.get();
    }
}
/**
 * Writes a JSDoc-style documentation comment block.
 *
 * <p>Pushes a "docs" formatting state, emits the comment opener, and sets a
 * {@code " * "} newline prefix so every line the runnable writes is framed as
 * comment text; the prefix is cleared and the comment closed afterwards.
 *
 * @param runnable Runnable that handles actually writing docs with the writer.
 * @return Returns the writer, to allow call chaining.
 */
TypeScriptWriter writeDocs(Runnable runnable) {
    pushState("docs");
    write("/**");
    setNewlinePrefix(" * ");
    runnable.run();
    setNewlinePrefix("");
    write(" */");
    popState();
    return this;
}
def solve(prices):
    """Return the minimum total cost of ordering all five dishes.

    Every dish's price is rounded up to the next multiple of 10 except the
    one ordered last. Sorting by -((x - 1) % 10) places the dish with the
    largest round-up waste (e.g. prices ending in 1) at the end, so it is
    the one charged at face value.

    :param prices: list of positive integer prices (expected length 5)
    :return: minimum total cost as an int
    """
    ordered = sorted(prices, key=lambda x: -((x - 1) % 10))
    total = 0
    for p in ordered[:-1]:
        # Round up to the next multiple of 10; exact multiples stay as-is.
        total += p if p % 10 == 0 else (p // 10 + 1) * 10
    return total + ordered[-1]


if __name__ == "__main__":
    # Read five prices from stdin, one per line, and print the answer.
    print(solve([int(input()) for _ in range(5)]))