focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Aggregates the co-grouped records into a {@link KTable}, materialized with the
 * supplied store configuration.
 *
 * <p>Convenience overload: forwards to the named variant using an empty
 * {@code NamedInternal}, so Streams generates the processor name.
 *
 * @param initializer  supplies the initial aggregate value
 * @param materialized state-store configuration for the result table
 * @return the aggregated table
 */
@Override
public KTable<K, VOut> aggregate(final Initializer<VOut> initializer,
                                 final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) {
    // Delegate to the fully-specified overload with no explicit name.
    return aggregate(initializer, NamedInternal.empty(), materialized);
}
/** aggregate(null) must be rejected with a {@link NullPointerException}. */
@Test public void shouldNotHaveNullInitializerOnAggregate() { assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(null)); }
/** Returns the cached serializable converter from {@code T} values to Beam {@link Row}s. */
public SerializableFunction<T, Row> getToRowFunction() { return toRowFunction; }
/** A Primitive proto must convert to the expected Beam Row via the schema's to-row function. */
@Test public void testPrimitiveProtoToRow() throws InvalidProtocolBufferException { ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(Primitive.getDescriptor()); SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction(); assertEquals(PRIMITIVE_ROW, toRow.apply(toDynamic(PRIMITIVE_PROTO))); }
/**
 * Constant-folds {@code bitxor} for two SMALLINT operands.
 *
 * @param first  left-hand SMALLINT constant
 * @param second right-hand SMALLINT constant
 * @return a SMALLINT constant holding {@code first ^ second}
 */
@ConstantFunction(name = "bitxor", argTypes = {SMALLINT, SMALLINT}, returnType = SMALLINT)
public static ConstantOperator bitxorSmallInt(ConstantOperator first, ConstantOperator second) {
    final short lhs = first.getSmallint();
    final short rhs = second.getSmallint();
    // XOR promotes to int in Java; narrow back to short for the SMALLINT result.
    return ConstantOperator.createSmallInt((short) (lhs ^ rhs));
}
/** x ^ x == 0: folding 10 ^ 10 as SMALLINT must yield 0. */
@Test public void bitxorSmallInt() { assertEquals(0, ScalarOperatorFunctions.bitxorSmallInt(O_SI_10, O_SI_10).getSmallint()); }
/**
 * Reacts to varbit/varp changes by creating, updating, or removing the matching
 * timer and counter infoboxes: raid overloads, vengeance, spellbook swap, heal
 * group, Arceuus spells and their cooldowns, poison/venom, antifire variants,
 * teleblock, charge, imbued heart, DFS, home/minigame teleports, stamina,
 * magic imbue, the divine potion family, smelling salts, menaphite remedy,
 * liquid adrenaline, farmer's affinity, god wars altar, curse of the moons,
 * colosseum doom, and moonlight potion.
 *
 * <p>Each branch checks the relevant {@code Varbits}/{@code VarPlayer} id plus
 * the user's config toggle before touching a timer. NOTE(review): the divine
 * potion branches {@code return} early when a stronger combined potion is
 * active, so the ordering of the blocks is significant — do not reorder.
 * The poison/antifire branches track the next server "tick boundary"
 * ({@code nextPoisonTick}, {@code nextAntifireTick}, …) so the displayed timer
 * stays aligned with the varbit's coarse-grained refresh cycle.
 */
@Subscribe public void onVarbitChanged(VarbitChanged event) { if (event.getVarbitId() == Varbits.IN_RAID) { removeVarTimer(OVERLOAD_RAID); removeGameTimer(PRAYER_ENHANCE); } if (event.getVarbitId() == Varbits.VENGEANCE_COOLDOWN && config.showVengeance()) { if (event.getValue() == 1) { createGameTimer(VENGEANCE); } else { removeGameTimer(VENGEANCE); } } if (event.getVarbitId() == Varbits.SPELLBOOK_SWAP && config.showSpellbookSwap()) { if (event.getValue() == 1) { createGameTimer(SPELLBOOK_SWAP); } else { removeGameTimer(SPELLBOOK_SWAP); } } if (event.getVarbitId() == Varbits.HEAL_GROUP_COOLDOWN && config.showHealGroup()) { if (event.getValue() == 1) { createGameTimer(HEAL_GROUP); } else { removeGameTimer(HEAL_GROUP); } } if (event.getVarbitId() == Varbits.DEATH_CHARGE_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(DEATH_CHARGE_COOLDOWN); } else { removeGameTimer(DEATH_CHARGE_COOLDOWN); } } if (event.getVarbitId() == Varbits.CORRUPTION_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(CORRUPTION_COOLDOWN); } else { removeGameTimer(CORRUPTION_COOLDOWN); } } if (event.getVarbitId() == Varbits.RESURRECT_THRALL_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(RESURRECT_THRALL_COOLDOWN); } else { removeGameTimer(RESURRECT_THRALL_COOLDOWN); } } if (event.getVarbitId() == Varbits.SHADOW_VEIL_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(SHADOW_VEIL_COOLDOWN); } else { removeGameTimer(SHADOW_VEIL_COOLDOWN); } } if (event.getVarbitId() == Varbits.WARD_OF_ARCEUUS_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(WARD_OF_ARCEUUS_COOLDOWN); } else { removeGameTimer(WARD_OF_ARCEUUS_COOLDOWN); } } if (event.getVarbitId() == Varbits.VENGEANCE_ACTIVE && config.showVengeanceActive()) { updateVarCounter(VENGEANCE_ACTIVE, event.getValue()); } if (event.getVarbitId() == 
Varbits.DEATH_CHARGE && config.showArceuus()) { if (event.getValue() == 1) { createGameTimer(DEATH_CHARGE, Duration.of(client.getRealSkillLevel(Skill.MAGIC), RSTimeUnit.GAME_TICKS)); } else { removeGameTimer(DEATH_CHARGE); } } if (event.getVarbitId() == Varbits.RESURRECT_THRALL && event.getValue() == 0 && config.showArceuus()) { removeGameTimer(RESURRECT_THRALL); } if (event.getVarbitId() == Varbits.SHADOW_VEIL && event.getValue() == 0 && config.showArceuus()) { removeGameTimer(SHADOW_VEIL); } if (event.getVarpId() == VarPlayer.POISON && config.showAntiPoison()) { final int poisonVarp = event.getValue(); final int tickCount = client.getTickCount(); if (poisonVarp == 0) { nextPoisonTick = -1; } else if (nextPoisonTick - tickCount <= 0) { nextPoisonTick = tickCount + POISON_TICK_LENGTH; } updateVarTimer(ANTIPOISON, event.getValue(), i -> i >= 0 || i < VENOM_VALUE_CUTOFF, i -> nextPoisonTick - tickCount + Math.abs((i + 1) * POISON_TICK_LENGTH)); updateVarTimer(ANTIVENOM, event.getValue(), i -> i >= VENOM_VALUE_CUTOFF, i -> nextPoisonTick - tickCount + Math.abs((i + 1 - VENOM_VALUE_CUTOFF) * POISON_TICK_LENGTH)); } if ((event.getVarbitId() == Varbits.NMZ_OVERLOAD_REFRESHES_REMAINING || event.getVarbitId() == Varbits.COX_OVERLOAD_REFRESHES_REMAINING) && config.showOverload()) { final int overloadVarb = event.getValue(); final int tickCount = client.getTickCount(); if (overloadVarb <= 0) { nextOverloadRefreshTick = -1; } else if (nextOverloadRefreshTick - tickCount <= 0) { nextOverloadRefreshTick = tickCount + OVERLOAD_TICK_LENGTH; } GameTimer overloadTimer = client.getVarbitValue(Varbits.IN_RAID) == 1 ? 
OVERLOAD_RAID : OVERLOAD; updateVarTimer(overloadTimer, overloadVarb, i -> nextOverloadRefreshTick - tickCount + (i - 1) * OVERLOAD_TICK_LENGTH); } if (event.getVarbitId() == Varbits.TELEBLOCK && config.showTeleblock()) { updateVarTimer(TELEBLOCK, event.getValue() - 100, i -> i <= 0, IntUnaryOperator.identity()); } if (event.getVarpId() == VarPlayer.CHARGE_GOD_SPELL && config.showCharge()) { updateVarTimer(CHARGE, event.getValue(), i -> i * 2); } if (event.getVarbitId() == Varbits.IMBUED_HEART_COOLDOWN && config.showImbuedHeart()) { updateVarTimer(IMBUEDHEART, event.getValue(), i -> i * 10); } if (event.getVarbitId() == Varbits.DRAGONFIRE_SHIELD_COOLDOWN && config.showDFSSpecial()) { updateVarTimer(DRAGON_FIRE_SHIELD, event.getValue(), i -> i * 8); } if (event.getVarpId() == LAST_HOME_TELEPORT && config.showHomeMinigameTeleports()) { checkTeleport(LAST_HOME_TELEPORT); } if (event.getVarpId() == LAST_MINIGAME_TELEPORT && config.showHomeMinigameTeleports()) { checkTeleport(LAST_MINIGAME_TELEPORT); } if (event.getVarbitId() == Varbits.RUN_SLOWED_DEPLETION_ACTIVE || event.getVarbitId() == Varbits.STAMINA_EFFECT || event.getVarbitId() == Varbits.RING_OF_ENDURANCE_EFFECT) { // staminaEffectActive is checked to match https://github.com/Joshua-F/cs2-scripts/blob/741271f0c3395048c1bad4af7881a13734516adf/scripts/%5Bproc%2Cbuff_bar_get_value%5D.cs2#L25 int staminaEffectActive = client.getVarbitValue(Varbits.RUN_SLOWED_DEPLETION_ACTIVE); int staminaPotionEffectVarb = client.getVarbitValue(Varbits.STAMINA_EFFECT); int enduranceRingEffectVarb = client.getVarbitValue(Varbits.RING_OF_ENDURANCE_EFFECT); final int totalStaminaEffect = staminaPotionEffectVarb + enduranceRingEffectVarb; if (staminaEffectActive == 1 && config.showStamina()) { updateVarTimer(STAMINA, totalStaminaEffect, i -> i * 10); } } if (event.getVarbitId() == Varbits.ANTIFIRE && config.showAntiFire()) { final int antifireVarb = event.getValue(); final int tickCount = client.getTickCount(); if (antifireVarb == 0) { 
nextAntifireTick = -1; } else if (nextAntifireTick - tickCount <= 0) { nextAntifireTick = tickCount + ANTIFIRE_TICK_LENGTH; } updateVarTimer(ANTIFIRE, antifireVarb, i -> nextAntifireTick - tickCount + (i - 1) * ANTIFIRE_TICK_LENGTH); } if (event.getVarbitId() == Varbits.SUPER_ANTIFIRE && config.showAntiFire()) { final int superAntifireVarb = event.getValue(); final int tickCount = client.getTickCount(); if (superAntifireVarb == 0) { nextSuperAntifireTick = -1; } else if (nextSuperAntifireTick - tickCount <= 0) { nextSuperAntifireTick = tickCount + SUPERANTIFIRE_TICK_LENGTH; } updateVarTimer(SUPERANTIFIRE, event.getValue(), i -> nextSuperAntifireTick - tickCount + (i - 1) * SUPERANTIFIRE_TICK_LENGTH); } if (event.getVarbitId() == Varbits.MAGIC_IMBUE && config.showMagicImbue()) { updateVarTimer(MAGICIMBUE, event.getValue(), i -> i * 10); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_ATTACK && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) { return; } updateVarTimer(DIVINE_SUPER_ATTACK, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_STRENGTH && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) { return; } updateVarTimer(DIVINE_SUPER_STRENGTH, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_DEFENCE && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue() || client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue() || client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue() // When drinking a dose of moonlight potion while already under its effects, desync between // Varbits.MOONLIGHT_POTION and Varbits.DIVINE_SUPER_DEFENCE can occur, with the latter being 1 tick // greater || client.getVarbitValue(Varbits.MOONLIGHT_POTION) >= event.getValue()) { return; } if (client.getVarbitValue(Varbits.MOONLIGHT_POTION) < 
event.getValue()) { removeVarTimer(MOONLIGHT_POTION); } updateVarTimer(DIVINE_SUPER_DEFENCE, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_RANGING && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue()) { return; } updateVarTimer(DIVINE_RANGING, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_MAGIC && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue()) { return; } updateVarTimer(DIVINE_MAGIC, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_COMBAT && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_ATTACK) == event.getValue()) { removeVarTimer(DIVINE_SUPER_ATTACK); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_STRENGTH) == event.getValue()) { removeVarTimer(DIVINE_SUPER_STRENGTH); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_SUPER_COMBAT, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_BASTION && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_RANGING) == event.getValue()) { removeVarTimer(DIVINE_RANGING); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_BASTION, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_BATTLEMAGE && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_MAGIC) == event.getValue()) { removeVarTimer(DIVINE_MAGIC); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_BATTLEMAGE, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.BUFF_STAT_BOOST && config.showOverload()) { 
updateVarTimer(SMELLING_SALTS, event.getValue(), i -> i * 25); } if (event.getVarbitId() == Varbits.MENAPHITE_REMEDY && config.showMenaphiteRemedy()) { updateVarTimer(MENAPHITE_REMEDY, event.getValue(), i -> i * 25); } if (event.getVarbitId() == Varbits.LIQUID_ADERNALINE_ACTIVE && event.getValue() == 0 && config.showLiquidAdrenaline()) { removeGameTimer(LIQUID_ADRENALINE); } if (event.getVarbitId() == Varbits.FARMERS_AFFINITY && config.showFarmersAffinity()) { updateVarTimer(FARMERS_AFFINITY, event.getValue(), i -> i * 20); } if (event.getVarbitId() == Varbits.GOD_WARS_ALTAR_COOLDOWN && config.showGodWarsAltar()) { updateVarTimer(GOD_WARS_ALTAR, event.getValue(), i -> i * 100); } if (event.getVarbitId() == Varbits.CURSE_OF_THE_MOONS && config.showCurseOfTheMoons()) { final int regionID = WorldPoint.fromLocal(client, client.getLocalPlayer().getLocalLocation()).getRegionID(); if (regionID == ECLIPSE_MOON_REGION_ID) { updateVarCounter(CURSE_OF_THE_MOONS_ECLIPSE, event.getValue()); } else { updateVarCounter(CURSE_OF_THE_MOONS_BLUE, event.getValue()); } } if (event.getVarbitId() == Varbits.COLOSSEUM_DOOM && config.showColosseumDoom()) { updateVarCounter(COLOSSEUM_DOOM, event.getValue()); } if (event.getVarbitId() == Varbits.MOONLIGHT_POTION && config.showMoonlightPotion()) { int moonlightValue = event.getValue(); // Increase the timer by 1 tick in case of desync due to drinking a dose of moonlight potion while already // under its effects. Otherwise, the timer would be 1 tick shorter than it is meant to be. if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == moonlightValue + 1) { moonlightValue++; } updateVarTimer(MOONLIGHT_POTION, moonlightValue, IntUnaryOperator.identity()); } }
/** Setting DEATH_CHARGE_COOLDOWN to 1 (with the Arceuus cooldown toggle on) must add a DEATH_CHARGE_COOLDOWN timer infobox. */
@Test public void testDeathChargeCooldown() { when(timersAndBuffsConfig.showArceuusCooldown()).thenReturn(true); VarbitChanged varbitChanged = new VarbitChanged(); varbitChanged.setVarbitId(Varbits.DEATH_CHARGE_COOLDOWN); varbitChanged.setValue(1); timersAndBuffsPlugin.onVarbitChanged(varbitChanged); ArgumentCaptor<InfoBox> ibcaptor = ArgumentCaptor.forClass(InfoBox.class); verify(infoBoxManager).addInfoBox(ibcaptor.capture()); TimerTimer infoBox = (TimerTimer) ibcaptor.getValue(); assertEquals(GameTimer.DEATH_CHARGE_COOLDOWN, infoBox.getTimer()); }
/**
 * Tries to resolve the given fully qualified type name from the classpath.
 *
 * @param typeName fully qualified class name, e.g. {@code java.lang.Object}
 * @return the imported {@link JavaClass} if the corresponding {@code .class}
 *         resource exists and can be imported, otherwise {@link Optional#empty()}
 */
@Override
public Optional<JavaClass> tryResolve(String typeName) {
    // Translate the type name into a classpath resource path, e.g. "java/lang/Object.class".
    String typeFile = typeName.replace(".", "/") + ".class";
    // flatMap replaces the former isPresent()/get() ternary — same behavior, idiomatic Optional chaining.
    return tryGetUriOf(typeFile).flatMap(classUriImporter::tryImport);
}
/** Resolving java.lang.Object by name must import the class found at its classpath URI. */
@Test public void finds_uri_of_class_on_classpath() { JavaClass expectedJavaClass = importClassWithContext(Object.class); when(uriImporter.tryImport(TestUtils.uriOf(Object.class))).thenReturn(Optional.of(expectedJavaClass)); Optional<JavaClass> result = resolver.tryResolve(Object.class.getName()); assertThat(result).contains(expectedJavaClass); }
/**
 * Returns a copy of this data representing the smallest value strictly greater
 * than this one with the same length: the last non-0xFF byte is incremented and
 * any trailing 0xFF bytes are left untouched.
 *
 * @return the incremented copy, or {@link #POSITIVE_INFINITY} when every byte
 *         is 0xFF (or the data is empty), since no same-length successor exists
 * @throws IOException if copying the underlying data fails
 */
public RandomAccessData increment() throws IOException {
    RandomAccessData copy = copy();
    // Scan from least-significant (last) byte towards the front.
    for (int i = copy.size - 1; i >= 0; --i) {
        final byte current = copy.buffer[i];
        if (current == UnsignedBytes.MAX_VALUE) {
            // 0xFF would wrap around; try the next-more-significant byte instead.
            continue;
        }
        copy.buffer[i] = UnsignedBytes.checkedCast(UnsignedBytes.toInt(current) + 1L);
        return copy;
    }
    // All bytes were 0xFF (or there were none): no finite successor.
    return POSITIVE_INFINITY;
}
/** increment() bumps the last non-0xFF byte, keeps trailing 0xFF bytes, and yields POSITIVE_INFINITY when no successor exists. */
@Test public void testIncrement() throws Exception { assertEquals( new RandomAccessData(new byte[] {0x00, 0x01}), new RandomAccessData(new byte[] {0x00, 0x00}).increment()); assertEquals( new RandomAccessData(new byte[] {0x01, UnsignedBytes.MAX_VALUE}), new RandomAccessData(new byte[] {0x00, UnsignedBytes.MAX_VALUE}).increment()); // Test for positive infinity assertSame(RandomAccessData.POSITIVE_INFINITY, new RandomAccessData(new byte[0]).increment()); assertSame( RandomAccessData.POSITIVE_INFINITY, new RandomAccessData(new byte[] {UnsignedBytes.MAX_VALUE}).increment()); assertSame(RandomAccessData.POSITIVE_INFINITY, RandomAccessData.POSITIVE_INFINITY.increment()); }
/**
 * Parses {@code value} as JSON and emits one extractor {@link Result} per
 * top-level entry; results carry no capture positions (hence {@code -1, -1}).
 *
 * @param value raw JSON document to extract from
 * @return one result per extracted key/value pair, in extraction order
 * @throws ExtractorException if the JSON cannot be parsed
 */
@Override
protected Result[] run(String value) {
    final Map<String, Object> extractedJson;
    try {
        extractedJson = extractJson(value);
    } catch (IOException e) {
        throw new ExtractorException(e);
    }
    // Stream keeps the map's entry iteration order, matching the original loop.
    return extractedJson.entrySet().stream()
            .map(entry -> new Result(entry.getValue(), entry.getKey(), -1, -1))
            .toArray(Result[]::new);
}
/** With flatten=true and kv_separator ":", a nested object must flatten into a single "key:value, ..." result string. */
@Test public void testRunWithFlattenedObjectAndDifferentKVSeparator() throws Exception { final JsonExtractor jsonExtractor = new JsonExtractor(new MetricRegistry(), "json", "title", 0L, Extractor.CursorStrategy.COPY, "source", "target", ImmutableMap.<String, Object>of("flatten", true, "kv_separator", ":"), "user", Collections.<Converter>emptyList(), Extractor.ConditionType.NONE, ""); final String value = "{\"object\": {\"text\": \"foobar\", \"number\": 1234.5678, \"bool\": true, \"nested\": {\"text\": \"foobar\"}}}"; final Extractor.Result[] results = jsonExtractor.run(value); assertThat(results).contains( new Extractor.Result("text:foobar, number:1234.5678, bool:true, nested:{text=foobar}", "object", -1, -1) ); }
/**
 * No-op override: returns an empty string instead of a timer id.
 * NOTE(review): presumably the tracking-disabled implementation — confirm against the enabled API's contract.
 */
@Override public String trackTimerStart(String eventName) { return ""; }
/** With tracking disabled, a timer start/end pair must not emit any track event (the callback fails the test if one fires). */
@Test public void trackTimerStart() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.trackTimerStart("TestTimerEvent"); mSensorsAPI.trackTimerEnd("TestTimerEvent"); }
/**
 * Accepts one element into the sink: converts it, appends it to the request
 * buffer, and triggers a non-blocking flush.
 *
 * <p>Provides backpressure by flushing (blocking) while the buffer is at or
 * over {@code maxBufferedRequests} before admitting the new entry.
 *
 * @throws IOException if a flush fails
 * @throws InterruptedException if a blocking flush is interrupted
 */
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
    // Backpressure: drain until there is room for the incoming entry.
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    addEntryToBuffer(elementConverter.apply(element, context), false);
    // Opportunistic flush; returns without blocking the caller.
    nonBlockingFlush();
}
/** With maxBatchSize=3: two writes stay buffered (snapshot shows both, nothing sent); the third triggers an automatic flush, emptying the snapshot and sending all three. */
@Test public void testThatSnapshotsAreTakenOfBufferCorrectlyBeforeAndAfterAutomaticFlush() throws IOException, InterruptedException { AsyncSinkWriterImpl sink = new AsyncSinkWriterImplBuilder().context(sinkInitContext).maxBatchSize(3).build(); sink.write("25"); sink.write("55"); assertThatBufferStatesAreEqual(sink.wrapRequests(25, 55), getWriterState(sink)); assertThat(res.size()).isEqualTo(0); sink.write("75"); assertThatBufferStatesAreEqual(BufferedRequestState.emptyState(), getWriterState(sink)); assertThat(res.size()).isEqualTo(3); }
/** Returns the cached serializable converter from Beam {@link Row}s back to {@code T} values. */
public SerializableFunction<Row, T> getFromRowFunction() { return fromRowFunction; }
/** A well-known-types Row must convert back to the expected proto (compared via toString). */
@Test public void testWktRowToProto() { ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(WktMessage.getDescriptor()); SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction(); assertEquals(WKT_MESSAGE_PROTO.toString(), fromRow.apply(WKT_MESSAGE_ROW).toString()); }
/** Entry point for the fluent schema-builder API: a fresh {@code TypeBuilder} with an empty name context. */
public static TypeBuilder<Schema> builder() { return new TypeBuilder<>(new SchemaCompletion(), new NameContext()); }
/** Schema props accumulate by key: setting "p2" twice keeps only the last value, so two distinct props remain. */
@Test void props() { Schema s = SchemaBuilder.builder().intBuilder().prop("p1", "v1").prop("p2", "v2").prop("p2", "v2real") // overwrite .endInt(); int size = s.getObjectProps().size(); assertEquals(2, size); assertEquals("v1", s.getProp("p1")); assertEquals("v2real", s.getProp("p2")); }
/**
 * Writes the given text to the servlet response under the supplied content type,
 * flushing and then quietly closing the response writer.
 *
 * @param response    target HTTP response
 * @param text        body text to write
 * @param contentType value for the Content-Type header
 * @throws UtilException wrapping any {@link IOException} raised by the writer
 */
public static void write(HttpServletResponse response, String text, String contentType) {
    response.setContentType(contentType);
    Writer writer = null;
    try {
        writer = response.getWriter();
        writer.write(text);
        writer.flush();
    } catch (IOException e) {
        // Wrap checked IO failures in the project's unchecked exception type.
        throw new UtilException(e);
    } finally {
        // Quiet close — presumably IoUtil.close tolerates null and swallows close errors; verify.
        IoUtil.close(writer);
    }
}
/** Manual (disabled) smoke test for JakartaServletUtil.write: streams a UTF-8 PDF attachment to the response. */
@Test
@Disabled
public void jakartaWriteTest() {
    jakarta.servlet.http.HttpServletResponse response = null;
    byte[] bytes = StrUtil.utf8Bytes("地球是我们共同的家园,需要大家珍惜.");
    // Download the file.
    // This cannot be tested directly; the code lives here for convenient manual invocation.
    //noinspection ConstantConditions
    if (response != null) {
        String fileName = "签名文件.pdf";
        String contentType = "application/pdf"; // alternatives: application/octet-stream, image/jpeg, image/gif
        // Must be set, otherwise the output is garbled — Safari still garbles it regardless.
        response.setCharacterEncoding(StandardCharsets.UTF_8.name());
        JakartaServletUtil.write(response, new ByteArrayInputStream(bytes), contentType, fileName);
    }
}
/** Returns the configured DNS resolution cache, or {@code null} if none was set. */
@Nullable public DnsCache resolveCache() { return resolveCache; }
/** resolveCache() defaults to null and reflects the cache set on the builder. */
@Test void resolveCache() { assertThat(builder.build().resolveCache()).isNull(); TestDnsCache resolveCache = new TestDnsCache(); builder.resolveCache(resolveCache); assertThat(builder.build().resolveCache()).isEqualTo(resolveCache); }
/**
 * Decides whether a SELECT must be routed through SQL federation, collecting
 * the statement's sharded data nodes into {@code includedDataNodes} as a side
 * effect.
 *
 * <p>Federation is required for subqueries, HAVING, combine (UNION etc.), or
 * partial-distinct aggregation, and for join queries across binding groups or
 * data sources; it is skipped when no sharded tables are involved, when there
 * is no join, or when all tables share one data source.
 */
@Override
public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters,
                      final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
                      final ShardingRule rule, final Collection<DataNode> includedDataNodes) {
    Collection<String> tableNames = rule.getShardingLogicTableNames(selectStatementContext.getTablesContext().getTableNames());
    // No sharded tables referenced: federation never applies.
    if (tableNames.isEmpty()) {
        return false;
    }
    // Side effect: expose the involved data nodes to the caller even if we return false later.
    includedDataNodes.addAll(getTableDataNodes(rule, tableNames, database));
    // Constructs the pushdown engine cannot handle: always federate.
    if (selectStatementContext.isContainsSubquery() || selectStatementContext.isContainsHaving()
            || selectStatementContext.isContainsCombine() || selectStatementContext.isContainsPartialDistinctAggregation()) {
        return true;
    }
    // No join, or everything co-located in one data source: plain pushdown suffices.
    if (!selectStatementContext.isContainsJoinQuery() || rule.isAllTablesInSameDataSource(tableNames)) {
        return false;
    }
    // Single-table self-join that is not a binding-table join must be federated.
    // NOTE(review): isContainsJoinQuery() is already guaranteed true here (previous guard) — redundant re-check.
    if (1 == tableNames.size() && selectStatementContext.isContainsJoinQuery() && !rule.isAllBindingTables(database, selectStatementContext, tableNames)) {
        return true;
    }
    // Multi-table join: federate unless all tables are in one binding-table group.
    return tableNames.size() > 1 && !rule.isAllBindingTables(database, selectStatementContext, tableNames);
}
/** A statement containing a subquery must be federated, and the decision must still populate all 4 data nodes. */
@Test void assertDecideWhenContainsSameShardingCondition() { SelectStatementContext select = createStatementContext(); when(select.isContainsSubquery()).thenReturn(true); Collection<DataNode> includedDataNodes = new HashSet<>(); ShardingRule shardingRule = createShardingRule(); assertTrue(new ShardingSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(shardingRule), shardingRule, includedDataNodes)); assertThat(includedDataNodes.size(), is(4)); }
/**
 * Returns an adjusted copy with the given field set to {@code newValue}: the
 * change is applied to the wrapped {@code offsetTime} and re-wrapped via
 * {@code getNewZoneOffset}.
 */
@Override public Temporal with(TemporalField field, long newValue) { return getNewZoneOffset(offsetTime.with(field, newValue)); }
/** with(HOUR_OF_DAY, 3) must produce a ZoneTime whose wrapped offset time has the adjusted hour. */
@Test void withTemporalField() { ZoneTime expected = new ZoneTime(offsetTime.with(ChronoField.HOUR_OF_DAY, 3), zoneId, false); assertEquals(expected, zoneTime.with(ChronoField.HOUR_OF_DAY, 3)); }
/**
 * Converts a {@link MusicRelease} into its protobuf {@code MusicAlbum} form.
 *
 * <p>A {@code null} release yields an empty album message; blank title or ICPN
 * and a null/empty artist list are simply omitted from the result.
 *
 * @param release source release, may be {@code null}
 * @return the populated (possibly empty) album message
 */
public static MusicProtocol.MusicAlbum convertMusicReleaseToMusicAlbum(MusicRelease release) {
    final MusicProtocol.MusicAlbum.Builder builder = MusicProtocol.MusicAlbum.newBuilder();
    if (release == null) {
        return builder.build();
    }
    if (StringUtils.isNotBlank(release.getTitle())) {
        builder.setTitle(release.getTitle());
    }
    if (StringUtils.isNotBlank(release.getIcpnCode())) {
        builder.setIcpnCode(release.getIcpnCode());
    }
    if (release.getByArtists() != null && !release.getByArtists().isEmpty()) {
        builder.addAllAlbumArtist(convertMusicGroupsToMusicArtists(release.getByArtists()));
    }
    return builder.build();
}
/**
 * Covers convertMusicReleaseToMusicAlbum for: null release (empty message),
 * missing title, missing ICPN, missing artist list, and the fully-populated case —
 * each absent field must be unset in the resulting proto while the rest convert.
 */
@Test public void testConvertMusicReleaseToMusicAlbum() { String expectedArtistName = "Expected Artist Name"; MusicGroup musicGroup = new MusicGroup(expectedArtistName); String expectedICPN = "1234567890abcdefg"; String expectedTitle = "Expected Album Title"; MusicProtocol.MusicAlbum nullMusicAlbum = AppleMusicPlaylistConverter.convertMusicReleaseToMusicAlbum(null); Assertions.assertNotNull(nullMusicAlbum); Assertions.assertFalse(nullMusicAlbum.hasTitle()); Assertions.assertFalse(nullMusicAlbum.hasIcpnCode()); Assertions.assertEquals(nullMusicAlbum.getAlbumArtistCount(), 0); MusicRelease emptyTitleMusicRelease = new MusicRelease(expectedICPN, null, List.of(musicGroup)); MusicProtocol.MusicAlbum emptyTitleMusicAlbum = AppleMusicPlaylistConverter.convertMusicReleaseToMusicAlbum(emptyTitleMusicRelease); Assertions.assertNotNull(emptyTitleMusicAlbum); Assertions.assertTrue(emptyTitleMusicAlbum.hasIcpnCode()); Assertions.assertEquals(emptyTitleMusicAlbum.getIcpnCode(), expectedICPN); Assertions.assertFalse(emptyTitleMusicAlbum.hasTitle()); Assertions.assertEquals(emptyTitleMusicAlbum.getAlbumArtistCount(), 1); MusicRelease emptyICPNMusicRelease = new MusicRelease(null, expectedTitle, List.of(musicGroup)); MusicProtocol.MusicAlbum emptyICPNMusicAlbum = AppleMusicPlaylistConverter.convertMusicReleaseToMusicAlbum(emptyICPNMusicRelease); Assertions.assertNotNull(emptyICPNMusicAlbum); Assertions.assertFalse(emptyICPNMusicAlbum.hasIcpnCode()); Assertions.assertTrue(emptyICPNMusicAlbum.hasTitle()); Assertions.assertEquals(emptyICPNMusicAlbum.getTitle(), expectedTitle); Assertions.assertEquals(emptyICPNMusicAlbum.getAlbumArtistCount(), 1); MusicRelease emptyByArtistsMusicRelease = new MusicRelease(expectedICPN, expectedTitle, null); MusicProtocol.MusicAlbum emptyByArtistsMusicAlbum = AppleMusicPlaylistConverter.convertMusicReleaseToMusicAlbum(emptyByArtistsMusicRelease); Assertions.assertNotNull(emptyByArtistsMusicAlbum); 
Assertions.assertTrue(emptyByArtistsMusicAlbum.hasIcpnCode()); Assertions.assertEquals(emptyByArtistsMusicAlbum.getIcpnCode(), expectedICPN); Assertions.assertTrue(emptyByArtistsMusicAlbum.hasTitle()); Assertions.assertEquals(emptyByArtistsMusicAlbum.getTitle(), expectedTitle); Assertions.assertEquals(emptyByArtistsMusicAlbum.getAlbumArtistCount(), 0); MusicRelease musicRelease = new MusicRelease(expectedICPN, expectedTitle, List.of(musicGroup)); MusicProtocol.MusicAlbum musicAlbum = AppleMusicPlaylistConverter.convertMusicReleaseToMusicAlbum(musicRelease); Assertions.assertNotNull(musicAlbum); Assertions.assertTrue(musicAlbum.hasIcpnCode()); Assertions.assertEquals(musicAlbum.getIcpnCode(), expectedICPN); Assertions.assertTrue(musicAlbum.hasTitle()); Assertions.assertEquals(musicAlbum.getTitle(), expectedTitle); Assertions.assertEquals(musicAlbum.getAlbumArtistCount(), 1); }
/**
 * Computes the metrics-query window for the Nth canary judgement.
 *
 * <p>The window starts at the request's start time (or "now" when unset) and
 * ends {@code judgementNumber * judgementDuration} later. The warm-up offset is
 * added only when no explicit end time was supplied. When a look-back is
 * configured, the start is recomputed from the end to give a sliding window.
 *
 * @param config           the canary analysis request supplying times/offsets
 * @param judgementNumber  1-based index of the judgement being scoped
 * @param judgementDuration duration of a single judgement interval
 * @return the resolved start/end pair for this judgement
 */
protected ScopeTimeConfig calculateStartAndEndForJudgement(
    CanaryAnalysisExecutionRequest config, long judgementNumber, Duration judgementDuration) {
  Duration warmupDuration = config.getBeginCanaryAnalysisAfterAsDuration();
  // Offset of this judgement's end relative to the analysis start.
  Duration offset = judgementDuration.multipliedBy(judgementNumber);
  ScopeTimeConfig scopeTimeConfig = new ScopeTimeConfig();
  // Fall back to the injected clock's "now" when no start time was supplied.
  Instant startTime = Optional.ofNullable(config.getStartTime()).orElse(now(clock));
  scopeTimeConfig.start = startTime;
  scopeTimeConfig.end = startTime.plus(offset);
  // Warm-up shifts the whole window only when the caller did not pin an end time.
  if (config.getEndTime() == null) {
    scopeTimeConfig.start = scopeTimeConfig.start.plus(warmupDuration);
    scopeTimeConfig.end = scopeTimeConfig.end.plus(warmupDuration);
  }
  // If the look back is defined, use it to recalculate the start time, this is used to do sliding
  // window judgements
  if (config.getLookBackAsInstant().isAfter(ZERO_AS_INSTANT)) {
    scopeTimeConfig.start = scopeTimeConfig.end.minus(config.getLookBackAsDuration());
  }
  return scopeTimeConfig;
}
/** With only a start ISO time defined (no end, no warm-up, no look-back), the window is [start, start + interval * duration]. */
@Test public void test_that_calculateStartAndEndForJudgement_has_expected_start_and_end_when_start_iso_only_is_defined() { int interval = 1; String startIso = "2018-12-17T20:56:39.689Z"; Duration lifetimeDuration = Duration.ofMinutes(3L); CanaryAnalysisExecutionRequest request = CanaryAnalysisExecutionRequest.builder() .scopes( ImmutableList.of( CanaryAnalysisExecutionRequestScope.builder().startTimeIso(startIso).build())) .build(); SetupAndExecuteCanariesStage.ScopeTimeConfig actual = stage.calculateStartAndEndForJudgement(request, interval, lifetimeDuration); assertEquals(Instant.parse(startIso), actual.getStart()); assertEquals(Instant.parse(startIso).plus(3L, ChronoUnit.MINUTES), actual.getEnd()); }
/** Returns the shared {@code SFChooseIdentityPanel} instance via the Rococoa Objective-C class bridge. */
public static SFChooseIdentityPanel sharedChooseIdentityPanel() { return Rococoa.createClass("SFChooseIdentityPanel", SFChooseIdentityPanel._Class.class).sharedChooseIdentityPanel(); }
/** The shared panel accessor must never return null. */
@Test public void sharedChooseIdentityPanel() { assertNotNull(SFChooseIdentityPanel.sharedChooseIdentityPanel()); }
/**
 * Handles a transactional offset commit by delegating to the
 * {@code offsetMetadataManager}.
 *
 * @param context the request context
 * @param request the TxnOffsetCommit request payload
 * @return the coordinator result carrying the response and records to append
 * @throws ApiException if the underlying manager rejects the commit
 */
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
    RequestContext context,
    TxnOffsetCommitRequestData request
) throws ApiException {
    return offsetMetadataManager.commitTransactionalOffset(context, request);
}
/** The shard must pass the context/request straight through to the offset metadata manager and return its result unchanged. */
@Test public void testCommitTransactionalOffset() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(new MockTime()), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); RequestContext context = requestContext(ApiKeys.TXN_OFFSET_COMMIT); TxnOffsetCommitRequestData request = new TxnOffsetCommitRequestData(); CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> result = new CoordinatorResult<>( Collections.emptyList(), new TxnOffsetCommitResponseData() ); when(offsetMetadataManager.commitTransactionalOffset( context, request )).thenReturn(result); assertEquals(result, coordinator.commitTransactionalOffset(context, request)); }
/**
 * Lists a directory in a versioned S3 bucket: pages through the versioned
 * object chunks, emitting one {@code Path} per object version (with a
 * per-object revision counter, delete-marker flag, ETag/checksum, storage
 * class) and resolving common-prefix "folders" concurrently on a thread pool.
 *
 * <p>If neither a directory placeholder nor any contents are found, raises
 * {@link NotfoundException} — directly for AWS hostnames, or after a second
 * one-entry prefix probe for S3-compatible services (e.g. Minio) that omit
 * prefix placeholders. The pool is always shut down in the {@code finally}.
 * NOTE(review): pagination state ({@code priorLastKey}/{@code priorLastVersionId})
 * and the revision-reset-on-new-key logic are order-sensitive; handle with care.
 */
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("list", concurrency); try { final String prefix = this.createPrefix(directory); if(log.isDebugEnabled()) { log.debug(String.format("List with prefix %s", prefix)); } final Path bucket = containerService.getContainer(directory); final AttributedList<Path> objects = new AttributedList<>(); String priorLastKey = null; String priorLastVersionId = null; long revision = 0L; String lastKey = null; boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory); do { final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked( bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER), new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"), priorLastKey, priorLastVersionId, false); // Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first. 
for(BaseVersionOrDeleteMarker marker : chunk.getItems()) { final String key = URIEncoder.decode(marker.getKey()); if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) { if(log.isDebugEnabled()) { log.debug(String.format("Skip placeholder key %s", key)); } hasDirectoryPlaceholder = true; continue; } final PathAttributes attr = new PathAttributes(); attr.setVersionId(marker.getVersionId()); if(!StringUtils.equals(lastKey, key)) { // Reset revision for next file revision = 0L; } attr.setRevision(++revision); attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest()); if(marker.isDeleteMarker()) { attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true))); } attr.setModificationDate(marker.getLastModified().getTime()); attr.setRegion(bucket.attributes().getRegion()); if(marker instanceof S3Version) { final S3Version object = (S3Version) marker; attr.setSize(object.getSize()); if(StringUtils.isNotBlank(object.getEtag())) { attr.setETag(StringUtils.remove(object.getEtag(), "\"")); // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is // not the MD5 of the object data. attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\""))); } if(StringUtils.isNotBlank(object.getStorageClass())) { attr.setStorageClass(object.getStorageClass()); } } final Path f = new Path(directory.isDirectory() ? 
directory : directory.getParent(), PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr); if(metadata) { f.withAttributes(attributes.find(f)); } objects.add(f); lastKey = key; } final String[] prefixes = chunk.getCommonPrefixes(); final List<Future<Path>> folders = new ArrayList<>(); for(String common : prefixes) { if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) { continue; } folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common))); } for(Future<Path> f : folders) { try { objects.add(Uninterruptibles.getUninterruptibly(f)); } catch(ExecutionException e) { log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage())); for(Throwable cause : ExceptionUtils.getThrowableList(e)) { Throwables.throwIfInstanceOf(cause, BackgroundException.class); } throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e)); } } priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null; priorLastVersionId = chunk.getNextVersionIdMarker(); listener.chunk(directory, objects); } while(priorLastKey != null); if(!hasDirectoryPlaceholder && objects.isEmpty()) { // Only for AWS if(S3Session.isAwsHostname(session.getHost().getHostname())) { if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) { if(log.isWarnEnabled()) { log.warn(String.format("No placeholder found for directory %s", directory)); } throw new NotfoundException(directory.getAbsolute()); } } else { // Handle missing prefix for directory placeholders in Minio final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked( bucket.isRoot() ? 
StringUtils.EMPTY : bucket.getName(), String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()), String.valueOf(Path.DELIMITER), 1, null, null, false); if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) { throw new NotfoundException(directory.getAbsolute()); } } } return objects; } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory); } finally { // Cancel future tasks pool.shutdown(false); } }
// Verifies that a directory placeholder created with the name "." appears in the
// versioned object listing of its parent bucket; cleans up the placeholder afterwards.
@Test
public void testListPlaceholderDot() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    // Create the "." placeholder directly under the bucket.
    final Path placeholder = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
            new Path(container, ".", EnumSet.of(Path.Type.directory)), new TransferStatus());
    // The versioned listing of the bucket must contain the new placeholder.
    assertTrue(new S3VersionedObjectListService(session, new S3AccessControlListFeature(session)).list(container,
            new DisabledListProgressListener()).contains(placeholder));
    // Clean up the placeholder created above.
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(placeholder),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Prepares the information needed to open a file: validates the openFile()
 * options, reconciles any caller-supplied {@link FileStatus} with the path,
 * and resolves split range, read policy, buffer size and readahead settings.
 *
 * @param path path of the file being opened
 * @param parameters options, mandatory keys and optional status from the builder
 * @param blockSize block size to use when a status has to be synthesized
 * @return the resolved {@code OpenFileInformation}
 * @throws IOException on validation failure (e.g. supplied status is a directory)
 */
@SuppressWarnings("ChainOfInstanceofChecks")
public OpenFileInformation prepareToOpenFile(
    final Path path,
    final OpenFileParameters parameters,
    final long blockSize) throws IOException {
  Configuration options = parameters.getOptions();
  Set<String> mandatoryKeys = parameters.getMandatoryKeys();
  // S3 Select is not supported in this release
  if (options.get(SelectConstants.SELECT_SQL, null) != null) {
    if (mandatoryKeys.contains(SelectConstants.SELECT_SQL)) {
      // mandatory option: fail with a specific message.
      throw new UnsupportedOperationException(SelectConstants.SELECT_UNSUPPORTED);
    } else {
      // optional; log once and continue
      LOG_NO_SQL_SELECT.warn(SelectConstants.SELECT_UNSUPPORTED);
    }
  }
  // choice of keys depends on open type
  rejectUnknownMandatoryKeys(
      mandatoryKeys,
      InternalConstants.S3A_OPENFILE_KEYS,
      "for " + path + " in file I/O");
  // where does a read end?
  long fileLength = LENGTH_UNKNOWN;
  // was a status passed in via a withStatus() invocation in
  // the builder API?
  FileStatus providedStatus = parameters.getStatus();
  S3AFileStatus fileStatus = null;
  if (providedStatus != null) {
    // there's a file status
    // make sure the file name matches -the rest of the path
    // MUST NOT be checked.
    Path providedStatusPath = providedStatus.getPath();
    checkArgument(path.getName().equals(providedStatusPath.getName()),
        "Filename mismatch between file being opened %s and"
            + " supplied filestatus %s",
        path, providedStatusPath);
    // make sure the status references a file
    if (providedStatus.isDirectory()) {
      throw new FileNotFoundException(
          "Supplied status references a directory " + providedStatus);
    }
    // build up the values
    long len = providedStatus.getLen();
    long modTime = providedStatus.getModificationTime();
    String versionId;
    String eTag;
    // can use this status to skip our own probes,
    LOG.debug("File was opened with a supplied FileStatus;"
        + " skipping getFileStatus call in open() operation: {}",
        providedStatus);
    // what type is the status (and hence: what information does it contain?)
    if (providedStatus instanceof S3AFileStatus) {
      // is it an S3AFileSystem status?
      S3AFileStatus st = (S3AFileStatus) providedStatus;
      versionId = st.getVersionId();
      eTag = st.getEtag();
    } else if (providedStatus instanceof S3ALocatedFileStatus) {
      // S3ALocatedFileStatus instance may supply etag and version.
      S3ALocatedFileStatus st = (S3ALocatedFileStatus) providedStatus;
      versionId = st.getVersionId();
      eTag = st.getEtag();
    } else {
      // it is another type.
      // build a status struct without etag or version.
      LOG.debug("Converting file status {}", providedStatus);
      versionId = null;
      eTag = null;
    }
    // Construct a new file status with the real path of the file.
    fileStatus = new S3AFileStatus(
        len, modTime, path, blockSize, username, eTag, versionId);
    // set the end of the read to the file length
    fileLength = fileStatus.getLen();
  }
  FSBuilderSupport builderSupport = new FSBuilderSupport(options);
  // determine start and end of file.
  long splitStart = builderSupport.getPositiveLong(FS_OPTION_OPENFILE_SPLIT_START, 0);
  // split end
  long splitEnd = builderSupport.getLong(
      FS_OPTION_OPENFILE_SPLIT_END, LENGTH_UNKNOWN);
  // A split start past the split end is meaningless (this also covers the case
  // where split end is still LENGTH_UNKNOWN): warn and start from 0.
  if (splitStart > 0 && splitStart > splitEnd) {
    LOG.warn("Split start {} is greater than split end {}, resetting",
        splitStart, splitEnd);
    splitStart = 0;
  }
  // read end is the open file value
  fileLength = builderSupport.getPositiveLong(FS_OPTION_OPENFILE_LENGTH, fileLength);
  // if the read end has come from options, use that
  // in creating a file status
  if (fileLength >= 0 && fileStatus == null) {
    fileStatus = createStatus(path, fileLength, blockSize);
  }
  // Build up the input policy.
  // seek policy from default, s3a opt or standard option
  // read from the FS standard option.
  Collection<String> policies = options.getStringCollection(FS_OPTION_OPENFILE_READ_POLICY);
  if (policies.isEmpty()) {
    // fall back to looking at the S3A-specific option.
    policies = options.getStringCollection(INPUT_FADVISE);
  }
  return new OpenFileInformation()
      .withAsyncDrainThreshold(
          builderSupport.getPositiveLong(ASYNC_DRAIN_THRESHOLD, defaultReadAhead))
      .withBufferSize(
          (int) builderSupport.getPositiveLong(
              FS_OPTION_OPENFILE_BUFFER_SIZE, defaultBufferSize))
      .withChangePolicy(changePolicy)
      .withFileLength(fileLength)
      .withInputPolicy(
          S3AInputPolicy.getFirstSupportedPolicy(policies, defaultInputPolicy))
      .withReadAheadRange(
          builderSupport.getPositiveLong(READAHEAD_RANGE, defaultReadAhead))
      .withSplitStart(splitStart)
      .withSplitEnd(splitEnd)
      .withStatus(fileStatus)
      .build();
}
// Verifies that a supplied FileStatus with a matching filename is accepted and its
// version id, etag and length are carried into the resulting OpenFileInformation.
@Test
public void testStatusWithValidFilename() throws Throwable {
    Path p = new Path("file:///tmp/" + TESTPATH.getName());
    ObjectAssert<OpenFileSupport.OpenFileInformation> asst =
        assertFileInfo(prepareToOpenFile(
            params(FS_OPTION_OPENFILE_LENGTH, "32")
                .withStatus(status(p, 4096))));
    asst.extracting(f -> f.getStatus().getVersionId())
        .isEqualTo("version");
    asst.extracting(f -> f.getStatus().getEtag())
        .isEqualTo("etag");
    // The status length (4096) wins over the openfile.length option (32).
    asst.extracting(f -> f.getStatus().getLen())
        .isEqualTo(4096L);
}
public String generateHeader() { StringBuilder builder = new StringBuilder(); append(builder, NLS.str("certificate.cert_type"), x509cert.getType()); append(builder, NLS.str("certificate.serialSigVer"), ((Integer) x509cert.getVersion()).toString()); // serial number append(builder, NLS.str("certificate.serialNumber"), "0x" + x509cert.getSerialNumber().toString(16)); // Get subject Principal subjectDN = x509cert.getSubjectDN(); append(builder, NLS.str("certificate.cert_subject"), subjectDN.getName()); append(builder, NLS.str("certificate.serialValidFrom"), x509cert.getNotBefore().toString()); append(builder, NLS.str("certificate.serialValidUntil"), x509cert.getNotAfter().toString()); return builder.toString(); }
// Checks that the generated header for a DSA certificate contains the expected
// type, hex serial number and subject fields.
@Test
public void decodeDSAKeyHeader() {
    assertThat(certificateManagerDSA.generateHeader())
            .contains("X.509")
            .contains("0x16420ba2")
            .contains("O=\"UJMRFVV CN=EDCVBGT C=TG\"");
}
/**
 * Loads the named JDBC driver using this class' own class loader.
 *
 * @param className fully qualified driver class name
 * @return the loaded driver
 * @throws DriverLoadException if the driver cannot be loaded
 */
public static Driver load(String className) throws DriverLoadException {
    // Delegate to the two-argument overload with our own class loader.
    return load(className, DriverLoader.class.getClassLoader());
}
// Loads the MySQL JDBC driver from an external JAR path and verifies it registers
// with the DriverManager; the driver is always deregistered afterwards.
@Test
public void testLoad_String_String() throws Exception {
    String className = "com.mysql.jdbc.Driver";
    //we know this is in target/test-classes
    //File testClassPath = (new File(this.getClass().getClassLoader().getResource("org.mortbay.jetty.jar").getPath())).getParentFile();
    File testClassPath = BaseTest.getResourceAsFile(this, "org.mortbay.jetty.jar").getParentFile();
    File driver = new File(testClassPath, "../../src/test/resources/mysql-connector-java-5.1.27-bin.jar");
    assertTrue("MySQL Driver JAR file not found in src/test/resources?", driver.isFile());
    Driver d = null;
    try {
        d = DriverLoader.load(className, driver.getAbsolutePath());
        d = DriverManager.getDriver("jdbc:mysql://localhost:3306/dependencycheck");
        assertNotNull(d);
    } finally {
        // Always deregister so later tests see a clean DriverManager.
        if (d != null) {
            DriverManager.deregisterDriver(d);
        }
    }
}
/**
 * Builds the lower-cased elevation tile file name for the tile containing the
 * given coordinate, e.g. composed from the tile's minimum lat/lon strings plus
 * the hemisphere markers and the fixed file-name suffix.
 */
String getFileName(double lat, double lon) {
    final int tileLat = getMinLatForTile(lat);
    final int tileLon = getMinLonForTile(lon);
    String name = getLatString(tileLat) + getNorthString(tileLat)
            + getLonString(tileLon) + getEastString(tileLon) + FILE_NAME_END;
    return toLowerCase(name);
}
// Verifies download-failure handling: a FileNotFoundException yields elevation 0 and
// still creates a cache file of the expected size, while a SocketTimeoutException
// propagates to the caller (after retrying with a short sleep).
@Test
public void testFileNotFound() {
    File file = new File(instance.getCacheDir(), instance.getFileName(46, -20) + ".gh");
    File zipFile = new File(instance.getCacheDir(), instance.getFileName(46, -20) + ".tif");
    file.delete();
    zipFile.delete();
    instance.setDownloader(new Downloader("test GH") {
        @Override
        public void downloadFile(String url, String toFile) throws IOException {
            throw new FileNotFoundException("xyz");
        }
    });
    assertEquals(0, instance.getEle(46, -20), 1);
    // file not found
    assertTrue(file.exists());
    assertEquals(1048676, file.length());
    instance.setDownloader(new Downloader("test GH") {
        @Override
        public void downloadFile(String url, String toFile) throws IOException {
            throw new SocketTimeoutException("xyz");
        }
    });
    try {
        instance.setSleep(30);
        instance.getEle(16, -20);
        fail();
    } catch (Exception ex) {
        // expected: timeout is rethrown to the caller
    }
    file.delete();
    zipFile.delete();
}
/**
 * Returns the comma-separated column names of all fields, computing and
 * caching the string on first use.
 */
public String getFieldNames() {
    String cached = this.fieldNames;
    if (StringUtils.isBlank(cached)) {
        // Lazily join the column names and remember the result.
        cached = this.fields.stream()
                .map(TableField::getColumnName)
                .collect(Collectors.joining(", "));
        this.fieldNames = cached;
    }
    return cached;
}
// Verifies getFieldNames() joins one, two and three column names with ", ".
@Test
void getFieldNamesTest() {
    TableInfo tableInfo;
    ConfigBuilder configBuilder;
    configBuilder = new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, GeneratorBuilder.strategyConfig(), null, null, null);
    tableInfo = new TableInfo(configBuilder, "name");
    tableInfo.addField(new TableField(configBuilder, "name").setColumnName("name"));
    Assertions.assertEquals(tableInfo.getFieldNames(), "name");
    configBuilder = new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, GeneratorBuilder.strategyConfig(), null, null, null);
    tableInfo = new TableInfo(configBuilder, "name");
    tableInfo.addField(new TableField(configBuilder, "name").setColumnName("name"));
    tableInfo.addField(new TableField(configBuilder, "age").setColumnName("age"));
    Assertions.assertEquals(tableInfo.getFieldNames(), "name, age");
    configBuilder = new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, GeneratorBuilder.strategyConfig(), null, null, null);
    tableInfo = new TableInfo(configBuilder, "name");
    tableInfo.addField(new TableField(configBuilder, "name").setColumnName("name"));
    tableInfo.addField(new TableField(configBuilder, "age").setColumnName("age"));
    tableInfo.addField(new TableField(configBuilder, "phone").setColumnName("phone"));
    Assertions.assertEquals(tableInfo.getFieldNames(), "name, age, phone");
}
/**
 * Returns an iterator over the result entries, or an empty iterator when no
 * entries were captured.
 */
@Override
public Iterator iterator() {
    return entries == null ? Collections.emptyIterator() : new ResultIterator();
}
// Verifies that iterating a non-empty ResultSet in ENTRY mode yields the
// original key/value pair.
@Test
public void testIterator_whenNotEmpty_IterationType_Entry() {
    List<Map.Entry> entries = new ArrayList<>();
    MapEntrySimple entry = new MapEntrySimple("key", "value");
    entries.add(entry);
    ResultSet resultSet = new ResultSet(entries, IterationType.ENTRY);
    Iterator<Map.Entry> iterator = resultSet.iterator();
    assertTrue(iterator.hasNext());
    Map.Entry entryFromIterator = iterator.next();
    assertEquals("key", entryFromIterator.getKey());
    assertEquals("value", entryFromIterator.getValue());
}
public void remove(ConnectorTaskId id) { final ScheduledFuture<?> task = committers.remove(id); if (task == null) return; try (LoggingContext loggingContext = LoggingContext.forTask(id)) { task.cancel(false); if (!task.isDone()) task.get(); } catch (CancellationException e) { // ignore log.trace("Offset commit thread was cancelled by another thread while removing connector task with id: {}", id); } catch (ExecutionException | InterruptedException e) { throw new ConnectException("Unexpected interruption in SourceTaskOffsetCommitter while removing task with id: " + id, e); } }
// Verifies that an InterruptedException while waiting for the committer task is
// surfaced to the caller as a ConnectException.
@Test
public void testRemoveTaskAndInterrupted() throws ExecutionException, InterruptedException {
    expectRemove();
    when(taskFuture.get()).thenThrow(new InterruptedException());
    committers.put(taskId, taskFuture);
    assertThrows(ConnectException.class, () -> committer.remove(taskId));
}
/**
 * Revises schema meta data by applying every registered
 * {@code MetaDataReviseEntry} (in SPI order) to the result of the previous one.
 *
 * @param originalMetaData schema meta data to revise
 * @return the fully revised schema meta data
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public SchemaMetaData revise(final SchemaMetaData originalMetaData) {
    SchemaMetaData revised = originalMetaData;
    // Each entry sees the output of the previous one.
    for (Entry<ShardingSphereRule, MetaDataReviseEntry> ruleAndEntry
            : OrderedSPILoader.getServices(MetaDataReviseEntry.class, rules).entrySet()) {
        revised = revise(revised, ruleAndEntry.getKey(), ruleAndEntry.getValue());
    }
    return revised;
}
// With no revise entries registered, revise() must return meta data equal to
// its input (same name and tables).
@Test
void assertReviseWithoutMetaDataReviseEntry() {
    SchemaMetaData schemaMetaData = new SchemaMetaData("expected", Collections.singleton(mock(TableMetaData.class)));
    SchemaMetaData actual = new SchemaMetaDataReviseEngine(
            Collections.emptyList(), new ConfigurationProperties(new Properties()),
            mock(DatabaseType.class), mock(DataSource.class)).revise(schemaMetaData);
    assertThat(actual.getName(), is(schemaMetaData.getName()));
    assertThat(actual.getTables(), is(schemaMetaData.getTables()));
}
/**
 * Converts the string value to the property's type. Date-typed index fields are
 * parsed with Lucene's DateTools; everything else is delegated to the parent
 * implementation.
 */
@Override
public Object convertToPropertyType(Class<?> entityType, String[] propertyPath, String value) {
    IndexValueFieldDescriptor descriptor = getValueFieldDescriptor(entityType, propertyPath);
    boolean isDateField = descriptor != null && Date.class == descriptor.type().dslArgumentClass();
    if (!isDateField) {
        // Unknown field or non-Date type: let the superclass handle conversion.
        return super.convertToPropertyType(entityType, propertyPath, value);
    }
    try {
        return DateTools.stringToDate(value);
    } catch (ParseException e) {
        throw new ParsingException(e);
    }
}
// An int-typed property must be converted from its string form to an Integer.
@Test
public void testConvertIntProperty() {
    assertThat(convertToPropertyType(TestEntity.class, "i", "42")).isEqualTo(42);
}
/**
 * Resolves a password from either a plain or an already-encrypted value.
 * Supplying both is an error. A plain password is encrypted; an encrypted one
 * is validated by attempting decryption and returned as-is. Validation errors
 * are recorded on {@code config}; returns {@code null} when neither value is
 * usable.
 */
public String deserialize(String password, String encryptedPassword, Validatable config) {
    final boolean hasPlain = isNotBlank(password);
    final boolean hasEncrypted = isNotBlank(encryptedPassword);
    if (hasPlain && hasEncrypted) {
        config.addError(PASSWORD, "You may only specify `password` or `encrypted_password`, not both!");
        config.addError(ScmMaterialConfig.ENCRYPTED_PASSWORD, "You may only specify `password` or `encrypted_password`, not both!");
    }
    if (hasPlain) {
        try {
            return goCipher.encrypt(password);
        } catch (CryptoException e) {
            config.addError(PASSWORD, "Could not encrypt the password. This usually happens when the cipher text is invalid");
            return null;
        }
    }
    if (hasEncrypted) {
        // Decrypt only to validate the cipher text; the encrypted form is returned.
        try {
            goCipher.decrypt(encryptedPassword);
        } catch (Exception e) {
            config.addError(ENCRYPTED_PASSWORD, "Encrypted value for password is invalid. This usually happens when the cipher text is invalid.");
        }
        return encryptedPassword;
    }
    return null;
}
// Supplying both a plain and an encrypted password must record the "not both"
// error under both keys.
@Test
public void shouldErrorOutWhenBothPasswordAndEncryptedPasswordAreGivenForDeserialization() throws CryptoException {
    SvnMaterialConfig svnMaterialConfig = svn();
    PasswordDeserializer passwordDeserializer = new PasswordDeserializer();
    passwordDeserializer.deserialize("password", new GoCipher().encrypt("encryptedPassword"), svnMaterialConfig);
    assertThat(svnMaterialConfig.errors().getAllOn("password"),
            is(List.of("You may only specify `password` or `encrypted_password`, not both!")));
    assertThat(svnMaterialConfig.errors().getAllOn("encryptedPassword"),
            is(List.of("You may only specify `password` or `encrypted_password`, not both!")));
}
/** Returns the service interface class exposed by this service. */
public Class<?> getServiceInterfaceClass() {
    return this.serviceInterfaceClass;
}
// The service must report DemoService as its interface class.
@Test
void getServiceInterfaceClass() {
    Assertions.assertEquals(DemoService.class, service.getServiceInterfaceClass());
}
/**
 * Runs the interactive shell session: builds a JLine terminal with signal
 * handling, prints a welcome banner with the resolved service/admin URLs,
 * and supplies a line reader that parses user input for the command loop.
 *
 * @throws Exception if the terminal or reader cannot be created
 */
public void run() throws Exception {
    final Terminal terminal = TerminalBuilder.builder()
            .nativeSignals(true)
            .signalHandler(signal -> {
                // Ctrl-C / Ctrl-\: interrupt a running command, otherwise exit the shell.
                if (signal == Terminal.Signal.INT || signal == Terminal.Signal.QUIT) {
                    if (execState == ExecState.RUNNING) {
                        throw new InterruptShellException();
                    } else {
                        exit(0);
                    }
                }
            })
            .build();
    run((providersMap) -> {
        // The last provider that declares a URL wins.
        String serviceUrl = "";
        String adminUrl = "";
        for (ShellCommandsProvider provider : providersMap.values()) {
            final String providerServiceUrl = provider.getServiceUrl();
            if (providerServiceUrl != null) {
                serviceUrl = providerServiceUrl;
            }
            final String providerAdminUrl = provider.getAdminUrl();
            if (providerAdminUrl != null) {
                adminUrl = providerAdminUrl;
            }
        }
        LineReaderBuilder readerBuilder = LineReaderBuilder.builder()
                .terminal(terminal)
                .parser(parser)
                .completer(systemRegistry.completer())
                .variable(LineReader.INDENTATION, 2)
                .option(LineReader.Option.INSERT_BRACKET, true);
        configureHistory(properties, readerBuilder);
        LineReader reader = readerBuilder.build();
        final String welcomeMessage = String.format("Welcome to Pulsar shell!\n %s: %s\n %s: %s\n\n"
                        + "Type %s to get started or try the autocompletion (TAB button).\n"
                        + "Type %s or %s to end the shell session.\n",
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Service URL").toAnsi(),
                serviceUrl,
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Admin URL").toAnsi(),
                adminUrl,
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("help").toAnsi(),
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("exit").toAnsi(),
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("quit").toAnsi());
        output(welcomeMessage, terminal);
        // Prompt shows the current named config (if any) plus the service host.
        String promptMessage;
        if (configShell.getCurrentConfig() != null) {
            promptMessage = String.format("%s(%s)", configShell.getCurrentConfig(), getHostFromUrl(serviceUrl));
        } else {
            promptMessage = getHostFromUrl(serviceUrl);
        }
        final String prompt = createPrompt(promptMessage);
        return new InteractiveLineReader() {
            @Override
            public String readLine() {
                return reader.readLine(prompt);
            }

            @Override
            public List<String> parseLine(String line) {
                return reader.getParser().parse(line, 0).words();
            }
        };
    }, () -> terminal);
}
// Runs the shell in file mode ("-f <script>") with a mocked reader/terminal and
// verifies the scripted commands reach the admin client and the produce command.
@Test
public void testFileMode() throws Exception {
    Terminal terminal = TerminalBuilder.builder().build();
    final MockLineReader linereader = new MockLineReader(terminal);
    final Properties props = new Properties();
    props.setProperty("webServiceUrl", "http://localhost:8080");
    final String shellFile = Thread.currentThread()
            .getContextClassLoader().getResource("test-shell-file").getFile();
    final TestPulsarShell testPulsarShell = new TestPulsarShell(new String[]{"-f", shellFile}, props, pulsarAdmin);
    testPulsarShell.run((a) -> linereader, () -> terminal);
    verify(topics).createNonPartitionedTopic(eq("persistent://public/default/my-topic"), any(Map.class));
    verify(testPulsarShell.cmdProduceHolder.get()).call();
}
/**
 * Returns whether an edge between the two node ids exists in any edge type set.
 * The edge id is computed as undirected when the container default is
 * undirected, or when only undirected edges have been added so far.
 */
@Override
public boolean edgeExists(String source, String target) {
    checkId(source);
    checkId(target);
    NodeDraftImpl from = getNode(source);
    NodeDraftImpl to = getNode(target);
    if (from == null || to == null) {
        return false;
    }
    boolean treatAsUndirected = edgeDefault.equals(EdgeDirectionDefault.UNDIRECTED)
            || (undirectedEdgesCount > 0 && directedEdgesCount == 0);
    long edgeId = getLongId(from, to, !treatAsUndirected);
    for (Long2ObjectMap typeSet : edgeTypeSets) {
        if (typeSet != null && typeSet.containsKey(edgeId)) {
            return true;
        }
    }
    return false;
}
// A self-loop ("1" -> "1") must be reported as existing in both directed and
// undirected graphs.
@Test
public void testEdgeExistsSelfLoop() {
    ImportContainerImpl importContainer = new ImportContainerImpl();
    generateTinyGraphWithSelfLoop(importContainer, EdgeDirection.DIRECTED);
    Assert.assertTrue(importContainer.edgeExists("1", "1"));
    importContainer = new ImportContainerImpl();
    generateTinyGraphWithSelfLoop(importContainer, EdgeDirection.UNDIRECTED);
    Assert.assertTrue(importContainer.edgeExists("1", "1"));
}
/**
 * Compiles the node tree into its string form.
 *
 * @return the rendered string
 * @throws ScanException if compilation of a node fails
 */
public String transform() throws ScanException {
    final StringBuilder out = new StringBuilder();
    compileNode(node, out, new Stack<Node>());
    return out.toString();
}
// A single variable reference "${k0}" must resolve to its value "v0".
@Test
public void variable() throws ScanException {
    String input = "${k0}";
    Node node = makeNode(input);
    NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
    assertEquals("v0", nodeToStringTransformer.transform());
}
/**
 * Returns all registered implementations of the given interface as a typed
 * array, or {@code null} when nothing is registered. A single registered
 * instance is wrapped into a one-element array.
 */
public static <T> T[] getBeans(Class<T> interfaceClass) {
    Object registered = serviceMap.get(interfaceClass.getName());
    if (registered == null) {
        return null;
    }
    if (registered instanceof Object[]) {
        return (T[]) registered;
    }
    // Single implementation: wrap it into a one-element array of the interface type.
    Object singleton = Array.newInstance(interfaceClass, 1);
    Array.set(singleton, 0, registered);
    return (T[]) singleton;
}
// A single registered implementation must be returned as a one-element array.
@Test
public void testArrayFromSingle() {
    // get an array with only one implementation.
    A[] a = SingletonServiceFactory.getBeans(A.class);
    Assert.assertEquals(1, a.length);
}
/**
 * Initializes metrics and managed state. The three throttling counters all live
 * in the same default maintenance group, so their construction is factored into
 * a helper; per-task "triggered" counters are grouped by task name. Finally the
 * table loader is opened.
 */
@Override
public void open(Configuration parameters) throws Exception {
  // Counters in the default maintenance metric group.
  this.rateLimiterTriggeredCounter =
      defaultGroupCounter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
  this.concurrentRunThrottledCounter =
      defaultGroupCounter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
  this.nothingToTriggerCounter =
      defaultGroupCounter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
  // One "triggered" counter per maintenance task, grouped by the task name.
  this.triggerCounters =
      taskNames.stream()
          .map(
              name ->
                  getRuntimeContext()
                      .getMetricGroup()
                      .addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
                      .counter(TableMaintenanceMetrics.TRIGGERED))
          .collect(Collectors.toList());

  // Keyed state: next evaluation time, accumulated table changes and the last
  // trigger time per task.
  this.nextEvaluationTimeState =
      getRuntimeContext()
          .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
  this.accumulatedChangesState =
      getRuntimeContext()
          .getListState(
              new ListStateDescriptor<>(
                  "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
  this.lastTriggerTimesState =
      getRuntimeContext()
          .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));

  tableLoader.open();
}

/** Creates a counter in the default maintenance metric group. */
private Counter defaultGroupCounter(String name) {
  return getRuntimeContext()
      .getMetricGroup()
      .addGroup(TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
      .counter(name);
}
// With an eq-delete-record threshold of 3, triggers must fire whenever the
// accumulated count reaches 3, then the accumulator resets.
@Test
void testEqDeleteRecordCount() throws Exception {
    TriggerManager manager =
        manager(
            sql.tableLoader(TABLE_NAME),
            new TriggerEvaluator.Builder().eqDeleteRecordCount(3).build());
    try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
        harness(manager)) {
        testHarness.open();
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(1L).build(), 0);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(2L).build(), 1);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(5L).build(), 2);
        // No trigger in this case
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(1L).build(), 2);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(2L).build(), 3);
    }
}
/**
 * Delegates the per-position calculation to the wrapped number-of-positions
 * criterion.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    return this.numberOfPositionsCriterion.calculate(series, position);
}
// Short-position ratios: one winning and one losing position out of the closed
// positions should both yield a ratio of 0.5.
// NOTE(review): the trades form 2 complete positions, but the inline comments
// below say "3 positions" — confirm against the criterion's position counting.
@Test
public void calculateWithShortPositions() {
    BarSeries series = new MockBarSeries(numFunction, 100d, 95d, 102d, 105d, 97d, 113d);
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(2, series),
            Trade.sellAt(3, series), Trade.buyAt(4, series));
    // there are 3 positions with 1 winning positions
    AnalysisCriterion winningPositionsRatio = getCriterion(PositionFilter.PROFIT);
    assertNumEquals(0.5, winningPositionsRatio.calculate(series, tradingRecord));
    // there are 3 positions with 1 losing positions
    AnalysisCriterion losingPositionsRatio = getCriterion(PositionFilter.LOSS);
    assertNumEquals(0.5, losingPositionsRatio.calculate(series, tradingRecord));
}
@SuppressWarnings("unused") // Required for automatic type inference public static <K> Builder0<K> forClass(final Class<K> type) { return new Builder0<>(); }
// A handler supplier that throws must be rejected with IllegalArgumentException
// (presumably the supplier is eagerly invoked during put/build — confirm).
@Test(expected = IllegalArgumentException.class)
public void shouldThrowIfHandlerSupplierThrows0() {
    HandlerMaps.forClass(BaseType.class)
            .put(LeafTypeA.class, () -> {
                throw new RuntimeException("Boom");
            })
            .build();
}
@ProcessElement public ProcessContinuation processElement( @Element PulsarSourceDescriptor pulsarSourceDescriptor, RestrictionTracker<OffsetRange, Long> tracker, WatermarkEstimator watermarkEstimator, OutputReceiver<PulsarMessage> output) throws IOException { long startTimestamp = tracker.currentRestriction().getFrom(); String topicDescriptor = pulsarSourceDescriptor.getTopic(); try (Reader<byte[]> reader = newReader(this.client, topicDescriptor)) { if (startTimestamp > 0) { reader.seek(startTimestamp); } while (true) { if (reader.hasReachedEndOfTopic()) { reader.close(); return ProcessContinuation.stop(); } Message<byte[]> message = reader.readNext(); if (message == null) { return ProcessContinuation.resume(); } Long currentTimestamp = message.getPublishTime(); // if tracker.tryclaim() return true, sdf must execute work otherwise // doFn must exit processElement() without doing any work associated // or claiming more work if (!tracker.tryClaim(currentTimestamp)) { reader.close(); return ProcessContinuation.stop(); } if (pulsarSourceDescriptor.getEndMessageId() != null) { MessageId currentMsgId = message.getMessageId(); boolean hasReachedEndMessageId = currentMsgId.compareTo(pulsarSourceDescriptor.getEndMessageId()) == 0; if (hasReachedEndMessageId) { return ProcessContinuation.stop(); } } PulsarMessage pulsarMessage = new PulsarMessage(message.getTopicName(), message.getPublishTime(), message); Instant outputTimestamp = extractOutputTimestampFn.apply(message); output.outputWithTimestamp(pulsarMessage, outputTimestamp); } } }
// End-to-end SDF run over a fake reader: processing stops at the restriction end
// and emits every message except the one whose claim falls on the end offset.
@Test
public void testProcessElement() throws Exception {
    MockOutputReceiver receiver = new MockOutputReceiver();
    long startOffset = fakePulsarReader.getStartTimestamp();
    long endOffset = fakePulsarReader.getEndTimestamp();
    OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(startOffset, endOffset));
    PulsarSourceDescriptor descriptor =
        PulsarSourceDescriptor.of(TOPIC, startOffset, endOffset, null, SERVICE_URL, ADMIN_URL);
    DoFn.ProcessContinuation result =
        dofnInstance.processElement(descriptor, tracker, null, (DoFn.OutputReceiver) receiver);
    int expectedResultWithoutCountingLastOffset = NUMBEROFMESSAGES - 1;
    assertEquals(DoFn.ProcessContinuation.stop(), result);
    assertEquals(expectedResultWithoutCountingLastOffset, receiver.getOutputs().size());
}
/**
 * Returns the partition columns resolved from the partition column names,
 * or an empty (mutable) list when the table has no partition columns.
 */
@Override
public List<Column> getPartitionColumns() {
    if (partColNames.isEmpty()) {
        return new ArrayList<>();
    }
    return partColNames.stream()
            .map(this::getColumn)
            .collect(Collectors.toList());
}
// Builds a Kudu table with columns a,b,c where a,b are partition columns and
// verifies getPartitionColumns() returns exactly those converted columns.
@Test
public void testPartitionKeys() {
    List<ColumnSchema> columns = Arrays.asList(
            genColumnSchema("a", org.apache.kudu.Type.INT32),
            genColumnSchema("b", org.apache.kudu.Type.STRING),
            genColumnSchema("c", org.apache.kudu.Type.DATE)
    );
    List<Column> fullSchema = new ArrayList<>(columns.size());
    List<String> partColNames = Arrays.asList("a", "b");
    ArrayList<Column> partitionSchema = new ArrayList<>();
    for (ColumnSchema column : columns) {
        Type fieldType = ColumnTypeConverter.fromKuduType(column);
        Column convertedColumn = new Column(column.getName(), fieldType, true);
        fullSchema.add(convertedColumn);
        if (partColNames.contains(column.getName())) {
            partitionSchema.add(convertedColumn);
        }
    }
    String catalogName = "testCatalog";
    String dbName = "testDB";
    String tableName = "testTable";
    String kuduTableName = "impala::testDB.testTable";
    KuduTable kuduTable = new KuduTable("localhost:7051", catalogName, dbName, tableName,
            kuduTableName, fullSchema, partColNames);
    List<Column> partitionColumns = kuduTable.getPartitionColumns();
    Assertions.assertThat(partitionColumns).hasSameElementsAs(partitionSchema);
}
/**
 * Static factory wrapping a resource with its closer. Both arguments are
 * mandatory; null values fail fast with a descriptive message.
 */
public static <T> CloseableResource<T> of(T resource, Closer<T> closer) {
    checkArgument(resource != null, "Resource must be non-null");
    checkArgument(closer != null, "%s must be non-null", Closer.class.getName());
    final CloseableResource<T> wrapped = new CloseableResource<>(resource, closer);
    return wrapped;
}
// An exception thrown by the closer must surface as a CloseException whose
// cause is the original exception.
@Test
public void wrapsExceptionsInCloseException() throws Exception {
    Exception wrapped = new Exception();
    thrown.expect(CloseException.class);
    thrown.expectCause(is(wrapped));
    try (CloseableResource<Foo> ignored =
            CloseableResource.of(
                new Foo(),
                foo -> {
                    throw wrapped;
                })) {
        // Do nothing.
    }
}
/**
 * Derives an annotated cluster state from the given params. Order matters:
 * per-node states are computed first, then entire groups may be taken down for
 * low availability, then the cluster-wide Up/Down decision is made, and the
 * distribution bit count is inferred from the final state. Node- and
 * cluster-level reasons are collected alongside.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
  final ContentCluster cluster = params.cluster;
  final ClusterState workingState = ClusterState.emptyState();
  final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
  // Step 1: effective state of every node (reasons collected as a side effect).
  for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
    final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
    workingState.setNodeState(nodeInfo.getNode(), nodeState);
  }
  // Step 2: group-level availability, evaluated on the node states set above.
  takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
  // Step 3: cluster-wide Down decision.
  final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
  if (reasonToBeDown.isPresent()) {
    workingState.setClusterState(State.DOWN);
  }
  // Step 4: distribution bits depend on the final node/cluster states.
  workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
  return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
// With 2 of 3 distributors up and a minimum ratio of 0.5, the cluster must stay
// up (no cluster-level Down reason), with only the downed distributor marked.
@Test
void cluster_not_down_if_more_than_min_ratio_of_distributors_available() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportDistributorNodeState(0, State.DOWN);
    final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5);
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3"));
    assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
}
/**
 * Initializes metrics and managed state. The three throttling counters all live
 * in the same default maintenance group, so their construction is factored into
 * a helper; per-task "triggered" counters are grouped by task name. Finally the
 * table loader is opened.
 */
@Override
public void open(Configuration parameters) throws Exception {
  // Counters in the default maintenance metric group.
  this.rateLimiterTriggeredCounter =
      defaultGroupCounter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
  this.concurrentRunThrottledCounter =
      defaultGroupCounter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
  this.nothingToTriggerCounter =
      defaultGroupCounter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
  // One "triggered" counter per maintenance task, grouped by the task name.
  this.triggerCounters =
      taskNames.stream()
          .map(
              name ->
                  getRuntimeContext()
                      .getMetricGroup()
                      .addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
                      .counter(TableMaintenanceMetrics.TRIGGERED))
          .collect(Collectors.toList());

  // Keyed state: next evaluation time, accumulated table changes and the last
  // trigger time per task.
  this.nextEvaluationTimeState =
      getRuntimeContext()
          .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
  this.accumulatedChangesState =
      getRuntimeContext()
          .getListState(
              new ListStateDescriptor<>(
                  "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
  this.lastTriggerTimesState =
      getRuntimeContext()
          .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));

  tableLoader.open();
}

/** Creates a counter in the default maintenance metric group. */
private Counter defaultGroupCounter(String name) {
  return getRuntimeContext()
      .getMetricGroup()
      .addGroup(TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
      .counter(name);
}
// With a commit-count threshold of 3, a trigger fires whenever the accumulated
// commit count reaches 3 (then the accumulator resets).
@Test
void testCommitCount() throws Exception {
    TriggerManager manager =
        manager(sql.tableLoader(TABLE_NAME), new TriggerEvaluator.Builder().commitCount(3).build());
    try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
        harness(manager)) {
        testHarness.open();
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(1).build(), 0);
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(2).build(), 1);
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(3).build(), 2);
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(10).build(), 3);
        // No trigger in this case
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(1).build(), 3);
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(1).build(), 3);
        addEventAndCheckResult(testHarness, TableChange.builder().commitCount(1).build(), 4);
    }
}
/**
 * Checks the search-version annotations on the resource: method-level
 * annotations win over class-level ones; with neither present, the request
 * passes through unchecked.
 */
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    final boolean methodAnnotated =
            resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersion.class)
                    || resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersions.class);
    if (methodAnnotated) {
        checkVersion(resourceInfo.getResourceMethod().getAnnotationsByType(SupportedSearchVersion.class));
        return;
    }
    final boolean classAnnotated =
            resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersion.class)
                    || resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersions.class);
    if (classAnnotated) {
        checkVersion(resourceInfo.getResourceClass().getAnnotationsByType(SupportedSearchVersion.class));
    }
}
// The resource method carries multiple supported-version annotations; filtering must
// succeed for both backend versions, and the version provider must be consulted on
// every request (no caching across invocations).
@Test
public void testFilterWithMultipleDistributionsSuccess() throws Exception {
    final Method resourceMethod = TestResourceWithMultipleSupportedVersions.class.getMethod("methodWithAnnotation");
    when(resourceInfo.getResourceMethod()).thenReturn(resourceMethod);
    // Successive provider calls return different backend versions.
    when(versionProvider.get()).thenReturn(elasticSearchV6, openSearchV1);

    filter.filter(requestContext);
    filter.filter(requestContext);

    verify(versionProvider, times(2)).get();
}
@Override public String[] getManagedIndices() { final Set<String> indexNames = indices.getIndexNamesAndAliases(getIndexWildcard()).keySet(); // also allow restore archives to be returned final List<String> result = indexNames.stream() .filter(this::isManagedIndex) .toList(); return result.toArray(new String[result.size()]); }
// When no indices exist, getManagedIndices() must return an empty array rather than
// throw (e.g. on a null or missing index name).
@Test
public void nullIndexerDoesNotThrowOnIndexName() {
    final String[] indicesNames = mongoIndexSet.getManagedIndices();
    assertThat(indicesNames).isEmpty();
}
/**
 * Starts scheduling the job. Only legal while the scheduler is still in the
 * {@code Created} state; otherwise an {@link IllegalStateException} is thrown.
 */
@Override
public void startScheduling() {
    checkIdleSlotTimeout();
    final Created created =
            state.as(Created.class)
                    .orElseThrow(
                            () ->
                                    new IllegalStateException(
                                            "Can only start scheduling when being in Created state."));
    created.startScheduling();
}
// In REACTIVE scheduler mode, starting the scheduler must immediately declare resource
// requirements for the maximum possible parallelism rather than the configured one.
@Test
void testStartSchedulingSetsResourceRequirementsForReactiveMode() throws Exception {
    final JobGraph jobGraph = createJobGraph();

    final DefaultDeclarativeSlotPool declarativeSlotPool =
            createDeclarativeSlotPool(jobGraph.getJobID());

    final Configuration configuration = new Configuration();
    configuration.set(JobManagerOptions.SCHEDULER_MODE, SchedulerExecutionMode.REACTIVE);

    final AdaptiveScheduler scheduler =
            new AdaptiveSchedulerBuilder(
                            jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .setDeclarativeSlotPool(declarativeSlotPool)
                    .setJobMasterConfiguration(configuration)
                    .build();

    scheduler.startScheduling();

    // should request the max possible resources
    final int expectedParallelism =
            KeyGroupRangeAssignment.computeDefaultMaxParallelism(PARALLELISM);
    assertThat(declarativeSlotPool.getResourceRequirements())
            .contains(ResourceRequirement.create(ResourceProfile.UNKNOWN, expectedParallelism));
}
/**
 * Parses a string into a SQL DATE using a SimpleDateFormat pattern.
 * Returns null when either argument is null. Values whose parsed epoch millis are not a
 * whole number of days (i.e. the pattern captured a time-of-day component) are rejected.
 */
@Udf(description = "Converts a string representation of a date in the given format"
    + " into a DATE value.")
public Date parseDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.text.SimpleDateFormat.") final String formatPattern) {
  if (formattedDate == null || formatPattern == null) {
    return null;
  }
  try {
    // `formatters` caches one parser per pattern; ExecutionException surfaces cache-load failures.
    final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
    if (time % MILLIS_IN_DAY != 0) {
      // NOTE(review): this KsqlFunctionException is itself caught by the catch below and
      // re-wrapped in the "Failed to parse date ..." message — confirm that is intended.
      throw new KsqlFunctionException("Date format contains time field.");
    }
    return new Date(time);
  } catch (final ExecutionException | RuntimeException | ParseException e) {
    throw new KsqlFunctionException("Failed to parse date '" + formattedDate
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
// SimpleDateFormat literal text in single quotes ('Fred') must be matched and discarded,
// leaving only the date portion in the parsed result.
@Test
public void shouldSupportEmbeddedChars() {
  // When:
  final Date result = udf.parseDate("2021-12-01Fred", "yyyy-MM-dd'Fred'");

  // Then:
  assertThat(result.getTime(), is(1638316800000L));
}
@Override public Authentication authenticate(Authentication authentication) throws AuthenticationException { JWTBearerAssertionAuthenticationToken jwtAuth = (JWTBearerAssertionAuthenticationToken)authentication; try { ClientDetailsEntity client = clientService.loadClientByClientId(jwtAuth.getName()); JWT jwt = jwtAuth.getJwt(); JWTClaimsSet jwtClaims = jwt.getJWTClaimsSet(); if (!(jwt instanceof SignedJWT)) { throw new AuthenticationServiceException("Unsupported JWT type: " + jwt.getClass().getName()); } // check the signature with nimbus SignedJWT jws = (SignedJWT) jwt; JWSAlgorithm alg = jws.getHeader().getAlgorithm(); if (client.getTokenEndpointAuthSigningAlg() != null && !client.getTokenEndpointAuthSigningAlg().equals(alg)) { throw new AuthenticationServiceException("Client's registered token endpoint signing algorithm (" + client.getTokenEndpointAuthSigningAlg() + ") does not match token's actual algorithm (" + alg.getName() + ")"); } if (client.getTokenEndpointAuthMethod() == null || client.getTokenEndpointAuthMethod().equals(AuthMethod.NONE) || client.getTokenEndpointAuthMethod().equals(AuthMethod.SECRET_BASIC) || client.getTokenEndpointAuthMethod().equals(AuthMethod.SECRET_POST)) { // this client doesn't support this type of authentication throw new AuthenticationServiceException("Client does not support this authentication method."); } else if ((client.getTokenEndpointAuthMethod().equals(AuthMethod.PRIVATE_KEY) && (alg.equals(JWSAlgorithm.RS256) || alg.equals(JWSAlgorithm.RS384) || alg.equals(JWSAlgorithm.RS512) || alg.equals(JWSAlgorithm.ES256) || alg.equals(JWSAlgorithm.ES384) || alg.equals(JWSAlgorithm.ES512) || alg.equals(JWSAlgorithm.PS256) || alg.equals(JWSAlgorithm.PS384) || alg.equals(JWSAlgorithm.PS512))) || (client.getTokenEndpointAuthMethod().equals(AuthMethod.SECRET_JWT) && (alg.equals(JWSAlgorithm.HS256) || alg.equals(JWSAlgorithm.HS384) || alg.equals(JWSAlgorithm.HS512)))) { // double-check the method is asymmetrical if we're in HEART mode if 
(config.isHeartMode() && !client.getTokenEndpointAuthMethod().equals(AuthMethod.PRIVATE_KEY)) { throw new AuthenticationServiceException("[HEART mode] Invalid authentication method"); } JWTSigningAndValidationService validator = validators.getValidator(client, alg); if (validator == null) { throw new AuthenticationServiceException("Unable to create signature validator for client " + client + " and algorithm " + alg); } if (!validator.validateSignature(jws)) { throw new AuthenticationServiceException("Signature did not validate for presented JWT authentication."); } } else { throw new AuthenticationServiceException("Unable to create signature validator for method " + client.getTokenEndpointAuthMethod() + " and algorithm " + alg); } // check the issuer if (jwtClaims.getIssuer() == null) { throw new AuthenticationServiceException("Assertion Token Issuer is null"); } else if (!jwtClaims.getIssuer().equals(client.getClientId())){ throw new AuthenticationServiceException("Issuers do not match, expected " + client.getClientId() + " got " + jwtClaims.getIssuer()); } // check expiration if (jwtClaims.getExpirationTime() == null) { throw new AuthenticationServiceException("Assertion Token does not have required expiration claim"); } else { // it's not null, see if it's expired Date now = new Date(System.currentTimeMillis() - (timeSkewAllowance * 1000)); if (now.after(jwtClaims.getExpirationTime())) { throw new AuthenticationServiceException("Assertion Token is expired: " + jwtClaims.getExpirationTime()); } } // check not before if (jwtClaims.getNotBeforeTime() != null) { Date now = new Date(System.currentTimeMillis() + (timeSkewAllowance * 1000)); if (now.before(jwtClaims.getNotBeforeTime())){ throw new AuthenticationServiceException("Assertion Token not valid untill: " + jwtClaims.getNotBeforeTime()); } } // check issued at if (jwtClaims.getIssueTime() != null) { // since it's not null, see if it was issued in the future Date now = new Date(System.currentTimeMillis() + 
(timeSkewAllowance * 1000)); if (now.before(jwtClaims.getIssueTime())) { throw new AuthenticationServiceException("Assertion Token was issued in the future: " + jwtClaims.getIssueTime()); } } // check audience if (jwtClaims.getAudience() == null) { throw new AuthenticationServiceException("Assertion token audience is null"); } else if (!(jwtClaims.getAudience().contains(config.getIssuer()) || jwtClaims.getAudience().contains(config.getIssuer() + "token"))) { throw new AuthenticationServiceException("Audience does not match, expected " + config.getIssuer() + " or " + (config.getIssuer() + "token") + " got " + jwtClaims.getAudience()); } // IFF we managed to get all the way down here, the token is valid // add in the ROLE_CLIENT authority Set<GrantedAuthority> authorities = new HashSet<>(client.getAuthorities()); authorities.add(ROLE_CLIENT); return new JWTBearerAssertionAuthenticationToken(jwt, authorities); } catch (InvalidClientException e) { throw new UsernameNotFoundException("Could not find client: " + jwtAuth.getName()); } catch (ParseException e) { logger.error("Failure during authentication, error was: ", e); throw new AuthenticationServiceException("Invalid JWT format"); } }
// The provider must accept an assertion whose audience list contains the token endpoint
// (issuer + "token"); extra unrecognized audience entries are tolerated.
@Test
public void should_return_valid_token_when_audience_contains_token_endpoint() {
    JWTClaimsSet jwtClaimsSet = new JWTClaimsSet.Builder()
            .issuer(CLIENT_ID)
            .subject(SUBJECT)
            .expirationTime(new Date())
            .audience(ImmutableList.of("http://issuer.com/token", "invalid"))
            .build();
    JWT jwt = mockSignedJWTAuthAttempt(jwtClaimsSet);

    Authentication authentication = jwtBearerAuthenticationProvider.authenticate(token);

    assertThat(authentication, instanceOf(JWTBearerAssertionAuthenticationToken.class));
    JWTBearerAssertionAuthenticationToken token = (JWTBearerAssertionAuthenticationToken) authentication;
    assertThat(token.getName(), is(SUBJECT));
    assertThat(token.getJwt(), is(jwt));
    // Client authorities plus the implicitly added ROLE_CLIENT.
    assertThat(token.getAuthorities(), hasItems(authority1, authority2, authority3));
    assertThat(token.getAuthorities().size(), is(4));
}
/**
 * Returns a pseudo-random long within the range [startInclusive, endExclusive).
 *
 * @param startInclusive the smallest value that can be returned
 * @param endExclusive the upper bound (not included)
 * @return a random long in the requested range
 */
public static long nextLong(final long startInclusive, final long endExclusive) {
    // Rejects invalid ranges (e.g. start > end throws IllegalArgumentException).
    checkParameters(startInclusive, endExclusive);

    long diff = endExclusive - startInclusive;
    if (diff == 0) {
        // Zero-width range: only one value is permitted.
        return startInclusive;
    }

    // NOTE(review): nextDouble() carries only 53 bits of precision, so for very wide
    // ranges some long values can never be produced — confirm full-range coverage is
    // not required here.
    return (long) (startInclusive + (diff * RANDOM.nextDouble()));
}
// A start bound greater than the end bound is an invalid range and must be rejected.
@Test
void testNextLongWithIllegalArgumentException() {
    assertThrows(IllegalArgumentException.class, () -> {
        RandomUtils.nextLong(999L, 199L);
    });
}
/**
 * Resets the committed offsets of a consumer group on the given topic to the requested
 * per-partition targets. Targets outside a partition's [earliest, latest] range are
 * adjusted by {@code editOffsetsBounds} before committing (see the bounds test).
 *
 * @param targetOffsets map of partition number to desired offset (must not be null)
 * @return completion signal for the commit
 */
public Mono<Void> resetToOffsets(
    KafkaCluster cluster, String group, String topic, Map<Integer, Long> targetOffsets) {
  Preconditions.checkNotNull(targetOffsets);
  // Re-key by TopicPartition for the admin-client calls.
  var partitionOffsets = targetOffsets.entrySet().stream()
      .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
  // checkGroupCondition presumably verifies the group can be reset — confirm semantics.
  return checkGroupCondition(cluster, group).flatMap(
      ac ->
          // Fetch earliest and latest offsets so out-of-range targets can be adjusted
          // before the commit.
          ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest(), true)
              .flatMap(earliest ->
                  ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest(), true)
                      .map(latest -> editOffsetsBounds(partitionOffsets, earliest, latest))
                      .flatMap(offsetsToCommit -> resetOffsets(ac, group, offsetsToCommit)))
  );
}
// Targets below the earliest offset are clamped up to earliest, targets above the latest
// are clamped down to latest, and in-range targets are applied as-is.
@Test
void resetToOffsetsCommitsEarliestOrLatestOffsetsIfOffsetsBoundsNotValid() {
  sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));

  var offsetsWithInValidBounds = Map.of(0, -2L, 1, 5L, 2, 500L);
  var expectedOffsets = Map.of(0, 0L, 1, 5L, 2, 10L);
  offsetsResetService.resetToOffsets(cluster, groupId, topic, offsetsWithInValidBounds).block();
  assertOffsets(expectedOffsets);
}
static Map<String, String> determineHeadersFrom(final Response response) { final HttpFields headers = response.getHeaders(); final Map<String, String> answer = new LinkedHashMap<>(); for (final HttpField header : headers) { final String headerName = header.getName(); if (headerName.startsWith("Sforce")) { answer.put(headerName, header.getValue()); } } // don't set the response code to "0" and the response text to null if there's a response timeout if (response.getStatus() != 0) { answer.put(Exchange.HTTP_RESPONSE_CODE, String.valueOf(response.getStatus())); answer.put(Exchange.HTTP_RESPONSE_TEXT, response.getReason()); } return answer; }
// Of the mixed response headers only the Sforce-* one must be propagated into the
// resulting header map.
@Test
public void shouldDetermineHeadersFromResponse() {
    final Response response = mock(Response.class);
    final HttpFields.Mutable httpHeaders = HttpFields.build();
    httpHeaders.add("Date", "Mon, 20 May 2013 22:21:46 GMT");
    httpHeaders.add("Sforce-Limit-Info", "api-usage=18/5000");
    httpHeaders.add("Last-Modified", "Mon, 20 May 2013 20:49:32 GMT");
    httpHeaders.add("Content-Type", "application/json;charset=UTF-8");
    httpHeaders.add("Transfer-Encoding", "chunked");
    when(response.getHeaders()).thenReturn(httpHeaders);

    final Map<String, String> headers = AbstractClientBase.determineHeadersFrom(response);

    assertThat(headers).containsEntry("Sforce-Limit-Info", "api-usage=18/5000");
}
/**
 * Creates a span that starts a new trace: no parent span and a freshly minted root
 * context.
 */
public Span newTrace() {
  return _toSpan(null, newRootContext(0));
}
// A brand-new trace must have no parent id and be materialized as a RealSpan.
@Test
void newTrace_isRootSpan() {
  assertThat(tracer.newTrace())
      .satisfies(s -> assertThat(s.context().parentId()).isNull())
      .isInstanceOf(RealSpan.class);
}
/**
 * Completes a segment upload: registers a brand-new segment or refreshes an existing
 * one, first recovering from any previously failed upload by deleting its leftover
 * segment ZK metadata.
 *
 * @param enableParallelPushProtection guards against concurrent uploads of the same segment
 * @param allowRefresh whether refreshing an already-existing segment is permitted
 * @throws Exception on processing failure (ZK, move-to-final-location, etc.)
 */
public void completeSegmentOperations(String tableNameWithType, SegmentMetadata segmentMetadata,
    FileUploadType uploadType, @Nullable URI finalSegmentLocationURI, File segmentFile,
    @Nullable String sourceDownloadURIStr, String segmentDownloadURIStr, @Nullable String crypterName,
    long segmentSizeInBytes, boolean enableParallelPushProtection, boolean allowRefresh, HttpHeaders headers)
    throws Exception {
  String segmentName = segmentMetadata.getName();
  // Caller may restrict this call to refresh-only via a custom HTTP header.
  boolean refreshOnly =
      Boolean.parseBoolean(headers.getHeaderString(FileUploadDownloadClient.CustomHeaders.REFRESH_ONLY));
  ZNRecord existingSegmentMetadataZNRecord =
      _pinotHelixResourceManager.getSegmentMetadataZnRecord(tableNameWithType, segmentName);
  // Leftover metadata from a failed prior upload is removed so the segment is treated as new.
  if (existingSegmentMetadataZNRecord != null && shouldProcessAsNewSegment(tableNameWithType, segmentName,
      existingSegmentMetadataZNRecord, enableParallelPushProtection)) {
    LOGGER.warn("Removing segment ZK metadata (recovering from previous upload failure) for table: {}, segment: {}",
        tableNameWithType, segmentName);
    Preconditions.checkState(_pinotHelixResourceManager.removeSegmentZKMetadata(tableNameWithType, segmentName),
        "Failed to remove segment ZK metadata for table: %s, segment: %s", tableNameWithType, segmentName);
    existingSegmentMetadataZNRecord = null;
  }

  if (existingSegmentMetadataZNRecord == null) {
    // Add a new segment
    if (refreshOnly) {
      throw new ControllerApplicationException(LOGGER,
          String.format("Cannot refresh non-existing segment: %s for table: %s", segmentName, tableNameWithType),
          Response.Status.GONE);
    }
    LOGGER.info("Adding new segment: {} to table: {}", segmentName, tableNameWithType);
    processNewSegment(tableNameWithType, segmentMetadata, uploadType, finalSegmentLocationURI, segmentFile,
        sourceDownloadURIStr, segmentDownloadURIStr, crypterName, segmentSizeInBytes, enableParallelPushProtection,
        headers);
  } else {
    // Refresh an existing segment
    if (!allowRefresh) {
      // We cannot perform this check up-front in UploadSegment API call. If a segment doesn't exist during the check
      // done up-front but ends up getting created before the check here, we could incorrectly refresh an existing
      // segment.
      throw new ControllerApplicationException(LOGGER,
          String.format("Segment: %s already exists in table: %s. Refresh not permitted.", segmentName,
              tableNameWithType), Response.Status.CONFLICT);
    }
    LOGGER.info("Segment: {} already exists in table: {}, refreshing it", segmentName, tableNameWithType);
    processExistingSegment(tableNameWithType, segmentMetadata, uploadType, existingSegmentMetadataZNRecord,
        finalSegmentLocationURI, segmentFile, sourceDownloadURIStr, segmentDownloadURIStr, crypterName,
        segmentSizeInBytes, enableParallelPushProtection, headers);
  }
}
// End-to-end exercise of completeSegmentOperations: recovery from a failed upload,
// fresh upload, re-upload after incomplete ideal-state assignment, refresh gating
// (allowRefresh=false and mismatched IF_MATCH), same-CRC refresh, and different-CRC refresh.
@Test
public void testCompleteSegmentOperations()
    throws Exception {
  ZKOperator zkOperator = new ZKOperator(_resourceManager, mock(ControllerConf.class), mock(ControllerMetrics.class));
  SegmentMetadata segmentMetadata = mock(SegmentMetadata.class);
  when(segmentMetadata.getName()).thenReturn(SEGMENT_NAME);
  when(segmentMetadata.getCrc()).thenReturn("12345");
  when(segmentMetadata.getIndexCreationTime()).thenReturn(123L);
  HttpHeaders httpHeaders = mock(HttpHeaders.class);

  // Test if Zk segment metadata is removed if exception is thrown when moving segment to final location.
  try {
    // Create mock finalSegmentLocationURI and segmentFile.
    URI finalSegmentLocationURI =
        URIUtils.getUri("mockPath", OFFLINE_TABLE_NAME, URIUtils.encode(segmentMetadata.getName()));
    File segmentFile = new File(new File("foo/bar"), "mockChild");
    zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT,
        finalSegmentLocationURI, segmentFile, "downloadUrl", "downloadUrl", "crypter", 10, true, true, httpHeaders);
    fail();
  } catch (Exception e) {
    // Expected
  }

  // Wait for the segment Zk entry to be deleted.
  TestUtils.waitForCondition(aVoid -> {
    SegmentZKMetadata segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, SEGMENT_NAME);
    return segmentZKMetadata == null;
  }, 30_000L, "Failed to delete segmentZkMetadata.");

  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
      "downloadUrl", "downloadUrl", "crypter", 10, true, true, httpHeaders);

  SegmentZKMetadata segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, SEGMENT_NAME);
  assertNotNull(segmentZKMetadata);
  assertEquals(segmentZKMetadata.getCrc(), 12345L);
  assertEquals(segmentZKMetadata.getCreationTime(), 123L);
  long pushTime = segmentZKMetadata.getPushTime();
  assertTrue(pushTime > 0);
  assertEquals(segmentZKMetadata.getRefreshTime(), Long.MIN_VALUE);
  assertEquals(segmentZKMetadata.getDownloadUrl(), "downloadUrl");
  assertEquals(segmentZKMetadata.getCrypterName(), "crypter");
  assertEquals(segmentZKMetadata.getSegmentUploadStartTime(), -1);
  assertEquals(segmentZKMetadata.getSizeInBytes(), 10);

  // Test if the same segment can be uploaded when the previous upload failed after segment ZK metadata is created but
  // before segment is assigned to the ideal state

  // Manually remove the segment from the ideal state
  IdealState idealState = _resourceManager.getTableIdealState(OFFLINE_TABLE_NAME);
  assertNotNull(idealState);
  idealState.getRecord().getMapFields().remove(SEGMENT_NAME);
  _resourceManager.getHelixAdmin()
      .setResourceIdealState(_resourceManager.getHelixClusterName(), OFFLINE_TABLE_NAME, idealState);

  // The segment should be uploaded as a new segment (push time should change, and refresh time shouldn't be set)
  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
      "downloadUrl", "downloadUrl", "crypter", 10, true, true, httpHeaders);
  segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, SEGMENT_NAME);
  assertNotNull(segmentZKMetadata);
  assertEquals(segmentZKMetadata.getCrc(), 12345L);
  assertEquals(segmentZKMetadata.getCreationTime(), 123L);
  assertTrue(segmentZKMetadata.getPushTime() > pushTime);
  pushTime = segmentZKMetadata.getPushTime();
  assertTrue(pushTime > 0);
  assertEquals(segmentZKMetadata.getRefreshTime(), Long.MIN_VALUE);
  assertEquals(segmentZKMetadata.getDownloadUrl(), "downloadUrl");
  assertEquals(segmentZKMetadata.getCrypterName(), "crypter");
  assertEquals(segmentZKMetadata.getSegmentUploadStartTime(), -1);
  assertEquals(segmentZKMetadata.getSizeInBytes(), 10);

  // Upload the same segment with allowRefresh = false. Validate that an exception is thrown.
  try {
    zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
        "otherDownloadUrl", "otherDownloadUrl", "otherCrypter", 10, true, false, httpHeaders);
    fail();
  } catch (Exception e) {
    // Expected
  }

  // Refresh the segment with unmatched IF_MATCH field
  when(httpHeaders.getHeaderString(HttpHeaders.IF_MATCH)).thenReturn("123");
  try {
    zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
        "otherDownloadUrl", "otherDownloadUrl", "otherCrypter", 10, true, true, httpHeaders);
    fail();
  } catch (Exception e) {
    // Expected
  }

  // Refresh the segment with the same segment (same CRC) with matched IF_MATCH field but different creation time,
  // downloadURL and crypter
  when(httpHeaders.getHeaderString(HttpHeaders.IF_MATCH)).thenReturn("12345");
  when(segmentMetadata.getIndexCreationTime()).thenReturn(456L);
  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
      "otherDownloadUrl", "otherDownloadUrl", "otherCrypter", 10, true, true, httpHeaders);
  segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, SEGMENT_NAME);
  assertNotNull(segmentZKMetadata);
  assertEquals(segmentZKMetadata.getCrc(), 12345L);
  // Push time should not change
  assertEquals(segmentZKMetadata.getPushTime(), pushTime);
  // Creation time and refresh time should change
  assertEquals(segmentZKMetadata.getCreationTime(), 456L);
  long refreshTime = segmentZKMetadata.getRefreshTime();
  assertTrue(refreshTime > 0);
  // Download URL should change. Refer: https://github.com/apache/pinot/issues/11535
  assertEquals(segmentZKMetadata.getDownloadUrl(), "otherDownloadUrl");
  // crypter should not be changed
  assertEquals(segmentZKMetadata.getCrypterName(), "crypter");
  assertEquals(segmentZKMetadata.getSegmentUploadStartTime(), -1);
  assertEquals(segmentZKMetadata.getSizeInBytes(), 10);

  // Refresh the segment with a different segment (different CRC)
  when(segmentMetadata.getCrc()).thenReturn("23456");
  when(segmentMetadata.getIndexCreationTime()).thenReturn(789L);
  // Add a tiny sleep to guarantee that refresh time is different from the previous round
  Thread.sleep(10);
  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.SEGMENT, null, null,
      "otherDownloadUrl", "otherDownloadUrl", "otherCrypter", 100, true, true, httpHeaders);
  segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, SEGMENT_NAME);
  assertNotNull(segmentZKMetadata);
  assertEquals(segmentZKMetadata.getCrc(), 23456L);
  // Push time should not change
  assertEquals(segmentZKMetadata.getPushTime(), pushTime);
  // Creation time, refresh time, downloadUrl and crypter should change
  assertEquals(segmentZKMetadata.getCreationTime(), 789L);
  assertTrue(segmentZKMetadata.getRefreshTime() > refreshTime);
  assertEquals(segmentZKMetadata.getDownloadUrl(), "otherDownloadUrl");
  assertEquals(segmentZKMetadata.getCrypterName(), "otherCrypter");
  assertEquals(segmentZKMetadata.getSizeInBytes(), 100);
}
/**
 * Pre-invocation hook for PostgreSQL query execution: renders the SQL about to run and
 * rejects it via the write-prohibition handler when writes are disabled for the target
 * database.
 *
 * @param context interception context; argument 0 is the Query, argument 1 the ParameterList
 * @return the same context, possibly with a throwable set by the prohibition handler
 */
@Override
public ExecuteContext doBefore(ExecuteContext context) {
    final DatabaseInfo databaseInfo = getDataBaseInfo(context);
    final Object[] arguments = context.getArguments();
    final String sql = ((Query) arguments[0]).toString((ParameterList) arguments[1]);
    final String databaseName = databaseInfo.getDatabaseName();
    handleWriteOperationIfWriteDisabled(
            sql,
            databaseName,
            DatabaseWriteProhibitionManager.getPostgreSqlProhibitionDatabases(),
            context);
    return context;
}
// Covers the four prohibition combinations: switch off, switch off with matching database,
// switch on with matching database (write rejected), read-only SQL with switch on, and
// switch on with a non-matching database set.
@Test
public void testDoBefore() throws Exception {
    // Database write prohibition switch turned off
    GLOBAL_CONFIG.setEnablePostgreSqlWriteProhibition(false);
    ExecuteContext context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // The database write prohibition switch is turned off, and the write prohibition database set contains the
    // intercepted database
    Set<String> databases = new HashSet<>();
    databases.add("database-test");
    GLOBAL_CONFIG.setPostgreSqlDatabases(databases);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // The database write prohibition switch is turned on, and the write prohibition database collection contains
    // the intercepted databases
    GLOBAL_CONFIG.setEnablePostgreSqlWriteProhibition(true);
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertEquals("Database prohibit to write, database: database-test",
            context.getThrowableOut().getMessage());

    // The database write prohibition switch is turned on, and the write prohibition database collection contains
    // the intercepted database. SQL does not perform write operations
    Query readQuery = new PostSqlQuery(READ_SQL);
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock,
            new Object[]{readQuery, null, null, null, null, null, null}, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // The database write prohibition switch is turned on, and the write prohibition database collection does not
    // contain the intercepted database
    GLOBAL_CONFIG.setPostgreSqlDatabases(new HashSet<>());
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());
}
/**
 * Upserts a tagged configuration: updates the existing tagged record when one is found,
 * otherwise inserts a new one.
 *
 * @return the operation result reported by the underlying add/update call
 */
@Override
public ConfigOperateResult insertOrUpdateTag(final ConfigInfo configInfo, final String tag, final String srcIp,
        final String srcUser) {
    final boolean exists = findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(),
            configInfo.getTenant(), tag) != null;
    return exists
            ? updateConfigInfo4Tag(configInfo, tag, srcIp, srcUser)
            : addConfigInfo4Tag(configInfo, tag, srcIp, srcUser);
}
// Insert path of insertOrUpdateTag: when no tagged record exists (the first state query
// throws EmptyResultDataAccessException), an insert must be issued and the returned
// result must reflect the post-insert state row.
@Test
void testInsertOrUpdateTagOfAdd() {
    String dataId = "dataId111222";
    String group = "group";
    String tenant = "tenant";
    String appName = "appname1234";
    String content = "c12345";
    ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
    configInfo.setEncryptedDataKey("key23456");
    //mock query config state empty and return obj after insert
    ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper();
    configInfoStateWrapper.setLastModified(System.currentTimeMillis());
    configInfoStateWrapper.setId(234567890L);
    String tag = "tag123";
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
            eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1))
            .thenReturn(configInfoStateWrapper);
    String srcIp = "ip345678";
    String srcUser = "user1234567";
    ConfigOperateResult configOperateResult =
            externalConfigInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp, srcUser);
    //verify insert to be invoked
    Mockito.verify(jdbcTemplate, times(1))
            .update(anyString(), eq(dataId), eq(group), eq(tenant), eq(tag), eq(appName),
                    eq(configInfo.getContent()), eq(configInfo.getMd5()), eq(srcIp), eq(srcUser),
                    any(Timestamp.class), any(Timestamp.class));
    assertEquals(configInfoStateWrapper.getId(), configOperateResult.getId());
    assertEquals(configInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
}
/**
 * Records quality-profile change history for a batch of plugin-driven rule updates.
 * For each update, a rule-change row is created and inserted, then one profile-change
 * DTO is built per quality profile currently referencing the rule; all profile changes
 * are bulk-inserted at the end (skipped entirely when there is nothing to persist).
 */
public void createQprofileChangesForRuleUpdates(DbSession dbSession, Set<PluginRuleUpdate> pluginRuleUpdates) {
  List<QProfileChangeDto> changesToPersist = pluginRuleUpdates.stream()
    .flatMap(pluginRuleUpdate -> {
      RuleChangeDto ruleChangeDto = createNewRuleChange(pluginRuleUpdate);
      // NOTE(review): insert happens as a side effect inside the stream, so a rule change
      // row is written even when no quality profile references the rule — confirm intended.
      insertRuleChange(dbSession, ruleChangeDto);
      return findQualityProfilesForRule(dbSession, pluginRuleUpdate.getRuleUuid()).stream()
        .map(qualityProfileUuid -> buildQprofileChangeDtoForRuleChange(qualityProfileUuid, ruleChangeDto));
    }).toList();

  if (!changesToPersist.isEmpty()) {
    dbClient.qProfileChangeDao().bulkInsert(dbSession, changesToPersist);
  }
}
// An empty update set must not touch the database at all.
@Test
public void updateWithoutCommit_whenNoRuleChanges_thenDontInteractWithDatabase() {
  underTest.createQprofileChangesForRuleUpdates(mock(), Set.of());
  verifyNoInteractions(dbClient);
}
/**
 * Convenience overload that forwards the item to {@code tryProcess(int, Object)} with
 * ordinal 4, returning whatever the delegate returns.
 */
@SuppressWarnings("checkstyle:magicnumber")
protected boolean tryProcess4(@Nonnull Object item) throws Exception {
    return tryProcess(4, item);
}
// tryProcess4 must forward the item to tryProcess with ordinal 4 and return its result.
@Test
public void when_tryProcess4_then_delegatesToTryProcess() throws Exception {
    // When
    boolean done = p.tryProcess4(MOCK_ITEM);

    // Then
    assertTrue(done);
    p.validateReceptionOfItem(ORDINAL_4, MOCK_ITEM);
}
/**
 * Returns the configured HTTP bind address, requiring brackets around IPv6 literals and
 * falling back to the Graylog default port when none was specified.
 */
public HostAndPort getHttpBindAddress() {
    return httpBindAddress
            .requireBracketsForIPv6()
            .withDefaultPort(GRAYLOG_DEFAULT_PORT);
}
// An explicit port in http_bind_address must be preserved instead of the default port.
@Test
public void testHttpBindAddressWithCustomPort() throws RepositoryException, ValidationException {
    jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_bind_address", "example.com:12345")))
            .addConfigurationBean(configuration)
            .process();

    assertThat(configuration.getHttpBindAddress()).isEqualTo(HostAndPort.fromParts("example.com", 12345));
}
/**
 * Converts a scalar operator into a predicate via the visitor dispatch; a null operator
 * converts to a null predicate.
 */
public Predicate convert(ScalarOperator operator) {
    return (operator == null) ? null : operator.accept(this, null);
}
// A != binary predicate must convert into a LeafPredicate carrying a NotEqual function
// and the literal value 5.
@Test
public void testNotEq() {
    ConstantOperator value = ConstantOperator.createInt(5);
    ScalarOperator op = new BinaryPredicateOperator(BinaryType.NE, F0, value);
    Predicate result = CONVERTER.convert(op);
    Assert.assertTrue(result instanceof LeafPredicate);
    LeafPredicate leafPredicate = (LeafPredicate) result;
    Assert.assertTrue(leafPredicate.function() instanceof NotEqual);
    Assert.assertEquals(5, leafPredicate.literals().get(0));
}
/**
 * Decodes the given bytes into text, auto-detecting a UTF charset when none is supplied,
 * and strips a leading byte-order mark from the result.
 *
 * @param bytes raw input bytes
 * @param charset charset to decode with, or null to auto-detect
 * @return decoded text without a leading BOM
 */
public static String asString(byte[] bytes, Charset charset) {
    final Charset effective = (charset == null) ? detectUtfCharset(bytes) : charset;
    return skipBOM(new String(bytes, effective));
}
// With and without an explicit charset, a UTF-8 BOM (EF BB BF) must be stripped and the
// remaining bytes decoded; BOM-less input decodes unchanged.
@Test
void validateTextConversionFromBytes() {
    assertEquals("A", UtfTextUtils.asString(hexBytes("EFBBBF41"), StandardCharsets.UTF_8));
    assertEquals("A", UtfTextUtils.asString(hexBytes("EFBBBF41"), null));
    assertEquals("A", UtfTextUtils.asString(hexBytes("41"), StandardCharsets.UTF_8));
    assertEquals("A", UtfTextUtils.asString(hexBytes("41"), null));
}
/**
 * Recursively populates {@code toReturn} (an instance of {@code className}, with generic
 * arguments {@code genericClasses}) from the fields of a JSON object node. Scalar nodes
 * are evaluated as typed literals, arrays become lists, nested objects are created and
 * filled recursively, and empty text nodes are skipped.
 *
 * @return the same {@code toReturn} instance, filled
 */
protected Object createAndFillObject(ObjectNode json, Object toReturn, String className, List<String> genericClasses) {
    Iterator<Map.Entry<String, JsonNode>> fields = json.fields();
    while (fields.hasNext()) {
        Map.Entry<String, JsonNode> element = fields.next();
        String key = element.getKey();
        JsonNode jsonNode = element.getValue();
        if (isSimpleTypeNode(jsonNode)) {
            // Scalar leaf: evaluate its text against the field's declared type.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(getSimpleTypeNodeTextValue(jsonNode), fieldDescriptor.getKey()));
        } else if (jsonNode.isArray()) {
            // Array node: fill a fresh list element-by-element using the field's generics.
            List<Object> nestedList = new ArrayList<>();
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            List<Object> returnedList = createAndFillList((ArrayNode) jsonNode, nestedList, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedList);
        } else if (jsonNode.isObject()) {
            // Object node: instantiate the field's type and recurse into it.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            Object nestedObject = createObject(fieldDescriptor.getKey(), fieldDescriptor.getValue());
            Object returnedObject = createAndFillObject((ObjectNode) jsonNode, nestedObject, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedObject);
        } else if (!isEmptyText(jsonNode)) {
            // Any other non-empty node: evaluate its raw text value.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(jsonNode.textValue(), fieldDescriptor.getKey()));
        } else {
            // empty strings are skipped
        }
    }
    return toReturn;
}
// A flat object node with string values must fill a Map with one entry per JSON field.
@Test
public void convertObject_simpleList() {
    ObjectNode objectNode = new ObjectNode(factory);
    objectNode.put("key1", "Polissena");
    objectNode.put("key2", "Antonia");
    Object result = expressionEvaluator.createAndFillObject(objectNode,
            new HashMap<>(),
            Map.class.getCanonicalName(),
            List.of(String.class.getCanonicalName()));
    assertThat(result).isInstanceOf(Map.class);
    Map<String, Object> resultMap = (Map<String, Object>) result;
    assertThat(resultMap).hasSize(2).containsEntry("key1", "Polissena").containsEntry("key2", "Antonia");
}
/**
 * Computes how many cluster nodes are eligible to run the application master, taking
 * into account strict (non-ANY, relax-locality) resource requests and — when node
 * labels are enabled — the AM request's node-label expression.
 *
 * @return the number of eligible nodes; the whole cluster when nothing restricts placement
 */
public static int getApplicableNodeCountForAM(RMContext rmContext,
    Configuration conf, List<ResourceRequest> amReqs) {
  // Determine the list of nodes that are eligible based on the strict
  // resource requests
  Set<NodeId> nodesForReqs = new HashSet<>();
  for (ResourceRequest amReq : amReqs) {
    if (amReq.getRelaxLocality() &&
        !amReq.getResourceName().equals(ResourceRequest.ANY)) {
      nodesForReqs.addAll(
          rmContext.getScheduler().getNodeIds(amReq.getResourceName()));
    }
  }

  if (YarnConfiguration.areNodeLabelsEnabled(conf)) {
    // Determine the list of nodes that are eligible based on the node label
    String amNodeLabelExpression = amReqs.get(0).getNodeLabelExpression();
    Set<NodeId> nodesForLabels =
        getNodeIdsForLabel(rmContext, amNodeLabelExpression);
    if (nodesForLabels != null && !nodesForLabels.isEmpty()) {
      // If only node labels, strip out any wildcard NodeIds and return
      if (nodesForReqs.isEmpty()) {
        // Port 0 marks a wildcard NodeId, which does not represent a concrete node.
        for (Iterator<NodeId> it = nodesForLabels.iterator(); it.hasNext();) {
          if (it.next().getPort() == 0) {
            it.remove();
          }
        }
        return nodesForLabels.size();
      } else {
        // The NodeIds common to both the strict resource requests and the
        // node label is the eligible set
        return Sets.intersection(nodesForReqs, nodesForLabels).size();
      }
    }
  }

  // If no strict resource request NodeIds nor node label NodeIds, then just
  // return the entire cluster
  if (nodesForReqs.isEmpty()) {
    return rmContext.getScheduler().getNumClusterNodes();
  }
  // No node label NodeIds, so return the strict resource request NodeIds
  return nodesForReqs.size();
}
// Exercises getApplicableNodeCountForAM with every combination of strict/relaxed
// locality requests, with and without a node-label expression. The mock cluster:
// 100 nodes total; /rack1 holds host0..host28 plus node2; 80 nodes carry no
// label (host0..host18, node2, host29..host88); "label1" covers node1,
// host89..host92, host19..host28 and two wildcard (port 0) entries.
@Test
public void testGetApplicableNodeCountForAMLocalityAndLabels() throws Exception {
    List<NodeId> rack1Nodes = new ArrayList<>();
    for (int i = 0; i < 29; i++) {
        rack1Nodes.add(NodeId.newInstance("host" + i, 1234));
    }
    NodeId node1 = NodeId.newInstance("node1", 1234);
    NodeId node2 = NodeId.newInstance("node2", 1234);
    rack1Nodes.add(node2);
    // Unlabeled nodes: 19 rack1 hosts + node2 + 60 off-rack hosts = 80 nodes.
    Set<NodeId> noLabelNodes = new HashSet<>();
    for (int i = 0; i < 19; i++) {
        noLabelNodes.add(rack1Nodes.get(i));
    }
    noLabelNodes.add(node2);
    for (int i = 29; i < 89; i++) {
        noLabelNodes.add(NodeId.newInstance("host" + i, 1234));
    }
    // label1 nodes: node1 + 4 off-rack + 10 rack1 hosts + 2 wildcard entries.
    Set<NodeId> label1Nodes = new HashSet<>();
    label1Nodes.add(node1);
    for (int i = 89; i < 93; i++) {
        label1Nodes.add(NodeId.newInstance("host" + i, 1234));
    }
    for (int i = 19; i < 29; i++) {
        label1Nodes.add(rack1Nodes.get(i));
    }
    // Port 0 entries model wildcard node ids; label-only counts must drop them.
    label1Nodes.add(NodeId.newInstance("host101", 0));
    label1Nodes.add(NodeId.newInstance("host102", 0));
    Map<String, Set<NodeId>> label1NodesMap = new HashMap<>();
    label1NodesMap.put("label1", label1Nodes);
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
    ResourceScheduler scheduler = Mockito.mock(ResourceScheduler.class);
    Mockito.when(scheduler.getNumClusterNodes()).thenReturn(100);
    Mockito.when(scheduler.getNodeIds("/rack1")).thenReturn(rack1Nodes);
    Mockito.when(scheduler.getNodeIds("node1"))
        .thenReturn(Collections.singletonList(node1));
    Mockito.when(scheduler.getNodeIds("node2"))
        .thenReturn(Collections.singletonList(node2));
    RMContext rmContext = Mockito.mock(RMContext.class);
    Mockito.when(rmContext.getScheduler()).thenReturn(scheduler);
    RMNodeLabelsManager labMan = Mockito.mock(RMNodeLabelsManager.class);
    Mockito.when(labMan.getNodesWithoutALabel()).thenReturn(noLabelNodes);
    Mockito.when(labMan.getLabelsToNodes(Collections.singleton("label1")))
        .thenReturn(label1NodesMap);
    Mockito.when(rmContext.getNodeLabelManager()).thenReturn(labMan);
    ResourceRequest anyReq = createResourceRequest(ResourceRequest.ANY, true, null);
    // --- No label expression: counts follow the strict-locality node sets. ---
    List<ResourceRequest> reqs = new ArrayList<>();
    reqs.add(anyReq);
    Assert.assertEquals(80,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest rackReq = createResourceRequest("/rack1", true, null);
    reqs.add(rackReq);
    Assert.assertEquals(20,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    anyReq.setRelaxLocality(false);
    Assert.assertEquals(20,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(80,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest node1Req = createResourceRequest("node1", false, null);
    reqs.add(node1Req);
    Assert.assertEquals(80,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(true);
    Assert.assertEquals(0,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    Assert.assertEquals(20,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest node2Req = createResourceRequest("node2", false, null);
    reqs.add(node2Req);
    Assert.assertEquals(20,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(true);
    Assert.assertEquals(20,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    Assert.assertEquals(80,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // --- With "label1": counts become intersections with the label node set. ---
    anyReq.setNodeLabelExpression("label1");
    rackReq.setNodeLabelExpression("label1");
    node1Req.setNodeLabelExpression("label1");
    node2Req.setNodeLabelExpression("label1");
    anyReq.setRelaxLocality(true);
    reqs = new ArrayList<>();
    reqs.add(anyReq);
    // 17 label1 entries minus 2 wildcard (port 0) entries = 15.
    Assert.assertEquals(15,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    reqs.add(rackReq);
    Assert.assertEquals(10,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    anyReq.setRelaxLocality(false);
    Assert.assertEquals(10,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(15,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    reqs.add(node1Req);
    Assert.assertEquals(15,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(true);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    Assert.assertEquals(11,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    reqs.add(node2Req);
    Assert.assertEquals(11,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(true);
    Assert.assertEquals(11,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    Assert.assertEquals(0,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    Assert.assertEquals(15,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
}
/**
 * Creates selection-dialog options.
 *
 * @param selectionOperation the kind of selection (file, folder, ...)
 * @param filters            allowed file filters
 * @param defaultFilter      the filter preselected in the dialog
 * @param providerFilters    VFS providers the dialog is restricted to
 * @param useSchemaPath      whether paths should keep their schema prefix
 */
public SelectionAdapterOptions(SelectionOperation selectionOperation,
                               String[] filters,
                               String defaultFilter,
                               String[] providerFilters,
                               boolean useSchemaPath) {
    // Plain field assignments; order is irrelevant.
    this.useSchemaPath = useSchemaPath;
    this.providerFilters = providerFilters;
    this.defaultFilter = defaultFilter;
    this.filters = filters;
    this.selectionOperation = selectionOperation;
}
// Exercises the FilterType[]-based constructor overload: filter enums are
// expected to be exposed via getFilters() as their String names, while every
// other argument is stored and returned unchanged.
@Test
public void testSelectionAdapterOptions() {
    SelectionOperation selectionOperation = SelectionOperation.FILE;
    FilterType[] filterTypes = new FilterType[] { FilterType.ALL, FilterType.CSV, FilterType.TXT };
    FilterType defaultFilter = FilterType.CSV;
    String[] providerFilters = new String[] { "local" };
    boolean useSchemaPath = true;
    String[] expectedFilterTypes = new String[] { "ALL", "CSV", "TXT" };
    SelectionAdapterOptions selectionAdapterOptions = new SelectionAdapterOptions( selectionOperation, filterTypes, defaultFilter, providerFilters, useSchemaPath );
    assertEquals( SelectionOperation.FILE, selectionAdapterOptions.getSelectionOperation() );
    assertArrayEquals( expectedFilterTypes, selectionAdapterOptions.getFilters() );
    assertEquals( defaultFilter.toString(), selectionAdapterOptions.getDefaultFilter() );
    assertArrayEquals( providerFilters , selectionAdapterOptions.getProviderFilters() );
    assertTrue( selectionAdapterOptions.getUseSchemaPath() );
}
// Constant-folding implementation shared by subdate/date_sub/days_sub:
// subtracts `day` days from a DATETIME constant. All three are monotonic,
// which the optimizer exploits for range pruning.
@ConstantFunction.List(list = {
        @ConstantFunction(name = "subdate", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true),
        @ConstantFunction(name = "date_sub", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true),
        @ConstantFunction(name = "days_sub", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
})
public static ConstantOperator daysSub(ConstantOperator date, ConstantOperator day) {
    // createDatetimeOrNull presumably yields a NULL constant when the result
    // falls outside the supported datetime range — TODO confirm.
    return ConstantOperator.createDatetimeOrNull(date.getDatetime().minusDays(day.getInt()));
}
// Subtracting 10 days from 2015-03-23T09:23:55 must give 2015-03-13T09:23:55.
@Test
public void daysSub() {
    assertEquals("2015-03-13T09:23:55", ScalarOperatorFunctions.daysSub(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
}
/**
 * Cluster-aware RENAME: when source and destination keys hash to the same
 * slot, the plain (non-cluster) rename is used; otherwise the key is moved
 * across slots via DUMP + RESTORE (carrying over the remaining TTL) and the
 * original key is deleted afterwards.
 */
@Override
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
    return execute(commands, command -> {
        Assert.notNull(command.getKey(), "Key must not be null!");
        Assert.notNull(command.getNewName(), "New name must not be null!");
        byte[] keyBuf = toByteArray(command.getKey());
        byte[] newKeyBuf = toByteArray(command.getNewName());
        // Same slot: a regular RENAME works, delegate to the non-cluster path.
        if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
            return super.rename(commands);
        }
        // Cross-slot: serialize the value, pair it with its TTL (0 = no expiry,
        // negative TTLs are clamped to 0), then RESTORE under the new key.
        return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
                .filter(Objects::nonNull)
                .zipWith(
                        Mono.defer(() -> pTtl(command.getKey())
                                .filter(Objects::nonNull)
                                .map(ttl -> Math.max(0, ttl))
                                .switchIfEmpty(Mono.just(0L))
                        )
                )
                .flatMap(valueAndTtl -> {
                    return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
                })
                .thenReturn(new BooleanResponse<>(command, true))
                // Delete the old key only after the RESTORE pipeline succeeded.
                .doOnSuccess((ignored) -> del(command.getKey()));
    });
}
// Renaming a non-existent key: same-slot renames delegate to the plain Redis
// RENAME and therefore surface a Redis error; cross-slot renames go through the
// DUMP/RESTORE path, which reports success with no value under the new key.
@Test
public void testRename_keyNotExist() {
    Integer originalSlot = getSlotForKey(originalKey);
    newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
    if (sameSlot) {
        // This is a quirk of the implementation - since same-slot renames use the non-cluster version,
        // the result is a Redis error. This behavior matches other spring-data-redis implementations
        assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block())
            .isInstanceOf(RedisSystemException.class);
    } else {
        Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
        assertThat(response).isTrue();
        final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
        assertThat(newKeyValue).isEqualTo(null);
    }
}
/**
 * Creates a pull subscription on a topic previously created by this manager.
 *
 * @param topicName        the topic to subscribe to; must have been created
 *                         through this instance
 * @param subscriptionName human-readable subscription name; must be non-empty
 * @return the fully-qualified name of the created subscription
 * @throws IllegalArgumentException if the topic is not managed by this instance
 */
public SubscriptionName createSubscription(TopicName topicName, String subscriptionName) {
    checkArgument(!subscriptionName.isEmpty(), "subscriptionName can not be empty");
    checkIsUsable();
    // Refuse topics this manager did not create, so cleanup stays complete.
    if (!createdTopics.contains(topicName)) {
        throw new IllegalArgumentException(
            "Can not create a subscription for a topic not managed by this instance.");
    }
    LOG.info("Creating subscription '{}' for topic '{}'", subscriptionName, topicName);
    // DEADLINE_EXCEEDED from the admin API is retried via Failsafe.
    Subscription subscription =
        Failsafe.with(retryOnDeadlineExceeded())
            .get(
                () -> subscriptionAdminClient.createSubscription(
                    getSubscriptionName(subscriptionName),
                    topicName,
                    PushConfig.getDefaultInstance(),
                    DEFAULT_ACK_DEADLINE_SECONDS));
    SubscriptionName reference = PubsubUtils.toSubscriptionName(subscription);
    // Track the subscription so it can be torn down with this manager.
    createdSubscriptions.add(getSubscriptionName(subscriptionName));
    LOG.info(
        "Subscription '{}' for topic '{}' was created successfully!",
        subscription.getName(),
        topicName);
    return reference;
}
// Creating a subscription for a topic that this manager never created must
// fail fast with an IllegalArgumentException explaining the reason.
@Test
public void testCreateSubscriptionUnmanagedTopicShouldFail() {
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> testManager.createSubscription(
                TopicName.of(PROJECT_ID, "topic-name"), "subscription-name"));
    assertThat(exception).hasMessageThat().contains("topic not managed");
}
@SuppressWarnings("unchecked") public static <T> T deep(final T input) { if (input instanceof Map<?, ?>) { return (T) deepMap((Map<?, ?>) input); } else if (input instanceof List<?>) { return (T) deepList((List<?>) input); } else if (input instanceof RubyString) { // new instance but sharing ByteList (until either String is modified) return (T) ((RubyString) input).dup(); } else if (input instanceof Collection<?>) { throw new ClassCastException("unexpected Collection type " + input.getClass()); } return input; }
// deep() on a RubyString must return a distinct object whose content matches
// the original (dup semantics: new wrapper, shared backing until mutation).
@Test
public void testRubyStringCloning() {
    String javaString = "fooBar";
    RubyString original = RubyString.newString(RubyUtil.RUBY, javaString);
    RubyString result = Cloner.deep(original);
    // Check object identity
    assertNotSame(original, result);
    // Check string equality
    assertEquals(original, result);
    assertEquals(javaString, result.asJavaString());
}
/**
 * Connects to the given URI without an explicit I/O thread.
 *
 * @deprecated delegates to the five-argument overload with a {@code null}
 *             third argument (presumably the SSL/IoThread parameter — confirm
 *             against that overload's signature).
 */
@Deprecated
public IoFuture<ClientConnection> connect(final URI uri, final XnioWorker worker, ByteBufferPool bufferPool, OptionMap options) {
    return connect(uri, worker, null, bufferPool, options);
}
// Connecting over HTTPS with the client's default SSL context (Http2Client.SSL)
// and HTTP/2 enabled should open a usable connection to the local test server.
@Test
public void default_group_key_is_used_in_Http2Client_SSL() throws Exception{
    final Http2Client client = createClient();
    final ClientConnection connection = client.connect(new URI("https://localhost:7778"), worker, Http2Client.SSL, Http2Client.BUFFER_POOL, OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)).get();
    assertTrue(connection.isOpen());
    // Always release the connection so the test worker can shut down cleanly.
    IoUtils.safeClose(connection);
}
/**
 * Matches an entry when (a) this filter has no key, or its key equals the
 * entry's key, AND (b) the wrapped predicate accepts the entry.
 */
@Override
public boolean eval(Object arg) {
    final QueryableEntry entry = (QueryableEntry) arg;
    final Data keyData = entry.getKeyData();
    // Key mismatch short-circuits: the predicate is never consulted, exactly
    // as in the original conjunction.
    if (key != null && !key.equals(keyData)) {
        return false;
    }
    return predicate.apply((Map.Entry) arg);
}
// When the filter carries a key, eval() must return true for an entry whose
// serialized key is equal (not identical) and whose predicate matches.
@Test
public void testEval_givenFilterContainsKey_whenKeyOfEntryIsEqualAndPredicacteIsMatching_thenReturnTrue() {
    //given
    Data key1 = serializationService.toData("key1");
    Predicate<Object, Object> predicate = Predicates.alwaysTrue();
    QueryEventFilter filter = new QueryEventFilter(key1, predicate, true);
    //when — a second, equal-but-distinct Data instance for the same key
    Data key2 = serializationService.toData("key1");
    QueryableEntry entry = mockEntryWithKeyData(key2);
    //then
    boolean result = filter.eval(entry);
    assertTrue(result);
}
/**
 * Extracts a list of maps from {@code map} under {@code name}.
 *
 * Semantics of absence/null:
 * - key missing entirely -> {@code defaultValue};
 * - key present with a Java {@code null} value -> IncorrectTypeException;
 * - JSON-literal null (Data.isNull) -> empty list;
 * - any non-List value, or a List with non-Map elements -> IncorrectTypeException.
 */
public static @Nullable List<Map<String, Object>> getListOfMaps(
    Map<String, Object> map, String name, @Nullable List<Map<String, Object>> defaultValue) {
    @Nullable Object value = map.get(name);
    if (value == null) {
        // Distinguish "absent" from "explicitly mapped to null".
        if (map.containsKey(name)) {
            throw new IncorrectTypeException(name, map, "a list");
        }
        return defaultValue;
    }
    if (Data.isNull(value)) {
        // This is a JSON literal null. When represented as a list,
        // this is an empty list.
        return Collections.emptyList();
    }
    if (!(value instanceof List)) {
        throw new IncorrectTypeException(name, map, "a list");
    }
    List<?> elements = (List<?>) value;
    // Validate every element before the unchecked cast below.
    for (Object elem : elements) {
        if (!(elem instanceof Map)) {
            throw new IncorrectTypeException(name, map, "a list of Map objects");
        }
    }
    @SuppressWarnings("unchecked")
    List<Map<String, Object>> result = (List<Map<String, Object>>) elements;
    return result;
}
// Happy path: a key mapped to a list of cloud objects round-trips; a scalar
// value under the key must be rejected with a "not a list" error.
@Test
public void testGetListOfMaps() throws Exception {
    Map<String, Object> o = makeCloudDictionary();
    Assert.assertEquals(makeCloudObjects(), getListOfMaps(o, "multipleObjectsKey", null));
    try {
        getListOfMaps(o, "singletonLongKey", null);
        Assert.fail("should have thrown an exception");
    } catch (Exception exn) {
        assertThat(exn.toString(), Matchers.containsString("not a list"));
    }
}
/**
 * Maps a protobuf NamenodeRoleProto to the internal NamenodeRole enum.
 * Returns {@code null} for any proto value without a known mapping.
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
        return NamenodeRole.NAMENODE;
    case BACKUP:
        return NamenodeRole.BACKUP;
    case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
    default:
        // Unknown proto value — same fall-through-to-null behavior as before.
        return null;
    }
}
// Round-trips DatanodeInfo through its protobuf form, then verifies that a
// proto built WITHOUT the nonDfsUsed field still converts back to the same
// derived nonDfsUsed value (capacity - dfsUsed - remaining compatibility path).
@Test
public void testDataNodeInfoPBHelper() {
    DatanodeID id = DFSTestUtil.getLocalDatanodeID();
    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id)
        .build();
    dnInfos0.setCapacity(3500L);
    dnInfos0.setDfsUsed(1000L);
    dnInfos0.setNonDfsUsed(2000L);
    dnInfos0.setRemaining(500L);
    HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
    DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
    compare(dnInfos0, dnInfos1);
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
    //Testing without nonDfs field
    HdfsProtos.DatanodeInfoProto.Builder b = HdfsProtos.DatanodeInfoProto.newBuilder();
    b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L)
        .setRemaining(500L);
    DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
// Entry point for order placement: hands the order to the shipping workflow.
void placeOrder(Order order) {
    sendShippingRequest(order);
}
// Stress-tests placeOrder across scaled time limits (factor 0.1..1.9), every
// configured exception, and all four success/failure DB flag combinations.
// In every case an order id must be assigned regardless of failures.
@Test
void testPlaceOrderWithDatabaseAndExceptions() throws Exception {
    long paymentTime = timeLimits.paymentTime();
    long queueTaskTime = timeLimits.queueTaskTime();
    long messageTime = timeLimits.messageTime();
    long employeeTime = timeLimits.employeeTime();
    long queueTime = timeLimits.queueTime();
    for (double d = 0.1; d < 2; d = d + 0.1) {
        // Scale all time limits by the current factor (note: compounds across
        // iterations because the previous scaled value is multiplied again).
        paymentTime *= d;
        queueTaskTime *= d;
        messageTime *= d;
        employeeTime *= d;
        queueTime *= d;
        for (Exception e : exceptionList) {
            // success DB + success flag
            Commander c = buildCommanderObjectWithDB(true, true, e);
            var order = new Order(new User("K", null), "pen", 1f);
            for (Order.MessageSent ms : Order.MessageSent.values()) {
                c.placeOrder(order);
                assertFalse(StringUtils.isBlank(order.id));
            }
            // success DB + failure flag
            c = buildCommanderObjectWithDB(true, false, e);
            order = new Order(new User("K", null), "pen", 1f);
            for (Order.MessageSent ms : Order.MessageSent.values()) {
                c.placeOrder(order);
                assertFalse(StringUtils.isBlank(order.id));
            }
            // failure DB + failure flag
            c = buildCommanderObjectWithDB(false, false, e);
            order = new Order(new User("K", null), "pen", 1f);
            for (Order.MessageSent ms : Order.MessageSent.values()) {
                c.placeOrder(order);
                assertFalse(StringUtils.isBlank(order.id));
            }
            // failure DB + success flag
            c = buildCommanderObjectWithDB(false, true, e);
            order = new Order(new User("K", null), "pen", 1f);
            for (Order.MessageSent ms : Order.MessageSent.values()) {
                c.placeOrder(order);
                assertFalse(StringUtils.isBlank(order.id));
            }
        }
    }
}
/**
 * Lazily resolves and caches the certificate verifier for the requested
 * category. The implementation class name is read from a system property
 * (INDIVIDUAL_CERT_VERIFIER_CLASS_NAME or CORPORATE_CERT_VERIFIER_CLASS_NAME).
 *
 * NOTE(review): the lazy initialization of the static fields is not
 * synchronized — concurrent first calls may race and load the class twice.
 * Confirm whether callers are single-threaded before relying on this.
 *
 * @param isIndividual true for the individual verifier, false for corporate
 * @return the cached verifier instance for the category
 */
public static CommonCertificateVerifier getVerifier(boolean isIndividual) {
    if (isIndividual) {
        if (individualVerifier == null) {
            individualVerifier = loadClass(System.getProperty("INDIVIDUAL_CERT_VERIFIER_CLASS_NAME"));
        }
        return individualVerifier;
    } else {
        if (corporateVerifier == null) {
            corporateVerifier = loadClass(System.getProperty("CORPORATE_CERT_VERIFIER_CLASS_NAME"));
        }
        return corporateVerifier;
    }
}
// With the cached singleton cleared and (presumably) no
// INDIVIDUAL_CERT_VERIFIER_CLASS_NAME property set, loadClass is expected to
// fall back to the null-object verifier — TODO confirm loadClass's fallback.
@Test
public void testGetVerifierIsInstanceOfNullJuridicalCertificateVerifierOnInvalidEnvironment() throws Exception {
    // Reset the lazily-initialized static field via reflection so getVerifier
    // performs a fresh lookup.
    Field singleton = CertificateVerifierHolder.class.getDeclaredField("individualVerifier");
    singleton.setAccessible(true);
    singleton.set(null, null);
    CommonCertificateVerifier verifier = CertificateVerifierHolder.getVerifier(true);
    assertThat(verifier).isInstanceOf(NullCommonCertificateVerifier.class);
}
/**
 * Reads the Export-Package entries from a bundle's OSGi manifest.
 * Returns an empty list for jars without an OSGi manifest; wraps any parse
 * failure in a RuntimeException naming the offending jar.
 */
static List<Export> exportedPackages(File jarFile) {
    var manifest = getOsgiManifest(jarFile);
    if (manifest == null) {
        return List.of();
    }
    try {
        return parseExports(manifest);
    } catch (Exception e) {
        var message = String.format("Invalid manifest in bundle '%s'", jarFile.getPath());
        throw new RuntimeException(message, e);
    }
}
// A jar with a malformed Export-Package header must surface a RuntimeException
// naming the jar, with the parser's error preserved as the cause.
@Test
void require_that_invalid_exports_throws_exception() {
    try {
        AnalyzeBundle.exportedPackages(jarFile("errorExport.jar"));
        fail();
    } catch (RuntimeException e) {
        assertTrue(e.getMessage().contains("Invalid manifest in bundle 'src/test/resources/jar/errorExport.jar'"));
        assertTrue(e.getCause().getMessage().startsWith("Failed parsing Export-Package"), e.getCause().getMessage());
    }
}
/**
 * Actuator read operation exposing the routing rules configured from the
 * local service towards {@code dstService}. A blank destination yields an
 * empty result map (no "routerRules" key); otherwise "routerRules" is always
 * present, possibly as an empty list.
 */
@ReadOperation
public Map<String, Object> router(@Selector String dstService) {
    Map<String, Object> result = new HashMap<>();
    if (!StringUtils.hasText(dstService)) {
        // Blank destination: return without a "routerRules" entry.
        return result;
    }
    List<RoutingProto.Route> routerRules = serviceRuleManager.getServiceRouterRule(
            MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE, dstService);
    List<Object> rules = new LinkedList<>();
    result.put("routerRules", rules);
    if (CollectionUtils.isEmpty(routerRules)) {
        return result;
    }
    for (RoutingProto.Route route : routerRules) {
        rules.add(parseRouterRule(route));
    }
    return result;
}
// With no configured rules, the endpoint must still expose an empty
// "routerRules" list rather than omitting the key or returning null.
@Test
public void testHasNotRouterRule() {
    List<RoutingProto.Route> routes = new LinkedList<>();
    when(serviceRuleManager.getServiceRouterRule(anyString(), anyString(), anyString())).thenReturn(routes);
    Map<String, Object> actuator = polarisRouterEndpoint.router(testDestService);
    assertThat(actuator.get("routerRules")).isNotNull();
    assertThat(((List<?>) actuator.get("routerRules")).size()).isEqualTo(0);
}
// Convenience overload: runs the indexed iteration on the shared fork-join
// pool instead of requiring callers to supply one.
public static <T> void forEachWithIndex(Iterable<T> iterable, ObjectIntProcedure<? super T> procedure) {
    FJIterate.forEachWithIndex(iterable, procedure, FJIterate.FORK_JOIN_POOL);
}
// An exception thrown by the per-element procedure must propagate out of the
// parallel forEachWithIndex as a RuntimeException rather than being swallowed.
@Test
public void testForEachWithIndexException() {
    assertThrows(RuntimeException.class, () -> FJIterate.forEachWithIndex(
        FJIterateTest.createIntegerList(5),
        new PassThruObjectIntProcedureFactory<>(EXCEPTION_OBJECT_INT_PROCEDURE),
        new PassThruCombiner<>(),
        1,
        5));
}
/**
 * Parses {@code str} as a double, rejecting the non-finite values that
 * {@link Double#parseDouble} would otherwise accept.
 *
 * @param str the textual double value
 * @return the parsed finite double
 * @throws NumberFormatException if the text is unparseable, infinite, or NaN
 */
public static double parseDouble(final String str) {
    final double parsed = Double.parseDouble(str);
    // Double.isFinite(d) == !isInfinite(d) && !isNaN(d) — same rejection set.
    if (!Double.isFinite(parsed)) {
        throw new NumberFormatException("Invalid double value: " + str);
    }
    return parsed;
}
// SQL doubles must be finite: the "Infinity"/"-Infinity" literals accepted by
// Double.parseDouble have to be rejected with NumberFormatException.
@Test
public void shouldThrowOnInfinity() {
    assertThrows(NumberFormatException.class, () -> SqlDoubles.parseDouble("Infinity"));
    assertThrows(NumberFormatException.class, () -> SqlDoubles.parseDouble("-Infinity"));
}
/**
 * Resolves the {@code HttpTracing} instance from the given application
 * context; resolution strategy is implementation-specific.
 */
abstract HttpTracing httpTracing(ApplicationContext ctx);
// The WebMvc25 strategy must resolve HttpTracing by bean name: it checks for
// a bean literally named "httpTracing" and fetches it — nothing else.
@Test
void WebMvc25_httpTracing_byName() {
    ApplicationContext context = mock(ApplicationContext.class);
    when(context.containsBean("httpTracing")).thenReturn(true);
    when(context.getBean("httpTracing")).thenReturn(mock(HttpTracing.class));
    new WebMvc25().httpTracing(context);
    verify(context).containsBean("httpTracing");
    verify(context).getBean("httpTracing");
    // No other context interactions are allowed (e.g. no type-based lookup).
    verifyNoMoreInteractions(context);
}
/**
 * Application entry point: builds a worker registration form from the demo
 * constants and submits it.
 */
public static void main(String[] args) {
    var registrationForm = new RegisterWorkerForm(NAME, OCCUPATION, DATE_OF_BIRTH);
    registrationForm.submit();
}
// Smoke test: running the application's main must not throw.
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Builds the joined stream for this join node. Co-partitioning (equal
 * partition counts on both source topics) is only enforced for non-foreign-key
 * joins; the concrete joiner is selected from the data-source types of the
 * left and right inputs.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }
    final JoinerFactory joinerFactory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));
    // Dispatch on (left type, right type) — e.g. stream-table vs table-table.
    return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join();
}
// A stream-table LEFT join on a synthesized key must build via the stream's
// leftJoin against the table with the synthetic key and the value format.
@Test
public void shouldHandleJoinIfTableHasNoKeyAndJoinFieldIsRowKey() {
    // Given:
    setupStream(left, leftSchemaKStream);
    setupTable(right, rightSchemaKTable);
    final JoinNode joinNode = new JoinNode(nodeId, LEFT, joinKey, true, left, right, empty(), "KAFKA");
    // When:
    joinNode.buildStream(planBuildContext);
    // Then:
    verify(leftSchemaKStream).leftJoin(
        rightSchemaKTable,
        SYNTH_KEY,
        VALUE_FORMAT.getFormatInfo(),
        CONTEXT_STACKER
    );
}
/**
 * Builds a signature string of the form {@code name(type1,type2,...)} using
 * the JVM class names of the parameter types (e.g. {@code [I} for
 * {@code int[]}). A {@code null} or empty parameter array yields
 * {@code name()}.
 *
 * @param methodName     the method name to prefix
 * @param parameterTypes the parameter types, may be null
 * @return the signature string
 */
public static String getSignature(String methodName, Class<?>[] parameterTypes) {
    StringBuilder signature = new StringBuilder(methodName).append('(');
    if (parameterTypes != null) {
        for (int i = 0; i < parameterTypes.length; i++) {
            if (i > 0) {
                signature.append(',');
            }
            signature.append(parameterTypes[i].getName());
        }
    }
    return signature.append(')').toString();
}
// Signature building must use JVM class names: int[] is encoded as "[I".
@Test
void testGetSignature() throws Exception {
    Method m = Foo2.class.getDeclaredMethod("hello", int[].class);
    assertThat(ReflectUtils.getSignature("greeting", m.getParameterTypes()), equalTo("greeting([I)"));
}
/**
 * Pre-invocation hook: extracts the application name from the first argument
 * (a Dubbo URL/config object) and records it in the cache keyed by the
 * service interface.
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    Object[] arguments = context.getArguments();
    String application = DubboReflectUtils.getParameter(arguments[0], APPLICATION_KEY);
    // Save the mapping between the service interface and the application name.
    DubboCache.INSTANCE.putApplication(DubboReflectUtils.getServiceInterface(arguments[0]), application);
    return context;
}
// After before() runs on an Apache Dubbo URL, the cache must map the service
// interface to the application name carried in the URL.
@Test
public void testBefore() {
    Object[] arguments = new Object[1];
    arguments[0] = APACHE_URL;
    ExecuteContext context = ExecuteContext.forMemberMethod(new Object(), null, arguments, null, null);
    interceptor.before(context);
    Assert.assertEquals(SERVICE_NAME, DubboCache.INSTANCE.getApplication(SERVICE_INTERFACE));
}
/**
 * Collects the display data registered by {@code component} (and anything it
 * transitively includes) into an immutable {@link DisplayData} instance.
 *
 * @throws NullPointerException if {@code component} is null
 */
public static DisplayData from(HasDisplayData component) {
    checkNotNull(component, "component argument cannot be null");
    InternalBuilder builder = new InternalBuilder();
    // Start population at the root path; nested components extend the path.
    builder.include(Path.root(), component);
    return builder.build();
}
// An exception thrown by a nested included component must surface as the
// direct cause — not wrapped once per nesting level.
@Test
public void testExceptionsNotWrappedRecursively() {
    final RuntimeException cause = new RuntimeException("oh noes!");
    HasDisplayData component =
        new HasDisplayData() {
            @Override
            public void populateDisplayData(DisplayData.Builder builder) {
                builder.include(
                    "p",
                    new HasDisplayData() {
                        @Override
                        public void populateDisplayData(DisplayData.Builder builder) {
                            throw cause;
                        }
                    });
            }
        };
    thrown.expectCause(is(cause));
    DisplayData.from(component);
}
/**
 * Returns a matcher that matches transforms whose runtime class is exactly
 * {@code clazz} (no subclass matching).
 */
public static PTransformMatcher classEqualTo(Class<? extends PTransform> clazz) {
    return new EqualClassPTransformMatcher(clazz);
}
// A matcher for ParDo.SingleOutput must not match an application of an
// unrelated transform class (Window.into).
@Test
public void classEqualToDoesNotMatchUnrelatedClass() {
    PTransformMatcher matcher = PTransformMatchers.classEqualTo(ParDo.SingleOutput.class);
    AppliedPTransform<?, ?, ?> application =
        getAppliedTransform(Window.<KV<String, Integer>>into(new GlobalWindows()));
    assertThat(matcher.matches(application), is(false));
}
/**
 * Builds the replication status for this Eureka server: which peer replicas
 * are registered, which are reachable, and — only when a minimum-peers
 * threshold is configured — an overall healthy flag.
 */
public StatusInfo getStatusInfo() {
    StatusInfo.Builder builder = StatusInfo.Builder.newBuilder();
    // Add application level status
    int upReplicasCount = 0;
    StringBuilder upReplicas = new StringBuilder();
    StringBuilder downReplicas = new StringBuilder();
    StringBuilder replicaHostNames = new StringBuilder();
    for (PeerEurekaNode node : peerEurekaNodes.getPeerEurekaNodes()) {
        // Registered list is ", "-separated; up/down lists end each entry with
        // a ',' (so they carry a trailing comma when non-empty).
        if (replicaHostNames.length() > 0) {
            replicaHostNames.append(", ");
        }
        replicaHostNames.append(node.getServiceUrl());
        if (isReplicaAvailable(node.getServiceUrl())) {
            upReplicas.append(node.getServiceUrl()).append(',');
            upReplicasCount++;
        } else {
            downReplicas.append(node.getServiceUrl()).append(',');
        }
    }
    builder.add("registered-replicas", replicaHostNames.toString());
    builder.add("available-replicas", upReplicas.toString());
    builder.add("unavailable-replicas", downReplicas.toString());
    // Only set the healthy flag if a threshold has been configured.
    if (peerEurekaNodes.getMinNumberOfAvailablePeers() > -1) {
        builder.isHealthy(upReplicasCount >= peerEurekaNodes.getMinNumberOfAvailablePeers());
    }
    builder.withInstanceInfo(this.instanceInfo);
    return builder.build();
}
@Test public void testGetStatusInfoUnsetHealth() { StatusUtil statusUtil = getStatusUtil(5, 3, -1); StatusInfo statusInfo = statusUtil.getStatusInfo(); try { statusInfo.isHealthy(); } catch (NullPointerException e) { // Expected that the healthy flag is not set when the minimum value is -1 return; } fail("Excpected NPE to be thrown when healthy threshold is not set"); }
/**
 * Fails if the map under test is not empty; equivalent to asserting exact
 * equality with an empty map.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly() {
    return containsExactlyEntriesIn(ImmutableMap.of());
}
// A containsExactly failure with both a wrong value ("march") and an
// unexpected key ("feb") must report both problems, in that order, with the
// documented fact keys and values.
@Test
public void containsExactlyExtraKeyAndWrongValue() {
    ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2, "march", 3);
    expectFailureWhenTestingThat(actual).containsExactly("jan", 1, "march", 33);
    assertFailureKeys(
        "keys with wrong values",
        "for key",
        "expected value",
        "but got value",
        "unexpected keys",
        "for key",
        "unexpected value",
        "---",
        "expected",
        "but was");
    // "for key" appears twice: index 0 is the wrong-value key, index 1 the
    // unexpected key.
    assertFailureValueIndexed("for key", 0, "march");
    assertFailureValue("expected value", "33");
    assertFailureValue("but got value", "3");
    assertFailureValueIndexed("for key", 1, "feb");
    assertFailureValue("unexpected value", "2");
}
/**
 * Handles a SchemaAddedEvent: registers the new schema on its database and
 * refreshes the statistics data afterwards. Synchronized so concurrent
 * metadata events are applied serially.
 */
@Subscribe
public synchronized void renew(final SchemaAddedEvent event) {
    contextManager.getMetaDataContextManager().getSchemaMetaDataManager().addSchema(event.getDatabaseName(), event.getSchemaName());
    refreshShardingSphereStatisticsData();
}
// Exercises the SchemaDeletedEvent overload of renew (not the SchemaAddedEvent
// one): an existing schema must be dropped from its database.
@Test
void assertRenewForSchemaDeleted() {
    when(contextManager.getMetaDataContexts().getMetaData().getDatabase("db").containsSchema("foo_schema")).thenReturn(true);
    subscriber.renew(new SchemaDeletedEvent("db", "foo_schema"));
    verify(contextManager.getMetaDataContexts().getMetaData().getDatabase("db")).dropSchema("foo_schema");
}
/** Registers this collector with the default Prometheus registry. */
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
// Rule labels with an empty name ('': v) or an empty value (l: '') must be
// dropped: the resulting "foo" sample has no labels at all. The YAML config
// uses backticks as quote placeholders, replaced with '"' before parsing.
@Test
public void testEmptyLabelsAreIgnored() throws Exception {
    new JmxCollector(
            "\n---\nrules:\n- pattern: `^hadoop<service=DataNode, name=DataNodeActivity-ams-hdd001-50010><>replaceBlockOpMinTime:`\n name: foo\n labels:\n '': v\n l: ''"
                .replace('`', '"'))
        .register(prometheusRegistry);
    assertEquals(200, getSampleValue("foo", new String[] {}, new String[] {}), .001);
}
/**
 * TransferShedder core: computes the set of unload (or transfer) decisions that
 * move traffic from the most-loaded broker towards the least-loaded one.
 *
 * High-level flow visible in the code below:
 *  1. Fetch available brokers (bail out on timeout/failure, counting Failure/Unknown).
 *  2. Update load stats; skip the run when the stats update reports a skip reason,
 *     or when the shedding condition has not been hit often enough
 *     (unloadConditionHitCount vs. configured threshold).
 *  3. Loop: while transferable brokers remain and the cluster is imbalanced
 *     (std > target, or min broker underloaded, or max broker overloaded),
 *     poll the max broker, pair it with the current min broker, and pick top
 *     bundles to offload until roughly half the load gap is marked to move.
 *     Bundles are skipped when not owned by the max broker, recently unloaded,
 *     or blocked by (anti-)affinity policies; a sole bundle cannot be shed.
 *     When transfer mode is on, it may also "swap" bundles from the min broker
 *     back to the max broker to balance better.
 *  4. On an empty decision cache, record a skip reason (NoLoadData / NoBundles /
 *     HitCount); any Throwable is caught, logged, and counted as Failure/Unknown.
 *
 * NOTE(review): this method is flattened source — several physical line breaks
 * fall inside string literals and a mid-line // comment ("skip metrics")
 * swallows trailing statements on its line; the token stream is preserved
 * verbatim here. Not thread-safe: mutates decisionCache, stats and
 * unloadConditionHitCount fields — presumably called from a single scheduler
 * thread; confirm against the caller.
 */
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. 
Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. 
" + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? 
" to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? 
minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. 
// Swap search: tentatively move small bundles min->max while both projected
// throughputs stay under the max broker's current throughput; accept once the
// min broker's projection drops to/below the max's and max stays < 75% of its
// current throughput.
if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = 
new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); trafficMarkedToGain += minBrokerBundleSwapThroughput; } } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. 
" + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
// Verifies the shedder produces no decision when the only candidate bundles on
// the overloaded broker each exceed the target offload throughput: the run is
// counted as Skip/NoBundles instead of emitting an unload.
@Test public void testSkipBundlesGreaterThanTargetThroughputAfterSplit() { UnloadCounter counter = new UnloadCounter(); TransferShedder transferShedder = new TransferShedder(counter); var ctx = getContext(); ctx.brokerConfiguration().setLoadBalancerBrokerLoadTargetStd(0.20); var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( "broker1:8080", mock(BrokerLookupData.class), "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1, 500000000)); topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 500000000, 500000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 50, "broker1:8080")); brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 1); }
// Wraps the raw query result for mask-aware merging. NOTE(review): the
// sqlStatementContext and rule parameters are ignored here — the
// constructor-injected maskRule and selectStatementContext fields are used
// instead; presumably the SPI signature requires the parameters. Confirm this
// is intentional.
@Override public MergedResult decorate(final QueryResult queryResult, final SQLStatementContext sqlStatementContext, final MaskRule rule) { return new MaskMergedResult(maskRule, selectStatementContext, new TransparentMergedResult(queryResult)); }
// Verifies that the decorated merged result delegates next() to the underlying
// query result (stubbed to return true).
@Test void assertDecorateQueryResult() throws SQLException { QueryResult queryResult = mock(QueryResult.class); when(queryResult.next()).thenReturn(true); MaskDQLResultDecorator decorator = new MaskDQLResultDecorator(mock(MaskRule.class), mock(SelectStatementContext.class)); MergedResult actual = decorator.decorate(queryResult, mock(SQLStatementContext.class), mock(MaskRule.class)); assertTrue(actual.next()); }
// Streams the Artifactory search response and converts each result object into
// a MavenArtifact: verifies the returned checksums against the dependency,
// derives groupId/artifactId/version from the repository path via PATH_PATTERN
// (throwing IllegalStateException on a non-matching path), and throws
// FileNotFoundException when the response contains no result objects.
// try-with-resources closes both the reader and the JSON parser.
protected List<MavenArtifact> processResponse(Dependency dependency, HttpURLConnection conn) throws IOException { final List<MavenArtifact> result = new ArrayList<>(); try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8); JsonParser parser = objectReader.getFactory().createParser(streamReader)) { if (init(parser) && parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT) { // at least one result do { final FileImpl file = objectReader.readValue(parser); checkHashes(dependency, file.getChecksums()); final Matcher pathMatcher = PATH_PATTERN.matcher(file.getPath()); if (!pathMatcher.matches()) { throw new IllegalStateException("Cannot extract the Maven information from the path " + "retrieved in Artifactory " + file.getPath()); } final String groupId = pathMatcher.group("groupId").replace('/', '.'); final String artifactId = pathMatcher.group("artifactId"); final String version = pathMatcher.group("version"); result.add(new MavenArtifact(groupId, artifactId, version, file.getDownloadUri(), MavenArtifact.derivePomUrl(artifactId, version, file.getDownloadUri()))); } while (parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT); } else { throw new FileNotFoundException("Artifact " + dependency + " not found in Artifactory"); } } return result; }
// Verifies that an empty "results" array in the Artifactory response makes
// processResponse throw FileNotFoundException with the expected message.
@Test public void shouldHandleNoMatches() throws IOException { // Given Dependency dependency = new Dependency(); dependency.setSha1sum("94a9ce681a42d0352b3ad22659f67835e560d108"); final HttpURLConnection urlConnection = mock(HttpURLConnection.class); final byte[] payload = ("{\n" + " \"results\" : [ ]}").getBytes(StandardCharsets.UTF_8); when(urlConnection.getInputStream()).thenReturn(new ByteArrayInputStream(payload)); // When try { searcher.processResponse(dependency, urlConnection); fail("No Match found, should throw an exception!"); } catch (FileNotFoundException e) { // Then assertEquals("Artifact Dependency{ fileName='null', actualFilePath='null', filePath='null', packagePath='null'} not found in Artifactory", e.getMessage()); } }
/**
 * Increments this counter by one.
 *
 * <p>Delegates to {@link #increment(long)} with a delta of one. Uses the
 * upper-case {@code L} suffix — a lowercase {@code l} is easily misread as
 * the digit {@code 1}.
 */
@Override
public void increment() {
    increment(1L);
}
// Verifies that incrementing by a negative delta is rejected with
// IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void incrementByNegativeValue() { longCounter.increment(-100l); }
@VisibleForTesting public static List<SegmentZKMetadata> getCompletedSegments(Map<String, String> taskConfigs, List<SegmentZKMetadata> allSegments, long currentTimeInMillis) { List<SegmentZKMetadata> completedSegments = new ArrayList<>(); String bufferPeriod = taskConfigs.getOrDefault(UpsertCompactionTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD); long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod); for (SegmentZKMetadata segment : allSegments) { CommonConstants.Segment.Realtime.Status status = segment.getStatus(); // initial segments selection based on status and age if (status.isCompleted() && (segment.getEndTimeMs() <= (currentTimeInMillis - bufferMs))) { completedSegments.add(segment); } } return completedSegments; }
// Exercises getCompletedSegments: two old DONE segments pass a 1d buffer while
// a just-sealed one is filtered; a segment exactly 2d-minus-1ms old is included
// at 1d but excluded once the buffer widens to 2d (boundary check).
@Test public void testGetCompletedSegments() { long currentTimeInMillis = System.currentTimeMillis(); Map<String, String> taskConfigs = new HashMap<>(); taskConfigs.put(UpsertCompactionTask.BUFFER_TIME_PERIOD_KEY, "1d"); SegmentZKMetadata metadata1 = new SegmentZKMetadata("testTable"); metadata1.setEndTime(1694198844776L); metadata1.setStatus(CommonConstants.Segment.Realtime.Status.DONE); metadata1.setTimeUnit(TimeUnit.MILLISECONDS); SegmentZKMetadata metadata2 = new SegmentZKMetadata("testTable"); metadata2.setEndTime(1699639830678L); metadata2.setStatus(CommonConstants.Segment.Realtime.Status.DONE); metadata2.setTimeUnit(TimeUnit.MILLISECONDS); SegmentZKMetadata metadata3 = new SegmentZKMetadata("testTable"); metadata3.setEndTime(currentTimeInMillis); metadata3.setStatus(CommonConstants.Segment.Realtime.Status.DONE); metadata3.setTimeUnit(TimeUnit.MILLISECONDS); List<SegmentZKMetadata> segmentZKMetadataList = new ArrayList<>(); segmentZKMetadataList.add(metadata1); segmentZKMetadataList.add(metadata2); segmentZKMetadataList.add(metadata3); List<SegmentZKMetadata> result = UpsertCompactionTaskGenerator.getCompletedSegments(taskConfigs, segmentZKMetadataList, currentTimeInMillis); Assert.assertEquals(result.size(), 2); SegmentZKMetadata metadata4 = new SegmentZKMetadata("testTable"); metadata4.setEndTime(currentTimeInMillis - TimeUtils.convertPeriodToMillis("2d") + 1); metadata4.setStatus(CommonConstants.Segment.Realtime.Status.DONE); metadata4.setTimeUnit(TimeUnit.MILLISECONDS); segmentZKMetadataList.add(metadata4); result = UpsertCompactionTaskGenerator.getCompletedSegments(taskConfigs, segmentZKMetadataList, currentTimeInMillis); Assert.assertEquals(result.size(), 3); // Check the boundary condition for buffer time period based filtering taskConfigs.put(UpsertCompactionTask.BUFFER_TIME_PERIOD_KEY, "2d"); result = UpsertCompactionTaskGenerator.getCompletedSegments(taskConfigs, segmentZKMetadataList, currentTimeInMillis); Assert.assertEquals(result.size(), 2); }
/**
 * Logs a debug message with a lazily computed argument: the supplier is only
 * invoked when debug logging is enabled, so expensive argument construction is
 * skipped otherwise.
 *
 * @param logger   target SLF4J logger
 * @param format   parameterized message format (one {} placeholder)
 * @param supplier supplies the format argument on demand
 */
public static void debug(final Logger logger, final String format, final Supplier<Object> supplier) {
    if (!logger.isDebugEnabled()) {
        // Guard clause: never evaluate the supplier when debug is off.
        return;
    }
    logger.debug(format, supplier.get());
}
// Verifies laziness: when debug is disabled, the supplier must never be invoked.
@Test public void testNeverDebugWithFormat() { when(logger.isDebugEnabled()).thenReturn(false); LogUtils.debug(logger, "testDebug: {}", supplier); verify(supplier, never()).get(); }
// Interceptor pre-hook: delegates to checkState with an empty-map Response as
// the substitute result — checkState presumably decides (based on migration /
// registration state) whether to skip the intercepted call; confirm against
// the checkState implementation. Always returns the (possibly mutated) context.
@Override protected ExecuteContext doBefore(ExecuteContext context) { checkState(context, new Response<Map<String, List<String>>>(Collections.emptyMap(), null, null, null)); return context; }
// Verifies skip behavior: by default the context is not skipped; once migration
// and spring-register are enabled and the dynamic close flag is set, doBefore
// marks the context as skipped. Resets the close flag afterwards.
@Test public void doBefore() throws NoSuchMethodException { final ExecuteContext context = interceptor.doBefore(buildContext()); Assert.assertFalse(context.isSkip()); registerConfig.setOpenMigration(true); registerConfig.setEnableSpringRegister(true); RegisterDynamicConfig.INSTANCE.setClose(true); final ExecuteContext context1 = interceptor.doBefore(buildContext()); Assert.assertTrue(context1.isSkip()); RegisterDynamicConfig.INSTANCE.setClose(false); }
/**
 * Logical OR over the given boolean values.
 *
 * @param array the values to combine; must not be null or empty
 * @return {@code true} if at least one element is {@code true}
 * @throws IllegalArgumentException if the array is null or empty
 */
public static boolean or(boolean... array) {
    if (ArrayUtil.isEmpty(array)) {
        throw new IllegalArgumentException("The Array must not be empty !");
    }
    // Short-circuit on the first true value; otherwise fall through to false.
    for (int i = 0; i < array.length; i++) {
        if (array[i]) {
            return true;
        }
    }
    return false;
}
// Verifies or() and its Boolean-wrapper variant return true when any input is true.
@Test public void orTest(){ assertTrue(BooleanUtil.or(true,false)); assertTrue(BooleanUtil.orOfWrap(true,false)); }
// Records an authentication failure at DEBUG level. Validates the request and
// exception up front (NPE on null), then bails out early when debug logging is
// disabled. Logs cause, auth method/provider, remote IPs, and the attempted
// login (flood-protected / null-safe via preventLogFlood and emptyIfNull).
@Override public void loginFailure(HttpRequest request, AuthenticationException e) { checkRequest(request); requireNonNull(e, "AuthenticationException can't be null"); if (!LOGGER.isDebugEnabled()) { return; } Source source = e.getSource(); LOGGER.debug("login failure [cause|{}][method|{}][provider|{}|{}][IP|{}|{}][login|{}]", emptyIfNull(e.getMessage()), source.getMethod(), source.getProvider(), source.getProviderName(), request.getRemoteAddr(), getAllIps(request), preventLogFlood(emptyIfNull(e.getLogin()))); }
// Verifies the null-request precondition: loginFailure(null, e) throws NPE with
// the documented message even before the debug-enabled check.
@Test public void login_failure_fails_with_NPE_if_request_is_null() { logTester.setLevel(Level.INFO); AuthenticationException exception = newBuilder().setSource(Source.sso()).build(); assertThatThrownBy(() -> underTest.loginFailure(null, exception)) .isInstanceOf(NullPointerException.class) .hasMessage("request can't be null"); }
// Reads a 4-byte little-endian value from the payload. Returns a boxed Long
// (via readUnsignedIntLE) for unsigned columns and a boxed Integer (via
// readIntLE) otherwise. The explicit if/else preserves the distinct boxed
// types — a ternary would promote both branches to long and always box a Long.
@Override public Object read(final MySQLPacketPayload payload, final boolean unsigned) { if (unsigned) { return payload.getByteBuf().readUnsignedIntLE(); } return payload.getByteBuf().readIntLE(); }
// Verifies little-endian decoding and the return-type split: the signed read
// yields Integer 1, the unsigned read yields Long 1L (from the same buffer).
@Test void assertRead() { when(payload.getByteBuf()).thenReturn(Unpooled.wrappedBuffer(new byte[]{1, 0, 0, 0, 1, 0, 0, 0})); assertThat(new MySQLInt4BinaryProtocolValue().read(payload, false), is(1)); assertThat(new MySQLInt4BinaryProtocolValue().read(payload, true), is(1L)); }
// Converts a persisted MeasureDto into a domain Measure, dispatching on the
// metric's value type (INT/LONG/DOUBLE/BOOLEAN/STRING/LEVEL/NO_VALUE). Returns
// Optional.empty() for a null DTO; the metric itself must be non-null. Throws
// IllegalArgumentException for an unsupported value type.
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getData(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(measureDto, value, data); case LONG: return toLongMeasure(measureDto, value, data); case DOUBLE: return toDoubleMeasure(measureDto, value, data); case BOOLEAN: return toBooleanMeasure(measureDto, value, data); case STRING: return toStringMeasure(measureDto, data); case LEVEL: return toLevelMeasure(measureDto, data); case NO_VALUE: return toNoValueMeasure(measureDto); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
// Verifies that a LEVEL measure is built from the alert status only: the DTO's
// data field is ignored, so getStringValue() on the result throws ISE.
@Test public void toMeasure_for_LEVEL_Metric_ignores_data() { MeasureDto measureDto = new MeasureDto().setAlertStatus(Level.ERROR.name()).setData(SOME_DATA); Optional<Measure> measure = underTest.toMeasure(measureDto, SOME_LEVEL_METRIC); assertThat(measure).isPresent(); assertThatThrownBy(() ->measure.get().getStringValue()) .isInstanceOf(IllegalStateException.class); }
// Convenience overload: validates a value against a schema with no field name
// for error reporting (delegates with a null name).
public static void validateValue(Schema schema, Object value) { validateValue(null, schema, value); }
// Verifies that a raw Long is rejected for the logical Date schema (which
// expects a java.util.Date), raising DataException.
@Test public void testValidateValueMismatchDate() { assertThrows(DataException.class, () -> ConnectSchema.validateValue(Date.SCHEMA, 1000L)); }