Columns: focal_method — string (lengths 13 to 60.9k) | test_case — string (lengths 25 to 109k)

public void setSendIfModifiedSince(boolean sendIfModifiedSince) {
  kp.put("sendIfModifiedSince", sendIfModifiedSince);
}

@Test
public void testSendIfModifiedSince() throws Exception {
  fetcher().setSendIfModifiedSince(true);
  CrawlURI curi = makeCrawlURI("http://localhost:7777/if-modified-since");
  fetcher().process(curi);
  assertFalse(httpRequestString(curi).toLowerCase().contains("if-modified-since: "));
  assertTrue(curi.getHttpResponseHeader("last-modified").equals("Thu, 01 Jan 1970 00:00:00 GMT"));
  runDefaultChecks(curi, "requestLine");
  // logger.info("before FetchHistoryProcessor fetchHistory=" + Arrays.toString(curi.getFetchHistory()));
  FetchHistoryProcessor fetchHistoryProcessor = new FetchHistoryProcessor();
  fetchHistoryProcessor.process(curi);
  // logger.info("after FetchHistoryProcessor fetchHistory=" + Arrays.toString(curi.getFetchHistory()));
  fetcher().process(curi);
  // logger.info("\n" + httpRequestString(curi));
  // logger.info("\n" + rawResponseString(curi));
  assertTrue(httpRequestString(curi).contains("If-Modified-Since: Thu, 01 Jan 1970 00:00:00 GMT\r\n"));
  assertTrue(curi.getFetchStatus() == 304);
  assertNull(curi.getRevisitProfile());
  fetchHistoryProcessor.process(curi);
  assertNotNull(curi.getRevisitProfile());
  assertTrue(curi.getRevisitProfile() instanceof ServerNotModifiedRevisit);
  ServerNotModifiedRevisit revisit = (ServerNotModifiedRevisit) curi.getRevisitProfile();
  assertEquals("Thu, 01 Jan 1970 00:00:00 GMT", revisit.getLastModified());
  assertNull(revisit.getETag());
}

public ConcurrentLongHashMap<CompletableFuture<Producer>> getProducers() {
  return producers;
}

@Test(timeOut = 30000)
public void testProducerFailureOnEncryptionRequiredTopic() throws Exception {
  resetChannel();
  setChannelConnected();
  // Set encryption_required to true
  Policies policies = mock(Policies.class);
  policies.encryption_required = true;
  policies.topicDispatchRate = new HashMap<>();
  policies.clusterSubscribeRate = new HashMap<>();
  // add `clusterDispatchRate` otherwise there will be a NPE
  // `org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.getPoliciesDispatchRate`
  policies.clusterDispatchRate = new HashMap<>();
  // add `subscriptionDispatchRate` otherwise there will be a NPE
  // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate`
  policies.subscriptionDispatchRate = new HashMap<>();
  // add `replicatorDispatchRate` otherwise there will be a NPE
  // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate`
  policies.replicatorDispatchRate = new HashMap<>();
  pulsarTestContext.getPulsarResources().getNamespaceResources()
      .createPolicies(TopicName.get(encryptionRequiredTopicName).getNamespaceObject(), policies);

  // test failure case: unencrypted producer cannot connect
  ByteBuf clientCommand = Commands.newProducer(encryptionRequiredTopicName, 2 /* producer id */,
      2 /* request id */, "unencrypted-producer", false, Collections.emptyMap(), false);
  channel.writeInbound(clientCommand);
  Object response = getResponse();
  assertEquals(response.getClass(), CommandError.class);
  CommandError errorResponse = (CommandError) response;
  assertEquals(errorResponse.getError(), ServerError.MetadataError);
  PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(encryptionRequiredTopicName).get();
  assertNotNull(topicRef);
  assertEquals(topicRef.getProducers().size(), 0);
  channel.finish();
}

@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
  try {
    final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
    final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
        WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
    StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final KafkaStreams streams = stateStore.getKafkaStreams();
    final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
        streams.query(request);
    final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
        result.getPartitionResults().get(partition);
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }
    if (queryResult.getResult() == null) {
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          Collections.emptyIterator(), queryResult.getPosition());
    }
    try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();
      while (it.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
        final Instant windowStart = Instant.ofEpochMilli(next.key);
        if (!windowStartBounds.contains(windowStart)) {
          continue;
        }
        final Instant windowEnd = windowStart.plus(windowSize);
        if (!windowEndBounds.contains(windowEnd)) {
          continue;
        }
        final TimeWindow window =
            new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
        final WindowedRow row = WindowedRow.of(
            stateStore.schema(),
            new Windowed<>(key, window),
            next.value.value(),
            next.value.timestamp()
        );
        builder.add(row);
      }
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}

@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForClosedStartBounds_fetchAll() {
  // Given:
  final Range<Instant> start = Range.closed(
      Instant.ofEpochMilli(System.currentTimeMillis()),
      NOW.plusSeconds(10)
  );
  final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> partitionResult =
      new StateQueryResult<>();
  final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> queryResult =
      QueryResult.forResult(keyValueIterator);
  queryResult.setPosition(POSITION);
  partitionResult.addResult(PARTITION, queryResult);
  when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
  when(keyValueIterator.hasNext()).thenReturn(true, true, false);
  when(keyValueIterator.next())
      .thenReturn(new KeyValue<>(new Windowed<>(A_KEY,
          new TimeWindow(start.lowerEndpoint().toEpochMilli(),
              start.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_1))
      .thenReturn(new KeyValue<>(new Windowed<>(A_KEY2,
          new TimeWindow(start.upperEndpoint().toEpochMilli(),
              start.upperEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_2))
      .thenThrow(new AssertionError());

  // When:
  final KsMaterializedQueryResult<WindowedRow> result =
      table.get(PARTITION, start, Range.all());

  // Then:
  final Iterator<WindowedRow> rowIterator = result.getRowIterator();
  assertThat(rowIterator.hasNext(), is(true));
  assertThat(rowIterator.next(), is(WindowedRow.of(
      SCHEMA,
      windowedKey(A_KEY, start.lowerEndpoint()),
      VALUE_1.value(),
      VALUE_1.timestamp())));
  assertThat(rowIterator.hasNext(), is(true));
  assertThat(rowIterator.next(), is(WindowedRow.of(
      SCHEMA,
      windowedKey(A_KEY2, start.upperEndpoint()),
      VALUE_2.value(),
      VALUE_2.timestamp())));
  assertThat(rowIterator.hasNext(), is(false));
  assertThat(result.getPosition(), not(Optional.empty()));
  assertThat(result.getPosition().get(), is(POSITION));
}

public void processGv01(Gv01 gv01) {
  String oldBsn = CategorieUtil.findBsnOudeWaarde(gv01.getCategorie());
  if (oldBsn != null && CategorieUtil.findBsn(gv01.getCategorie()) != null) {
    digidXClient.remoteLogSpontaneVerstrekking(Log.BSN_CHANGED, "Gv01", gv01.getANummer(), oldBsn);
  }
  String aNummer = gv01.getANummer();
  String redenOpschorting = CategorieUtil.findRedenOpschorting(gv01.getCategorie());
  digidXClient.setOpschortingsStatus(aNummer, redenOpschorting);
  logger.info("Finished processing Gv01 message");
}

@Test
public void testProcessGv01() {
  String testAnummer = "SSSSSSSSSS";
  String testBsnOud = "SSSSSSSSS";
  String testBsnNieuw = "SSSSSSSSS";
  Gv01 testGv01 = TestDglMessagesUtil.createTestGv01(testAnummer, "O", testBsnOud, testBsnNieuw);
  classUnderTest.processGv01(testGv01);
  verify(digidXClient, times(1)).setOpschortingsStatus(testAnummer, "O");
  verify(digidXClient, times(1)).remoteLogSpontaneVerstrekking(Log.BSN_CHANGED, "Gv01", testAnummer, testBsnOud);
}

@Override
public @Nullable V putIfAbsent(K key, V value) {
  return put(key, value, expiry(), /* onlyIfAbsent */ true);
}

@CheckMaxLogLevel(ERROR)
@Test(dataProvider = "caches")
@CacheSpec(population = Population.EMPTY, keys = ReferenceType.STRONG)
public void brokenEquality_putIfAbsent(
    BoundedLocalCache<MutableInt, Int> cache, CacheContext context) {
  testForBrokenEquality(cache, context, key -> {
    var value = cache.putIfAbsent(key, context.absentValue());
    assertThat(value).isEqualTo(context.absentValue());
  });
}

public static Instant garbageCollectionTime(
    BoundedWindow window, WindowingStrategy windowingStrategy) {
  return garbageCollectionTime(window, windowingStrategy.getAllowedLateness());
}

@Test
public void garbageCollectionTimeAfterEndOfGlobalWindowWithLateness() {
  FixedWindows windowFn = FixedWindows.of(Duration.standardMinutes(5));
  Duration allowedLateness = Duration.millis(Long.MAX_VALUE);
  WindowingStrategy<?, ?> strategy = WindowingStrategy.globalDefault()
      .withWindowFn(windowFn)
      .withAllowedLateness(allowedLateness);
  IntervalWindow window = windowFn.assignWindow(new Instant(-100));
  assertThat(
      window.maxTimestamp().plus(allowedLateness),
      Matchers.greaterThan(GlobalWindow.INSTANCE.maxTimestamp()));
  assertThat(
      LateDataUtils.garbageCollectionTime(window, strategy),
      equalTo(GlobalWindow.INSTANCE.maxTimestamp()));
}

@Override
public List<TaskProperty> getPropertiesForDisplay() {
  ArrayList<TaskProperty> taskProperties = new ArrayList<>();
  if (PluggableTaskConfigStore.store().hasPreferenceFor(pluginConfiguration.getId())) {
    TaskPreference preference = taskPreference();
    List<? extends Property> propertyDefinitions = preference.getConfig().list();
    for (Property propertyDefinition : propertyDefinitions) {
      ConfigurationProperty configuredProperty = configuration.getProperty(propertyDefinition.getKey());
      if (configuredProperty == null) continue;
      taskProperties.add(new TaskProperty(propertyDefinition.getOption(Property.DISPLAY_NAME),
          configuredProperty.getDisplayValue(), configuredProperty.getConfigKeyName()));
    }
    return taskProperties;
  }
  for (ConfigurationProperty property : configuration) {
    taskProperties.add(new TaskProperty(property.getConfigKeyName(), property.getDisplayValue()));
  }
  return taskProperties;
}

@Test
public void shouldGetOnlyConfiguredPropertiesIfACertainPropertyDefinedByPluginIsNotConfiguredByUser() throws Exception {
  Task taskDetails = mock(Task.class);
  TaskConfig taskConfig = new TaskConfig();
  addProperty(taskConfig, "KEY2", "Key 2", 1);
  addProperty(taskConfig, "KEY1", "Key 1", 0);
  addProperty(taskConfig, "KEY3", "Key 3", 2);
  when(taskDetails.config()).thenReturn(taskConfig);
  when(taskDetails.view()).thenReturn(mock(TaskView.class));
  String pluginId = "plugin_with_all_details";
  PluggableTaskConfigStore.store().setPreferenceFor(pluginId, new TaskPreference(taskDetails));
  Configuration configuration = new Configuration(
      ConfigurationPropertyMother.create("KEY1", false, "value1"),
      ConfigurationPropertyMother.create("KEY2", false, "value2")
  );
  PluggableTask task = new PluggableTask(new PluginConfiguration(pluginId, "1"), configuration);
  List<TaskProperty> propertiesForDisplay = task.getPropertiesForDisplay();
  assertThat(propertiesForDisplay.size(), is(2));
  assertProperty(propertiesForDisplay.get(0), "Key 1", "value1", "key1");
  assertProperty(propertiesForDisplay.get(1), "Key 2", "value2", "key2");
}

@Override
public String[] split(String text) {
  boundary.setText(text);
  List<String> words = new ArrayList<>();
  int start = boundary.first();
  int end = boundary.next();
  while (end != BreakIterator.DONE) {
    String word = text.substring(start, end).trim();
    if (!word.isEmpty()) {
      words.add(word);
    }
    start = end;
    end = boundary.next();
  }
  return words.toArray(new String[0]);
}

@Test
public void testSplitAbbreviation() {
  System.out.println("tokenize");
  String text = "Here are some examples of abbreviations: A.B., abbr., "
      + "acad., A.D., alt., A.M., B.C., etc.";
  String[] expResult = {"Here", "are", "some", "examples", "of", "abbreviations", ":",
      "A.B", ".", ",", "abbr", ".", ",", "acad", ".", ",", "A.D", ".", ",",
      "alt", ".", ",", "A.M", ".", ",", "B.C", ".", ",", "etc", "."};
  BreakIteratorTokenizer instance = new BreakIteratorTokenizer();
  String[] result = instance.split(text);
  assertEquals(expResult.length, result.length);
  for (int i = 0; i < result.length; i++) {
    assertEquals(expResult[i], result[i]);
  }
}

public static DataSourceProvider tryGetDataSourceProviderOrNull(Configuration hdpConfig) {
  final String configuredPoolingType = MetastoreConf.getVar(hdpConfig,
      MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE);
  return Iterables.tryFind(FACTORIES, factory -> {
    String poolingType = factory.getPoolingType();
    return poolingType != null && poolingType.equalsIgnoreCase(configuredPoolingType);
  }).orNull();
}

@Test
public void testSetHikariCpBooleanProperty() throws SQLException {
  MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
  conf.set(HikariCPDataSourceProvider.HIKARI + ".allowPoolSuspension", "false");
  conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
  DataSourceProvider dsp = DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
  Assert.assertNotNull(dsp);
  DataSource ds = dsp.create(conf);
  Assert.assertTrue(ds instanceof HikariDataSource);
  Assert.assertEquals(false, ((HikariDataSource) ds).isAllowPoolSuspension());
}

@VisibleForTesting
void forceFreeMemory() {
  memoryManager.close();
}

@Test
public void testForceFreeMemory() throws Throwable {
  BroadcastOutputBuffer buffer = createBroadcastBuffer(
      createInitialEmptyOutputBuffers(BROADCAST)
          .withBuffer(FIRST, BROADCAST_PARTITION_ID)
          .withNoMoreBufferIds(),
      sizeOfPages(5));
  for (int i = 0; i < 3; i++) {
    addPage(buffer, createPage(1), 0);
  }
  OutputBufferMemoryManager memoryManager = buffer.getMemoryManager();
  assertTrue(memoryManager.getBufferedBytes() > 0);
  buffer.forceFreeMemory();
  assertEquals(memoryManager.getBufferedBytes(), 0);
  // adding a page after forceFreeMemory() should be NOOP
  addPage(buffer, createPage(1));
  assertEquals(memoryManager.getBufferedBytes(), 0);
}

public static Optional<ScalablePushRegistry> create(
    final LogicalSchema logicalSchema,
    final Supplier<List<PersistentQueryMetadata>> allPersistentQueries,
    final boolean isTable,
    final Map<String, Object> streamsProperties,
    final Map<String, Object> consumerProperties,
    final String sourceApplicationId,
    final KsqlTopic ksqlTopic,
    final ServiceContext serviceContext,
    final KsqlConfig ksqlConfig
) {
  final Object appServer = streamsProperties.get(StreamsConfig.APPLICATION_SERVER_CONFIG);
  if (appServer == null) {
    return Optional.empty();
  }
  if (!(appServer instanceof String)) {
    throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " not String");
  }
  final URL localhost;
  try {
    localhost = new URL((String) appServer);
  } catch (final MalformedURLException e) {
    throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " malformed: "
        + "'" + appServer + "'");
  }
  final PushLocator pushLocator = new AllHostsLocator(allPersistentQueries, localhost);
  return Optional.of(new ScalablePushRegistry(
      pushLocator, logicalSchema, isTable, consumerProperties, ksqlTopic, serviceContext,
      ksqlConfig, sourceApplicationId, KafkaConsumerFactory::create, LatestConsumer::new,
      CatchupConsumer::new, Executors.newSingleThreadExecutor(),
      Executors.newScheduledThreadPool(
          ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PUSH_V2_MAX_CATCHUP_CONSUMERS))));
}

@Test
public void shouldCreate_badApplicationServer() {
  // When
  final Exception e = assertThrows(
      IllegalArgumentException.class,
      () -> ScalablePushRegistry.create(SCHEMA, Collections::emptyList, false,
          ImmutableMap.of(StreamsConfig.APPLICATION_SERVER_CONFIG, 123), ImmutableMap.of(),
          SOURCE_APP_ID, ksqlTopic, serviceContext, ksqlConfig)
  );

  // Then
  assertThat(e.getMessage(), containsString("not String"));
}

@Nullable
public String getStorageClass() {
  return _storageClass;
}

@Test
public void testDefaultStorageClassIsNull() {
  PinotConfiguration pinotConfig = new PinotConfiguration();
  S3Config cfg = new S3Config(pinotConfig);
  Assert.assertNull(cfg.getStorageClass());
}

public static Dependency parseDependency(String dependency) {
  List<String> dependencyAndExclusions = Arrays.asList(dependency.split("\\^"));
  Collection<Exclusion> exclusions = new ArrayList<>();
  for (int idx = 1; idx < dependencyAndExclusions.size(); idx++) {
    exclusions.add(AetherUtils.createExclusion(dependencyAndExclusions.get(idx)));
  }
  Artifact artifact = new DefaultArtifact(dependencyAndExclusions.get(0));
  return new Dependency(artifact, JavaScopes.COMPILE, false, exclusions);
}

@Test
public void parseDependency() {
  String testDependency = "testgroup:testartifact:1.0.0^testgroup:testexcartifact^testgroup:*";
  Dependency dependency = AetherUtils.parseDependency(testDependency);
  assertEquals("testgroup", dependency.getArtifact().getGroupId());
  assertEquals("testartifact", dependency.getArtifact().getArtifactId());
  assertEquals("1.0.0", dependency.getArtifact().getVersion());
  assertEquals(JavaScopes.COMPILE, dependency.getScope());
  assertEquals(2, dependency.getExclusions().size());
  List<Exclusion> exclusions = Lists.newArrayList(dependency.getExclusions());
  Exclusion exclusion = exclusions.get(0);
  assertEquals("testgroup", exclusion.getGroupId());
  assertEquals("testexcartifact", exclusion.getArtifactId());
  assertEquals(JavaScopes.COMPILE, dependency.getScope());
  exclusion = exclusions.get(1);
  assertEquals("testgroup", exclusion.getGroupId());
  assertEquals("*", exclusion.getArtifactId());
  assertEquals(JavaScopes.COMPILE, dependency.getScope());
}

public static void main(String[] args) {
  Map<Integer, Instance> instanceMap = new HashMap<>();
  var messageManager = new RingMessageManager(instanceMap);
  var instance1 = new RingInstance(messageManager, 1, 1);
  var instance2 = new RingInstance(messageManager, 2, 1);
  var instance3 = new RingInstance(messageManager, 3, 1);
  var instance4 = new RingInstance(messageManager, 4, 1);
  var instance5 = new RingInstance(messageManager, 5, 1);
  instanceMap.put(1, instance1);
  instanceMap.put(2, instance2);
  instanceMap.put(3, instance3);
  instanceMap.put(4, instance4);
  instanceMap.put(5, instance5);
  instance2.onMessage(new Message(MessageType.HEARTBEAT_INVOKE, ""));
  final var thread1 = new Thread(instance1);
  final var thread2 = new Thread(instance2);
  final var thread3 = new Thread(instance3);
  final var thread4 = new Thread(instance4);
  final var thread5 = new Thread(instance5);
  thread1.start();
  thread2.start();
  thread3.start();
  thread4.start();
  thread5.start();
  instance1.setAlive(false);
}

@Test
void shouldExecuteApplicationWithoutException() {
  assertDoesNotThrow(() -> RingApp.main(new String[]{}));
}

public static long validateMillisecondDuration(final Duration duration, final String messagePrefix) {
  try {
    if (duration == null) {
      throw new IllegalArgumentException(messagePrefix + VALIDATE_MILLISECOND_NULL_SUFFIX);
    }
    return duration.toMillis();
  } catch (final ArithmeticException e) {
    throw new IllegalArgumentException(messagePrefix + VALIDATE_MILLISECOND_OVERFLOW_SUFFIX, e);
  }
}

@Test
public void shouldReturnMillisecondsOnValidDuration() {
  final Duration sampleDuration = Duration.ofDays(MAX_ACCEPTABLE_DAYS_FOR_DURATION_TO_MILLIS);
  assertEquals(sampleDuration.toMillis(), validateMillisecondDuration(sampleDuration, "sampleDuration"));
}

@Override
public List<? extends Instance> listInstances(String namespaceId, String groupName,
    String serviceName, String clusterName) throws NacosException {
  Service service = Service.newService(namespaceId, groupName, serviceName);
  if (!ServiceManager.getInstance().containSingleton(service)) {
    throw new NacosException(NacosException.NOT_FOUND,
        String.format("service %s@@%s is not found!", groupName, serviceName));
  }
  if (!serviceStorage.getClusters(service).contains(clusterName)) {
    throw new NacosException(NacosException.NOT_FOUND, "cluster " + clusterName + " is not found!");
  }
  ServiceInfo serviceInfo = serviceStorage.getData(service);
  ServiceInfo result = ServiceUtil.selectInstances(serviceInfo, clusterName);
  return result.getHosts();
}

@Test
void testListInstancesNonExistCluster() throws NacosException {
  assertThrows(NacosException.class, () -> {
    catalogServiceV2Impl.listInstances("A", "B", "C", "DD");
  });
}

@Override
public void start() {
  client = new ScClient();
  client.init();
}

@Test
public void start() {
  scRegister.start();
}

public static NamingSelector newMetadataSelector(Map<String, String> metadata) {
  return newMetadataSelector(metadata, false);
}

@Test
public void testNewMetadataSelector2() {
  Instance ins1 = new Instance();
  ins1.addMetadata("a", "1");
  ins1.addMetadata("c", "3");
  Instance ins2 = new Instance();
  ins2.addMetadata("b", "2");
  Instance ins3 = new Instance();
  ins3.addMetadata("c", "3");
  NamingContext namingContext = mock(NamingContext.class);
  when(namingContext.getInstances()).thenReturn(Arrays.asList(ins1, ins2, ins3));
  NamingSelector metadataSelector = NamingSelectorFactory.newMetadataSelector(new HashMap() {
    {
      put("a", "1");
      put("b", "2");
    }
  }, true);
  List<Instance> result = metadataSelector.select(namingContext).getResult();
  assertEquals(2, result.size());
  assertEquals(ins1, result.get(0));
  assertEquals(ins2, result.get(1));
}

public static List<MusicProtocol.MusicArtist> convertMusicGroupsToMusicArtists(Collection<MusicGroup> musicGroups) {
  if (musicGroups == null || musicGroups.isEmpty()) {
    return Collections.emptyList();
  }
  List<MusicProtocol.MusicArtist> musicArtists = new ArrayList<>();
  for (MusicGroup musicGroup : musicGroups) {
    if (!StringUtils.isBlank(musicGroup.getName())) {
      MusicProtocol.MusicArtist artist = MusicProtocol.MusicArtist.newBuilder()
          .setName(musicGroup.getName())
          .build();
      musicArtists.add(artist);
    }
  }
  return musicArtists;
}

@Test
public void testConvertMusicGroupsToMusicArtists() {
  String expectedName = "Expected Artist Name";
  String expectedName2 = "Expected Second Artist Name";
  MusicGroup musicGroup = new MusicGroup(expectedName);
  MusicGroup secondMusicGroup = new MusicGroup(expectedName2);

  // Null Collection
  List<MusicProtocol.MusicArtist> nullResult =
      AppleMusicPlaylistConverter.convertMusicGroupsToMusicArtists(null);
  Assertions.assertTrue(nullResult.isEmpty());

  // Empty Collection
  List<MusicProtocol.MusicArtist> emptyResult =
      AppleMusicPlaylistConverter.convertMusicGroupsToMusicArtists(List.of());
  Assertions.assertTrue(emptyResult.isEmpty());

  // Single Group
  List<MusicProtocol.MusicArtist> result1 =
      AppleMusicPlaylistConverter.convertMusicGroupsToMusicArtists(List.of(musicGroup));
  Assertions.assertFalse(result1.isEmpty());
  Assertions.assertEquals(result1.size(), 1);
  Assertions.assertNotNull(result1.get(0));
  Assertions.assertTrue(result1.get(0).hasName());
  Assertions.assertEquals(result1.get(0).getName(), expectedName);

  // Multiple Groups
  List<MusicProtocol.MusicArtist> result2 =
      AppleMusicPlaylistConverter.convertMusicGroupsToMusicArtists(List.of(musicGroup, secondMusicGroup));
  Assertions.assertFalse(result2.isEmpty());
  Assertions.assertEquals(result2.size(), 2);
  Assertions.assertNotNull(result2.get(0));
  Assertions.assertTrue(result2.get(0).hasName());
  Assertions.assertEquals(result2.get(0).getName(), expectedName);
  Assertions.assertNotNull(result2.get(1));
  Assertions.assertTrue(result2.get(1).hasName());
  Assertions.assertEquals(result2.get(1).getName(), expectedName2);
}

public Map<String, String> subjectAltNames() {
  Map<String, String> san = new HashMap<>();
  int i = 0;
  for (String name : dnsNames()) {
    san.put("DNS." + (i++), name);
  }
  i = 0;
  for (String ip : ipAddresses()) {
    san.put("IP." + (i++), ip);
  }
  return san;
}

@Test
public void testIPV6() {
  Subject subject = new Subject.Builder()
      .withCommonName("joe")
      .addIpAddress("fc01::8d1c")
      .addIpAddress("1762:0000:0000:00:0000:0B03:0001:AF18")
      .addIpAddress("1974:0:0:0:0:B03:1:AF74")
      .build();
  assertEquals(Map.of(
      "IP.0", "fc01:0:0:0:0:0:0:8d1c",
      "IP.1", "1974:0:0:0:0:b03:1:af74",
      "IP.2", "1762:0:0:0:0:b03:1:af18"), subject.subjectAltNames());
}

public List<IssueDto> sort() {
  String sort = query.sort();
  Boolean asc = query.asc();
  if (sort != null && asc != null) {
    return getIssueProcessor(sort).sort(issues, asc);
  }
  return issues;
}

@Test
public void should_sort_by_close_date() {
  Date date = new Date();
  Date date1 = DateUtils.addDays(date, -3);
  Date date2 = DateUtils.addDays(date, -2);
  Date date3 = DateUtils.addDays(date, -1);
  IssueDto issue1 = new IssueDto().setKee("A").setIssueCloseDate(date1);
  IssueDto issue2 = new IssueDto().setKee("B").setIssueCloseDate(date3);
  IssueDto issue3 = new IssueDto().setKee("C").setIssueCloseDate(date2);
  List<IssueDto> dtoList = newArrayList(issue1, issue2, issue3);
  IssueQuery query = IssueQuery.builder().sort(IssueQuery.SORT_BY_CLOSE_DATE).asc(false).build();
  IssuesFinderSort issuesFinderSort = new IssuesFinderSort(dtoList, query);
  List<IssueDto> result = newArrayList(issuesFinderSort.sort());
  assertThat(result).hasSize(3);
  assertThat(result.get(0).getIssueCloseDate()).isEqualTo(date3);
  assertThat(result.get(1).getIssueCloseDate()).isEqualTo(date2);
  assertThat(result.get(2).getIssueCloseDate()).isEqualTo(date1);
}

@Override
protected double maintain() {
  if (!nodeRepository().nodes().isWorking()) return 0.0;
  if (!nodeRepository().zone().environment().isProduction()) return 1.0;
  NodeList allNodes = nodeRepository().nodes().list(); // Lockless as strong consistency is not needed
  if (!zoneIsStable(allNodes)) return 1.0;
  Move bestMove = findBestMove(allNodes);
  if (!bestMove.isEmpty()) {
    LOG.info("Trying " + bestMove +
        " (" + bestMove.fromHost().switchHostname().orElse("<none>") +
        " -> " + bestMove.toHost().switchHostname().orElse("<none>") + ")");
  }
  bestMove.execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository());
  return 1.0;
}

@Test
public void rebalance() {
  ClusterSpec.Id cluster1 = ClusterSpec.Id.from("c1");
  ClusterSpec.Id cluster2 = ClusterSpec.Id.from("c2");
  ProvisioningTester tester = new ProvisioningTester.Builder()
      .zone(new Zone(Environment.prod, RegionName.from("us-east")))
      .spareCount(1)
      .build();
  MockDeployer deployer = deployer(tester, cluster1, cluster2);
  SwitchRebalancer rebalancer = new SwitchRebalancer(tester.nodeRepository(), Duration.ofDays(1),
      new TestMetric(), deployer);

  // Provision initial hosts on same switch
  NodeResources hostResources = new NodeResources(48, 128, 500, 10);
  String switch0 = "switch0";
  provisionHosts(3, switch0, hostResources, tester);

  // Deploy application
  deployer.deployFromLocalActive(app).get().activate();
  tester.assertSwitches(Set.of(switch0), app, cluster1);
  tester.assertSwitches(Set.of(switch0), app, cluster2);

  // Rebalancing does nothing as there are no better moves to perform
  tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment);
  assertNoMoves(rebalancer, tester);

  // Provision a single host on a new switch
  provisionHost("switch1", hostResources, tester);

  // Application is redeployed and rebalancer does nothing as not enough time has passed since deployment
  deployer.deployFromLocalActive(app).get().activate();
  assertNoMoves(rebalancer, tester);

  // No rebalancing because the additional host is counted as a spare
  tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment);
  assertNoMoves(rebalancer, tester);

  // More hosts are provisioned. Rebalancer now retires one node from non-exclusive switch in each cluster, and
  // allocates a new one
  provisionHost("switch2", hostResources, tester);
  provisionHost("switch3", hostResources, tester);
  Set<ClusterSpec.Id> clusters = Set.of(cluster1, cluster2);
  Set<ClusterSpec.Id> rebalancedClusters = new HashSet<>();
  for (int i = 0; i < clusters.size(); i++) {
    tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment);
    rebalancer.maintain();
    NodeList appNodes = tester.nodeRepository().nodes().list().owner(app).state(Node.State.active);
    NodeList retired = appNodes.retired();
    ClusterSpec.Id cluster = retired.first().get().allocation().get().membership().cluster().id();
    assertEquals("Node is retired in " + cluster + " " + retired, 1, retired.size());
    NodeList clusterNodes = appNodes.cluster(cluster);
    assertEquals("Cluster " + cluster + " allocates nodes on distinct switches", 2,
        tester.switchesOf(clusterNodes, tester.nodeRepository().nodes().list()).size());
    rebalancedClusters.add(cluster);

    // Retired node becomes inactive and makes zone stable
    deactivate(tester, retired);
  }
  assertEquals("Rebalanced all clusters", clusters, rebalancedClusters);

  // Next run does nothing
  tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment);
  assertNoMoves(rebalancer, tester);
}

@Override
public boolean contains(Object o) {
  for (M member : members) {
    if (selector.select(member) && o.equals(member)) {
      return true;
    }
  }
  return false;
}

@Test
public void testDoesNotContainOtherMemberWhenLiteMembersSelected() {
  Collection<MemberImpl> collection = new MemberSelectingCollection<>(members, LITE_MEMBER_SELECTOR);
  assertFalse(collection.contains(nonExistingMember));
}

@JsonProperty("status") public Status status() { if (indices.isEmpty() || indices.stream().allMatch(i -> i.status() == Status.NOT_STARTED)) { return Status.NOT_STARTED; } else if (indices.stream().allMatch(RemoteReindexIndex::isCompleted)) { // all are now completed, either finished or errored if (indices.stream().anyMatch(i -> i.status() == Status.ERROR)) { return Status.ERROR; } else { return Status.FINISHED; } } else { return Status.RUNNING; } }
@Test
void testStatusCompletedWithError() {
  final RemoteReindexMigration migration = withIndices(
      index("one", RemoteReindexingMigrationAdapter.Status.FINISHED),
      index("two", RemoteReindexingMigrationAdapter.Status.ERROR)
  );
  Assertions.assertThat(migration.status()).isEqualTo(RemoteReindexingMigrationAdapter.Status.ERROR);
}

public void finish(Promise<Void> aggregatePromise) {
  ObjectUtil.checkNotNull(aggregatePromise, "aggregatePromise");
  checkInEventLoop();
  if (this.aggregatePromise != null) {
    throw new IllegalStateException("Already finished");
  }
  this.aggregatePromise = aggregatePromise;
  if (doneCount == expectedCount) {
    tryPromise();
  }
}

@Test
public void testNullArgument() {
  try {
    combiner.finish(null);
    fail();
  } catch (NullPointerException expected) {
    // expected
  }
  combiner.finish(p1);
  verify(p1).trySuccess(null);
}

public static PredicateTreeAnnotations createPredicateTreeAnnotations(Predicate predicate) {
  PredicateTreeAnalyzerResult analyzerResult = PredicateTreeAnalyzer.analyzePredicateTree(predicate);
  // The tree size is used as the interval range.
  int intervalEnd = analyzerResult.treeSize;
  AnnotatorContext context = new AnnotatorContext(intervalEnd, analyzerResult.sizeMap);
  assignIntervalLabels(predicate, Interval.INTERVAL_BEGIN, intervalEnd, false, context);
  return new PredicateTreeAnnotations(
      analyzerResult.minFeature, intervalEnd, context.intervals,
      context.intervalsWithBounds, context.featureConjunctions);
}

@Test
void require_that_or_intervals_are_the_same() {
  Predicate p = or(
      feature("key1").inSet("value1"),
      feature("key2").inSet("value2"));
  PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p);
  assertEquals(1, r.minFeature);
  assertEquals(2, r.intervalEnd);
  assertEquals(2, r.intervalMap.size());
  assertIntervalContains(r, "key1=value1", 0x00010002);
  assertIntervalContains(r, "key2=value2", 0x00010002);
}

@Override
public <K, T_OTHER, OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> connectAndProcess(
    KeyedPartitionStream<K, T_OTHER> other,
    TwoInputBroadcastStreamProcessFunction<T_OTHER, T, OUT> processFunction) {
  // no state redistribution mode check is required here, since all redistribution modes are
  // acceptable
  TypeInformation<OUT> outTypeInfo =
      StreamUtils.getOutputTypeForTwoInputBroadcastProcessFunction(
          processFunction, ((KeyedPartitionStreamImpl<K, T_OTHER>) other).getType(), getType());
  KeyedTwoInputBroadcastProcessOperator<K, T_OTHER, T, OUT> processOperator =
      new KeyedTwoInputBroadcastProcessOperator<>(processFunction);
  Transformation<OUT> outTransformation =
      StreamUtils.getTwoInputTransformation(
          "Broadcast-Keyed-TwoInput-Process",
          (KeyedPartitionStreamImpl<K, T_OTHER>) other,
          // we should always take the broadcast input as second input.
          this,
          outTypeInfo,
          processOperator);
  environment.addOperator(outTransformation);
  return StreamUtils.wrapWithConfigureHandle(
      new NonKeyedPartitionStreamImpl<>(environment, outTransformation));
}

@Test
void testConnectKeyedStreamWithOutputKeySelector() throws Exception {
  ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
  BroadcastStreamImpl<Integer> stream =
      new BroadcastStreamImpl<>(env, new TestingTransformation<>("t1", Types.INT, 1));
  NonKeyedPartitionStreamImpl<Long> nonKeyedStream =
      new NonKeyedPartitionStreamImpl<>(env, new TestingTransformation<>("t2", Types.LONG, 2));
  KeyedPartitionStream<Long, Long> resultStream =
      stream.connectAndProcess(
          nonKeyedStream.keyBy(x -> x),
          new StreamTestUtils.NoOpTwoInputBroadcastStreamProcessFunction(),
          x -> x);
  assertThat(resultStream).isInstanceOf(KeyedPartitionStream.class);
  List<Transformation<?>> transformations = env.getTransformations();
  assertThat(transformations).hasSize(1);
  assertProcessType(transformations.get(0), TwoInputTransformation.class, Types.LONG);
}

public synchronized boolean maybeUpdateGetRequestTimestamp(long currentTime) {
  long lastRequestTimestamp = Math.max(lastGetRequestTimestamp, lastPushRequestTimestamp);
  long timeElapsedSinceLastMsg = currentTime - lastRequestTimestamp;
  if (timeElapsedSinceLastMsg >= pushIntervalMs) {
    lastGetRequestTimestamp = currentTime;
    return true;
  }
  return false;
}

@Test
public void testMaybeUpdateGetRequestAfterElapsedTimeValid() {
  assertTrue(clientInstance.maybeUpdateGetRequestTimestamp(
      System.currentTimeMillis() - ClientMetricsConfigs.DEFAULT_INTERVAL_MS));
  // Second request should be accepted as time since last request is greater than the push interval.
  assertTrue(clientInstance.maybeUpdateGetRequestTimestamp(System.currentTimeMillis()));
}

@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);
  ColumnStatisticsObj statsObj = null;
  String colType;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndv are
  // bitvectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  boolean areAllNDVEstimatorsMergeable = true;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
    }
    TimestampColumnStatsDataInspector columnStatsData = timestampInspectorFromStats(cso);
    // check if we can merge NDV estimators
    if (columnStatsData.getNdvEstimator() == null) {
      areAllNDVEstimatorsMergeable = false;
      break;
    } else {
      NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (!ndvEstimator.canMerge(estimator)) {
          areAllNDVEstimatorsMergeable = false;
          break;
        }
      }
    }
  }
  if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    TimestampColumnStatsDataInspector aggregateData = null;
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    TimestampColumnStatsMerger merger = new TimestampColumnStatsMerger();
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso);
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      if (newData.isSetLowValue() && newData.isSetHighValue()) {
        densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
      }
      if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(merger.mergeLowValue(
            merger.getLowValue(aggregateData), merger.getLowValue(newData)));
        aggregateData.setHighValue(merger.mergeHighValue(
            merger.getHighValue(aggregateData), merger.getHighValue(newData)));
        aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
        aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation && aggregateData != null
          && aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setTimestampStats(aggregateData);
  } else {
    // TODO: bail out if missing stats are over a certain threshold
    // we need extrapolation
    LOG.debug("start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!areAllNDVEstimatorsMergeable) {
      // if not every partition uses bitvector for ndv, we just fall back to
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        TimestampColumnStatsData newData = cso.getStatsData().getTimestampStats();
        if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
          densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      TimestampColumnStatsDataInspector aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso);
        // newData.isSetBitVectors() should be true for sure because we
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setTimestampStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue()))
                  / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setTimestampStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue()))
              / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug(
      "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
      colName, columnStatisticsData.getTimestampStats().getNumDVs(),
      partNames.size(), colStatsWithSourceInfo.size());
  KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
  if (mergedKllHistogramEstimator != null) {
    columnStatisticsData.getTimestampStats().setHistogram(mergedKllHistogramEstimator.serialize());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}

@Test
public void testAggregateSingleStat() throws MetaException {
  List<String> partitions = Collections.singletonList("part1");
  long[] values = { TS_1.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch() };
  ColumnStatisticsData data1 = new ColStatsBuilder<>(Timestamp.class).numNulls(1).numDVs(2)
      .low(TS_1).high(TS_3).hll(values).kll(values).build();
  List<ColStatsObjWithSourceInfo> statsList =
      Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
  TimestampColumnStatsAggregator aggregator = new TimestampColumnStatsAggregator();
  ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
  assertEqualStatistics(data1, computedStatsObj.getStatsData());
}

@JsonIgnore
public boolean isForeachIterationRestartable(long iterationId) {
  if (restartInfo != null && restartInfo.contains(iterationId)) {
    return false;
  }
  if (details == null) {
    return false;
  }
  return details.isForeachIterationRestartable(iterationId);
}

@Test
public void testisForeachIterationRestartable() throws Exception {
  ForeachStepOverview overview = loadObject(
      "fixtures/instances/sample-foreach-step-overview.json", ForeachStepOverview.class);
  assertFalse(overview.isForeachIterationRestartable(123L));
  overview.addOne(123L, WorkflowInstance.Status.FAILED, null);
  overview.refreshDetail();
  assertTrue(overview.isForeachIterationRestartable(123L));
  assertEquals(0, overview.getRunningStatsCount(true));
  overview.updateForRestart(
      123L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, null);
  overview.refreshDetail();
  assertFalse(overview.isForeachIterationRestartable(123L));
  assertEquals(79993, overview.getRunningStatsCount(true));
}

@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
    final boolean restoreInProgress) {
  try {
    final ExecuteResult result = EngineExecutor
        .create(primaryContext, serviceContext, plan.getConfig())
        .execute(plan.getPlan(), restoreInProgress);
    return result;
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    // add the statement text to the KsqlException
    throw new KsqlStatementException(
        e.getMessage(),
        e.getMessage(),
        plan.getPlan().getStatementText(),
        e.getCause()
    );
  }
}

@Test
public void shouldThrowOnInsertIntoStreamWithTableResult() {
  // Given:
  setupKsqlEngineWithSharedRuntimeEnabled();
  KsqlEngineTestUtil.execute(
      serviceContext,
      ksqlEngine,
      "create stream bar as select ordertime, itemid, orderid from orders;",
      ksqlConfig,
      Collections.emptyMap()
  );

  // When:
  final KsqlStatementException e = assertThrows(
      KsqlStatementException.class,
      () -> KsqlEngineTestUtil.execute(
          serviceContext,
          ksqlEngine,
          "insert into bar select itemid, count(*) from orders group by itemid;",
          ksqlConfig,
          Collections.emptyMap()
      )
  );

  // Then:
  assertThat(e, rawMessage(containsString(
      "Incompatible data sink and query result. "
          + "Data sink (BAR) type is KSTREAM but select query result is KTABLE.")));
  assertThat(e, statementText(is(
      "insert into bar select itemid, count(*) from orders group by itemid;")));
}

@Override
public ExecuteContext doAfter(ExecuteContext context) {
  final Object result = context.getResult();
  if (result instanceof Boolean) {
    final boolean heartbeatResult = (Boolean) result;
    if (heartbeatResult) {
      RegisterContext.INSTANCE.compareAndSet(false, true);
    } else {
      RegisterContext.INSTANCE.compareAndSet(true, false);
    }
  }
  return context;
}

@Test
public void doAfter() throws NoSuchMethodException {
  final ExecuteContext context = buildContext();
  context.changeResult(true);
  interceptor.doAfter(context);
  Assert.assertTrue(RegisterContext.INSTANCE.isAvailable());
  context.changeResult(false);
  interceptor.doAfter(context);
  Assert.assertFalse(RegisterContext.INSTANCE.isAvailable());
}

public Certificate add(CvCertificate cert) {
  final Certificate db = Certificate.from(cert);
  if (repository.countByIssuerAndSubject(db.getIssuer(), db.getSubject()) > 0) {
    throw new ClientException(String.format(
        "Certificate of subject %s and issuer %s already exists", db.getSubject(), db.getIssuer()));
  }
  // Special case for first CVCA certificate for this document type
  if (db.getType() == Certificate.Type.CVCA
      && repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
    signatureService.verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams());
    logger.warn("Added first CVCA certificate for {}, set trusted flag manually", db.getDocumentType());
  } else {
    verify(cert);
    if (db.getType() == Certificate.Type.AT) {
      verifyPublicKey(cert);
    }
  }
  return repository.saveAndFlush(db);
}

@Test
public void shouldNotAddCertificateIfFirstButNotSelfSigned() {
  ClientException thrown = assertThrows(ClientException.class,
      () -> service.add(readCvCertificate("rdw/acc/dvca.cvcert")));
  assertEquals("Could not find trust chain", thrown.getMessage());
}

public double[][] test(DataFrame data) {
  DataFrame x = formula.x(data);
  int n = x.nrow();
  int ntrees = trees.length;
  double[][] prediction = new double[ntrees][n];
  for (int j = 0; j < n; j++) {
    Tuple xj = x.get(j);
    double base = b;
    for (int i = 0; i < ntrees; i++) {
      base += shrinkage * trees[i].predict(xj);
      prediction[i][j] = base;
    }
  }
  return prediction;
}

@Test
public void testPuma8nhHuber() {
  test(Loss.huber(0.9), "puma8nh", Puma8NH.formula, Puma8NH.data, 3.2429);
}

public static void processEnvVariables(Map<String, String> inputProperties) {
  processEnvVariables(inputProperties, System.getenv());
}

@Test
void throwIfInvalidFormat() {
  var inputProperties = new HashMap<String, String>();
  var env = Map.of("SONAR_SCANNER_JSON_PARAMS", "{garbage");
  var thrown = assertThrows(IllegalArgumentException.class,
      () -> EnvironmentConfig.processEnvVariables(inputProperties, env));
  assertThat(thrown).hasMessage(
      "Failed to parse JSON properties from environment variable 'SONAR_SCANNER_JSON_PARAMS'");
}

public static Collection<MdbValidityStatus> assertEjbClassValidity(final ClassInfo mdbClass) {
  Collection<MdbValidityStatus> mdbComplianceIssueList = new ArrayList<>(MdbValidityStatus.values().length);
  final String className = mdbClass.name().toString();
  verifyModifiers(className, mdbClass.flags(), mdbComplianceIssueList);
  for (MethodInfo method : mdbClass.methods()) {
    if ("onMessage".equals(method.name())) {
      verifyOnMessageMethod(className, method.flags(), mdbComplianceIssueList);
    }
    if ("finalize".equals(method.name())) {
      EjbLogger.DEPLOYMENT_LOGGER.mdbCantHaveFinalizeMethod(className);
      mdbComplianceIssueList.add(MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD);
    }
  }
  return mdbComplianceIssueList;
}

@Test
public void mdbWithInterface() {
  assertTrue(assertEjbClassValidity(buildClassInfoForClass(InvalidMdbInterface.class.getName()))
      .contains(MdbValidityStatus.MDB_CANNOT_BE_AN_INTERFACE));
}

public static Application mergeApplication(Application first, Application second) {
  if (!first.getName().equals(second.getName())) {
    throw new IllegalArgumentException("Cannot merge applications with different names");
  }
  Application merged = copyApplication(first);
  for (InstanceInfo instance : second.getInstances()) {
    switch (instance.getActionType()) {
      case ADDED:
      case MODIFIED:
        merged.addInstance(instance);
        break;
      case DELETED:
        merged.removeInstance(instance);
    }
  }
  return merged;
}

@Test
public void testMergeApplicationIfActionTypeModifiedReturnApplication() {
  Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.MODIFIED);
  Assert.assertEquals(application.getInstances(),
      EurekaEntityFunctions.mergeApplication(application, application).getInstances());
}

@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap) throws InterruptedException {
  String hadoopVersion;
  try {
    hadoopVersion = getHadoopVersion();
  } catch (IOException e) {
    return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
        String.format("Failed to get hadoop version:%n%s.", ExceptionUtils.asPlainText(e)),
        "Please check if hadoop is on your PATH.");
  }
  String version = mConf.getString(PropertyKey.UNDERFS_VERSION);
  for (String prefix : new String[] {CDH_PREFIX, HADOOP_PREFIX}) {
    if (version.startsWith(prefix)) {
      version = version.substring(prefix.length());
      break;
    }
  }
  if (hadoopVersion.contains(version)) {
    return new ValidationTaskResult(ValidationUtils.State.OK, getName(),
        String.format("Hadoop version %s contains UFS version defined in alluxio %s=%s.",
            hadoopVersion, PropertyKey.UNDERFS_VERSION, version), "");
  }
  return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
      String.format("Hadoop version %s does not match %s=%s.",
          hadoopVersion, PropertyKey.UNDERFS_VERSION, version),
      String.format("Please configure %s to match the HDFS version.", PropertyKey.UNDERFS_VERSION));
}

@Test
public void minorVersionConflict() throws Exception {
  PowerMockito.mockStatic(ShellUtils.class);
  String[] cmd = new String[]{"hadoop", "version"};
  // Alluxio defines a different minor version, which should not work
  BDDMockito.given(ShellUtils.execCommand(cmd)).willReturn("Hadoop 2.6.2");
  CONF.set(PropertyKey.UNDERFS_VERSION, "2.6.3");
  HdfsVersionValidationTask task = new HdfsVersionValidationTask(CONF);
  ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
  assertEquals(ValidationUtils.State.FAILED, result.getState());
  assertThat(result.getResult(), containsString(
      "Hadoop version 2.6.2 does not match alluxio.underfs.version=2.6.3"));
}

@Override
public FetchedAppReport getApplicationReport(ApplicationId appId) throws YarnException, IOException {
  SubClusterId scid = federationFacade.getApplicationHomeSubCluster(appId);
  createSubclusterIfAbsent(scid);
  ApplicationClientProtocol applicationsManager = subClusters.get(scid).getRight();
  return super.getApplicationReport(applicationsManager, appId);
}

@Test
public void testFetchReportAHSDisabled() throws Exception {
  testHelper(false);
  /* RM will not know of the app and Application History Service is disabled
   * So we will not try to get the report from AHS and RM will throw
   * ApplicationNotFoundException */
  LambdaTestUtils.intercept(ApplicationNotFoundException.class, appNotFoundExceptionMsg,
      () -> fetcher.getApplicationReport(appId1));
  LambdaTestUtils.intercept(ApplicationNotFoundException.class, appNotFoundExceptionMsg,
      () -> fetcher.getApplicationReport(appId2));
  Mockito.verify(appManager1, Mockito.times(1))
      .getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
  Mockito.verify(appManager2, Mockito.times(1))
      .getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
  Assert.assertNull("HistoryManager should be null as AHS is disabled", history);
}

public static MemberSelector and(MemberSelector... selectors) {
  return new AndMemberSelector(selectors);
}

@Test
public void testAndMemberSelector2() {
  MemberSelector selector = MemberSelectors.and(LOCAL_MEMBER_SELECTOR, LITE_MEMBER_SELECTOR);
  assertFalse(selector.select(member));
  verify(member).localMember();
  verify(member, never()).isLiteMember();
}

@Override
public void processElement(RowData input, Context ctx, Collector<RowData> out) throws Exception {
  processFirstRowOnProcTime(input, state, out);
}

@Test
public void testWithStateTtl() throws Exception {
  ProcTimeDeduplicateKeepFirstRowFunction func =
      new ProcTimeDeduplicateKeepFirstRowFunction(minTime.toMilliseconds());
  OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(func);
  testHarness.open();
  testHarness.processElement(insertRecord("book", 1L, 12));
  testHarness.processElement(insertRecord("book", 2L, 11));
  testHarness.processElement(insertRecord("book", 1L, 13));
  testHarness.setStateTtlProcessingTime(30);
  testHarness.processElement(insertRecord("book", 1L, 17));
  testHarness.processElement(insertRecord("book", 2L, 18));
  testHarness.processElement(insertRecord("book", 1L, 19));

  // Keep FirstRow in deduplicate will not send retraction
  List<Object> expectedOutput = new ArrayList<>();
  expectedOutput.add(insertRecord("book", 1L, 12));
  expectedOutput.add(insertRecord("book", 2L, 11));
  // (1L,12),(2L,11) has retired, so output (1L,17) and (2L,18)
  expectedOutput.add(insertRecord("book", 1L, 17));
  expectedOutput.add(insertRecord("book", 2L, 18));
  assertor.assertOutputEqualsSorted("output wrong.", expectedOutput, testHarness.getOutput());
  testHarness.close();
}

public void send(String body) {
  if (isStopped) {
    // Arguments reordered: the first %s is the producer name, the second the message body
    // (the original passed them swapped).
    throw new IllegalStateException(String.format(
        "Producer %s was stopped and fail to deliver requested message [%s].", name, body));
  }
  var msg = new SimpleMessage();
  msg.addHeader(Headers.DATE, new Date().toString());
  msg.addHeader(Headers.SENDER, name);
  msg.setBody(body);
  try {
    queue.put(msg);
  } catch (InterruptedException e) {
    // allow thread to exit
    LOGGER.error("Exception caught.", e);
  }
}

@Test
void testSend() throws Exception {
  final var publishPoint = mock(MqPublishPoint.class);
  final var producer = new Producer("producer", publishPoint);
  verifyNoMoreInteractions(publishPoint);

  producer.send("Hello!");
  final var messageCaptor = ArgumentCaptor.forClass(Message.class);
  verify(publishPoint).put(messageCaptor.capture());

  final var message = messageCaptor.getValue();
  assertNotNull(message);
  assertEquals("producer", message.getHeader(Message.Headers.SENDER));
  assertNotNull(message.getHeader(Message.Headers.DATE));
  assertEquals("Hello!", message.getBody());
  verifyNoMoreInteractions(publishPoint);
}

public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
  Set<Integer> podIdsToRestart = new HashSet<>();
  List<Future<Void>> futures = new ArrayList<>(pvcs.size());
  for (PersistentVolumeClaim desiredPvc : pvcs) {
    Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
        .compose(currentPvc -> {
          if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
            // This branch handles the following conditions:
            // * The PVC doesn't exist yet, we should create it
            // * The PVC is not Bound, we should reconcile it
            return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                .map((Void) null);
          } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond ->
              "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
            // The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
            LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
            return Future.succeededFuture();
          } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond ->
              "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
            // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
            podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
            LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.",
                desiredPvc.getMetadata().getName());
            return Future.succeededFuture();
          } else {
            // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
            Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
            Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));
            if (!currentSize.equals(desiredSize)) {
              // The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
              return resizePvc(kafkaStatus, currentPvc, desiredPvc);
            } else {
              // size didn't change, just reconcile
              return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                  .map((Void) null);
            }
          }
        });
    futures.add(perPvcFuture);
  }
  return Future.all(futures)
      .map(podIdsToRestart);
}

@Test public void testVolumesBoundNonExpandableStorageClass(VertxTestContext context) { List<PersistentVolumeClaim> pvcs = List.of( createPvc("data-pod-0"), createPvc("data-pod-1"), createPvc("data-pod-2") ); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator PvcOperator mockPvcOps = supplier.pvcOperations; when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-"))) .thenAnswer(invocation -> { String pvcName = invocation.getArgument(1); PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null); if (currentPvc != null) { PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc) .editSpec() .withNewResources() .withRequests(Map.of("storage", new Quantity("50Gi", null))) .endResources() .endSpec() .withNewStatus() .withPhase("Bound") .withCapacity(Map.of("storage", new Quantity("50Gi", null))) .endStatus() .build(); return Future.succeededFuture(pvcWithStatus); } else { return Future.succeededFuture(); } }); ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(NONRESIZABLE_STORAGE_CLASS)); // Reconcile the PVCs PvcReconciler reconciler = new PvcReconciler( new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), mockPvcOps, mockSco ); // Used to capture the warning condition KafkaStatus kafkaStatus = new KafkaStatus(); Checkpoint async = context.checkpoint(); reconciler.resizeAndReconcilePvcs(kafkaStatus, pvcs) .onComplete(res -> { assertThat(res.succeeded(), is(true)); assertThat(res.result().size(), is(0)); assertThat(pvcCaptor.getAllValues().size(), is(0)); assertThat(kafkaStatus.getConditions().size(), is(3)); kafkaStatus.getConditions().stream().forEach(c -> { assertThat(c.getReason(), is("PvcResizingWarning")); assertThat(c.getMessage(), containsString("Storage Class mysc does not support resizing of volumes.")); }); async.flag(); }); }
@SuppressWarnings("unchecked") public static <S, F> S visit(final SqlType type, final SqlTypeWalker.Visitor<S, F> visitor) { final BiFunction<SqlTypeWalker.Visitor<?, ?>, SqlType, Object> handler = HANDLER .get(type.baseType()); if (handler == null) { throw new UnsupportedOperationException("Unsupported schema type: " + type.baseType()); } return (S) handler.apply(visitor, type); }
@Test public void shouldVisitTimestamp() { // Given: final SqlPrimitiveType type = SqlTypes.TIMESTAMP; when(visitor.visitTimestamp(any())).thenReturn("Expected"); // When: final String result = SqlTypeWalker.visit(type, visitor); // Then: verify(visitor).visitTimestamp(same(type)); assertThat(result, is("Expected")); }
@Override @NonNull public Iterable<String> getNextWords( @NonNull String currentWord, int maxResults, int minWordUsage) { if (mNextNameParts.containsKey(currentWord)) { return Arrays.asList(mNextNameParts.get(currentWord)); } else { return Collections.emptyList(); } }
@Test public void testGetNextWords() throws Exception { Iterator<String> nextWords = mDictionaryUnderTest.getNextWords("Menny", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Even-Danan", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); nextWords = mDictionaryUnderTest.getNextWords("Dummy", 2, 1).iterator(); Assert.assertFalse(nextWords.hasNext()); nextWords = mDictionaryUnderTest.getNextWords("Erela", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Portugaly", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); nextWords = mDictionaryUnderTest.getNextWords("John", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Lennon", nextWords.next()); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Smith", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); nextWords = mDictionaryUnderTest.getNextWords("Mika", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Michael", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); // next part of the name nextWords = mDictionaryUnderTest.getNextWords("Michael", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("Michelle", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); nextWords = mDictionaryUnderTest.getNextWords("Jonathan", 2, 1).iterator(); Assert.assertTrue(nextWords.hasNext()); Assert.assertEquals("With'In", nextWords.next()); Assert.assertFalse(nextWords.hasNext()); }
@ApiOperation(value = "Get Widget Type (getWidgetType)", notes = "Get the Widget Type by FQN. " + WIDGET_TYPE_DESCRIPTION + AVAILABLE_FOR_ANY_AUTHORIZED_USER, hidden = true) @PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN', 'CUSTOMER_USER')") @RequestMapping(value = "/widgetType", params = {"fqn"}, method = RequestMethod.GET) @ResponseBody public WidgetType getWidgetType( @Parameter(description = "Widget Type fqn", required = true) @RequestParam String fqn) throws ThingsboardException { String[] parts = fqn.split("\\."); String scopeQualifier = parts.length > 0 ? parts[0] : null; if (parts.length < 2 || (!scopeQualifier.equals("system") && !scopeQualifier.equals("tenant"))) { throw new ThingsboardException("Invalid fqn!", ThingsboardErrorCode.BAD_REQUEST_PARAMS); } TenantId tenantId; if ("system".equals(scopeQualifier)) { tenantId = TenantId.fromUUID(ModelConstants.NULL_UUID); } else { tenantId = getCurrentUser().getTenantId(); } String typeFqn = fqn.substring(scopeQualifier.length() + 1); WidgetType widgetType = widgetTypeService.findWidgetTypeByTenantIdAndFqn(tenantId, typeFqn); checkNotNull(widgetType); accessControlService.checkPermission(getCurrentUser(), Resource.WIDGET_TYPE, Operation.READ, widgetType.getId(), widgetType); return widgetType; }
@Test public void testGetWidgetType() throws Exception { WidgetTypeDetails widgetType = new WidgetTypeDetails(); widgetType.setName("Widget Type"); widgetType.setDescriptor(JacksonUtil.fromString("{ \"someKey\": \"someValue\" }", JsonNode.class)); WidgetTypeDetails savedWidgetType = doPost("/api/widgetType", widgetType, WidgetTypeDetails.class); WidgetType foundWidgetType = doGet("/api/widgetType?fqn={fqn}", WidgetType.class, "tenant." + savedWidgetType.getFqn()); Assert.assertNotNull(foundWidgetType); Assert.assertEquals(new WidgetType(savedWidgetType), foundWidgetType); }
@Override public void write(final PostgreSQLPacketPayload payload, final Object value) { throw new UnsupportedSQLOperationException("PostgreSQLFloat4ArrayBinaryProtocolValue.write()"); }
@Test void assertWrite() { assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8), "val")); }
@Override public int read() throws IOException { throttle(); int data = rawStream.read(); if (data != -1) { bytesRead++; } return data; }
@Test public void testRead() { File tmpFile; File outFile; try { tmpFile = createFile(1024); outFile = createFile(); tmpFile.deleteOnExit(); outFile.deleteOnExit(); // Correction: we should use CB.ONE_C mode to calculate the maxBandwidth, // because CB.ONE_C's speed is the lowest. long maxBandwidth = copyAndAssert(tmpFile, outFile, 0, 1, -1, CB.ONE_C); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFFER); /* copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.BUFFER); copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.BUFFER); */ copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFF_OFFSET); /* copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.BUFF_OFFSET); copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.BUFF_OFFSET); */ copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.ONE_C); /* copyAndAssert(tmpFile, outFile, maxBandwidth, 10, 0, CB.ONE_C); copyAndAssert(tmpFile, outFile, maxBandwidth, 50, 0, CB.ONE_C); */ } catch (IOException e) { LOG.error("Exception encountered ", e); } }
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) { if (statsEnabled) { stats.log(deviceStateServiceMsg); } stateService.onQueueMsg(deviceStateServiceMsg, callback); }
@Test public void givenStatsDisabled_whenForwardingDeviceStateMsgToStateService_thenStatsAreNotRecorded() { // GIVEN ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "stats", statsMock); ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "statsEnabled", false); var stateMsg = TransportProtos.DeviceStateServiceMsgProto.newBuilder() .setTenantIdMSB(tenantId.getId().getMostSignificantBits()) .setTenantIdLSB(tenantId.getId().getLeastSignificantBits()) .setDeviceIdMSB(deviceId.getId().getMostSignificantBits()) .setDeviceIdLSB(deviceId.getId().getLeastSignificantBits()) .setAdded(true) .setUpdated(false) .setDeleted(false) .build(); doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(stateMsg, tbCallbackMock); // WHEN defaultTbCoreConsumerServiceMock.forwardToStateService(stateMsg, tbCallbackMock); // THEN then(statsMock).should(never()).log(stateMsg); }
public final void isPositiveInfinity() { isEqualTo(Double.POSITIVE_INFINITY); }
@Test public void isPositiveInfinity() { assertThat(Double.POSITIVE_INFINITY).isPositiveInfinity(); assertThatIsPositiveInfinityFails(1.23); assertThatIsPositiveInfinityFails(Double.NEGATIVE_INFINITY); assertThatIsPositiveInfinityFails(Double.NaN); assertThatIsPositiveInfinityFails(null); }
public static DiskValidator getInstance(Class<? extends DiskValidator> clazz) { DiskValidator diskValidator; if (INSTANCES.containsKey(clazz)) { diskValidator = INSTANCES.get(clazz); } else { diskValidator = ReflectionUtils.newInstance(clazz, null); // check the return of putIfAbsent() to see if any other thread have put // the instance with the same key into INSTANCES DiskValidator diskValidatorRet = INSTANCES.putIfAbsent(clazz, diskValidator); if (diskValidatorRet != null) { diskValidator = diskValidatorRet; } } return diskValidator; }
@Test(expected = DiskErrorException.class) public void testGetInstanceOfNonExistClass() throws DiskErrorException { DiskValidatorFactory.getInstance("non-exist"); }
@Override public void setOutputBuffers(OutputBuffers newOutputBuffers) { requireNonNull(newOutputBuffers, "newOutputBuffers is null"); checkArgument(newOutputBuffers.getType() == SPOOLING, "Invalid output buffers type"); checkArgument(newOutputBuffers.isNoMoreBufferIds(), "invalid noMoreBufferIds"); if (state.get().isTerminal() || outputBuffers.getVersion() >= newOutputBuffers.getVersion()) { return; } outputBuffers.checkValidTransition(newOutputBuffers); }
@Test public void testSetOutputBuffers() { SpoolingOutputBuffer buffer = createSpoolingOutputBuffer(); OutputBuffers newBuffers = new OutputBuffers(SPOOLING, 1, true, ImmutableMap.of()); buffer.setOutputBuffers(newBuffers); OutputBuffers invalidBuffers = new OutputBuffers(PARTITIONED, 1, true, ImmutableMap.of()); try { buffer.setOutputBuffers(invalidBuffers); fail("Expected IllegalArgumentException for invalid output buffers type"); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Invalid output buffers type"); } }
public static <EventT> Write<EventT> write() { return new AutoValue_JmsIO_Write.Builder<EventT>().build(); }
@Test public void testWriteMessageWithRetryPolicy() throws Exception { int waitingSeconds = 5; // Margin of the pipeline execution in seconds that should be taken into consideration int pipelineDuration = 5; Instant now = Instant.now(); String messageText = now.toString(); List<String> data = Collections.singletonList(messageText); RetryConfiguration retryPolicy = RetryConfiguration.create( 3, Duration.standardSeconds(waitingSeconds), Duration.standardDays(10)); WriteJmsResult<String> output = pipeline .apply(Create.of(data)) .apply( JmsIO.<String>write() .withConnectionFactory(connectionFactory) .withValueMapper(new TextMessageMapperWithErrorCounter()) .withRetryConfiguration(retryPolicy) .withQueue(QUEUE) .withUsername(USERNAME) .withPassword(PASSWORD)); PAssert.that(output.getFailedMessages()).empty(); pipeline.run(); Connection connection = connectionFactory.createConnection(USERNAME, PASSWORD); connection.start(); Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); MessageConsumer consumer = session.createConsumer(session.createQueue(QUEUE)); Message message = consumer.receive(1000); assertNotNull(message); long maximumTimestamp = now.plus(java.time.Duration.ofSeconds(waitingSeconds + pipelineDuration)).toEpochMilli(); assertThat( message.getJMSTimestamp(), allOf(greaterThanOrEqualTo(now.toEpochMilli()), lessThan(maximumTimestamp))); assertNull(consumer.receiveNoWait()); }
@VisibleForTesting static Duration parseToDuration(String timeStr) { final Matcher matcher = Pattern.compile("(?<value>\\d+)\\s*(?<time>[dhms])").matcher(timeStr); if (!matcher.matches()) { throw new IllegalArgumentException("Expected a time specification in the form <number>[d,h,m,s], e.g. 3m, but found [" + timeStr + "]"); } final int value = Integer.parseInt(matcher.group("value")); final String timeSpecifier = matcher.group("time"); final TemporalUnit unit; switch (timeSpecifier) { case "d": unit = ChronoUnit.DAYS; break; case "h": unit = ChronoUnit.HOURS; break; case "m": unit = ChronoUnit.MINUTES; break; case "s": unit = ChronoUnit.SECONDS; break; default: throw new IllegalStateException("Expected a time unit specification from d,h,m,s but found: [" + timeSpecifier + "]"); } return Duration.of(value, unit); }
@Test(expected = IllegalArgumentException.class) public void testParseToDurationWithUnrecognizedTimeUnitThrowsAnError() { AbstractPipelineExt.parseToDuration("3y"); }
public boolean eval(StructLike data) { return new EvalVisitor().eval(data); }
@Test public void testGreaterThanOrEqual() { Evaluator evaluator = new Evaluator(STRUCT, greaterThanOrEqual("x", 7)); assertThat(evaluator.eval(TestHelpers.Row.of(7, 8, null))).as("7 >= 7 => true").isTrue(); assertThat(evaluator.eval(TestHelpers.Row.of(6, 8, null))).as("6 >= 7 => false").isFalse(); assertThat(evaluator.eval(TestHelpers.Row.of(8, 8, null))).as("8 >= 7 => true").isTrue(); Evaluator structEvaluator = new Evaluator(STRUCT, greaterThanOrEqual("s1.s2.s3.s4.i", 7)); assertThat( structEvaluator.eval( TestHelpers.Row.of( 7, 8, null, TestHelpers.Row.of( TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7))))))) .as("7 >= 7 => true") .isTrue(); assertThat( structEvaluator.eval( TestHelpers.Row.of( 7, 8, null, TestHelpers.Row.of( TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(6))))))) .as("6 >= 7 => false") .isFalse(); assertThat( structEvaluator.eval( TestHelpers.Row.of( 7, 8, null, TestHelpers.Row.of( TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(8))))))) .as("8 >= 7 => true") .isTrue(); }
@Override public Connection getConnection(final String databaseName, final ContextManager contextManager) { return new CircuitBreakerConnection(); }
@Test void assertGetConnection() { Connection actual = new CircuitBreakDriverState().getConnection(DefaultDatabase.LOGIC_NAME, mock(ContextManager.class, RETURNS_DEEP_STUBS)); assertThat(actual, instanceOf(CircuitBreakerConnection.class)); }
@Override public String resolveContentType(String resourceName) { if (resourceName != null && !resourceName.isEmpty()) { String lowerResourceName = resourceName.toLowerCase(); String fileExtension = StringUtils.substringAfterLast(lowerResourceName, '.'); return fileExtensionToContentType.getOrDefault(fileExtension, unknownFileContentType); } return null; }
@Test void missingResourceName() { ContentTypeResolver contentTypeResolver = new DefaultContentTypeResolver(); assertThat(contentTypeResolver.resolveContentType(null)).isNull(); assertThat(contentTypeResolver.resolveContentType("")).isNull(); }
public static InetSocketAddress createUnresolved(String hostname, int port) { return createInetSocketAddress(hostname, port, false); }
@Test void shouldAlwaysCreateResolvedNumberIPAddress() { InetSocketAddress socketAddress = AddressUtils.createUnresolved("127.0.0.1", 8080); assertThat(socketAddress.isUnresolved()).isFalse(); assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); assertThat(socketAddress.getPort()).isEqualTo(8080); assertThat(socketAddress.getHostString()).isEqualTo("127.0.0.1"); }
private static void network(XmlGenerator gen, ClientNetworkConfig network) { gen.open("network") .node("cluster-routing", null, "mode", network.getClusterRoutingConfig().getRoutingMode().name()) .node("redo-operation", network.isRedoOperation()) .node("connection-timeout", network.getConnectionTimeout()); clusterMembers(gen, network.getAddresses()); socketOptions(gen, network.getSocketOptions()); socketInterceptor(gen, network.getSocketInterceptorConfig()); ssl(gen, network.getSSLConfig()); cloud(gen, network.getCloudConfig()); aliasedDiscoveryConfigsGenerator(gen, aliasedDiscoveryConfigsFrom(network)); autoDetection(gen, network.getAutoDetectionConfig()); discovery(gen, network.getDiscoveryConfig()); outboundPort(gen, network.getOutboundPortDefinitions()); icmp(gen, network.getClientIcmpPingConfig()); gen.close(); }
@Test public void network() { ClientNetworkConfig expected = new ClientNetworkConfig(); expected.setRedoOperation(true) .setConnectionTimeout(randomInt()) .addAddress(randomString()) .setOutboundPortDefinitions(Collections.singleton(randomString())) .getClusterRoutingConfig().setRoutingMode(RoutingMode.MULTI_MEMBER); clientConfig.setNetworkConfig(expected); ClientNetworkConfig actual = newConfigViaGenerator().getNetworkConfig(); assertEquals(RoutingMode.MULTI_MEMBER, actual.getClusterRoutingConfig().getRoutingMode()); assertTrue(actual.isRedoOperation()); assertEquals(expected.getConnectionTimeout(), actual.getConnectionTimeout()); assertCollection(expected.getAddresses(), actual.getAddresses()); assertCollection(expected.getOutboundPortDefinitions(), actual.getOutboundPortDefinitions()); }
public ConvertedTime getConvertedTime(long duration) { Set<Seconds> keys = RULES.keySet(); for (Seconds seconds : keys) { if (duration <= seconds.getSeconds()) { return RULES.get(seconds).getConvertedTime(duration); } } return new TimeConverter.OverTwoYears().getConvertedTime(duration); }
@Test public void testShouldReportAbout2HoursFor89Minutes30Seconds() throws Exception { assertEquals(TimeConverter.ABOUT_X_HOURS_AGO.argument(2), timeConverter .getConvertedTime(1 * TimeConverter.HOUR_IN_SECONDS + 29 * 60 + 30)); }
public ProcessingExceptionHandler processingExceptionHandler() { return getConfiguredInstance(PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG, ProcessingExceptionHandler.class); }
@Test public void shouldOverrideDefaultProcessingExceptionHandler() { props.put(StreamsConfig.PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG, "org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler"); final StreamsConfig streamsConfig = new StreamsConfig(props); assertEquals("org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler", streamsConfig.processingExceptionHandler().getClass().getName()); }
@Override public void onEvent(Event e) { LOGGER.info("Received event from the King's Hand: {}", e.toString()); }
@Test void testOnEvent() { final var kingJoffrey = new KingJoffrey(); IntStream.range(0, Event.values().length).forEach(i -> { assertEquals(i, appender.getLogSize()); var event = Event.values()[i]; kingJoffrey.onEvent(event); final var expectedMessage = "Received event from the King's Hand: " + event; assertEquals(expectedMessage, appender.getLastMessage()); assertEquals(i + 1, appender.getLogSize()); }); }
public static void validateFineGrainedAuth(Method endpointMethod, UriInfo uriInfo, HttpHeaders httpHeaders, FineGrainedAccessControl accessControl) { if (endpointMethod.isAnnotationPresent(Authorize.class)) { final Authorize auth = endpointMethod.getAnnotation(Authorize.class); String targetId = null; // Message to use in the access denied exception String accessDeniedMsg; if (auth.targetType() == TargetType.TABLE) { // paramName is mandatory for table level authorization if (StringUtils.isEmpty(auth.paramName())) { throw new WebApplicationException( "paramName not found for table level authorization in API: " + uriInfo.getRequestUri(), Response.Status.INTERNAL_SERVER_ERROR); } // find the paramName in the path or query params targetId = findParam(auth.paramName(), uriInfo.getPathParameters(), uriInfo.getQueryParameters()); if (StringUtils.isEmpty(targetId)) { throw new WebApplicationException( "Could not find paramName " + auth.paramName() + " in path or query params of the API: " + uriInfo.getRequestUri(), Response.Status.INTERNAL_SERVER_ERROR); } // Table name may contain type, hence get raw table name for checking access targetId = DatabaseUtils.translateTableName(TableNameBuilder.extractRawTableName(targetId), httpHeaders); accessDeniedMsg = "Access denied to " + auth.action() + " for table: " + targetId; } else if (auth.targetType() == TargetType.CLUSTER) { accessDeniedMsg = "Access denied to " + auth.action() + " in the cluster"; } else { throw new WebApplicationException( "Unsupported targetType: " + auth.targetType() + " in API: " + uriInfo.getRequestUri(), Response.Status.INTERNAL_SERVER_ERROR); } boolean hasAccess; try { hasAccess = accessControl.hasAccess(httpHeaders, auth.targetType(), targetId, auth.action()); } catch (Throwable t) { // catch and log Throwable for NoSuchMethodError which can happen when there are classpath conflicts // otherwise, grizzly will return a 500 without any logs or indication of what failed String errorMsg = String.format("Failed to check for access for target type %s and target ID %s with action %s", auth.targetType(), targetId, auth.action()); LOGGER.error(errorMsg, t); throw new WebApplicationException(errorMsg, t, Response.Status.INTERNAL_SERVER_ERROR); } // Check for access now if (!hasAccess) { throw new WebApplicationException(accessDeniedMsg, Response.Status.FORBIDDEN); } } else if (!accessControl.defaultAccess(httpHeaders)) { throw new WebApplicationException("Access denied - default authorization failed", Response.Status.FORBIDDEN); } }
@Test public void testValidateFineGrainedAuthDenied() { FineGrainedAccessControl ac = Mockito.mock(FineGrainedAccessControl.class); Mockito.when(ac.hasAccess(Mockito.any(HttpHeaders.class), Mockito.any(), Mockito.any(), Mockito.any())) .thenReturn(false); UriInfo mockUriInfo = Mockito.mock(UriInfo.class); HttpHeaders mockHttpHeaders = Mockito.mock(HttpHeaders.class); try { FineGrainedAuthUtils.validateFineGrainedAuth(getAnnotatedMethod(), mockUriInfo, mockHttpHeaders, ac); Assert.fail("Expected WebApplicationException"); } catch (WebApplicationException e) { Assert.assertTrue(e.getMessage().contains("Access denied to getCluster in the cluster")); Assert.assertEquals(e.getResponse().getStatus(), Response.Status.FORBIDDEN.getStatusCode()); } }
RegistryEndpointProvider<Void> committer(URL location) { return new Committer(location); }
@Test public void testCommitter_getApiRoute() throws MalformedURLException { Assert.assertEquals( new URL("https://someurl?somequery=somevalue&digest=" + fakeDescriptorDigest), testBlobPusher.committer(new URL("https://someurl?somequery=somevalue")).getApiRoute("")); }
public static List<AwsEndpoint>[] splitByZone(List<AwsEndpoint> eurekaEndpoints, String myZone) { if (eurekaEndpoints.isEmpty()) { return new List[]{Collections.emptyList(), Collections.emptyList()}; } if (myZone == null) { return new List[]{Collections.emptyList(), new ArrayList<>(eurekaEndpoints)}; } List<AwsEndpoint> myZoneList = new ArrayList<>(eurekaEndpoints.size()); List<AwsEndpoint> remainingZonesList = new ArrayList<>(eurekaEndpoints.size()); for (AwsEndpoint endpoint : eurekaEndpoints) { if (myZone.equalsIgnoreCase(endpoint.getZone())) { myZoneList.add(endpoint); } else { remainingZonesList.add(endpoint); } } return new List[]{myZoneList, remainingZonesList}; }
@Test public void testSplitByZone() throws Exception { List<AwsEndpoint> endpoints = SampleCluster.merge(SampleCluster.UsEast1a, SampleCluster.UsEast1b, SampleCluster.UsEast1c); List<AwsEndpoint>[] parts = ResolverUtils.splitByZone(endpoints, "us-east-1b"); List<AwsEndpoint> myZoneServers = parts[0]; List<AwsEndpoint> remainingServers = parts[1]; assertThat(myZoneServers, is(equalTo(SampleCluster.UsEast1b.build()))); assertThat(remainingServers, is(equalTo(SampleCluster.merge(SampleCluster.UsEast1a, SampleCluster.UsEast1c)))); }
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }
@Test public void shouldHandleQualifiedSelect() { // Given: final SingleStatementContext stmt = givenQuery("SELECT TEST1.COL0 FROM TEST1;"); // When: final Query result = (Query) builder.buildStatement(stmt); // Then: assertThat(result.getSelect(), is(new Select(ImmutableList.of( new SingleColumn( column(TEST1_NAME, "COL0"), Optional.empty()) )))); }
public static void init(final Map<String, PluginConfiguration> pluginConfigs, final Collection<JarFile> pluginJars, final ClassLoader pluginClassLoader, final boolean isEnhancedForProxy) { if (STARTED_FLAG.compareAndSet(false, true)) { start(pluginConfigs, pluginClassLoader, isEnhancedForProxy); Runtime.getRuntime().addShutdownHook(new Thread(() -> close(pluginJars))); } }
@Test void assertInitPluginLifecycleServiceWithMockHandler() throws MalformedURLException { URLStreamHandlerFactory urlStreamHandlerFactory = mock(URLStreamHandlerFactory.class); PluginLifecycleServiceManager.init(Collections.emptyMap(), Collections.emptyList(), new PrivateMLet(new URL[]{Paths.get(System.getProperty("java.io.tmpdir"), "test.txt").toUri().toURL()}, new MultipleParentClassLoader(Collections.emptyList()), urlStreamHandlerFactory, true), true); verify(urlStreamHandlerFactory).createURLStreamHandler(anyString()); }
public static Builder builder() { return new Builder(); }
@Test public void testBackOffWithMaxAttempts() { final BackOff backOff = BackOff.builder().maxAttempts(5L).build(); final BackOffTimerTask context = new BackOffTimerTask(backOff, null, t -> true); long delay; for (int i = 1; i <= 5; i++) { delay = context.next(); assertEquals(i, context.getCurrentAttempts()); assertEquals(BackOff.DEFAULT_DELAY.toMillis(), delay); assertEquals(BackOff.DEFAULT_DELAY.toMillis(), context.getCurrentDelay()); assertEquals(BackOff.DEFAULT_DELAY.toMillis() * i, context.getCurrentElapsedTime()); } delay = context.next(); assertEquals(6, context.getCurrentAttempts()); assertEquals(BackOff.NEVER, delay); }
@Nullable public static <T extends Annotation> T extract(Class<?> targetClass, Class<T> annotationClass) { T annotation = null; if (targetClass.isAnnotationPresent(annotationClass)) { annotation = targetClass.getAnnotation(annotationClass); if (annotation == null && logger.isDebugEnabled()) { logger.debug("TargetClass has no annotation '{}'", annotationClass.getSimpleName()); annotation = targetClass.getDeclaredAnnotation(annotationClass); if (annotation == null && logger.isDebugEnabled()) { logger.debug("TargetClass has no declared annotation '{}'", annotationClass.getSimpleName()); } } } return annotation; }
@Test public void testExtract2() { CircuitBreaker circuitBreaker = AnnotationExtractor .extract(NotAnnotatedClass.class, CircuitBreaker.class); assertThat(circuitBreaker).isNull(); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()) .getBlobReferenceFromServer(containerService.getKey(file)); if(0L == blob.getProperties().getLength()) { return new NullInputStream(0L); } final BlobRequestOptions options = new BlobRequestOptions(); options.setConcurrentRequestCount(1); final BlobInputStream in = blob.openInputStream(AccessCondition.generateEmptyCondition(), options, context); if(status.isAppend()) { try { return StreamCopier.skip(in, status.getOffset()); } catch(IndexOutOfBoundsException e) { // If offset is invalid throw new DefaultExceptionMappingService().map("Download {0} failed", e, file); } } return new ProxyInputStream(in) { @Override protected void handleIOException(final IOException e) throws IOException { if(StringUtils.equals(SR.STREAM_CLOSED, e.getMessage())) { log.warn(String.format("Ignore failure %s", e)); return; } final Throwable cause = ExceptionUtils.getRootCause(e); if(cause instanceof StorageException) { throw new IOException(e.getMessage(), new AzureExceptionMappingService().map((StorageException) cause)); } throw e; } }; } catch(StorageException e) { throw new AzureExceptionMappingService().map("Download {0} failed", e, file); } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } }
@Test public void testReadZeroLength() throws Exception { final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new AzureTouchFeature(session, null).touch(test, new TransferStatus()); final InputStream in = new AzureReadFeature(session, null).read(test, new TransferStatus().withLength(0L), new DisabledConnectionCallback()); assertNotNull(in); in.close(); new AzureDeleteFeature(session, null).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override public RouteContext route(final ShardingRule shardingRule) { Collection<String> bindingTableNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); Collection<RouteContext> routeContexts = new LinkedList<>(); for (String each : logicTables) { Optional<ShardingTable> shardingTable = shardingRule.findShardingTable(each); if (shardingTable.isPresent()) { if (!bindingTableNames.contains(each)) { routeContexts.add(new ShardingStandardRoutingEngine(shardingTable.get().getLogicTable(), shardingConditions, sqlStatementContext, hintValueContext, props).route(shardingRule)); } shardingRule.findBindingTableRule(each).ifPresent(optional -> bindingTableNames.addAll(optional.getShardingTables().keySet())); } } if (routeContexts.isEmpty()) { throw new ShardingTableRuleNotFoundException(logicTables); } RouteContext result = new RouteContext(); if (1 == routeContexts.size()) { RouteContext newRouteContext = routeContexts.iterator().next(); result.getOriginalDataNodes().addAll(newRouteContext.getOriginalDataNodes()); result.getRouteUnits().addAll(newRouteContext.getRouteUnits()); } else { RouteContext routeContext = new ShardingCartesianRoutingEngine(routeContexts).route(shardingRule); result.getOriginalDataNodes().addAll(routeContext.getOriginalDataNodes()); result.getRouteUnits().addAll(routeContext.getRouteUnits()); } return result; }
@Test void assertRoutingForShardingTableJoinWithUpperCase() { ShardingComplexRoutingEngine complexRoutingEngine = new ShardingComplexRoutingEngine(ShardingRoutingEngineFixtureBuilder.createShardingConditions("T_ORDER"), mock(SQLStatementContext.class), new HintValueContext(), new ConfigurationProperties(new Properties()), Arrays.asList("T_ORDER", "T_CONFIG")); RouteContext routeContext = complexRoutingEngine.route(ShardingRoutingEngineFixtureBuilder.createBroadcastShardingRule()); List<RouteUnit> routeUnits = new ArrayList<>(routeContext.getRouteUnits()); assertThat(routeContext.getRouteUnits().size(), is(1)); assertThat(routeUnits.get(0).getDataSourceMapper().getActualName(), is("ds_1")); assertThat(routeUnits.get(0).getTableMappers().size(), is(1)); assertThat(routeUnits.get(0).getTableMappers().iterator().next().getActualName(), is("t_order_1")); assertThat(routeUnits.get(0).getTableMappers().iterator().next().getLogicName(), is("t_order")); }
@Override public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewKey(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewKey()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.rename(commands); } return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf) .filter(Objects::nonNull) .zipWith( Mono.defer(() -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) ) .flatMap(valueAndTtl -> { return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()); }) .thenReturn(new BooleanResponse<>(command, true)) .doOnSuccess((ignored) -> del(command.getKey())); }); }
@Test public void testRename_keyNotExist() { testInClusterReactive(connection -> { Integer originalSlot = getSlotForKey(originalKey, (RedissonReactiveRedisClusterConnection) connection); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot), connection); if (sameSlot) { // This is a quirk of the implementation - since same-slot renames use the non-cluster version, // the result is a Redis error. This behavior matches other spring-data-redis implementations assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block()) .isInstanceOf(RedisSystemException.class); } else { Boolean response = connection.keyCommands().rename(originalKey, newKey).block(); assertThat(response).isTrue(); final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block(); assertThat(newKeyValue).isEqualTo(null); } }); }
@Override public void truncate(final Long length) { this.length = length; if(temporary.exists()) { try { final RandomAccessFile file = random(); if(length < file.length()) { // Truncate current file.setLength(length); } } catch(IOException e) { log.warn(String.format("Failure truncating file %s to %d", temporary, length)); } } }
@Test public void testTruncate() throws Exception { final FileBuffer buffer = new FileBuffer(); assertEquals(0L, buffer.length(), 0L); final byte[] chunk = RandomUtils.nextBytes(100); buffer.write(chunk, 0L); assertEquals(100L, buffer.length(), 0L); buffer.truncate(1L); assertEquals(1L, buffer.length(), 0L); { final byte[] read = new byte[1]; assertEquals(1, buffer.read(read, 0L)); assertEquals(chunk[0], read[0]); } { final byte[] read = new byte[2]; assertEquals(1, buffer.read(read, 0L)); assertEquals(chunk[0], read[0]); } assertEquals(1L, buffer.length(), 0L); buffer.truncate(102L); assertEquals(102L, buffer.length(), 0L); { final byte[] read = new byte[2]; assertEquals(2, buffer.read(read, 0L)); assertEquals(chunk[0], read[0]); assertEquals(0, read[1]); } buffer.write(chunk, 0L); { final byte[] read = new byte[100]; assertEquals(100, buffer.read(read, 0L)); assertArrayEquals(chunk, read); } { final byte[] read = new byte[2]; assertEquals(2, buffer.read(read, 100L)); assertEquals(0, read[0]); assertEquals(0, read[1]); } { final byte[] read = new byte[3]; assertEquals(3, buffer.read(read, 99L)); assertEquals(chunk[99], read[0]); assertEquals(0, read[1]); assertEquals(0, read[2]); } assertEquals(IOUtils.EOF, buffer.read(new byte[1], 102L)); }
public String getFragmentByLines(int startLine, int endLine) { Preconditions.checkArgument(startLine <= endLine); return Joiner.on("\n").join(getLines(startLine, endLine)) + "\n"; }
@Test public void getFragmentByLines() { assertThat(sourceFile.getFragmentByLines(2, 2)) .isEqualTo("// eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut\n"); assertThat(sourceFile.getFragmentByLines(8, 8)).isEqualTo("// est laborum.\n"); assertThat(sourceFile.getFragmentByLines(2, 3)) .isEqualTo( "// eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut\n" + "// enim ad minim veniam, quis nostrud exercitation ullamco\n"); assertThat(sourceFile.getFragmentByLines(1, 8)).isEqualTo(SOURCE_TEXT); }
public FindTemplateResponse findByIdAndMember(MemberDto memberDto, Long id) { Member member = memberRepository.fetchById(memberDto.id()); Template template = templateRepository.fetchById(id); validateTemplateAuthorizeMember(template, member); List<SourceCode> sourceCodes = sourceCodeRepository.findAllByTemplate(template); List<Tag> tags = templateTagRepository.findAllByTemplate(template).stream() .map(TemplateTag::getTag) .toList(); return FindTemplateResponse.of(template, sourceCodes, tags); }
@Test @DisplayName("Find single template fails: unauthorized") void findOneTemplateFailWithUnauthorized() { // given MemberDto memberDto = MemberDtoFixture.getFirstMemberDto(); Member member = memberRepository.fetchById(memberDto.id()); CreateTemplateRequest createdTemplate = makeTemplateRequest("title"); Template template = saveTemplate(createdTemplate, new Category("category1", member), member); // when MemberDto otherMemberDto = MemberDtoFixture.getSecondMemberDto(); // then Long templateId = template.getId(); assertThatThrownBy(() -> templateService.findByIdAndMember(otherMemberDto, templateId)) .isInstanceOf(CodeZapException.class) .hasMessage("해당 템플릿에 대한 권한이 없습니다."); // service's actual exception message: "You do not have permission for this template." }
public static void verifyHybridTableConfigs(String rawTableName, TableConfig offlineTableConfig, TableConfig realtimeTableConfig) { Preconditions.checkNotNull(offlineTableConfig, "Found null offline table config in hybrid table check for table: %s", rawTableName); Preconditions.checkNotNull(realtimeTableConfig, "Found null realtime table config in hybrid table check for table: %s", rawTableName); LOGGER.info("Validating realtime and offline configs for the hybrid table: {}", rawTableName); SegmentsValidationAndRetentionConfig offlineSegmentConfig = offlineTableConfig.getValidationConfig(); SegmentsValidationAndRetentionConfig realtimeSegmentConfig = realtimeTableConfig.getValidationConfig(); String offlineTimeColumnName = offlineSegmentConfig.getTimeColumnName(); String realtimeTimeColumnName = realtimeSegmentConfig.getTimeColumnName(); if (offlineTimeColumnName == null || realtimeTimeColumnName == null) { throw new IllegalStateException(String.format( "'timeColumnName' cannot be null for table: %s! Offline time column name: %s. Realtime time column name: %s", rawTableName, offlineTimeColumnName, realtimeTimeColumnName)); } if (!offlineTimeColumnName.equals(realtimeTimeColumnName)) { throw new IllegalStateException(String.format( "Time column names are different for table: %s! Offline time column name: %s. Realtime time column name: %s", rawTableName, offlineTimeColumnName, realtimeTimeColumnName)); } TenantConfig offlineTenantConfig = offlineTableConfig.getTenantConfig(); TenantConfig realtimeTenantConfig = realtimeTableConfig.getTenantConfig(); String offlineBroker = offlineTenantConfig.getBroker() == null ? TagNameUtils.DEFAULT_TENANT_NAME : offlineTenantConfig.getBroker(); String realtimeBroker = realtimeTenantConfig.getBroker() == null ? TagNameUtils.DEFAULT_TENANT_NAME : realtimeTenantConfig.getBroker(); if (!offlineBroker.equals(realtimeBroker)) { throw new IllegalArgumentException(String.format( "Broker Tenants are different for table: %s! Offline broker tenant name: %s, Realtime broker tenant name: %s", rawTableName, offlineBroker, realtimeBroker)); } }
@Test public void testValidateHybridTableConfig() { TableConfig realtimeTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).build(); TableConfig offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build(); try { // Call validate hybrid table which realtime/offline tables are missing timeColumn. TableConfigUtils.verifyHybridTableConfigs(TABLE_NAME, offlineTableConfig, realtimeTableConfig); Assert.fail(); } catch (IllegalStateException ignored) { // Expected } realtimeTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName("secondsSinceEpoch") .build(); offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName("secondssinceepoch") .build(); try { // Call validate hybrid table which realtime table and offline table have different time columns. TableConfigUtils.verifyHybridTableConfigs(TABLE_NAME, offlineTableConfig, realtimeTableConfig); Assert.fail(); } catch (IllegalStateException ignored) { // Expected } realtimeTableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName("secondsSinceEpoch") .setBrokerTenant("broker1").build(); offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName("secondsSinceEpoch") .setBrokerTenant("broker2").build(); try { // Call validate hybrid table which realtime and offline table have different brokers. TableConfigUtils.verifyHybridTableConfigs(TABLE_NAME, offlineTableConfig, realtimeTableConfig); Assert.fail(); } catch (IllegalArgumentException ignored) { // Expected } }
public static FunctionTypeInfo getFunctionTypeInfo( final ExpressionTypeManager expressionTypeManager, final FunctionCall functionCall, final UdfFactory udfFactory, final Map<String, SqlType> lambdaMapping ) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity final List<Expression> arguments = functionCall.getArguments(); final List<SqlArgument> functionArgumentTypes = firstPassOverFunctionArguments( arguments, expressionTypeManager, lambdaMapping ); final KsqlScalarFunction function = udfFactory.getFunction(functionArgumentTypes); final SqlType returnSchema; final List<ArgumentInfo> argumentInfoForFunction = new ArrayList<>(); if (!functionCall.hasLambdaFunctionCallArguments()) { returnSchema = function.getReturnType(functionArgumentTypes); return FunctionTypeInfo.of( functionArgumentTypes.stream() .map(argument -> ArgumentInfo.of(argument, new HashMap<>(lambdaMapping))) .collect(Collectors.toList()), returnSchema, function ); } else { final List<ParamType> paramTypes = function.parameters(); final Map<GenericType, SqlType> reservedGenerics = new HashMap<>(); final List<SqlArgument> functionArgumentTypesWithResolvedLambdaType = new ArrayList<>(); // second pass over the function arguments to properly do lambda type checking for (int i = 0; i < arguments.size(); i++) { final Expression expression = arguments.get(i); final ParamType parameter = paramTypes.get(i); if (expression instanceof LambdaFunctionCall) { // the function returned from the UDF factory should have lambda // at this index in the function arguments if there's a // lambda node at this index in the function node argument list if (!(parameter instanceof LambdaType)) { throw new RuntimeException(String.format("Error while processing lambda function. " + "Expected lambda parameter but was %s. " + "This is most likely an internal error and a " + "Github issue should be filed for debugging. " + "Include the function name, the parameters passed in, the expected " + "signature, and any other relevant information.", parameter.toString())); } final ArrayList<SqlType> lambdaSqlTypes = new ArrayList<>(); final Map<String, SqlType> variableTypeMapping = mapLambdaParametersToTypes( (LambdaFunctionCall) expression, (LambdaType) parameter, reservedGenerics, lambdaSqlTypes ); final Map<String, SqlType> updateLambdaMapping = LambdaMappingUtil.resolveOldAndNewLambdaMapping(variableTypeMapping, lambdaMapping); final SqlType resolvedLambdaReturnType = expressionTypeManager.getExpressionSqlType(expression, updateLambdaMapping); final SqlArgument lambdaArgument = SqlArgument.of( SqlLambdaResolved.of(lambdaSqlTypes, resolvedLambdaReturnType)); functionArgumentTypesWithResolvedLambdaType.add(lambdaArgument); argumentInfoForFunction.add( ArgumentInfo.of( lambdaArgument, new HashMap<>(updateLambdaMapping))); } else { functionArgumentTypesWithResolvedLambdaType.add(functionArgumentTypes.get(i)); argumentInfoForFunction.add( ArgumentInfo.of( functionArgumentTypes.get(i), new HashMap<>(lambdaMapping))); } if (GenericsUtil.hasGenerics(parameter)) { final Pair<Boolean, Optional<KsqlException>> success = GenericsUtil.reserveGenerics( parameter, functionArgumentTypesWithResolvedLambdaType.get(i), reservedGenerics ); if (!success.getLeft() && success.getRight().isPresent()) { throw success.getRight().get(); } } } returnSchema = function.getReturnType(functionArgumentTypesWithResolvedLambdaType); return new FunctionTypeInfo( argumentInfoForFunction, returnSchema, function ); } }
@Test public void shouldResolveGenericsInLambdaFunction() { // Given: givenUdfWithNameAndReturnType("ComplexFunction", SqlTypes.DOUBLE); when(function.parameters()).thenReturn( ImmutableList.of( ArrayType.of(GenericType.of("X")), MapType.of(GenericType.of("K"), GenericType.of("V")), GenericType.of("Q"), LambdaType.of(ImmutableList.of(GenericType.of("K"), GenericType.of("V"), GenericType.of("Q")), GenericType.of("K")), LambdaType.of(ImmutableList.of(GenericType.of("Q"), GenericType.of("V")), GenericType.of("X")))); final FunctionCall expression = givenFunctionCallWithMultipleLambdas(); // When: final FunctionTypeInfo argumentsAndContexts = FunctionArgumentsUtil.getFunctionTypeInfo(expressionTypeManager, expression, udfFactory, Collections.emptyMap()); // Then: assertThat(argumentsAndContexts.getReturnType(), is(SqlTypes.DOUBLE)); assertThat(argumentsAndContexts.getArgumentInfos().size(), is(5)); assertThat(argumentsAndContexts.getArgumentInfos().get(3).getSqlArgument(), is(SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.BIGINT, SqlTypes.DOUBLE, SqlTypes.STRING), SqlTypes.BIGINT)))); assertThat(argumentsAndContexts.getArgumentInfos().get(3).getLambdaSqlTypeMapping().get("X"), is(SqlTypes.BIGINT)); assertThat(argumentsAndContexts.getArgumentInfos().get(3).getLambdaSqlTypeMapping().get("Y"), is(SqlTypes.DOUBLE)); assertThat(argumentsAndContexts.getArgumentInfos().get(3).getLambdaSqlTypeMapping().get("Z"), is(SqlTypes.STRING)); assertThat(argumentsAndContexts.getArgumentInfos().get(4).getSqlArgument(), is(SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.STRING, SqlTypes.DOUBLE), SqlTypes.DOUBLE)))); assertThat(argumentsAndContexts.getArgumentInfos().get(4).getLambdaSqlTypeMapping().get("A"), is(SqlTypes.STRING)); assertThat(argumentsAndContexts.getArgumentInfos().get(4).getLambdaSqlTypeMapping().get("B"), is(SqlTypes.DOUBLE)); // in the first pass we should have verify(udfFactory).getFunction( ImmutableList.of( SqlArgument.of(SqlTypes.array(SqlTypes.DOUBLE)), SqlArgument.of(SqlTypes.map(SqlTypes.BIGINT, SqlTypes.DOUBLE)), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlLambda.of(3)), SqlArgument.of(SqlLambda.of(2)) ) ); verify(function).getReturnType( ImmutableList.of( SqlArgument.of(SqlTypes.array(SqlTypes.DOUBLE)), SqlArgument.of(SqlTypes.map(SqlTypes.BIGINT, SqlTypes.DOUBLE)), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.BIGINT, SqlTypes.DOUBLE, SqlTypes.STRING), SqlTypes.BIGINT)), SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.STRING, SqlTypes.DOUBLE), SqlTypes.DOUBLE)) ) ); }
public static String fastUUID() { return UUID.fastUUID().toString(); }
@Test public void fastUUIDTest() { String simpleUUID = IdUtil.fastSimpleUUID(); assertEquals(32, simpleUUID.length()); String randomUUID = IdUtil.fastUUID(); assertEquals(36, randomUUID.length()); }
public static <K, V> PTransform<PCollection<KV<K, TimestampedValue<V>>>, PCollection<KV<K, V>>> extractTimestampsFromValues() { return new ExtractTimestampsFromValues<>(); }
@Test @Category(NeedsRunner.class) public void extractFromValuesSucceeds() { PCollection<KV<String, TimestampedValue<Integer>>> preified = pipeline.apply( Create.of( KV.of("foo", TimestampedValue.of(0, new Instant(0))), KV.of("foo", TimestampedValue.of(1, new Instant(1))), KV.of("bar", TimestampedValue.of(2, new Instant(2))), KV.of("baz", TimestampedValue.of(3, new Instant(3))))); PCollection<KV<String, Integer>> timestamped = preified.apply(Reify.extractTimestampsFromValues()); PAssert.that(timestamped) .containsInAnyOrder(KV.of("foo", 0), KV.of("foo", 1), KV.of("bar", 2), KV.of("baz", 3)); timestamped.apply( "AssertElementTimestamps", ParDo.of( new DoFn<KV<String, Integer>, Void>() { @ProcessElement public void verifyTimestampsEqualValue(ProcessContext context) { assertThat( new Instant(context.element().getValue().longValue()), equalTo(context.timestamp())); } })); pipeline.run(); }
@Override public void preCommit(TransactionState txnState, List<TabletCommitInfo> finishedTablets, List<TabletFailInfo> failedTablets) throws TransactionException { Preconditions.checkState(txnState.getTransactionStatus() != TransactionStatus.COMMITTED); txnState.clearAutomaticPartitionSnapshot(); if (!finishedTablets.isEmpty()) { txnState.setTabletCommitInfos(finishedTablets); } if (table.getState() == OlapTable.OlapTableState.RESTORE) { throw new TransactionCommitFailedException("Cannot write RESTORE state table \"" + table.getName() + "\""); } dirtyPartitionSet = Sets.newHashSet(); invalidDictCacheColumns = Sets.newHashSet(); validDictCacheColumns = Maps.newHashMap(); Set<Long> finishedTabletsOfThisTable = Sets.newHashSet(); TabletInvertedIndex tabletInvertedIndex = dbTxnMgr.getGlobalStateMgr().getTabletInvertedIndex(); List<Long> tabletIds = finishedTablets.stream().map(TabletCommitInfo::getTabletId).collect(Collectors.toList()); List<TabletMeta> tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds); for (int i = 0; i < tabletMetaList.size(); i++) { TabletMeta tabletMeta = tabletMetaList.get(i); if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) { continue; } if (tabletMeta.getTableId() != table.getId()) { continue; } if (table.getPhysicalPartition(tabletMeta.getPartitionId()) == null) { // this can happen when partitionId == -1 (tablet is being dropped) or the partition really does not exist. continue; } dirtyPartitionSet.add(tabletMeta.getPartitionId()); // Invalid column set should union invalidDictCacheColumns.addAll(finishedTablets.get(i).getInvalidDictCacheColumns()); // Valid column set should intersect and remove all invalid columns // Only need to add valid column set once if (validDictCacheColumns.isEmpty() && !finishedTablets.get(i).getValidDictCacheColumns().isEmpty()) { TabletCommitInfo tabletCommitInfo = finishedTablets.get(i); List<Long> validDictCollectedVersions = tabletCommitInfo.getValidDictCollectedVersions(); List<ColumnId> validDictCacheColumns = tabletCommitInfo.getValidDictCacheColumns(); for (int j = 0; j < validDictCacheColumns.size(); j++) { long version = 0; // a size mismatch between validDictCollectedVersions and validDictCacheColumns means the BE has not been upgraded yet if (validDictCollectedVersions.size() == validDictCacheColumns.size()) { version = validDictCollectedVersions.get(j); } this.validDictCacheColumns.put(validDictCacheColumns.get(j), version); } } if (i == tabletMetaList.size() - 1) { validDictCacheColumns.entrySet().removeIf(entry -> invalidDictCacheColumns.contains(entry.getKey())); } finishedTabletsOfThisTable.add(finishedTablets.get(i).getTabletId()); } if (enableIngestSlowdown()) { long currentTimeMs = System.currentTimeMillis(); new CommitRateLimiter(compactionMgr, txnState, table.getId()).check(dirtyPartitionSet, currentTimeMs); } List<Long> unfinishedTablets = null; for (Long partitionId : dirtyPartitionSet) { PhysicalPartition partition = table.getPhysicalPartition(partitionId); List<MaterializedIndex> allIndices = txnState.getPartitionLoadedTblIndexes(table.getId(), partition); for (MaterializedIndex index : allIndices) { Optional<Tablet> unfinishedTablet = index.getTablets().stream().filter(t -> !finishedTabletsOfThisTable.contains(t.getId())) .findAny(); if (!unfinishedTablet.isPresent()) { continue; } if (unfinishedTablets == null) { unfinishedTablets = Lists.newArrayList(); } unfinishedTablets.add(unfinishedTablet.get().getId()); } } if (unfinishedTablets != null && !unfinishedTablets.isEmpty()) { throw new TransactionCommitFailedException("table '" + table.getName() + "' has unfinished tablets: " + unfinishedTablets); } }
@Test public void testCommitRateLimiterDisabled() throws TransactionException { new MockUp<LakeTableTxnStateListener>() { @Mock boolean enableIngestSlowdown() { return false; } }; LakeTable table = buildLakeTable(); DatabaseTransactionMgr databaseTransactionMgr = addDatabaseTransactionMgr(); LakeTableTxnStateListener listener = new LakeTableTxnStateListener(databaseTransactionMgr, table); makeCompactionScoreExceedSlowdownThreshold(); listener.preCommit(newTransactionState(), buildFullTabletCommitInfo(), Collections.emptyList()); }
public IssueQuery create(SearchRequest request) { try (DbSession dbSession = dbClient.openSession(false)) { final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone()); Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules()); Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet()); Collection<String> issueKeys = collectIssueKeys(dbSession, request); if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) { ruleUuids.add("non-existing-uuid"); } IssueQuery.Builder builder = IssueQuery.builder() .issueKeys(issueKeys) .severities(request.getSeverities()) .cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories()) .impactSoftwareQualities(request.getImpactSoftwareQualities()) .impactSeverities(request.getImpactSeverities()) .statuses(request.getStatuses()) .resolutions(request.getResolutions()) .issueStatuses(request.getIssueStatuses()) .resolved(request.getResolved()) .prioritizedRule(request.getPrioritizedRule()) .rules(ruleDtos) .ruleUuids(ruleUuids) .assigneeUuids(request.getAssigneeUuids()) .authors(request.getAuthors()) .scopes(request.getScopes()) .languages(request.getLanguages()) .tags(request.getTags()) .types(request.getTypes()) .pciDss32(request.getPciDss32()) .pciDss40(request.getPciDss40()) .owaspAsvs40(request.getOwaspAsvs40()) .owaspAsvsLevel(request.getOwaspAsvsLevel()) .owaspTop10(request.getOwaspTop10()) .owaspTop10For2021(request.getOwaspTop10For2021()) .stigAsdR5V3(request.getStigAsdV5R3()) .casa(request.getCasa()) .sansTop25(request.getSansTop25()) .cwe(request.getCwe()) .sonarsourceSecurity(request.getSonarsourceSecurity()) .assigned(request.getAssigned()) .createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone)) .createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone)) .facetMode(request.getFacetMode()) .timeZone(timeZone) .codeVariants(request.getCodeVariants()); List<ComponentDto> allComponents = new ArrayList<>(); boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents); addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request); setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone); String sort = request.getSort(); if (!isNullOrEmpty(sort)) { builder.sort(sort); builder.asc(request.getAsc()); } return builder.build(); } }
@Test public void param_componentUuids_enables_search_by_test_file() { ComponentDto project = db.components().insertPrivateProject().getMainBranchComponent(); ComponentDto file = db.components().insertComponent(newFileDto(project).setQualifier(Qualifiers.UNIT_TEST_FILE)); SearchRequest request = new SearchRequest() .setComponentUuids(asList(file.uuid())); IssueQuery query = underTest.create(request); assertThat(query.componentUuids()).containsExactly(file.uuid()); }
public boolean isSuccess() { return applicationStatus == ApplicationStatus.SUCCEEDED || (applicationStatus == ApplicationStatus.UNKNOWN && serializedThrowable == null); }
@Test void testIsNotSuccess() { final JobResult jobResult = new JobResult.Builder() .jobId(new JobID()) .serializedThrowable(new SerializedThrowable(new RuntimeException())) .netRuntime(Long.MAX_VALUE) .build(); assertThat(jobResult.isSuccess()).isFalse(); }
private RemotingCommand updateUser(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { RemotingCommand response = RemotingCommand.createResponseCommand(null); UpdateUserRequestHeader requestHeader = request.decodeCommandCustomHeader(UpdateUserRequestHeader.class); if (StringUtils.isEmpty(requestHeader.getUsername())) { response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("The username is blank"); return response; } UserInfo userInfo = RemotingSerializable.decode(request.getBody(), UserInfo.class); userInfo.setUsername(requestHeader.getUsername()); User user = UserConverter.convertUser(userInfo); if (user.getUserType() == UserType.SUPER && isNotSuperUserLogin(request)) { response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("The super user can only be updated by a super user"); return response; } this.brokerController.getAuthenticationMetadataManager().getUser(requestHeader.getUsername()) .thenCompose(old -> { if (old == null) { throw new AuthenticationException("The user does not exist"); } if (old.getUserType() == UserType.SUPER && isNotSuperUserLogin(request)) { throw new AuthenticationException("The super user can only be updated by a super user"); } // update with the decoded user rather than the stale record return this.brokerController.getAuthenticationMetadataManager().updateUser(user); }).thenAccept(nil -> response.setCode(ResponseCode.SUCCESS)) .exceptionally(ex -> { LOGGER.error("update user {} error", requestHeader.getUsername(), ex); return handleAuthException(response, ex); }) .join(); return response; }
@Test
public void testUpdateUser() throws RemotingCommandException {
    when(authenticationMetadataManager.updateUser(any(User.class)))
        .thenReturn(CompletableFuture.completedFuture(null));
    when(authenticationMetadataManager.getUser(eq("abc")))
        .thenReturn(CompletableFuture.completedFuture(User.of("abc", "123", UserType.NORMAL)));
    when(authenticationMetadataManager.getUser(eq("super")))
        .thenReturn(CompletableFuture.completedFuture(User.of("super", "123", UserType.SUPER)));

    UpdateUserRequestHeader updateUserRequestHeader = new UpdateUserRequestHeader();
    updateUserRequestHeader.setUsername("abc");
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_UPDATE_USER, updateUserRequestHeader);
    request.setVersion(441);
    request.addExtField("AccessKey", "rocketmq");
    request.makeCustomHeaderToNet();
    UserInfo userInfo = UserInfo.of("abc", "123", UserType.NORMAL.getName());
    request.setBody(JSON.toJSONBytes(userInfo));
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);

    when(authenticationMetadataManager.isSuperUser(eq("rocketmq"))).thenReturn(CompletableFuture.completedFuture(true));
    updateUserRequestHeader = new UpdateUserRequestHeader();
    updateUserRequestHeader.setUsername("super");
    request = RemotingCommand.createRequestCommand(RequestCode.AUTH_UPDATE_USER, updateUserRequestHeader);
    request.setVersion(441);
    request.addExtField("AccessKey", "rocketmq");
    request.makeCustomHeaderToNet();
    userInfo = UserInfo.of("super", "123", UserType.SUPER.getName());
    request.setBody(JSON.toJSONBytes(userInfo));
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);

    when(authenticationMetadataManager.isSuperUser(eq("rocketmq"))).thenReturn(CompletableFuture.completedFuture(false));
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
}
@SqlInvokedScalarFunction(value = "array_duplicates", alias = {"array_dupes"}, deterministic = true, calledOnNullInput = false)
@Description("Returns set of elements that have duplicates")
@SqlParameter(name = "input", type = "array(T)")
@TypeParameter("T")
@SqlType("array(T)")
public static String arrayDuplicates() {
    return "RETURN CONCAT("
            + "IF (cardinality(filter(input, x -> x is NULL)) > 1, array[element_at(input, find_first_index(input, x -> x IS NULL))], array[]),"
            + "map_keys(map_filter(array_frequency(input), (k, v) -> v > 1)))";
}
@Test
public void testArrayDuplicates() {
    assertFunction("array_duplicates(cast(null as array(varchar)))", new ArrayType(VARCHAR), null);
    assertFunction("array_duplicates(cast(array[] as array(varchar)))", new ArrayType(VARCHAR), ImmutableList.of());
    assertFunction("array_duplicates(array[varchar 'a', varchar 'b', varchar 'a'])", new ArrayType(VARCHAR), ImmutableList.of("a"));
    assertFunction("array_duplicates(array[varchar 'a', varchar 'b'])", new ArrayType(VARCHAR), ImmutableList.of());
    assertFunction("array_duplicates(array[varchar 'a', varchar 'a'])", new ArrayType(VARCHAR), ImmutableList.of("a"));
    assertFunction("array_duplicates(array[1, 2, 1])", new ArrayType(INTEGER), ImmutableList.of(1));
    assertFunction("array_duplicates(array[1, 2])", new ArrayType(INTEGER), ImmutableList.of());
    assertFunction("array_duplicates(array[1, 1, 1])", new ArrayType(INTEGER), ImmutableList.of(1));
    assertFunction("array_duplicates(array[0, null])", new ArrayType(INTEGER), ImmutableList.of());
    assertFunction("array_duplicates(array[0, null, null])", new ArrayType(INTEGER), singletonList(null));

    // Test legacy name.
    assertFunction("array_dupes(array[1, 2, 1])", new ArrayType(INTEGER), ImmutableList.of(1));

    RowType rowType = RowType.from(ImmutableList.of(RowType.field(INTEGER), RowType.field(INTEGER)));
    assertFunction("array_duplicates(array[array[1], array[2], array[]])", new ArrayType(new ArrayType(INTEGER)), ImmutableList.of());
    assertFunction("array_duplicates(array[array[1], array[2], array[2]])", new ArrayType(new ArrayType(INTEGER)), ImmutableList.of(ImmutableList.of(2)));
    assertFunction("array_duplicates(array[(1, 2), (1, 2)])", new ArrayType(rowType), ImmutableList.of(ImmutableList.of(1, 2)));
    assertFunction("array_duplicates(array[(1, 2), (2, 2)])", new ArrayType(rowType), ImmutableList.of());
    assertInvalidFunction("array_duplicates(array[(1, null), (null, 2), (null, 1)])", StandardErrorCode.NOT_SUPPORTED, "ROW comparison not supported for fields with null elements");
    assertInvalidFunction("array_duplicates(array[(1, null), (null, 2), (null, null)])", StandardErrorCode.NOT_SUPPORTED, "map key cannot be null or contain nulls");
}
public boolean release(final ResourceProfile reservation) {
    checkResourceProfileNotNullOrUnknown(reservation);
    ResourceProfile newAvailableBudget = availableBudget.merge(reservation);
    if (!totalBudget.allFieldsNoLessThan(newAvailableBudget)) {
        return false;
    }
    availableBudget = newAvailableBudget;
    LOG.debug("Resource budget increased to {}.", availableBudget);
    return true;
}
@Test
void testRelease() {
    ResourceBudgetManager budgetManager =
            new ResourceBudgetManager(createResourceProfile(1.0, 100));
    assertThat(budgetManager.reserve(createResourceProfile(0.7, 70))).isTrue();
    assertThat(budgetManager.release(createResourceProfile(0.5, 50))).isTrue();
    assertThat(budgetManager.getAvailableBudget()).isEqualTo(createResourceProfile(0.8, 80));
}
public static Builder newBuilder() {
    return new Builder();
}
@Test
public void largeLimitIncrease() {
    VegasLimit limit = VegasLimit.newBuilder()
            .initialLimit(10000)
            .maxConcurrency(20000)
            .build();

    limit.onSample(0, TimeUnit.SECONDS.toNanos(10), 5000, false);
    Assert.assertEquals(10000, limit.getLimit());

    limit.onSample(0, TimeUnit.SECONDS.toNanos(10), 6000, false);
    Assert.assertEquals(10024, limit.getLimit());
}
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
    merge(decrypt, config);
}
@Test(expected = ConfigException.class)
public void testMap_key_mergeWhenFieldNotInValues_throwsException() {
    Map<String, Object> testMap = new HashMap<>();
    testMap.put("${TEST.somethingNotInValues}", "value");
    CentralizedManagement.mergeMap(true, testMap);
}
@Operation(summary = "update", description = "Update a cluster")
@PutMapping("/{id}")
public ResponseEntity<ClusterVO> update(@PathVariable Long id, @RequestBody @Validated ClusterReq clusterReq) {
    ClusterDTO clusterDTO = ClusterConverter.INSTANCE.fromReq2DTO(clusterReq);
    return ResponseEntity.success(clusterService.update(id, clusterDTO));
}
@Test
void updateModifiesCluster() {
    Long id = 1L;
    ClusterReq clusterReq = new ClusterReq();
    ClusterVO updatedCluster = new ClusterVO();
    when(clusterService.update(eq(id), any())).thenReturn(updatedCluster);

    ResponseEntity<ClusterVO> response = clusterController.update(id, clusterReq);

    assertTrue(response.isSuccess());
    assertEquals(updatedCluster, response.getData());
}
public static Number parseNumber(String numberStr) throws NumberFormatException {
    if (StrUtil.startWithIgnoreCase(numberStr, "0x")) {
        // a "0x" prefix (e.g. 0x04) denotes a hexadecimal number
        return Long.parseLong(numberStr.substring(2), 16);
    } else if (StrUtil.startWith(numberStr, '+')) {
        // issue#I79VS7: strip a leading plus sign before parsing
        numberStr = StrUtil.subSuf(numberStr, 1);
    }

    try {
        final NumberFormat format = NumberFormat.getInstance();
        if (format instanceof DecimalFormat) {
            // issue#1818@Github
            // a numeric string that exceeds double precision would be truncated,
            // so parse into a BigDecimal instead
            ((DecimalFormat) format).setParseBigDecimal(true);
        }
        return format.parse(numberStr);
    } catch (ParseException e) {
        final NumberFormatException nfe = new NumberFormatException(e.getMessage());
        nfe.initCause(e);
        throw nfe;
    }
}
@Test
public void issue3636Test() {
    final Number number = NumberUtil.parseNumber("12,234,456");
    assertEquals(new BigDecimal(12234456), number);
}
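A brief usage sketch of the parsing branches above: the hex and plus-sign cases follow directly from the method body, while the grouped-digits case assumes a default locale whose NumberFormat uses ',' as the grouping separator (the demo class itself is illustrative).

import cn.hutool.core.util.NumberUtil;

public class ParseNumberDemo {
    public static void main(String[] args) {
        // the "0x" prefix takes the hexadecimal branch and yields a Long
        System.out.println(NumberUtil.parseNumber("0xFF"));        // 255
        // a leading '+' is stripped before NumberFormat parsing (issue#I79VS7)
        System.out.println(NumberUtil.parseNumber("+12"));         // 12
        // grouped digits parse as BigDecimal, avoiding double truncation
        System.out.println(NumberUtil.parseNumber("12,234,456")); // 12234456
    }
}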
public void putValue(String fieldName, @Nullable Object value) {
  _fieldToValueMap.put(fieldName, value);
}
@Test
public void testNullAndNonNullValuesNotEqual() {
  GenericRow first = new GenericRow();
  first.putValue("one", null);
  GenericRow second = new GenericRow();
  second.putValue("one", 1);
  Assert.assertNotEquals(first, second);

  first = new GenericRow();
  first.putValue("one", 1);
  second = new GenericRow();
  second.putValue("one", null);
  Assert.assertNotEquals(first, second);
}
public void registerHandlerMethods(String pluginId, Object handler) {
    Class<?> handlerType = (handler instanceof String beanName
        ? obtainApplicationContext().getType(beanName)
        : handler.getClass());

    if (handlerType != null) {
        final Class<?> userType = ClassUtils.getUserClass(handlerType);
        Map<Method, RequestMappingInfo> methods = MethodIntrospector.selectMethods(userType,
            (MethodIntrospector.MetadataLookup<RequestMappingInfo>) method ->
                getPluginMappingForMethod(pluginId, method, userType));
        if (logger.isTraceEnabled()) {
            logger.trace(formatMappings(userType, methods));
        } else if (mappingsLogger.isDebugEnabled()) {
            mappingsLogger.debug(formatMappings(userType, methods));
        }
        methods.forEach((method, mapping) -> {
            Method invocableMethod = AopUtils.selectInvocableMethod(method, userType);
            registerHandlerMethod(handler, invocableMethod, mapping);
            pluginMappingInfo.add(pluginId, mapping);
        });
    }
}
@Test
public void getHandlerDirectMatch() {
    // register handler methods first
    handlerMapping.registerHandlerMethods("fakePlugin", new TestController());

    // resolve an expected method from TestController
    Method expected = ResolvableMethod.on(TestController.class).annot(getMapping("/foo")).build();

    // get handler by mock exchange
    ServerWebExchange exchange = MockServerWebExchange.from(
        get("/apis/api.plugin.halo.run/v1alpha1/plugins/fakePlugin/foo"));
    HandlerMethod hm = (HandlerMethod) this.handlerMapping.getHandler(exchange).block();

    assertThat(hm).isNotNull();
    assertThat(hm.getMethod()).isEqualTo(expected);
}
@Override
public MetricType getType() {
    return MetricType.COUNTER_LONG;
}
@Test
public void getType() {
    assertEquals(MetricType.COUNTER_LONG, new UptimeMetric().getType());
}
public static int compose(final int major, final int minor, final int patch) {
    if (major < 0 || major > 255) {
        throw new IllegalArgumentException("major must be 0-255: " + major);
    }

    if (minor < 0 || minor > 255) {
        throw new IllegalArgumentException("minor must be 0-255: " + minor);
    }

    if (patch < 0 || patch > 255) {
        throw new IllegalArgumentException("patch must be 0-255: " + patch);
    }

    if (major + minor + patch == 0) {
        throw new IllegalArgumentException("all parts cannot be zero");
    }

    return (major << 16) | (minor << 8) | patch;
}
@Test
void shouldDetectZeroVersion() {
    assertThrows(IllegalArgumentException.class, () -> SemanticVersion.compose(0, 0, 0));
}
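Because compose packs each part into one byte of the returned int, the parts can be recovered with shifts and masks. A minimal sketch of the inverse; the accessor names here are illustrative, not taken from the class above.

public final class SemanticVersionParts {
    // Inverse of compose: each part occupies one byte of the packed int.
    static int major(final int version) {
        return (version >> 16) & 0xFF;
    }

    static int minor(final int version) {
        return (version >> 8) & 0xFF;
    }

    static int patch(final int version) {
        return version & 0xFF;
    }

    public static void main(String[] args) {
        final int packed = (1 << 16) | (2 << 8) | 3; // same layout as compose(1, 2, 3)
        System.out.println(major(packed) + "." + minor(packed) + "." + patch(packed)); // 1.2.3
    }
}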
static Callback create(@Nullable Callback delegate, Span span, CurrentTraceContext current) {
    if (delegate == null) return new FinishSpan(span);
    return new DelegateAndFinishSpan(delegate, span, current);
}
@Test
void on_completion_should_forward_then_tag_if_exception() {
    Span span = tracing.tracer().nextSpan().start();

    Callback delegate = mock(Callback.class);
    Callback tracingCallback = TracingCallback.create(delegate, span, currentTraceContext);
    RecordMetadata md = createRecordMetadata();

    tracingCallback.onCompletion(md, error);

    verify(delegate).onCompletion(md, error);
    assertThat(spans.get(0).finishTimestamp()).isNotZero();
    assertThat(spans.get(0).error()).isEqualTo(error);
}
public static String format(String source, Object... parameters) {
    String current = source;
    for (Object parameter : parameters) {
        if (!current.contains("{}")) {
            return current;
        }
        current = current.replaceFirst("\\{\\}", String.valueOf(parameter));
    }
    return current;
}
@Test
public void testFormatMissingBracket() {
    String fmt = "Some string 1 2 3";
    assertEquals("Some string 1 2 3", format(fmt, 7));
}
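For contrast with the missing-bracket case, here is a small self-contained sketch of normal substitution, with the method copied in so the demo runs on its own: each {} is replaced left to right, and surplus parameters are silently ignored once no placeholder remains.

public class FormatDemo {
    public static void main(String[] args) {
        // placeholders are filled left to right
        System.out.println(format("{} + {} = {}", 1, 2, 3)); // "1 + 2 = 3"
        // extra parameters are dropped once the placeholders run out
        System.out.println(format("only {}", "one", "two")); // "only one"
    }

    // copied from the method above to keep the demo self-contained
    public static String format(String source, Object... parameters) {
        String current = source;
        for (Object parameter : parameters) {
            if (!current.contains("{}")) {
                return current;
            }
            current = current.replaceFirst("\\{\\}", String.valueOf(parameter));
        }
        return current;
    }
}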
@Override
public int hashCode() {
    return Long.hashCode(numberAndGeneration);
}
@Test
void checkHashCode() {
    // same object number 100 0
    assertEquals(new COSObjectKey(100, 0).hashCode(), new COSObjectKey(100, 0).hashCode());
    // different object numbers/same generation numbers 100 0 vs. 200 0
    assertNotEquals(new COSObjectKey(100, 0).hashCode(), new COSObjectKey(200, 0).hashCode());
    // different object numbers/different generation numbers/sum of both numbers are equal 100 0 vs. 99 1
    assertNotEquals(new COSObjectKey(100, 0).hashCode(), new COSObjectKey(99, 1).hashCode());
}
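The (100, 0) vs. (99, 1) assertion holds because numberAndGeneration packs the object number and generation into distinct bit ranges of a single long instead of summing them, so Long.hashCode sees different values. A sketch of that idea; the 16-bit generation width is an assumption for illustration, not taken from the class above.

public class PackedKeyDemo {
    // Hypothetical packing: object number in the high bits, generation in the low 16.
    static long pack(long objectNumber, int generation) {
        return (objectNumber << 16) | (generation & 0xFFFFL);
    }

    public static void main(String[] args) {
        // summing would collide (100 + 0 == 99 + 1); packing does not
        System.out.println(Long.hashCode(pack(100, 0)) == Long.hashCode(pack(99, 1)));  // false
        System.out.println(Long.hashCode(pack(100, 0)) == Long.hashCode(pack(100, 0))); // true
    }
}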
@Override
public void doAlarm(List<AlarmMessage> alarmMessages) throws Exception {
    Map<String, SlackSettings> settingsMap = alarmRulesWatcher.getSlackSettings();
    if (settingsMap == null || settingsMap.isEmpty()) {
        return;
    }

    Map<String, List<AlarmMessage>> groupedMessages = groupMessagesByHook(alarmMessages);
    for (Map.Entry<String, List<AlarmMessage>> entry : groupedMessages.entrySet()) {
        var hookName = entry.getKey();
        var messages = entry.getValue();
        var setting = settingsMap.get(hookName);
        if (setting == null || CollectionUtils.isEmpty(setting.getWebhooks()) || CollectionUtils.isEmpty(messages)) {
            continue;
        }
        for (final var url : setting.getWebhooks()) {
            final var jsonObject = new JsonObject();
            final var jsonElements = new JsonArray();
            for (AlarmMessage item : messages) {
                jsonElements.add(GSON.fromJson(
                    String.format(
                        setting.getTextTemplate(), item.getAlarmMessage()
                    ), JsonObject.class));
            }
            jsonObject.add("blocks", jsonElements);
            final var body = GSON.toJson(jsonObject);
            try {
                post(URI.create(url), body, Map.of());
            } catch (Exception e) {
                log.error("Failed to send alarm message to Slack: {}", url, e);
            }
        }
    }
}
@Test
public void testSlackWebhook() throws Exception {
    List<String> remoteEndpoints = new ArrayList<>();
    remoteEndpoints.add("http://127.0.0.1:" + SERVER.httpPort() + "/services/x/y/zssss");
    Rules rules = new Rules();
    String template = "{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\":alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\"}}";
    SlackSettings setting1 = new SlackSettings("setting1", AlarmHooksType.slack, true);
    setting1.setWebhooks(remoteEndpoints);
    setting1.setTextTemplate(template);
    SlackSettings setting2 = new SlackSettings("setting2", AlarmHooksType.slack, false);
    setting2.setWebhooks(remoteEndpoints);
    setting2.setTextTemplate(template);
    rules.getSlackSettingsMap().put(setting1.getFormattedName(), setting1);
    rules.getSlackSettingsMap().put(setting2.getFormattedName(), setting2);
    AlarmRulesWatcher alarmRulesWatcher = new AlarmRulesWatcher(rules, null);
    SlackhookCallback slackhookCallback = new SlackhookCallback(alarmRulesWatcher);

    List<AlarmMessage> alarmMessages = new ArrayList<>(2);
    AlarmMessage alarmMessage = new AlarmMessage();
    alarmMessage.setScopeId(DefaultScopeDefine.SERVICE);
    alarmMessage.setRuleName("service_resp_time_rule");
    alarmMessage.setAlarmMessage("alarmMessage with [DefaultScopeDefine.All]");
    alarmMessage.getHooks().add(setting1.getFormattedName());
    alarmMessages.add(alarmMessage);
    AlarmMessage anotherAlarmMessage = new AlarmMessage();
    anotherAlarmMessage.setRuleName("service_resp_time_rule_2");
    anotherAlarmMessage.setScopeId(DefaultScopeDefine.ENDPOINT);
    anotherAlarmMessage.setAlarmMessage("anotherAlarmMessage with [DefaultScopeDefine.Endpoint]");
    anotherAlarmMessage.getHooks().add(setting2.getFormattedName());
    alarmMessages.add(anotherAlarmMessage);

    slackhookCallback.doAlarm(alarmMessages);
    Assertions.assertTrue(IS_SUCCESS.get());
}
public void convert(FSConfigToCSConfigConverterParams params) throws Exception {
    validateParams(params);
    this.clusterResource = getClusterResource(params);
    this.convertPlacementRules = params.isConvertPlacementRules();
    this.outputDirectory = params.getOutputDirectory();
    this.rulesToFile = params.isPlacementRulesToFile();
    this.usePercentages = params.isUsePercentages();
    this.preemptionMode = params.getPreemptionMode();

    prepareOutputFiles(params.isConsole());
    loadConversionRules(params.getConversionRulesConfig());
    Configuration inputYarnSiteConfig = getInputYarnSiteConfig(params);
    handleFairSchedulerConfig(params, inputYarnSiteConfig);

    convert(inputYarnSiteConfig);
}
@Test
public void testConvertFSConfigurationUndefinedYarnSiteConfig() throws Exception {
    FSConfigToCSConfigConverterParams params =
        FSConfigToCSConfigConverterParams.Builder.create()
            .withYarnSiteXmlConfig(null)
            .withOutputDirectory(FSConfigConverterTestCommons.OUTPUT_DIR)
            .build();

    expectedException.expect(PreconditionException.class);
    expectedException.expectMessage("yarn-site.xml configuration is not defined");

    converter.convert(params);
}
public static KeyValueBytesStoreSupplier lruMap(final String name, final int maxCacheSize) {
    Objects.requireNonNull(name, "name cannot be null");
    if (maxCacheSize < 0) {
        throw new IllegalArgumentException("maxCacheSize cannot be negative");
    }
    return new KeyValueBytesStoreSupplier() {
        @Override
        public String name() {
            return name;
        }

        @Override
        public KeyValueStore<Bytes, byte[]> get() {
            return new MemoryNavigableLRUCache(name, maxCacheSize);
        }

        @Override
        public String metricsScope() {
            return "in-memory-lru";
        }
    };
}
@Test
public void shouldThrowIfILruMapStoreNameIsNull() {
    final Exception e = assertThrows(NullPointerException.class, () -> Stores.lruMap(null, 0));
    assertEquals("name cannot be null", e.getMessage());
}
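A minimal usage sketch of the supplier above, wiring it into a Kafka Streams topology through Materialized, which is one common way to consume a KeyValueBytesStoreSupplier; the topic name, store name, and serdes are illustrative.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier;
import org.apache.kafka.streams.state.Stores;

public class LruStoreDemo {
    public static void main(String[] args) {
        // an in-memory LRU store that keeps at most 10_000 entries
        KeyValueBytesStoreSupplier supplier = Stores.lruMap("counts-store", 10_000);

        StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            "counts-topic",
            Materialized.<String, Long>as(supplier)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Long()));
    }
}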
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
    Map<DataRecord.Key, DataRecord> result = new HashMap<>();
    dataRecords.forEach(each -> {
        if (PipelineSQLOperationType.INSERT == each.getType()) {
            mergeInsert(each, result);
        } else if (PipelineSQLOperationType.UPDATE == each.getType()) {
            mergeUpdate(each, result);
        } else if (PipelineSQLOperationType.DELETE == each.getType()) {
            mergeDelete(each, result);
        }
    });
    return new ArrayList<>(result.values());
}
@Test
void assertUpdateBeforeInsert() {
    DataRecord beforeDataRecord = mockUpdateDataRecord(1, 2, 2);
    DataRecord afterDataRecord = mockInsertDataRecord(1, 1, 1);
    assertThrows(PipelineUnexpectedDataRecordOrderException.class,
        () -> groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord)));
}
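The grouping above collapses multiple change records with the same key into at most one surviving record. A simplified, self-contained sketch of that idea with a toy record type; the folding rules shown (e.g. INSERT followed by DELETE cancelling out) are illustrative and not the exact behavior of mergeInsert/mergeUpdate/mergeDelete.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class KeyedMergeDemo {
    enum Op { INSERT, UPDATE, DELETE }

    record Change(String key, Op op, String value) { }

    // Collapse a batch so at most one change per key survives.
    static List<Change> merge(List<Change> changes) {
        Map<String, Change> result = new LinkedHashMap<>();
        for (Change each : changes) {
            Change prev = result.get(each.key());
            if (prev == null) {
                result.put(each.key(), each);
            } else if (prev.op() == Op.INSERT && each.op() == Op.DELETE) {
                result.remove(each.key()); // insert then delete cancels out
            } else if (prev.op() == Op.INSERT && each.op() == Op.UPDATE) {
                // fold the update back into the pending insert
                result.put(each.key(), new Change(each.key(), Op.INSERT, each.value()));
            } else {
                result.put(each.key(), each); // later change wins
            }
        }
        return new ArrayList<>(result.values());
    }

    public static void main(String[] args) {
        List<Change> merged = merge(List.of(
            new Change("k1", Op.INSERT, "a"),
            new Change("k1", Op.UPDATE, "b"),
            new Change("k2", Op.INSERT, "c"),
            new Change("k2", Op.DELETE, null)));
        System.out.println(merged); // [Change[key=k1, op=INSERT, value=b]]
    }
}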