public Object valueFrom(Struct struct) {
    return valueFrom(struct, true);
}
@Test
void shouldFindValueInStruct() {
    Schema bazSchema = SchemaBuilder.struct()
        .field("inner", Schema.STRING_SCHEMA)
        .optional()
        .build();
    Schema barSchema = SchemaBuilder.struct()
        .field("bar", Schema.INT32_SCHEMA)
        .field("baz", bazSchema)
        .build();
    Schema schema = SchemaBuilder.struct().field("foo", barSchema).build();
    Struct foo = new Struct(barSchema)
        .put("bar", 42)
        .put("baz", null);
    Struct struct = new Struct(schema).put("foo", foo);
    assertEquals(42, pathV2("foo.bar").valueFrom(struct));
    assertNull(pathV2("foo.baz").valueFrom(struct));
}
@VisibleForTesting
Object evaluate(final GenericRow row) {
    return term.getValue(new TermEvaluationContext(row));
}
@Test
public void shouldEvaluateCastToTimestamp() {
    // Given:
    final Expression cast1 = new Cast(
        new TimestampLiteral(Timestamp.from(Instant.ofEpochMilli(1000))),
        new Type(SqlPrimitiveType.of("TIMESTAMP"))
    );
    final Expression cast2 = new Cast(
        new StringLiteral("2017-11-13T23:59:58"),
        new Type(SqlPrimitiveType.of("TIMESTAMP"))
    );
    final Expression cast3 = new Cast(
        new DateLiteral(new Date(864000000)),
        new Type(SqlPrimitiveType.of("TIMESTAMP"))
    );

    // When:
    InterpretedExpression interpreter1 = interpreter(cast1);
    InterpretedExpression interpreter2 = interpreter(cast2);
    InterpretedExpression interpreter3 = interpreter(cast3);

    // Then:
    assertThat(interpreter1.evaluate(ROW), is(new Timestamp(1000L)));
    assertThat(interpreter2.evaluate(ROW), is(new Timestamp(1510617598000L)));
    assertThat(interpreter3.evaluate(ROW), is(new Timestamp(864000000)));
}
public static int parseInt(String number) throws NumberFormatException {
    if (StrUtil.isBlank(number)) {
        return 0;
    }
    if (StrUtil.startWithIgnoreCase(number, "0x")) {
        // e.g. "0x04" denotes a hexadecimal number
        return Integer.parseInt(number.substring(2), 16);
    }
    if (StrUtil.containsIgnoreCase(number, "E")) {
        // Scientific notation is deliberately unsupported: it is generally used for very small or very
        // large numbers, which lose precision when converted to int, making the conversion meaningless.
        throw new NumberFormatException(StrUtil.format("Unsupported int format: [{}]", number));
    }
    try {
        return Integer.parseInt(number);
    } catch (NumberFormatException e) {
        return parseNumber(number).intValue();
    }
}
@Test
public void parseIntTest2() {
    // from 5.4.8 issue#I23ORQ@Gitee
    // thousands separators are stripped
    final int v1 = NumberUtil.parseInt("1,482.00");
    assertEquals(1482, v1);
}
@Override
public String getName() {
    return ANALYZER_NAME;
}
@Test
public void testAnalyzePackageLock() throws Exception {
    try (Engine engine = new Engine(getSettings())) {
        final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "pip/Pipfile.lock"));
        engine.addDependency(result);
        analyzer.analyze(result, engine);
        assertFalse(ArrayUtils.contains(engine.getDependencies(), result));
        assertEquals(76, engine.getDependencies().length);
        boolean found = false;
        for (Dependency d : engine.getDependencies()) {
            if ("alabaster".equals(d.getName())) {
                found = true;
                assertEquals("0.7.12", d.getVersion());
                assertThat(d.getDisplayFileName(), equalTo("alabaster:0.7.12"));
                assertEquals(PythonDistributionAnalyzer.DEPENDENCY_ECOSYSTEM, d.getEcosystem());
                break;
            }
        }
        assertTrue("Expected to find alabaster", found);
    }
}
@Override
public Set<FSTFlags> getFlags() {
    return _flags;
}
@Test
public void testVersion5WithNumbers() throws IOException {
    try (InputStream inputStream = getClass().getClassLoader().getResourceAsStream("data/abc-numbers.native.fst")) {
        FST fst = FST.read(inputStream);
        assertTrue(fst.getFlags().contains(FSTFlags.NUMBERS));
        verifyContent(fst, _expected);
    }
}
public static IRubyObject deep(final Ruby runtime, final Object input) {
    if (input == null) {
        return runtime.getNil();
    }
    final Class<?> cls = input.getClass();
    final Rubyfier.Converter converter = CONVERTER_MAP.get(cls);
    if (converter != null) {
        return converter.convert(runtime, input);
    }
    return fallbackConvert(runtime, input, cls);
}
@Test
public void testDeepWithInteger() {
    Object result = Rubyfier.deep(RubyUtil.RUBY, 1);
    assertEquals(RubyFixnum.class, result.getClass());
    assertEquals(1L, ((RubyFixnum) result).getLongValue());
}
@VisibleForTesting
static LookupResult parseBody(JsonPath singleJsonPath, @Nullable JsonPath multiJsonPath, InputStream body) {
    try {
        final DocumentContext documentContext = JsonPath.parse(body);
        LookupResult.Builder builder = LookupResult.builder().cacheTTL(Long.MAX_VALUE);

        if (multiJsonPath != null) {
            try {
                final Object multiValue = documentContext.read(multiJsonPath);
                if (multiValue instanceof Map) {
                    //noinspection unchecked
                    builder = builder.multiValue((Map<Object, Object>) multiValue);
                } else if (multiValue instanceof List) {
                    //noinspection unchecked
                    final List<String> stringList = ((List<Object>) multiValue).stream()
                            .map(Object::toString)
                            .collect(Collectors.toList());
                    builder = builder.stringListValue(stringList);
                    // for backwards compatibility
                    builder = builder.multiSingleton(multiValue);
                } else {
                    builder = builder.multiSingleton(multiValue);
                }
            } catch (PathNotFoundException e) {
                LOG.warn("Couldn't read multi JSONPath from response - skipping multi value ({})", e.getMessage());
            }
        }

        try {
            final Object singleValue = documentContext.read(singleJsonPath);
            if (singleValue instanceof CharSequence) {
                return builder.single((CharSequence) singleValue).build();
            } else if (singleValue instanceof Number) {
                return builder.single((Number) singleValue).build();
            } else if (singleValue instanceof Boolean) {
                return builder.single((Boolean) singleValue).build();
            } else {
                throw new IllegalArgumentException("Single value data type cannot be: " + singleValue.getClass().getCanonicalName());
            }
        } catch (PathNotFoundException e) {
            LOG.warn("Couldn't read single JSONPath from response - returning empty result ({})", e.getMessage());
            return null;
        }
    } catch (InvalidJsonException e) {
        LOG.error("Couldn't parse JSON response", e);
        return null;
    } catch (ClassCastException e) {
        LOG.error("Couldn't assign value type", e);
        return null;
    } catch (Exception e) {
        LOG.error("Unexpected error parsing JSON response", e);
        return null;
    }
}
@Test
public void parseEmptyBody() throws Exception {
    final JsonPath singlePath = JsonPath.compile("$.hello");
    final JsonPath multiPath = JsonPath.compile("$.list");
    final LookupResult result = HTTPJSONPathDataAdapter.parseBody(singlePath, multiPath, emptyBody);
    assertThat(result).isNull();
}
public File zip(File source, File destZipFile, int level) throws IOException {
    zipContents(source, new FileOutputStream(destZipFile), level, false);
    return destZipFile;
}
@Test
void shouldPreserveFileTimestampWhileGeneratingTheZipFile() throws Exception {
    File file = createFileInTempDir();
    file.setLastModified(1297989100000L); // Set this to any date in the past which is greater than the epoch
    File zip = zipUtil.zip(file, tempDir.resolve("foo.zip").toFile(), Deflater.DEFAULT_COMPRESSION);
    try (ZipFile actualZip = new ZipFile(zip.getAbsolutePath())) {
        ZipEntry entry = actualZip.getEntry(file.getName());
        assertThat(entry.getTime()).isEqualTo(file.lastModified());
    }
}
public StringSubject factValue(String key) {
    return doFactValue(key, null);
}
@Test
public void factValueInt() {
    assertThat(fact("foo", "the foo")).factValue("foo", 0).isEqualTo("the foo");
}
@Override
public String getMediaType() {
    return firstNonNull(
        mediaTypeFromUrl(source.getRequestURI()),
        firstNonNull(
            acceptedContentTypeInResponse(),
            MediaTypes.DEFAULT));
}
@Test
public void media_type_taken_in_url_first() {
    when(source.getHeader(HttpHeaders.ACCEPT)).thenReturn(MediaTypes.JSON);
    when(source.getRequestURI()).thenReturn("/path/to/resource/search.protobuf");

    assertThat(underTest.getMediaType()).isEqualTo(MediaTypes.PROTOBUF);
}
public List<ChangeStreamRecord> toChangeStreamRecords(
    PartitionMetadata partition,
    ChangeStreamResultSet resultSet,
    ChangeStreamResultSetMetadata resultSetMetadata) {
    if (this.isPostgres()) {
        // In PostgreSQL, change stream records are returned as JSONB.
        return Collections.singletonList(
            toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
    }
    // In GoogleSQL, change stream records are returned as an array of structs.
    return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
        .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
        .collect(Collectors.toList());
}
@Test
public void testMappingStructRowToChildPartitionRecord() {
    final ChildPartitionsRecord childPartitionsRecord = new ChildPartitionsRecord(
        Timestamp.ofTimeSecondsAndNanos(10L, 20),
        "1",
        Arrays.asList(
            new ChildPartition("childToken1", Sets.newHashSet("parentToken1", "parentToken2")),
            new ChildPartition("childToken2", Sets.newHashSet("parentToken1", "parentToken2"))),
        null);
    final Struct struct = recordsToStructWithStrings(childPartitionsRecord);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getCurrentRowAsStruct()).thenReturn(struct);

    assertEquals(
        Collections.singletonList(childPartitionsRecord),
        mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
public static Queue<Consumer<byte[]>> stopConsumers(final Queue<Consumer<byte[]>> consumers) throws PulsarClientException {
    while (!consumers.isEmpty()) {
        Consumer<byte[]> consumer = consumers.poll();
        if (consumer != null) {
            try {
                consumer.close();
            } catch (PulsarClientException.AlreadyClosedException e) {
                // ignore during stopping
            } catch (Exception e) {
                LOG.debug("Error stopping consumer: {} due to {}. This exception is ignored", consumer, e.getMessage(), e);
            }
        }
    }
    return new ConcurrentLinkedQueue<>();
}
@Test
public void givenConsumerQueueIsNotEmptyWhenIStopConsumersVerifyEmptyQueueIsReturned() throws PulsarClientException {
    Queue<Consumer<byte[]>> consumers = new ConcurrentLinkedQueue<>();
    consumers.add(mock(Consumer.class));
    Queue<Consumer<byte[]>> expected = PulsarUtils.stopConsumers(consumers);
    assertTrue(expected.isEmpty());
}
public void logAndProcessFailure(
    String computationId, ExecutableWork executableWork, Throwable t, Consumer<Work> onInvalidWork) {
    if (shouldRetryLocally(computationId, executableWork.work(), t)) {
        // Try again after some delay and at the end of the queue to avoid a tight loop.
        executeWithDelay(retryLocallyDelayMs, executableWork);
    } else {
        // Consider the item invalid. It will eventually be retried by Windmill if it still needs to
        // be processed.
        onInvalidWork.accept(executableWork.work());
    }
}
@Test
public void logAndProcessFailure_doesNotRetryOOM() {
    Set<Work> executedWork = new HashSet<>();
    ExecutableWork work = createWork(executedWork::add);
    WorkFailureProcessor workFailureProcessor = createWorkFailureProcessor(streamingEngineFailureReporter());
    Set<Work> invalidWork = new HashSet<>();
    workFailureProcessor.logAndProcessFailure(
        DEFAULT_COMPUTATION_ID, work, new OutOfMemoryError(), invalidWork::add);

    assertThat(executedWork).isEmpty();
    assertThat(invalidWork).containsExactly(work.work());
}
public static NotificationDispatcherMetadata newMetadata() {
    return METADATA;
}
@Test
public void verify_reportFailures_notification_dispatcher_key() {
    NotificationDispatcherMetadata metadata = ReportAnalysisFailureNotificationHandler.newMetadata();

    assertThat(metadata.getDispatcherKey()).isEqualTo(REPORT_FAILURE_DISPATCHER_KEY);
}
public ComputeNode getBackendOrComputeNode(long nodeId) {
    ComputeNode backend = idToBackendRef.get(nodeId);
    if (backend == null) {
        backend = idToComputeNodeRef.get(nodeId);
    }
    return backend;
}
@Test
public void testGetBackendOrComputeNode() {
    mockNet();

    Backend be = new Backend(10001, "host1", 1000);
    service.addBackend(be);
    ComputeNode cn = new ComputeNode(10002, "host2", 1000);
    cn.setBePort(1001);
    service.addComputeNode(cn);

    Assert.assertEquals(be, service.getBackendOrComputeNode(be.getId()));
    Assert.assertEquals(cn, service.getBackendOrComputeNode(cn.getId()));
    Assert.assertNull(service.getBackendOrComputeNode(/* Not Exist */ 100));

    Assert.assertEquals(cn, service.getBackendOrComputeNodeWithBePort("host2", 1001));
    Assert.assertFalse(service.checkNodeAvailable(cn));
    Assert.assertFalse(service.checkNodeAvailable(be));

    List<ComputeNode> nodes = service.backendAndComputeNodeStream().collect(Collectors.toList());
    Assert.assertEquals(2, nodes.size());
    Assert.assertEquals(be, nodes.get(0));
    Assert.assertEquals(cn, nodes.get(1));
}
@Override
public Class<? extends StorageBuilder> builder() {
    return MaxStorageBuilder.class;
}
@Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), LARGE_VALUE);
    function.calculate();

    StorageBuilder<MaxFunction> storageBuilder = function.builder().newInstance();

    final HashMapConverter.ToStorage toStorage = new HashMapConverter.ToStorage();
    storageBuilder.entity2Storage(function, toStorage);
    final Map<String, Object> map = toStorage.obtain();
    map.put(MaxFunction.VALUE, map.get(MaxFunction.VALUE));

    MaxFunction function2 = storageBuilder.storage2Entity(new HashMapConverter.ToEntity(map));
    assertThat(function2.getValue()).isEqualTo(function.getValue());
}
public void deleteShardGroup(List<Long> groupIds) {
    prepare();
    try {
        client.deleteShardGroup(serviceId, groupIds, true);
    } catch (StarClientException e) {
        LOG.warn("Failed to delete shard group. error: {}", e.getMessage());
    }
}
@Test
public void testDeleteShardGroup() throws StarClientException, DdlException {
    new Expectations() {
        {
            client.deleteShardGroup("1", (List<Long>) any, true);
            minTimes = 0;
            result = null;
        }
    };

    Deencapsulation.setField(starosAgent, "serviceId", "1");
    // test delete shard group
    ExceptionChecker.expectThrowsNoException(() -> starosAgent.deleteShardGroup(Lists.newArrayList(1L, 2L)));
}
public List<Bson> parse(final List<String> filterExpressions, final List<EntityAttribute> attributes) {
    if (filterExpressions == null || filterExpressions.isEmpty()) {
        return List.of();
    }
    final Map<String, List<Filter>> groupedByField = filterExpressions.stream()
            .map(expr -> singleFilterParser.parseSingleExpression(expr, attributes))
            .collect(groupingBy(Filter::field));

    return groupedByField.values().stream()
            .map(grouped -> grouped.stream()
                    .map(Filter::toBson)
                    .collect(Collectors.toList()))
            .map(groupedFilters -> {
                if (groupedFilters.size() == 1) {
                    return groupedFilters.get(0);
                } else {
                    return Filters.or(groupedFilters);
                }
            })
            .toList();
}
@Test
void returnsEmptyListOnNullFilterList() {
    assertThat(toTest.parse(null, List.of()))
            .isEmpty();
}
public PaginatedList<StreamDestinationFilterRuleDTO> findPaginatedForStreamAndTarget(
        String streamId,
        String targetId,
        String queryString,
        Bson sort,
        int perPage,
        int page,
        Predicate<String> permissionSelector) {
    final var query = parseQuery(queryString);
    return paginationHelper.filter(and(eq(FIELD_STREAM_ID, streamId), eq(FIELD_DESTINATION_TYPE, targetId), query))
            .sort(sort)
            .perPage(perPage)
            .page(page, dto -> permissionSelector.test(dto.id()));
}
@Test
@MongoDBFixtures("StreamDestinationFilterServiceTest-2024-07-01-1.json")
void findPaginatedForStreamAndTargetWithQuery() {
    final var result = service.findPaginatedForStreamAndTarget("54e3deadbeefdeadbeef1000", "indexer",
            "status:disabled", Sorts.ascending("title"), 10, 1, id -> true);

    assertThat(result.delegate()).hasSize(1);
    assertThat(result.delegate().get(0).status()).isEqualTo(StreamDestinationFilterRuleDTO.Status.DISABLED);
}
@SuppressWarnings("unchecked") @Override public synchronized ProxyInfo<T> getProxy() { if (currentUsedHandler != null) { return currentUsedHandler; } Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>(); StringBuilder combinedInfo = new StringBuilder("["); for (int i = 0; i < proxies.size(); i++) { ProxyInfo<T> pInfo = super.getProxy(); incrementProxyIndex(); targetProxyInfos.put(pInfo.proxyInfo, pInfo); combinedInfo.append(pInfo.proxyInfo).append(','); } combinedInfo.append(']'); T wrappedProxy = (T) Proxy.newProxyInstance( RequestHedgingInvocationHandler.class.getClassLoader(), new Class<?>[]{xface}, new RequestHedgingInvocationHandler(targetProxyInfos)); currentUsedHandler = new ProxyInfo<T>(wrappedProxy, combinedInfo.toString()); return currentUsedHandler; }
@Test
public void testHedgingWhenConnectException() throws Exception {
    ClientProtocol active = Mockito.mock(ClientProtocol.class);
    Mockito.when(active.getStats()).thenThrow(new ConnectException());

    ClientProtocol standby = Mockito.mock(ClientProtocol.class);
    Mockito.when(standby.getStats())
        .thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));

    RequestHedgingProxyProvider<ClientProtocol> provider =
        new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since the active namenode throws ConnectException!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            if (ex instanceof RemoteException) {
                Exception rEx = ((RemoteException) ex).unwrapRemoteException();
                Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(),
                    rEx instanceof StandbyException);
            } else {
                Assert.assertTrue(ex instanceof ConnectException);
            }
        }
    }
    Mockito.verify(active).getStats();
    Mockito.verify(standby).getStats();
}
@Override
public HttpResponseOutputStream<Metadata> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final DbxUserFilesRequests files = new DbxUserFilesRequests(session.getClient(file));
        final UploadSessionStartUploader start = files.uploadSessionStart();
        new DefaultStreamCloser().close(start.getOutputStream());
        final String sessionId = start.finish().getSessionId();
        if (log.isDebugEnabled()) {
            log.debug(String.format("Obtained session id %s for upload %s", sessionId, file));
        }
        final UploadSessionAppendV2Uploader uploader = open(files, sessionId, 0L);
        return new SegmentingUploadProxyOutputStream(file, status, files, uploader, sessionId);
    } catch (DbxException ex) {
        throw new DropboxExceptionMappingService().map("Upload failed.", ex, file);
    }
}
@Test(expected = AccessDeniedException.class)
public void testWriteDS_Store() throws Exception {
    final DropboxWriteFeature write = new DropboxWriteFeature(session);
    final TransferStatus status = new TransferStatus();
    final byte[] content = RandomUtils.nextBytes(0);
    status.setLength(content.length);
    final Path test = new Path(new DefaultHomeFinderService(session).find(), ".DS_Store", EnumSet.of(Path.Type.file));
    final OutputStream out = write.write(test, status, new DisabledConnectionCallback());
    new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
}
@Override
public void next(DeviceId deviceId, NextObjective nextObjective) {
    process(deviceId, nextObjective);
}
@Test
public void next() {
    // Note: ADD operation won't query this
    expect(mgr.flowObjectiveStore.getNextGroup(NID1)).andReturn(NGRP1).times(3);
    expect(mgr.flowObjectiveStore.getNextGroup(NID2)).andReturn(NGRP2).times(3);
    replay(mgr.flowObjectiveStore);

    expectNextObjs.forEach(nextObj -> mgr.next(DEV1, nextObj));

    // Wait for the pipeline operation to complete
    int expectedTime = (bound + offset) * 8;
    assertAfter(expectedTime, expectedTime * 5, () -> assertEquals(expectNextObjs.size(), actualObjs.size()));

    assertTrue(actualObjs.indexOf(NEXT1) < actualObjs.indexOf(NEXT3));
    assertTrue(actualObjs.indexOf(NEXT3) < actualObjs.indexOf(NEXT5));
    assertTrue(actualObjs.indexOf(NEXT5) < actualObjs.indexOf(NEXT7));
    assertTrue(actualObjs.indexOf(NEXT2) < actualObjs.indexOf(NEXT4));
    assertTrue(actualObjs.indexOf(NEXT4) < actualObjs.indexOf(NEXT6));
    assertTrue(actualObjs.indexOf(NEXT6) < actualObjs.indexOf(NEXT8));

    verify(mgr.flowObjectiveStore);
}
@Override
@CheckForNull
public String message(Locale locale, String key, @Nullable String defaultValue, Object... parameters) {
    String bundleKey = propertyToBundles.get(key);
    String value = null;
    if (bundleKey != null) {
        try {
            ResourceBundle resourceBundle = ResourceBundle.getBundle(bundleKey, locale, classloader, control);
            value = resourceBundle.getString(key);
        } catch (MissingResourceException e1) {
            // ignore
        }
    }
    if (value == null) {
        value = defaultValue;
    }
    return formatMessage(value, parameters);
}
@Test
public void get_english_labels_when_default_locale_is_not_english() {
    Locale defaultLocale = Locale.getDefault();
    try {
        Locale.setDefault(Locale.FRENCH);
        assertThat(underTest.message(Locale.ENGLISH, "any", null)).isEqualTo("Any");
        assertThat(underTest.message(Locale.ENGLISH, "sqale.page", null)).isEqualTo("Sqale page title");
        assertThat(underTest.message(Locale.ENGLISH, "checkstyle.rule1.name", null)).isEqualTo("Rule one");
    } finally {
        Locale.setDefault(defaultLocale);
    }
}
void fetchPluginSettingsMetaData(GoPluginDescriptor pluginDescriptor) {
    String pluginId = pluginDescriptor.id();
    List<ExtensionSettingsInfo> allMetadata = findSettingsAndViewOfAllExtensionsIn(pluginId);
    List<ExtensionSettingsInfo> validMetadata = allSettingsAndViewPairsWhichAreValid(allMetadata);

    if (validMetadata.isEmpty()) {
        LOGGER.warn("Failed to fetch plugin settings metadata for plugin {}. Maybe the plugin does not implement plugin settings and view?", pluginId);
        LOGGER.warn("Plugin: {} - Metadata load info: {}", pluginId, allMetadata);
        LOGGER.warn("Not all plugins are required to implement the request above. This error may be safe to ignore.");
        return;
    }

    if (validMetadata.size() > 1) {
        throw new RuntimeException(String.format("Plugin with ID: %s has more than one extension which supports plugin settings. "
            + "Only one extension should support it and respond to %s and %s.", pluginId, REQUEST_PLUGIN_SETTINGS_CONFIGURATION, REQUEST_PLUGIN_SETTINGS_VIEW));
    }

    ExtensionSettingsInfo extensionSettingsInfo = validMetadata.get(0);
    metadataStore.addMetadataFor(pluginId, extensionSettingsInfo.extensionName, extensionSettingsInfo.configuration, extensionSettingsInfo.viewTemplate);
}
@Test
public void shouldNotStoreMetadataIfViewTemplateIsMissing() {
    GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id("plugin-id").isBundledPlugin(true).build();
    setupSettingsResponses(packageRepositoryExtension, pluginDescriptor.id(), null, null);

    metadataLoader.fetchPluginSettingsMetaData(pluginDescriptor);

    assertThat(PluginSettingsMetadataStore.getInstance().hasPlugin(pluginDescriptor.id())).isFalse();
}
public static String getDatabaseName(final SQLStatement sqlStatement, final String currentDatabaseName) {
    Optional<DatabaseSegment> databaseSegment = sqlStatement instanceof FromDatabaseAvailable
        ? ((FromDatabaseAvailable) sqlStatement).getDatabase()
        : Optional.empty();
    return databaseSegment.map(optional -> optional.getIdentifier().getValue()).orElse(currentDatabaseName);
}
@Test
void assertDatabaseNameWhenNotAvailableInSQLStatement() {
    assertThat(DatabaseNameUtils.getDatabaseName(mock(SQLStatement.class), "foo_db"), is("foo_db"));
}
@Override
public String toString() {
    return predicate.toString();
}
@Test
public void testLongPredicate() {
    TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(1);
    HazelcastInstance hazelcastInstance = factory.newHazelcastInstance();
    IMap<Integer, Integer> map = hazelcastInstance.getMap(randomString());
    for (int i = 0; i < 8000; i++) {
        map.put(i, i);
    }

    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 8000; i++) {
        sb.append("intValue() == ").append(i).append(" or ");
    }
    sb.append(" intValue() == -1");
    SqlPredicate predicate = new SqlPredicate(sb.toString());

    // all entries must match
    Set<Map.Entry<Integer, Integer>> entries = map.entrySet(predicate);
    assertEquals(map.size(), entries.size());

    factory.terminateAll();
}
void validateLogLevelConfigs(Collection<AlterableConfig> ops) {
    ops.forEach(op -> {
        String loggerName = op.name();
        switch (OpType.forId(op.configOperation())) {
            case SET:
                validateLoggerNameExists(loggerName);
                String logLevel = op.value();
                if (!LogLevelConfig.VALID_LOG_LEVELS.contains(logLevel)) {
                    throw new InvalidConfigurationException("Cannot set the log level of " + loggerName
                        + " to " + logLevel + " as it is not a supported log level. "
                        + "Valid log levels are " + VALID_LOG_LEVELS_STRING);
                }
                break;
            case DELETE:
                validateLoggerNameExists(loggerName);
                if (loggerName.equals(Log4jController.ROOT_LOGGER())) {
                    throw new InvalidRequestException("Removing the log level of the "
                        + Log4jController.ROOT_LOGGER() + " logger is not allowed");
                }
                break;
            case APPEND:
                throw new InvalidRequestException(OpType.APPEND + " operation is not allowed for the "
                    + BROKER_LOGGER + " resource");
            case SUBTRACT:
                throw new InvalidRequestException(OpType.SUBTRACT + " operation is not allowed for the "
                    + BROKER_LOGGER + " resource");
            default:
                throw new InvalidRequestException("Unknown operation type " + (int) op.configOperation()
                    + " is not allowed for the " + BROKER_LOGGER + " resource");
        }
    });
}
@Test
public void testValidateRemoveRootLogLevelConfigNotAllowed() {
    assertEquals("Removing the log level of the " + Log4jController.ROOT_LOGGER() + " logger is not allowed",
        Assertions.assertThrows(InvalidRequestException.class, () ->
            MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig()
                .setName(Log4jController.ROOT_LOGGER())
                .setConfigOperation(OpType.DELETE.id())
                .setValue("")))).getMessage());
}
private Set<ServiceInstance> servicesDownAndNotInGroup() {
    if (servicesDownAndNotInGroup == null) {
        servicesDownAndNotInGroup = servicesNotInGroup.stream()
            .filter(this::serviceEffectivelyDown)
            .collect(Collectors.toSet());
    }
    return servicesDownAndNotInGroup;
}
@Test
public void testServicesDownAndNotInGroup() {
    HostName hostName1 = new HostName("host1");
    HostName hostName2 = new HostName("host2");
    HostName hostName3 = new HostName("host3");
    HostName hostName4 = new HostName("host4");
    HostName hostName5 = new HostName("host5");

    ServiceCluster serviceCluster = modelUtils.createServiceCluster(
        "cluster",
        new ServiceType("service-type"),
        List.of(
            modelUtils.createServiceInstance("service-1", hostName1, ServiceStatus.UP),
            modelUtils.createServiceInstance("service-2", hostName2, ServiceStatus.DOWN),
            modelUtils.createServiceInstance("service-3", hostName3, ServiceStatus.UP),
            modelUtils.createServiceInstance("service-4", hostName4, ServiceStatus.DOWN),
            modelUtils.createServiceInstance("service-5", hostName5, ServiceStatus.UP)
        )
    );
    modelUtils.createApplicationInstance(List.of(serviceCluster));

    modelUtils.createNode(hostName1, HostStatus.NO_REMARKS);
    modelUtils.createNode(hostName2, HostStatus.NO_REMARKS);
    modelUtils.createNode(hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
    modelUtils.createNode(hostName4, HostStatus.ALLOWED_TO_BE_DOWN);
    modelUtils.createNode(hostName5, HostStatus.NO_REMARKS);

    ClusterApiImpl clusterApi = new ClusterApiImpl(
        applicationApi,
        serviceCluster,
        new NodeGroup(modelUtils.createApplicationInstance(new ArrayList<>()), hostName5),
        modelUtils.getHostInfos(),
        modelUtils.getClusterControllerClientFactory(),
        ModelTestUtils.APPLICATION_PARAMS.clusterParamsFor(serviceCluster),
        clock);

    assertEquals("{ clusterId=cluster, serviceType=service-type }", clusterApi.clusterInfo());
    assertFalse(clusterApi.isStorageCluster());
    assertEquals(" [host3, host4] are suspended. [ServiceInstance{configId=service-2, hostName=host2, " +
            "serviceStatus=ServiceStatusInfo{status=DOWN, since=Optional.empty, lastChecked=Optional.empty}}] " +
            "is down.",
        clusterApi.downDescription());
    assertEquals(60, clusterApi.percentageOfServicesDownOutsideGroup());
    assertEquals(80, clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown());
}
static Serde<List<?>> createSerde(final PersistenceSchema schema) {
    final List<SimpleColumn> columns = schema.columns();
    if (columns.isEmpty()) {
        // No columns:
        return new KsqlVoidSerde<>();
    }
    if (columns.size() != 1) {
        throw new KsqlException("The '" + FormatFactory.KAFKA.name()
            + "' format only supports a single field. Got: " + columns);
    }
    final SimpleColumn singleColumn = columns.get(0);
    final Class<?> javaType = SchemaConverters.sqlToJavaConverter()
        .toJavaType(singleColumn.type());
    return createSerde(singleColumn, javaType);
}
@Test
public void shouldThrowIfBoolean() {
    // Given:
    final PersistenceSchema schema = schemaWithFieldOfType(SqlTypes.BOOLEAN);

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> KafkaSerdeFactory.createSerde(schema)
    );

    // Then:
    assertThat(e.getMessage(), containsString("The 'KAFKA' format does not support type 'BOOLEAN'"));
}
@Override
void put(String name, Mapping mapping) {
    storage().put(name, mapping);
}
@Test
public void when_put_then_overridesPrevious() {
    String name = randomName();
    Mapping originalMapping = mapping(name, "type1");
    Mapping updatedMapping = mapping(name, "type2");

    storage.put(name, originalMapping);
    storage.put(name, updatedMapping);

    assertTrue(storage.allObjects().stream().noneMatch(m -> m.equals(originalMapping)));
    assertTrue(storage.allObjects().stream().anyMatch(m -> m.equals(updatedMapping)));
}
public void addPattern(String pattern) {
    final List<String> tokens = splitByCharacter(pattern);

    PatternToken current = null;
    for (final PatternToken patternToken : roots) {
        if (patternToken.isMatch(tokens.get(0))) {
            current = patternToken;
            break;
        }
    }
    if (current == null) {
        current = new StringToken(tokens.get(0));
        roots.add(current);
    }
    if (tokens.size() == 1) {
        current.setExpression(pattern);
        return;
    }

    for (int i = 1; i < tokens.size(); i++) {
        final String token = tokens.get(i);
        PatternToken newToken;
        if (VarToken.VAR_TOKEN.equals(token)) {
            newToken = new VarToken();
        } else {
            newToken = new StringToken(token);
        }
        final PatternToken found = current.find(newToken);
        if (found == null) {
            current = current.add(newToken);
        } else {
            current = found;
        }
    }
    current.setExpression(pattern);
}
@Test
public void testTreeBuild() throws NoSuchFieldException, IllegalAccessException {
    PatternTree tree = new PatternTree();
    tree.addPattern("/products/{var}");
    tree.addPattern("/products/{var}/detail");
    tree.addPattern("/products/{var}/refund");
    tree.addPattern("/products/{var}/reorder/extra");
    tree.addPattern("/sales/{var}");
    tree.addPattern("/employees/{var}/profile");
    // This should map to exact same tree nodes
    tree.addPattern("produces/{var}/profile");
    tree.addPattern("GET:/posts/{var}");
    tree.addPattern("https://abc.com/posts/{var}");

    final Field rootField = PatternTree.class.getDeclaredField("roots");
    rootField.setAccessible(true);
    final List<PatternToken> roots = (List<PatternToken>) rootField.get(tree);

    final PatternToken prodToken = roots.get(0);
    Assertions.assertEquals(new StringToken("products"), prodToken);
    Assertions.assertEquals(1, prodToken.children().size());
    final PatternToken varToken = prodToken.children().get(0);
    Assertions.assertEquals(new VarToken(), varToken);
    Assertions.assertEquals(3, varToken.children().size());
    final PatternToken detailToken = varToken.children().get(0);
    Assertions.assertEquals(new StringToken("detail"), detailToken);

    final PatternToken salesToken = roots.get(1);
    Assertions.assertEquals(new StringToken("sales"), salesToken);
    Assertions.assertEquals(1, salesToken.children().size());

    final PatternToken employeeToken = roots.get(2);
    Assertions.assertEquals(new StringToken("employees"), employeeToken);
    Assertions.assertEquals(1, employeeToken.children().size());

    final PatternToken producesToken = roots.get(3);
    Assertions.assertEquals(new StringToken("produces"), producesToken);
    Assertions.assertEquals(1, producesToken.children().size());

    final PatternToken getPostsToken = roots.get(4);
    Assertions.assertEquals(new StringToken("GET:"), getPostsToken);

    final PatternToken abcToken = roots.get(5);
    Assertions.assertEquals(new StringToken("https:"), abcToken);
    final PatternToken abcComToken = abcToken.children().get(0);
    // For general performance purposes, double / will result in an empty string token.
    // This is considered an intentional feature rather than a bug.
    Assertions.assertEquals(new StringToken(""), abcComToken);
}
@Override
public Map<String, Object> load(String configKey) {
    if (targetUri == null) {
        return null;
    }
    // Check for new file every so often
    int currentTimeSecs = Time.currentTimeSecs();
    if (lastReturnedValue != null && ((currentTimeSecs - lastReturnedTime) < artifactoryPollTimeSecs)) {
        LOG.debug("currentTimeSecs: {}; lastReturnedTime {}; artifactoryPollTimeSecs: {}. Returning our last map.",
            currentTimeSecs, lastReturnedTime, artifactoryPollTimeSecs);
        return (Map<String, Object>) lastReturnedValue.get(configKey);
    }
    try {
        Map<String, Object> raw = loadFromUri(targetUri);
        if (raw != null) {
            return (Map<String, Object>) raw.get(configKey);
        }
    } catch (Exception e) {
        LOG.error("Failed to load from uri {}", targetUri);
    }
    return null;
}
@Test
public void testMalformedYaml() {
    // This is a test where we are configured to point right at a single artifact
    Config conf = new Config();
    conf.put(DaemonConfig.SCHEDULER_CONFIG_LOADER_URI,
        ARTIFACTORY_HTTP_SCHEME_PREFIX + "bogushost.yahoo.com:9999/location/of/this/artifact");
    conf.put(Config.STORM_LOCAL_DIR, tmpDirPath.toString());
    ArtifactoryConfigLoaderMock loaderMock = new ArtifactoryConfigLoaderMock(conf);
    loaderMock.setData("Anything", "/location/of/this/artifact", "{ \"downloadUri\": \"anything\"}");
    loaderMock.setData(null, null, "ThisIsNotValidYaml");

    Map<String, Object> ret = loaderMock.load(DaemonConfig.MULTITENANT_SCHEDULER_USER_POOLS);
    assertNull(ret, "Unexpectedly returned a map");
}
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
    if (queryableName != null) {
        sendOldValues = true;
        return true;
    }
    if (parent.enableSendingOldValues(forceMaterialization)) {
        sendOldValues = true;
    }
    return sendOldValues;
}
@Test
public void shouldEnableSendOldValuesWhenMaterializedAlreadyAndForcedToMaterialize() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic1 = "topic1";

    final KTableImpl<String, Integer, Integer> table1 =
        (KTableImpl<String, Integer, Integer>) builder.table(topic1, consumed);
    final KTableImpl<String, Integer, Integer> table2 =
        (KTableImpl<String, Integer, Integer>) table1.filter(predicate, Materialized.as("store2"));

    table2.enableSendingOldValues(true);

    assertThat(table1.sendingOldValueEnabled(), is(false));
    assertThat(table2.sendingOldValueEnabled(), is(true));

    doTestSendingOldValue(builder, table1, table2, topic1);
}
public void setDrainEventsOnStop() {
    drainEventsOnStop = true;
}
@Test
@Timeout(10000)
void testDispatchStopOnTimeout() throws Exception {
    BlockingQueue<Event> eventQueue = new LinkedBlockingQueue<Event>();
    eventQueue = spy(eventQueue);
    // simulate dispatcher is not drained.
    when(eventQueue.isEmpty()).thenReturn(false);

    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 2000);

    DrainDispatcher disp = new DrainDispatcher(eventQueue);
    disp.init(conf);
    disp.setDrainEventsOnStop();
    disp.start();
    disp.waitForEventThreadToWait();
    disp.close();
}
@CanIgnoreReturnValue
@SuppressWarnings("deprecation") // TODO(b/134064106): design an alternative to no-arg check()
public final Ordered containsExactly() {
    return check().about(iterableEntries()).that(checkNotNull(actual).entries()).containsExactly();
}
@Test
public void containsExactlyVarargFailureBoth() {
    ImmutableMultimap<Integer, String> expected =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    ListMultimap<Integer, String> actual = LinkedListMultimap.create(expected);
    actual.remove(3, "six");
    actual.remove(4, "five");
    actual.put(4, "nine");
    actual.put(5, "eight");

    expectFailureWhenTestingThat(actual)
        .containsExactly(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    assertFailureKeys("missing", "unexpected", "---", "expected", "but was");
    assertFailureValue("missing", "{3=[six], 4=[five]}");
    assertFailureValue("unexpected", "{4=[nine], 5=[eight]}");
}
static Optional<SearchPath> fromString(String path) {
    if (path == null || path.isEmpty()) {
        return Optional.empty();
    }
    if (path.indexOf(';') >= 0) {
        return Optional.empty(); // multi-level not supported at this time
    }
    try {
        SearchPath sp = parseElement(path);
        if (sp.isEmpty()) {
            return Optional.empty();
        } else {
            return Optional.of(sp);
        }
    } catch (NumberFormatException | InvalidSearchPathException e) {
        throw new InvalidSearchPathException("Invalid search path '" + path + "'", e);
    }
}
@Test
void invalidRowMustThrowException() {
    try {
        SearchPath.fromString("1,2,3/r");
        fail("Expected exception");
    } catch (InvalidSearchPathException e) {
        // success
    }
}
public void retrieveDocuments() throws DocumentRetrieverException {
    boolean first = true;
    String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
    MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
    documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
    session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
    int trace = params.traceLevel;
    if (trace > 0) {
        session.setTraceLevel(trace);
    }

    Iterator<String> iter = params.documentIds;
    if (params.jsonOutput && !params.printIdsOnly) {
        System.out.println('[');
    }
    while (iter.hasNext()) {
        if (params.jsonOutput && !params.printIdsOnly) {
            if (!first) {
                System.out.println(',');
            } else {
                first = false;
            }
        }
        String docid = iter.next();
        Message msg = createDocumentRequest(docid);
        Reply reply = session.syncSend(msg);
        printReply(reply);
    }
    if (params.jsonOutput && !params.printIdsOnly) {
        System.out.println(']');
    }
}
@Test
void testPrintIdOnly() throws DocumentRetrieverException {
    ClientParameters params = createParameters()
        .setDocumentIds(asIterator(DOC_ID_1))
        .setPrintIdsOnly(true)
        .build();
    when(mockedSession.syncSend(any())).thenReturn(createDocumentReply(DOC_ID_1));

    DocumentRetriever documentRetriever = createDocumentRetriever(params);
    documentRetriever.retrieveDocuments();

    assertEquals(DOC_ID_1 + "\n", outContent.toString());
}
static Map<String, String> decodeOpaqueSecrets(Secret secret) {
    if (secret == null) {
        return Collections.emptyMap();
    }
    String opaqueIdentities = secret.getData().get("identities.yaml");
    String yaml = new String(Base64.getDecoder().decode(opaqueIdentities));
    YamlConfigurationReader reader = new YamlConfigurationReader(new StringReader(yaml),
        new URLConfigurationResourceResolver(null), new Properties(), PropertyReplacer.DEFAULT, NamingStrategy.KEBAB_CASE);
    Map<String, Object> identities = reader.asMap();
    List<Map<String, String>> credentialsList = (List<Map<String, String>>) identities.get("credentials");
    Map<String, String> res = new LinkedHashMap<>(identities.size());
    for (Map<String, String> credentials : credentialsList) {
        res.put(credentials.get("username"), credentials.get("password"));
    }
    return res;
}
@Test
public void testKubeSecrets() {
    Secret secret = new Secret();
    secret.setData(Collections.singletonMap("identities.yaml",
        "Y3JlZGVudGlhbHM6Ci0gdXNlcm5hbWU6IGFkbWluCiAgcGFzc3dvcmQ6IHBhc3N3b3JkCgo="));

    Map<String, String> map = Kube.decodeOpaqueSecrets(secret);
    assertEquals(1, map.size());
    Map.Entry<String, String> next = map.entrySet().iterator().next();
    assertEquals("admin", next.getKey());
    assertEquals("password", next.getValue());
}
public ProjectCleaner purge(DbSession session, String rootUuid, String projectUuid, Configuration projectConfig, Set<String> disabledComponentUuids) {
    long start = System.currentTimeMillis();
    profiler.reset();

    periodCleaner.clean(session, rootUuid, projectConfig);

    PurgeConfiguration configuration = newDefaultPurgeConfiguration(projectConfig, rootUuid, projectUuid, disabledComponentUuids);
    purgeDao.purge(session, configuration, purgeListener, profiler);

    session.commit();
    logProfiling(start, projectConfig);
    return this;
}
@Test
public void no_profiling_when_property_is_false() {
    settings.setProperty(CoreProperties.PROFILING_LOG_PROPERTY, false);

    underTest.purge(mock(DbSession.class), "root", "project", settings.asConfig(), emptySet());

    verify(profiler, never()).getProfilingResult(anyLong());
    assertThat(logTester.getLogs().stream()
        .map(LogAndArguments::getFormattedMsg)
        .collect(Collectors.joining()))
        .doesNotContain("Profiling for purge");
}
protected TransMeta processLinkedTrans( TransMeta transMeta ) {
    for ( StepMeta stepMeta : transMeta.getSteps() ) {
        if ( stepMeta.getStepID().equalsIgnoreCase( "TransExecutor" ) ) {
            TransExecutorMeta tem = (TransExecutorMeta) stepMeta.getStepMetaInterface();
            ObjectLocationSpecificationMethod specMethod = tem.getSpecificationMethod();
            // If the reference is by filename, change it to Repository By Name. Otherwise it's fine so leave it alone
            if ( specMethod == ObjectLocationSpecificationMethod.FILENAME ) {
                tem.setSpecificationMethod( ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME );
                String filename = tem.getFileName();
                String transName = filename.substring( filename.lastIndexOf( "/" ) + 1, filename.lastIndexOf( '.' ) );
                String directory = filename.substring( 0, filename.lastIndexOf( "/" ) );
                tem.setTransName( transName );
                tem.setDirectoryPath( directory );
            }
        }
    }
    return transMeta;
}
@Test
public void testProcessLinkedTransWithFilename() {
    TransExecutorMeta transExecutorMeta = spy( new TransExecutorMeta() );
    transExecutorMeta.setFileName( "/path/to/Transformation2.ktr" );
    transExecutorMeta.setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME );
    StepMeta transExecutorStep = mock( StepMeta.class );
    when( transExecutorStep.getStepID() ).thenReturn( "TransExecutor" );
    when( transExecutorStep.getStepMetaInterface() ).thenReturn( transExecutorMeta );
    TransMeta parent = mock( TransMeta.class );
    when( parent.getSteps() ).thenReturn( Arrays.asList( transExecutorStep ) );

    TransMeta result = transFileListener.processLinkedTrans( parent );

    boolean found = false;
    for ( StepMeta stepMeta : result.getSteps() ) {
        if ( stepMeta.getStepID().equalsIgnoreCase( "TransExecutor" ) ) {
            found = true;
            TransExecutorMeta resultExecMeta = (TransExecutorMeta) stepMeta.getStepMetaInterface();
            assertEquals( ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME, resultExecMeta.getSpecificationMethod() );
            assertEquals( "/path/to", resultExecMeta.getDirectoryPath() );
            assertEquals( "Transformation2", resultExecMeta.getTransName() );
        }
    }
    assertTrue( found );
}
@Override
public Collection<String> searchServiceName(String namespaceId, String expr) throws NacosException {
    String regex = Constants.ANY_PATTERN + expr + Constants.ANY_PATTERN;
    Collection<String> result = new HashSet<>();
    for (Service each : ServiceManager.getInstance().getSingletons(namespaceId)) {
        String groupedServiceName = each.getGroupedServiceName();
        if (groupedServiceName.matches(regex)) {
            result.add(groupedServiceName);
        }
    }
    return result;
}
@Test
void testSearchServiceName() throws NacosException {
    Collection<String> res = serviceOperatorV2.searchServiceName("A", "");
    assertEquals(1, res.size());
}
public TargetAssignmentResult build() throws PartitionAssignorException {
    Map<String, MemberSubscriptionAndAssignmentImpl> memberSpecs = new HashMap<>();

    // Prepare the member spec for all members.
    members.forEach((memberId, member) -> memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
        member,
        targetAssignment.getOrDefault(memberId, Assignment.EMPTY),
        topicsImage
    )));

    // Update the member spec if updated or deleted members.
    updatedMembers.forEach((memberId, updatedMemberOrNull) -> {
        if (updatedMemberOrNull == null) {
            memberSpecs.remove(memberId);
        } else {
            Assignment assignment = targetAssignment.getOrDefault(memberId, Assignment.EMPTY);

            // A new static member joins and needs to replace an existing departed one.
            if (updatedMemberOrNull.instanceId() != null) {
                String previousMemberId = staticMembers.get(updatedMemberOrNull.instanceId());
                if (previousMemberId != null && !previousMemberId.equals(memberId)) {
                    assignment = targetAssignment.getOrDefault(previousMemberId, Assignment.EMPTY);
                }
            }

            memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
                updatedMemberOrNull,
                assignment,
                topicsImage
            ));
        }
    });

    // Prepare the topic metadata.
    Map<Uuid, TopicMetadata> topicMetadataMap = new HashMap<>();
    subscriptionMetadata.forEach((topicName, topicMetadata) ->
        topicMetadataMap.put(topicMetadata.id(), topicMetadata)
    );

    // Compute the assignment.
    GroupAssignment newGroupAssignment = assignor.assign(
        new GroupSpecImpl(
            Collections.unmodifiableMap(memberSpecs),
            subscriptionType,
            invertedTargetAssignment
        ),
        new SubscribedTopicDescriberImpl(topicMetadataMap)
    );

    // Compute delta from previous to new target assignment and create the
    // relevant records.
    List<CoordinatorRecord> records = new ArrayList<>();

    for (String memberId : memberSpecs.keySet()) {
        Assignment oldMemberAssignment = targetAssignment.get(memberId);
        Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId);

        if (!newMemberAssignment.equals(oldMemberAssignment)) {
            // If the member had no assignment or had a different assignment, we
            // create a record for the new assignment.
            records.add(targetAssignmentRecordBuilder.build(
                groupId,
                memberId,
                newMemberAssignment.partitions()
            ));
        }
    }

    // Bump the target assignment epoch.
    records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch));

    return new TargetAssignmentResult(records, newGroupAssignment.members());
}
@Test
public void testNewMember() {
    TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext(
        "my-group",
        20
    );

    Uuid fooTopicId = context.addTopicMetadata("foo", 6, Collections.emptyMap());
    Uuid barTopicId = context.addTopicMetadata("bar", 6, Collections.emptyMap());

    context.addGroupMember("member-1", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2, 3),
        mkTopicAssignment(barTopicId, 1, 2, 3)
    ));

    context.addGroupMember("member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 4, 5, 6),
        mkTopicAssignment(barTopicId, 4, 5, 6)
    ));

    context.updateMemberSubscription("member-3", Arrays.asList("foo", "bar", "zar"));

    context.prepareMemberAssignment("member-1", mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    ));

    context.prepareMemberAssignment("member-2", mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    ));

    context.prepareMemberAssignment("member-3", mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    ));

    TargetAssignmentBuilder.TargetAssignmentResult result = context.build();

    assertEquals(4, result.records().size());

    assertUnorderedListEquals(Arrays.asList(
        newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment(
            mkTopicAssignment(fooTopicId, 1, 2),
            mkTopicAssignment(barTopicId, 1, 2)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4),
            mkTopicAssignment(barTopicId, 3, 4)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment(
            mkTopicAssignment(fooTopicId, 5, 6),
            mkTopicAssignment(barTopicId, 5, 6)
        ))
    ), result.records().subList(0, 3));

    assertEquals(newConsumerGroupTargetAssignmentEpochRecord(
        "my-group",
        20
    ), result.records().get(3));

    Map<String, MemberAssignment> expectedAssignment = new HashMap<>();
    expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    )));
    expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    )));
    expectedAssignment.put("member-3", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    )));

    assertEquals(expectedAssignment, result.targetAssignment());
}
@Override
public void loadConfiguration(NacosLoggingProperties loggingProperties) {
    String location = loggingProperties.getLocation();
    configurator.setLoggingProperties(loggingProperties);
    LoggerContext loggerContext = loadConfigurationOnStart(location);
    if (hasNoListener(loggerContext)) {
        addListener(loggerContext, location);
    }
}
@Test
void testLoadConfigurationFailure() {
    assertThrows(IllegalStateException.class, () -> {
        System.setProperty("nacos.logging.config", "http://localhost");
        loggingProperties = new NacosLoggingProperties("classpath:nacos-logback12.xml", System.getProperties());
        logbackNacosLoggingAdapter.loadConfiguration(loggingProperties);
    });
}
@Override
public JobState getJobStatus(String project, String region, String jobId) throws IOException {
    return handleJobState(getJob(project, region, jobId));
}
@Test
public void testGetJobStatus() throws IOException {
    Get get = mock(Get.class);
    Job job = new Job().setCurrentState(JobState.RUNNING.toString());
    when(getLocationJobs(client).get(any(), any(), any()).setView(any()))
        .thenThrow(new RuntimeException("Server is not responding"))
        .thenReturn(get);
    when(get.execute()).thenThrow(new IOException("Connection reset")).thenReturn(job);

    JobState actual = new FakePipelineLauncher(client).getJobStatus(PROJECT, REGION, JOB_ID);

    verify(getLocationJobs(client), times(4))
        .get(projectCaptor.capture(), regionCaptor.capture(), jobIdCaptor.capture());
    assertThat(projectCaptor.getValue()).isEqualTo(PROJECT);
    assertThat(regionCaptor.getValue()).isEqualTo(REGION);
    assertThat(jobIdCaptor.getValue()).isEqualTo(JOB_ID);
    assertThat(actual).isEqualTo(JobState.RUNNING);
}
private void fail(final ChannelHandlerContext ctx, int length) {
    fail(ctx, String.valueOf(length));
}
@Test
public void testTooLongLineWithFailFast() throws Exception {
    EmbeddedChannel ch = new EmbeddedChannel(new LineBasedFrameDecoder(16, false, true));
    try {
        ch.writeInbound(copiedBuffer("12345678901234567", CharsetUtil.US_ASCII));
        fail();
    } catch (Exception e) {
        assertThat(e, is(instanceOf(TooLongFrameException.class)));
    }
    assertThat(ch.writeInbound(copiedBuffer("890", CharsetUtil.US_ASCII)), is(false));
    assertThat(ch.writeInbound(copiedBuffer("123\r\nfirst\r\n", CharsetUtil.US_ASCII)), is(true));

    ByteBuf buf = ch.readInbound();
    ByteBuf buf2 = copiedBuffer("first\r\n", CharsetUtil.US_ASCII);
    assertThat(buf, is(buf2));
    assertThat(ch.finish(), is(false));

    buf.release();
    buf2.release();
}
@Override
public V get(Object o) {
    if (o == null) return null; // null keys are not allowed
    int i = arrayIndexOfKey(o);
    return i != -1 ? value(i + 1) : null;
}
@Test
void equalValues() {
    array[0] = "1";
    array[1] = "1";
    array[2] = "2";
    array[3] = "2";
    array[4] = "3";
    array[5] = "3";

    Map<String, String> map = builder.build(array);
    assertSize(map, 3);
    assertBaseCase(map);
    assertThat(map).containsOnly(
        entry("1", "1"),
        entry("2", "2"),
        entry("3", "3")
    );
    assertThat(map).hasToString(
        "UnsafeArrayMap{1=1,2=2,3=3}"
    );

    assertThat(map.get("1")).isEqualTo("1");
    assertThat(map.get("2")).isEqualTo("2");
    assertThat(map.get("3")).isEqualTo("3");
}
@Override
public void push(String queueName, String id, long offsetTimeInSecond) {
    push(queueName, id, 0, offsetTimeInSecond);
}
@Test
public void testPush() {
    String queueName = "test-queue";
    String id = "abcd-1234-defg-5678";
    queueDao.push(queueName, id, 123);

    assertEquals(1, internalQueue.size());
    assertTrue(internalQueue.containsKey(queueName));
    assertEquals(1, internalQueue.get(queueName).size());
    assertEquals(id, internalQueue.get(queueName).peek());
}
public List<R> scanForClasspathResource(String resourceName, Predicate<String> packageFilter) {
    requireNonNull(resourceName, "resourceName must not be null");
    requireNonNull(packageFilter, "packageFilter must not be null");
    List<URI> urisForResource = getUrisForResource(getClassLoader(), resourceName);
    BiFunction<Path, Path, Resource> createResource = createClasspathResource(resourceName);
    return findResourcesForUris(urisForResource, DEFAULT_PACKAGE_NAME, packageFilter, createResource);
}
@Test
void scanForClasspathPackageResource() {
    String resourceName = "io/cucumber/core/resource";
    List<URI> resources = resourceScanner.scanForClasspathResource(resourceName, aPackage -> true);
    assertThat(resources, containsInAnyOrder(
        URI.create("classpath:io/cucumber/core/resource/test/resource.txt"),
        URI.create("classpath:io/cucumber/core/resource/test/other-resource.txt"),
        URI.create("classpath:io/cucumber/core/resource/test/spaces%20in%20name%20resource.txt")));
}
@Override
public int read() throws IOException {
    byte[] b = new byte[1];
    if (read(b, 0, 1) != 1) {
        return -1;
    } else {
        return b[0];
    }
}
@Test
public void testReadEos() throws Exception {
    DistributedLogManager dlm = mock(DistributedLogManager.class);
    LogReader reader = mock(LogReader.class);
    when(dlm.getInputStream(any(DLSN.class))).thenReturn(reader);
    when(reader.readNext(anyBoolean())).thenThrow(new EndOfStreamException("eos"));

    byte[] b = new byte[1];
    DLInputStream in = new DLInputStream(dlm);
    assertEquals("Should return 0 when reading an empty eos stream", 0, in.read(b, 0, 1));
    assertEquals("Should return -1 when reading an empty eos stream", -1, in.read(b, 0, 1));
}
@Override
public void write(int b) throws IOException {
    if (buffer.length <= bufferIdx) {
        flushInternalBuffer();
    }
    buffer[bufferIdx] = (byte) b;
    ++bufferIdx;
}
@Test
void testFailingPrimaryWriteArrayOffsFail() throws Exception {
    DuplicatingCheckpointOutputStream duplicatingStream = createDuplicatingStreamWithFailingPrimary();
    testFailingPrimaryStream(
        duplicatingStream, () -> duplicatingStream.write(new byte[512], 20, 130));
}
public static Configuration windows() {
    return WindowsHolder.WINDOWS;
}
@Test
public void testDefaultWindowsConfiguration() {
    Configuration config = Configuration.windows();

    assertThat(config.pathType).isEqualTo(PathType.windows());
    assertThat(config.roots).containsExactly("C:\\");
    assertThat(config.workingDirectory).isEqualTo("C:\\work");
    assertThat(config.nameCanonicalNormalization).containsExactly(CASE_FOLD_ASCII);
    assertThat(config.nameDisplayNormalization).isEmpty();
    assertThat(config.pathEqualityUsesCanonicalForm).isTrue();
    assertThat(config.blockSize).isEqualTo(8192);
    assertThat(config.maxSize).isEqualTo(4L * 1024 * 1024 * 1024);
    assertThat(config.maxCacheSize).isEqualTo(-1);
    assertThat(config.attributeViews).containsExactly("basic");
    assertThat(config.attributeProviders).isEmpty();
    assertThat(config.defaultAttributeValues).isEmpty();
    assertThat(config.fileTimeSource).isEqualTo(SystemFileTimeSource.INSTANCE);
}
@Override
public CompletableFuture<MetricsResponse> fetchMetrics(ApplicationId application) {
    NodeList applicationNodes = nodeRepository.nodes().list().owner(application).state(Node.State.active);

    Optional<Node> metricsV2Container = applicationNodes.container()
        .matching(this::expectedUp)
        .stream()
        .filter(node -> !newNode(node)) // Skip newly added nodes, as they may not be reachable
        .findFirst();
    if (metricsV2Container.isEmpty()) {
        return CompletableFuture.completedFuture(MetricsResponse.empty());
    } else {
        // Collector 'autoscaling' defined in com.yahoo.vespa.model.admin.monitoring.MetricConsumer
        String url = "http://" + metricsV2Container.get().hostname() + ":" + 4080 + apiPath + "?consumer=autoscaling";
        return httpClient.get(url)
            .thenApply(response -> new MetricsResponse(response, applicationNodes));
    }
}
@Test
public void testMetricsFetch() throws Exception {
    NodeResources resources = new NodeResources(1, 10, 100, 1);
    ProvisioningTester tester = new ProvisioningTester.Builder().build();
    OrchestratorMock orchestrator = new OrchestratorMock();
    MockHttpClient httpClient = new MockHttpClient();
    MetricsV2MetricsFetcher fetcher = new MetricsV2MetricsFetcher(tester.nodeRepository(), orchestrator, httpClient);

    tester.makeReadyNodes(4, resources); // Creates (in order) host-1.yahoo.com, host-2.yahoo.com, host-3.yahoo.com, host-4.yahoo.com
    tester.clock().advance(Duration.ofMinutes(5)); // Make sure these are not considered new nodes (metrics will not be fetched for them)
    tester.activateTenantHosts();

    ApplicationId application1 = ProvisioningTester.applicationId();
    ApplicationId application2 = ProvisioningTester.applicationId();
    tester.deploy(application1, Capacity.from(new ClusterResources(2, 1, resources))); // host-1.yahoo.com, host-2.yahoo.com
    tester.deploy(application2, Capacity.from(new ClusterResources(2, 1, resources))); // host-4.yahoo.com, host-3.yahoo.com
    orchestrator.suspend(new HostName("host-4.yahoo.com"));

    {
        httpClient.cannedResponse = cannedResponseForApplication1;
        List<Pair<String, NodeMetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application1).get().nodeMetrics());
        assertEquals("http://host-1.yahoo.com:4080/metrics/v2/values?consumer=autoscaling", httpClient.requestsReceived.get(0));
        assertEquals(2, values.size());

        assertEquals("host-1.yahoo.com", values.get(0).getFirst());
        assertEquals(0.162, values.get(0).getSecond().load().cpu(), delta);
        assertEquals(0.231, values.get(0).getSecond().load().memory(), delta);
        assertEquals(0.820, values.get(0).getSecond().load().disk(), delta);

        assertEquals("host-2.yahoo.com", values.get(1).getFirst());
        assertEquals(0.0, values.get(1).getSecond().load().cpu(), delta);
        assertEquals(0.35, values.get(1).getSecond().load().memory(), delta);
        assertEquals(0.45, values.get(1).getSecond().load().disk(), delta);
        assertEquals(45.0, values.get(1).getSecond().queryRate(), delta);
    }

    {
        httpClient.cannedResponse = cannedResponseForApplication2;
        List<Pair<String, NodeMetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().nodeMetrics());
        assertEquals("http://host-3.yahoo.com:4080/metrics/v2/values?consumer=autoscaling", httpClient.requestsReceived.get(1));
        assertEquals(1, values.size());
        assertEquals("host-3.yahoo.com", values.get(0).getFirst());
        assertEquals(0.10, values.get(0).getSecond().load().cpu(), delta);
        assertEquals(0.15, values.get(0).getSecond().load().memory(), delta);
        assertEquals(0.20, values.get(0).getSecond().load().disk(), delta);
        assertEquals(3, values.get(0).getSecond().generation(), delta);
        assertFalse(values.get(0).getSecond().inService());
        assertTrue(values.get(0).getSecond().stable());
    }

    { // read response 2 when unstable
        httpClient.cannedResponse = cannedResponseForApplication2;
        try (Mutex lock = tester.nodeRepository().applications().lock(application1)) {
            tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)
                .first().get().retire(tester.clock().instant()), lock);
        }
        List<Pair<String, NodeMetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().nodeMetrics());
        assertFalse(values.get(0).getSecond().stable());
    }

    {
        httpClient.cannedResponse = cannedResponseForApplication3;
        List<Pair<String, NodeMetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().nodeMetrics());
        assertEquals("http://host-3.yahoo.com:4080/metrics/v2/values?consumer=autoscaling", httpClient.requestsReceived.get(1));
        assertEquals(1, values.size());
        assertEquals("host-3.yahoo.com", values.get(0).getFirst());
        assertEquals(0.13, values.get(0).getSecond().load().cpu(), delta);
        assertEquals(0.9375, values.get(0).getSecond().load().memory(), delta);
        assertEquals(0.13, values.get(0).getSecond().load().gpu(), delta);
        assertEquals(0.9375, values.get(0).getSecond().load().gpuMemory(), delta);
        assertFalse("Unstable because buckets are being merged", values.get(0).getSecond().stable());
    }
}
public List<Release> findByReleaseIds(Set<Long> releaseIds) { Iterable<Release> releases = releaseRepository.findAllById(releaseIds); if (releases == null) { return Collections.emptyList(); } return Lists.newArrayList(releases); }
@Test public void testFindByReleaseIds() throws Exception { Release someRelease = mock(Release.class); Release anotherRelease = mock(Release.class); long someReleaseId = 1; long anotherReleaseId = 2; List<Release> someReleases = Lists.newArrayList(someRelease, anotherRelease); Set<Long> someReleaseIds = Sets.newHashSet(someReleaseId, anotherReleaseId); when(releaseRepository.findAllById(someReleaseIds)).thenReturn(someReleases); List<Release> result = releaseService.findByReleaseIds(someReleaseIds); assertEquals(someReleases, result); }
public Encoding getEncoding() { return encoding; }
@Test public void testAsciiEncoding() { MetaStringEncoder encoder = new MetaStringEncoder('_', '$'); String testString = "asciiOnly"; MetaString encodedMetaString = encoder.encode(testString); assertNotSame(encodedMetaString.getEncoding(), MetaString.Encoding.UTF_8); assertEquals(encodedMetaString.getEncoding(), MetaString.Encoding.ALL_TO_LOWER_SPECIAL); }
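// A hedged companion sketch (not part of the original pair): it relies only on the
// API surface shown above -- MetaStringEncoder(char, char), encode(String), and
// MetaString.getEncoding(). The expectation is deliberately loose: a string of plain
// lowercase ASCII plus the special chars should select some compact special encoding,
// never UTF_8.
@Test
public void lowercaseAsciiAvoidsUtf8() {
    MetaStringEncoder encoder = new MetaStringEncoder('_', '$');
    MetaString encoded = encoder.encode("field_name");
    assertNotSame(encoded.getEncoding(), MetaString.Encoding.UTF_8);
}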
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) { warningsForRemovedEndVars(map); KafkaVersion.Lookup lookup = parseKafkaVersions(map.get(STRIMZI_KAFKA_IMAGES), map.get(STRIMZI_KAFKA_CONNECT_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES)); return buildFromMap(map, lookup); }
@Test public void testImagePullSecretsThrowsWithInvalidCharacter() { Map<String, String> envVars = new HashMap<>(ClusterOperatorConfigTest.ENV_VARS); envVars.put(ClusterOperatorConfig.IMAGE_PULL_SECRETS.key(), "secret1, secret2 , secret_3 "); assertThrows(InvalidConfigurationException.class, () -> ClusterOperatorConfig.buildFromMap(envVars, KafkaVersionTestUtils.getKafkaVersionLookup()) ); }
public Map<String, Parameter> generateMergedWorkflowParams( WorkflowInstance instance, RunRequest request) { Workflow workflow = instance.getRuntimeWorkflow(); Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>(); Map<String, ParamDefinition> defaultWorkflowParams = defaultParamManager.getDefaultWorkflowParams(); // merge workflow params for start if (request.isFreshRun()) { // merge default workflow params ParamsMergeHelper.mergeParams( allParamDefs, defaultWorkflowParams, ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request)); // merge defined workflow params if (workflow.getParams() != null) { ParamsMergeHelper.mergeParams( allParamDefs, workflow.getParams(), ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request)); } } // merge workflow params from previous instance for restart if (!request.isFreshRun() && instance.getParams() != null) { Map<String, ParamDefinition> previousParamDefs = instance.getParams().entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition())); // remove reserved params, which should be injected again by the system. for (String paramName : Constants.RESERVED_PARAM_NAMES) { previousParamDefs.remove(paramName); } ParamsMergeHelper.mergeParams( allParamDefs, previousParamDefs, ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false)); } // merge run params if (request.getRunParams() != null) { ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun()); ParamsMergeHelper.mergeParams( allParamDefs, request.getRunParams(), ParamsMergeHelper.MergeContext.workflowCreate(source, request)); } // merge user provided restart run params getUserRestartParam(request) .ifPresent( userRestartParams -> { ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun()); ParamsMergeHelper.mergeParams( allParamDefs, userRestartParams, ParamsMergeHelper.MergeContext.workflowCreate(source, request)); }); // cleanup any placeholder params and convert to params return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs)); }
@Test public void testWorkflowParamRunParamsUpstreamInitiator() { Map<String, ParamDefinition> runParams = singletonMap("p1", ParamDefinition.buildParamDefinition("p1", "d1")); ParamSource[] expectedSources = new ParamSource[] {ParamSource.FOREACH, ParamSource.SUBWORKFLOW, ParamSource.TEMPLATE}; Initiator.Type[] initiators = new Initiator.Type[] { Initiator.Type.FOREACH, Initiator.Type.SUBWORKFLOW, Initiator.Type.TEMPLATE }; for (int i = 0; i < initiators.length; i++) { RunRequest request = RunRequest.builder() .initiator(UpstreamInitiator.withType(initiators[i])) .currentPolicy(RunPolicy.START_FRESH_NEW_RUN) .runParams(runParams) .build(); Map<String, Parameter> workflowParams = paramsManager.generateMergedWorkflowParams(workflowInstance, request); Assert.assertFalse(workflowParams.isEmpty()); Assert.assertEquals("d1", workflowParams.get("p1").asStringParam().getValue()); Assert.assertEquals(expectedSources[i], workflowParams.get("p1").getSource()); } }
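// Merge precedence implied by the call order in generateMergedWorkflowParams() above,
// assuming ParamsMergeHelper.mergeParams lets each later source override earlier ones
// subject to its merge policy: system defaults -> workflow definition (fresh runs only)
// -> previous-instance params (restarts only, with reserved names stripped so the
// system re-injects them) -> run-request params -> user restart params. Placeholder
// params are cleaned up only after all sources have been merged.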
public static BytesInput from(InputStream in, int bytes) { return new StreamBytesInput(in, bytes); }
@Test public void testFromCapacityByteArrayOutputStreamOneSlab() throws IOException { byte[] data = new byte[1000]; RANDOM.nextBytes(data); List<CapacityByteArrayOutputStream> toClose = new ArrayList<>(); Supplier<BytesInput> factory = () -> { CapacityByteArrayOutputStream cbaos = new CapacityByteArrayOutputStream(10, 1000, allocator); toClose.add(cbaos); try { cbaos.write(data); } catch (IOException e) { throw new RuntimeException(e); } return BytesInput.from(cbaos); }; try { validate(data, factory); validateToByteBufferIsInternal(factory); } finally { AutoCloseables.uncheckedClose(toClose); } }
@VisibleForTesting ZonedDateTime parseZoned(final String text, final ZoneId zoneId) { final TemporalAccessor parsed = formatter.parse(text); final ZoneId parsedZone = parsed.query(TemporalQueries.zone()); ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply( ObjectUtils.defaultIfNull(parsedZone, zoneId)); for (final TemporalField override : ChronoField.values()) { if (parsed.isSupported(override)) { if (!resolved.isSupported(override)) { throw new KsqlException( "Unsupported temporal field in timestamp: " + text + " (" + override + ")"); } final long value = parsed.getLong(override); if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) { if (!parsed.isSupported(ChronoField.YEAR)) { throw new KsqlException("Leap day cannot be parsed without supplying the year field"); } // eagerly override year, to avoid mismatch with epoch year, which is not a leap year resolved = resolved.withYear(parsed.get(ChronoField.YEAR)); } resolved = resolved.with(override, value); } } return resolved; }
@Test public void shouldParseFullLocalDateWithTimeZone() { // Given // NOTE: a trailing space is required due to JDK bug, fixed in JDK 9b116 // https://bugs.openjdk.java.net/browse/JDK-8154050 final String format = "yyyy-MM-dd HH O "; final String timestamp = "1605-11-05 10 GMT+3 "; // When final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, IGNORED); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER.withHour(10).withZoneSameLocal(GMT_3)))); }
public void cloneGroupOffset(final String addr, final String srcGroup, final String destGroup, final String topic, final boolean isOffline, final long timeoutMillis) throws RemotingException, MQClientException, InterruptedException { CloneGroupOffsetRequestHeader requestHeader = new CloneGroupOffsetRequestHeader(); requestHeader.setSrcGroup(srcGroup); requestHeader.setDestGroup(destGroup); requestHeader.setTopic(topic); requestHeader.setOffline(isOffline); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CLONE_GROUP_OFFSET, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { return; } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
@Test public void testCloneGroupOffset() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); mqClientAPI.cloneGroupOffset(defaultBrokerAddr, "", "", defaultTopic, false, defaultTimeout); }
public static Version of(int major, int minor) { if (major == UNKNOWN_VERSION && minor == UNKNOWN_VERSION) { return UNKNOWN; } else { return new Version(major, minor); } }
@Test(expected = IllegalArgumentException.class) public void ofMalformed() { Version.of("3,9"); }
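// A hedged usage sketch, assuming UNKNOWN_VERSION is the public sentinel the factory
// checks and UNKNOWN is the shared singleton it returns: the sentinel pair collapses
// to one instance (so identity comparison is safe there), while real versions do not.
@Test
public void ofUnknownSentinelReturnsSharedInstance() {
    assertSame(Version.UNKNOWN, Version.of(Version.UNKNOWN_VERSION, Version.UNKNOWN_VERSION));
    assertNotSame(Version.UNKNOWN, Version.of(3, 9));
}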
@Override public Map<String, Integer> getCounts(UUID jobId) { return counts.computeIfAbsent(jobId, k -> new ConcurrentHashMap<>()); }
@Test public void canAddExistingKeysToCurrentCountsTest() { addItemToJobStoreCounts(ITEM_NAME); addItemToJobStoreCounts(ITEM_NAME); final Map<String, Integer> counts = localJobStore.getCounts(jobId); Truth.assertThat(counts.size()).isEqualTo(1); Truth.assertThat(counts.get(ITEM_NAME)).isEqualTo(2); }
public boolean insertOrReplace(E entry, Predicate<E> entryTest) { AtomicBoolean updated = new AtomicBoolean(false); map.compute(checkNotNull(entry), (k, v) -> { if (v == null || entryTest.test(v)) { updated.set(true); return entry; } return v; }); return updated.get(); }
@Test public void testInsertOrReplace() { ExtendedSet<TestValue> set = new ExtendedSet<>(Maps.newConcurrentMap()); TestValue small = new TestValue("foo", 1); TestValue medium = new TestValue("foo", 2); TestValue large = new TestValue("foo", 3); // input TestValue will replace existing TestValue if its value2() is greater // than existing entry's value2() assertTrue(set.insertOrReplace(small, existing -> existing.value2() < small.value2())); assertTrue(set.insertOrReplace(large, existing -> existing.value2() < large.value2())); assertFalse(set.insertOrReplace(medium, existing -> existing.value2() < medium.value2())); assertTrue(set.contains(small)); assertTrue(set.contains(medium)); assertTrue(set.contains(large)); }
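// Design note on insertOrReplace() above: delegating to ConcurrentMap.compute makes
// the check-then-write atomic per key -- the predicate runs inside the remapping
// function while the entry is held, so two racing callers cannot both observe the old
// value and both decide to replace it. The AtomicBoolean is only needed to carry the
// decision out of the lambda (which can only capture effectively final variables),
// not for thread-safety of the update itself.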
@Udf public String concat(@UdfParameter final String... jsonStrings) { if (jsonStrings == null) { return null; } final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length); boolean allObjects = true; for (final String jsonString : jsonStrings) { if (jsonString == null) { return null; } final JsonNode node = UdfJsonMapper.parseJson(jsonString); if (node.isMissingNode()) { return null; } if (allObjects && !node.isObject()) { allObjects = false; } nodes.add(node); } JsonNode result = nodes.get(0); if (allObjects) { for (int i = 1; i < nodes.size(); i++) { result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i)); } } else { for (int i = 1; i < nodes.size(); i++) { result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i))); } } return UdfJsonMapper.writeValueAsJson(result); }
@Test public void shouldMergeEmptyArrays() { // When: final String result = udf.concat("[]", "[]"); // Then: assertEquals("[]", result); }
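// A hedged companion example (not in the original suite) for the object-merge branch
// of concat(): when every argument parses as a JSON object, fields are merged rather
// than being coerced into arrays. The compact serialization and insertion-ordered
// keys are assumptions based on the "[]" output seen in the test above.
@Test
public void shouldMergeTwoObjects() {
    final String result = udf.concat("{\"a\":1}", "{\"b\":2}");
    assertEquals("{\"a\":1,\"b\":2}", result);
}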
@Nullable @Override public Message decode(@Nonnull RawMessage rawMessage) { final byte[] payload = rawMessage.getPayload(); final JsonNode event; try { event = objectMapper.readTree(payload); if (event == null || event.isMissingNode()) { throw new IOException("null result"); } } catch (IOException e) { LOG.error("Couldn't decode raw message {}", rawMessage); return null; } return parseEvent(event); }
@Test public void decodeMessagesHandlesTopbeatMessages() throws Exception { final Message message = codec.decode(messageFromJson("topbeat-system.json")); assertThat(message).isNotNull(); assertThat(message.getSource()).isEqualTo("example.local"); assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC)); assertThat(message.getField("beats_type")).isEqualTo("topbeat"); assertThat(message.getField("topbeat_type")).isEqualTo("system"); }
@Override public int getJDBCMinorVersion() { return 0; }
@Test void assertGetJDBCMinorVersion() { assertThat(metaData.getJDBCMinorVersion(), is(0)); }
ImmutableList<PayloadDefinition> validatePayloads(List<PayloadDefinition> payloads) { for (PayloadDefinition p : payloads) { checkArgument(p.hasName(), "Parsed payload does not have a name."); checkArgument( p.getInterpretationEnvironment() != PayloadGeneratorConfig.InterpretationEnvironment .INTERPRETATION_ENVIRONMENT_UNSPECIFIED, "Parsed payload does not have an interpretation_environment."); checkArgument( p.getExecutionEnvironment() != PayloadGeneratorConfig.ExecutionEnvironment.EXECUTION_ENVIRONMENT_UNSPECIFIED, "Parsed payload does not have an execution_environment."); checkArgument( !p.getVulnerabilityTypeList().isEmpty(), "Parsed payload has no entries for vulnerability_type."); checkArgument(p.hasPayloadString(), "Parsed payload does not have a payload_string."); if (p.getUsesCallbackServer().getValue()) { checkArgument( p.getPayloadString().getValue().contains("$TSUNAMI_PAYLOAD_TOKEN_URL"), "Parsed payload uses callback server but $TSUNAMI_PAYLOAD_TOKEN_URL not found in" + " payload_string."); } else { checkArgument( p.getValidationType() != PayloadValidationType.VALIDATION_TYPE_UNSPECIFIED, "Parsed payload has no validation_type and does not use the callback server."); if (p.getValidationType() == PayloadValidationType.VALIDATION_REGEX) { checkArgument( p.hasValidationRegex(), "Parsed payload has no validation_regex but uses PayloadValidationType.REGEX"); } } } return ImmutableList.copyOf(payloads); }
@Test public void validatePayloads_withoutPayloadString_throwsException() throws IOException { PayloadDefinition p = goodCallbackDefinition.clearPayloadString().build(); Throwable thrown = assertThrows( IllegalArgumentException.class, () -> module.validatePayloads(ImmutableList.of(p))); assertThat(thrown).hasMessageThat().contains("payload_string"); }
@Override public SchemaResult getValueSchema( final Optional<String> topicName, final Optional<Integer> schemaId, final FormatInfo expectedFormat, final SerdeFeatures serdeFeatures ) { return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false); }
@Test public void shouldReturnErrorFromGetValueSchemaIfCanNotConvertToConnectSchema() { // Given: when(schemaTranslator.toColumns(any(), any(), anyBoolean())) .thenThrow(new RuntimeException("it went boom")); // When: final SchemaResult result = supplier .getValueSchema(Optional.of(TOPIC_NAME), Optional.empty(), expectedFormat, SerdeFeatures.of()); // Then: assertThat(result.schemaAndId, is(Optional.empty())); assertThat(result.failureReason.get().getMessage(), containsString( "Unable to verify if the value schema for topic: some-topic is compatible with ksqlDB.")); assertThat(result.failureReason.get().getMessage(), containsString( "it went boom")); assertThat(result.failureReason.get().getMessage(), containsString(AVRO_SCHEMA)); }
@ExceptionHandler(SoapValidationException.class) @ResponseBody public Map<String, String> handleSoapValidationException(SoapValidationException exception) { Map<String,String> errorResponse = new HashMap<>(); errorResponse.put("status", "NOK"); return errorResponse; }
@Test public void handleSoapValidationExceptionShouldReturnStatusNok() { Map<String, String> controllerResponse = controller.handleSoapValidationException(new SoapValidationException("Soap Validation error")); assertEquals("NOK", controllerResponse.get("status")); }
@Override public GoPluginApiResponse submitTo(final String pluginId, String extensionType, final GoPluginApiRequest apiRequest) { return goPluginOSGiFramework.doOn(GoPlugin.class, pluginId, extensionType, (plugin, pluginDescriptor) -> { ensureInitializerInvoked(pluginDescriptor, plugin, extensionType); try { return plugin.handle(apiRequest); } catch (UnhandledRequestTypeException e) { LOGGER.error(e.getMessage()); LOGGER.debug(e.getMessage(), e); throw new RuntimeException(e); } }); }
@Test void shouldSubmitPluginApiRequestToGivenPlugin() throws Exception { String extensionType = "sample-extension"; GoPluginApiRequest request = mock(GoPluginApiRequest.class); GoPluginApiResponse expectedResponse = mock(GoPluginApiResponse.class); final GoPlugin goPlugin = mock(GoPlugin.class); final GoPluginDescriptor descriptor = mock(GoPluginDescriptor.class); when(goPlugin.handle(request)).thenReturn(expectedResponse); ArgumentCaptor<PluginAwareDefaultGoApplicationAccessor> captor = ArgumentCaptor.forClass(PluginAwareDefaultGoApplicationAccessor.class); doNothing().when(goPlugin).initializeGoApplicationAccessor(captor.capture()); doAnswer(invocationOnMock -> { ActionWithReturn<GoPlugin, GoPluginApiResponse> action = (ActionWithReturn<GoPlugin, GoPluginApiResponse>) invocationOnMock.getArguments()[3]; return action.execute(goPlugin, descriptor); }).when(goPluginOSGiFramework).doOn(eq(GoPlugin.class), eq("plugin-id"), eq(extensionType), any(ActionWithReturn.class)); DefaultPluginManager pluginManager = new DefaultPluginManager(monitor, registry, goPluginOSGiFramework, jarChangeListener, pluginRequestProcessorRegistry, systemEnvironment, pluginLoader); GoPluginApiResponse actualResponse = pluginManager.submitTo("plugin-id", extensionType, request); assertThat(actualResponse).isEqualTo(expectedResponse); PluginAwareDefaultGoApplicationAccessor accessor = captor.getValue(); assertThat(accessor.pluginDescriptor()).isEqualTo(descriptor); }
@Override public void prepare() { boolean isCollectorSealed = collectorSealed.compareAndSet(true, false); checkState( isCollectorSealed, "Failed to prepare the collector. Collector needs to be sealed before prepare() is invoked."); }
@Test public void testMultiplePrepareCallsWithoutFinishThrowsException() { futureCollector.prepare(); try { futureCollector.prepare(); Assert.fail("Second invocation of prepare should throw IllegalStateException"); } catch (IllegalStateException ex) { // expected: the first prepare() unsealed the collector and it was never sealed again } }
public static String schemaToPdl(DataSchema schema, EncodingStyle encodingStyle) { StringWriter writer = new StringWriter(); SchemaToPdlEncoder encoder = new SchemaToPdlEncoder(writer); encoder.setEncodingStyle(encodingStyle); try { encoder.encode(schema); } catch (IOException e) { throw new IllegalStateException(e); } return writer.toString(); }
@Test public void testEncodeDefaultValueFieldsInSchemaOrder() throws IOException { String inputSchema = String.join("\n", "record A {", "", " b: record B {", " b1: string", "", " c: record C {", " c2: int", " c1: boolean", "", " c3: array[string]", " }", " b2: double", " } = {", " \"b1\" : \"hello\",", " \"b2\" : 0.05,", " \"c\" : {", " \"c1\" : true,", " \"c2\" : 100,", " \"c3\" : [ \"one\", \"two\" ]", " }", " }", "}"); DataSchema schema = TestUtil.dataSchemaFromPdlString(inputSchema); String indentedSchema = SchemaToPdlEncoder.schemaToPdl(schema, SchemaToPdlEncoder.EncodingStyle.INDENTED); assertEquals(String.join("\n", "record A {", "", " b: record B {", " b1: string", "", " c: record C {", " c2: int", " c1: boolean", "", " c3: array[string]", " }", " b2: double", " } = {", " \"b1\" : \"hello\",", " \"c\" : {", " \"c2\" : 100,", " \"c1\" : true,", " \"c3\" : [ \"one\", \"two\" ]", " },", " \"b2\" : 0.05", " }", "}"), indentedSchema); String compactSchema = SchemaToPdlEncoder.schemaToPdl(schema, SchemaToPdlEncoder.EncodingStyle.COMPACT); assertEquals(Stream.of( "record A{", " b:record B{", " b1:string,", " c:record C{", " c2:int,", " c1:boolean,", " c3:array[string]", " }", " b2:double", " }={", " \"b1\":\"hello\",", " \"c\":{", " \"c2\":100,", " \"c1\":true,", " \"c3\":[\"one\",\"two\"]", " },", " \"b2\":0.05", " }", "}") .map(String::trim) .collect(Collectors.joining()), compactSchema); }
@Override public List<String> getKeys() throws IOException { return doOp(new ProviderCallable<List<String>>() { @Override public List<String> call(KMSClientProvider provider) throws IOException { return provider.getKeys(); } }, nextIdx(), true); }
@Test public void testClientRetriesIdempotentOpWithSocketTimeoutExceptionSucceeds() throws Exception { Configuration conf = new Configuration(); conf.setInt( CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3); final List<String> keys = Arrays.asList("testKey"); KMSClientProvider p1 = mock(KMSClientProvider.class); when(p1.getKeys()) .thenThrow(new SocketTimeoutException("p1")) .thenReturn(keys); KMSClientProvider p2 = mock(KMSClientProvider.class); when(p2.getKeys()).thenThrow(new SocketTimeoutException("p2")); when(p1.getKMSUrl()).thenReturn("p1"); when(p2.getKMSUrl()).thenReturn("p2"); LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider( new KMSClientProvider[] {p1, p2}, 0, conf); List<String> result = kp.getKeys(); assertEquals(keys, result); verify(p1, Mockito.times(2)).getKeys(); verify(p2, Mockito.times(1)).getKeys(); }
public void remove(ConnectorTaskId id) { final ScheduledFuture<?> task = committers.remove(id); if (task == null) return; try (LoggingContext loggingContext = LoggingContext.forTask(id)) { task.cancel(false); if (!task.isDone()) task.get(); } catch (CancellationException e) { // ignore log.trace("Offset commit thread was cancelled by another thread while removing connector task with id: {}", id); } catch (ExecutionException | InterruptedException e) { throw new ConnectException("Unexpected interruption in SourceTaskOffsetCommitter while removing task with id: " + id, e); } }
@Test public void testRemoveNonExistentTask() { assertTrue(committers.isEmpty()); committer.remove(taskId); assertTrue(committers.isEmpty()); }
@Override public String named() { return PluginEnum.SOFA.getName(); }
@Test public void testNamed() { final String result = sofaPlugin.named(); assertEquals(PluginEnum.SOFA.getName(), result); }
@EventListener void startup(StartupEvent event) { if (configuration.getBackgroundJobServer().isEnabled()) { backgroundJobServer.get().start(); } if (configuration.getDashboard().isEnabled()) { dashboardWebServer.get().start(); } }
@Test void onStartupOptionalsAreBootstrappedIfConfigured() { when(backgroundJobServerConfiguration.isEnabled()).thenReturn(true); when(dashboardConfiguration.isEnabled()).thenReturn(true); jobRunrStarter.startup(null); verify(backgroundJobServer).start(); verify(dashboardWebServer).start(); }
@Override public void verify(byte[] data, byte[] signature, MessageDigest digest) { verify(data, new EcSignature(signature), digest); }
@Test public void shouldValidateSignature() { verify(D, Q, "SHA-256"); }
public static void reloadFailureRecovery(File indexDir) throws IOException { File parentDir = indexDir.getParentFile(); // Recover index directory from segment backup directory if the segment backup directory exists File segmentBackupDir = new File(parentDir, indexDir.getName() + CommonConstants.Segment.SEGMENT_BACKUP_DIR_SUFFIX); if (segmentBackupDir.exists()) { LOGGER .info("Trying to recover index directory: {} from segment backup directory: {}", indexDir, segmentBackupDir); if (indexDir.exists()) { LOGGER.info("Deleting index directory: {}", indexDir); FileUtils.forceDelete(indexDir); } // The renaming operation is atomic, so if a failure happens during failure recovery, we will be left with the // segment backup directory, and can recover from that. Preconditions.checkState(segmentBackupDir.renameTo(indexDir), "Failed to rename segment backup directory: %s to index directory: %s", segmentBackupDir, indexDir); } // Delete segment temporary directory if it exists File segmentTempDir = new File(parentDir, indexDir.getName() + CommonConstants.Segment.SEGMENT_TEMP_DIR_SUFFIX); if (segmentTempDir.exists()) { LOGGER.info("Trying to delete segment temporary directory: {}", segmentTempDir); FileUtils.forceDelete(segmentTempDir); } }
@Test public void testReloadFailureRecovery() throws IOException { String segmentName = "dummySegment"; String indexFileName = "dummyIndex"; File indexDir = new File(TEST_DIR, segmentName); File segmentBackupDir = new File(TEST_DIR, segmentName + CommonConstants.Segment.SEGMENT_BACKUP_DIR_SUFFIX); File segmentTempDir = new File(TEST_DIR, segmentName + CommonConstants.Segment.SEGMENT_TEMP_DIR_SUFFIX); // Only index directory exists (normal case, or failed before the first renaming) Assert.assertTrue(indexDir.mkdir()); FileUtils.touch(new File(indexDir, indexFileName)); LoaderUtils.reloadFailureRecovery(indexDir); Assert.assertTrue(indexDir.exists()); Assert.assertTrue(new File(indexDir, indexFileName).exists()); Assert.assertFalse(segmentBackupDir.exists()); Assert.assertFalse(segmentTempDir.exists()); FileUtils.deleteDirectory(indexDir); // Only segment backup directory exists (failed after the first renaming but before copying happened) Assert.assertTrue(segmentBackupDir.mkdir()); FileUtils.touch(new File(segmentBackupDir, indexFileName)); LoaderUtils.reloadFailureRecovery(indexDir); Assert.assertTrue(indexDir.exists()); Assert.assertTrue(new File(indexDir, indexFileName).exists()); Assert.assertFalse(segmentBackupDir.exists()); Assert.assertFalse(segmentTempDir.exists()); FileUtils.deleteDirectory(indexDir); // Index directory and segment backup directory exist (failed before second renaming) Assert.assertTrue(indexDir.mkdir()); Assert.assertTrue(segmentBackupDir.mkdir()); FileUtils.touch(new File(segmentBackupDir, indexFileName)); LoaderUtils.reloadFailureRecovery(indexDir); Assert.assertTrue(indexDir.exists()); Assert.assertTrue(new File(indexDir, indexFileName).exists()); Assert.assertFalse(segmentBackupDir.exists()); Assert.assertFalse(segmentTempDir.exists()); FileUtils.deleteDirectory(indexDir); // Index directory and segment temporary directory exist (failed after second renaming) Assert.assertTrue(indexDir.mkdir()); FileUtils.touch(new File(indexDir, indexFileName)); Assert.assertTrue(segmentTempDir.mkdir()); LoaderUtils.reloadFailureRecovery(indexDir); Assert.assertTrue(indexDir.exists()); Assert.assertTrue(new File(indexDir, indexFileName).exists()); Assert.assertFalse(segmentBackupDir.exists()); Assert.assertFalse(segmentTempDir.exists()); FileUtils.deleteDirectory(indexDir); }
private void setConsumeEndTime(SegmentZKMetadata segmentZKMetadata, long now) { long maxConsumeTimeMillis = _streamConfig.getFlushThresholdTimeMillis(); _consumeEndTime = segmentZKMetadata.getCreationTime() + maxConsumeTimeMillis; // When we restart a server, the consuming segments retain their creationTime (derived from segment // metadata), but a couple of corner cases can happen: // (1) The server was down for a very long time, and the consuming segment is not yet completed. // (2) The consuming segment was just about to be completed, but the server went down. // In either of these two cases, if a different replica could not complete the segment, it is possible // that we get a value for _consumeEndTime that is in the very near future, or even in the past. In such // cases, we let some minimum consumption happen before we attempt to complete the segment (unless, of course // the max consumption time has been configured to be less than the minimum time we use in this class). long minConsumeTimeMillis = Math.min(maxConsumeTimeMillis, TimeUnit.MILLISECONDS.convert(MINIMUM_CONSUME_TIME_MINUTES, TimeUnit.MINUTES)); if (_consumeEndTime - now < minConsumeTimeMillis) { _consumeEndTime = now + minConsumeTimeMillis; } }
@Test public void testEndCriteriaChecking() throws Exception { // test reaching max row limit try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.INITIAL_CONSUMING); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager.setNumRowsIndexed(Fixtures.MAX_ROWS_IN_SEGMENT - 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager.setNumRowsIndexed(Fixtures.MAX_ROWS_IN_SEGMENT); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); Assert.assertEquals(segmentDataManager.getStopReason(), SegmentCompletionProtocol.REASON_ROW_LIMIT); } // test reaching max time limit try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.INITIAL_CONSUMING); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); // We should still get false because there is no messages fetched segmentDataManager._timeSupplier.add(Fixtures.MAX_TIME_FOR_SEGMENT_CLOSE_MS + 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); // Once there are messages fetched, and the time exceeds the extended hour, we should get true setHasMessagesFetched(segmentDataManager, true); segmentDataManager._timeSupplier.add(TimeUnit.HOURS.toMillis(1)); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); Assert.assertEquals(segmentDataManager.getStopReason(), SegmentCompletionProtocol.REASON_TIME_LIMIT); } // In catching up state, test reaching final offset try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CATCHING_UP); final long finalOffset = START_OFFSET_VALUE + 100; segmentDataManager.setFinalOffset(finalOffset); segmentDataManager.setCurrentOffset(finalOffset - 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager.setCurrentOffset(finalOffset); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); } // In catching up state, test reaching final offset ignoring time try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._timeSupplier.add(Fixtures.MAX_TIME_FOR_SEGMENT_CLOSE_MS); segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CATCHING_UP); final long finalOffset = START_OFFSET_VALUE + 100; segmentDataManager.setFinalOffset(finalOffset); segmentDataManager.setCurrentOffset(finalOffset - 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager.setCurrentOffset(finalOffset); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); } // When we go from consuming to online state, time and final offset matter. // Case 1. We have reached final offset. 
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._timeSupplier.add(1); segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CONSUMING_TO_ONLINE); segmentDataManager.setConsumeEndTime(segmentDataManager._timeSupplier.get() + 10); final long finalOffset = START_OFFSET_VALUE + 100; segmentDataManager.setFinalOffset(finalOffset); segmentDataManager.setCurrentOffset(finalOffset - 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager.setCurrentOffset(finalOffset); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); } // Case 2. We have reached time limit. try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) { segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CONSUMING_TO_ONLINE); final long endTime = segmentDataManager._timeSupplier.get() + 10; segmentDataManager.setConsumeEndTime(endTime); final long finalOffset = START_OFFSET_VALUE + 100; segmentDataManager.setFinalOffset(finalOffset); segmentDataManager.setCurrentOffset(finalOffset - 1); segmentDataManager._timeSupplier.set(endTime - 1); Assert.assertFalse(segmentDataManager.invokeEndCriteriaReached()); segmentDataManager._timeSupplier.set(endTime); Assert.assertTrue(segmentDataManager.invokeEndCriteriaReached()); } }
public EndpointResponse streamQuery( final KsqlSecurityContext securityContext, final KsqlRequest request, final CompletableFuture<Void> connectionClosedFuture, final Optional<Boolean> isInternalRequest, final MetricsCallbackHolder metricsCallbackHolder, final Context context ) { throwIfNotConfigured(); activenessRegistrar.updateLastRequestTime(); final PreparedStatement<?> statement = parseStatement(request); CommandStoreUtil.httpWaitForCommandSequenceNumber( commandQueue, request, commandQueueCatchupTimeout); return handleStatement(securityContext, request, statement, connectionClosedFuture, isInternalRequest, metricsCallbackHolder, context); }
@Test public void shouldUpdateTheLastRequestTime() { // When: testResource.streamQuery( securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), Collections.emptyMap(), null), new CompletableFuture<>(), Optional.empty(), new MetricsCallbackHolder(), context ); // Then: verify(activenessRegistrar).updateLastRequestTime(); }
public static List<ColumnType> columnTypesFromStrings(final List<String> columnTypes) { return columnTypes.stream().map(RowUtil::columnTypeFromString).collect(Collectors.toList()); }
@Test public void shouldGetColumnTypesFromStrings() { // Given final List<String> stringTypes = ImmutableList.of( "STRING", "INTEGER", "BIGINT", "BOOLEAN", "DOUBLE", "ARRAY<STRING>", "MAP<STRING, STRING>", "DECIMAL(4, 2)", "STRUCT<`F1` STRING, `F2` INTEGER>", "TIMESTAMP", "DATE", "TIME" ); // When final List<ColumnType> columnTypes = RowUtil.columnTypesFromStrings(stringTypes); // Then assertThat( columnTypes.stream() .map(t -> t.getType().toString()) .collect(Collectors.toList()), contains( "STRING", "INTEGER", "BIGINT", "BOOLEAN", "DOUBLE", "ARRAY", "MAP", "DECIMAL", "STRUCT", "TIMESTAMP", "DATE", "TIME" )); }
@Override public <T extends State> T state(StateNamespace namespace, StateTag<T> address) { return workItemState.get(namespace, address, StateContexts.nullContext()); }
@Test public void testOrderedListIsEmptyTrue() throws Exception { StateTag<OrderedListState<String>> addr = StateTags.orderedList("orderedList", StringUtf8Coder.of()); OrderedListState<String> orderedList = underTest.state(NAMESPACE, addr); SettableFuture<Iterable<TimestampedValue<String>>> future = SettableFuture.create(); when(mockReader.orderedListFuture( FULL_ORDERED_LIST_RANGE, key(NAMESPACE, "orderedList"), STATE_FAMILY, StringUtf8Coder.of())) .thenReturn(future); ReadableState<Boolean> result = orderedList.isEmpty().readLater(); Mockito.verify(mockReader) .orderedListFuture( FULL_ORDERED_LIST_RANGE, key(NAMESPACE, "orderedList"), STATE_FAMILY, StringUtf8Coder.of()); waitAndSet(future, Collections.emptyList(), 200); assertThat(result.read(), Matchers.is(true)); }
static <U, V> ImmutableBiMap<U, V> maximumCardinalityBipartiteMatching(Multimap<U, V> graph) { return HopcroftKarp.overBipartiteGraph(graph).perform(); }
@Test public void maximumCardinalityBipartiteMatching_failsWithNullLhs() { ListMultimap<String, String> edges = LinkedListMultimap.create(); edges.put(null, "R1"); try { BiMap<String, String> unused = maximumCardinalityBipartiteMatching(edges); fail("Should have thrown."); } catch (NullPointerException expected) { } }
public COSArray toCOSArray() { COSArray array = new COSArray(); array.add(new COSFloat(single[0])); array.add(new COSFloat(single[1])); array.add(new COSFloat(single[3])); array.add(new COSFloat(single[4])); array.add(new COSFloat(single[6])); array.add(new COSFloat(single[7])); return array; }
@Test void testPdfbox2872() { Matrix m = new Matrix(2, 4, 5, 8, 2, 0); COSArray toCOSArray = m.toCOSArray(); assertEquals(new COSFloat(2), toCOSArray.get(0)); assertEquals(new COSFloat(4), toCOSArray.get(1)); assertEquals(new COSFloat(5), toCOSArray.get(2)); assertEquals(new COSFloat(8), toCOSArray.get(3)); assertEquals(new COSFloat(2), toCOSArray.get(4)); assertEquals(COSFloat.ZERO, toCOSArray.get(5)); }
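// Why toCOSArray() above emits only six of the nine stored values: a PDF
// transformation matrix always has the fixed third column
//   [ a b 0 ]
//   [ c d 0 ]
//   [ e f 1 ]
// so indices 2, 5 and 8 of the row-major single[] array carry no information. The six
// serialized entries are a=single[0], b=single[1], c=single[3], d=single[4],
// e=single[6], f=single[7] -- exactly the values the test checks for (2, 4, 5, 8, 2, 0).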
public static List<ExportPackages.Export> parseExports(String exportAttribute) { ParsingContext p = new ParsingContext(exportAttribute.trim()); List<ExportPackages.Export> exports = parseExportPackage(p); if (exports.isEmpty()) { p.fail("Expected a list of exports"); } else if (p.atEnd() == false) { p.fail("Exports not fully processed"); } return exports; }
@Test void require_that_spaces_between_separators_are_allowed() { List<Export> exports = ExportPackageParser.parseExports("exported.package1 , exported.package2 ; version = \"1.2.3.sample\" "); assertEquals(2, exports.size()); Export export = exports.get(0); assertTrue(export.getPackageNames().contains("exported.package1")); export = exports.get(1); assertTrue(export.getPackageNames().contains("exported.package2")); assertTrue(export.getParameters().contains(versionParameter)); }
@Override @Transactional(rollbackFor = Exception.class) public void updateJobStatus(Long id, Integer status) throws SchedulerException { // Validate the status value if (!containsAny(status, JobStatusEnum.NORMAL.getStatus(), JobStatusEnum.STOP.getStatus())) { throw exception(JOB_CHANGE_STATUS_INVALID); } // Validate that the job exists JobDO job = validateJobExists(id); // Validate that the job is not already in the target status if (job.getStatus().equals(status)) { throw exception(JOB_CHANGE_STATUS_EQUALS); } // Update the job status JobDO updateObj = JobDO.builder().id(id).status(status).build(); jobMapper.updateById(updateObj); // Propagate the status change to Quartz if (JobStatusEnum.NORMAL.getStatus().equals(status)) { // Resume schedulerManager.resumeJob(job.getHandlerName()); } else { // Pause schedulerManager.pauseJob(job.getHandlerName()); } }
@Test public void testUpdateJobStatus_normalSuccess() throws SchedulerException { // Mock data JobDO job = randomPojo(JobDO.class, o -> o.setStatus(JobStatusEnum.STOP.getStatus())); jobMapper.insert(job); // Invoke jobService.updateJobStatus(job.getId(), JobStatusEnum.NORMAL.getStatus()); // Verify the persisted record JobDO dbJob = jobMapper.selectById(job.getId()); assertEquals(JobStatusEnum.NORMAL.getStatus(), dbJob.getStatus()); // Verify the scheduler interaction verify(schedulerManager).resumeJob(eq(job.getHandlerName())); }
@ExceptionHandler(HttpRequestMethodNotSupportedException.class) protected ShenyuAdminResult handleHttpRequestMethodNotSupportedException(final HttpRequestMethodNotSupportedException e) { LOG.warn("http request method not supported", e); StringBuilder sb = new StringBuilder(); sb.append(e.getMethod()); sb.append( " method is not supported for this request. Supported methods are "); Objects.requireNonNull(e.getSupportedHttpMethods()).forEach(t -> sb.append(t).append(" ")); return ShenyuAdminResult.error(sb.toString()); }
@Test public void testHandleHttpRequestMethodNotSupportedException() { String[] supportedMethod = new String[]{"POST", "GET"}; HttpRequestMethodNotSupportedException exception = new HttpRequestMethodNotSupportedException("POST", Arrays.asList(supportedMethod)); ShenyuAdminResult result = exceptionHandlersUnderTest.handleHttpRequestMethodNotSupportedException(exception); Assertions.assertEquals(result.getCode().intValue(), CommonErrorCode.ERROR); MatcherAssert.assertThat(result.getMessage(), containsString("method is not supported for this request. Supported methods are")); }
public static int readVInt(ByteData arr, long position) { byte b = arr.get(position++); if(b == (byte) 0x80) throw new RuntimeException("Attempting to read null value as int"); int value = b & 0x7F; while ((b & 0x80) != 0) { b = arr.get(position++); value <<= 7; value |= (b & 0x7F); } return value; }
@Test(expected = EOFException.class) public void testReadVIntEmptyHollowBlobInput() throws IOException { HollowBlobInput hbi = HollowBlobInput.serial(BYTES_EMPTY); VarInt.readVInt(hbi); }
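// A minimal self-contained sketch of the decode loop in readVInt() above, over a
// plain byte[] instead of ByteData (the 0x80 null-sentinel check is omitted here):
// the high bit of each byte is a continuation flag and the low seven bits carry
// value groups, most significant group first.
static int readVIntFromArray(byte[] arr, int position) {
    byte b = arr[position++];
    int value = b & 0x7F;               // low 7 bits of the first group
    while ((b & 0x80) != 0) {           // continuation bit set: more groups follow
        b = arr[position++];
        value <<= 7;
        value |= (b & 0x7F);
    }
    return value;
}
// Worked example: {0x81, 0x00} -> first group is 1 with continuation set, then
// (1 << 7) | 0 = 128.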
public static boolean isValidRule(ParamFlowRule rule) { return rule != null && !StringUtil.isBlank(rule.getResource()) && rule.getCount() >= 0 && rule.getGrade() >= 0 && rule.getParamIdx() != null && rule.getBurstCount() >= 0 && rule.getControlBehavior() >= 0 && rule.getDurationInSec() > 0 && rule.getMaxQueueingTimeMs() >= 0 && checkCluster(rule) && checkRegexField(rule); }
@Test public void testCheckValidHotParamRule() { // Null or empty resource; ParamFlowRule rule1 = new ParamFlowRule(); ParamFlowRule rule2 = new ParamFlowRule(""); assertFalse(ParamFlowRuleUtil.isValidRule(null)); assertFalse(ParamFlowRuleUtil.isValidRule(rule1)); assertFalse(ParamFlowRuleUtil.isValidRule(rule2)); // Invalid threshold count. ParamFlowRule rule3 = new ParamFlowRule("abc") .setCount(-1) .setParamIdx(1); assertFalse(ParamFlowRuleUtil.isValidRule(rule3)); // Parameter index not set or invalid. ParamFlowRule rule4 = new ParamFlowRule("abc") .setCount(1); ParamFlowRule rule5 = new ParamFlowRule("abc") .setCount(1) .setParamIdx(-1); assertFalse(ParamFlowRuleUtil.isValidRule(rule4)); assertTrue(ParamFlowRuleUtil.isValidRule(rule5)); ParamFlowRule goodRule = new ParamFlowRule("abc") .setCount(10) .setParamIdx(1); assertTrue(ParamFlowRuleUtil.isValidRule(goodRule)); }
public static DescriptorDigest fromHash(String hash) throws DigestException { if (!hash.matches(HASH_REGEX)) { throw new DigestException("Invalid hash: " + hash); } return new DescriptorDigest(hash); }
@Test public void testCreateFromHash_fail() { String badHash = "not a valid hash"; try { DescriptorDigest.fromHash(badHash); Assert.fail("Invalid hash should have caused digest creation failure."); } catch (DigestException ex) { Assert.assertEquals("Invalid hash: " + badHash, ex.getMessage()); } }
@Override protected Result[] run(String value) { final Grok grok = grokPatternRegistry.cachedGrokForPattern(this.pattern, this.namedCapturesOnly); // the extractor instance is rebuilt every second anyway final Match match = grok.match(value); final Map<String, Object> matches = match.captureFlattened(); final List<Result> results = new ArrayList<>(matches.size()); for (final Map.Entry<String, Object> entry : matches.entrySet()) { // never add null values to the results, those don't make sense for us if (entry.getValue() != null) { results.add(new Result(entry.getValue(), entry.getKey(), -1, -1)); } } return results.toArray(new Result[0]); }
@Test public void testDateExtraction() { final GrokExtractor extractor = makeExtractor("%{GREEDY:timestamp;date;yyyy-MM-dd'T'HH:mm:ss.SSSX}"); final Extractor.Result[] results = extractor.run("2015-07-31T10:05:36.773Z"); assertEquals("ISO date is parsed", 1, results.length); Object value = results[0].getValue(); assertTrue(value instanceof Instant); DateTime date = new DateTime(((Instant) value).toEpochMilli(), DateTimeZone.UTC); assertEquals(2015, date.getYear()); assertEquals(7, date.getMonthOfYear()); assertEquals(31, date.getDayOfMonth()); assertEquals(10, date.getHourOfDay()); assertEquals(5, date.getMinuteOfHour()); assertEquals(36, date.getSecondOfMinute()); assertEquals(773, date.getMillisOfSecond()); }
public void start() { Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED), "Cannot be started more than once"); Preconditions.checkState(!executorService.isShutdown(), "Already started"); Preconditions.checkState(!hasLeadership, "Already has leadership"); client.getConnectionStateListenable().addListener(listener); requeue(); }
@Test public void testInterruption() throws Exception { Timing2 timing = new Timing2(); LeaderSelector selector = null; CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); try { client.start(); CountDownLatch exitLatch = new CountDownLatch(1); BlockingQueue<Thread> threadExchange = new ArrayBlockingQueue<>(1); LeaderSelectorListener listener = new LeaderSelectorListenerAdapter() { @Override public void takeLeadership(CuratorFramework client) throws Exception { threadExchange.put(Thread.currentThread()); try { Thread.currentThread().join(); } finally { exitLatch.countDown(); } } }; selector = new LeaderSelector(client, PATH_NAME, listener); selector.failedMutexReleaseCount = new AtomicInteger(); selector.start(); Thread leaderThread = timing.takeFromQueue(threadExchange); leaderThread.interrupt(); assertTrue(timing.awaitLatch(exitLatch)); timing.sleepABit(); // wait for leader selector to clear nodes assertEquals(0, selector.failedMutexReleaseCount.get()); } finally { CloseableUtils.closeQuietly(selector); CloseableUtils.closeQuietly(client); } }
@Override public int compareTo(Resource other) { checkArgument(other != null && getClass() == other.getClass() && name.equals(other.name)); return value.compareTo(other.value); }
@Test void testCompareToFailNull() { assertThatThrownBy(() -> new TestResource(0.0).compareTo(null)) .isInstanceOf(IllegalArgumentException.class); }
@Override public boolean find(Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return true; } try { try { final boolean found; if(containerService.isContainer(file)) { final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(file).getName()); return container.exists(null, null, context); } if(file.isFile() || file.isPlaceholder()) { try { final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()) .getBlobReferenceFromServer(containerService.getKey(file)); return blob.exists(null, null, context); } catch(StorageException e) { switch(e.getHttpStatusCode()) { case HttpStatus.SC_NOT_FOUND: if(file.isPlaceholder()) { // Ignore failure and look for common prefix break; } default: throw e; } } } if(log.isDebugEnabled()) { log.debug(String.format("Search for common prefix %s", file)); } // Check for common prefix try { new AzureObjectListService(session, context).list(file, new CancellingListProgressListener()); return true; } catch(ListCanceledException l) { // Found common prefix return true; } } catch(StorageException e) { throw new AzureExceptionMappingService().map("Failure to read attributes of {0}", e, file); } catch(URISyntaxException e) { return false; } } catch(NotfoundException e) { return false; } }
@Test public void testFindCommonPrefix() throws Exception { final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); assertTrue(new AzureFindFeature(session, null).find(container)); final String prefix = new AlphanumericRandomStringService().random(); final Path intermediate = new Path(container, prefix, EnumSet.of(Path.Type.directory)); final Path test = new AzureTouchFeature(session, null).touch(new Path(intermediate, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertTrue(new AzureFindFeature(session, null).find(test)); assertFalse(new AzureFindFeature(session, null).find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory)))); assertTrue(new AzureFindFeature(session, null).find(intermediate)); // Ignore 404 for placeholder and search for common prefix assertTrue(new AzureFindFeature(session, null).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)))); assertTrue(new AzureObjectListService(session, null).list(intermediate, new DisabledListProgressListener()).contains(test)); new AzureDeleteFeature(session, null).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new AzureFindFeature(session, null).find(test)); assertFalse(new AzureFindFeature(session, null).find(intermediate)); final PathCache cache = new PathCache(1); final Path directory = new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)); assertFalse(new CachingFindFeature(session, cache, new AzureFindFeature(session, null)).find(directory)); assertFalse(cache.isCached(directory)); assertFalse(new AzureFindFeature(session, null).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)))); }
public Object get(final Object bean) { return get(this.patternParts, bean, false); }
@Test public void beanPathTest1() { final BeanPath pattern = new BeanPath("userInfo.examInfoDict[0].id"); assertEquals("userInfo", pattern.patternParts.get(0)); assertEquals("examInfoDict", pattern.patternParts.get(1)); assertEquals("0", pattern.patternParts.get(2)); assertEquals("id", pattern.patternParts.get(3)); }
public static <K, E> Collector<E, ImmutableSetMultimap.Builder<K, E>, ImmutableSetMultimap<K, E>> unorderedIndex(Function<? super E, K> keyFunction) { return unorderedIndex(keyFunction, Function.identity()); }
@Test public void unorderedIndex_parallel_stream() { SetMultimap<String, String> multimap = HUGE_LIST.parallelStream().collect(unorderedIndex(identity())); assertThat(multimap.keySet()).isEqualTo(HUGE_SET); }
@Override protected DAVClient connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final HttpClientBuilder configuration = this.getConfiguration(proxy, prompt); return new DAVClient(new HostUrlProvider().withUsername(false).get(host), configuration); }
@Test(expected = ConnectionRefusedException.class) public void testProxyNoConnect() throws Exception { final Host host = new Host(new DAVSSLProtocol(), "svn.cyberduck.io"); final DAVSession session = new DAVSession(host, new DefaultX509TrustManager(), new KeychainX509KeyManager(new DisabledCertificateIdentityCallback(), host, new DisabledCertificateStore())) { }; final LoginConnectionService c = new LoginConnectionService( new DisabledLoginCallback(), new DisabledHostKeyCallback(), new DisabledPasswordStore(), new DisabledProgressListener(), new ProxyFinder() { @Override public Proxy find(final String target) { return new Proxy(Proxy.Type.HTTP, "localhost", 3128); } } ); c.connect(session, new DisabledCancelCallback()); session.close(); }
public static byte[] parseMAC(String value) { final byte[] machineId; final char separator; switch (value.length()) { case 17: separator = value.charAt(2); validateMacSeparator(separator); machineId = new byte[EUI48_MAC_ADDRESS_LENGTH]; break; case 23: separator = value.charAt(2); validateMacSeparator(separator); machineId = new byte[EUI64_MAC_ADDRESS_LENGTH]; break; default: throw new IllegalArgumentException("value is not supported [MAC-48, EUI-48, EUI-64]"); } final int end = machineId.length - 1; int j = 0; for (int i = 0; i < end; ++i, j += 3) { final int sIndex = j + 2; machineId[i] = StringUtil.decodeHexByte(value, j); if (value.charAt(sIndex) != separator) { throw new IllegalArgumentException("expected separator '" + separator + "' but got '" + value.charAt(sIndex) + "' at index: " + sIndex); } } machineId[end] = StringUtil.decodeHexByte(value, j); return machineId; }
@Test public void testParseMacEUI48ToEUI64() { // EUI-48 into an EUI-64 assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xff, (byte) 0xfe, (byte) 0xbb, 0x22, (byte) 0xcc}, parseMAC("00-AA-11-FF-FE-BB-22-CC")); assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xff, (byte) 0xfe, (byte) 0xbb, 0x22, (byte) 0xcc}, parseMAC("00:AA:11:FF:FE:BB:22:CC")); }
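// A hedged companion case derived from the parser above (not in the original suite):
// a 17-character string takes the MAC-48/EUI-48 branch, so the result should be six
// bytes (assuming EUI48_MAC_ADDRESS_LENGTH == 6), with ':' and '-' both accepted as
// separators, mirroring the 23-character EUI-64 test.
@Test
public void testParseMacEUI48() {
    assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xbb, 0x22, (byte) 0xcc},
            parseMAC("00:AA:11:BB:22:CC"));
    assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xbb, 0x22, (byte) 0xcc},
            parseMAC("00-AA-11-BB-22-CC"));
}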
@VisibleForTesting CompletableFuture<Void> checkPersistencePolicies() { TopicName topicName = TopicName.get(topic); CompletableFuture<Void> future = new CompletableFuture<>(); brokerService.getManagedLedgerConfig(topicName).thenAccept(config -> { // update managed-ledger config and managed-cursor.markDeleteRate this.ledger.setConfig(config); future.complete(null); }).exceptionally(ex -> { log.warn("[{}] Failed to update persistence-policies {}", topic, ex.getMessage()); future.completeExceptionally(ex); return null; }); return future; }
@Test public void testCheckPersistencePolicies() throws Exception { final String myNamespace = "prop/ns"; admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); final String topic = "persistent://" + myNamespace + "/testConfig" + UUID.randomUUID(); conf.setForceDeleteNamespaceAllowed(true); pulsarClient.newProducer().topic(topic).create().close(); RetentionPolicies retentionPolicies = new RetentionPolicies(1, 1); PersistentTopic persistentTopic = spy((PersistentTopic) pulsar.getBrokerService().getTopicIfExists(topic).get().get()); TopicPoliciesService policiesService = spy(pulsar.getTopicPoliciesService()); doReturn(policiesService).when(pulsar).getTopicPoliciesService(); TopicPolicies policies = new TopicPolicies(); policies.setRetentionPolicies(retentionPolicies); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(policiesService).getTopicPoliciesAsync(TopicName.get(topic)); persistentTopic.onUpdate(policies); verify(persistentTopic, times(1)).checkPersistencePolicies(); Awaitility.await().untilAsserted(() -> { assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); }); // throw exception doReturn(CompletableFuture.failedFuture(new RuntimeException())).when(persistentTopic).checkPersistencePolicies(); policies.setRetentionPolicies(new RetentionPolicies(2, 2)); persistentTopic.onUpdate(policies); assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); }
@Override public void publish(ScannerReportWriter writer) { Optional<String> targetBranch = getTargetBranch(); if (targetBranch.isPresent()) { Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG); int count = writeChangedLines(scmConfiguration.provider(), writer, targetBranch.get()); LOG.debug("SCM reported changed lines for {} {} in the branch", count, ScannerUtils.pluralize("file", count)); profiler.stopInfo(); } }
@Test public void skip_if_target_branch_is_null() { when(branchConfiguration.targetBranchName()).thenReturn(null); publisher.publish(writer); verifyNoInteractions(inputComponentStore, inputModuleHierarchy, provider); assertNotPublished(); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { if(!session.getClient().setFileType(FTP.BINARY_FILE_TYPE)) { throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString()); } if(status.isAppend()) { session.getClient().setRestartOffset(status.getOffset()); } final InputStream in = new DataConnectionActionExecutor(session).data(new DataConnectionAction<InputStream>() { @Override public InputStream execute() throws BackgroundException { try { return session.getClient().retrieveFileStream(file.getAbsolute()); } catch(IOException e) { throw new FTPExceptionMappingService().map(e); } } }); return new ReadReplyInputStream(in, status); } catch(IOException e) { throw new FTPExceptionMappingService().map("Download {0} failed", e, file); } }
@Test public void testRead() throws Exception { final Path home = new FTPWorkdirService(session).find(); final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new FTPTouchFeature(session).touch(test, new TransferStatus()); final int length = 39865; final byte[] content = RandomUtils.nextBytes(length); { final TransferStatus status = new TransferStatus().withLength(content.length); final OutputStream out = new FTPWriteFeature(session).write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).withLimit((long) content.length).transfer(new ByteArrayInputStream(content), out); out.close(); } { final TransferStatus status = new TransferStatus(); status.setLength(content.length); final InputStream in = new FTPReadFeature(session).read(test, status, new DisabledConnectionCallback()); assertNotNull(in); final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length); new StreamCopier(status, status).withLimit((long) content.length).transfer(in, buffer); in.close(); assertArrayEquals(content, buffer.toByteArray()); } new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }