focal_method (string, lengths 13 to 60.9k)
test_case (string, lengths 25 to 109k)
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
@Test public void testReplayShareGroupMetadataWithNullValue() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ShareGroupMetadataKey key = new ShareGroupMetadataKey(); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 11), null )); verify(groupMetadataManager, times(1)).replay(key, null); }
@Override public Set<EntityExcerpt> listEntityExcerpts() { return grokPatternService.loadAll().stream() .map(this::createExcerpt) .collect(Collectors.toSet()); }
@Test public void listEntityExcerpts() throws ValidationException { grokPatternService.save(GrokPattern.create("Test1", "[a-z]+")); grokPatternService.save(GrokPattern.create("Test2", "[a-z]+")); final EntityExcerpt expectedEntityExcerpt1 = EntityExcerpt.builder() .id(ModelId.of("1")) .type(ModelTypes.GROK_PATTERN_V1) .title("Test1") .build(); final EntityExcerpt expectedEntityExcerpt2 = EntityExcerpt.builder() .id(ModelId.of("2")) .type(ModelTypes.GROK_PATTERN_V1) .title("Test2") .build(); final Set<EntityExcerpt> entityExcerpts = facade.listEntityExcerpts(); assertThat(entityExcerpts) .hasSize(2) .contains(expectedEntityExcerpt1, expectedEntityExcerpt2); }
@Override public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) { return getDescriptionInHtml(rule) .map(this::generateSections) .orElse(emptySet()); }
@Test public void parse_md_rule_description() { String ruleDescription = "This is the custom rule description"; String exceptionsContent = "This the exceptions section content"; String askContent = "This is the ask section content"; String recommendedContent = "This is the recommended section content"; when(rule.markdownDescription()).thenReturn(ruleDescription + "\n" + "== Exceptions" + "\n" + exceptionsContent + "\n" + "== Ask Yourself Whether" + "\n" + askContent + "\n" + "== Recommended Secure Coding Practices" + "\n" + recommendedContent + "\n"); Set<RuleDescriptionSectionDto> results = generator.generateSections(rule); Map<String, String> sectionKeyToContent = results.stream().collect(toMap(RuleDescriptionSectionDto::getKey, RuleDescriptionSectionDto::getContent)); assertThat(sectionKeyToContent).hasSize(4) .containsEntry(DEFAULT_SECTION_KEY, Markdown.convertToHtml(rule.markdownDescription())) .containsEntry(ROOT_CAUSE_SECTION_KEY, ruleDescription + "<br/>" + "<h2>Exceptions</h2>" + exceptionsContent + "<br/>") .containsEntry(ASSESS_THE_PROBLEM_SECTION_KEY,"<h2>Ask Yourself Whether</h2>" + askContent + "<br/>") .containsEntry(HOW_TO_FIX_SECTION_KEY, "<h2>Recommended Secure Coding Practices</h2>" + recommendedContent + "<br/>"); }
@Override public int read() throws IOException { checkClosed(); if (pointer >= this.size) { return -1; } if (currentBufferPointer >= chunkSize) { if (bufferListIndex >= bufferListMaxIndex) { return -1; } else { currentBuffer = bufferList.get(++bufferListIndex); currentBufferPointer = 0; } } pointer++; return currentBuffer.get(currentBufferPointer++) & 0xff; }
@Test void testPDFBOX5764() throws IOException { int bufferSize = 4096; int limit = 2048; ByteBuffer buffer = ByteBuffer.wrap(new byte[bufferSize]); buffer.limit(limit); try (RandomAccessRead rar = new RandomAccessReadBuffer(buffer)) { byte[] buf = new byte[bufferSize]; int bytesRead = rar.read(buf); assertEquals(limit, bytesRead); } }
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
@Test public void testIntegerNotEqRewritten() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE - 25))).eval(FILE); assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE - 1))).eval(FILE); assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE))).eval(FILE); assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE - 4))).eval(FILE); assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE))).eval(FILE); assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE + 1))).eval(FILE); assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE + 6))).eval(FILE); assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); }
@Override public boolean isWarProject() { String packaging = project.getPackaging(); return "war".equals(packaging) || "gwt-app".equals(packaging); }
@Test public void testIsWarProject_jarPackagingIsNotWar() { when(mockMavenProject.getPackaging()).thenReturn("jar"); assertThat(mavenProjectProperties.isWarProject()).isFalse(); }
@Override public CompletableFuture<Acknowledge> notifyNewBlockedNodes(Collection<BlockedNode> newNodes) { blocklistHandler.addNewBlockedNodes(newNodes); return CompletableFuture.completedFuture(Acknowledge.get()); }
@Test void testUnblockResourcesWillTriggerResourceRequirementsCheck() throws Exception { final CompletableFuture<Void> triggerRequirementsCheckFuture = new CompletableFuture<>(); final SlotManager slotManager = new TestingSlotManagerBuilder() .setTriggerRequirementsCheckConsumer( triggerRequirementsCheckFuture::complete) .createSlotManager(); resourceManager = new ResourceManagerBuilder() .withSlotManager(slotManager) .withBlocklistHandlerFactory( new DefaultBlocklistHandler.Factory(Duration.ofMillis(100L))) .buildAndStart(); final ResourceManagerGateway resourceManagerGateway = resourceManager.getSelfGateway(ResourceManagerGateway.class); resourceManagerGateway.notifyNewBlockedNodes( Collections.singleton( new BlockedNode("node", "Test cause", System.currentTimeMillis()))); triggerRequirementsCheckFuture.get(); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, String.valueOf(Path.DELIMITER)); }
@Test public void testListPlaceholderTilde() throws Exception { final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path placeholderTildeEnd = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir( new Path(container, String.format("%s~", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new S3ObjectListService(session, acl).list(container, new DisabledListProgressListener()).contains(placeholderTildeEnd)); final Path placeholderTildeStart = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir( new Path(container, String.format("~%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new S3ObjectListService(session, acl).list(container, new DisabledListProgressListener()).contains(placeholderTildeStart)); assertTrue(new S3ObjectListService(session, acl).list(placeholderTildeEnd, new DisabledListProgressListener()).isEmpty()); assertTrue(new S3ObjectListService(session, acl).list(placeholderTildeStart, new DisabledListProgressListener()).isEmpty()); new S3DefaultDeleteFeature(session).delete(Arrays.asList(placeholderTildeEnd, placeholderTildeStart), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@PUT @Path("{id}") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public Response updateNetwork(@PathParam("id") String id, InputStream input) throws IOException { log.trace(String.format(MESSAGE, "UPDATE " + id)); String inputStr = IOUtils.toString(input, REST_UTF8); if (!haService.isActive() && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) { return syncPut(haService, NETWORKS, id, inputStr); } final NeutronNetwork net = (NeutronNetwork) jsonToModelEntity(inputStr, NeutronNetwork.class); adminService.updateNetwork(net); return status(Response.Status.OK).build(); }
@Test public void testUpdateNetworkWithUpdatingOperation() { mockOpenstackNetworkAdminService.updateNetwork(anyObject()); replay(mockOpenstackNetworkAdminService); expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes(); replay(mockOpenstackHaService); final WebTarget wt = target(); InputStream jsonStream = OpenstackNetworkWebResourceTest.class .getResourceAsStream("openstack-network.json"); Response response = wt.path(PATH + "/396f12f8-521e-4b91-8e21-2e003500433a") .request(MediaType.APPLICATION_JSON_TYPE) .put(Entity.json(jsonStream)); final int status = response.getStatus(); assertThat(status, is(200)); verify(mockOpenstackNetworkAdminService); }
public Properties getProperties() { return properties; }
@Test public void testHibernateTypesOverrideProperties() { assertEquals("ghi", Configuration.INSTANCE.getProperties().getProperty("hibernate.types.def")); }
public static void rethrowIOException(Throwable cause) throws IOException { if (cause instanceof IOException) { throw (IOException) cause; } else if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else if (cause instanceof Error) { throw (Error) cause; } else { throw new IOException(cause.getMessage(), cause); } }
@Test public void testRethrowErrorAsIOException() throws IOException { Error error = new Error("test"); try { rethrowIOException(error); fail("Should rethrow Error"); } catch (Error e) { assertSame(error, e); } }
public static <T> T toObj(byte[] json, Class<T> cls) { try { return mapper.readValue(json, cls); } catch (Exception e) { throw new NacosDeserializationException(cls, e); } }
@Test void testToObjForTypeWithException() { assertThrows(NacosDeserializationException.class, () -> { JacksonUtils.toObj("aaa", TypeUtils.parameterize(JsonNode.class)); }); }
@Override public WhitelistedSite update(WhitelistedSite oldWhitelistedSite, WhitelistedSite whitelistedSite) { if (oldWhitelistedSite == null || whitelistedSite == null) { throw new IllegalArgumentException("Neither the old nor the new site may be null"); } return repository.update(oldWhitelistedSite, whitelistedSite); }
@Test public void update_success() { WhitelistedSite oldSite = Mockito.mock(WhitelistedSite.class); WhitelistedSite newSite = Mockito.mock(WhitelistedSite.class); service.update(oldSite, newSite); Mockito.verify(repository).update(oldSite, newSite); }
public boolean writable(final SelectableChannel channel) { return writable((Object) channel); }
@Test(timeout = 5000) public void testWritable() { ZContext ctx = new ZContext(); ZPoller poller = new ZPoller(ctx); try { Socket socket = ctx.createSocket(SocketType.XPUB); poller.register(socket, ZPoller.OUT); boolean rc = poller.writable(socket); assertThat(rc, is(false)); rc = poller.isWritable(socket); assertThat(rc, is(false)); rc = poller.pollout(socket); assertThat(rc, is(false)); } finally { poller.close(); ctx.close(); } }
public void updateSchema( PartitionSchema schema ) { if ( schema != null && schema.getName() != null ) { stepMeta.getStepPartitioningMeta().setPartitionSchema( schema ); } }
@Test public void metaIsNotUpdatedWithNameless() { PartitionSchema schema = new PartitionSchema( null, Collections.<String>emptyList() ); StepPartitioningMeta meta = mock( StepPartitioningMeta.class ); when( stepMeta.getStepPartitioningMeta() ).thenReturn( meta ); settings.updateSchema( null ); verify( meta, never() ).setPartitionSchema( any( PartitionSchema.class ) ); }
@Override void toPdf() throws DocumentException { final List<CounterError> errors = counter.getErrors(); if (errors.isEmpty()) { addToDocument(new Phrase(getString("Aucune_erreur"), normalFont)); } else { writeErrors(errors); } }
@Test public void testCounterError() throws IOException { final Counter errorCounter = new Counter(Counter.ERROR_COUNTER_NAME, null); final Collector collector = new Collector("test", Collections.singletonList(errorCounter)); final JavaInformations javaInformations = new JavaInformations(null, true); final ByteArrayOutputStream output = new ByteArrayOutputStream(); final PdfReport pdfReport = new PdfReport(collector, false, Collections.singletonList(javaInformations), Period.TOUT, output); while (errorCounter.getErrorsCount() < Counter.MAX_ERRORS_COUNT) { errorCounter.addErrors(Collections.singletonList(new CounterError("erreur", null))); } pdfReport.toPdf(); assertNotEmptyAndClear(output); final HttpServletRequest httpRequest = createNiceMock(HttpServletRequest.class); expect(httpRequest.getAttribute(CounterError.REQUEST_KEY)).andReturn("/test GET"); expect(httpRequest.getRemoteUser()).andReturn("me"); replay(httpRequest); CounterError.bindRequest(httpRequest); errorCounter.addErrors(Collections.singletonList(new CounterError("with request", null))); CounterError.unbindRequest(); verify(httpRequest); final PdfReport pdfReport2 = new PdfReport(collector, false, Collections.singletonList(javaInformations), Period.TOUT, output); pdfReport2.toPdf(); assertNotEmptyAndClear(output); }
public static String lastElement(List<String> strings) { checkArgument(!strings.isEmpty(), "empty list"); return strings.get(strings.size() - 1); }
@Test public void testLastElementSingle() { assertEquals("first", lastElement(l("first"))); }
@Override public void updateIndices(SegmentDirectory.Writer segmentWriter) throws Exception { Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter); if (columnOperationsMap.isEmpty()) { return; } for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) { String column = entry.getKey(); List<Operation> operations = entry.getValue(); for (Operation operation : operations) { switch (operation) { case DISABLE_FORWARD_INDEX: // Deletion of the forward index will be handled outside the index handler to ensure that other index // handlers that need the forward index to construct their own indexes will have it available. _tmpForwardIndexColumns.add(column); break; case ENABLE_FORWARD_INDEX: ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false); if (columnMetadata.hasDictionary()) { if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException(String.format( "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column)); } } else { if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException( String.format("Dictionary should not exist after rebuilding forward index for raw column: %s", column)); } } break; case DISABLE_DICTIONARY: Set<String> newForwardIndexDisabledColumns = FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(), _fieldIndexConfigs); if (newForwardIndexDisabledColumns.contains(column)) { removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter); if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException( String.format("Dictionary should not exist after disabling dictionary for column: %s", column)); } } else { disableDictionaryAndCreateRawForwardIndex(column, segmentWriter); } break; case ENABLE_DICTIONARY: createDictBasedForwardIndex(column, segmentWriter); if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) { throw new IllegalStateException(String.format("Forward index was not created for column: %s", column)); } break; case CHANGE_INDEX_COMPRESSION_TYPE: rewriteForwardIndexForCompressionChange(column, segmentWriter); break; default: throw new IllegalStateException("Unsupported operation for column " + column); } } } }
@Test public void testDisableDictionaryForMultipleColumns() throws Exception { IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig); SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory); SegmentDirectory segmentLocalFSDirectory = new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap); SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter(); // Column 1 Random rand = new Random(); int randomIdx = rand.nextInt(DICT_ENABLED_COLUMNS_WITH_FORWARD_INDEX.size()); String column1 = DICT_ENABLED_COLUMNS_WITH_FORWARD_INDEX.get(randomIdx); indexLoadingConfig.addNoDictionaryColumns(column1); // Column 2 randomIdx = rand.nextInt(DICT_ENABLED_COLUMNS_WITH_FORWARD_INDEX.size()); String column2 = DICT_ENABLED_COLUMNS_WITH_FORWARD_INDEX.get(randomIdx); indexLoadingConfig.addNoDictionaryColumns(column2); ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema); fwdIndexHandler.updateIndices(writer); fwdIndexHandler.postUpdateIndicesCleanup(writer); segmentLocalFSDirectory.close(); // Column1 validation. ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column1); testIndexExists(column1, StandardIndexes.forward()); validateIndexMap(column1, false, false); // All the columns are dimensions. So default compression type is LZ4. validateForwardIndex(column1, CompressionCodec.LZ4, metadata.isSorted()); // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change. validateMetadataProperties(column1, false, 0, metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(), false); // Column2 validation. metadata = existingSegmentMetadata.getColumnMetadataFor(column2); testIndexExists(column2, StandardIndexes.forward()); validateIndexMap(column2, false, false); // All the columns are dimensions. So default compression type is LZ4. validateForwardIndex(column2, CompressionCodec.LZ4, metadata.isSorted()); // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change. validateMetadataProperties(column2, false, 0, metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(), false); }
@Override public <R> R eval(Mode mode, String luaScript, ReturnType returnType) { return eval(mode, luaScript, returnType, Collections.emptyList()); }
@Test public void testEval() { RScript script = redisson.getScript(StringCodec.INSTANCE); List<Object> res = script.eval(RScript.Mode.READ_ONLY, "return {'1','2','3.3333','foo',nil,'bar'}", RScript.ReturnType.MULTI, Collections.emptyList()); assertThat(res).containsExactly("1", "2", "3.3333", "foo"); }
@Override public DescriptiveUrlBag toUrl(final Path file) { final DescriptiveUrlBag list = new DescriptiveUrlBag(); if(new HostPreferences(session.getHost()).getBoolean("s3.bucket.virtualhost.disable")) { list.addAll(new DefaultUrlProvider(session.getHost()).toUrl(file)); } else { list.add(this.toUrl(file, session.getHost().getProtocol().getScheme(), session.getHost().getPort())); list.add(this.toUrl(file, Scheme.http, 80)); if(StringUtils.isNotBlank(session.getHost().getWebURL())) { // Only include when custom domain is configured list.addAll(new HostWebUrlProvider(session.getHost()).toUrl(file)); } } if(file.isFile()) { if(!session.getHost().getCredentials().isAnonymousLogin()) { // X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less // than 604800 seconds // In one hour list.add(this.toSignedUrl(file, (int) TimeUnit.HOURS.toSeconds(1))); // Default signed URL expiring in 24 hours. list.add(this.toSignedUrl(file, (int) TimeUnit.SECONDS.toSeconds( new HostPreferences(session.getHost()).getInteger("s3.url.expire.seconds")))); // 1 Week list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(7))); switch(session.getSignatureVersion()) { case AWS2: // 1 Month list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(30))); // 1 Year list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(365))); break; case AWS4HMACSHA256: break; } } } // AWS services require specifying an Amazon S3 bucket using S3://bucket list.add(new DescriptiveUrl(URI.create(String.format("s3://%s%s", containerService.getContainer(file).getName(), file.isRoot() ? Path.DELIMITER : containerService.isContainer(file) ? Path.DELIMITER : String.format("/%s", URIEncoder.encode(containerService.getKey(file))))), DescriptiveUrl.Type.provider, MessageFormat.format(LocaleFactory.localizedString("{0} URL"), "S3"))); // Filter by matching container name final Optional<Set<Distribution>> filtered = distributions.entrySet().stream().filter(entry -> new SimplePathPredicate(containerService.getContainer(file)).test(entry.getKey())) .map(Map.Entry::getValue).findFirst(); if(filtered.isPresent()) { // Add CloudFront distributions for(Distribution distribution : filtered.get()) { list.addAll(new DistributionUrlProvider(distribution).toUrl(file)); } } return list; }
@Test public void testToSignedUrlThirdparty() throws Exception { final S3Session session = new S3Session(new Host(new S3Protocol(), "s.greenqloud.com", new Credentials("k", "s"))) { @Override public RequestEntityRestStorageService getClient() { try { return this.connect(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); } catch(BackgroundException e) { fail(); throw new RuntimeException(e); } } }; final S3UrlProvider provider = new S3UrlProvider(session, Collections.emptyMap(), new DisabledPasswordStore() { @Override public String findLoginPassword(final Host bookmark) { return "k"; } }); assertNotNull( provider.toUrl(new Path("/test-eu-west-1-cyberduck/test", EnumSet.of(Path.Type.file))).find(DescriptiveUrl.Type.signed) ); }
@Override public Object merge(T mergingValue, T existingValue) { if (existingValue == null) { return null; } return existingValue.getRawValue(); }
@Test @SuppressWarnings("ConstantConditions") public void merge_mergingNull() { MapMergeTypes existing = mergingValueWithGivenValue(EXISTING); MapMergeTypes merging = null; assertEquals(EXISTING, mergePolicy.merge(merging, existing)); }
public static void validateSourceConfig(SourceConfig sourceConfig, ValidatableFunctionPackage sourceFunction) { try { ConnectorDefinition defn = sourceFunction.getFunctionMetaData(ConnectorDefinition.class); if (defn != null && defn.getSourceConfigClass() != null) { Class configClass = Class.forName(defn.getSourceConfigClass(), true, sourceFunction.getClassLoader()); validateSourceConfig(sourceConfig, configClass); } } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not find source config class"); } }
@Test public void testValidateConfig() { SourceConfig sourceConfig = createSourceConfig(); // Good config sourceConfig.getConfigs().put("configParameter", "Test"); SourceConfigUtils.validateSourceConfig(sourceConfig, SourceConfigUtilsTest.TestSourceConfig.class); // Bad config sourceConfig.getConfigs().put("configParameter", null); Exception e = expectThrows(IllegalArgumentException.class, () -> SourceConfigUtils.validateSourceConfig(sourceConfig, SourceConfigUtilsTest.TestSourceConfig.class)); assertTrue(e.getMessage().contains("Could not validate source config: Field 'configParameter' cannot be null!")); }
public static Stream<Vertex> depthFirst(Graph g) { return depthFirst(g.getRoots()); }
@Test public void testDFSVertex() { DepthFirst.depthFirst(g.getRoots()).forEach(v -> visitCount.incrementAndGet()); assertEquals("It should visit each node once", visitCount.get(), 3); }
static void handleUpgrade(Namespace namespace, Admin adminClient) throws TerseException { handleUpgradeOrDowngrade("upgrade", namespace, adminClient, FeatureUpdate.UpgradeType.UPGRADE); }
@Test public void testHandleUpgrade() { Map<String, Object> namespace = new HashMap<>(); namespace.put("metadata", "3.3-IV1"); namespace.put("feature", Collections.singletonList("foo.bar=6")); namespace.put("dry_run", false); String upgradeOutput = ToolsTestUtils.captureStandardOut(() -> { Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleUpgrade(new Namespace(namespace), buildAdminClient())); assertTrue(t.getMessage().contains("1 out of 2 operation(s) failed.")); }); assertEquals(format("foo.bar was upgraded to 6.%n" + "Could not upgrade metadata.version to 5. Can't upgrade to lower version."), upgradeOutput); }
protected void initializePipeline(DeviceId deviceId) { // for inbound table transition connectTables(deviceId, Constants.STAT_INBOUND_TABLE, Constants.VTAP_INBOUND_TABLE); connectTables(deviceId, Constants.VTAP_INBOUND_TABLE, Constants.DHCP_TABLE); // for DHCP and vTag table transition connectTables(deviceId, Constants.DHCP_TABLE, Constants.VTAG_TABLE); // for vTag and ARP table transition connectTables(deviceId, Constants.VTAG_TABLE, Constants.ARP_TABLE); // for ARP and ACL table transition connectTables(deviceId, Constants.ARP_TABLE, Constants.ACL_INGRESS_TABLE); // for ACL and JUMP table transition connectTables(deviceId, Constants.ACL_EGRESS_TABLE, Constants.JUMP_TABLE); // for JUMP table transition // we need JUMP table for bypassing routing table which contains large // amount of flow rules which might cause performance degradation during // table lookup setupJumpTable(deviceId); // for setting up default FLAT table behavior which is NORMAL setupFlatTable(deviceId); // for outbound table transition connectTables(deviceId, Constants.STAT_OUTBOUND_TABLE, Constants.VTAP_OUTBOUND_TABLE); connectTables(deviceId, Constants.VTAP_OUTBOUND_TABLE, Constants.FORWARDING_TABLE); // for PRE_FLAT and FLAT table transition connectTables(deviceId, Constants.PRE_FLAT_TABLE, Constants.FLAT_TABLE); }
@Test public void testInitializePipeline() { fros = Sets.newConcurrentHashSet(); target.initializePipeline(DEVICE_ID); assertEquals("Flow Rule size did not match", 12, fros.size()); Map<Integer, Integer> fromToTableMap = Maps.newConcurrentMap(); fromToTableMap.put(STAT_INBOUND_TABLE, VTAP_INBOUND_TABLE); fromToTableMap.put(VTAP_INBOUND_TABLE, DHCP_TABLE); fromToTableMap.put(DHCP_TABLE, VTAG_TABLE); fromToTableMap.put(VTAG_TABLE, ARP_TABLE); fromToTableMap.put(ARP_TABLE, ACL_INGRESS_TABLE); fromToTableMap.put(ACL_EGRESS_TABLE, JUMP_TABLE); fromToTableMap.put(STAT_OUTBOUND_TABLE, VTAP_OUTBOUND_TABLE); fromToTableMap.put(VTAP_OUTBOUND_TABLE, FORWARDING_TABLE); fromToTableMap.put(PRE_FLAT_TABLE, FLAT_TABLE); fros.stream().map(FlowRuleOperation::rule).forEach(fr -> { if (fr.tableId() != JUMP_TABLE && fr.tableId() != FLAT_TABLE) { assertEquals("To Table did not match,", fromToTableMap.get(fr.tableId()), fr.treatment().tableTransition().tableId()); } }); }
@Override protected void runTask() { LOGGER.debug("Updating currently processed jobs... "); convertAndProcessJobs(new ArrayList<>(backgroundJobServer.getJobSteward().getJobsInProgress()), this::updateCurrentlyProcessingJob); }
@Test void noExceptionIsThrownIfAJobHasSucceededWhileUpdateProcessingIsCalled() { // GIVEN final Job job = anEnqueuedJob().withId().build(); startProcessingJob(job); // WHEN job.succeeded(); runTask(task); // THEN assertThat(logger).hasNoWarnLogMessages(); verify(storageProvider, never()).save(anyList()); }
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getData(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(measureDto, value, data); case LONG: return toLongMeasure(measureDto, value, data); case DOUBLE: return toDoubleMeasure(measureDto, value, data); case BOOLEAN: return toBooleanMeasure(measureDto, value, data); case STRING: return toStringMeasure(measureDto, data); case LEVEL: return toLevelMeasure(measureDto, data); case NO_VALUE: return toNoValueMeasure(measureDto); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
@Test public void toMeasure_returns_no_value_if_dto_has_no_data_for_Level_Metric() { Optional<Measure> measure = underTest.toMeasure(EMPTY_MEASURE_DTO, SOME_LEVEL_METRIC); assertThat(measure).isPresent(); assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE); }
public static Socket acceptWithoutTimeout(ServerSocket serverSocket) throws IOException { Preconditions.checkArgument( serverSocket.getSoTimeout() == 0, "serverSocket SO_TIMEOUT option must be 0"); while (true) { try { return serverSocket.accept(); } catch (SocketTimeoutException exception) { // This should be impossible given that the socket timeout is set to zero // which indicates an infinite timeout. This is due to the underlying JDK-8237858 // bug. We retry the accept call indefinitely to replicate the expected behavior. } } }
@Test void testAcceptWithoutTimeoutDefaultTimeout() throws IOException { // Default timeout (should be zero) final Socket expected = new Socket(); try (final ServerSocket serverSocket = new ServerSocket(0) { @Override public Socket accept() { return expected; } }) { assertThat(NetUtils.acceptWithoutTimeout(serverSocket)).isEqualTo(expected); } }
@Override public Optional<GaugeMetricFamilyMetricsCollector> export(final String pluginType) { if (null == ProxyContext.getInstance().getContextManager()) { return Optional.empty(); } GaugeMetricFamilyMetricsCollector result = MetricsCollectorRegistry.get(config, pluginType); result.cleanMetrics(); MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts(); result.addMetric(Collections.singletonList("database_count"), metaDataContexts.getMetaData().getDatabases().size()); result.addMetric(Collections.singletonList("storage_unit_count"), getStorageUnitCount(metaDataContexts)); return Optional.of(result); }
@Test void assertExportWithoutContextManager() { when(ProxyContext.getInstance().getContextManager()).thenReturn(null); assertFalse(new ProxyMetaDataInfoExporter().export("FIXTURE").isPresent()); }
@Override public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) { long datetime = readDatetimeV2FromPayload(payload); return 0L == datetime ? MySQLTimeValueUtils.DATETIME_OF_ZERO : readDatetime(columnDef, datetime, payload); }
@Test void assertReadWithoutFraction1() { columnDef.setColumnMeta(1); when(payload.readInt1()).thenReturn(0xfe, 0xf3, 0xff, 0x7e, 0xfb, 0x00); LocalDateTime expected = LocalDateTime.of(9999, 12, 31, 23, 59, 59, 0); assertThat(new MySQLDatetime2BinlogProtocolValue().read(columnDef, payload), is(Timestamp.valueOf(expected))); }
@Override public KeyValueIterator<Windowed<K>, V> backwardFindSessions(final K key, final long earliestSessionEndTime, final long latestSessionStartTime) { Objects.requireNonNull(key, "key cannot be null"); final Bytes bytesKey = keyBytes(key); return new MeteredWindowedKeyValueIterator<>( wrapped().backwardFindSessions( bytesKey, earliestSessionEndTime, latestSessionStartTime ), fetchSensor, iteratorDurationSensor, streamsMetrics, serdes::keyFrom, serdes::valueFrom, time, numOpenIterators, openIterators ); }
@Test public void shouldThrowNullPointerOnBackwardFindSessionsRangeIfToIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFindSessions("a", null, 0, 0)); }
static String encodeBytes(BytesType bytesType) { byte[] value = bytesType.getValue(); int length = value.length; int mod = length % MAX_BYTE_LENGTH; byte[] dest; if (mod != 0) { int padding = MAX_BYTE_LENGTH - mod; dest = new byte[length + padding]; System.arraycopy(value, 0, dest, 0, length); } else { dest = value; } return Numeric.toHexStringNoPrefix(dest); }
@Test public void testStaticBytes() { Bytes staticBytes = new Bytes6(new byte[] {0, 1, 2, 3, 4, 5}); assertEquals( TypeEncoder.encodeBytes(staticBytes), ("0001020304050000000000000000000000000000000000000000000000000000")); Bytes empty = new Bytes1(new byte[] {0}); assertEquals( TypeEncoder.encodeBytes(empty), ("0000000000000000000000000000000000000000000000000000000000000000")); Bytes ones = new Bytes1(new byte[] {127}); assertEquals( TypeEncoder.encodeBytes(ones), ("7f00000000000000000000000000000000000000000000000000000000000000")); Bytes dave = new Bytes4("dave".getBytes()); assertEquals( TypeEncoder.encodeBytes(dave), ("6461766500000000000000000000000000000000000000000000000000000000")); }
protected boolean databaseForBothDbInterfacesIsTheSame( DatabaseInterface primary, DatabaseInterface secondary ) { if ( primary == null || secondary == null ) { throw new IllegalArgumentException( "DatabaseInterface shouldn't be null!" ); } if ( primary.getPluginId() == null || secondary.getPluginId() == null ) { return false; } if ( primary.getPluginId().equals( secondary.getPluginId() ) ) { return true; } return primary.getClass().isAssignableFrom( secondary.getClass() ); }
@Test public void databases_WithDifferentDbConnTypes_AreTheSame_IfOneConnTypeIsSubsetOfAnother_2LevelHierarchy() { DatabaseInterface mssqlServerDatabaseMeta = new MSSQLServerDatabaseMeta(); mssqlServerDatabaseMeta.setPluginId( "MSSQL" ); DatabaseInterface mssqlServerNativeDatabaseMeta = new MSSQLServerNativeDatabaseMeta(); mssqlServerNativeDatabaseMeta.setPluginId( "MSSQLNATIVE" ); assertTrue( databaseMeta.databaseForBothDbInterfacesIsTheSame( mssqlServerDatabaseMeta, mssqlServerNativeDatabaseMeta ) ); }
@Override public PMML_MODEL getPMMLModelType() { return PMML_MODEL.SCORECARD_MODEL; }
@Test void getPMMLModelType() { assertThat(evaluator.getPMMLModelType()).isEqualTo(PMML_MODEL.SCORECARD_MODEL); }
@GetMapping("/name/{name}") public ShenyuAdminResult queryByName(@PathVariable("name") @Valid final String name) { List<TagVO> tagVO = tagService.findByQuery(name); return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, tagVO); }
@Test public void testQueryByName() throws Exception { List<TagVO> list = new ArrayList<>(); list.add(buildTagVO()); given(tagService.findByQuery(anyString())).willReturn(list); this.mockMvc.perform(MockMvcRequestBuilders.get("/tag/name/{name}", "123")) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS))) .andReturn(); }
@Override public void accept(final MeterEntity entity, final DataTable value) { setEntityId(entity.id()); setServiceId(entity.serviceId()); this.value.setMinValue(value); }
@Test public void testAccept() { function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1); assertThat(function.getValue()).isEqualTo(HTTP_CODE_COUNT_1); function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_2); assertThat(function.getValue()).isEqualTo(HTTP_CODE_COUNT_3); }
@Audit @Operation(summary = "command", description = "Command for component by [host,component,service,cluster]") @PostMapping public ResponseEntity<CommandVO> command(@RequestBody @Validated CommandReq commandReq) { CommandDTO commandDTO = CommandConverter.INSTANCE.fromReq2DTO(commandReq); CommandVO commandVO = commandService.command(commandDTO); return ResponseEntity.success(commandVO); }
@Test void commandExecutesSuccessfully() { CommandReq commandReq = new CommandReq(); CommandVO commandVO = new CommandVO(); when(commandService.command(any(CommandDTO.class))).thenReturn(commandVO); ResponseEntity<CommandVO> response = commandController.command(commandReq); assertTrue(response.isSuccess()); assertEquals(commandVO, response.getData()); }
public static List<ProcessId> getEnabledProcesses(AppSettings settings) { if (!isClusterEnabled(settings)) { return asList(ProcessId.ELASTICSEARCH, ProcessId.WEB_SERVER, ProcessId.COMPUTE_ENGINE); } NodeType nodeType = NodeType.parse(settings.getValue(CLUSTER_NODE_TYPE.getKey()).orElse("")); switch (nodeType) { case APPLICATION: return asList(ProcessId.WEB_SERVER, ProcessId.COMPUTE_ENGINE); case SEARCH: return singletonList(ProcessId.ELASTICSEARCH); default: throw new IllegalArgumentException("Unexpected node type " + nodeType); } }
@Test public void getEnabledProcesses_returns_all_processes_in_standalone_mode() { TestAppSettings settings = new TestAppSettings(of(CLUSTER_ENABLED.getKey(), "false")); assertThat(ClusterSettings.getEnabledProcesses(settings)).containsOnly(COMPUTE_ENGINE, ELASTICSEARCH, WEB_SERVER); }
@Override public void start() { }
@Test public void start() { provider.start(); }
public static <T> T createInstance(String userClassName, Class<T> xface, ClassLoader classLoader) { Class<?> theCls; try { theCls = Class.forName(userClassName, true, classLoader); } catch (ClassNotFoundException | NoClassDefFoundError cnfe) { throw new RuntimeException("User class must be in class path", cnfe); } if (!xface.isAssignableFrom(theCls)) { throw new RuntimeException(userClassName + " does not implement " + xface.getName()); } Class<T> tCls = (Class<T>) theCls.asSubclass(xface); T result; try { Constructor<T> meth = (Constructor<T>) constructorCache.get(theCls); if (null == meth) { meth = tCls.getDeclaredConstructor(); meth.setAccessible(true); constructorCache.put(theCls, meth); } result = meth.newInstance(); } catch (InstantiationException ie) { throw new RuntimeException("User class must be concrete", ie); } catch (NoSuchMethodException e) { throw new RuntimeException("User class must have a no-arg constructor", e); } catch (IllegalAccessException e) { throw new RuntimeException("User class must have a public constructor", e); } catch (InvocationTargetException e) { throw new RuntimeException("User class constructor throws exception", e); } return result; }
@Test public void testCreateTypedInstanceConstructorThrowsException() { try { createInstance(ThrowExceptionClass.class.getName(), aInterface.class, classLoader); fail("Should fail to load class whose constructor throws exceptions"); } catch (RuntimeException re) { assertTrue(re.getCause() instanceof InvocationTargetException); } }
@Udf public Map<String, String> records(@UdfParameter final String jsonObj) { if (jsonObj == null) { return null; } final JsonNode node = UdfJsonMapper.parseJson(jsonObj); if (node.isMissingNode() || !node.isObject()) { return null; } final Map<String, String> ret = new HashMap<>(node.size()); node.fieldNames().forEachRemaining(k -> { final JsonNode value = node.get(k); if (value instanceof TextNode) { ret.put(k, value.textValue()); } else { ret.put(k, value.toString()); } }); return ret; }
@Test public void shouldReturnNullForJsonNull() { assertNull(udf.records("null")); }
@VisibleForTesting void validateDeptNameUnique(Long id, Long parentId, String name) { DeptDO dept = deptMapper.selectByParentIdAndName(parentId, name); if (dept == null) { return; } // If id is null, there is no need to compare whether it is the department with the same id if (id == null) { throw exception(DEPT_NAME_DUPLICATE); } if (ObjectUtil.notEqual(dept.getId(), id)) { throw exception(DEPT_NAME_DUPLICATE); } }
@Test public void testValidateNameUnique_duplicate() { // mock data DeptDO deptDO = randomPojo(DeptDO.class); deptMapper.insert(deptDO); // prepare parameters Long id = randomLongId(); Long parentId = deptDO.getParentId(); String name = deptDO.getName(); // invoke and assert the exception assertServiceException(() -> deptService.validateDeptNameUnique(id, parentId, name), DEPT_NAME_DUPLICATE); }
public void fillMaxSpeed(Graph graph, EncodingManager em) { // In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info, // but now we have and can fill the country-dependent max_speed value where missing. EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class); fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL); }
@Test public void testUnsupportedCountry() { ReaderWay way = new ReaderWay(0L); way.setTag("country", Country.AIA); way.setTag("highway", "primary"); EdgeIteratorState edge = createEdge(way).set(urbanDensity, CITY); calc.fillMaxSpeed(graph, em); assertEquals(UNSET_SPEED, edge.get(maxSpeedEnc), 1); }
@Override public ThreadPoolRunStateInfo supplement(ThreadPoolRunStateInfo poolRunStateInfo) { long used = MemoryUtil.heapMemoryUsed(); long max = MemoryUtil.heapMemoryMax(); String memoryProportion = StringUtil.newBuilder( "Allocation: ", ByteConvertUtil.getPrintSize(used), " / Maximum available: ", ByteConvertUtil.getPrintSize(max)); poolRunStateInfo.setCurrentLoad(poolRunStateInfo.getCurrentLoad() + "%"); poolRunStateInfo.setPeakLoad(poolRunStateInfo.getPeakLoad() + "%"); String ipAddress = hippo4jInetUtils.findFirstNonLoopBackHostInfo().getIpAddress(); poolRunStateInfo.setHost(ipAddress); poolRunStateInfo.setMemoryProportion(memoryProportion); poolRunStateInfo.setFreeMemory(ByteConvertUtil.getPrintSize(Math.subtractExact(max, used))); String threadPoolId = poolRunStateInfo.getTpId(); ThreadPoolExecutorHolder executorHolder = ThreadPoolExecutorRegistry.getHolder(threadPoolId); ThreadPoolExecutor pool = executorHolder.getExecutor(); String rejectedName; rejectedName = pool.getRejectedExecutionHandler().getClass().getSimpleName(); poolRunStateInfo.setRejectedName(rejectedName); ManyThreadPoolRunStateInfo manyThreadPoolRunStateInfo = convert(poolRunStateInfo); manyThreadPoolRunStateInfo.setIdentify(CLIENT_IDENTIFICATION_VALUE); String active = environment.getProperty("spring.profiles.active", "UNKNOWN"); manyThreadPoolRunStateInfo.setActive(active.toUpperCase()); String threadPoolState = ThreadPoolStatusHandler.getThreadPoolState(pool); manyThreadPoolRunStateInfo.setState(threadPoolState); return manyThreadPoolRunStateInfo; }
@Test void testSupplement() { long used = MemoryUtil.heapMemoryUsed(); long max = MemoryUtil.heapMemoryMax(); String memoryProportion = StringUtil.newBuilder( "Allocation: ", ByteConvertUtil.getPrintSize(used), " / Maximum available: ", ByteConvertUtil.getPrintSize(max)); String ipAddress = "127.0.0.1"; poolRunStateInfo.setHost(ipAddress); poolRunStateInfo.setMemoryProportion(memoryProportion); poolRunStateInfo.setFreeMemory(ByteConvertUtil.getPrintSize(Math.subtractExact(max, used))); String threadPoolId = poolRunStateInfo.getTpId(); try (final MockedStatic<ThreadPoolExecutorRegistry> globalThreadPoolManage = mockStatic(ThreadPoolExecutorRegistry.class)) { globalThreadPoolManage.when(() -> ThreadPoolExecutorRegistry.getHolder("1")).thenReturn(new ThreadPoolExecutorHolder()); ThreadPoolExecutorHolder executorHolder = ThreadPoolExecutorRegistry.getHolder(threadPoolId); Assertions.assertNotNull(executorHolder); } ThreadPoolExecutorHolder threadPoolExecutorHolderMock = mock(ThreadPoolExecutorHolder.class); when(threadPoolExecutorHolderMock.getExecutor()).thenReturn(new ThreadPoolExecutor(2, 2, 2000, TimeUnit.SECONDS, new SynchronousQueue<>())); ThreadPoolExecutor pool = threadPoolExecutorHolderMock.getExecutor(); Assertions.assertNotNull(pool); String rejectedName; rejectedName = "java.util.concurrent.ThreadPoolExecutor.AbortPolicy"; poolRunStateInfo.setRejectedName(rejectedName); ManyThreadPoolRunStateInfo manyThreadPoolRunStateInfo = BeanUtil.convert(poolRunStateInfo, ManyThreadPoolRunStateInfo.class); manyThreadPoolRunStateInfo.setIdentify(CLIENT_IDENTIFICATION_VALUE); String active = environment.getProperty("spring.profiles.active", "UNKNOWN"); manyThreadPoolRunStateInfo.setActive("TRUE"); String threadPoolState = ThreadPoolStatusHandler.getThreadPoolState(pool); manyThreadPoolRunStateInfo.setState(threadPoolState); Assertions.assertNotNull(manyThreadPoolRunStateInfo); }
public boolean matchStage(StageConfigIdentifier stageIdentifier, StageEvent event) { return this.event.include(event) && appliesTo(stageIdentifier.getPipelineName(), stageIdentifier.getStageName()); }
@Test void anyPipelineShouldAlwaysMatch() { NotificationFilter filter = new NotificationFilter(GoConstants.ANY_PIPELINE, GoConstants.ANY_STAGE, StageEvent.Breaks, false); assertThat(filter.matchStage(new StageConfigIdentifier("cruise", "dev"), StageEvent.Breaks)).isTrue(); }
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) { final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps()); map.put( MetricCollectors.RESOURCE_LABEL_PREFIX + StreamsConfig.APPLICATION_ID_CONFIG, applicationId ); // Streams client metrics aren't used in Confluent deployment possiblyConfigureConfluentTelemetry(map); return Collections.unmodifiableMap(map); }
@Test public void shouldSetLogAndContinueExceptionHandlerWhenFailOnDeserializationErrorFalse() { final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap(KsqlConfig.FAIL_ON_DESERIALIZATION_ERROR_CONFIG, false)); final Object result = ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG); assertThat(result, equalTo(LogMetricAndContinueExceptionHandler.class)); }
@Subscribe @AllowConcurrentEvents public void handleIndexDeletion(IndicesDeletedEvent event) { for (String index : event.indices()) { LOG.debug("Index \"{}\" has been deleted. Removing index range.", index); if (remove(index)) { auditEventSender.success(AuditActor.system(nodeId), ES_INDEX_RANGE_DELETE, ImmutableMap.of("index_name", index)); } } }
@Test @MongoDBFixtures("MongoIndexRangeServiceTest.json") public void testHandleIndexDeletion() throws Exception { assertThat(indexRangeService.findAll()).hasSize(2); localEventBus.post(IndicesDeletedEvent.create(Collections.singleton("graylog_1"))); assertThat(indexRangeService.findAll()).hasSize(1); }
public MeasureDto toMeasureDto(Measure measure, Metric metric, Component component) { MeasureDto out = new MeasureDto(); out.setMetricUuid(metric.getUuid()); out.setComponentUuid(component.getUuid()); out.setAnalysisUuid(analysisMetadataHolder.getUuid()); if (measure.hasQualityGateStatus()) { setAlert(out, measure.getQualityGateStatus()); } out.setValue(valueAsDouble(measure)); out.setData(data(measure)); return out; }
@Test @UseDataProvider("all_types_Measures") public void toMeasureDto_returns_Dto_without_alertStatus_nor_alertText_if_Measure_has_no_QualityGateStatus(Measure measure, Metric metric) { MeasureDto measureDto = underTest.toMeasureDto(measure, metric, SOME_COMPONENT); assertThat(measureDto.getAlertStatus()).isNull(); assertThat(measureDto.getAlertText()).isNull(); }
private Set<TimelineEntity> getEntities(Path dir, String entityType, TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve) throws IOException { // First sort the selected entities based on created/start time. Map<Long, Set<TimelineEntity>> sortedEntities = new TreeMap<>( new Comparator<Long>() { @Override public int compare(Long l1, Long l2) { return l2.compareTo(l1); } } ); dir = getNormalPath(dir); if (dir != null) { RemoteIterator<LocatedFileStatus> fileStatuses = fs.listFiles(dir, false); if (fileStatuses != null) { while (fileStatuses.hasNext()) { LocatedFileStatus locatedFileStatus = fileStatuses.next(); Path entityFile = locatedFileStatus.getPath(); if (!entityFile.getName() .contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) { continue; } try (BufferedReader reader = new BufferedReader( new InputStreamReader(fs.open(entityFile), StandardCharsets.UTF_8))) { TimelineEntity entity = readEntityFromFile(reader); if (!entity.getType().equals(entityType)) { continue; } if (!isTimeInRange(entity.getCreatedTime(), filters.getCreatedTimeBegin(), filters.getCreatedTimeEnd())) { continue; } if (filters.getRelatesTo() != null && !filters.getRelatesTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchRelatesTo(entity, filters.getRelatesTo())) { continue; } if (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchIsRelatedTo(entity, filters.getIsRelatedTo())) { continue; } if (filters.getInfoFilters() != null && !filters.getInfoFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchInfoFilters(entity, filters.getInfoFilters())) { continue; } if (filters.getConfigFilters() != null && !filters.getConfigFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchConfigFilters(entity, filters.getConfigFilters())) { continue; } if (filters.getMetricFilters() != null && !filters.getMetricFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchMetricFilters(entity, filters.getMetricFilters())) { continue; } if (filters.getEventFilters() != null && !filters.getEventFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchEventFilters(entity, filters.getEventFilters())) { continue; } TimelineEntity entityToBeReturned = createEntityToBeReturned( entity, dataToRetrieve.getFieldsToRetrieve()); Set<TimelineEntity> entitiesCreatedAtSameTime = sortedEntities.get(entityToBeReturned.getCreatedTime()); if (entitiesCreatedAtSameTime == null) { entitiesCreatedAtSameTime = new HashSet<TimelineEntity>(); } entitiesCreatedAtSameTime.add(entityToBeReturned); sortedEntities.put(entityToBeReturned.getCreatedTime(), entitiesCreatedAtSameTime); } } } } Set<TimelineEntity> entities = new HashSet<TimelineEntity>(); long entitiesAdded = 0; for (Set<TimelineEntity> entitySet : sortedEntities.values()) { for (TimelineEntity entity : entitySet) { entities.add(entity); ++entitiesAdded; if (entitiesAdded >= filters.getLimit()) { return entities; } } } return entities; }
@Test void testGetAllEntities() throws Exception { Set<TimelineEntity> result = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1", "app", null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); // All 4 entities will be returned assertEquals(4, result.size()); }
@Override public void enableDeepLinkInstallSource(boolean enable) { }
@Test public void enableDeepLinkInstallSource() { mSensorsAPI.enableDeepLinkInstallSource(true); Assert.assertFalse(mSensorsAPI.isNetworkRequestEnable()); }
@Override public UniquenessLevel getIndexUniquenessLevel() { return UniquenessLevel.SCHEMA_LEVEL; }
@Test void assertGetIndexUniquenessLevel() { assertThat(uniquenessLevelProvider.getIndexUniquenessLevel(), is(UniquenessLevel.SCHEMA_LEVEL)); }
public RepositoryMeta createRepository( String id, Map<String, Object> items ) { RepositoryMeta repositoryMeta; try { repositoryMeta = pluginRegistry.loadClass( RepositoryPluginType.class, id, RepositoryMeta.class ); repositoryMeta.populate( items, repositoriesMeta ); if ( repositoryMeta.getName() != null ) { Repository repository = pluginRegistry.loadClass( RepositoryPluginType.class, repositoryMeta.getId(), Repository.class ); repository.init( repositoryMeta ); repositoriesMeta.addRepository( repositoryMeta ); repositoriesMeta.writeData(); currentRepository = repositoryMeta; if ( !testRepository( repository ) ) { return null; } ( (AbstractRepository) repository ).create(); } } catch ( KettleException ke ) { log.logError( "Unable to load repository type", ke ); return null; } return repositoryMeta; }
@Test public void testCreateRepository() throws Exception { String id = ID; Map<String, Object> items = new HashMap<>(); when( pluginRegistry.loadClass( RepositoryPluginType.class, id, RepositoryMeta.class ) ) .thenReturn( repositoryMeta ); when( pluginRegistry.loadClass( RepositoryPluginType.class, repositoryMeta.getId(), Repository.class ) ) .thenReturn( repository ); when( repository.test() ).thenReturn( true ); RepositoryMeta result = controller.createRepository( id, items ); assertNotEquals( null, result ); when( repository.test() ).thenReturn( false ); result = controller.createRepository( id, items ); assertEquals( null, result ); doThrow( new KettleException( "forced exception" ) ).when( repositoriesMeta ).writeData(); result = controller.createRepository( id, items ); assertEquals( null, result ); }
@Description("Returns true if the input geometry is well formed") @ScalarFunction("ST_IsValid") @SqlType(BOOLEAN) public static boolean stIsValid(@SqlType(GEOMETRY_TYPE_NAME) Slice input) { try { return deserialize(input).isValid(); } catch (PrestoException e) { if (e.getCause() instanceof TopologyException) { return false; } throw e; } }
@Test public void testSTIsValid() { // empty geometries are valid assertValidGeometry("POINT EMPTY"); assertValidGeometry("MULTIPOINT EMPTY"); assertValidGeometry("LINESTRING EMPTY"); assertValidGeometry("MULTILINESTRING EMPTY"); assertValidGeometry("POLYGON EMPTY"); assertValidGeometry("MULTIPOLYGON EMPTY"); assertValidGeometry("GEOMETRYCOLLECTION EMPTY"); // valid geometries assertValidGeometry("POINT (1 2)"); assertValidGeometry("MULTIPOINT (1 2, 3 4)"); assertValidGeometry("LINESTRING (0 0, 1 2, 3 4)"); assertValidGeometry("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))"); assertValidGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"); assertValidGeometry("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))"); assertValidGeometry("GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (0 0, 1 2, 3 4), POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0)))"); assertValidGeometry("MULTIPOINT ((0 0), (0 1), (1 1), (0 1))"); // JTS considers LineStrings with repeated points valid/simple (it drops the dups) assertValidGeometry("LINESTRING (0 0, 0 1, 0 1, 1 1, 1 0, 0 0)"); // Valid but not simple assertValidGeometry("LINESTRING (0 0, -1 0.5, 0 1, 1 1, 1 0, 0 1, 0 0)"); // invalid geometries assertInvalidGeometry("POLYGON ((0 0, 1 1, 0 1, 1 0, 0 0))"); assertInvalidGeometry("POLYGON ((0 0, 0 1, 0 1, 1 1, 1 0, 0 0), (2 2, 2 3, 3 3, 3 2, 2 2))"); assertInvalidGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0), (2 2, 2 3, 3 3, 3 2, 2 2))"); assertInvalidGeometry("POLYGON ((0 0, 0 1, 2 1, 1 1, 1 0, 0 0))"); assertInvalidGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0), (0 1, 1 1, 0.5 0.5, 0 1))"); assertInvalidGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0), (0 0, 0.5 0.7, 1 1, 0.5 0.4, 0 0))"); assertInvalidGeometry("POLYGON ((0 0, -1 0.5, 0 1, 1 1, 1 0, 0 1, 0 0))"); assertInvalidGeometry("MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ((0.5 0.5, 0.5 2, 2 2, 2 0.5, 0.5 0.5)))"); assertInvalidGeometry("GEOMETRYCOLLECTION (POINT (1 2), POLYGON ((0 0, 0 1, 2 1, 1 1, 1 0, 0 0)))"); // corner cases assertFunction("ST_IsValid(ST_GeometryFromText(null))", BOOLEAN, null); assertFunction("geometry_invalid_reason(ST_GeometryFromText(null))", VARCHAR, null); }
public static Iterable<Snapshot> ancestorsOf(long snapshotId, Function<Long, Snapshot> lookup) { Snapshot start = lookup.apply(snapshotId); Preconditions.checkArgument(start != null, "Cannot find snapshot: %s", snapshotId); return ancestorsOf(start, lookup); }
@Test public void ancestorsOf() { Iterable<Snapshot> snapshots = SnapshotUtil.ancestorsOf(snapshotFork2Id, table::snapshot); expectedSnapshots(new long[] {snapshotFork2Id, snapshotFork1Id}, snapshots); Iterator<Snapshot> snapshotIter = snapshots.iterator(); while (snapshotIter.hasNext()) { snapshotIter.next(); } // Once snapshot iterator has been exhausted, call hasNext again to make sure it is stable. assertThat(snapshotIter).isExhausted(); }
@Override public void execute(Context context) { List<MeasureComputerWrapper> wrappers = Arrays.stream(measureComputers).map(ToMeasureWrapper.INSTANCE).toList(); validateMetrics(wrappers); measureComputersHolder.setMeasureComputers(sortComputers(wrappers)); }
@Test public void return_empty_list_when_no_metrics_neither_measure_computers() { ComputationStep underTest = new LoadMeasureComputersStep(holder); underTest.execute(new TestComputationStepContext()); assertThat(holder.getMeasureComputers()).isEmpty(); }
public static SlaveConnectionManager getInstance() { if ( slaveConnectionManager == null ) { slaveConnectionManager = new SlaveConnectionManager(); } return slaveConnectionManager; }
@Test public void shouldNotOverrideDefaultSSLContextIfKeystoreIsSet() throws Exception { System.setProperty( "javax.net.ssl.keyStore", "NONE" ); SlaveConnectionManager instance = SlaveConnectionManager.getInstance(); assertEquals( defaultContext, SSLContext.getDefault() ); }
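An aside on the focal method above: the lazy null-check is not thread-safe, since two threads can race past it and construct two instances. A hedged, generic sketch (not the project's code) of the initialization-on-demand holder idiom, which gives lazy, thread-safe initialization without locking:

public final class LazySingletonSketch {
    private LazySingletonSketch() {
    }

    private static final class Holder {
        // The JVM guarantees class initialization runs at most once, on first access.
        static final LazySingletonSketch INSTANCE = new LazySingletonSketch();
    }

    public static LazySingletonSketch getInstance() {
        return Holder.INSTANCE;
    }
}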
@Override public boolean fastPutIfAbsent(K key, V value, Duration ttl) { return get(fastPutIfAbsentAsync(key, value, ttl)); }
@Test public void testFastPutIfAbsentWithTTL() throws Exception { RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simpleTTL"); SimpleKey key = new SimpleKey("1"); SimpleValue value = new SimpleValue("2"); map.fastPutIfAbsent(key, value, Duration.ofSeconds(1)); assertThat(map.fastPutIfAbsent(key, new SimpleValue("3"), Duration.ofSeconds(1))).isFalse(); assertThat(map.get(key)).isEqualTo(value); Thread.sleep(1100); assertThat(map.fastPutIfAbsent(key, new SimpleValue("3"), Duration.ofSeconds(1))).isTrue(); assertThat(map.get(key)).isEqualTo(new SimpleValue("3")); assertThat(map.fastPutIfAbsent(key, new SimpleValue("4"), Duration.ofSeconds(1))).isFalse(); assertThat(map.get(key)).isEqualTo(new SimpleValue("3")); Thread.sleep(1100); assertThat(map.fastPutIfAbsent(key, new SimpleValue("4"), Duration.ofSeconds(1))).isTrue(); map.destroy(); }
public final Logger getLogger(final Class<?> clazz) { return getLogger(clazz.getName()); }
@Test public void testLoggerX() { Logger x = lc.getLogger("x"); assertNotNull(x); assertEquals("x", x.getName()); assertNull(x.getLevel()); assertEquals(Level.DEBUG, x.getEffectiveLevel()); }
boolean isWriteShareGroupStateSuccessful(List<PersisterStateBatch> stateBatches) {
    WriteShareGroupStateResult response;
    try {
        response = persister.writeState(new WriteShareGroupStateParameters.Builder()
            .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder<PartitionStateBatchData>()
                .setGroupId(this.groupId)
                .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(),
                    Collections.singletonList(PartitionFactory.newPartitionStateBatchData(
                        topicIdPartition.partition(), stateEpoch, startOffset, 0, stateBatches))))
                ).build()).build()).get();
    } catch (InterruptedException | ExecutionException e) {
        log.error("Failed to write the share group state for share partition: {}-{}", groupId, topicIdPartition, e);
        throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s",
            groupId, topicIdPartition), e);
    }

    if (response == null || response.topicsData() == null || response.topicsData().size() != 1) {
        log.error("Failed to write the share group state for share partition: {}-{}. Invalid state found: {}",
            groupId, topicIdPartition, response);
        throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s",
            groupId, topicIdPartition));
    }

    TopicData<PartitionErrorData> state = response.topicsData().get(0);
    // Uuid is an object type; compare with equals() rather than reference equality.
    if (!state.topicId().equals(topicIdPartition.topicId()) || state.partitions().size() != 1
        || state.partitions().get(0).partition() != topicIdPartition.partition()) {
        log.error("Failed to write the share group state for share partition: {}-{}. Invalid topic partition response: {}",
            groupId, topicIdPartition, response);
        throw new IllegalStateException(String.format("Failed to write the share group state for share partition %s-%s",
            groupId, topicIdPartition));
    }

    PartitionErrorData partitionData = state.partitions().get(0);
    if (partitionData.errorCode() != Errors.NONE.code()) {
        Exception exception = Errors.forCode(partitionData.errorCode()).exception(partitionData.errorMessage());
        log.error("Failed to write the share group state for share partition: {}-{} due to exception",
            groupId, topicIdPartition, exception);
        return false;
    }
    return true;
}
@Test public void testWriteShareGroupStateWithNoOpShareStatePersister() { SharePartition sharePartition = SharePartitionBuilder.builder().build(); List<PersisterStateBatch> stateBatches = Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)); assertTrue(sharePartition.isWriteShareGroupStateSuccessful(stateBatches)); }
@Override public void destroy() { if (evictionScheduler != null) { evictionScheduler.remove(getRawName()); } super.destroy(); List<String> channels = Arrays.asList(getCreatedChannelName(), getRemovedChannelName(), getUpdatedChannelName(), getExpiredChannelName()); for (String channel : channels) { Collection<Integer> ids = getListenerIdsByName(channel); if (ids.isEmpty()) { continue; } RTopic topic = getTopic(channel); for (Integer listenerId : ids) { removeListenerId(channel, listenerId); topic.removeListener(listenerId); } } }
@Test public void testDestroy() { RMapCache<String, String> cache = redisson.getMapCache("test"); AtomicInteger counter = new AtomicInteger(); cache.addListener(new EntryCreatedListener<>() { @Override public void onCreated(EntryEvent<Object, Object> event) { counter.incrementAndGet(); } }); cache.fastPut("1", "2"); Awaitility.await().atMost(Duration.ofSeconds(1)) .untilAsserted(() -> assertThat(counter.get()).isEqualTo(1)); EvictionScheduler evictionScheduler = ((Redisson)redisson).getEvictionScheduler(); Map<?, ?> map = Reflect.on(evictionScheduler).get("tasks"); assertThat(map.isEmpty()).isFalse(); cache.destroy(); assertThat(map.isEmpty()).isTrue(); RMapCache<String, String> cache2 = redisson.getMapCache("test"); cache2.fastPut("3", "4"); Awaitility.await().pollDelay(Duration.ofSeconds(1)).atMost(Duration.ofSeconds(2)) .untilAsserted(() -> assertThat(counter.get()).isEqualTo(1)); }
@Description("encode value as a 32-bit 2's complement big endian varbinary") @ScalarFunction("to_big_endian_32") @SqlType(StandardTypes.VARBINARY) public static Slice toBigEndian32(@SqlType(StandardTypes.INTEGER) long value) { Slice slice = Slices.allocate(Integer.BYTES); slice.setInt(0, Integer.reverseBytes((int) value)); return slice; }
@Test public void testToBigEndian32() { assertFunction("to_big_endian_32(0)", VARBINARY, sqlVarbinaryHex("00000000")); assertFunction("to_big_endian_32(1)", VARBINARY, sqlVarbinaryHex("00000001")); assertFunction("to_big_endian_32(2147483647)", VARBINARY, sqlVarbinaryHex("7FFFFFFF")); assertFunction("to_big_endian_32(-2147483647)", VARBINARY, sqlVarbinaryHex("80000001")); }
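A quick self-contained check of the arithmetic asserted above: -2147483647 is 0x80000001 in two's complement, and writing reverseBytes(value) through a little-endian writer lands the bytes in big-endian order (that Slice.setInt writes little-endian is the assumption the focal method relies on).

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class BigEndian32Sketch {
    public static void main(String[] args) {
        System.out.println(Integer.toHexString(-2147483647)); // 80000001

        // Mimic toBigEndian32: reverse the bytes, then write little-endian.
        ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(Integer.reverseBytes(-2147483647));
        for (byte b : buf.array()) {
            System.out.printf("%02X", b); // prints 80000001, matching the test
        }
    }
}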
@Override public SymbolTable getResponseSymbolTable(URI requestUri, Map<String, String> requestHeaders) { return _defaultResponseSymbolTable; }
@Test public void testGetResponseSymbolTableBeforeInit() { Assert.assertNull(_provider.getResponseSymbolTable(URI.create("https://Host:100/service/symbolTable"), Collections.emptyMap())); }
@Override public NativeQuerySpec<Record> select(String sql, Object... args) { return new NativeQuerySpecImpl<>(this, sql, args, DefaultRecord::new, false); }
@Test public void testPage() { DefaultQueryHelper helper = new DefaultQueryHelper(database); database.dml() .insert("s_test") .value("id", "page-test") .value("name", "page") .value("age", 22) .execute() .sync(); database.dml() .insert("s_test") .value("id", "page-test2") .value("name", "page") .value("age", 22) .execute() .sync(); helper.select("select * from s_test") .where(dsl -> { dsl.doPaging(0, 1); }) .fetch() .as(StepVerifier::create) .expectNextCount(1) .verifyComplete(); }
public SimpleRabbitListenerContainerFactory decorateSimpleRabbitListenerContainerFactory( SimpleRabbitListenerContainerFactory factory ) { return decorateRabbitListenerContainerFactory(factory); }
@Test void decorateSimpleRabbitListenerContainerFactory_adds_TracingMessagePostProcessor() { SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); assertThat(rabbitTracing.decorateSimpleRabbitListenerContainerFactory(factory)) .extracting("beforeSendReplyPostProcessors") .asInstanceOf(array(MessagePostProcessor[].class)) .allMatch(postProcessor -> postProcessor instanceof TracingMessagePostProcessor); }
public Set<MediaType> getSupportedTypes(ParseContext context) { return SUPPORTED_TYPES; }
@Test public void testAccept() throws Exception { assertTrue((parser.getSupportedTypes(null) .contains(MediaType.application("vnd.ms-outlook-pst")))); }
@Udf public <T> List<String> mapKeys(final Map<String, T> input) { if (input == null) { return null; } return Lists.newArrayList(input.keySet()); }
@Test public void shouldGetKeys() { final Map<String, String> input = new HashMap<>(); input.put("foo", "spam"); input.put("bar", "baloney"); assertThat(udf.mapKeys(input), containsInAnyOrder("foo", "bar")); }
@Override public void handle(ContainersLauncherEvent event) { // TODO: ContainersLauncher launches containers one by one!! Container container = event.getContainer(); ContainerId containerId = container.getContainerId(); switch (event.getType()) { case LAUNCH_CONTAINER: Application app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); ContainerLaunch launch = new ContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(launch); running.put(containerId, launch); break; case RELAUNCH_CONTAINER: app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); ContainerRelaunch relaunch = new ContainerRelaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(relaunch); running.put(containerId, relaunch); break; case RECOVER_CONTAINER: app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(launch); running.put(containerId, launch); break; case RECOVER_PAUSED_CONTAINER: app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); launch = new RecoverPausedContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(launch); break; case CLEANUP_CONTAINER: cleanup(event, containerId, true); break; case CLEANUP_CONTAINER_FOR_REINIT: cleanup(event, containerId, false); break; case SIGNAL_CONTAINER: SignalContainersLauncherEvent signalEvent = (SignalContainersLauncherEvent) event; ContainerLaunch runningContainer = running.get(containerId); if (runningContainer == null) { // Container not launched. So nothing needs to be done. LOG.info("Container " + containerId + " not running, nothing to signal."); return; } try { runningContainer.signalContainer(signalEvent.getCommand()); } catch (IOException e) { LOG.warn("Got exception while signaling container " + containerId + " with command " + signalEvent.getCommand()); } break; case PAUSE_CONTAINER: ContainerLaunch launchedContainer = running.get(containerId); if (launchedContainer == null) { // Container not launched. So nothing needs to be done. return; } // Pause the container try { launchedContainer.pauseContainer(); } catch (Exception e) { LOG.info("Got exception while pausing container: " + StringUtils.stringifyException(e)); } break; case RESUME_CONTAINER: ContainerLaunch launchCont = running.get(containerId); if (launchCont == null) { // Container not launched. So nothing needs to be done. return; } // Resume the container. try { launchCont.resumeContainer(); } catch (Exception e) { LOG.info("Got exception while resuming container: " + StringUtils.stringifyException(e)); } break; } }
@Test public void testRecoverPausedContainerEvent() throws IllegalArgumentException { when(event.getType()) .thenReturn(ContainersLauncherEventType.RECOVER_PAUSED_CONTAINER); spy.handle(event); Mockito.verify(containerLauncher, Mockito.times(1)) .submit(Mockito.any(RecoverPausedContainerLaunch.class)); }
public static String generateWsRemoteAddress(HttpServletRequest request) { if (request == null) { throw new IllegalArgumentException("HttpServletRequest must not be null."); } StringBuilder remoteAddress = new StringBuilder(); String scheme = request.getScheme(); remoteAddress.append(scheme != null && scheme.equalsIgnoreCase("https") ? "wss://" : "ws://"); remoteAddress.append(request.getRemoteAddr()); remoteAddress.append(":"); remoteAddress.append(request.getRemotePort()); return remoteAddress.toString(); }
@Test public void testGenerateWssRemoteAddress() { HttpServletRequest request = mock(HttpServletRequest.class); when(request.getScheme()).thenReturn("https"); when(request.getRemoteAddr()).thenReturn("localhost"); when(request.getRemotePort()).thenReturn(8443); assertEquals("wss://localhost:8443", HttpTransportUtils.generateWsRemoteAddress(request)); }
@Override public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) { return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs)); }
@Test public void testOffsetsForTimes() { consumer = newConsumer(); Map<TopicPartition, OffsetAndTimestampInternal> expectedResult = mockOffsetAndTimestamp(); Map<TopicPartition, Long> timestampToSearch = mockTimestampToSearch(); doReturn(expectedResult).when(applicationEventHandler).addAndGet(any()); Map<TopicPartition, OffsetAndTimestamp> result = assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ofMillis(1))); expectedResult.forEach((key, value) -> { OffsetAndTimestamp expected = value.buildOffsetAndTimestamp(); assertEquals(expected, result.get(key)); }); verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class)); }
@Override protected void doDelete(final List<MetaData> dataList) { dataList.forEach(metaData -> metaDataSubscribers.forEach(metaDataSubscriber -> metaDataSubscriber.unSubscribe(metaData))); }
@Test public void testDoDelete() { List<MetaData> metaDataList = createFakeMetaDataObjects(3); metaDataHandler.doDelete(metaDataList); metaDataList.forEach(metaData -> subscribers.forEach(subscriber -> verify(subscriber).unSubscribe(metaData))); }
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds, final Optional<Position> position ) { try { final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore .store(QueryableStoreTypes.timestampedWindowStore(), partition); final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds); final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds); try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = cacheBypassFetcher.fetch(store, key, lower, upper)) { final Builder<WindowedRow> builder = ImmutableList.builder(); while (it.hasNext()) { final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next(); final Instant windowStart = Instant.ofEpochMilli(next.key); if (!windowStartBounds.contains(windowStart)) { continue; } final Instant windowEnd = windowStart.plus(windowSize); if (!windowEndBounds.contains(windowEnd)) { continue; } final TimeWindow window = new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli()); final WindowedRow row = WindowedRow.of( stateStore.schema(), new Windowed<>(key, window), next.value.value(), next.value.timestamp() ); builder.add(row); } return KsMaterializedQueryResult.rowIterator(builder.build().iterator()); } } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test public void shouldGetStoreWithCorrectParams_fetchAll() { // When: table.get(PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS); // Then: verify(stateStore).store(storeTypeCaptor.capture(), anyInt()); assertThat(storeTypeCaptor.getValue().getClass().getSimpleName(), is("TimestampedWindowStoreType")); }
public OptExpression next() {
    // For a logical-scan-to-physical-scan rule, we only need to match once
    if (isPatternWithoutChildren && groupExpressionIndex.get(0) > 0) {
        return null;
    }

    OptExpression expression;
    do {
        this.groupTraceKey = 0;

        // Match with the next groupExpression of the last group node
        int lastNode = this.groupExpressionIndex.size() - 1;
        int lastNodeIndex = this.groupExpressionIndex.get(lastNode);
        this.groupExpressionIndex.set(lastNode, lastNodeIndex + 1);

        expression = match(pattern, groupExpression);
    } while (expression == null && this.groupExpressionIndex.size() != 1);

    nextIdx++;
    return expression;
}
@Test public void testBinderMulti2() { OptExpression expr1 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_JOIN, 0), OptExpression.create(new MockOperator(OperatorType.LOGICAL_PROJECT, 1)), OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 2)), OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 3)), OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 4)), OptExpression.create(new MockOperator(OperatorType.LOGICAL_PROJECT, 5))); Memo memo = new Memo(); GroupExpression ge = memo.init(expr1); Pattern pattern = Pattern.create(OperatorType.LOGICAL_JOIN) .addChildren(Pattern.create(OperatorType.LOGICAL_PROJECT)) .addChildren(Pattern.create(OperatorType.PATTERN_MULTI_LEAF)) .addChildren(Pattern.create(OperatorType.LOGICAL_OLAP_SCAN)); Binder binder = new Binder(pattern, ge); assertNull(binder.next()); }
public boolean optionallyValidateClientResponseStatusCode(int statusCode) throws Exception {
    HttpStatus httpStatus = HttpStatus.resolve(statusCode);
    if (this.statusCodesValid.isPresent() && httpStatus != null) {
        if (!this.statusCodesValid.get().contains(httpStatus)) {
            return false;
        }
    } else {
        if ((httpStatus != null && httpStatus.isError()) || (httpStatus == null && statusCode >= 400)) {
            return false;
        }
    }
    return true;
}
@Test
public void testSuccessStatus() throws Exception {
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(200));
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(201));
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(203));
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(204));
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(211));
    assertTrue(http2ServiceRequest.optionallyValidateClientResponseStatusCode(303));
}
public static <T extends Throwable> void checkContains(final Collection<?> values, final Object element, final Supplier<T> exceptionSupplierIfUnexpected) throws T { if (!values.contains(element)) { throw exceptionSupplierIfUnexpected.get(); } }
@Test void assertCheckContainsToThrowsException() { assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkContains(Collections.singleton("foo"), "bar", SQLException::new)); }
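The interesting part of checkContains is the generic-throws pattern: the caller's Supplier fixes T, so the compiler tracks exactly which checked exception the precondition can throw (SQLException in the test above). A minimal self-contained sketch of the same pattern:

import java.sql.SQLException;
import java.util.Collection;
import java.util.Set;
import java.util.function.Supplier;

public class PreconditionSketch {
    public static <T extends Throwable> void checkContains(
            final Collection<?> values, final Object element,
            final Supplier<T> exceptionSupplier) throws T {
        if (!values.contains(element)) {
            throw exceptionSupplier.get();
        }
    }

    public static void main(String[] args) throws SQLException {
        checkContains(Set.of("foo"), "foo", SQLException::new); // passes silently
        checkContains(Set.of("foo"), "bar", SQLException::new); // throws SQLException
    }
}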
@GetMapping("create") public String getNewProductPage() { return "catalogue/products/new_product"; }
@Test void getNewProductPage_ReturnsNewProductPage () { // given // when var result = this.controller.getNewProductPage(); // then assertEquals("catalogue/products/new_product", result); }
public HMac(HmacAlgorithm algorithm) { this(algorithm, (Key) null); }
@Test
public void hmacTest() {
    String testStr = "test中文";

    byte[] key = "password".getBytes();
    HMac mac = new HMac(HmacAlgorithm.HmacMD5, key);

    String macHex1 = mac.digestHex(testStr);
    assertEquals("b977f4b13f93f549e06140971bded384", macHex1);

    String macHex2 = mac.digestHex(IoUtil.toStream(testStr, CharsetUtil.CHARSET_UTF_8));
    assertEquals("b977f4b13f93f549e06140971bded384", macHex2);
}
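A plain-JDK sketch of the digest the test expects, assuming the HMac wrapper delegates to javax.crypto.Mac; the same key and input bytes should reproduce the test's hex.

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;

public class HmacMd5Sketch {
    public static void main(String[] args) throws Exception {
        byte[] key = "password".getBytes(StandardCharsets.UTF_8);
        Mac mac = Mac.getInstance("HmacMD5");
        mac.init(new SecretKeySpec(key, "HmacMD5"));
        byte[] digest = mac.doFinal("test中文".getBytes(StandardCharsets.UTF_8));

        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        // Per the test above, this should print b977f4b13f93f549e06140971bded384.
        System.out.println(hex);
    }
}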
@Override public final char readChar() throws EOFException { final char c = readChar(pos); pos += CHAR_SIZE_IN_BYTES; return c; }
@Test public void testReadChar() throws Exception { char c = in.readChar(); char expected = Bits.readChar(INIT_DATA, 0, byteOrder == BIG_ENDIAN); assertEquals(expected, c); }
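A small sketch of the Bits.readChar contract the test assumes: a Java char is two bytes, and big- vs little-endian only differ in which byte becomes the high one.

public class ReadCharSketch {
    static char readChar(byte[] data, int offset, boolean bigEndian) {
        int b0 = data[offset] & 0xFF;
        int b1 = data[offset + 1] & 0xFF;
        return bigEndian ? (char) ((b0 << 8) | b1) : (char) ((b1 << 8) | b0);
    }

    public static void main(String[] args) {
        byte[] data = {0x00, 0x41};
        System.out.println(readChar(data, 0, true));        // 'A' (0x0041)
        System.out.println((int) readChar(data, 0, false)); // 16640 (0x4100)
    }
}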
public static SQLStatementParserEngine getSQLStatementParserEngine(final DatabaseType databaseType, final CacheOption sqlStatementCacheOption, final CacheOption parseTreeCacheOption) { SQLStatementParserEngine result = ENGINES.get(databaseType); if (null == result) { result = ENGINES.computeIfAbsent(databaseType, key -> new SQLStatementParserEngine(key, sqlStatementCacheOption, parseTreeCacheOption)); } else if (!result.getSqlStatementCacheOption().equals(sqlStatementCacheOption) || !result.getParseTreeCacheOption().equals(parseTreeCacheOption)) { result = new SQLStatementParserEngine(databaseType, sqlStatementCacheOption, parseTreeCacheOption); ENGINES.put(databaseType, result); } return result; }
@Test void assertGetSQLStatementParserEngineSame() { SQLStatementParserEngine before = SQLStatementParserEngineFactory.getSQLStatementParserEngine(databaseType, new CacheOption(2000, 65535L), new CacheOption(128, 1024L)); SQLStatementParserEngine after = SQLStatementParserEngineFactory.getSQLStatementParserEngine(databaseType, new CacheOption(2000, 65535L), new CacheOption(128, 1024L)); assertSame(before, after); }
Configuration getEffectiveConfiguration(String[] args) throws CliArgsException { final CommandLine commandLine = cli.parseCommandLineOptions(args, true); final Configuration effectiveConfiguration = new Configuration(baseConfiguration); effectiveConfiguration.addAll(cli.toConfiguration(commandLine)); effectiveConfiguration.set(DeploymentOptions.TARGET, KubernetesSessionClusterExecutor.NAME); return effectiveConfiguration; }
@Test
void testCorrectSettingOfMaxSlots() throws Exception {
    final String[] params =
            new String[] {
                "-e",
                KubernetesSessionClusterExecutor.NAME,
                "-D" + TaskManagerOptions.NUM_TASK_SLOTS.key() + "=3"
            };

    final KubernetesSessionCli cli = createFlinkKubernetesCustomCliWithJmAndTmTotalMemory(1234);

    final Configuration executorConfig = cli.getEffectiveConfiguration(params);
    final ClusterClientFactory<String> clientFactory = getClusterClientFactory(executorConfig);
    final ClusterSpecification clusterSpecification =
            clientFactory.getClusterSpecification(executorConfig);

    // the -D option above configures 3 slots per task manager, and the effective
    // configuration should reflect exactly that value.
    assertThat(clusterSpecification.getSlotsPerTaskManager()).isEqualTo(3);
}
@SuppressWarnings("unchecked") void openDB(final Map<String, Object> configs, final File stateDir) { // initialize the default rocksdb options final DBOptions dbOptions = new DBOptions(); final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions); final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache(); cache = new LRUCache(BLOCK_CACHE_SIZE); tableConfig.setBlockCache(cache); tableConfig.setBlockSize(BLOCK_SIZE); filter = new BloomFilter(); tableConfig.setFilterPolicy(filter); userSpecifiedOptions.optimizeFiltersForHits(); userSpecifiedOptions.setTableFormatConfig(tableConfig); userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE); userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE); userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE); userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS); userSpecifiedOptions.setCreateIfMissing(true); userSpecifiedOptions.setErrorIfExists(false); userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL); // this is the recommended way to increase parallelism in RocksDb // note that the current implementation of setIncreaseParallelism affects the number // of compaction threads but not flush threads (the latter remains one). Also, // the parallelism value needs to be at least two because of the code in // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580 // subtracts one from the value passed to determine the number of compaction threads // (this could be a bug in the RocksDB code and their devs have been contacted). userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2)); wOptions = new WriteOptions(); wOptions.setDisableWAL(true); fOptions = new FlushOptions(); fOptions.setWaitForFlush(true); final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG); if (configSetterClass != null) { configSetter = Utils.newInstance(configSetterClass); configSetter.setConfig(name, userSpecifiedOptions, configs); } dbDir = new File(new File(stateDir, parentDir), name); try { Files.createDirectories(dbDir.getParentFile().toPath()); Files.createDirectories(dbDir.getAbsoluteFile().toPath()); } catch (final IOException fatal) { throw new ProcessorStateException(fatal); } // Setup statistics before the database is opened, otherwise the statistics are not updated // with the measurements from Rocks DB setupStatistics(configs, dbOptions); openRocksDB(dbOptions, columnFamilyOptions); dbAccessor = new DirectDBAccessor(db, fOptions, wOptions); open = true; addValueProvidersToMetricsRecorder(); }
@Test public void shouldNotSetCacheInValueProvidersWhenUserProvidesPlainTableFormatConfig() { rocksDBStore = getRocksDBStoreWithRocksDBMetricsRecorder(); context = getProcessorContext( RecordingLevel.DEBUG, RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig.class ); rocksDBStore.openDB(context.appConfigs(), context.stateDir()); verify(metricsRecorder).addValueProviders(eq(DB_NAME), notNull(), isNull(), notNull()); }
@Override public List<ServiceCombServer> getInitialListOfServers() { return getUpdatedListOfServers(); }
@Test public void getInitialListOfServers() { final List<ServiceCombServer> initialListOfServers = serviceCombServiceList.getInitialListOfServers(); Assert.assertEquals(initialListOfServers.size(), instances.size()); }
public boolean isAllowed() { return get(ALLOWED, true); }
@Test public void basicTest() { ConfigApplyDelegate delegate = configApply -> { }; ObjectMapper mapper = new ObjectMapper(); TestConfig allowed = new TestConfig(); TestConfig notAllowed = new TestConfig(); allowed.init("enabled", "KEY", JsonNodeFactory.instance.objectNode(), mapper, delegate); notAllowed.init("disabled", "KEY", JsonNodeFactory.instance.objectNode(), mapper, delegate); notAllowed.isAllowed(false); assertThat(allowed.isAllowed(), is(true)); assertThat(notAllowed.isAllowed(), is(false)); notAllowed.isAllowed(true); allowed.isAllowed(false); assertThat(allowed.isAllowed(), is(false)); assertThat(notAllowed.isAllowed(), is(true)); }
@SuppressFBWarnings(value = "DMI_RANDOM_USED_ONLY_ONCE")
public static LocalCommands open(
    final KsqlEngine ksqlEngine,
    final File directory
) {
  if (!directory.exists()) {
    if (!directory.mkdirs()) {
      throw new KsqlServerException("Couldn't create the local commands directory: "
          + directory.getPath()
          + "\n Make sure the directory exists and is readable/writable for KSQL server "
          + "\n or its parent directory is readable/writable by KSQL server"
          + "\n or change it to a readable/writable directory by setting '"
          + KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
          + "' config in the properties file."
      );
    }

    try {
      Files.setPosixFilePermissions(directory.toPath(),
          PosixFilePermissions.fromString("rwx------"));
    } catch (final IOException e) {
      throw new KsqlServerException(String.format(
          "Couldn't set POSIX permissions on the local commands directory: %s. Error = %s",
          directory.getPath(), e.getMessage()));
    }
  }

  if (!directory.isDirectory()) {
    throw new KsqlServerException(directory.getPath() + " is not a directory."
        + "\n Make sure the directory exists and is readable/writable for KSQL server "
        + "\n or its parent directory is readable/writable by KSQL server"
        + "\n or change it to a readable/writable directory by setting '"
        + KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
        + "' config in the properties file."
    );
  }

  if (!directory.canWrite() || !directory.canRead() || !directory.canExecute()) {
    throw new KsqlServerException("The local commands directory is not readable/writable "
        + "for KSQL server: "
        + directory.getPath()
        + "\n Make sure the directory exists and is readable/writable for KSQL server "
        + "\n or change it to a readable/writable directory by setting '"
        + KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
        + "' config in the properties file."
    );
  }

  final File file = new File(directory,
      String.format("local_commands_%d_%s%s", System.currentTimeMillis(),
          Integer.toHexString(RANDOM.nextInt()), LOCAL_COMMANDS_FILE_SUFFIX));
  return new LocalCommands(directory, ksqlEngine, LocalCommandsFile.createWriteable(file));
}
@Test public void shouldCreateCommandLocationWhenDoesNotExist() throws IOException { // Given final Path dir = Paths.get(commandsDir.newFolder().getAbsolutePath(), "ksql-local-commands"); assertThat(Files.exists(dir), is(false)); // When LocalCommands.open(ksqlEngine, dir.toFile()); // Then assertThat(Files.exists(dir), is(true)); }
@Override public Num calculate(BarSeries series, Position position) { return isConsecutive(position) ? series.one() : series.zero(); }
@Test public void calculateWithNoPositions() { MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105); assertNumEquals(0, getCriterion(PositionFilter.LOSS).calculate(series, new BaseTradingRecord())); assertNumEquals(0, getCriterion(PositionFilter.PROFIT).calculate(series, new BaseTradingRecord())); }
@SuppressWarnings("unused") // Required for automatic type inference public static <K> Builder0<K> forClass(final Class<K> type) { return new Builder0<>(); }
@Test public void shouldNotThrowOnDuplicateHandler2() { HandlerMaps.forClass(BaseType.class).withArgTypes(String.class, Integer.class) .put(LeafTypeA.class, handler2_1) .put(LeafTypeB.class, handler2_1); }
public boolean isConfigServiceCacheKeyIgnoreCase() { return getBooleanProperty("config-service.cache.key.ignore-case", false); }
@Test public void testIsConfigServiceCacheKeyIgnoreCase() { assertFalse(bizConfig.isConfigServiceCacheKeyIgnoreCase()); when(environment.getProperty("config-service.cache.key.ignore-case")).thenReturn("true"); assertTrue(bizConfig.isConfigServiceCacheKeyIgnoreCase()); }
public ShardingSphereDatabase getDatabase(final String name) { ShardingSpherePreconditions.checkNotEmpty(name, NoDatabaseSelectedException::new); ShardingSphereMetaData metaData = getMetaDataContexts().getMetaData(); ShardingSpherePreconditions.checkState(metaData.containsDatabase(name), () -> new UnknownDatabaseException(name)); return metaData.getDatabase(name); }
@Test void assertGetDatabaseWithEmptyString() { assertThrows(NoDatabaseSelectedException.class, () -> contextManager.getDatabase("")); }
private RemotingCommand getConsumeStats(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { final RemotingCommand response = RemotingCommand.createResponseCommand(null); final GetConsumeStatsRequestHeader requestHeader = (GetConsumeStatsRequestHeader) request.decodeCommandCustomHeader(GetConsumeStatsRequestHeader.class); ConsumeStats consumeStats = new ConsumeStats(); Set<String> topics = new HashSet<>(); if (UtilAll.isBlank(requestHeader.getTopic())) { topics = this.brokerController.getConsumerOffsetManager().whichTopicByConsumer(requestHeader.getConsumerGroup()); } else { topics.add(requestHeader.getTopic()); } for (String topic : topics) { TopicConfig topicConfig = this.brokerController.getTopicConfigManager().selectTopicConfig(topic); if (null == topicConfig) { LOGGER.warn("AdminBrokerProcessor#getConsumeStats: topic config does not exist, topic={}", topic); continue; } TopicQueueMappingDetail mappingDetail = this.brokerController.getTopicQueueMappingManager().getTopicQueueMapping(topic); { SubscriptionData findSubscriptionData = this.brokerController.getConsumerManager().findSubscriptionData(requestHeader.getConsumerGroup(), topic); if (null == findSubscriptionData && this.brokerController.getConsumerManager().findSubscriptionDataCount(requestHeader.getConsumerGroup()) > 0) { LOGGER.warn( "AdminBrokerProcessor#getConsumeStats: topic does not exist in consumer group's subscription, " + "topic={}, consumer group={}", topic, requestHeader.getConsumerGroup()); continue; } } for (int i = 0; i < topicConfig.getReadQueueNums(); i++) { MessageQueue mq = new MessageQueue(); mq.setTopic(topic); mq.setBrokerName(this.brokerController.getBrokerConfig().getBrokerName()); mq.setQueueId(i); OffsetWrapper offsetWrapper = new OffsetWrapper(); long brokerOffset = this.brokerController.getMessageStore().getMaxOffsetInQueue(topic, i); if (brokerOffset < 0) { brokerOffset = 0; } long consumerOffset = this.brokerController.getConsumerOffsetManager().queryOffset( requestHeader.getConsumerGroup(), topic, i); // the consumerOffset cannot be zero for static topic because of the "double read check" strategy // just remain the logic for dynamic topic // maybe we should remove it in the future if (mappingDetail == null) { if (consumerOffset < 0) { consumerOffset = 0; } } long pullOffset = this.brokerController.getConsumerOffsetManager().queryPullOffset( requestHeader.getConsumerGroup(), topic, i); offsetWrapper.setBrokerOffset(brokerOffset); offsetWrapper.setConsumerOffset(consumerOffset); offsetWrapper.setPullOffset(Math.max(consumerOffset, pullOffset)); long timeOffset = consumerOffset - 1; if (timeOffset >= 0) { long lastTimestamp = this.brokerController.getMessageStore().getMessageStoreTimeStamp(topic, i, timeOffset); if (lastTimestamp > 0) { offsetWrapper.setLastTimestamp(lastTimestamp); } } consumeStats.getOffsetTable().put(mq, offsetWrapper); } double consumeTps = this.brokerController.getBrokerStatsManager().tpsGroupGetNums(requestHeader.getConsumerGroup(), topic); consumeTps += consumeStats.getConsumeTps(); consumeStats.setConsumeTps(consumeTps); } byte[] body = consumeStats.encode(); response.setBody(body); response.setCode(ResponseCode.SUCCESS); response.setRemark(null); return response; }
@Test public void testGetConsumeStats() throws RemotingCommandException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUME_STATS, null); request.addExtField("topic", "topicTest"); request.addExtField("consumerGroup", "GID-test"); RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); }
@VisibleForTesting Map<String, Object> getCustomMessageModel(EventNotificationContext ctx, String type, List<MessageSummary> backlog, DateTimeZone timeZone) { final EventNotificationModelData modelData = EventNotificationModelData.of(ctx, backlog); LOG.debug("Custom message model: {}", modelData); final Map<String, Object> objectMap = objectMapperProvider.getForTimeZone(timeZone).convertValue(modelData, TypeReferences.MAP_STRING_OBJECT); objectMap.put("type", type); objectMap.put("http_external_uri", this.httpExternalUri); final Map<String, Object> escapedModelMap = new HashMap<>(); objectMap.forEach((k, v) -> { if (v instanceof String str) { escapedModelMap.put(k, str.replace("\"", "\\\"")); } else { escapedModelMap.put(k, v); } }); LOG.debug("Finalized model map: {}", escapedModelMap); return escapedModelMap; }
@Test public void getCustomMessageModel() { List<MessageSummary> messageSummaries = generateMessageSummaries(50); Map<String, Object> customMessageModel = teamsEventNotification.getCustomMessageModel(eventNotificationContext, notificationConfig.type(), messageSummaries, DateTimeZone.UTC); assertThat(customMessageModel).isNotNull(); assertThat(customMessageModel.get("event_definition_description")).isEqualTo("Event Definition Test Description"); assertThat(customMessageModel.get("event_definition_title")).isEqualTo("Event Definition Test Title"); assertThat(customMessageModel.get("event_definition_type")).isEqualTo("test-dummy-v1"); assertThat(customMessageModel.get("type")).isEqualTo(TeamsEventNotificationConfigV2.TYPE_NAME); assertThat(customMessageModel.get("job_definition_id")).isEqualTo("<unknown>"); assertThat(customMessageModel.get("job_trigger_id")).isEqualTo("<unknown>"); }
@Override public boolean checkCredentials(String username, String password) { if (username == null || password == null) { return false; } Credentials credentials = new Credentials(username, password); if (validCredentialsCache.contains(credentials)) { return true; } else if (invalidCredentialsCache.contains(credentials)) { return false; } boolean isValid = this.username.equals(username) && this.passwordHash.equals( generatePasswordHash( algorithm, salt, iterations, keyLength, password)); if (isValid) { validCredentialsCache.add(credentials); } else { invalidCredentialsCache.add(credentials); } return isValid; }
@Test public void testPBKDF2WithHmacSHA512_withoutColon() throws Exception { String algorithm = "PBKDF2WithHmacSHA512"; int iterations = 1000; int keyLength = 128; String hash = "07:6F:E2:27:9B:CA:48:66:9B:13:9E:02:9C:AE:FC:E4:1A:2F:0F:E6:48:A3:FF:8E:D2:30:59:68:12:A6:29:34:FC:99:29:8A:98:65:AE:4B:05:7C:B6:83:A4:83:C0:32:E4:90:61:1D:DD:2E:53:17:01:FF:6A:64:48:B2:AA:22:DE:B3:BC:56:08:C6:66:EC:98:F8:96:8C:1B:DA:B2:F2:2A:6C:22:8E:19:CC:B2:62:55:3E:BE:DC:C7:58:36:9D:92:CF:D7:D2:A1:6D:8F:DC:DE:8E:E9:36:D4:E7:2D:0A:6D:A1:B8:56:0A:53:BB:17:E2:D5:DE:A0:48:51:FC:33"; hash = hash.replace(":", ""); PBKDF2Authenticator PBKDF2Authenticator = new PBKDF2Authenticator( "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength); for (String username : TEST_USERNAMES) { for (String password : TEST_PASSWORDS) { boolean expectedIsAuthenticated = VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password); boolean actualIsAuthenticated = PBKDF2Authenticator.checkCredentials(username, password); assertEquals(expectedIsAuthenticated, actualIsAuthenticated); } } }
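A hedged sketch of what generatePasswordHash presumably does, assuming it wraps the JDK's SecretKeyFactory (the authenticator's parameters map naturally onto PBEKeySpec; note that the JDK's keyLength is in bits). The helper name and arguments here are illustrative.

import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;

public class Pbkdf2Sketch {
    static byte[] pbkdf2(String algorithm, String password, byte[] salt,
                         int iterations, int keyLength) throws Exception {
        PBEKeySpec spec = new PBEKeySpec(password.toCharArray(), salt, iterations, keyLength);
        return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded();
    }

    public static void main(String[] args) throws Exception {
        byte[] hash = pbkdf2("PBKDF2WithHmacSHA512", "secret", "salt".getBytes(), 1000, 128);
        System.out.println(hash.length); // 16, since PBEKeySpec's keyLength is in bits
    }
}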
@Override public void persist(final String key, final String value) { try { if (isExisted(key)) { update(key, value); return; } String tempPrefix = ""; String parent = SEPARATOR; String[] paths = Arrays.stream(key.split(SEPARATOR)).filter(each -> !Strings.isNullOrEmpty(each)).toArray(String[]::new); // Create key level directory recursively. for (int i = 0; i < paths.length - 1; i++) { String tempKey = tempPrefix + SEPARATOR + paths[i]; if (!isExisted(tempKey)) { insert(tempKey, "", parent); } tempPrefix = tempKey; parent = tempKey; } insert(key, value, parent); } catch (final SQLException ex) { log.error("Persist {} data to key: {} failed", getType(), key, ex); } }
@Test void assertPersistWithInsertForSimpleKeys() throws SQLException { final String key = "key"; final String value = "value"; when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement); when(mockJdbcConnection.prepareStatement(repositorySQL.getInsertSQL())).thenReturn(mockPreparedStatementForPersist); when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(false); repository.persist(key, value); verify(mockPreparedStatement).setString(1, key); verify(mockPreparedStatementForPersist).setString(eq(1), anyString()); verify(mockPreparedStatementForPersist).setString(2, key); verify(mockPreparedStatementForPersist).setString(3, value); verify(mockPreparedStatementForPersist).setString(4, "/"); verify(mockPreparedStatementForPersist).executeUpdate(); }
public static void delete(final File file, final boolean ignoreFailures) { if (file.exists()) { if (file.isDirectory()) { final File[] files = file.listFiles(); if (null != files) { for (final File f : files) { delete(f, ignoreFailures); } } } if (!file.delete() && !ignoreFailures) { try { Files.delete(file.toPath()); } catch (final IOException ex) { LangUtil.rethrowUnchecked(ex); } } } }
@Test void deleteIgnoreFailuresFile() throws IOException { final Path file = tempDir.resolve("file.txt"); Files.createFile(file); IoUtil.delete(file.toFile(), false); assertFalse(Files.exists(file)); }
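For contrast with the File-based walk above, a hedged NIO sketch of the same recursive delete using Files.walkFileTree; unlike File.delete(), Files.delete surfaces the failure cause as an IOException.

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class RecursiveDeleteSketch {
    static void deleteRecursively(Path root) throws IOException {
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file); // delete children first
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Files.delete(dir); // then the now-empty directory
                return FileVisitResult.CONTINUE;
            }
        });
    }
}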
@Override public boolean filterTopic(String topicName) { if (StringUtils.isBlank(topicName)) { return true; } return TopicValidator.isSystemTopic(topicName) || PopAckConstants.isStartWithRevivePrefix(topicName) || this.topicBlackSet.contains(topicName) || MixAll.isLmq(topicName); }
@Test public void filterTopicTest() { MessageStoreFilter topicFilter = new MessageStoreTopicFilter(new MessageStoreConfig()); Assert.assertTrue(topicFilter.filterTopic("")); Assert.assertTrue(topicFilter.filterTopic(TopicValidator.SYSTEM_TOPIC_PREFIX + "_Topic")); String topicName = "WhiteTopic"; Assert.assertFalse(topicFilter.filterTopic(topicName)); topicFilter.addTopicToBlackList(topicName); Assert.assertTrue(topicFilter.filterTopic(topicName)); }
@VisibleForTesting void loadUdfFromClass(final Class<?>... udfClasses) { for (final Class<?> theClass : udfClasses) { loadUdfFromClass( theClass, KsqlScalarFunction.INTERNAL_PATH); } }
@Test public void shouldThrowOnMissingAnnotation() throws Exception { // Given: final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry(); final Path udfJar = new File("src/test/resources/udf-failing-tests.jar").toPath(); try (UdfClassLoader udfClassLoader = newClassLoader(udfJar, PARENT_CLASS_LOADER, resourceName -> false)) { final Class<?> clazz = udfClassLoader.loadClass("org.damian.ksql.udf.MissingAnnotationUdf"); final UdfLoader udfLoader = new UdfLoader( functionRegistry, empty(), create(EMPTY), true ); // When: final Exception e = assertThrows( KsqlException.class, () -> udfLoader.loadUdfFromClass(clazz) ); // Then: assertThat(e.getMessage(), containsString( "Cannot load UDF MissingAnnotation. DECIMAL return type is " + "not supported without an explicit schema")); } }
public static MultivaluedHashMap<String, String> getQueryParams(Map<String, String[]> parameterMap) { MultivaluedHashMap<String, String> queryParameters = new MultivaluedHashMap<>(); if (parameterMap.size() == 0) { return queryParameters; } for (Map.Entry<String, String[]> parameter : parameterMap.entrySet()) { for (String value : parameter.getValue()) { try { queryParameters.add(URLDecoder.decode(parameter.getKey(), StandardCharsets.UTF_8.name()), URLDecoder.decode(value, StandardCharsets.UTF_8.name())); } catch (UnsupportedEncodingException e) { LOGGER.error("Unable to decode query parameter", e); } } } return queryParameters; }
@Test(description = "convert query parameters to multivaluedmap") public void convertWithRightOutputSize() throws Exception { Map<String, String[]> params = new HashMap<>(); params.put("key1", new String[]{"value1", "value2"}); params.put("key2", new String[]{"value2", "value3", "value4", "value4"}); MultivaluedHashMap<String, String> multivaluedMap = ServletUtils.getQueryParams(params); assertEquals(multivaluedMap.size(), 2); assertEquals(multivaluedMap.get("key1").size(), 2); assertEquals(multivaluedMap.get("key2").size(), 4); assertEquals(multivaluedMap.keySet().iterator().next(), "key1"); }
public static Frequency ofKHz(long value) { return new Frequency(value * KHZ); }
@Test public void testofKHz() { Frequency frequency = Frequency.ofKHz(1.0); assertThat(frequency.asHz(), is(1000L)); }
static String formatAuthorizationHeader(String clientId, String clientSecret, boolean urlencode) throws UnsupportedEncodingException { clientId = sanitizeString("the token endpoint request client ID parameter", clientId); clientSecret = sanitizeString("the token endpoint request client secret parameter", clientSecret); // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 if (urlencode) { clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()); clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8.name()); } String s = String.format("%s:%s", clientId, clientSecret); // Per RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. String encoded = Base64.getEncoder().encodeToString(Utils.utf8(s)); return String.format("Basic %s", encoded); }
@Test public void testFormatAuthorizationHeader() throws UnsupportedEncodingException { assertAuthorizationHeader("id", "secret", false, "Basic aWQ6c2VjcmV0"); }
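A quick check of the encoding asserted above: per RFC 7617, Basic credentials are the non-URL-safe base64 of "clientId:clientSecret", so "id:secret" encodes to "aWQ6c2VjcmV0".

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthSketch {
    public static void main(String[] args) {
        String encoded = Base64.getEncoder()
                .encodeToString("id:secret".getBytes(StandardCharsets.UTF_8));
        System.out.println("Basic " + encoded); // Basic aWQ6c2VjcmV0
    }
}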
protected int getRepoStringLength() { return database.getDatabaseMeta().getDatabaseInterface().getMaxVARCHARLength() - 1 > 0 ? database.getDatabaseMeta() .getDatabaseInterface().getMaxVARCHARLength() - 1 : KettleDatabaseRepository.REP_ORACLE_STRING_LENGTH; }
@Test public void testOracleDBRepoStringLength() throws Exception { KettleEnvironment.init(); DatabaseMeta databaseMeta = new DatabaseMeta( "OraRepo", "ORACLE", "JDBC", null, "test", null, null, null ); repositoryMeta = new KettleDatabaseRepositoryMeta( "KettleDatabaseRepository", "OraRepo", "Ora Repository", databaseMeta ); repository = new KettleDatabaseRepository(); repository.init( repositoryMeta ); KettleDatabaseRepositoryCreationHelper helper = new KettleDatabaseRepositoryCreationHelper( repository ); int repoStringLength = helper.getRepoStringLength(); assertEquals( EXPECTED_ORACLE_DB_REPO_STRING, repoStringLength ); }
@Override public void getFields( RowMetaInterface inputRowMeta, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { if ( nextStep != null ) { if ( nextStep.equals( executionResultTargetStepMeta ) ) { inputRowMeta.clear(); prepareExecutionResultsFields( inputRowMeta, nextStep ); } else if ( nextStep.equals( resultFilesTargetStepMeta ) ) { inputRowMeta.clear(); prepareExecutionResultsFileFields( inputRowMeta, nextStep ); } else if ( nextStep.equals( outputRowsSourceStepMeta ) ) { inputRowMeta.clear(); prepareResultsRowsFields( inputRowMeta ); } // else don't call clear on inputRowMeta, it's the main output and should mimic the input } }
@Test public void testGetFields() throws Exception { TransExecutorMeta meta = new TransExecutorMeta(); meta = spy( meta ); StepMeta nextStep = mock( StepMeta.class ); // Test null meta.getFields( null, null, null, nextStep, null, null, null ); verify( meta, never() ).addFieldToRow( any( RowMetaInterface.class ), anyString(), anyInt() ); RowMetaInterface rowMeta = mock( RowMetaInterface.class ); meta.getFields( rowMeta, null, null, nextStep, null, null, null ); verify( rowMeta, never() ).clear(); StepMeta executionResultTargetStepMeta = mock( StepMeta.class ); meta.setExecutionResultTargetStepMeta( executionResultTargetStepMeta ); meta.getFields( rowMeta, null, null, nextStep, null, null, null ); verify( rowMeta, atMost( 1 ) ).clear(); meta.setExecutionResultTargetStepMeta( null ); StepMeta resultFilesTargetStepMeta = mock( StepMeta.class ); meta.setResultFilesTargetStepMeta( resultFilesTargetStepMeta ); meta.getFields( rowMeta, null, null, nextStep, null, null, null ); verify( rowMeta, atMost( 1 ) ).clear(); meta.setResultFilesTargetStepMeta( null ); StepMeta outputRowsSourceStepMeta = mock( StepMeta.class ); meta.setOutputRowsSourceStepMeta( outputRowsSourceStepMeta ); meta.getFields( rowMeta, null, null, nextStep, null, null, null ); verify( rowMeta, atMost( 1 ) ).clear(); meta.setOutputRowsSourceStepMeta( null ); }
@Override public List<? extends Issue> getIssues() { return componentIssues; }
@Test public void get_issues() { DefaultIssue issue = new DefaultIssue() .setKey("KEY") .setRuleKey(RuleKey.of("xoo", "S01")) .setSeverity("MAJOR") .setStatus("CLOSED") .setResolution("FIXED") .setEffort(Duration.create(10L)); MeasureComputerContextImpl underTest = newContext(PROJECT_REF, Arrays.asList(issue)); assertThat(underTest.getIssues()).hasSize(1); org.sonar.api.ce.measure.Issue result = underTest.getIssues().get(0); assertThat(result.key()).isEqualTo("KEY"); assertThat(result.ruleKey()).isEqualTo(RuleKey.of("xoo", "S01")); assertThat(result.severity()).isEqualTo("MAJOR"); assertThat(result.status()).isEqualTo("CLOSED"); assertThat(result.resolution()).isEqualTo("FIXED"); assertThat(result.effort()).isEqualTo(Duration.create(10L)); }