focal_method | test_case
---|---
@Override
public void close() throws IOException {
super.close();
this.closed = true;
if (stream != null) {
stream.close();
}
}
|
@Test
public void testClose() throws Exception {
setupData(randomData(2));
SeekableInputStream closed =
new ADLSInputStream(fileClient(), null, azureProperties, MetricsContext.nullMetrics());
closed.close();
assertThatThrownBy(() -> closed.seek(0))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Cannot seek: already closed");
}
|
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
}
|
@Test
public void shouldSingleSelectWithReadPermissionsAllowed() {
// Given:
final Statement statement = givenStatement("SELECT * FROM " + KAFKA_STREAM_TOPIC + ";");
// When/Then:
authorizationValidator.checkAuthorization(securityContext, metaStore, statement);
}
|
@Udf
public String chr(@UdfParameter(
description = "Decimal codepoint") final Integer decimalCode) {
if (decimalCode == null) {
return null;
}
if (!Character.isValidCodePoint(decimalCode)) {
return null;
}
final char[] resultChars = Character.toChars(decimalCode);
return String.valueOf(resultChars);
}
|
@Test
public void shouldReturnNullForEmptyStringInput() {
final String result = udf.chr("");
assertThat(result, is(nullValue()));
}
|
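Note that the test above passes a String to chr, while the focal method shown takes an Integer, so the suite evidently exercises a sibling String overload. Below is a minimal, hypothetical sketch of such an overload, inferred only from the test's expectation that an empty string yields null; the real UDF may accept additional encodings.

@Udf
public String chr(@UdfParameter(
description = "Decimal codepoint, as a string") final String decimalCode) {
// Null or empty input cannot be parsed to a codepoint, so return null as the test expects.
if (decimalCode == null || decimalCode.trim().isEmpty()) {
return null;
}
try {
// Delegate to the Integer overload shown above.
return chr(Integer.valueOf(decimalCode.trim()));
} catch (NumberFormatException e) {
return null;
}
}
|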
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
if (filteredOpenAPI == null) {
return filteredOpenAPI;
}
OpenAPI clone = new OpenAPI();
clone.info(filteredOpenAPI.getInfo());
clone.openapi(filteredOpenAPI.getOpenapi());
clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
clone.setExtensions(filteredOpenAPI.getExtensions());
clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
clone.setSecurity(filteredOpenAPI.getSecurity());
clone.setServers(filteredOpenAPI.getServers());
clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags()));
final Set<String> allowedTags = new HashSet<>();
final Set<String> filteredTags = new HashSet<>();
Paths clonedPaths = new Paths();
if (filteredOpenAPI.getPaths() != null) {
for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clonedPaths.addPathItem(resourcePath, clonedPathItem);
}
}
}
clone.paths(clonedPaths);
}
filteredTags.removeAll(allowedTags);
final List<Tag> tags = clone.getTags();
if (tags != null && !filteredTags.isEmpty()) {
tags.removeIf(tag -> filteredTags.contains(tag.getName()));
if (clone.getTags().isEmpty()) {
clone.setTags(null);
}
}
if (filteredOpenAPI.getWebhooks() != null) {
for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clone.addWebhooks(resourcePath, clonedPathItem);
}
}
}
}
if (filteredOpenAPI.getComponents() != null) {
clone.components(new Components());
clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
}
if (filter.isRemovingUnreferencedDefinitions()) {
clone = removeBrokenReferenceDefinitions(clone);
}
return clone;
}
|
@Test
public void shouldRemoveBrokenNestedRefsKeepArray() throws IOException {
final OpenAPI openAPI = getOpenAPI31(RESOURCE_PATH_LIST);
final RemoveUnreferencedDefinitionsFilter remover = new RemoveUnreferencedDefinitionsFilter();
final OpenAPI filtered = new SpecFilter().filter(openAPI, remover, null, null, null);
assertEquals(filtered.getComponents().getSchemas().size(), 2, "Expected to have parent and child list schemas");
assertTrue(filtered.getComponents().getSchemas().containsKey("SomeChildObject"), "Schemas should contain child list");
}
|
@SuppressWarnings("rawtypes")
public Collection<RuleConfiguration> swapToRuleConfigurations(final Collection<YamlRuleConfiguration> yamlRuleConfigs) {
Collection<RuleConfiguration> result = new LinkedList<>();
Collection<Class<?>> ruleConfigTypes = yamlRuleConfigs.stream().map(YamlRuleConfiguration::getRuleConfigurationType).collect(Collectors.toList());
for (Entry<Class<?>, YamlRuleConfigurationSwapper> entry : OrderedSPILoader.getServicesByClass(YamlRuleConfigurationSwapper.class, ruleConfigTypes).entrySet()) {
result.addAll(swapToRuleConfigurations(yamlRuleConfigs, entry.getKey(), entry.getValue()));
}
return result;
}
|
@Test
void assertSwapToRuleConfigurations() {
YamlRuleConfigurationFixture yamlRuleConfig = new YamlRuleConfigurationFixture();
yamlRuleConfig.setName("test");
Collection<RuleConfiguration> actual = new YamlRuleConfigurationSwapperEngine().swapToRuleConfigurations(Collections.singleton(yamlRuleConfig));
assertThat(actual.size(), is(1));
assertThat(((FixtureRuleConfiguration) actual.iterator().next()).getName(), is("test"));
}
|
public static String escapeAll(CharSequence content) {
return escape(content, c -> true);
}
|
@Test
public void escapeAllTest() {
String str = "*@-_+./(123你好)ABCabc";
String escape = EscapeUtil.escapeAll(str);
assertEquals("%2a%40%2d%5f%2b%2e%2f%28%31%32%33%u4f60%u597d%29%41%42%43%61%62%63", escape);
String unescape = EscapeUtil.unescape(escape);
assertEquals(str, unescape);
}
|
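As the expected string in the test shows, escapeAll mirrors JavaScript's legacy escape(): single-byte characters become %XX escapes and higher codepoints become %uXXXX escapes. A small usage sketch, consistent with the expected output above:

// 你 is U+4F60, so it becomes a %u escape; ASCII 'A' (0x41) becomes a byte escape.
EscapeUtil.escapeAll("你A"); // "%u4f60%41"
|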
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return true;
}
try {
final SMBSession.DiskShareWrapper share = session.openShare(file);
try {
if(new SMBPathContainerService(session).isContainer(file)) {
return true;
}
if(file.isDirectory()) {
return share.get().folderExists(new SMBPathContainerService(session).getKey(file));
}
return share.get().fileExists(new SMBPathContainerService(session).getKey(file));
}
catch(SMBRuntimeException e) {
throw new SMBExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
finally {
session.releaseShare(share);
}
}
catch(NotfoundException e) {
return false;
}
}
|
@Test
public void testFindNotFound() throws Exception {
assertFalse(new SMBFindFeature(session).find(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file))));
}
|
public void onBlock(
final DirectBuffer termBuffer, final int termOffset, final int length, final int sessionId, final int termId)
{
try
{
final boolean isPaddingFrame = termBuffer.getShort(typeOffset(termOffset)) == PADDING_FRAME_TYPE;
final int dataLength = isPaddingFrame ? HEADER_LENGTH : length;
final ByteBuffer byteBuffer;
final long startNs = nanoClock.nanoTime();
if (null == checksum || isPaddingFrame)
{
byteBuffer = termBuffer.byteBuffer();
byteBuffer.limit(termOffset + dataLength).position(termOffset);
}
else
{
checksumBuffer.putBytes(0, termBuffer, termOffset, dataLength);
computeChecksum(checksum, checksumBuffer, dataLength);
byteBuffer = checksumBuffer.byteBuffer();
byteBuffer.limit(dataLength).position(0);
}
int fileOffset = segmentOffset;
do
{
fileOffset += recordingFileChannel.write(byteBuffer, fileOffset);
}
while (byteBuffer.remaining() > 0);
if (forceWrites)
{
recordingFileChannel.force(forceMetadata);
}
final long writeTimeNs = nanoClock.nanoTime() - startNs;
recorder.bytesWritten(dataLength);
recorder.writeTimeNs(writeTimeNs);
segmentOffset += length;
if (segmentOffset >= segmentLength)
{
onFileRollOver();
}
}
catch (final ClosedByInterruptException ex)
{
close();
throw new ArchiveException("file closed by interrupt, recording aborted", ex, ArchiveException.GENERIC);
}
catch (final IOException ex)
{
close();
checkErrorType(ex, length);
}
catch (final Exception ex)
{
close();
LangUtil.rethrowUnchecked(ex);
}
}
|
@Test
void onBlockThrowsNullPointerExceptionIfInitWasNotCalled()
{
final Image image = mockImage(0L);
final RecordingWriter recordingWriter = new RecordingWriter(
1,
0,
SEGMENT_LENGTH,
image,
new Context().archiveDir(archiveDir),
mock(ArchiveConductor.Recorder.class));
assertThrows(
NullPointerException.class,
() -> recordingWriter.onBlock(new UnsafeBuffer(allocate(32)), 0, 10, 5, 8));
}
|
public AdvancedCache<Object, V> getCache(String name, MediaType keyContentType, MediaType valueContentType, RestRequest request) {
Subject subject = request.getSubject();
Flag[] flags = request.getFlags();
if (isCacheIgnored.test(name)) {
throw logger.cacheUnavailable(name);
}
if (keyContentType == null || valueContentType == null) {
throw logger.missingRequiredMediaType(name);
}
checkCacheAvailable(name);
CacheInfo<Object, V> cacheInfo = knownCaches.get(name);
if (cacheInfo == null) {
AdvancedCache<Object, V> cache = instance.<Object, V>getCache(name).getAdvancedCache()
.withFlags(Flag.IGNORE_RETURN_VALUES);
cacheInfo = new CacheInfo<>(cache);
knownCaches.putIfAbsent(name, cacheInfo);
}
AdvancedCache<Object, V> cache = cacheInfo.getCache(new KeyValuePair<>(keyContentType, valueContentType), subject);
if (flags != null && flags.length > 0) cache = cache.withFlags(flags);
return cache;
}
|
@Test
public void shouldReuseEncodedCaches() {
EmbeddedCacheManager embeddedCacheManager = Mockito.spy(cacheManager);
RestCacheManager<Object> restCacheManager = new RestCacheManager<>(embeddedCacheManager, c -> Boolean.FALSE);
Map<String, CacheInfo<Object, Object>> knownCaches = TestingUtil.extractField(restCacheManager, "knownCaches");
// Request cache by simple name
SimpleRequest request = new SimpleRequest.Builder().setPath("/test").build();
restCacheManager.getCache("cache1", request);
restCacheManager.getCache("cache2", request);
// Verify they are stored internally
assertEquals(knownCaches.size(), 2);
assertEquals(cachesSize(knownCaches.get("cache1")), 1);
assertEquals(cachesSize(knownCaches.get("cache2")), 1);
// Requesting again should not cause interaction with the cache manager
Mockito.reset(embeddedCacheManager);
restCacheManager.getCache("cache1", request);
restCacheManager.getCache("cache2", request);
Mockito.verify(embeddedCacheManager, never()).getCache("cache1");
Mockito.verify(embeddedCacheManager, never()).getCache("cache2");
assertEquals(cachesSize(knownCaches.get("cache1")), 1);
assertEquals(cachesSize(knownCaches.get("cache2")), 1);
// Request caches with a different media type
restCacheManager.getCache("cache1", MediaType.MATCH_ALL, MediaType.APPLICATION_JSON, request);
restCacheManager.getCache("cache2", MediaType.MATCH_ALL, MediaType.TEXT_PLAIN, request);
// Verify they are stored internally
assertEquals(knownCaches.size(), 2);
assertEquals(cachesSize(knownCaches.get("cache1")), 2);
assertEquals(cachesSize(knownCaches.get("cache2")), 2);
// Requesting again with same media type but different parameters should not reuse internal instance
Mockito.reset(embeddedCacheManager);
restCacheManager.getCache("cache1", MediaType.MATCH_ALL, MediaType.fromString("application/json; charset=UTF-8"), request);
restCacheManager.getCache("cache2", MediaType.MATCH_ALL, MediaType.fromString("text/plain; charset=SHIFT-JIS"), request);
assertEquals(knownCaches.size(), 2);
assertEquals(cachesSize(knownCaches.get("cache1")), 3);
assertEquals(cachesSize(knownCaches.get("cache2")), 3);
Mockito.verify(embeddedCacheManager, never()).getCache("cache1");
Mockito.verify(embeddedCacheManager, never()).getCache("cache2");
// Requesting with same params should reuse
restCacheManager.getCache("cache1", MediaType.MATCH_ALL, MediaType.fromString("application/json; charset=UTF-8"), request);
restCacheManager.getCache("cache2", MediaType.MATCH_ALL, MediaType.fromString("text/plain; charset=SHIFT-JIS"), request);
assertEquals(cachesSize(knownCaches.get("cache1")), 3);
assertEquals(cachesSize(knownCaches.get("cache2")), 3);
Mockito.verify(embeddedCacheManager, never()).getCache("cache1");
Mockito.verify(embeddedCacheManager, never()).getCache("cache2");
}
|
public static Object convertAvroFormat(
FieldType beamFieldType, Object avroValue, BigQueryUtils.ConversionOptions options) {
TypeName beamFieldTypeName = beamFieldType.getTypeName();
if (avroValue == null) {
if (beamFieldType.getNullable()) {
return null;
} else {
throw new IllegalArgumentException(String.format("Field %s not nullable", beamFieldType));
}
}
switch (beamFieldTypeName) {
case BYTE:
case INT16:
case INT32:
case INT64:
case FLOAT:
case DOUBLE:
case STRING:
case BYTES:
case BOOLEAN:
return convertAvroPrimitiveTypes(beamFieldTypeName, avroValue);
case DATETIME:
// Expecting value in microseconds.
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
case DECIMAL:
return convertAvroNumeric(avroValue);
case ARRAY:
return convertAvroArray(beamFieldType, avroValue, options);
case LOGICAL_TYPE:
LogicalType<?, ?> logicalType = beamFieldType.getLogicalType();
assert logicalType != null;
String identifier = logicalType.getIdentifier();
if (SqlTypes.DATE.getIdentifier().equals(identifier)) {
return convertAvroDate(avroValue);
} else if (SqlTypes.TIME.getIdentifier().equals(identifier)) {
return convertAvroTime(avroValue);
} else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) {
return convertAvroDateTime(avroValue);
} else if (SQL_DATE_TIME_TYPES.contains(identifier)) {
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
} else if (logicalType instanceof PassThroughLogicalType) {
return convertAvroFormat(logicalType.getBaseType(), avroValue, options);
} else {
throw new RuntimeException("Unknown logical type " + identifier);
}
case ROW:
Schema rowSchema = beamFieldType.getRowSchema();
if (rowSchema == null) {
throw new IllegalArgumentException("Nested ROW missing row schema");
}
GenericData.Record record = (GenericData.Record) avroValue;
return toBeamRow(record, rowSchema, options);
case MAP:
return convertAvroRecordToMap(beamFieldType, avroValue, options);
default:
throw new RuntimeException(
"Does not support converting unknown type value: " + beamFieldTypeName);
}
}
|
@Test
public void testNumericType() {
// BigQuery NUMERIC type has precision 38 and scale 9
BigDecimal n = new BigDecimal("123456789.987654321").setScale(9);
assertThat(
BigQueryUtils.convertAvroFormat(
FieldType.DECIMAL,
new Conversions.DecimalConversion().toBytes(n, null, LogicalTypes.decimal(38, 9)),
REJECT_OPTIONS),
equalTo(n));
}
|
public static boolean isOnList(@Nonnull final Set<String> list, @Nonnull final String ipAddress) {
Ipv4 remoteIpv4;
try {
remoteIpv4 = Ipv4.of(ipAddress);
} catch (IllegalArgumentException e) {
Log.trace("Address '{}' is not an IPv4 address.", ipAddress);
remoteIpv4 = null;
}
Ipv6 remoteIpv6;
try {
remoteIpv6 = Ipv6.of(ipAddress);
} catch (IllegalArgumentException e) {
Log.trace("Address '{}' is not an IPv6 address.", ipAddress);
remoteIpv6 = null;
}
if (remoteIpv4 == null && remoteIpv6 == null) {
Log.warn("Unable to parse '{}' as an IPv4 or IPv6 address!", ipAddress);
}
for (final String item : list) {
// Check if the remote address is an exact match on the list.
if (item.equals(ipAddress)) {
return true;
}
// Check if the remote address is a match for an address range on the list.
if (remoteIpv4 != null) {
Ipv4Range range;
try {
range = Ipv4Range.parse(item);
} catch (IllegalArgumentException e) {
Log.trace("List entry '{}' is not an IPv4 range.", item);
range = null;
}
if (range != null && range.contains(remoteIpv4)) {
return true;
}
}
if (remoteIpv6 != null) {
Ipv6Range range;
try {
range = Ipv6Range.parse(item);
} catch (IllegalArgumentException e) {
Log.trace("List entry '{}' is not an IPv6 range.", item);
range = null;
}
if (range != null && range.contains(remoteIpv6)) {
return true;
}
}
}
return false;
}
|
@Test
public void ipOnListRange() throws Exception {
// Setup test fixture.
final String input = "203.0.113.251";
final Set<String> list = new HashSet<>();
list.add("203.0.113.25-203.0.113.251");
// Execute system under test.
final boolean result = AuthCheckFilter.isOnList(list, input);
// Verify result.
assertTrue(result);
}
|
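Besides exact matches and the dash-separated range form used in the test, Ipv4Range.parse in the ip-math library also understands CIDR notation, so a CIDR list entry should match too. A hedged companion case, assuming that library behavior:

// Hypothetical companion case: CIDR entries should also be accepted by Ipv4Range.parse.
final Set<String> cidrList = new HashSet<>();
cidrList.add("203.0.113.0/24");
assertTrue(AuthCheckFilter.isOnList(cidrList, "203.0.113.251"));
|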
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return delegate.invokeAny(tasks);
}
|
@Test
public void invokeAny2() throws InterruptedException, ExecutionException, TimeoutException {
underTest.invokeAny(callables, timeout, SECONDS);
verify(executorService).invokeAny(callables, timeout, SECONDS);
}
|
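The test here exercises the timed invokeAny variant rather than the untimed delegate shown as the focal method. A minimal sketch of that sibling overload, assuming the same pass-through delegation to the delegate field:

@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
// Pass straight through, preserving the wrapped executor's timeout semantics.
return delegate.invokeAny(tasks, timeout, unit);
}
|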
@Override
public Collection<Permission> getPermissions(Action action) {
if (!(action instanceof DestinationAction)) {
throw new IllegalArgumentException("Action argument must be a " + DestinationAction.class.getName() + " instance.");
}
DestinationAction da = (DestinationAction) action;
return getPermissions(da);
}
|
@Test
public void testGetPermissionsWithTemporaryTopic() {
ActiveMQTempTopic topic = new ActiveMQTempTopic("myTempTopic");
DestinationAction action = new DestinationAction(new ConnectionContext(), topic, "remove");
Collection<Permission> perms = resolver.getPermissions(action);
assertPermString("temp-topic:myTempTopic:remove", perms);
}
|
@Override
public PrimitiveTypeEncoding<UTF8Buffer> getCanonicalEncoding() {
return largeBufferEncoding;
}
|
@Test
public void testGetCanonicalEncoding() {
assertNotNull(utf8BufferEncoding.getCanonicalEncoding());
}
|
@VisibleForTesting
public ProcessContinuation run(
RestrictionTracker<OffsetRange, Long> tracker,
OutputReceiver<PartitionRecord> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
InitialPipelineState initialPipelineState)
throws Exception {
LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
if (tracker.currentRestriction().getFrom() == 0L) {
if (!tracker.tryClaim(0L)) {
LOG.error(
"Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
return ProcessContinuation.stop();
}
watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
if (initialPipelineState.isResume()) {
resumeFromPreviousPipelineAction.run(receiver);
} else {
generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
}
return ProcessContinuation.resume();
}
// Create a new partition reconciler every run to reset the state each time.
partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
orphanedMetadataCleaner = new OrphanedMetadataCleaner();
// Calculating the new value of watermark is a resource intensive process. We have to do a full
// scan of the metadata table and then ensure we're not missing partitions and then calculate
// the low watermark. This is usually a fairly fast process even with thousands of partitions.
// However, sometimes this may take so long that the runner checkpoints before the watermark is
// calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
// restart, wasting the resources spent calculating the watermark. On restart, we will try to
// calculate the watermark again. The problem causing the slow watermark calculation can persist
// leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
// the watermark after successful tryClaim. Then we write to the metadata table the new
// watermark. On the start of each run we read the watermark and update the DoFn's watermark.
DetectNewPartitionsState detectNewPartitionsState =
metadataTableDao.readDetectNewPartitionsState();
if (detectNewPartitionsState != null) {
watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
}
// Terminate if endTime <= watermark that means all partitions have read up to or beyond
// watermark. We no longer need to manage splits and merges, we can terminate.
if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
tracker.tryClaim(tracker.currentRestriction().getTo());
return ProcessContinuation.stop();
}
if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
return ProcessContinuation.stop();
}
// Read StreamPartitions to calculate watermark.
List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
}
// Process NewPartitions and track the ones successfully outputted.
List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
for (NewPartition newPartition : newPartitions) {
if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
outputtedNewPartitions.add(newPartition.getPartition());
} else if (streamPartitionsWithWatermark != null) {
// streamPartitionsWithWatermark is non-null only on runs that update the watermark, and
// reconciliation only runs when the watermark is updated. Only track incomplete
// NewPartitions if reconciliation is being run.
partitionReconciler.addIncompleteNewPartitions(newPartition);
orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
}
}
// Process the watermark using read StreamPartitions and NewPartitions.
if (streamPartitionsWithWatermark != null) {
Optional<Instant> maybeWatermark =
getNewWatermark(streamPartitionsWithWatermark, newPartitions);
maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
// Only start reconciling after the pipeline has been running for a while.
if (tracker.currentRestriction().getFrom() > 50) {
// Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
// streamed. This isn't perfect because there may be partitions moving between
// StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
// include NewPartitions marked as deleted from a previous DNP run not yet processed by
// RCSP.
List<ByteStringRange> existingPartitions =
streamPartitionsWithWatermark.stream()
.map(StreamPartitionWithWatermark::getPartition)
.collect(Collectors.toList());
existingPartitions.addAll(outputtedNewPartitions);
List<ByteStringRange> missingStreamPartitions =
getMissingPartitionsFromEntireKeySpace(existingPartitions);
orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
partitionReconciler.addMissingPartitions(missingStreamPartitions);
processReconcilerPartitions(
receiver, watermarkEstimator, initialPipelineState.getStartTime());
cleanUpOrphanedMetadata();
}
}
return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
}
|
@Test
public void testProcessMergeNewPartitions() throws Exception {
// Avoid 0 and multiples of 2 so that we can specifically test just reading new partitions.
OffsetRange offsetRange = new OffsetRange(1, Long.MAX_VALUE);
when(tracker.currentRestriction()).thenReturn(offsetRange);
when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
// ["a, "b") and ["b", "c") merge into ["a", "c")
ByteStringRange childPartitionAC = ByteStringRange.create("a", "c");
ByteStringRange parentPartitionAB = ByteStringRange.create("a", "b");
Instant watermarkAB = startTime;
ChangeStreamContinuationToken tokenAB =
ChangeStreamContinuationToken.create(parentPartitionAB, "ab");
NewPartition newPartitionACFromAB =
new NewPartition(childPartitionAC, Collections.singletonList(tokenAB), watermarkAB);
ByteStringRange parentPartitionBC = ByteStringRange.create("b", "c");
Instant watermarkBC = startTime.plus(Duration.millis(10));
ChangeStreamContinuationToken tokenBC =
ChangeStreamContinuationToken.create(parentPartitionBC, "bc");
NewPartition newPartitionACFromBC =
new NewPartition(childPartitionAC, Collections.singletonList(tokenBC), watermarkBC);
// Write a new partition for every parent partition that merges into the child.
metadataTableDao.writeNewPartition(newPartitionACFromAB);
metadataTableDao.writeNewPartition(newPartitionACFromBC);
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
// The partition is outputted with watermark1 because that is the lowest of the 2 forming the
// parent low watermark.
verify(receiver, times(1))
.outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH));
assertEquals(childPartitionAC, partitionRecordArgumentCaptor.getValue().getPartition());
assertEquals(watermarkAB, partitionRecordArgumentCaptor.getValue().getParentLowWatermark());
assertEquals(endTime, partitionRecordArgumentCaptor.getValue().getEndTime());
assertThat(
partitionRecordArgumentCaptor.getValue().getChangeStreamContinuationTokens(),
Matchers.containsInAnyOrder(tokenAB, tokenBC));
assertTrue(metadataTableDao.readNewPartitions().isEmpty());
}
|
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap) {
// Skip this test if NOSASL
if (mConf.get(PropertyKey.SECURITY_AUTHENTICATION_TYPE)
.equals(AuthType.NOSASL)) {
return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(),
String.format("Impersonation validation is skipped for NOSASL"), "");
}
ValidationTaskResult loadConfig = loadHdfsConfig();
if (loadConfig.getState() != ValidationUtils.State.OK) {
mAdvice.insert(0, "Validating the proxy user requires additional HDFS "
+ "configuration. ");
return loadConfig.setAdvice(mAdvice.toString());
}
// TODO(jiacheng): validate proxyuser.hosts for the cluster
// Validate proxyuser config for the current Alluxio user
try {
String alluxioUser = getCurrentUser();
return validateProxyUsers(alluxioUser);
} catch (UnauthenticatedException e) {
mMsg.append(String.format("Failed to authenticate in Alluxio: "));
mMsg.append(ExceptionUtils.asPlainText(e));
mAdvice.append("Please fix the authentication issue.");
return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
mMsg.toString(), mAdvice.toString());
}
}
|
@Test
public void missingProxyUser() {
String userName = System.getProperty("user.name");
// No proxy user definition in core-site.xml
prepareHdfsConfFiles(ImmutableMap.of("key1", "value1"));
HdfsProxyUserValidationTask task =
new HdfsProxyUserValidationTask("hdfs://namenode:9000/alluxio", mConf);
ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
assertEquals(ValidationUtils.State.FAILED, result.getState());
assertThat(result.getResult(), containsString(
"Alluxio is not able to perform impersonation."));
assertThat(result.getAdvice(), containsString(
String.format("Please enable Alluxio user %s to impersonate", userName)));
}
|
public static void deleteTaskMetadata(HelixPropertyStore<ZNRecord> propertyStore, String taskType,
String tableNameWithType) {
String newPath = ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadata(tableNameWithType, taskType);
String oldPath =
ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadataDeprecated(taskType, tableNameWithType);
boolean newPathDeleted = propertyStore.remove(newPath, AccessOption.PERSISTENT);
boolean oldPathDeleted = propertyStore.remove(oldPath, AccessOption.PERSISTENT);
if (!newPathDeleted || !oldPathDeleted) {
throw new ZkException("Failed to delete task metadata: " + taskType + ", " + tableNameWithType);
}
}
|
@Test
public void testDeleteTaskMetadataWithException() {
// Test happy path. No exceptions thrown.
HelixPropertyStore<ZNRecord> mockPropertyStore = Mockito.mock(HelixPropertyStore.class);
when(mockPropertyStore.remove(ArgumentMatchers.anyString(), ArgumentMatchers.anyInt())).thenReturn(true);
MinionTaskMetadataUtils.deleteTaskMetadata(mockPropertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE);
// Test exception thrown
when(mockPropertyStore.remove(ArgumentMatchers.anyString(), ArgumentMatchers.anyInt())).thenReturn(false);
try {
MinionTaskMetadataUtils.deleteTaskMetadata(mockPropertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE);
fail("ZkException should have been thrown");
} catch (ZkException e) {
assertEquals(e.getMessage(), "Failed to delete task metadata: TestTaskType, TestTable_OFFLINE");
}
}
|
public static Map<String, String> decodeProperties(ByteBuffer byteBuffer) {
int sysFlag = byteBuffer.getInt(SYSFLAG_POSITION);
int magicCode = byteBuffer.getInt(MESSAGE_MAGIC_CODE_POSITION);
MessageVersion version = MessageVersion.valueOfMagicCode(magicCode);
int bornhostLength = (sysFlag & MessageSysFlag.BORNHOST_V6_FLAG) == 0 ? 8 : 20;
int storehostAddressLength = (sysFlag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0 ? 8 : 20;
int bodySizePosition = 4 // 1 TOTALSIZE
+ 4 // 2 MAGICCODE
+ 4 // 3 BODYCRC
+ 4 // 4 QUEUEID
+ 4 // 5 FLAG
+ 8 // 6 QUEUEOFFSET
+ 8 // 7 PHYSICALOFFSET
+ 4 // 8 SYSFLAG
+ 8 // 9 BORNTIMESTAMP
+ bornhostLength // 10 BORNHOST
+ 8 // 11 STORETIMESTAMP
+ storehostAddressLength // 12 STOREHOSTADDRESS
+ 4 // 13 RECONSUMETIMES
+ 8; // 14 Prepared Transaction Offset
int topicLengthPosition = bodySizePosition + 4 + byteBuffer.getInt(bodySizePosition);
byteBuffer.position(topicLengthPosition);
int topicLengthSize = version.getTopicLengthSize();
int topicLength = version.getTopicLength(byteBuffer);
int propertiesPosition = topicLengthPosition + topicLengthSize + topicLength;
short propertiesLength = byteBuffer.getShort(propertiesPosition);
byteBuffer.position(propertiesPosition + 2);
if (propertiesLength > 0) {
byte[] properties = new byte[propertiesLength];
byteBuffer.get(properties);
String propertiesString = new String(properties, CHARSET_UTF8);
return string2messageProperties(propertiesString);
}
return null;
}
|
@Test
public void testDecodeProperties() {
MessageExt messageExt = new MessageExt();
messageExt.setMsgId("645100FA00002A9F000000489A3AA09E");
messageExt.setTopic("abc");
messageExt.setBody("hello!q!".getBytes());
try {
messageExt.setBornHost(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0));
} catch (UnknownHostException e) {
e.printStackTrace();
assertThat(Boolean.FALSE).isTrue();
}
messageExt.setBornTimestamp(System.currentTimeMillis());
messageExt.setCommitLogOffset(123456);
messageExt.setPreparedTransactionOffset(0);
messageExt.setQueueId(0);
messageExt.setQueueOffset(123);
messageExt.setReconsumeTimes(0);
try {
messageExt.setStoreHost(new InetSocketAddress(InetAddress.getLocalHost(), 0));
} catch (UnknownHostException e) {
e.printStackTrace();
assertThat(Boolean.FALSE).isTrue();
}
messageExt.putUserProperty("a", "123");
messageExt.putUserProperty("b", "hello");
messageExt.putUserProperty("c", "3.14");
{
byte[] msgBytes = new byte[0];
try {
msgBytes = MessageDecoder.encode(messageExt, false);
} catch (Exception e) {
e.printStackTrace();
assertThat(Boolean.FALSE).isTrue();
}
ByteBuffer byteBuffer = ByteBuffer.allocate(msgBytes.length);
byteBuffer.put(msgBytes);
Map<String, String> properties = MessageDecoder.decodeProperties(byteBuffer);
assertThat(properties).isNotNull();
assertThat("123").isEqualTo(properties.get("a"));
assertThat("hello").isEqualTo(properties.get("b"));
assertThat("3.14").isEqualTo(properties.get("c"));
}
}
|
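For orientation: with IPv4 born/store host addresses (8 bytes each, as encoded in the test), the fixed-size prefix sums to 4 + 4 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + 8 + 8 + 8 + 4 + 8 = 84, so bodySizePosition is 84. topicLengthPosition is then 84 + 4 + bodyLength, and the properties length is the short that immediately follows the topic bytes.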
static void addClusterToMirrorMaker2ConnectorConfig(Map<String, Object> config, KafkaMirrorMaker2ClusterSpec cluster, String configPrefix) {
config.put(configPrefix + "alias", cluster.getAlias());
config.put(configPrefix + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
String securityProtocol = addTLSConfigToMirrorMaker2ConnectorConfig(config, cluster, configPrefix);
if (cluster.getAuthentication() != null) {
if (cluster.getAuthentication() instanceof KafkaClientAuthenticationTls) {
config.put(configPrefix + SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12");
config.put(configPrefix + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STORE_LOCATION_ROOT + cluster.getAlias() + KEYSTORE_SUFFIX);
config.put(configPrefix + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "${file:" + CONNECTORS_CONFIG_FILE + ":" + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG + "}");
} else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationPlain plainAuthentication) {
securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "PLAIN");
config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG,
AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.plain.PlainLoginModule",
Map.of("username", plainAuthentication.getUsername(),
"password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
} else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationScram scramAuthentication) {
securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
config.put(configPrefix + SaslConfigs.SASL_MECHANISM, scramAuthentication instanceof KafkaClientAuthenticationScramSha256 ? "SCRAM-SHA-256" : "SCRAM-SHA-512");
config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG,
AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.scram.ScramLoginModule",
Map.of("username", scramAuthentication.getUsername(),
"password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
} else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationOAuth oauthAuthentication) {
securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "OAUTHBEARER");
config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG,
oauthJaasConfig(cluster, oauthAuthentication));
config.put(configPrefix + SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler");
}
}
// Security protocol
config.put(configPrefix + AdminClientConfig.SECURITY_PROTOCOL_CONFIG, securityProtocol);
config.putAll(cluster.getConfig().entrySet().stream()
.collect(Collectors.toMap(entry -> configPrefix + entry.getKey(), Map.Entry::getValue)));
config.putAll(cluster.getAdditionalProperties());
}
|
@Test
public void testAddClusterToMirrorMaker2ConnectorConfigWithPlain() {
Map<String, Object> config = new HashMap<>();
KafkaMirrorMaker2ClusterSpec cluster = new KafkaMirrorMaker2ClusterSpecBuilder()
.withAlias("sourceClusterAlias")
.withBootstrapServers("sourceClusterAlias.sourceNamespace.svc:9092")
.withNewKafkaClientAuthenticationPlain()
.withUsername("shaza")
.withNewPasswordSecret()
.withPassword("pa55word")
.endPasswordSecret()
.endKafkaClientAuthenticationPlain()
.build();
KafkaMirrorMaker2Connectors.addClusterToMirrorMaker2ConnectorConfig(config, cluster, PREFIX);
String jaasConfig = (String) config.remove("prefix.sasl.jaas.config");
AppConfigurationEntry configEntry = AuthenticationUtilsTest.parseJaasConfig(jaasConfig);
assertThat(configEntry.getLoginModuleName(), is("org.apache.kafka.common.security.plain.PlainLoginModule"));
assertThat(configEntry.getOptions(),
is(Map.of("username", "shaza",
"password", "${file:/tmp/strimzi-mirrormaker2-connector.properties:sourceClusterAlias.sasl.password}")));
assertThat(new TreeMap<>(config),
is(new TreeMap<>(Map.of("prefix.alias", "sourceClusterAlias",
"prefix.security.protocol", "SASL_PLAINTEXT",
"prefix.sasl.mechanism", "PLAIN",
"prefix.bootstrap.servers", "sourceClusterAlias.sourceNamespace.svc:9092"))));
}
|
@Override
public void track(String eventName, JSONObject properties) {
}
|
@Test
public void testTrack() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.track("TestTrackEvent", new JSONObject());
}
|
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return typesMatch(type, genericType) && MoreMediaTypes.TEXT_CSV_TYPE.isCompatible(mediaType);
}
|
@Test
void isWritableForSimpleMessages() {
boolean isWritable = sut.isWriteable(SimpleMessageChunk.class, null, null, MoreMediaTypes.TEXT_CSV_TYPE);
assertThat(isWritable).isTrue();
}
|
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
}
|
@Test
public void testWarnWithException() {
final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
logger.warn("a", e);
verify(mockLogger).warn("a", e);
}
|
static Map<String, Object> addTag(Map<String, Object> e, List<Object> tags) {
Event tempEvent = new org.logstash.Event(e);
addTag(tempEvent, tags);
return tempEvent.getData();
}
|
@Test
public void testAddTag() {
// add tag to empty event
Event e = new Event();
String testTag = "test_tag";
CommonActions.addTag(e, Collections.singletonList(testTag));
Object value = e.getField(TAGS);
Assert.assertTrue(value instanceof List);
Assert.assertEquals(1, ((List) value).size());
Assert.assertEquals(testTag, ((List) value).get(0));
// add two tags to empty event
e = new Event();
String testTag2 = "test_tag2";
CommonActions.addTag(e, Arrays.asList(testTag, testTag2));
value = e.getField(TAGS);
Assert.assertTrue(value instanceof List);
Assert.assertEquals(2, ((List) value).size());
Assert.assertEquals(testTag, ((List) value).get(0));
Assert.assertEquals(testTag2, ((List) value).get(1));
// add duplicate tag
e = new Event();
e.tag(testTag);
CommonActions.addTag(e, Collections.singletonList(testTag));
value = e.getField(TAGS);
Assert.assertTrue(value instanceof List);
Assert.assertEquals(1, ((List) value).size());
Assert.assertEquals(testTag, ((List) value).get(0));
// add dynamically-named tag
e = new Event(Collections.singletonMap(testTag, testTag2));
CommonActions.addTag(e, Collections.singletonList("%{" + testTag + "}_foo"));
value = e.getField(TAGS);
Assert.assertTrue(value instanceof List);
Assert.assertEquals(1, ((List) value).size());
Assert.assertEquals(testTag2 + "_foo", ((List) value).get(0));
// add non-string tag
e = new Event();
Long nonStringTag = 42L;
CommonActions.addTag(e, Collections.singletonList(nonStringTag));
value = e.getField(TAGS);
Assert.assertTrue(value instanceof List);
Assert.assertEquals(1, ((List) value).size());
Assert.assertEquals(nonStringTag.toString(), ((List) value).get(0));
}
|
public Optional<Account> getByAccountIdentifier(final UUID uuid) {
return checkRedisThenAccounts(
getByUuidTimer,
() -> redisGetByAccountIdentifier(uuid),
() -> accounts.getByAccountIdentifier(uuid)
);
}
|
@Test
void testGetAccountByUuidBrokenCache() {
UUID uuid = UUID.randomUUID();
UUID pni = UUID.randomUUID();
Account account = AccountsHelper.generateTestAccount("+14152222222", uuid, pni, new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
when(commands.get(eq("Account3::" + uuid))).thenThrow(new RedisException("Connection lost!"));
when(accounts.getByAccountIdentifier(eq(uuid))).thenReturn(Optional.of(account));
Optional<Account> retrieved = accountsManager.getByAccountIdentifier(uuid);
assertTrue(retrieved.isPresent());
assertSame(retrieved.get(), account);
verify(commands, times(1)).get(eq("Account3::" + uuid));
verify(commands, times(1)).setex(eq("AccountMap::" + pni), anyLong(), eq(uuid.toString()));
verify(commands, times(1)).setex(eq("Account3::" + uuid), anyLong(), anyString());
verifyNoMoreInteractions(commands);
verify(accounts, times(1)).getByAccountIdentifier(eq(uuid));
verifyNoMoreInteractions(accounts);
}
|
public void insert(UUID destinationUuid, byte destinationDevice, Envelope message) {
final UUID messageGuid = UUID.randomUUID();
messagesCache.insert(messageGuid, destinationUuid, destinationDevice, message);
if (message.hasSourceUuid() && !destinationUuid.toString().equals(message.getSourceUuid())) {
reportMessageManager.store(message.getSourceUuid(), messageGuid);
}
}
|
@Test
void insert() {
final UUID sourceAci = UUID.randomUUID();
final Envelope message = Envelope.newBuilder()
.setSourceUuid(sourceAci.toString())
.build();
final UUID destinationUuid = UUID.randomUUID();
messagesManager.insert(destinationUuid, Device.PRIMARY_ID, message);
verify(reportMessageManager).store(eq(sourceAci.toString()), any(UUID.class));
final Envelope syncMessage = Envelope.newBuilder(message)
.setSourceUuid(destinationUuid.toString())
.build();
messagesManager.insert(destinationUuid, Device.PRIMARY_ID, syncMessage);
verifyNoMoreInteractions(reportMessageManager);
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
this.trash(files, prompt, callback);
for(Path f : files.keySet()) {
fileid.cache(f, null);
}
}
|
@Test
public void testDeleteMultipleFiles() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path folder = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory));
final Path file1 = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Path file2 = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new EueDirectoryFeature(session, fileid).mkdir(folder, new TransferStatus());
createFile(fileid, file1, RandomUtils.nextBytes(511));
createFile(fileid, file2, RandomUtils.nextBytes(214));
assertTrue(new EueFindFeature(session, fileid).find(file1));
assertTrue(new EueFindFeature(session, fileid).find(file2));
new EueTrashFeature(session, fileid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new EueFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
assertFalse((new EueFindFeature(session, fileid).find(file2, new DisabledListProgressListener())));
assertTrue(new EueFindFeature(session, fileid).find(folder, new DisabledListProgressListener()));
new EueTrashFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new EueFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
}
|
public static byte[] readEntry(ZipFile archive, String entryName, ObservationRegistry observations) {
return Observation.createNotStarted("ArchiveUtil#readEntry", observations).observe(() -> {
var entry = archive.getEntry(entryName);
if (entry == null)
return null;
return readEntry(archive, entry, observations);
});
}
|
@Test
public void testTodoTree() throws Exception {
var packageUrl = getClass().getResource("todo-tree.zip");
assertThat(packageUrl.getProtocol()).isEqualTo("file");
try (
var archive = new ZipFile(packageUrl.getPath());
) {
var packageJson = ArchiveUtil.readEntry(archive, "extension/package.json", ObservationRegistry.NOOP);
assertThat(packageJson.length).isEqualTo(44712);
var icon = ArchiveUtil.readEntry(archive, "extension/resources/todo-tree.png", ObservationRegistry.NOOP);
assertThat(icon.length).isEqualTo(8854);
}
}
|
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
}
|
@Test
public void structToJson() {
Schema schema = SchemaBuilder.struct().field("field1", Schema.BOOLEAN_SCHEMA).field("field2", Schema.STRING_SCHEMA).field("field3", Schema.STRING_SCHEMA).field("field4", Schema.BOOLEAN_SCHEMA).build();
Struct input = new Struct(schema).put("field1", true).put("field2", "string2").put("field3", "string3").put("field4", false);
JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, input));
validateEnvelope(converted);
assertEquals(parse("{ \"type\": \"struct\", \"optional\": false, \"fields\": [{ \"field\": \"field1\", \"type\": \"boolean\", \"optional\": false }, { \"field\": \"field2\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field3\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field4\", \"type\": \"boolean\", \"optional\": false }] }"),
converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
assertEquals(JsonNodeFactory.instance.objectNode()
.put("field1", true)
.put("field2", "string2")
.put("field3", "string3")
.put("field4", false),
converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}
|
public static Map<Integer, Uuid> createAssignmentMap(int[] replicas, Uuid[] directories) {
if (replicas.length != directories.length) {
throw new IllegalArgumentException("The lengths for replicas and directories do not match.");
}
Map<Integer, Uuid> assignments = new HashMap<>();
for (int i = 0; i < replicas.length; i++) {
int brokerId = replicas[i];
Uuid directory = directories[i];
if (assignments.put(brokerId, directory) != null) {
throw new IllegalArgumentException("Duplicate broker ID in assignment");
}
}
return assignments;
}
|
@Test
void testCreateAssignmentMap() {
assertThrows(IllegalArgumentException.class, () ->
DirectoryId.createAssignmentMap(new int[]{1, 2}, DirectoryId.unassignedArray(3)));
assertEquals(
new HashMap<Integer, Uuid>() {{
put(1, Uuid.fromString("upjfkCrUR9GNn1i94ip1wg"));
put(2, Uuid.fromString("bCF3l0RIQjOKhUqgbivHZA"));
put(3, Uuid.fromString("Fg3mFhcVQlqCWRk4dZazxw"));
put(4, Uuid.fromString("bv9TEYi4TqOm52hLmrxT5w"));
}},
DirectoryId.createAssignmentMap(
new int[] {1, 2, 3, 4},
new Uuid[] {
Uuid.fromString("upjfkCrUR9GNn1i94ip1wg"),
Uuid.fromString("bCF3l0RIQjOKhUqgbivHZA"),
Uuid.fromString("Fg3mFhcVQlqCWRk4dZazxw"),
Uuid.fromString("bv9TEYi4TqOm52hLmrxT5w")
})
);
}
|
@Override
public boolean canDecrypt(String cipherText) {
if (isBlank(cipherText)) {
return false;
}
String[] splits = cipherText.split(":");
return splits.length == 3 && "AES".equals(splits[0]) && isNotBlank(splits[1]) && isNotBlank(splits[2]);
}
|
@Test
public void canDecryptShouldAnswerTrueIfPasswordLooksLikeAES() {
assertThat(aesEncrypter.canDecrypt("AES:foo:bar")).isTrue();
assertThat(aesEncrypter.canDecrypt("aes:bar:baz")).isFalse();
assertThat(aesEncrypter.canDecrypt("")).isFalse();
assertThat(aesEncrypter.canDecrypt("\t\n")).isFalse();
assertThat(aesEncrypter.canDecrypt(null)).isFalse();
assertThat(aesEncrypter.canDecrypt("foo:bar:baz")).isFalse();
assertThat(aesEncrypter.canDecrypt("aes::")).isFalse();
assertThat(aesEncrypter.canDecrypt("aes:asdf:")).isFalse();
assertThat(aesEncrypter.canDecrypt("aes::asdf")).isFalse();
}
|
public static String toString(long unixTime, String pattern) {
return Instant.ofEpochSecond(unixTime).atZone(ZoneId.systemDefault()).format(DateTimeFormatter.ofPattern(pattern));
}
|
@Test
public void testToString() {
String dateStr = DateKit.toString(date, "yyyy-MM-dd");
Assert.assertEquals("2017-09-20", dateStr);
}
|
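A worked instance, assuming the test fixture date denotes 2017-09-20: epoch second 1505865600 is 2017-09-20T00:00:00Z, and the method formats in the JVM's default zone, so the asserted string holds when that zone is UTC (zones west of UTC would shift the date):

// 1505865600L = 2017-09-20T00:00:00Z; output depends on ZoneId.systemDefault().
String s = DateKit.toString(1505865600L, "yyyy-MM-dd"); // "2017-09-20" in a UTC default zone
|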
@Override
public void setMonochrome(boolean monochrome) {
formats = monochrome ? monochrome() : ansi();
}
|
@Test
void should_print_output_from_before_hooks() {
Feature feature = TestFeatureParser.parse("path/test.feature", "" +
"Feature: feature name\n" +
" Scenario: scenario name\n" +
" Given first step\n");
ByteArrayOutputStream out = new ByteArrayOutputStream();
Runtime.builder()
.withFeatureSupplier(new StubFeatureSupplier(feature))
.withAdditionalPlugins(new PrettyFormatter(out))
.withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build())
.withBackendSupplier(new StubBackendSupplier(
singletonList(new StubHookDefinition(testCaseState -> testCaseState.log("printed from hook"))),
singletonList(new StubStepDefinition("first step", "path/step_definitions.java:3")),
emptyList()))
.build()
.run();
assertThat(out, bytes(equalToCompressingWhiteSpace("" +
"Scenario: scenario name # path/test.feature:2\n" +
"\n" +
" printed from hook\n" +
"\n" +
" Given first step # path/step_definitions.java:3\n")));
}
|
@Deprecated
@Override
public void init(final ProcessorContext context,
final StateStore root) {
this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext<?, ?>) context : null;
taskId = context.taskId();
initStoreSerde(context);
streamsMetrics = (StreamsMetricsImpl) context.metrics();
registerMetrics();
final Sensor restoreSensor =
StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
// register and possibly restore the state from the logs
maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
|
@Test
public void shouldDelegateInit() {
final MeteredWindowStore<String, String> outer = new MeteredWindowStore<>(
innerStoreMock,
WINDOW_SIZE_MS, // any size
STORE_TYPE,
new MockTime(),
Serdes.String(),
new SerdeThatDoesntHandleNull()
);
when(innerStoreMock.name()).thenReturn("store");
doNothing().when(innerStoreMock).init((StateStoreContext) context, outer);
outer.init((StateStoreContext) context, outer);
}
|
public static String byteCountToDisplaySize(long size) {
if (size < 1024L) {
return size + (size == 1 ? " byte" : " bytes");
}
long exp = (long) (Math.log(size) / Math.log(1024));
double value = size / Math.pow(1024, exp);
char unit = "KMGTPEZY".charAt((int) exp - 1);
return String.format("%.1f %s%s", value, unit, "B");
}
|
@Test
public void shouldConvertBytesToGiga() {
long twoGiga = 2L * 1024 * 1024 * 1024 + 512 * 1024 * 1024;
assertThat(FileSizeUtils.byteCountToDisplaySize(twoGiga), is("2.5 GB"));
}
|
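The test value works out as follows: 2.5 GiB = 2,684,354,560 bytes; log(2,684,354,560) / log(1024) ≈ 3.32, truncated to exp = 3; the unit is "KMGTPEZY".charAt(2) = 'G'; and 2,684,354,560 / 1024^3 = 2.5, so the method returns "2.5 GB".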
@Override
public void createPod(Pod pod) {
checkNotNull(pod, ERR_NULL_POD);
checkArgument(!Strings.isNullOrEmpty(pod.getMetadata().getUid()),
ERR_NULL_POD_UID);
kubevirtPodStore.createPod(pod);
log.debug(String.format(MSG_POD, pod.getMetadata().getName(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicatePod() {
target.createPod(POD);
target.createPod(POD);
}
|
@Override
public double readDouble() {
return Double.longBitsToDouble(readLong());
}
|
@Test
public void testReadDoubleAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().readDouble();
}
});
}
|
@Override
public Long createMailTemplate(MailTemplateSaveReqVO createReqVO) {
// Validate that the code is unique
validateCodeUnique(null, createReqVO.getCode());
// Insert the template
MailTemplateDO template = BeanUtils.toBean(createReqVO, MailTemplateDO.class)
.setParams(parseTemplateContentParams(createReqVO.getContent()));
mailTemplateMapper.insert(template);
return template.getId();
}
|
@Test
public void testCreateMailTemplate_success() {
// Prepare the request parameters
MailTemplateSaveReqVO reqVO = randomPojo(MailTemplateSaveReqVO.class)
.setId(null); // prevent the id from being pre-assigned
// Invoke
Long mailTemplateId = mailTemplateService.createMailTemplate(reqVO);
// Assert
assertNotNull(mailTemplateId);
// Verify that the persisted record's attributes are correct
MailTemplateDO mailTemplate = mailTemplateMapper.selectById(mailTemplateId);
assertPojoEquals(reqVO, mailTemplate, "id");
}
|
@Override
public RetryStrategy getNextRetryStrategy() {
int nextRemainingRetries = remainingRetries - 1;
Preconditions.checkState(
nextRemainingRetries >= 0, "The number of remaining retries must not be negative");
return new FixedRetryStrategy(nextRemainingRetries, retryDelay);
}
|
@Test
void testRetryFailure() {
assertThatThrownBy(
() ->
new FixedRetryStrategy(0, Duration.ofMillis(5L))
.getNextRetryStrategy())
.isInstanceOf(IllegalStateException.class);
}
|
@Override
public String execute(CommandContext commandContext, String[] args) {
Channel channel = commandContext.getRemote();
String service = channel.attr(ChangeTelnet.SERVICE_KEY).get();
if ((service == null || service.length() == 0) && (args == null || args.length == 0)) {
return "Please input service name, eg: \r\ncount XxxService\r\ncount XxxService xxxMethod\r\ncount XxxService xxxMethod 10\r\nor \"cd XxxService\" firstly.";
}
StringBuilder buf = new StringBuilder();
if (service != null && service.length() > 0) {
buf.append("Use default service ").append(service).append(".\r\n");
}
String method;
String times;
if (service == null || service.length() == 0) {
service = args[0];
method = args.length > 1 ? args[1] : null;
} else {
method = args.length > 0 ? args[0] : null;
}
if (StringUtils.isNumber(method)) {
times = method;
method = null;
} else {
times = args.length > 2 ? args[2] : "1";
}
if (!StringUtils.isNumber(times)) {
return "Illegal times " + times + ", must be integer.";
}
final int t = Integer.parseInt(times);
Invoker<?> invoker = null;
for (Exporter<?> exporter : dubboProtocol.getExporters()) {
if (service.equals(exporter.getInvoker().getInterface().getSimpleName())
|| service.equals(exporter.getInvoker().getInterface().getName())
|| service.equals(exporter.getInvoker().getUrl().getPath())
|| service.equals(exporter.getInvoker().getUrl().getServiceKey())) {
invoker = exporter.getInvoker();
break;
}
}
if (invoker != null) {
if (t > 0) {
final String mtd = method;
final Invoker<?> inv = invoker;
Thread thread = new Thread(
() -> {
for (int i = 0; i < t; i++) {
String result = count(inv, mtd);
try {
send(channel, "\r\n" + result);
} catch (RemotingException e1) {
return;
}
if (i < t - 1) {
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
}
}
}
try {
send(channel, "\r\n" + PROMPT);
} catch (RemotingException ignored) {
}
},
"TelnetCount");
thread.setDaemon(true);
thread.start();
}
} else {
buf.append("No such service ").append(service);
}
return buf.toString();
}
|
@Test
void test() throws Exception {
String methodName = "sayHello";
RpcStatus.removeStatus(url, methodName);
String[] args = new String[] {"org.apache.dubbo.qos.legacy.service.DemoService", "sayHello", "1"};
ExtensionLoader.getExtensionLoader(Protocol.class)
.getExtension(DubboProtocol.NAME)
.export(mockInvoker);
RpcStatus.beginCount(url, methodName);
RpcStatus.endCount(url, methodName, 10L, true);
count.execute(mockCommandContext, args);
latch.await();
StringBuilder sb = new StringBuilder();
for (Object o : mockChannel.getReceivedObjects()) {
sb.append(o.toString());
}
assertThat(sb.toString(), containsString(buildTable(methodName, 10, 10, "1", "0", "0")));
}
|
@Override
public MapperResult findChangeConfig(MapperContext context) {
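        // Derby paginates with OFFSET ... FETCH NEXT rather than LIMIT.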
String sql =
"SELECT id, data_id, group_id, tenant_id, app_name, content, gmt_modified, encrypted_data_key FROM config_info WHERE "
+ "gmt_modified >= ? and id > ? order by id OFFSET 0 ROWS FETCH NEXT ? ROWS ONLY";
return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.START_TIME),
context.getWhereParameter(FieldConstant.LAST_MAX_ID),
context.getWhereParameter(FieldConstant.PAGE_SIZE)));
}
|
@Test
void testFindChangeConfig() {
MapperResult mapperResult = configInfoMapperByDerby.findChangeConfig(context);
assertEquals(mapperResult.getSql(),
"SELECT id, data_id, group_id, tenant_id, app_name, content, gmt_modified, encrypted_data_key FROM config_info "
+ "WHERE gmt_modified >= ? and id > ? order by id OFFSET 0 ROWS FETCH NEXT ? ROWS ONLY");
assertArrayEquals(new Object[] {startTime, lastMaxId, pageSize}, mapperResult.getParamList().toArray());
}
|
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(getMigrationsDir(getConfigFile(), config));
}
|
@Test
public void shouldCreateWithNoExplicitVersionAndEmptyMigrationsDir() {
// Given:
command = PARSER.parse(DESCRIPTION);
// When:
final int result = command.command(migrationsDir);
// Then:
assertThat(result, is(0));
final File expectedFile = new File(Paths.get(migrationsDir, "V000001__" + EXPECTED_FILE_SUFFIX).toString());
assertThat(expectedFile.exists(), is(true));
assertThat(expectedFile.isDirectory(), is(false));
}
|
public static AggregateFunctionInitArguments createAggregateFunctionInitArgs(
final int numInitArgs,
final FunctionCall functionCall
) {
return createAggregateFunctionInitArgs(
numInitArgs,
Collections.emptyList(),
functionCall,
KsqlConfig.empty()
);
}
|
@Test
public void shouldNotThrowIfFirstParamNotALiteral() {
// Given:
when(functionCall.getArguments()).thenReturn(ImmutableList.of(
new UnqualifiedColumnReferenceExp(ColumnName.of("Bob")),
new StringLiteral("No issue here")
));
// When:
UdafUtil.createAggregateFunctionInitArgs(
Math.max(0, functionCall.getArguments().size() - 1),
Collections.singletonList(0),
functionCall,
KsqlConfig.empty()
);
// Then: did not throw.
}
|
@Override
public void doAfterResponse(String remoteAddr, RemotingCommand request, RemotingCommand response) {
if (RequestCode.GET_ROUTEINFO_BY_TOPIC != request.getCode()) {
return;
}
if (response == null || response.getBody() == null || ResponseCode.SUCCESS != response.getCode()) {
return;
}
boolean zoneMode = Boolean.parseBoolean(request.getExtFields().get(MixAll.ZONE_MODE));
if (!zoneMode) {
return;
}
String zoneName = request.getExtFields().get(MixAll.ZONE_NAME);
if (StringUtils.isBlank(zoneName)) {
return;
}
TopicRouteData topicRouteData = RemotingSerializable.decode(response.getBody(), TopicRouteData.class);
response.setBody(filterByZoneName(topicRouteData, zoneName).encode());
}
|
@Test
public void testDoAfterResponseWithNoZoneMode() {
RemotingCommand request1 = RemotingCommand.createRequestCommand(106,null);
zoneRouteRPCHook.doAfterResponse("", request1, null);
HashMap<String, String> extFields = new HashMap<>();
extFields.put(MixAll.ZONE_MODE, "false");
RemotingCommand request = RemotingCommand.createRequestCommand(105,null);
request.setExtFields(extFields);
RemotingCommand response = RemotingCommand.createResponseCommand(null);
response.setCode(ResponseCode.SUCCESS);
response.setBody(RemotingSerializable.encode(createSampleTopicRouteData()));
zoneRouteRPCHook.doAfterResponse("", request, response);
}
|
public static Instant later(Instant time1, Instant time2) {
return time1.isAfter(time2) ? time1 : time2;
}
|
@Test
public void later() {
Instant t1 = Instant.now(); // earlier
Instant t2 = t1.plusSeconds(1); // later
assertEquals(t2, TimeUtils.later(t1, t2));
assertEquals(t2, TimeUtils.later(t2, t1));
assertEquals(t1, TimeUtils.later(t1, t1));
assertEquals(t2, TimeUtils.later(t2, t2));
}
|
@SuppressWarnings("unchecked")
public <T> T convert(DocString docString, Type targetType) {
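        // No conversion is needed when the caller asks for the raw DocString itself.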
if (DocString.class.equals(targetType)) {
return (T) docString;
}
List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType);
if (docStringTypes.isEmpty()) {
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for %s",
targetType.getTypeName()));
}
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for '%s' or %s",
docString.getContentType(),
targetType.getTypeName()));
}
if (docStringTypes.size() > 1) {
List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes);
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, add one of the following content types to your docstring %s",
targetType.getTypeName(),
suggestedContentTypes));
}
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'",
targetType.getTypeName(),
docString.getContentType(),
suggestedContentTypes,
docString.getContentType()));
}
return (T) docStringTypes.get(0).transform(docString.getContent());
}
|
@Test
void same_docstring_content_type_can_convert_to_different_registered_doc_string_types() {
registry.defineDocStringType(new DocStringType(
Greet.class,
"text",
Greet::new));
registry.defineDocStringType(new DocStringType(
Meet.class,
"text",
Meet::new));
registry.defineDocStringType(new DocStringType(
Leave.class,
"text",
Leave::new));
DocString docStringGreet = DocString.create(
"hello world", "text");
DocString docStringMeet = DocString.create(
"nice to meet", "text");
DocString docStringLeave = DocString.create(
"goodbye", "text");
Greet expectedGreet = new Greet(docStringGreet.getContent());
Meet expectedMeet = new Meet(docStringMeet.getContent());
Leave expectedLeave = new Leave(docStringLeave.getContent());
assertThat(converter.convert(docStringGreet, Greet.class), equalTo(expectedGreet));
assertThat(converter.convert(docStringMeet, Meet.class), equalTo(expectedMeet));
assertThat(converter.convert(docStringLeave, Leave.class), equalTo(expectedLeave));
}
|
@Override
public void lock() {
try {
lock(-1, null, false);
} catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException(e);
}
}
|
@Test
public void testUnlockFail() {
Assertions.assertThrows(IllegalMonitorStateException.class, () -> {
RLock lock = redisson.getLock("lock");
Thread t = new Thread() {
public void run() {
RLock lock = redisson.getLock("lock");
lock.lock();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
lock.unlock();
}
};
t.start();
t.join(400);
try {
lock.unlock();
} catch (IllegalMonitorStateException e) {
t.join();
throw e;
}
});
}
|
@Override
public <T extends ComponentRoot> T get(Class<T> providerId) {
try {
return providerId.getConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new IllegalArgumentException(e);
}
}
|
@Test
void get() {
String fileName = "fileName";
String name = "name";
LocalUri retrieved = appRoot.get(ComponentRootA.class)
.get(fileName, name)
.toLocalId()
.asLocalUri();
appRoot.get(ComponentRootA.class)
.get(fileName, name)
.toLocalId();
assertThat(retrieved).isNotNull();
String expected = String.format("/%1$s/%2$s/%3$s", LocalComponentIdA.PREFIX, fileName, name);
assertThat(retrieved.path()).isEqualTo(expected);
}
|
@VisibleForTesting
void validateDictTypeNameUnique(Long id, String name) {
DictTypeDO dictType = dictTypeMapper.selectByName(name);
if (dictType == null) {
return;
}
    // If id is null, there is no need to check whether the match is the dict type with the same id
if (id == null) {
throw exception(DICT_TYPE_NAME_DUPLICATE);
}
if (!dictType.getId().equals(id)) {
throw exception(DICT_TYPE_NAME_DUPLICATE);
}
}
|
@Test
public void testValidateDictTypeNameUnique_success() {
    // Invoke; expect no exception
dictTypeService.validateDictTypeNameUnique(randomLongId(), randomString());
}
|
public static Object fillInDataDefault(DataSchema schema, Object dataWithoutDefault)
{
try
{
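        // Complex types recurse into their children; primitives pass through unchanged.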
switch (schema.getType())
{
case RECORD:
return fillInDefaultOnRecord((RecordDataSchema) schema, (DataMap) dataWithoutDefault);
case TYPEREF:
return fillInDefaultOnTyperef((TyperefDataSchema) schema, dataWithoutDefault);
case MAP:
return fillInDefaultOnMap((MapDataSchema) schema, (DataMap) dataWithoutDefault);
case UNION:
return fillInDefaultOnUnion((UnionDataSchema) schema, (DataMap) dataWithoutDefault);
case ARRAY:
return fillInDefaultOnArray((ArrayDataSchema) schema, (DataList) dataWithoutDefault);
default:
return dataWithoutDefault;
}
}
catch (CloneNotSupportedException ex)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, ex);
}
}
|
@Test(dataProvider = "default_serialization")
public void testGetAbsentFieldsDefaultValues(String caseFilename)
{
try
{
MultiFormatDataSchemaResolver schemaResolver = MultiFormatDataSchemaResolver.withBuiltinFormats(resolverDir);
String expectedDataJsonFile = Files.readFile(new File(pegasusDir + FS + caseFilename));
DataMap caseData = DataMapUtils.readMap(new ByteArrayInputStream(expectedDataJsonFile.getBytes()), Collections.emptyMap());
String schemaFileText = Files.readFile(new File(pegasusDir + FS + caseData.get("schema")));
DataMap caseInput = (DataMap) caseData.get("input");
DataMap caseExpect = (DataMap) caseData.get("expect");
DataSchema schema = DataTemplateUtil.parseSchema(schemaFileText, schemaResolver, SchemaFormatType.PDL);
DataMap dataWithDefault = (DataMap) ResponseUtils.fillInDataDefault(schema, caseInput);
System.out.println("Expect " + caseExpect);
System.out.println("Actual " + dataWithDefault);
Assert.assertEquals(dataWithDefault, caseExpect, (String) caseData.get("context"));
}
catch (Exception e)
{
Assert.fail("Test failed with exception: \n" + e.toString());
}
}
|
private PartitionedByError<ReconcilableTopic, Void> createTopics(List<ReconcilableTopic> kts) {
var newTopics = kts.stream().map(reconcilableTopic -> {
// Admin create
return buildNewTopic(reconcilableTopic.kt(), reconcilableTopic.topicName());
}).collect(Collectors.toSet());
LOGGER.debugOp("Admin.createTopics({})", newTopics);
var timerSample = TopicOperatorUtil.startExternalRequestTimer(metrics, enableAdditionalMetrics);
CreateTopicsResult ctr = admin.createTopics(newTopics);
ctr.all().whenComplete((i, e) -> {
TopicOperatorUtil.stopExternalRequestTimer(timerSample, metrics::createTopicsTimer, enableAdditionalMetrics, namespace);
if (e != null) {
LOGGER.traceOp("Admin.createTopics({}) failed with {}", newTopics, String.valueOf(e));
} else {
LOGGER.traceOp("Admin.createTopics({}) completed", newTopics);
}
});
Map<String, KafkaFuture<Void>> values = ctr.values();
return partitionedByError(kts.stream().map(reconcilableTopic -> {
try {
values.get(reconcilableTopic.topicName()).get();
reconcilableTopic.kt().setStatus(new KafkaTopicStatusBuilder()
.withTopicId(ctr.topicId(reconcilableTopic.topicName()).get().toString()).build());
                return new Pair<>(reconcilableTopic, Either.ofRight(null));
            } catch (ExecutionException e) {
                if (e.getCause() instanceof TopicExistsException) {
                    // we treat this as a success; the next reconciliation checks the configuration
                    return new Pair<>(reconcilableTopic, Either.ofRight(null));
                } else {
                    return new Pair<>(reconcilableTopic, Either.ofLeft(handleAdminException(e)));
                }
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(e);
}
}));
}
|
@Test
public void shouldHandleInterruptedExceptionFromCreateTopics(KafkaCluster cluster) throws ExecutionException, InterruptedException {
var topicName = "my-topic";
kafkaAdminClient[0] = Admin.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()));
var kafkaAdminClientSpy = Mockito.spy(kafkaAdminClient[0]);
var result = Mockito.mock(CreateTopicsResult.class);
Mockito.doReturn(interruptedFuture()).when(result).all();
Mockito.doReturn(Map.of(topicName, interruptedFuture())).when(result).values();
Mockito.doReturn(result).when(kafkaAdminClientSpy).createTopics(any());
KafkaTopic kafkaTopic = createKafkaTopic(topicName);
assertOnUpdateThrowsInterruptedException(kafkaAdminClientSpy, kafkaTopic);
}
|
public void saveUserInfo( IUser user ) throws KettleException {
normalizeUserInfo( user );
if ( !validateUserInfo( user ) ) {
throw new KettleException( BaseMessages.getString( PurRepositorySecurityManager.class,
"PurRepositorySecurityManager.ERROR_0001_INVALID_NAME" ) );
}
userRoleDelegate.createUser( user );
}
|
@Test( expected = KettleException.class )
public void saveUserInfo_NormalizesInfo_FailsIfStillBreaches() throws Exception {
UserInfo info = new UserInfo( " " );
manager.saveUserInfo( info );
}
|
@Override
protected void doStart() throws Exception {
super.doStart();
LOG.debug("Creating connection to Azure ServiceBus");
client = getEndpoint().getServiceBusClientFactory().createServiceBusProcessorClient(getConfiguration(),
this::processMessage, this::processError);
client.start();
}
|
@Test
void synchronizationCompletesMessageOnSuccess() throws Exception {
try (ServiceBusConsumer consumer = new ServiceBusConsumer(endpoint, processor)) {
when(configuration.getServiceBusReceiveMode()).thenReturn(ServiceBusReceiveMode.PEEK_LOCK);
consumer.doStart();
verify(client).start();
verify(clientFactory).createServiceBusProcessorClient(any(), any(), any());
when(messageContext.getMessage()).thenReturn(message);
processMessageCaptor.getValue().accept(messageContext);
verify(messageContext).getMessage();
Exchange exchange = exchangeCaptor.getValue();
assertThat(exchange).isNotNull();
Synchronization synchronization = exchange.getExchangeExtension().handoverCompletions().get(0);
synchronization.onComplete(exchange);
verify(messageContext).complete();
verifyNoMoreInteractions(messageContext);
}
}
|
@Override
public String toString()
{
return getClass().getSimpleName() + "[" + debugFontName + "]";
}
|
@Test
void testFontMatrix()
{
List<Number> fontMatrix = testCFFType1Font.getFontMatrix();
assertNotNull(fontMatrix, "FontMatrix must not be null");
assertNumberList("FontMatrix values are different than expected" + fontMatrix.toString(),
new float[] { 0.001f, 0.0f, 0.0f, 0.001f, 0.0f, 0.0f }, fontMatrix);
}
|
@Override
public String getSQL() {
return sql;
}
|
@Test
void assertNew() {
when(payload.readStringEOF()).thenReturn("SELECT id FROM tbl WHERE id=?");
MySQLComStmtPreparePacket actual = new MySQLComStmtPreparePacket(payload);
assertThat(actual.getSQL(), is("SELECT id FROM tbl WHERE id=?"));
}
|
Collection<OutputFile> compile() {
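        // Compile every queued schema, then the protocol interface if one was supplied.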
List<OutputFile> out = new ArrayList<>(queue.size() + 1);
for (Schema schema : queue) {
out.add(compile(schema));
}
if (protocol != null) {
out.add(compileInterface(protocol));
}
return out;
}
|
@Test
void fieldWithUnderscore_avro3826() {
String jsonSchema = "{\n" + " \"name\": \"Value\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n"
+ " { \"name\": \"__deleted\", \"type\": \"string\"\n" + " }\n" + " ]\n" + "}";
Collection<SpecificCompiler.OutputFile> outputs = new SpecificCompiler(new Schema.Parser().parse(jsonSchema))
.compile();
assertEquals(1, outputs.size());
SpecificCompiler.OutputFile outputFile = outputs.iterator().next();
assertTrue(outputFile.contents.contains("getDeleted()"));
assertFalse(outputFile.contents.contains("$0"));
assertFalse(outputFile.contents.contains("$1"));
String jsonSchema2 = "{\n" + " \"name\": \"Value\", \"type\": \"record\",\n" + " \"fields\": [\n"
+ " { \"name\": \"__deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"_deleted\", \"type\": \"string\"}\n" + " ]\n" + "}";
Collection<SpecificCompiler.OutputFile> outputs2 = new SpecificCompiler(new Schema.Parser().parse(jsonSchema2))
.compile();
assertEquals(1, outputs2.size());
SpecificCompiler.OutputFile outputFile2 = outputs2.iterator().next();
assertTrue(outputFile2.contents.contains("getDeleted()"));
assertTrue(outputFile2.contents.contains("getDeleted$0()"));
assertFalse(outputFile2.contents.contains("$1"));
String jsonSchema3 = "{\n" + " \"name\": \"Value\", \"type\": \"record\",\n" + " \"fields\": [\n"
+ " { \"name\": \"__deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"_deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"deleted\", \"type\": \"string\"}\n" + " ]\n" + "}";
Collection<SpecificCompiler.OutputFile> outputs3 = new SpecificCompiler(new Schema.Parser().parse(jsonSchema3))
.compile();
assertEquals(1, outputs3.size());
SpecificCompiler.OutputFile outputFile3 = outputs3.iterator().next();
assertTrue(outputFile3.contents.contains("getDeleted()"));
assertTrue(outputFile3.contents.contains("getDeleted$0()"));
assertTrue(outputFile3.contents.contains("getDeleted$1()"));
assertFalse(outputFile3.contents.contains("$2"));
String jsonSchema4 = "{\n" + " \"name\": \"Value\", \"type\": \"record\",\n" + " \"fields\": [\n"
+ " { \"name\": \"__deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"_deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"deleted\", \"type\": \"string\"},\n"
+ " { \"name\": \"Deleted\", \"type\": \"string\"}\n" + " ]\n" + "}";
Collection<SpecificCompiler.OutputFile> outputs4 = new SpecificCompiler(new Schema.Parser().parse(jsonSchema4))
.compile();
assertEquals(1, outputs4.size());
SpecificCompiler.OutputFile outputFile4 = outputs4.iterator().next();
assertTrue(outputFile4.contents.contains("getDeleted()"));
assertTrue(outputFile4.contents.contains("getDeleted$0()"));
assertTrue(outputFile4.contents.contains("getDeleted$1()"));
assertTrue(outputFile4.contents.contains("getDeleted$2()"));
assertFalse(outputFile4.contents.contains("$3"));
}
|
public AdvancedNetworkConfig getAdvancedNetworkConfig() {
return advancedNetworkConfig;
}
|
@Test
public void testEndpointConfig() {
String name = randomName();
EndpointQualifier qualifier = EndpointQualifier.resolve(WAN, name);
ServerSocketEndpointConfig endpointConfig = new ServerSocketEndpointConfig();
endpointConfig.setName(name);
endpointConfig.setProtocolType(WAN);
config.getAdvancedNetworkConfig().addWanEndpointConfig(endpointConfig);
assertEquals(endpointConfig,
config.getAdvancedNetworkConfig().getEndpointConfigs().get(qualifier));
}
|
public static boolean isBasicInfoChanged(Member actual, Member expected) {
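        // Without an expected member, any non-null actual member counts as a change.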
if (null == expected) {
return null != actual;
}
if (!expected.getIp().equals(actual.getIp())) {
return true;
}
if (expected.getPort() != actual.getPort()) {
return true;
}
if (!expected.getAddress().equals(actual.getAddress())) {
return true;
}
if (!expected.getState().equals(actual.getState())) {
return true;
}
        // a change in the gRPC report switch also counts as a basic info change
if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) {
return true;
}
return isBasicInfoChangedInExtendInfo(expected, actual);
}
|
@Test
void testIsBasicInfoChangedForChangedBasicExtendInfo() {
Member newMember = buildMember();
newMember.setExtendVal(MemberMetaDataConstants.WEIGHT, "100");
assertTrue(MemberUtil.isBasicInfoChanged(newMember, originalMember));
}
|
public static boolean publishEvent(final Event event) {
try {
return publishEvent(event.getClass(), event);
} catch (Throwable ex) {
LOGGER.error("There was an exception to the message publishing : ", ex);
return false;
}
}
|
@Test
void testPublishEventByNoPublisher() {
for (int i = 0; i < 3; i++) {
assertFalse(NotifyCenter.publishEvent(new NoPublisherEvent()));
}
}
|
@Override
public String getDescription() {
return "Load prioritized rules";
}
|
@Test
void getDescription_shouldReturnValue() {
assertThat(underTest.getDescription()).isEqualTo("Load prioritized rules");
}
|
@Override
public boolean checkCredentials(String username, String password) {
return this.username.equals(username) && this.password.equals(password);
}
|
@Test
public void test() {
BasicAuthenticator plainTextAuthenticator =
new PlaintextAuthenticator("/", VALID_USERNAME, VALID_PASSWORD);
for (String username : TEST_USERNAMES) {
for (String password : TEST_PASSWORDS) {
boolean expectedIsAuthenticated =
VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
boolean actualIsAuthenticated =
plainTextAuthenticator.checkCredentials(username, password);
assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
}
}
}
|
public static DiffResult diff(Graph left, Graph right) {
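        // The diff is the symmetric difference of the two graphs' edge and vertex sets.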
List<Edge> removedEdges = left.edges().filter(e -> !right.hasEquivalentEdge(e)).collect(Collectors.toList());
List<Vertex> removedVertices = left.vertices().filter(v -> !right.hasEquivalentVertex(v)).collect(Collectors.toList());
List<Edge> addedEdges = right.edges().filter(e -> !left.hasEquivalentEdge(e)).collect(Collectors.toList());
List<Vertex> addedVertices = right.vertices().filter(v -> !left.hasEquivalentVertex(v)).collect(Collectors.toList());
return new DiffResult(removedEdges, addedEdges, removedVertices, addedVertices);
}
|
@Test
public void testDifferentSimpleGraphs() throws InvalidIRException {
Graph left = simpleGraph();
Graph right = left.copy();
Vertex new1 = createTestVertex("new1");
right.addVertex(new1);
right.chainVerticesById("t3", "new1");
GraphDiff.DiffResult result = GraphDiff.diff(left, right);
assertFalse(result.isIdentical());
assertThat(result.getAddedVertices().size(), is(1));
assertThat(result.getAddedVertices().stream().findAny().get().getId(), is("new1"));
assertThat(result.getAddedEdges().size(), is(1));
Edge expectedEdge = new1.incomingEdges().findAny().get();
assertTrue(result.getAddedEdges().stream().findAny().get().sourceComponentEquals(expectedEdge));
assertTrue(result.getRemovedVertices().isEmpty());
assertTrue(result.getRemovedEdges().isEmpty());
}
|
public static MetadataUpdate fromJson(String json) {
return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
|
@Test
public void testBranchFromJsonAllFields() {
String action = MetadataUpdateParser.SET_SNAPSHOT_REF;
long snapshotId = 1L;
SnapshotRefType type = SnapshotRefType.BRANCH;
String refName = "hank";
Integer minSnapshotsToKeep = 2;
Long maxSnapshotAgeMs = 3L;
Long maxRefAgeMs = 4L;
String json =
"{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\",\"snapshot-id\":1,\"type\":\"branch\","
+ "\"min-snapshots-to-keep\":2,\"max-snapshot-age-ms\":3,\"max-ref-age-ms\":4}";
MetadataUpdate expected =
new MetadataUpdate.SetSnapshotRef(
refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs);
assertEquals(action, expected, MetadataUpdateParser.fromJson(json));
}
|
@Override
protected LinkedHashMap<String, Callable<? extends ChannelHandler>> getCustomChildChannelHandlers(MessageInput input) {
final LinkedHashMap<String, Callable<? extends ChannelHandler>> handlers = new LinkedHashMap<>(super.getCustomChildChannelHandlers(input));
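        // Append the Beats frame decoder behind the handlers inherited from the parent transport.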
handlers.put("beats", BeatsFrameDecoder::new);
return handlers;
}
|
@Test
public void customChildChannelHandlersContainBeatsHandler() {
final NettyTransportConfiguration nettyTransportConfiguration = new NettyTransportConfiguration("nio", "jdk", 1);
final EventLoopGroupFactory eventLoopGroupFactory = new EventLoopGroupFactory(nettyTransportConfiguration);
final BeatsTransport transport = new BeatsTransport(
Configuration.EMPTY_CONFIGURATION,
eventLoopGroup,
eventLoopGroupFactory,
nettyTransportConfiguration,
new ThroughputCounter(eventLoopGroup),
new LocalMetricRegistry(),
tlsConfiguration
);
final MessageInput input = mock(MessageInput.class);
assertThat(transport.getCustomChildChannelHandlers(input)).containsKey("beats");
}
|
public boolean isFound() {
return found;
}
|
@Test
public void testCalcDistanceDetails() {
Weighting weighting = new SpeedWeighting(carAvSpeedEnc);
Path p = new Dijkstra(pathDetailGraph, weighting, TraversalMode.NODE_BASED).calcPath(1, 5);
assertTrue(p.isFound());
Map<String, List<PathDetail>> details = PathDetailsFromEdges.calcDetails(p, carManager, weighting,
List.of(DISTANCE), new PathDetailsBuilderFactory(), 0, pathDetailGraph);
assertEquals(1, details.size());
List<PathDetail> distanceDetails = details.get(DISTANCE);
assertEquals(5D, distanceDetails.get(0).getValue());
assertEquals(5D, distanceDetails.get(1).getValue());
assertEquals(10D, distanceDetails.get(2).getValue());
assertEquals(5D, distanceDetails.get(3).getValue());
}
|
public static <T> List<T> move(List<T> list, T element, int newPosition) {
Assert.notNull(list);
if (false == list.contains(element)) {
list.add(newPosition, element);
} else {
list.remove(element);
list.add(newPosition, element);
}
return list;
}
|
@Test
public void testMoveElementToPosition() {
List<String> list = new ArrayList<>(Arrays.asList("A", "B", "C", "D"));
// Move "B" to position 2
List<String> expectedResult1 = new ArrayList<>(Arrays.asList("A", "C", "B", "D"));
assertEquals(expectedResult1, ListUtil.move(list, "B", 2));
list = new ArrayList<>(Arrays.asList("A", "B", "C", "D"));
// Move "D" to position 0
List<String> expectedResult2 = new ArrayList<>(Arrays.asList("D", "A", "B", "C"));
assertEquals(expectedResult2, ListUtil.move(list, "D", 0));
list = new ArrayList<>(Arrays.asList("A", "B", "C", "D"));
// Move "E" (not in list) to position 1
List<String> expectedResult3 = new ArrayList<>(Arrays.asList("A", "E", "B", "C", "D"));
assertEquals(expectedResult3, ListUtil.move(list, "E", 1));
}
|
public static void extractFiles(File archive, File extractTo) throws ExtractionException {
extractFiles(archive, extractTo, null);
}
|
@Test(expected = org.owasp.dependencycheck.utils.ExtractionException.class)
public void testExtractFiles_3args() throws Exception {
File destination = getSettings().getTempDirectory();
File archive = BaseTest.getResourceAsFile(this, "evil.zip");
Engine engine = null;
ExtractionUtil.extractFiles(archive, destination, engine);
}
|
public static Object parse(Payload payload) {
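        // Resolve the concrete class registered for this payload type; unknown types are rejected below.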
Class classType = PayloadRegistry.getClassByType(payload.getMetadata().getType());
if (classType != null) {
ByteString byteString = payload.getBody().getValue();
ByteBuffer byteBuffer = byteString.asReadOnlyByteBuffer();
Object obj = JacksonUtils.toObj(new ByteBufferBackedInputStream(byteBuffer), classType);
if (obj instanceof Request) {
((Request) obj).putAllHeader(payload.getMetadata().getHeadersMap());
}
return obj;
} else {
throw new RemoteException(NacosException.SERVER_ERROR,
"Unknown payload type:" + payload.getMetadata().getType());
}
}
|
@Test
void testParseNullType() {
assertThrows(RemoteException.class, () -> {
Payload mockPayload = mock(Payload.class);
Metadata mockMetadata = mock(Metadata.class);
when(mockPayload.getMetadata()).thenReturn(mockMetadata);
GrpcUtils.parse(mockPayload);
});
}
|
@Override
public void revert(final Path file) throws BackgroundException {
        // To restore a version, move it to the special restore folder at /remote.php/dav/versions/USER/restore
try {
session.getClient().move(URIEncoder.encode(file.getAbsolute()),
URIEncoder.encode(String.format("%s/restore/target",
new NextcloudHomeFeature(session.getHost()).find(NextcloudHomeFeature.Context.versions).getAbsolute()))
);
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot revert file", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, file);
}
}
|
@Test
public void testRevert() throws Exception {
final Path directory = new DAVDirectoryFeature(session, new NextcloudAttributesFinderFeature(session)).mkdir(new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus();
final NextcloudWriteFeature writer = new NextcloudWriteFeature(session);
final byte[] initialContent = RandomUtils.nextBytes(32769);
{
new StreamCopier(status, status).transfer(new ByteArrayInputStream(initialContent), writer.write(test, status.withLength(initialContent.length), new DisabledConnectionCallback()));
}
final NextcloudVersioningFeature feature = new NextcloudVersioningFeature(session);
assertEquals(0, feature.list(test.withAttributes(new NextcloudAttributesFinderFeature(session).find(test)), new DisabledListProgressListener()).size());
final PathAttributes initialAttributes = new NextcloudAttributesFinderFeature(session).find(test);
final byte[] contentUpdate = RandomUtils.nextBytes(16258);
{
new StreamCopier(status, status).transfer(new ByteArrayInputStream(contentUpdate), writer.write(test, status.withLength(contentUpdate.length).exists(true), new DisabledConnectionCallback()));
final AttributedList<Path> versions = feature.list(test.withAttributes(new NextcloudAttributesFinderFeature(session).find(test)), new DisabledListProgressListener());
assertEquals(1, versions.size());
}
{
final byte[] contentLatest = RandomUtils.nextBytes(13247);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(contentLatest), writer.write(test, status.withLength(contentLatest.length).exists(true), new DisabledConnectionCallback()));
}
final AttributedList<Path> versions = feature.list(test.withAttributes(new NextcloudAttributesFinderFeature(session).find(test)), new DisabledListProgressListener());
assertEquals(2, versions.size());
final Path initialVersion = versions.get(1);
{
assertEquals(initialAttributes.getSize(), initialVersion.attributes().getSize());
assertEquals(initialAttributes.getModificationDate(), initialVersion.attributes().getModificationDate());
assertNotNull(initialVersion.attributes().getVersionId());
assertNotEquals(initialAttributes, new NextcloudAttributesFinderFeature(session).find(test));
assertEquals(initialVersion.attributes(), new NextcloudAttributesFinderFeature(session).find(initialVersion));
{
final InputStream reader = new NextcloudReadFeature(session).read(initialVersion, new TransferStatus(), new DisabledLoginCallback());
assertArrayEquals(initialContent, IOUtils.readFully(reader, initialContent.length));
reader.close();
}
final Path updatedVersion = versions.get(0);
assertEquals(contentUpdate.length, new NextcloudAttributesFinderFeature(session).find(updatedVersion).getSize());
{
final InputStream reader = new NextcloudReadFeature(session).read(updatedVersion, new TransferStatus(), new DisabledLoginCallback());
assertArrayEquals(contentUpdate, IOUtils.readFully(reader, contentUpdate.length));
reader.close();
}
}
feature.revert(initialVersion);
assertEquals(initialVersion.attributes().getModificationDate(), new NextcloudAttributesFinderFeature(session).find(test).getModificationDate());
new DAVDeleteFeature(session).delete(Arrays.asList(test, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
static long sizeOf(Mutation m) {
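        // Deletes are sized by their key set; writes sum per-column value size estimates.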
if (m.getOperation() == Mutation.Op.DELETE) {
return sizeOf(m.getKeySet());
}
long result = 0;
for (Value v : m.getValues()) {
switch (v.getType().getCode()) {
case ARRAY:
result += estimateArrayValue(v);
break;
case STRUCT:
throw new IllegalArgumentException("Structs are not supported in mutation.");
default:
result += estimatePrimitiveValue(v);
}
}
return result;
}
|
@Test
public void jsons() throws Exception {
Mutation empty =
Mutation.newInsertOrUpdateBuilder("test").set("one").to(Value.json("{}")).build();
Mutation nullValue =
Mutation.newInsertOrUpdateBuilder("test").set("one").to(Value.json((String) null)).build();
Mutation sample =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(Value.json("{\"name\":\"number\",\"val\":12345.123}"))
.build();
Mutation nullArray =
Mutation.newInsertOrUpdateBuilder("test").set("one").toJsonArray(null).build();
assertThat(MutationSizeEstimator.sizeOf(empty), is(2L));
assertThat(MutationSizeEstimator.sizeOf(nullValue), is(0L));
assertThat(MutationSizeEstimator.sizeOf(sample), is(33L));
assertThat(MutationSizeEstimator.sizeOf(nullArray), is(0L));
}
|
@Override
public List<ValidationMessage> validate(ValidationContext context) {
return context.query().tokens().stream()
.filter(this::isInvalidOperator)
.map(token -> {
final String errorMessage = String.format(Locale.ROOT, "Query contains invalid operator \"%s\". All AND / OR / NOT operators have to be written uppercase", token.image());
return ValidationMessage.builder(ValidationStatus.WARNING, ValidationType.INVALID_OPERATOR)
.errorMessage(errorMessage)
.relatedProperty(token.image())
.position(QueryPosition.from(token))
.build();
}).collect(Collectors.toList());
}
|
@Test
void testInvalidOperatorNoOperatorPresent() {
final ValidationContext context = TestValidationContext.create("foo:bar baz")
.build();
assertThat(sut.validate(context)).isEmpty();
}
|
public MultiMap<Value, T, List<T>> get(final KeyDefinition keyDefinition) {
return tree.get(keyDefinition);
}
|
@Test
void testFindByName() throws Exception {
MultiMap<Value, Person, List<Person>> multiMap = map.get(KeyDefinition.newKeyDefinition()
.withId("name")
.build());
assertThat(multiMap.keySet()).extracting(x -> x.getComparable()).containsExactlyInAnyOrder("Toni", "Eder", "Michael");
}
|
public IterableSubject factKeys() {
if (!(actual instanceof ErrorWithFacts)) {
failWithActual(simpleFact("expected a failure thrown by Truth's failure API"));
return ignoreCheck().that(ImmutableList.of());
}
ErrorWithFacts error = (ErrorWithFacts) actual;
return check("factKeys()").that(getFactKeys(error));
}
|
@Test
public void factKeysNoValue() {
assertThat(simpleFact("foo")).factKeys().containsExactly("foo");
}
|
public static StrimziPodSet createPodSet(
String name,
String namespace,
Labels labels,
OwnerReference ownerReference,
ResourceTemplate template,
int replicas,
Map<String, String> annotations,
Labels selectorLabels,
Function<Integer, Pod> podCreator
) {
List<Map<String, Object>> pods = new ArrayList<>(replicas);
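        // Build one Pod per replica via the supplied creator and embed each as a map in the spec.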
for (int i = 0; i < replicas; i++) {
Pod pod = podCreator.apply(i);
pods.add(PodSetUtils.podToMap(pod));
}
return new StrimziPodSetBuilder()
.withNewMetadata()
.withName(name)
.withLabels(labels.withAdditionalLabels(TemplateUtils.labels(template)).toMap())
.withNamespace(namespace)
.withAnnotations(Util.mergeLabelsOrAnnotations(annotations, TemplateUtils.annotations(template)))
.withOwnerReferences(ownerReference)
.endMetadata()
.withNewSpec()
.withSelector(new LabelSelectorBuilder().withMatchLabels(selectorLabels.toMap()).build())
.withPods(pods)
.endSpec()
.build();
}
|
@Test
public void testCreateStrimziPodSetFromNodeReferencesWithNullTemplate() {
List<String> podNames = new ArrayList<>();
StrimziPodSet sps = WorkloadUtils.createPodSet(
NAME,
NAMESPACE,
LABELS,
OWNER_REFERENCE,
null,
NODES,
Map.of("extra", "annotations"),
LABELS.strimziSelectorLabels(),
n -> {
podNames.add(n.podName());
return new PodBuilder()
.withNewMetadata()
.withName(n.podName())
.endMetadata()
.build();
}
);
assertThat(sps.getMetadata().getName(), is(NAME));
assertThat(sps.getMetadata().getNamespace(), is(NAMESPACE));
assertThat(sps.getMetadata().getOwnerReferences(), is(List.of(OWNER_REFERENCE)));
assertThat(sps.getMetadata().getLabels(), is(LABELS.toMap()));
assertThat(sps.getMetadata().getAnnotations(), is(Map.of("extra", "annotations")));
assertThat(sps.getSpec().getSelector().getMatchLabels().size(), is(3));
assertThat(sps.getSpec().getSelector().getMatchLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is("my-cluster"));
assertThat(sps.getSpec().getSelector().getMatchLabels().get(Labels.STRIMZI_NAME_LABEL), is("my-workload"));
assertThat(sps.getSpec().getSelector().getMatchLabels().get(Labels.STRIMZI_KIND_LABEL), is("my-kind"));
// Test generating pods from the PodCreator method
assertThat(podNames.size(), is(3));
assertThat(podNames, hasItems("my-cluster-nodes-10", "my-cluster-nodes-11", "my-cluster-nodes-12"));
assertThat(sps.getSpec().getPods().size(), is(3));
assertThat(sps.getSpec().getPods().stream().map(pod -> PodSetUtils.mapToPod(pod).getMetadata().getName()).toList(), hasItems("my-cluster-nodes-10", "my-cluster-nodes-11", "my-cluster-nodes-12"));
}
|
public static File generate(String content, int width, int height, File targetFile) {
String extName = FileUtil.extName(targetFile);
switch (extName) {
case QR_TYPE_SVG:
String svg = generateAsSvg(content, new QrConfig(width, height));
FileUtil.writeString(svg, targetFile, StandardCharsets.UTF_8);
break;
case QR_TYPE_TXT:
String txt = generateAsAsciiArt(content, new QrConfig(width, height));
FileUtil.writeString(txt, targetFile, StandardCharsets.UTF_8);
break;
default:
final BufferedImage image = generate(content, width, height);
ImgUtil.write(image, targetFile);
break;
}
return targetFile;
}
|
@Test
public void pdf417Test() {
final BufferedImage image = QrCodeUtil.generate("content111", BarcodeFormat.PDF_417, QrConfig.create());
Assert.notNull(image);
}
|
@Override
public Map<String, String> getMetadata(final Path file) throws BackgroundException {
return new S3AttributesFinderFeature(session, acl).find(file).getMetadata();
}
|
@Test
public void testGetMetadataFile() throws Exception {
final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(test, new TransferStatus()
.withMetadata(Collections.singletonMap("app", "cyberduck"))
.withMime("text/plain"));
final Map<String, String> metadata = new S3MetadataFeature(session, new S3AccessControlListFeature(session)).getMetadata(test);
assertFalse(metadata.isEmpty());
assertTrue(metadata.containsKey("app"));
assertEquals("cyberduck", metadata.get("app"));
assertTrue(metadata.containsKey("Content-Type"));
assertEquals("text/plain", metadata.get("Content-Type"));
assertFalse(metadata.containsKey(Constants.KEY_FOR_USER_METADATA));
assertFalse(metadata.containsKey(Constants.KEY_FOR_SERVICE_METADATA));
assertFalse(metadata.containsKey(Constants.KEY_FOR_COMPLETE_METADATA));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
}
|
@Test
public void testBuildEventOfArrayOfDynamicStruct() throws ClassNotFoundException {
// The full event signature is
//
// Stamp3(uint256 indexed stampId, address indexed caller, bool odd,
// (uint256,bool,string) topMessage, (uint256,bool,string)[] messages),
//
// but we are only decoding the non-indexed data portion of it represented by
// 'bool odd, (uint256,bool,string) topMessage, (uint256,bool,string)[] messages'.
//
// Transaction:
// https://testnet.treasurescan.io/tx/0x041e53e7571283d462df99a95b2c21324279657f26a3adef907095d2d9c5ed85?tab=logs
// Contract:
// https://testnet.treasurescan.io/address/0x5167E9A422aCEd95C2D0b62bF05a7847a9a942B2
String data =
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000568656c6c6f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000002676d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000002676d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000017000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000002676d000000000000000000000000000000000000000000000000000000000000";
TypeReference<DynamicStruct> tupleTr =
new TypeReference<DynamicStruct>(
false,
Arrays.asList(
TypeReference.makeTypeReference("uint256"),
TypeReference.makeTypeReference("bool"),
TypeReference.makeTypeReference("string"))) {};
// Decode data according to the above signature for the non-indexed parameters.
List<Type> decodedData =
FunctionReturnDecoder.decode(
data,
Utils.convert(
Arrays.asList(
// bool odd
TypeReference.makeTypeReference("bool"),
// (uint256,bool,string)
tupleTr,
// (uint256,bool,string)[]
new TypeReference<DynamicArray>(false) {
@Override
public TypeReference getSubTypeReference() {
return tupleTr;
}
@Override
public java.lang.reflect.Type getType() {
return new java.lang.reflect.ParameterizedType() {
@Override
public java.lang.reflect.Type[]
getActualTypeArguments() {
return new java.lang.reflect.Type[] {
tupleTr.getType()
};
}
@Override
public java.lang.reflect.Type getRawType() {
return DynamicArray.class;
}
@Override
public java.lang.reflect.Type getOwnerType() {
return Class.class;
}
};
}
})));
assertEquals(decodedData.get(0).getValue(), false);
List<Type> tupleData = ((DynamicStruct) decodedData.get(1)).getValue();
assertEquals(tupleData.get(0).getValue(), BigInteger.valueOf(20));
assertEquals(tupleData.get(1).getValue(), false);
assertEquals(tupleData.get(2).getValue(), "hello");
List<DynamicStruct> tupleArrayData =
((DynamicArray<DynamicStruct>) decodedData.get(2)).getValue();
List<Type> tupleArrayEntry0 = tupleArrayData.get(0).getValue();
assertEquals(tupleArrayEntry0.get(0).getValue(), BigInteger.valueOf(21));
assertEquals(tupleArrayEntry0.get(1).getValue(), true);
assertEquals(tupleArrayEntry0.get(2).getValue(), "gm");
List<Type> tupleArrayEntry1 = tupleArrayData.get(1).getValue();
assertEquals(tupleArrayEntry1.get(0).getValue(), BigInteger.valueOf(22));
assertEquals(tupleArrayEntry1.get(1).getValue(), false);
assertEquals(tupleArrayEntry1.get(2).getValue(), "gm");
List<Type> tupleArrayEntry2 = tupleArrayData.get(2).getValue();
assertEquals(tupleArrayEntry2.get(0).getValue(), BigInteger.valueOf(23));
assertEquals(tupleArrayEntry2.get(1).getValue(), true);
assertEquals(tupleArrayEntry2.get(2).getValue(), "gm");
}
|
@Bean("ScmChangedFiles")
public ScmChangedFiles provide(ScmConfiguration scmConfiguration, BranchConfiguration branchConfiguration, DefaultInputProject project) {
Path rootBaseDir = project.getBaseDir();
Set<ChangedFile> changedFiles = loadChangedFilesIfNeeded(scmConfiguration, branchConfiguration, rootBaseDir);
if (changedFiles != null) {
validatePaths(getAbsoluteFilePaths(changedFiles));
}
return new ScmChangedFiles(changedFiles);
}
|
@Test
public void testFailIfRelativePath() {
when(branchConfiguration.targetBranchName()).thenReturn("target");
when(branchConfiguration.isPullRequest()).thenReturn(true);
when(scmConfiguration.provider()).thenReturn(scmProvider);
when(scmProvider.branchChangedFiles("target", rootBaseDir)).thenReturn(Collections.singleton(Paths.get("changedFile")));
assertThatThrownBy(() -> provider.provide(scmConfiguration, branchConfiguration, project))
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("changed file with a relative path");
}
|
@Override
public void writeSpecialized(int i) {
if (current == null) {
current = i;
} else {
Integer currentI = this.ofSameTypeOrThrow(current, Integer.class);
current = currentI + i;
}
}
|
@Test
void invalidAddition() throws IOException {
try (TypedObjectWriter writer = new TypedObjectWriter()){
writer.writeSpecialized(1);
IllegalArgumentException illegalArgumentException = Assertions.assertThrows(IllegalArgumentException.class, () -> writer.writeSpecialized('a'));
assertThat(illegalArgumentException.getMessage(), is("Tried to add java.lang.Character to java.lang.Integer"));
}
}
|
static long sizeOf(Mutation m) {
if (m.getOperation() == Mutation.Op.DELETE) {
return sizeOf(m.getKeySet());
}
long result = 0;
for (Value v : m.getValues()) {
switch (v.getType().getCode()) {
case ARRAY:
result += estimateArrayValue(v);
break;
case STRUCT:
throw new IllegalArgumentException("Structs are not supported in mutation.");
default:
result += estimatePrimitiveValue(v);
}
}
return result;
}
|
@Test
public void primitives() throws Exception {
Mutation int64 = Mutation.newInsertOrUpdateBuilder("test").set("one").to(1).build();
Mutation float32 = Mutation.newInsertOrUpdateBuilder("test").set("one").to(1.3f).build();
Mutation float64 = Mutation.newInsertOrUpdateBuilder("test").set("one").to(2.9).build();
Mutation bool = Mutation.newInsertOrUpdateBuilder("test").set("one").to(false).build();
Mutation numeric =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(new BigDecimal("12345678901234567890.123456789"))
.build();
Mutation pgNumeric =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(Value.pgNumeric("12345678901234567890.123456789"))
.build();
Mutation pgNumericNaN =
Mutation.newInsertOrUpdateBuilder("test").set("one").to(Value.pgNumeric("NaN")).build();
Mutation json =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(Value.json("{\"key1\":\"value1\", \"key2\":\"value2\"}"))
.build();
Mutation deleteDouble = Mutation.delete("test", Key.of(1223.));
Mutation jsonb =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(Value.pgJsonb("{\"key123\":\"value123\", \"key321\":\"value321\"}"))
.build();
assertThat(MutationSizeEstimator.sizeOf(int64), is(8L));
assertThat(MutationSizeEstimator.sizeOf(float32), is(4L));
assertThat(MutationSizeEstimator.sizeOf(float64), is(8L));
assertThat(MutationSizeEstimator.sizeOf(bool), is(1L));
assertThat(MutationSizeEstimator.sizeOf(numeric), is(30L));
assertThat(MutationSizeEstimator.sizeOf(pgNumeric), is(30L));
assertThat(MutationSizeEstimator.sizeOf(pgNumericNaN), is(3L));
assertThat(MutationSizeEstimator.sizeOf(json), is(34L));
assertThat(MutationSizeEstimator.sizeOf(deleteDouble), is(8L));
assertThat(MutationSizeEstimator.sizeOf(jsonb), is(42L));
}
|
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
if (stream == null) {
throw new NullPointerException("null stream");
}
Throwable t;
boolean alive = false;
ForkClient client = acquireClient();
try {
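            // Tee events to the caller's handler and a metadata collector, unless the handler
            // is a recursive wrapper that manages metadata itself.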
ContentHandler tee =
(handler instanceof AbstractRecursiveParserWrapperHandler) ? handler :
new TeeContentHandler(handler, new MetadataContentHandler(metadata));
t = client.call("parse", stream, tee, metadata, context);
alive = true;
} catch (TikaException te) {
// Problem occurred on our side
alive = true;
throw te;
} catch (IOException e) {
// Problem occurred on the other side
throw new TikaException("Failed to communicate with a forked parser process." +
" The process has most likely crashed due to some error" +
" like running out of memory. A new process will be" +
" started for the next parsing request.", e);
} finally {
releaseClient(client, alive);
}
if (t instanceof IOException) {
throw (IOException) t;
} else if (t instanceof SAXException) {
throw (SAXException) t;
} else if (t instanceof TikaException) {
throw (TikaException) t;
} else if (t != null) {
throw new TikaException("Unexpected error in forked server process", t);
}
}
|
@Test
public void testRPWWithMainDocNPE() throws Exception {
Parser parser = new AutoDetectParser();
RecursiveParserWrapper wrapper = new RecursiveParserWrapper(parser);
RecursiveParserWrapperHandler handler = new RecursiveParserWrapperHandler(
new BasicContentHandlerFactory(BasicContentHandlerFactory.HANDLER_TYPE.TEXT,
20000));
try (ForkParser fork = new ForkParser(ForkParserTest.class.getClassLoader(), wrapper);
InputStream is = getResourceAsStream("/test-documents/embedded_then_npe.xml")) {
Metadata metadata = new Metadata();
ParseContext context = new ParseContext();
fork.parse(is, handler, metadata, context);
fail();
} catch (TikaException e) {
assertTrue(e.getCause() instanceof NullPointerException);
assertContains("another", e.getCause().getMessage());
}
List<Metadata> metadataList = handler.getMetadataList();
Metadata m0 = metadataList.get(0);
assertEquals("Nikolai Lobachevsky", m0.get(TikaCoreProperties.CREATOR));
assertContains("main_content", m0.get(TikaCoreProperties.TIKA_CONTENT));
assertContains("embed1.xml", m0.get(TikaCoreProperties.TIKA_CONTENT));
Metadata m1 = metadataList.get(1);
assertEquals("embeddedAuthor", m1.get(TikaCoreProperties.CREATOR));
assertContains("some_embedded_content", m1.get(TikaCoreProperties.TIKA_CONTENT));
assertEquals("/embed1.xml", m1.get(TikaCoreProperties.EMBEDDED_RESOURCE_PATH));
}
|
@VisibleForTesting
static String formatLevel(int level)
{
if (level < 10000)
{
return Integer.toString(level);
}
else
{
return (level / 1000) + "k";
}
}
|
@Test
public void testFormatLevel()
{
assertEquals("398", formatLevel(398));
assertEquals("5000", formatLevel(5000));
assertEquals("7682", formatLevel(7682));
assertEquals("12k", formatLevel(12398));
assertEquals("219k", formatLevel(219824));
}
|
public CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> acknowledge(
String memberId,
String groupId,
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics
) {
log.trace("Acknowledge request for topicIdPartitions: {} with groupId: {}",
acknowledgeTopics.keySet(), groupId);
this.shareGroupMetrics.shareAcknowledgement();
Map<TopicIdPartition, CompletableFuture<Errors>> futures = new HashMap<>();
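        // Acknowledge each partition found in the cache; unknown partitions complete
        // immediately with UNKNOWN_TOPIC_OR_PARTITION.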
acknowledgeTopics.forEach((topicIdPartition, acknowledgePartitionBatches) -> {
SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(groupId, topicIdPartition));
if (sharePartition != null) {
CompletableFuture<Errors> future = sharePartition.acknowledge(memberId, acknowledgePartitionBatches).thenApply(throwable -> {
if (throwable.isPresent()) {
return Errors.forException(throwable.get());
}
acknowledgePartitionBatches.forEach(batch -> {
batch.acknowledgeTypes().forEach(this.shareGroupMetrics::recordAcknowledgement);
});
return Errors.NONE;
});
futures.put(topicIdPartition, future);
} else {
futures.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION));
}
});
CompletableFuture<Void> allFutures = CompletableFuture.allOf(
futures.values().toArray(new CompletableFuture[0]));
return allFutures.thenApply(v -> {
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = new HashMap<>();
futures.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData()
.setPartitionIndex(topicIdPartition.partition())
.setErrorCode(future.join().code())));
return result;
});
}
|
@Test
public void testAcknowledgeMultiplePartition() {
String groupId = "grp";
String memberId = Uuid.randomUuid().toString();
TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0));
TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0));
TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0));
SharePartition sp1 = mock(SharePartition.class);
SharePartition sp2 = mock(SharePartition.class);
SharePartition sp3 = mock(SharePartition.class);
when(sp1.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(Optional.empty()));
when(sp2.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(Optional.empty()));
when(sp3.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(Optional.empty()));
Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new HashMap<>();
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp1), sp1);
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp2), sp2);
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp3), sp3);
Metrics metrics = new Metrics();
SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
.withPartitionCacheMap(partitionCacheMap).withMetrics(metrics).build();
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
acknowledgeTopics.put(tp1, Arrays.asList(
new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)),
new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1))
));
acknowledgeTopics.put(tp2, Arrays.asList(
new ShareAcknowledgementBatch(15, 26, Collections.singletonList((byte) 2)),
new ShareAcknowledgementBatch(34, 56, Collections.singletonList((byte) 2))
));
acknowledgeTopics.put(tp3, Arrays.asList(
new ShareAcknowledgementBatch(4, 15, Collections.singletonList((byte) 3)),
new ShareAcknowledgementBatch(16, 21, Collections.singletonList((byte) 3))
));
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
assertEquals(3, result.size());
assertTrue(result.containsKey(tp1));
assertTrue(result.containsKey(tp2));
assertTrue(result.containsKey(tp3));
assertEquals(0, result.get(tp1).partitionIndex());
assertEquals(Errors.NONE.code(), result.get(tp1).errorCode());
assertEquals(0, result.get(tp2).partitionIndex());
assertEquals(Errors.NONE.code(), result.get(tp2).errorCode());
assertEquals(0, result.get(tp3).partitionIndex());
assertEquals(Errors.NONE.code(), result.get(tp3).errorCode());
Map<MetricName, Consumer<Double>> expectedMetrics = new HashMap<>();
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.SHARE_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME),
val -> assertEquals(1.0, val)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.SHARE_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME),
val -> assertTrue(val > 0)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.ACCEPT.toString())),
val -> assertEquals(2.0, val)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.RELEASE.toString())),
val -> assertEquals(2.0, val)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.REJECT.toString())),
val -> assertEquals(2.0, val)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.ACCEPT.toString())),
val -> assertTrue(val > 0)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.RELEASE.toString())),
val -> assertTrue(val > 0)
);
expectedMetrics.put(
metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME,
Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.REJECT.toString())),
val -> assertTrue(val > 0)
);
expectedMetrics.forEach((metric, test) -> {
assertTrue(metrics.metrics().containsKey(metric));
test.accept((Double) metrics.metrics().get(metric).metricValue());
});
}
|
@Override
public TaskManagerMetricsMessageParameters getUnresolvedMessageParameters() {
return new TaskManagerMetricsMessageParameters();
}
|
@Test
void testMessageParameters() {
assertThat(taskManagerMetricsHeaders.getUnresolvedMessageParameters())
.isInstanceOf(TaskManagerMetricsMessageParameters.class);
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof TtlBucket)) {
return false;
}
TtlBucket that = (TtlBucket) o;
return mTtlIntervalStartTimeMs == that.mTtlIntervalStartTimeMs;
}
|
@Test
public void equals() {
TtlBucket firstBucket = new TtlBucket(0);
TtlBucket secondBucket = new TtlBucket(0);
TtlBucket thirdBucket = new TtlBucket(1);
Assert.assertNotEquals(firstBucket, null);
Assert.assertEquals(firstBucket, firstBucket);
Assert.assertEquals(firstBucket, secondBucket);
Assert.assertEquals(secondBucket, firstBucket);
Assert.assertNotEquals(firstBucket, thirdBucket);
}
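
Since equals() compares only mTtlIntervalStartTimeMs, hash-based collections also need a matching hashCode(); a minimal sketch over the same field:

@Override
public int hashCode() {
    // hash on the same field equals() compares, so equal buckets share a hash code
    return Long.hashCode(mTtlIntervalStartTimeMs);
}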
|
public void extractTablesFromSelect(final SelectStatement selectStatement) {
if (selectStatement.getCombine().isPresent()) {
CombineSegment combineSegment = selectStatement.getCombine().get();
extractTablesFromSelect(combineSegment.getLeft().getSelect());
extractTablesFromSelect(combineSegment.getRight().getSelect());
}
if (selectStatement.getFrom().isPresent() && !selectStatement.getCombine().isPresent()) {
extractTablesFromTableSegment(selectStatement.getFrom().get());
}
selectStatement.getWhere().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
if (null != selectStatement.getProjections() && !selectStatement.getCombine().isPresent()) {
extractTablesFromProjections(selectStatement.getProjections());
}
selectStatement.getGroupBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getGroupByItems()));
selectStatement.getOrderBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getOrderByItems()));
selectStatement.getHaving().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
selectStatement.getWithSegment().ifPresent(optional -> extractTablesFromCTEs(optional.getCommonTableExpressions()));
selectStatement.getLock().ifPresent(this::extractTablesFromLock);
}
|
@Test
void assertExtractTablesFromCombineSegment() {
SelectStatement selectStatement = createSelectStatement("t_order");
SubquerySegment left = new SubquerySegment(0, 0, createSelectStatement("t_order"), "");
SubquerySegment right = new SubquerySegment(0, 0, createSelectStatement("t_order_item"), "");
when(selectStatement.getCombine()).thenReturn(Optional.of(new CombineSegment(0, 0, left, CombineType.UNION, right)));
tableExtractor.extractTablesFromSelect(selectStatement);
Collection<SimpleTableSegment> actual = tableExtractor.getRewriteTables();
assertThat(actual.size(), is(2));
Iterator<SimpleTableSegment> iterator = actual.iterator();
assertTableSegment(iterator.next(), 0, 0, "t_order");
assertTableSegment(iterator.next(), 0, 0, "t_order_item");
}
|
@Override
public boolean isEmpty() {
return true;
}
|
@Test
public void testIsEmpty() {
assertThat(sut.isEmpty(), is(true));
}
|
@NonNull
static String getImageUrl(List<FastDocumentFile> files, Uri folderUri) {
// look for special file names
for (String iconLocation : PREFERRED_FEED_IMAGE_FILENAMES) {
for (FastDocumentFile file : files) {
if (iconLocation.equals(file.getName())) {
return file.getUri().toString();
}
}
}
// use the first image in the folder if one exists
for (FastDocumentFile file : files) {
String mime = file.getType();
if (mime != null && (mime.startsWith("image/jpeg") || mime.startsWith("image/png"))) {
return file.getUri().toString();
}
}
// use default icon as fallback
return Feed.PREFIX_GENERATIVE_COVER + folderUri;
}
|
@Test
public void testGetImageUrl_PreferredImagesFilenames() {
for (String filename : LocalFeedUpdater.PREFERRED_FEED_IMAGE_FILENAMES) {
List<FastDocumentFile> folder = Arrays.asList(mockDocumentFile("audio.mp3", "audio/mp3"),
mockDocumentFile(filename, "image/jpeg")); // image MIME type doesn't matter
String imageUrl = LocalFeedUpdater.getImageUrl(folder, Uri.EMPTY);
assertThat(imageUrl, endsWith(filename));
}
}
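
The mockDocumentFile helper used above is not shown; a plausible sketch with Mockito, assuming FastDocumentFile exposes getName(), getType() and getUri():

private static FastDocumentFile mockDocumentFile(String name, String mimeType) {
    FastDocumentFile file = mock(FastDocumentFile.class);
    when(file.getName()).thenReturn(name);
    when(file.getType()).thenReturn(mimeType);
    // hypothetical URI; only the file name suffix matters to the assertions above
    when(file.getUri()).thenReturn(Uri.parse("file:///feed/" + name));
    return file;
}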
|
@PublicAPI(usage = ACCESS)
public static Set<Location> ofPackage(String pkg) {
ImmutableSet.Builder<Location> result = ImmutableSet.builder();
for (Location location : getLocationsOf(asResourceName(pkg))) {
result.add(location);
}
return result.build();
}
|
@Test
public void locations_of_packages_within_file_URIs() throws Exception {
Set<Location> locations = Locations.ofPackage("com.tngtech.archunit.core.importer");
assertThat(urisOf(locations)).contains(
uriOfFolderOf(getClass()),
uriOfFolderOf(Locations.class)
);
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
return FEELFnResult.ofResult( BigDecimal.valueOf( list.size() ) );
}
|
@Test
void invokeParamArrayNonEmpty() {
FunctionTestUtil.assertResult(countFunction.invoke(new Object[]{1, 2, "test"}), BigDecimal.valueOf(3));
}
|
public static MemberVersion of(int major, int minor, int patch) {
if (major == 0 && minor == 0 && patch == 0) {
return MemberVersion.UNKNOWN;
} else {
return new MemberVersion(major, minor, patch);
}
}
|
@Test
public void testVersionOf_whenVersionStringIsNull() {
assertEquals(MemberVersion.UNKNOWN, MemberVersion.of(null));
}
|
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
}
|
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationHasDisabledCronExpressionButNotSpecifiedIdShouldBeOmitted() {
new ApplicationContextRunner()
.withBean(RecurringJobPostProcessor.class)
.withBean(JobScheduler.class, () -> jobScheduler)
.withPropertyValues("my-job.id=")
.withPropertyValues("my-job.cron=-")
.withPropertyValues("my-job.zone-id=Asia/Taipei")
.run(context -> {
context.getBean(RecurringJobPostProcessor.class)
.postProcessAfterInitialization(new MyServiceWithRecurringAnnotationContainingPropertyPlaceholder(), "not important");
verifyNoInteractions(jobScheduler);
});
}
|
public static void retryWithBackoff(
final int maxRetries,
final int initialWaitMs,
final int maxWaitMs,
final Runnable runnable,
final Class<?>... passThroughExceptions) {
retryWithBackoff(
maxRetries,
initialWaitMs,
maxWaitMs,
runnable,
() -> false,
Arrays.stream(passThroughExceptions)
.map(c -> (Predicate<Exception>) c::isInstance)
.collect(Collectors.toList())
);
}
|
@Test
public void shouldBackoffOnFailure() {
doThrow(new RuntimeException("error")).when(runnable).run();
try {
RetryUtil.retryWithBackoff(3, 1, 100, runnable, sleep, () -> false, Collections.emptyList());
fail("retry should have thrown");
} catch (final RuntimeException e) {
    // expected: every attempt throws, so the exception is rethrown once retries are exhausted
}
verify(runnable, times(4)).run();
final InOrder inOrder = Mockito.inOrder(sleep);
inOrder.verify(sleep).accept((long) 1);
inOrder.verify(sleep).accept((long) 2);
inOrder.verify(sleep).accept((long) 4);
inOrder.verifyNoMoreInteractions();
}
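
For reference, a hedged usage sketch of the varargs overload above; flakyCall is a hypothetical Runnable, and TimeoutException stands in for any pass-through type, which is presumably rethrown immediately rather than retried:

// up to 3 retries with exponential backoff between 10ms and 1000ms
RetryUtil.retryWithBackoff(3, 10, 1000, flakyCall, TimeoutException.class);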
|
public static Configuration readSSLConfiguration(Configuration conf,
Mode mode) {
Configuration sslConf = new Configuration(false);
sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, conf.getBoolean(
SSL_REQUIRE_CLIENT_CERT_KEY, SSL_REQUIRE_CLIENT_CERT_DEFAULT));
String sslConfResource;
if (mode == Mode.CLIENT) {
sslConfResource = conf.get(SSL_CLIENT_CONF_KEY,
SSL_CLIENT_CONF_DEFAULT);
} else {
sslConfResource = conf.get(SSL_SERVER_CONF_KEY,
SSL_SERVER_CONF_DEFAULT);
}
sslConf.addResource(sslConfResource);
// Only fallback to input config if classpath SSL config does not load for
// backward compatibility.
if (sslConf.getResource(sslConfResource) == null) {
LOG.debug("{} can't be loaded from classpath, fallback using SSL" +
" config from input configuration.", sslConfResource);
sslConf = conf;
}
return sslConf;
}
|
@Test
public void testSslConfFallback() throws Exception {
Configuration conf = new Configuration(FAKE_SSL_CONFIG);
// Set a non-existent ssl-client.xml so the classpath load fails.
// This triggers fallback to the SSL config from the input conf.
conf.set(SSL_CLIENT_CONF_KEY, "non-exist-ssl-client.xml");
Configuration sslConf = SSLFactory.readSSLConfiguration(conf, CLIENT);
// Verify fallback to input conf when ssl conf can't be loaded from
// classpath.
String clientTsLoc = sslConf.get(getClientTrustStoreKeyName());
assertEquals("trustStoreLocation", clientTsLoc);
assertEquals(sslConf, conf);
}
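
A minimal caller-side sketch, assuming the standard Hadoop SSLFactory keys referenced above:

Configuration conf = new Configuration();
// loads ssl-client.xml from the classpath, falling back to conf if it cannot be loaded
Configuration sslConf = SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.CLIENT);
boolean requireClientCert = sslConf.getBoolean(
        SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT);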
|
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
try {
if(null != status.getModified()) {
// We must both set the accessed and modified time. See AttribFlags.SSH_FILEXFER_ATTR_V3_ACMODTIME
// All times are represented as seconds from Jan 1, 1970 in UTC.
final long atime = Timestamp.toSeconds(System.currentTimeMillis());
final long mtime = Timestamp.toSeconds(status.getModified());
final FileAttributes attrs = new FileAttributes.Builder().withAtimeMtime(atime, mtime).build();
session.sftp().setAttributes(file.getAbsolute(), attrs);
}
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Cannot change timestamp of {0}", e, file);
}
}
|
@Test
public void testSetTimestampDirectory() throws Exception {
final Path home = new SFTPHomeDirectoryService(session).find();
final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
new SFTPDirectoryFeature(session).mkdir(test, new TransferStatus());
final long modified = System.currentTimeMillis();
new SFTPTimestampFeature(session).setTimestamp(test, modified);
assertEquals(TimeUnit.SECONDS.toMillis(TimeUnit.MILLISECONDS.toSeconds(modified)),
        new SFTPListService(session).list(home, new DisabledListProgressListener()).get(test).attributes().getModificationDate(), 0);
new SFTPDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static Predicate parse(String expression)
{
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
boolean isTokenMode = true;
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
}
|
@Test
public void testNotParenOr()
{
final Predicate parsed = PredicateExpressionParser.parse("!(com.linkedin.data.it.AlwaysTruePredicate | com.linkedin.data.it.AlwaysFalsePredicate)");
Assert.assertEquals(parsed.getClass(), NotPredicate.class);
final Predicate intermediate = ((NotPredicate) parsed).getChildPredicate();
Assert.assertEquals(intermediate.getClass(), OrPredicate.class);
final List<Predicate> children = ((OrPredicate) intermediate).getChildPredicates();
Assert.assertEquals(children.get(0).getClass(), AlwaysTruePredicate.class);
Assert.assertEquals(children.get(1).getClass(), AlwaysFalsePredicate.class);
}
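
The parser also handles precedence between operators; a usage sketch combining the '!' and '|' operators seen above with '&', assuming '&' is likewise in OPERATORS with higher precedence than '|':

Predicate p = PredicateExpressionParser.parse(
        "com.linkedin.data.it.AlwaysTruePredicate & !com.linkedin.data.it.AlwaysFalsePredicate");
// expected shape: AND(AlwaysTrue, NOT(AlwaysFalse)), since '!' binds tighter than '&'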
|
public String getName() {
if (elements.isEmpty()) return "";
return elements.get(elements.size() - 1);
}
|
@Test
public void testGetName() {
assertEquals("baz", getAbsolutePath().getName());
assertEquals("baz", getRelativePath().getName());
assertEquals("baz", getWithSlashes().getName());
assertEquals("baz", getAppended().getName());
assertEquals("foo", getOne().getName());
}
|
@VisibleForTesting
@Nonnull
Map<String, Object> prepareContextForPaginatedResponse(@Nonnull List<RuleDao> rules) {
final Map<String, RuleDao> ruleTitleMap = rules
.stream()
.collect(Collectors.toMap(RuleDao::title, dao -> dao));
final Map<String, List<PipelineCompactSource>> result = new HashMap<>();
rules.forEach(r -> result.put(r.id(), new ArrayList<>()));
pipelineServiceHelper.groupByRuleName(
pipelineService::loadAll, ruleTitleMap.keySet())
.forEach((ruleTitle, pipelineDaos) -> {
result.put(
ruleTitleMap.get(ruleTitle).id(),
pipelineDaos.stream()
.map(dao -> PipelineCompactSource.builder()
.id(dao.id())
.title(dao.title())
.build())
.toList()
);
});
return Map.of("used_in_pipelines", result);
}
|
@Test
public void prepareContextForPaginatedResponse_returnsEmptyMapOnEmptyListOfRules() {
assertThat(underTest.prepareContextForPaginatedResponse(List.of()))
.isEqualTo(Map.of("used_in_pipelines", Map.of()));
}
|
@Override
public void removeDevice(DeviceId did) {
snmpDeviceMap.remove(did);
}
|
@Test
public void removeDevice() {
addDevice();
snmpController.removeDevice(device.deviceId());
assertNull("Device should not be present", snmpController.getDevice(device.deviceId()));
}
|
public static RLESparseResourceAllocation merge(ResourceCalculator resCalc,
Resource clusterResource, RLESparseResourceAllocation a,
RLESparseResourceAllocation b, RLEOperator operator, long start, long end)
throws PlanningException {
NavigableMap<Long, Resource> cumA =
a.getRangeOverlapping(start, end).getCumulative();
NavigableMap<Long, Resource> cumB =
b.getRangeOverlapping(start, end).getCumulative();
NavigableMap<Long, Resource> out =
merge(resCalc, clusterResource, cumA, cumB, start, end, operator);
return new RLESparseResourceAllocation(out, resCalc);
}
|
@Test
public void testMergeMax() throws PlanningException {
TreeMap<Long, Resource> a = new TreeMap<>();
TreeMap<Long, Resource> b = new TreeMap<>();
setupArrays(a, b);
RLESparseResourceAllocation rleA =
new RLESparseResourceAllocation(a, new DefaultResourceCalculator());
RLESparseResourceAllocation rleB =
new RLESparseResourceAllocation(b, new DefaultResourceCalculator());
RLESparseResourceAllocation out =
RLESparseResourceAllocation.merge(new DefaultResourceCalculator(),
Resource.newInstance(100 * 128 * 1024, 100 * 32), rleA, rleB,
RLEOperator.max, 0, 60);
System.out.println(out);
long[] time = { 10, 20, 30, 40, 50, 60 };
int[] alloc = { 5, 10, 15, 20, 10 };
validate(out, time, alloc);
}
|
boolean readBuffers(Queue<MemorySegment> buffers, BufferRecycler recycler) throws IOException {
return fileReader.readCurrentRegion(buffers, recycler, this::addBuffer);
}
|
@Test
void testReadBuffers() throws Exception {
CountingAvailabilityListener listener = new CountingAvailabilityListener();
SortMergeSubpartitionReader subpartitionReader =
createSortMergeSubpartitionReader(listener);
assertThat(listener.numNotifications).isZero();
assertThat(subpartitionReader.unsynchronizedGetNumberOfQueuedBuffers()).isZero();
Queue<MemorySegment> segments = createsMemorySegments(2);
subpartitionReader.readBuffers(segments, FreeingBufferRecycler.INSTANCE);
assertThat(listener.numNotifications).isEqualTo(1);
assertThat(subpartitionReader.unsynchronizedGetNumberOfQueuedBuffers()).isEqualTo(1);
assertThat(segments).isEmpty();
segments = createsMemorySegments(2);
subpartitionReader.readBuffers(segments, FreeingBufferRecycler.INSTANCE);
assertThat(listener.numNotifications).isEqualTo(1);
assertThat(subpartitionReader.unsynchronizedGetNumberOfQueuedBuffers()).isEqualTo(2);
assertThat(segments).isEmpty();
while (subpartitionReader.unsynchronizedGetNumberOfQueuedBuffers() > 0) {
checkNotNull(subpartitionReader.getNextBuffer()).buffer().recycleBuffer();
}
segments = createsMemorySegments(numBuffersPerSubpartition);
subpartitionReader.readBuffers(segments, FreeingBufferRecycler.INSTANCE);
assertThat(listener.numNotifications).isEqualTo(2);
assertThat(subpartitionReader.unsynchronizedGetNumberOfQueuedBuffers())
.isEqualTo(numBuffersPerSubpartition - 2);
assertThat(segments.size()).isEqualTo(1);
}
|