focal_method | test_case
---|---
@VisibleForTesting
static String summarize(byte[] value)
{
if (value.length * 2 <= MAX_DISPLAY_CHARACTERS) {
return BaseEncoding.base16().encode(value);
}
return BaseEncoding.base16().encode(value, 0, PREFIX_SUFFIX_BYTES)
+ FILLER
+ BaseEncoding.base16().encode(value, value.length - PREFIX_SUFFIX_BYTES, PREFIX_SUFFIX_BYTES);
}
|
@Test
public void testSummarize()
{
assertEquals(summarize(bytes()), "");
assertEquals(summarize(bytes(1)), "01");
assertEquals(summarize(bytes(255, 254, 253, 252, 251, 250, 249)), "FFFEFDFCFBFAF9");
assertEquals(summarize(bytes(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 249, 250, 251, 252, 253, 254, 255)),
"00010203040506070809F9FAFBFCFDFEFF");
assertEquals(summarize(bytes(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 247, 248, 249, 250, 251, 252, 253, 254, 255)),
"0001020304050607..F8F9FAFBFCFDFEFF");
}
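The constants the method relies on are not shown in this pair. The values below are inferred from the expected strings in the test, not taken from the original source (the test only pins MAX_DISPLAY_CHARACTERS to the range 34–39):

static final int MAX_DISPLAY_CHARACTERS = 34; // a 17-byte value encodes to exactly 34 hex chars and is shown in full; 20 bytes (40 chars) are truncated
static final int PREFIX_SUFFIX_BYTES = 8;     // 8 bytes (16 hex chars) are kept from each end of longer values
static final String FILLER = "..";            // so the 20-byte input renders as "0001020304050607..F8F9FAFBFCFDFEFF"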
|
public Iterable<CounterUpdate> extractMsecCounters(boolean isFinalUpdate) {
return executionStateRegistry.extractUpdates(isFinalUpdate);
}
|
@Test
public void extractMsecCounters() {
BatchModeExecutionContext executionContext =
BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
MetricsContainer metricsContainer = Mockito.mock(MetricsContainer.class);
ProfileScope profileScope = Mockito.mock(ProfileScope.class);
ExecutionState start1 =
executionContext.executionStateRegistry.getState(
NameContext.create("stage", "original-1", "system-1", "user-1"),
ExecutionStateTracker.START_STATE_NAME,
metricsContainer,
profileScope);
ExecutionState process1 =
executionContext.executionStateRegistry.getState(
NameContext.create("stage", "original-1", "system-1", "user-1"),
ExecutionStateTracker.PROCESS_STATE_NAME,
metricsContainer,
profileScope);
ExecutionState start2 =
executionContext.executionStateRegistry.getState(
NameContext.create("stage", "original-2", "system-2", "user-2"),
ExecutionStateTracker.START_STATE_NAME,
metricsContainer,
profileScope);
ExecutionState other =
executionContext.executionStateRegistry.getState(
NameContext.forStage("stage"), "other", null, NoopProfileScope.NOOP);
other.takeSample(120);
start1.takeSample(100);
process1.takeSample(500);
assertThat(
executionContext.extractMsecCounters(false),
containsInAnyOrder(
msecStage("other-msecs", "stage", 120),
msec("start-msecs", "stage", "original-1", 100),
msec("process-msecs", "stage", "original-1", 500)));
process1.takeSample(200);
start2.takeSample(200);
assertThat(
executionContext.extractMsecCounters(false),
containsInAnyOrder(
msec("process-msecs", "stage", "original-1", 500 + 200),
msec("start-msecs", "stage", "original-2", 200)));
process1.takeSample(300);
assertThat(
executionContext.extractMsecCounters(true),
hasItems(
msecStage("other-msecs", "stage", 120),
msec("start-msecs", "stage", "original-1", 100),
msec("process-msecs", "stage", "original-1", 500 + 200 + 300),
msec("start-msecs", "stage", "original-2", 200)));
}
|
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
}
|
@Test
public void shouldHaveCleanupPolicyDeleteCreateStream() {
// Given:
givenStatement("CREATE STREAM x (FOO VARCHAR) WITH (kafka_topic='foo', partitions=1);");
// When:
final CreateSource createSource = ((CreateSource) injector.inject(statement, builder).getStatement());
// Then:
final CreateSourceProperties props = createSource.getProperties();
assertThat(props.getCleanupPolicy(), is(Optional.of(TopicConfig.CLEANUP_POLICY_DELETE)));
}
|
List<Endpoint> endpoints() {
try {
String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
} catch (RestClientException e) {
return handleKnownException(e);
}
}
|
@Test(expected = KubernetesClientException.class)
public void endpointsFailFastWhenLbServiceHasNoPublicAddress() throws JsonProcessingException {
// given
kubernetesClient = newKubernetesClient(ExposeExternallyMode.ENABLED, false, null, null);
stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), endpointsListResponse());
stub(String.format("/api/v1/namespaces/%s/services/service-0", NAMESPACE),
serviceLbWithoutAddr(servicePort(0, 0, 0)));
stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
serviceLbWithoutAddr(servicePort(0, 0, 0)));
stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
serviceLbWithoutAddr(servicePort(0, 0, 0)));
// when
List<Endpoint> result = kubernetesClient.endpoints();
// then
// exception
}
|
public static BytesSchema of() {
return INSTANCE;
}
|
@Test
public void testBytesSchemaOf() {
testBytesSchema(BytesSchema.of());
}
|
@Udf
public List<Long> generateSeriesLong(
@UdfParameter(description = "The beginning of the series") final long start,
@UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
return generateSeriesLong(start, end, end - start > 0 ? 1 : -1);
}
|
@Test
public void shouldThrowIfStepWrongSignLong2() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> rangeUdf.generateSeriesLong(9, 0, 1)
);
// Then:
assertThat(e.getMessage(), containsString(
"GENERATE_SERIES step has wrong sign"));
}
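The test drives a three-argument overload that is not shown. A minimal sketch, assuming the validation message the assertion checks for; the real ksql UDF may differ in details (and this ignores long-overflow edge cases):

public List<Long> generateSeriesLong(final long start, final long end, final long step) {
    // reject a zero step or a step that points away from the end value,
    // e.g. generateSeriesLong(9, 0, 1) from the test above
    if (step == 0 || (end - start) * step < 0) {
        throw new KsqlFunctionException("GENERATE_SERIES step has wrong sign");
    }
    final List<Long> result = new ArrayList<>(); // java.util.ArrayList
    for (long v = start; step > 0 ? v <= end : v >= end; v += step) {
        result.add(v);
    }
    return result;
}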
|
@Override
public RefreshServiceAclsResponse refreshServiceAcls(RefreshServiceAclsRequest request)
throws YarnException, IOException {
// parameter verification.
if (request == null) {
routerMetrics.incrRefreshServiceAclsFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing RefreshServiceAcls request.", null);
}
// call refreshAdminAcls of activeSubClusters.
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{RefreshServiceAclsRequest.class}, new Object[]{request});
String subClusterId = request.getSubClusterId();
Collection<RefreshServiceAclsResponse> refreshServiceAclsResps =
remoteMethod.invokeConcurrent(this, RefreshServiceAclsResponse.class, subClusterId);
if (CollectionUtils.isNotEmpty(refreshServiceAclsResps)) {
long stopTime = clock.getTime();
routerMetrics.succeededRefreshServiceAclsRetrieved(stopTime - startTime);
return RefreshServiceAclsResponse.newInstance();
}
} catch (YarnException e) {
routerMetrics.incrRefreshServiceAclsFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to refreshAdminAcls due to exception. " + e.getMessage());
}
routerMetrics.incrRefreshServiceAclsFailedRetrieved();
throw new YarnException("Unable to refreshServiceAcls.");
}
|
@Test
public void testSC1RefreshServiceAcls() throws Exception {
// case 1, test the existing subCluster (SC-1).
String existSubCluster = "SC-1";
RefreshServiceAclsRequest request = RefreshServiceAclsRequest.newInstance(existSubCluster);
RefreshServiceAclsResponse response = interceptor.refreshServiceAcls(request);
assertNotNull(response);
// case 2, test the non-exist subCluster.
String notExistsSubCluster = "SC-NON";
RefreshServiceAclsRequest request1 = RefreshServiceAclsRequest.newInstance(notExistsSubCluster);
LambdaTestUtils.intercept(Exception.class, "subClusterId = SC-NON is not an active subCluster.",
() -> interceptor.refreshServiceAcls(request1));
}
|
@Override
public void shutdown() {
if (server.started()) {
server.stop();
}
vertx.close();
}
|
@Test
public void shouldCloseInners() {
// When:
checker.shutdown();
// Then:
verify(server).stop();
verify(vertx).close();
}
|
public boolean hasAttribute(String key) {
return attributes().containsKey(toKey(key));
}
|
@Test
void hasAttribute() {
final LDAPEntry entry = LDAPEntry.builder()
.dn("cn=jane,ou=people,dc=example,dc=com")
.base64UniqueId(Base64.encode("unique-id"))
.addAttribute("zero", "0")
.addAttribute("one", null)
.build();
assertThat(entry.hasAttribute("zero")).isTrue();
assertThat(entry.hasAttribute("ZERO")).isTrue();
assertThat(entry.hasAttribute("one")).isFalse();
assertThat(entry.hasAttribute("nope")).isFalse();
}
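The test accepts both "zero" and "ZERO", which suggests toKey(...) normalizes case (and that the builder drops attributes whose value is null, since "one" is reported absent). A minimal sketch of that assumption; the real LDAPEntry helper may differ:

private static String toKey(String key) {
    return key.toLowerCase(Locale.ENGLISH); // java.util.Locale; makes attribute lookup case-insensitive
}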
|
@Override
public void reset() {
gaugeValue.set(GaugeData.empty());
dirty.reset();
}
|
@Test
public void testReset() {
GaugeCell gaugeCell = new GaugeCell(MetricName.named("namespace", "name"));
gaugeCell.set(2);
Assert.assertNotEquals(gaugeCell.getDirty(), new DirtyState());
assertThat(gaugeCell.getCumulative().value(), equalTo(GaugeData.create(2).value()));
gaugeCell.reset();
assertThat(gaugeCell.getCumulative(), equalTo(GaugeData.empty()));
assertThat(gaugeCell.getDirty(), equalTo(new DirtyState()));
}
|
static Boolean orOperator(Boolean aBoolean, Boolean aBoolean2) {
logger.trace("orOperator {} {}", aBoolean, aBoolean2);
return aBoolean != null ? aBoolean || aBoolean2 : aBoolean2;
}
|
@Test
void orOperator() {
Boolean aBoolean = null;
boolean aBoolean2 = true;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isTrue();
aBoolean2 = false;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isFalse();
aBoolean = false;
aBoolean2 = false;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isFalse();
aBoolean = true;
aBoolean2 = false;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isTrue();
aBoolean = false;
aBoolean2 = true;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isTrue();
aBoolean = true;
aBoolean2 = true;
assertThat(KiePMMLCompoundPredicate.orOperator(aBoolean, aBoolean2)).isTrue();
}
|
public void validateUserCanReceivePushEventForProjectUuids(String userUuid, Set<String> projectUuids) {
UserDto userDto;
try (DbSession dbSession = dbClient.openSession(false)) {
userDto = dbClient.userDao().selectByUuid(dbSession, userUuid);
}
if (userDto == null) {
throw new ForbiddenException("User does not exist");
}
UserSession userSession = userSessionFactory.create(userDto, false);
List<ProjectDto> projectDtos;
try (DbSession dbSession = dbClient.openSession(false)) {
projectDtos = dbClient.projectDao().selectByUuids(dbSession, projectUuids);
}
validateProjectPermissions(userSession, projectDtos);
}
|
@Test
public void validate_givenUserNotActivated_throwException() {
UserDto userDto = new UserDto();
when(userDao.selectByUuid(any(), any())).thenReturn(userDto);
when(userSession.isActive()).thenReturn(false);
assertThrows(ForbiddenException.class,
() -> underTest.validateUserCanReceivePushEventForProjectUuids(USER_UUID, exampleProjectuuids));
}
|
public RecordAppendResult append(String topic,
int partition,
long timestamp,
byte[] key,
byte[] value,
Header[] headers,
AppendCallbacks callbacks,
long maxTimeToBlock,
boolean abortOnNewBatch,
long nowMs,
Cluster cluster) throws InterruptedException {
TopicInfo topicInfo = topicInfoMap.computeIfAbsent(topic, k -> new TopicInfo(createBuiltInPartitioner(logContext, k, batchSize)));
// We keep track of the number of appending threads to make sure we do not miss batches in
// abortIncompleteBatches().
appendsInProgress.incrementAndGet();
ByteBuffer buffer = null;
if (headers == null) headers = Record.EMPTY_HEADERS;
try {
// Loop to retry in case we encounter partitioner's race conditions.
while (true) {
// If the message doesn't have any partition affinity, we pick a partition based on the broker
// availability and performance. Note, that here we peek current partition before we hold the
// deque lock, so we'll need to make sure that it's not changed while we were waiting for the
// deque lock.
final BuiltInPartitioner.StickyPartitionInfo partitionInfo;
final int effectivePartition;
if (partition == RecordMetadata.UNKNOWN_PARTITION) {
partitionInfo = topicInfo.builtInPartitioner.peekCurrentPartitionInfo(cluster);
effectivePartition = partitionInfo.partition();
} else {
partitionInfo = null;
effectivePartition = partition;
}
// Now that we know the effective partition, let the caller know.
setPartition(callbacks, effectivePartition);
// check if we have an in-progress batch
Deque<ProducerBatch> dq = topicInfo.batches.computeIfAbsent(effectivePartition, k -> new ArrayDeque<>());
synchronized (dq) {
// After taking the lock, validate that the partition hasn't changed and retry.
if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
continue;
RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs);
if (appendResult != null) {
// If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
boolean enableSwitch = allBatchesFull(dq);
topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
return appendResult;
}
}
// we don't have an in-progress record batch try to allocate a new batch
if (abortOnNewBatch) {
// Return a result that will cause another call to append.
return new RecordAppendResult(null, false, false, true, 0);
}
if (buffer == null) {
byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression.type(), key, value, headers));
log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, effectivePartition, maxTimeToBlock);
// This call may block if we exhausted buffer space.
buffer = free.allocate(size, maxTimeToBlock);
// Update the current time in case the buffer allocation blocked above.
// NOTE: getting time may be expensive, so calling it under a lock
// should be avoided.
nowMs = time.milliseconds();
}
synchronized (dq) {
// After taking the lock, validate that the partition hasn't changed and retry.
if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
continue;
RecordAppendResult appendResult = appendNewBatch(topic, effectivePartition, dq, timestamp, key, value, headers, callbacks, buffer, nowMs);
// Set buffer to null, so that deallocate doesn't return it back to free pool, since it's used in the batch.
if (appendResult.newBatchCreated)
buffer = null;
// If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
boolean enableSwitch = allBatchesFull(dq);
topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
return appendResult;
}
}
} finally {
free.deallocate(buffer);
appendsInProgress.decrementAndGet();
}
}
|
@Test
public void testUniformBuiltInPartitioner() throws Exception {
mockRandom = new AtomicInteger();
long totalSize = 1024 * 1024;
int batchSize = 1024; // note that this is also a "sticky" limit for the partitioner
RecordAccumulator accum = createTestRecordAccumulator(batchSize, totalSize, Compression.NONE, 0);
// Set up callbacks so that we know what partition is chosen.
final AtomicInteger partition = new AtomicInteger(RecordMetadata.UNKNOWN_PARTITION);
RecordAccumulator.AppendCallbacks callbacks = new RecordAccumulator.AppendCallbacks() {
@Override
public void setPartition(int p) {
partition.set(p);
}
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
}
};
PartitionInfo part1 = MetadataResponse.toPartitionInfo(partMetadata1, nodes);
PartitionInfo part2 = MetadataResponse.toPartitionInfo(partMetadata2, nodes);
PartitionInfo part3 = MetadataResponse.toPartitionInfo(partMetadata3, nodes);
Cluster cluster = new Cluster(null, asList(node1, node2), asList(part1, part2, part3),
Collections.emptySet(), Collections.emptySet());
// Produce small record, we should switch to first partition.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, value, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition1, partition.get());
assertEquals(1, mockRandom.get());
// Produce large record, we should exceed "sticky" limit, but produce to this partition
// as we try to switch after the "sticky" limit is exceeded. The switch is disabled
// because of incomplete batch.
byte[] largeValue = new byte[batchSize];
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition1, partition.get());
assertEquals(1, mockRandom.get());
// Produce large record, we should switch to next partition as we complete
// previous batch and exceeded sticky limit.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition2, partition.get());
assertEquals(2, mockRandom.get());
// Produce large record, we should switch to next partition as we complete
// previous batch and exceeded sticky limit.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition3, partition.get());
assertEquals(3, mockRandom.get());
// Produce large record, we should switch to next partition as we complete
// previous batch and exceeded sticky limit.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition1, partition.get());
assertEquals(4, mockRandom.get());
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testProcessElementWithOnTimerContextRejected() throws Exception {
thrown.expect(IllegalArgumentException.class);
// The message should at least mention @ProcessElement and OnTimerContext
thrown.expectMessage("@" + DoFn.ProcessElement.class.getSimpleName());
thrown.expectMessage(DoFn.OnTimerContext.class.getSimpleName());
DoFnSignatures.getSignature(
new DoFn<KV<String, Integer>, Long>() {
@TimerId("foo")
private final TimerSpec bizzle = TimerSpecs.timer(TimeDomain.EVENT_TIME);
@ProcessElement
public void foo(ProcessContext context, OnTimerContext bogus) {}
@OnTimer("foo")
public void onFoo() {}
}.getClass());
}
|
public MediaType detect(InputStream stream, Metadata metadata) throws IOException {
if (stream != null) {
try {
stream.mark(1);
if (stream.read() == -1) {
return MediaType.EMPTY;
}
} finally {
stream.reset();
}
}
return MediaType.OCTET_STREAM;
}
|
@Test
public void testDetectZeroValue() {
byte[] data = "".getBytes(UTF_8);
detect(data, MediaType.EMPTY);
}
|
static byte[] readPrivateKey(Path path) throws KeyException {
final byte[] bytes;
try {
bytes = Files.readAllBytes(path);
} catch (IOException e) {
throw new KeyException("Couldn't read private key from file: " + path, e);
}
final String content = new String(bytes, StandardCharsets.US_ASCII);
final Matcher m = KEY_PATTERN.matcher(content);
if (!m.find()) {
throw new KeyException("No private key found in file: " + path);
}
final String s = CharMatcher.breakingWhitespace().removeFrom(m.group(1));
byte[] base64 = s.getBytes(StandardCharsets.US_ASCII);
return Base64.getDecoder().decode(base64);
}
|
@Test
public void readPrivateKeyHandlesSecuredPrivateKey() throws Exception {
final URL url = Resources.getResource("org/graylog2/shared/security/tls/key-enc-pbe1.p8");
final byte[] privateKey = PemReader.readPrivateKey(Paths.get(url.toURI()));
assertThat(privateKey).isNotEmpty();
}
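KEY_PATTERN is not shown in this pair. A plausible shape, matching a PEM header/footer and capturing the Base64 body as group(1); the exact pattern in PemReader may differ:

private static final Pattern KEY_PATTERN = Pattern.compile(
        "-+BEGIN\\s+.*PRIVATE\\s+KEY[^-]*-+(?:\\s|\\r|\\n)+" // PEM header
        + "([a-z0-9+/=\\r\\n]+)"                             // Base64 body, captured as group(1)
        + "-+END\\s+.*PRIVATE\\s+KEY[^-]*-+",                // PEM footer
        Pattern.CASE_INSENSITIVE);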
|
protected static String anonymizeLog(String log) {
return log.replaceAll(
"(/((home)|(Users))/[^/\n]*)|(\\\\Users\\\\[^\\\\\n]*)",
"/ANONYMIZED_HOME_DIR"); // NOI18N
}
|
@Test
public void testAnonymizeUnixLog() {
String log = "" +
" System Locale; Encoding = en_GB (gephi); UTF-8\n" +
" Home Directory = /home/mjackson\n" +
" Current Directory = /home/mjackson/gephi/modules/application\n" +
" User Directory = /home/mjackson/gephi/modules/application";
String expected = "" +
" System Locale; Encoding = en_GB (gephi); UTF-8\n" +
" Home Directory = /ANONYMIZED_HOME_DIR\n" +
" Current Directory = /ANONYMIZED_HOME_DIR/gephi/modules/application\n" +
" User Directory = /ANONYMIZED_HOME_DIR/gephi/modules/application";
String anonymized = ReportController.anonymizeLog(log);
Assert.assertEquals(expected, anonymized);
}
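The second alternative in the regex targets Windows-style profile paths. A quick check of that branch, not part of the original suite:

String windowsLog = "User Directory = \\Users\\mjackson\\gephi";
// the backslash alternative matches "\Users\mjackson" and stops at the next backslash
Assert.assertEquals("User Directory = /ANONYMIZED_HOME_DIR\\gephi",
        ReportController.anonymizeLog(windowsLog));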
|
public void writeBytesDecreasing(byte[] value) {
writeBytes(value, true);
}
|
@Test
public void testWriteBytesDecreasing() {
byte[] first = {'a', 'b', 'c'};
byte[] second = {'d', 'e', 'f'};
byte[] last = {'x', 'y', 'z'};
OrderedCode orderedCode = new OrderedCode();
orderedCode.writeBytesDecreasing(first);
byte[] firstEncoded = orderedCode.getEncodedBytes();
assertArrayEquals(orderedCode.readBytesDecreasing(), first);
orderedCode.writeBytesDecreasing(first);
orderedCode.writeBytesDecreasing(second);
orderedCode.writeBytesDecreasing(last);
byte[] allEncoded = orderedCode.getEncodedBytes();
assertArrayEquals(orderedCode.readBytesDecreasing(), first);
assertArrayEquals(orderedCode.readBytesDecreasing(), second);
assertArrayEquals(orderedCode.readBytesDecreasing(), last);
orderedCode = new OrderedCode(firstEncoded);
orderedCode.writeBytesDecreasing(second);
orderedCode.writeBytesDecreasing(last);
assertArrayEquals(orderedCode.getEncodedBytes(), allEncoded);
assertArrayEquals(orderedCode.readBytesDecreasing(), first);
assertArrayEquals(orderedCode.readBytesDecreasing(), second);
assertArrayEquals(orderedCode.readBytesDecreasing(), last);
orderedCode = new OrderedCode(allEncoded);
assertArrayEquals(orderedCode.readBytesDecreasing(), first);
assertArrayEquals(orderedCode.readBytesDecreasing(), second);
assertArrayEquals(orderedCode.readBytesDecreasing(), last);
}
|
@Override
public ExportResult<MediaContainerResource> export(UUID jobId, AD authData,
Optional<ExportInformation> exportInfo) throws Exception {
ExportResult<PhotosContainerResource> per = exportPhotos(jobId, authData, exportInfo);
if (per.getThrowable().isPresent()) {
return new ExportResult<>(per.getThrowable().get());
}
ExportResult<VideosContainerResource> ver = exportVideos(jobId, authData, exportInfo);
if (ver.getThrowable().isPresent()) {
return new ExportResult<>(ver.getThrowable().get());
}
return mergeResults(per, ver);
}
|
@Test
public void shouldHandleOnlyVideos() throws Exception {
MediaContainerResource mcr = new MediaContainerResource(albums, null, videos);
ExportResult<MediaContainerResource> exp = new ExportResult<>(ResultType.END, mcr);
Optional<ExportInformation> ei = Optional.of(new ExportInformation(null, mcr));
ExportResult<MediaContainerResource> res = mediaExporter.export(null, null, ei);
assertEquals(exp, res);
}
|
public HtmlCreator center(String text) {
html.append("<center>").append(text).append("</center>");
return this;
}
|
@Test
public void testCenter() {
htmlCreator.center("Hello");
Assert.assertEquals(true, htmlCreator.html().contains("<center>Hello</center>"));
}
|
public StatementExecutorResponse execute(
final ConfiguredStatement<? extends Statement> statement,
final KsqlExecutionContext executionContext,
final KsqlSecurityContext securityContext
) {
final String commandRunnerWarningString = commandRunnerWarning.get();
if (!commandRunnerWarningString.equals("")) {
throw new KsqlServerException("Failed to handle Ksql Statement."
+ System.lineSeparator()
+ commandRunnerWarningString);
}
final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
injectorFactory.apply(executionContext, securityContext.getServiceContext()));
final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects =
injector.injectWithSideEffects(statement);
try {
return executeInjected(
injectedWithSideEffects.getStatement(),
statement,
executionContext,
securityContext);
} catch (Exception e) {
injector.revertSideEffects(injectedWithSideEffects);
throw e;
}
}
|
@Test
public void shouldReturnCommandStatus() {
// When:
final CommandStatusEntity commandStatusEntity =
(CommandStatusEntity) distributor.execute(
CONFIGURED_STATEMENT,
executionContext,
securityContext
)
.getEntity()
.orElseThrow(null);
// Then:
assertThat(commandStatusEntity,
equalTo(new CommandStatusEntity("", CS_COMMAND, SUCCESS_STATUS, 1L)));
}
|
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
return new StreamingJobGraphGenerator(
Thread.currentThread().getContextClassLoader(),
streamGraph,
null,
Runnable::run)
.createJobGraph();
}
|
@Test
void testHybridShuffleModeInNonBatchMode() {
Configuration configuration = new Configuration();
// set all edge to HYBRID_FULL result partition type.
configuration.set(
ExecutionOptions.BATCH_SHUFFLE_MODE, BatchShuffleMode.ALL_EXCHANGES_HYBRID_FULL);
configuration.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
env.disableOperatorChaining();
DataStreamSource<Integer> source = env.fromData(1, 2, 3);
final DataStream<Integer> partitioned =
new DataStream<>(
env,
new PartitionTransformation<>(
source.getTransformation(),
new RebalancePartitioner<>(),
StreamExchangeMode.HYBRID_FULL));
partitioned.sinkTo(new DiscardingSink<>());
StreamGraph streamGraph = env.getStreamGraph();
assertThatThrownBy(() -> StreamingJobGraphGenerator.createJobGraph(streamGraph))
.isInstanceOf(IllegalStateException.class);
}
|
public String ldapLogin(String userId, String userPwd) {
Properties searchEnv = getManagerLdapEnv();
LdapContext ctx = null;
try {
// Connect to the LDAP server and Authenticate with a service user of whom we know the DN and credentials
ctx = new InitialLdapContext(searchEnv, null);
SearchControls sc = new SearchControls();
sc.setReturningAttributes(new String[]{ldapEmailAttribute});
sc.setSearchScope(SearchControls.SUBTREE_SCOPE);
EqualsFilter filter = new EqualsFilter(ldapUserIdentifyingAttribute, userId);
NamingEnumeration<SearchResult> results = ctx.search(ldapBaseDn, filter.toString(), sc);
if (results.hasMore()) {
// get the users DN (distinguishedName) from the result
SearchResult result = results.next();
NamingEnumeration<? extends Attribute> attrs = result.getAttributes().getAll();
while (attrs.hasMore()) {
// Open another connection to the LDAP server with the found DN and the password
searchEnv.put(Context.SECURITY_PRINCIPAL, result.getNameInNamespace());
searchEnv.put(Context.SECURITY_CREDENTIALS, userPwd);
try {
new InitialDirContext(searchEnv);
} catch (Exception e) {
log.warn("invalid ldap credentials or ldap search error", e);
return null;
}
Attribute attr = attrs.next();
if (attr.getID().equals(ldapEmailAttribute)) {
return (String) attr.get();
}
}
}
} catch (NamingException e) {
log.error("ldap search error", e);
return null;
} finally {
try {
if (ctx != null) {
ctx.close();
}
} catch (NamingException e) {
log.error("ldap context close error", e);
}
}
return null;
}
|
@Test
public void ldapLogin() throws NoSuchFieldException, IllegalAccessException {
changeSslEnable(false);
String email = ldapService.ldapLogin(username, correctPassword);
Assertions.assertEquals("[email protected]", email);
}
|
@Override
public final boolean offer(int ordinal, @Nonnull Object item) {
if (ordinal == -1) {
return offerInternal(allEdges, item);
} else {
if (ordinal == bucketCount()) {
// ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method
throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
}
singleEdge[0] = ordinal;
return offerInternal(singleEdge, item);
}
}
|
@Test
public void when_offer2_then_rateLimited() {
do_when_offer_then_rateLimited(e -> outbox.offer(0, e));
}
|
static String escapeAndJoin(List<String> parts) {
return parts.stream()
.map(ZetaSqlIdUtils::escapeSpecialChars)
.map(ZetaSqlIdUtils::replaceWhitespaces)
.map(ZetaSqlIdUtils::backtickIfNeeded)
.collect(joining("."));
}
|
@Test
public void testHandlesWhiteSpacesInOnePart() {
List<String> id = Arrays.asList("a\nab\tbc\rcd\fd");
assertEquals("`a\\nab\\tbc\\rcd\\fd`", ZetaSqlIdUtils.escapeAndJoin(id));
}
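The expected value shows each whitespace character being rewritten to its escape sequence before the part is backticked. A sketch of just that step, inferred from the assertion (escapeSpecialChars and backtickIfNeeded are not sketched here):

private static String replaceWhitespaces(String s) {
    return s.replace("\n", "\\n")
            .replace("\t", "\\t")
            .replace("\r", "\\r")
            .replace("\f", "\\f");
}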
|
@Override
public boolean supportsTransactionIsolationLevel(final int level) {
return false;
}
|
@Test
void assertSupportsTransactionIsolationLevel() {
assertFalse(metaData.supportsTransactionIsolationLevel(0));
}
|
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
}
|
@Test
public void testDiscoverWhenReaderRegistered() throws Exception {
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register one reader, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// make one split available and trigger the periodic discovery
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
splitPlanner.addSplits(splits);
enumeratorContext.triggerAllActions();
assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty();
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.contains(splits.get(0));
}
|
public static void assertThatClassIsUtility(Class<?> clazz) {
final UtilityClassChecker checker = new UtilityClassChecker();
if (!checker.isProperlyDefinedUtilityClass(clazz)) {
final Description toDescription = new StringDescription();
final Description mismatchDescription = new StringDescription();
checker.describeTo(toDescription);
checker.describeMismatch(mismatchDescription);
final String reason =
"\n" +
"Expected: is \"" + toDescription.toString() + "\"\n" +
" but : was \"" + mismatchDescription.toString() + "\"";
throw new AssertionError(reason);
}
}
|
@Test
public void testOnlyOneConstructor() throws Exception {
boolean gotException = false;
try {
assertThatClassIsUtility(TwoConstructors.class);
} catch (AssertionError assertion) {
assertThat(assertion.getMessage(),
containsString("more than one constructor"));
gotException = true;
}
assertThat(gotException, is(true));
}
|
public boolean acquire(final Object o) {
if (Objects.isNull(o)) {
throw new NullPointerException();
}
if (memory.sum() >= memoryLimit) {
return false;
}
acquireLock.lock();
try {
final long sum = memory.sum();
final long objectSize = inst.getObjectSize(o);
if (sum + objectSize >= memoryLimit) {
return false;
}
memory.add(objectSize);
if (memory.sum() < memoryLimit) {
notLimited.signal();
}
} finally {
acquireLock.unlock();
}
if (memory.sum() > 0) {
signalNotEmpty();
}
return true;
}
|
@Test
public void testAcquireWithTimeWaitNotRelease() throws InterruptedException {
MemoryLimiter memoryLimiter = new MemoryLimiter(testObjectSize + 1, instrumentation);
memoryLimiter.acquire(testObject);
assertFalse(memoryLimiter.acquire(testObject, 1, TimeUnit.SECONDS));
}
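The test calls a timed acquire(Object, long, TimeUnit) overload that is not shown. A hypothetical sketch, assuming the same memory, memoryLimit, acquireLock, notLimited, and inst fields as the untimed variant above:

public boolean acquire(final Object o, final long timeout, final TimeUnit unit) throws InterruptedException {
    if (Objects.isNull(o)) {
        throw new NullPointerException();
    }
    long nanos = unit.toNanos(timeout);
    acquireLock.lockInterruptibly();
    try {
        final long objectSize = inst.getObjectSize(o);
        // wait on the notLimited condition until the object fits or the timeout elapses
        while (memory.sum() + objectSize >= memoryLimit) {
            if (nanos <= 0) {
                return false;
            }
            nanos = notLimited.awaitNanos(nanos);
        }
        memory.add(objectSize);
        if (memory.sum() < memoryLimit) {
            notLimited.signal();
        }
    } finally {
        acquireLock.unlock();
    }
    if (memory.sum() > 0) {
        signalNotEmpty();
    }
    return true;
}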
|
@Override
public Processor<K, Change<V>, K, Change<V>> get() {
return new KTableReduceProcessor();
}
|
@Test
public void shouldAddAndSubtract() {
final InternalMockProcessorContext<String, Change<Set<String>>> context = new InternalMockProcessorContext<>();
final Processor<String, Change<Set<String>>, String, Change<Set<String>>> reduceProcessor =
new KTableReduce<String, Set<String>>(
"myStore",
this::unionNotNullArgs,
this::differenceNotNullArgs
).get();
final TimestampedKeyValueStore<String, Set<String>> myStore =
new GenericInMemoryTimestampedKeyValueStore<>("myStore");
context.register(myStore, null);
reduceProcessor.init(context);
context.setCurrentNode(new ProcessorNode<>("reduce", reduceProcessor, singleton("myStore")));
reduceProcessor.process(new Record<>("A", new Change<>(singleton("a"), null), 10L));
assertEquals(ValueAndTimestamp.make(singleton("a"), 10L), myStore.get("A"));
reduceProcessor.process(new Record<>("A", new Change<>(singleton("b"), singleton("a")), 15L));
assertEquals(ValueAndTimestamp.make(singleton("b"), 15L), myStore.get("A"));
reduceProcessor.process(new Record<>("A", new Change<>(null, singleton("b")), 12L));
assertEquals(ValueAndTimestamp.make(emptySet(), 15L), myStore.get("A"));
}
|
public static Slice truncateToLengthAndTrimSpaces(Slice slice, Type type)
{
requireNonNull(type, "type is null");
if (!isCharType(type)) {
throw new IllegalArgumentException("type must be the instance of CharType");
}
return truncateToLengthAndTrimSpaces(slice, CharType.class.cast(type));
}
|
@Test
public void testTruncateToLengthAndTrimSpaces()
{
assertEquals(utf8Slice("a"), truncateToLengthAndTrimSpaces(utf8Slice("a c"), 1));
assertEquals(utf8Slice("a"), truncateToLengthAndTrimSpaces(utf8Slice("a "), 1));
assertEquals(utf8Slice("a"), truncateToLengthAndTrimSpaces(utf8Slice("abc"), 1));
assertEquals(utf8Slice(""), truncateToLengthAndTrimSpaces(utf8Slice("a c"), 0));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c "), 3));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c "), 4));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c "), 5));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c"), 3));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c"), 4));
assertEquals(utf8Slice("a c"), truncateToLengthAndTrimSpaces(utf8Slice("a c"), 5));
assertEquals(utf8Slice(""), truncateToLengthAndTrimSpaces(utf8Slice(" "), 1));
assertEquals(utf8Slice(""), truncateToLengthAndTrimSpaces(utf8Slice(""), 1));
}
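The assertions exercise a (Slice, int) overload rather than the (Slice, Type) wrapper shown. A sketch of its assumed behavior, using hypothetical helper names: truncate to at most maxLength code points, then drop trailing spaces.

public static Slice truncateToLengthAndTrimSpaces(Slice slice, int maxLength) {
    // e.g. "a c " with maxLength 4 keeps "a c " and trims to "a c";
    // "a c" with maxLength 1 keeps "a"; " " with maxLength 1 trims to ""
    return trimTrailingSpaces(truncateToLength(slice, maxLength));
}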
|
public static String getSQLSelectString(PoiCategoryFilter filter, int count, LatLong orderBy) {
StringBuilder sb = new StringBuilder();
sb.append(DbConstants.FIND_IN_BOX_CLAUSE_SELECT);
sb.append(DbConstants.JOIN_CATEGORY_CLAUSE);
sb.append(DbConstants.JOIN_DATA_CLAUSE);
sb.append(DbConstants.FIND_IN_BOX_CLAUSE_WHERE);
sb.append(getSQLWhereClauseString(filter));
for (int i = 0; i < count; i++) {
sb.append(i == 0 ? " AND (" : " OR ");
sb.append(DbConstants.FIND_BY_DATA_CLAUSE);
if (i == count - 1) {
sb.append(")");
}
}
if (orderBy != null) {
sb.append(" ORDER BY ((").append(orderBy.latitude).append(" - poi_index.lat) * (").append(orderBy.latitude).append(" - poi_index.lat))")
.append(" + ((").append(orderBy.longitude).append(" - poi_index.lon) * (").append(orderBy.longitude).append(" - poi_index.lon)) ASC");
}
return (sb.append(" LIMIT ?;").toString());
}
|
@Test
public void selectTwoFromBalancedHierarchy() throws UnknownPoiCategoryException {
PoiCategoryFilter filter = new WhitelistPoiCategoryFilter();
filter.addCategory(this.balancedCm.getPoiCategoryByTitle("l1_1"));
filter.addCategory(this.balancedCm.getPoiCategoryByTitle("l1_2"));
String query = PoiCategoryRangeQueryGenerator.getSQLSelectString(filter, 0, null);
System.out.println("Query: " + query);
// TODO add assertions
}
|
public void createMapping(
String mappingName,
String tableName,
List<SqlColumnMetadata> mappingColumns,
String dataConnectionRef,
String idColumn
) {
sqlService.execute(
createMappingQuery(mappingName, tableName, mappingColumns, dataConnectionRef, idColumn)
).close();
}
|
@Test
@SuppressWarnings("OperatorWrap")
public void when_createMapping_then_escapeParameters() {
mappingHelper.createMapping(
"my\"Mapping",
"my\"Table",
singletonList(new SqlColumnMetadata("id\"", SqlColumnType.INTEGER, true)),
"data\"ConnectionRef",
"id'Column"
);
verify(sqlService).execute(
"CREATE MAPPING \"my\"\"Mapping\" " +
"EXTERNAL NAME \"my\"\"Table\" " +
"( \"id\"\"\" INTEGER ) " +
"DATA CONNECTION \"data\"\"ConnectionRef\" " +
"OPTIONS (" +
" 'idColumn' = 'id''Column' " +
")"
);
}
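The expected SQL encodes two quoting rules: identifiers are wrapped in double quotes with embedded double quotes doubled, and option values are wrapped in single quotes with embedded single quotes doubled. A minimal sketch of helpers that would produce it (hypothetical names):

static String quoteIdentifier(String id) {
    return "\"" + id.replace("\"", "\"\"") + "\""; // my"Mapping -> "my""Mapping"
}

static String quoteLiteral(String value) {
    return "'" + value.replace("'", "''") + "'";   // id'Column -> 'id''Column'
}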
|
@Nullable
public TrackerClient getTrackerClient(Request request,
RequestContext requestContext,
Ring<URI> ring,
Map<URI, TrackerClient> trackerClients)
{
TrackerClient trackerClient;
URI targetHostUri = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
if (targetHostUri != null)
{
trackerClient = getTrackerClientFromTarget(targetHostUri, requestContext, trackerClients);
}
else
{
trackerClient = getTrackerClientFromRing(request, requestContext, ring, trackerClients);
}
addToExcludedHosts(trackerClient, requestContext);
return trackerClient;
}
|
@Test
public void testGetTargetHost()
{
KeyMapper.TargetHostHints.setRequestContextTargetHost(_requestContext, URI_1);
TrackerClient trackerClient = _clientSelector.getTrackerClient(_request, _requestContext, DEFAULT_RING, DEFAULT_TRACKER_CLIENT_MAP);
assertEquals(trackerClient.getUri(), URI_1);
}
|
@Override
public RestResponse<KsqlEntityList> makeKsqlRequest(
final URI serverEndPoint,
final String sql,
final Map<String, ?> requestProperties) {
final KsqlTarget target = sharedClient
.target(serverEndPoint);
return getTarget(target)
.postKsqlRequest(sql, requestProperties, Optional.empty());
}
|
@Test
public void shouldGetRightTarget() {
// When:
client.makeKsqlRequest(SERVER_ENDPOINT, "Sql", ImmutableMap.of());
// Then:
verify(sharedClient).target(SERVER_ENDPOINT);
}
|
public String anonymize(final ParseTree tree) {
return build(tree);
}
|
@Test
public void printStatementsShouldGetAnonymized() {
Assert.assertEquals("PRINT topic FROM BEGINNING;",
anon.anonymize("PRINT my_topic FROM BEGINNING;"));
Assert.assertEquals("PRINT topic INTERVAL '0';",
anon.anonymize("PRINT my_topic INTERVAL 2;"));
Assert.assertEquals("PRINT topic LIMIT '0';",
anon.anonymize("PRINT my_topic LIMIT 3;"));
}
|
@Override
public V poll(long timeout, TimeUnit unit) throws InterruptedException {
return commandExecutor.getInterrupted(pollAsync(timeout, unit));
}
|
@Test
public void testPollInterrupted() throws InterruptedException {
final AtomicBoolean interrupted = new AtomicBoolean();
Thread t = new Thread() {
public void run() {
try {
RBlockingQueue<Integer> queue1 = getQueue(redisson);
queue1.poll(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
interrupted.set(true);
}
}
};
t.start();
t.join(1000);
t.interrupt();
Awaitility.await().atMost(Duration.ofSeconds(1)).untilTrue(interrupted);
}
|
public static void closeAll(Closeable... closeables) throws IOException {
IOException exception = null;
for (Closeable closeable : closeables) {
try {
if (closeable != null)
closeable.close();
} catch (IOException e) {
if (exception != null)
exception.addSuppressed(e);
else
exception = e;
}
}
if (exception != null)
throw exception;
}
|
@Test
public void testCloseAll() {
TestCloseable[] closeablesWithoutException = TestCloseable.createCloseables(false, false, false);
try {
Utils.closeAll(closeablesWithoutException);
TestCloseable.checkClosed(closeablesWithoutException);
} catch (IOException e) {
fail("Unexpected exception: " + e);
}
TestCloseable[] closeablesWithException = TestCloseable.createCloseables(true, true, true);
try {
Utils.closeAll(closeablesWithException);
fail("Expected exception not thrown");
} catch (IOException e) {
TestCloseable.checkClosed(closeablesWithException);
TestCloseable.checkException(e, closeablesWithException);
}
TestCloseable[] singleExceptionCloseables = TestCloseable.createCloseables(false, true, false);
try {
Utils.closeAll(singleExceptionCloseables);
fail("Expected exception not thrown");
} catch (IOException e) {
TestCloseable.checkClosed(singleExceptionCloseables);
TestCloseable.checkException(e, singleExceptionCloseables[1]);
}
TestCloseable[] mixedCloseables = TestCloseable.createCloseables(false, true, false, true, true);
try {
Utils.closeAll(mixedCloseables);
fail("Expected exception not thrown");
} catch (IOException e) {
TestCloseable.checkClosed(mixedCloseables);
TestCloseable.checkException(e, mixedCloseables[1], mixedCloseables[3], mixedCloseables[4]);
}
}
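closeAll propagates the first IOException and attaches any later ones as suppressed exceptions, as the mixed case above verifies. A usage sketch with hypothetical stream and logger variables:

try {
    Utils.closeAll(inputStream, outputStream, channel);
} catch (IOException e) {
    for (Throwable suppressed : e.getSuppressed()) {
        log.warn("additional close failure", suppressed); // later failures are not lost
    }
    throw e;
}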
|
public void startAsync() {
try {
udfLoader.load();
ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
serviceContext.getTopicClient(),
processingLogConfig,
ksqlConfig);
if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
log.warn("processing log auto-create is enabled, but this is not supported "
+ "for headless mode.");
}
rocksDBConfigSetterHandler.accept(ksqlConfig);
processesQueryFile(readQueriesFile(queriesFile));
showWelcomeMessage();
final Properties properties = new Properties();
ksqlConfig.originals().forEach((key, value) -> {
if (nonNull(value)) {
properties.put(key, value.toString());
}
});
versionChecker.start(KsqlModuleType.SERVER, properties);
} catch (final Exception e) {
log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
throw e;
}
}
|
@Test
public void shouldConfigureRocksDBConfigSetter() {
// Given:
givenQueryFileParsesTo(PREPARED_CSAS);
when(sandBoxTopicInjector.inject(argThat(configured(equalTo(PREPARED_CSAS)))))
.thenReturn((ConfiguredStatement) CSAS_CFG_WITH_TOPIC);
// When:
standaloneExecutor.startAsync();
// Then:
verify(rocksDBConfigSetterHandler).accept(ksqlConfig);
}
|
public static KeyFormat sanitizeKeyFormat(
final KeyFormat keyFormat,
final List<SqlType> newKeyColumnSqlTypes,
final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
return sanitizeKeyFormatWrapping(
!allowKeyFormatChangeToSupportNewKeySchema ? keyFormat :
sanitizeKeyFormatForTypeCompatibility(
sanitizeKeyFormatForMultipleColumns(
keyFormat,
newKeyColumnSqlTypes.size()),
newKeyColumnSqlTypes
),
newKeyColumnSqlTypes.size() == 1
);
}
|
@Test
public void shouldConvertNoneFormatForSingleKeyWithNonPrimitiveType() {
// Given:
final KeyFormat format = KeyFormat.nonWindowed(
FormatInfo.of(NoneFormat.NAME),
SerdeFeatures.of());
// When:
final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, ImmutableList.of(SqlTypes.struct().build()), true);
// Then:
assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME)));
assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)));
}
|
public Schema getSchema() {
return context.getSchema();
}
|
@Test
public void testReversedOneOfSchema() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(ReversedOneOf.getDescriptor());
Schema schema = schemaProvider.getSchema();
assertEquals(REVERSED_ONEOF_SCHEMA, schema);
}
|
@Nullable
public static <T> T checkSerializable(@Nullable T object, @Nonnull String objectName) {
if (object == null) {
return null;
}
if (object instanceof DataSerializable) {
// hz-serialization is implemented, but we cannot actually check it - we don't have a
// SerializationService at hand.
return object;
}
if (!(object instanceof Serializable)) {
throw new IllegalArgumentException('"' + objectName + "\" must implement Serializable");
}
try (ObjectOutputStream os = new ObjectOutputStream(OutputStream.nullOutputStream())) {
os.writeObject(object);
} catch (NotSerializableException | InvalidClassException e) {
throw new IllegalArgumentException("\"" + objectName + "\" must be serializable", e);
} catch (IOException e) {
// never really thrown, as the underlying stream never throws it
throw new JetException(e);
}
return object;
}
|
@Test
public void whenSerializableObjectToCheckSerializable_thenReturnObject() {
Object o = "o";
Object returned = Util.checkSerializable(o, "object");
assertThat(returned).isSameAs(o);
}
|
@Override
public List<TransferItem> list(final Session<?> session, final Path directory, final Local local,
final ListProgressListener listener) throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("List children for %s", directory));
}
final AttributedList<Path> list = session.getFeature(ListService.class).list(directory, listener).filter(comparator, filter);
final Path copy = mapping.get(directory);
for(Path f : list) {
mapping.put(f, new Path(copy, f.getName(), EnumSet.of(f.isDirectory() ? Path.Type.directory : Path.Type.file)));
}
final List<TransferItem> nullified = new ArrayList<>();
for(Path p : list) {
nullified.add(new TransferItem(p));
}
return nullified;
}
|
@Test
public void testList() throws Exception {
Transfer t = new CopyTransfer(new Host(new TestProtocol()),
new Host(new TestProtocol()), new HashMap<>(Collections.singletonMap(
new Path("/s", EnumSet.of(Path.Type.directory)),
new Path("/t", EnumSet.of(Path.Type.directory)))));
final NullSession session = new NullSession(new Host(new TestProtocol())) {
@Override
public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
final AttributedList<Path> children = new AttributedList<>();
children.add(new Path("/s/c", EnumSet.of(Path.Type.file)));
return children;
}
};
assertEquals(Collections.singletonList(new TransferItem(new Path("/s/c", EnumSet.of(Path.Type.file)))),
t.list(session, new Path("/s", EnumSet.of(Path.Type.directory)), null, new DisabledListProgressListener())
);
}
|
public static String serialize(Object object) {
try {
return Base64.getUrlEncoder()
.encodeToString(OBJECT_MAPPER.writeValueAsBytes(object));
} catch (JsonProcessingException e) {
throw new IllegalArgumentException("The given Json object value: "
+ object + " cannot be transformed to a String", e);
}
}
|
@Test
public void serializeDeserializeOAuth2AuthorizationRequestTest() {
HttpCookieOAuth2AuthorizationRequestRepository cookieRequestRepo = new HttpCookieOAuth2AuthorizationRequestRepository();
HttpServletRequest servletRequest = Mockito.mock(HttpServletRequest.class);
Map<String, Object> additionalParameters = new LinkedHashMap<>();
additionalParameters.put("param1", "value1");
additionalParameters.put("param2", "value2");
var request = OAuth2AuthorizationRequest.authorizationCode()
.authorizationUri("testUri").clientId("testId")
.scope("read", "write")
.additionalParameters(additionalParameters).build();
Cookie cookie = new Cookie(OAUTH2_AUTHORIZATION_REQUEST_COOKIE_NAME, CookieUtils.serialize(request));
Mockito.when(servletRequest.getCookies()).thenReturn(new Cookie[]{cookie});
OAuth2AuthorizationRequest deserializedRequest = cookieRequestRepo.loadAuthorizationRequest(servletRequest);
assertNotNull(deserializedRequest);
assertEquals(request.getGrantType(), deserializedRequest.getGrantType());
assertEquals(request.getAuthorizationUri(), deserializedRequest.getAuthorizationUri());
assertEquals(request.getClientId(), deserializedRequest.getClientId());
}
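The test round-trips through a deserializer that is not shown in this pair. A hypothetical counterpart to serialize(...), assuming the same OBJECT_MAPPER and the URL-safe Base64 alphabet:

public static <T> T deserialize(String value, Class<T> cls) {
    try {
        return OBJECT_MAPPER.readValue(Base64.getUrlDecoder().decode(value), cls);
    } catch (IOException e) {
        throw new IllegalArgumentException("The given String value: "
                + value + " cannot be transformed to a Json object", e);
    }
}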
|
@Override
public boolean isValid(@Nullable String value, ConstraintValidatorContext context) {
return value == null || !value.isEmpty();
}
|
@Test
void isValid_shouldNotValidateEmptyString() {
assertFalse(validator.isValid("", context));
}
|
public static int scanForGap(
final UnsafeBuffer termBuffer,
final int termId,
final int termOffset,
final int limitOffset,
final GapHandler handler)
{
int offset = termOffset;
do
{
final int frameLength = frameLengthVolatile(termBuffer, offset);
if (frameLength <= 0)
{
break;
}
offset += align(frameLength, FRAME_ALIGNMENT);
}
while (offset < limitOffset);
final int gapBeginOffset = offset;
if (offset < limitOffset)
{
final int limit = limitOffset - ALIGNED_HEADER_LENGTH;
while (offset < limit)
{
offset += FRAME_ALIGNMENT;
if (0 != termBuffer.getIntVolatile(offset))
{
offset -= ALIGNED_HEADER_LENGTH;
break;
}
}
final int gapLength = (offset - gapBeginOffset) + ALIGNED_HEADER_LENGTH;
handler.onGap(termId, gapBeginOffset, gapLength);
}
return gapBeginOffset;
}
|
@Test
void shouldReportGapAtBeginningOfBuffer()
{
final int frameOffset = align(HEADER_LENGTH * 3, FRAME_ALIGNMENT);
final int highWaterMark = frameOffset + align(HEADER_LENGTH, FRAME_ALIGNMENT);
when(termBuffer.getIntVolatile(frameOffset)).thenReturn(HEADER_LENGTH);
assertEquals(0, TermGapScanner.scanForGap(termBuffer, TERM_ID, 0, highWaterMark, gapHandler));
verify(gapHandler).onGap(TERM_ID, 0, frameOffset);
}
|
@Override
public boolean doesMaxRowSizeIncludeBlobs() {
return false;
}
|
@Test
void assertDoesMaxRowSizeIncludeBlobs() {
assertFalse(metaData.doesMaxRowSizeIncludeBlobs());
}
|
@PUT
@Path("/{connector}/config")
@Operation(summary = "Create or reconfigure the specified connector")
public Response putConnectorConfig(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig) throws Throwable {
FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
checkAndPutConnectorConfigName(connector, connectorConfig);
herder.putConnectorConfig(connector, connectorConfig, true, cb);
Herder.Created<ConnectorInfo> createdInfo = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/config",
"PUT", headers, connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);
Response.ResponseBuilder response;
if (createdInfo.created()) {
URI location = UriBuilder.fromUri("/connectors").path(connector).build();
response = Response.created(location);
} else {
response = Response.ok();
}
return response.entity(createdInfo.result()).build();
}
|
@Test
public void testPutConnectorConfig() throws Throwable {
final ArgumentCaptor<Callback<Herder.Created<ConnectorInfo>>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackResult(cb, new Herder.Created<>(false, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES,
ConnectorType.SINK))
).when(herder).putConnectorConfig(eq(CONNECTOR_NAME), eq(CONNECTOR_CONFIG), eq(true), cb.capture());
connectorsResource.putConnectorConfig(CONNECTOR_NAME, NULL_HEADERS, FORWARD, CONNECTOR_CONFIG);
}
|
public static boolean isCoastedPoint(NopHit centerPoint) {
CenterRadarHit crh = (CenterRadarHit) centerPoint.rawMessage();
return isCoastedRadarHit(crh);
}
|
@Test
public void testIsCoastedPoint() {
NopHit notCoasted = new NopHit(NON_COASTED_RH);
NopHit coasted = new NopHit(COASTED_RH);
assertFalse(CenterSmoothing.isCoastedPoint(notCoasted));
assertTrue(CenterSmoothing.isCoastedPoint(coasted));
}
|
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) {
requireNonNull(metric);
if (measureDto == null) {
return Optional.empty();
}
Double value = measureDto.getValue();
String data = measureDto.getData();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(measureDto, value, data);
case LONG:
return toLongMeasure(measureDto, value, data);
case DOUBLE:
return toDoubleMeasure(measureDto, value, data);
case BOOLEAN:
return toBooleanMeasure(measureDto, value, data);
case STRING:
return toStringMeasure(measureDto, data);
case LEVEL:
return toLevelMeasure(measureDto, data);
case NO_VALUE:
return toNoValueMeasure(measureDto);
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
}
|
@Test
public void toMeasure_maps_data_and_alert_properties_in_dto_for_Int_Metric() {
MeasureDto measureDto = new MeasureDto().setValue(10d).setData(SOME_DATA).setAlertStatus(Level.OK.name()).setAlertText(SOME_ALERT_TEXT);
Optional<Measure> measure = underTest.toMeasure(measureDto, SOME_INT_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.INT);
assertThat(measure.get().getIntValue()).isEqualTo(10);
assertThat(measure.get().getData()).isEqualTo(SOME_DATA);
assertThat(measure.get().getQualityGateStatus().getStatus()).isEqualTo(Level.OK);
assertThat(measure.get().getQualityGateStatus().getText()).isEqualTo(SOME_ALERT_TEXT);
}
|
public static int toIntSize(long size) {
assert size >= 0 : "Invalid size value: " + size;
return size > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) size;
}
|
@Test
public void toIntSize_whenEqualToIntMax() {
long size = Integer.MAX_VALUE;
assertEquals(Integer.MAX_VALUE, toIntSize(size));
}
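Beyond the boundary, the method saturates instead of overflowing. Two extra cases, not part of the original test:

assertEquals(Integer.MAX_VALUE, toIntSize((long) Integer.MAX_VALUE + 1)); // clamps rather than wrapping negative
assertEquals(7, toIntSize(7L)); // values in range pass through unchanged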
|
public static String decompressZlib(byte[] compressedData) throws IOException {
return decompressZlib(compressedData, Long.MAX_VALUE);
}
|
@Test
public void testDecompressZlib() throws IOException {
final String testString = "Teststring 123";
final byte[] compressed = TestHelper.zlibCompress(testString);
assertEquals(testString, Tools.decompressZlib(compressed));
}
|
@Override
public void onAddClassLoader(ModuleModel scopeModel, ClassLoader classLoader) {
refreshClassLoader(classLoader);
}
|
@Test
void testSerializable1() {
FrameworkModel frameworkModel = new FrameworkModel();
ApplicationModel applicationModel = frameworkModel.newApplication();
ApplicationConfig applicationConfig = new ApplicationConfig("Test");
applicationConfig.setCheckSerializable(false);
applicationModel.getApplicationConfigManager().setApplication(applicationConfig);
ModuleModel moduleModel = applicationModel.newModule();
SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class);
SerializeSecurityConfigurator serializeSecurityConfigurator = new SerializeSecurityConfigurator(moduleModel);
serializeSecurityConfigurator.onAddClassLoader(
moduleModel, Thread.currentThread().getContextClassLoader());
Assertions.assertFalse(ssm.isCheckSerializable());
frameworkModel.destroy();
}
|
@Override
public T deserialize(final String topic, final byte[] data) {
final List<?> values = inner.deserialize(topic, data);
if (values == null) {
return null;
}
SerdeUtils.throwOnColumnCountMismatch(numColumns, values.size(), false, topic);
return factory.apply(values);
}
|
@Test
public void shouldDeserializeNulls() {
// Given:
when(innerDeserializer.deserialize(any(), any())).thenReturn(null);
// When:
final TestListWrapper result = deserializer.deserialize("topic", SERIALIZED);
// Then:
verify(innerDeserializer).deserialize("topic", SERIALIZED);
assertThat(result, is(nullValue()));
}
|
@Override
public boolean addAll(Collection<V> objects) {
return get(addAllAsync(objects));
}
|
@Test
public void testAddAll() {
RHyperLogLog<Integer> log = redisson.getHyperLogLog("log");
log.addAll(Arrays.asList(1, 2, 3));
Assertions.assertEquals(3L, log.count());
}
|
public EdgeResult convertForViaWays(LongArrayList fromWays, LongArrayList viaWays, LongArrayList toWays) throws OSMRestrictionException {
if (fromWays.isEmpty() || toWays.isEmpty() || viaWays.isEmpty())
throw new IllegalArgumentException("There must be at least one from-, via- and to-way");
if (fromWays.size() > 1 && toWays.size() > 1)
throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
List<IntArrayList> solutions = new ArrayList<>();
for (LongCursor fromWay : fromWays)
for (LongCursor toWay : toWays)
findEdgeChain(fromWay.value, viaWays, toWay.value, solutions);
if (solutions.size() < fromWays.size() * toWays.size())
throw new OSMRestrictionException("has disconnected member ways");
else if (solutions.size() > fromWays.size() * toWays.size())
throw new OSMRestrictionException("has member ways that do not form a unique path");
return buildResult(solutions, new EdgeResult(fromWays.size(), viaWays.size(), toWays.size()));
}
|
@Test
void convertForViaWays_loop() {
BaseGraph graph = new BaseGraph.Builder(1).create();
// 4
// |
// 0-1-2
// |/
// 3
graph.edge(0, 1);
graph.edge(1, 2);
graph.edge(2, 3);
graph.edge(3, 1);
graph.edge(1, 4);
LongFunction<Iterator<IntCursor>> edgesByWay = way -> IntArrayList.from(Math.toIntExact(way)).iterator();
OSMRestrictionException e = assertThrows(OSMRestrictionException.class, () ->
new WayToEdgeConverter(graph, edgesByWay).convertForViaWays(ways(0), ways(1, 2, 3), ways(4)));
// So far we allow the via ways/edges to be in an arbitrary order, but do not allow multiple solutions.
assertTrue(e.getMessage().contains("has member ways that do not form a unique path"), e.getMessage());
}
|
public static String getRamSign(String encryptText, String encryptKey) {
try {
String[] encryptData = encryptText.split(",");
byte[] data = getProductSigningKey(encryptKey,
LocalDateTime.ofEpochSecond(Long.parseLong(encryptData[2]) / 1000, 0, ZoneOffset.UTC).format(DTF),
DEFAULT_REGION, DEFAULT_PRODCUT_CODE, SHA256_ENCRYPT);
// Construct a key according to the given byte array, and the second parameter specifies the name of a key algorithm
SecretKey secretKey = new SecretKeySpec(data, SHA256_ENCRYPT);
// Generate a Mac object specifying Mac algorithm
Mac mac = Mac.getInstance(SHA256_ENCRYPT);
// Initialize the Mac object with the given key
mac.init(secretKey);
byte[] text = encryptText.getBytes(StandardCharsets.UTF_8);
byte[] textFinal = mac.doFinal(text);
// Complete Mac operation, base64 encoding, convert byte array to string
return ConfigTools.byte2Base64(textFinal);
} catch (Exception e) {
throw new RuntimeException("get ram sign with hmacSHA1Encrypt fail", e);
}
}
|
@Test
public void testGetRamSign() {
String encryptText = "testGroup,127.0.0.1,1702564471650";
String encryptKey = "exampleEncryptKey";
String expectedSign = "6g9nMk6BRLFxl7bf5ZfWaEZvGdho3JBmwvx5rqgSUCE=";
String actualSign = RamSignAdapter.getRamSign(encryptText, encryptKey);
// Assert the generated sign matches the expected sign
Assertions.assertEquals(expectedSign, actualSign);
}
|
public <InputT, OutputT, CollectionT extends PCollection<? extends InputT>>
DataStream<OutputT> applyBeamPTransform(
DataStream<InputT> input, PTransform<CollectionT, PCollection<OutputT>> transform) {
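// Register the single input under the name "input", expand the Beam transform,
// and extract the single resulting PCollection registered under "output".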
return (DataStream)
getNonNull(
applyBeamPTransformInternal(
ImmutableMap.of("input", input),
(pipeline, map) -> (CollectionT) getNonNull(map, "input"),
(output) -> ImmutableMap.of("output", output),
transform,
input.getExecutionEnvironment()),
"output");
}
|
@Test
public void testApplyPreservesOutputTimestamps() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
DataStream<Long> input = env.fromCollection(ImmutableList.of(1L, 2L, 12L));
DataStream<Long> withTimestamps =
new BeamFlinkDataStreamAdapter()
.applyBeamPTransform(
input,
new PTransform<PCollection<Long>, PCollection<Long>>() {
@Override
public PCollection<Long> expand(PCollection<Long> input) {
return input.apply(WithTimestamps.of(x -> Instant.ofEpochMilli(x)));
}
});
assertThat(
ImmutableList.copyOf(
withTimestamps
.windowAll(TumblingEventTimeWindows.of(Time.milliseconds(10)))
.reduce((ReduceFunction<Long>) (a, b) -> a + b)
.executeAndCollect()),
containsInAnyOrder(3L, 12L));
}
|
@VisibleForTesting
@Nullable
Integer getUploadBufferSizeBytes() {
return uploadBufferSizeBytes;
}
|
@Test
public void testUploadBufferSizeDefault() {
GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
GcsUtil util = pipelineOptions.getGcsUtil();
assertNull(util.getUploadBufferSizeBytes());
}
|
public static URI buildExternalUri(@NotNull MultivaluedMap<String, String> httpHeaders, @NotNull URI defaultUri) {
Optional<URI> externalUri = Optional.empty();
final List<String> headers = httpHeaders.get(HttpConfiguration.OVERRIDE_HEADER);
if (headers != null && !headers.isEmpty()) {
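// Use the first header value that is either a relative URI or an absolute http(s) URI.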
externalUri = headers.stream()
.filter(s -> {
try {
if (Strings.isNullOrEmpty(s)) {
return false;
}
final URI uri = new URI(s);
if (!uri.isAbsolute()) {
return true;
}
switch (uri.getScheme()) {
case "http":
case "https":
return true;
}
return false;
} catch (URISyntaxException e) {
return false;
}
})
.map(URI::create)
.findFirst();
}
final URI uri = externalUri.orElse(defaultUri);
// Make sure we return a URI object with a trailing slash
if (!uri.toString().endsWith("/")) {
return URI.create(uri.toString() + "/");
}
return uri;
}
|
@Test
public void buildExternalUriReturnsHeaderValueIfHeaderIsPresent() throws Exception {
final MultivaluedMap<String, String> httpHeaders = new MultivaluedHashMap<>();
httpHeaders.putSingle(HttpConfiguration.OVERRIDE_HEADER, "http://header.example.com");
final URI externalUri = URI.create("http://graylog.example.com");
assertThat(RestTools.buildExternalUri(httpHeaders, externalUri)).isEqualTo(URI.create("http://header.example.com/"));
}
|
@ScalarOperator(CAST)
@SqlType(StandardTypes.SMALLINT)
public static long castToSmallint(@SqlType(StandardTypes.TINYINT) long value)
{
return value;
}
|
@Test
public void testCastToSmallint()
{
assertFunction("cast(TINYINT'37' as smallint)", SMALLINT, (short) 37);
assertFunction("cast(TINYINT'17' as smallint)", SMALLINT, (short) 17);
}
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
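// prediction[i][j] is the staged prediction for sample j using the intercept b
// plus the first i+1 shrunken trees.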
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
}
|
@Test
public void testCalHousingHuber() {
test(Loss.huber(0.9), "cal_housing", CalHousing.formula, CalHousing.data, 62090.2639);
}
|
@Override
public IndexSpec getIndexSpec(String indexName) {
return this.indexSpecs.get(indexName);
}
|
@Test
void getIndexSpec() {
var specs = new DefaultIndexSpecs();
var nameSpec = primaryKeyIndexSpec(FakeExtension.class);
specs.add(nameSpec);
assertThat(specs.getIndexSpec(PrimaryKeySpecUtils.PRIMARY_INDEX_NAME)).isEqualTo(nameSpec);
}
|
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
}
|
@Test
void listeners() {
ConstraintViolationException exception = assertThrows(
ConstraintViolationException.class,
() -> modelValidator.validate(this.parse("flows/invalids/listener.yaml"))
);
assertThat(exception.getConstraintViolations().size(), is(2));
assertThat(new ArrayList<>(exception.getConstraintViolations()).getFirst().getMessage(), containsString("must not be empty"));
assertThat(new ArrayList<>(exception.getConstraintViolations()).get(1).getMessage(), is("must not be empty"));
}
|
@GetMapping("/queryRootTag")
public ShenyuAdminResult queryRootTag() {
return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, tagService.findByParentTagId("0"));
}
|
@Test
public void testQueryRootTag() throws Exception {
List<TagVO> tagVOS = new ArrayList<>();
given(tagService.findByParentTagId("0")).willReturn(tagVOS);
this.mockMvc.perform(MockMvcRequestBuilders.get("/tag/queryRootTag"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andReturn();
}
|
@Override
public void ensureValid(String name, Object value) {
if (value == null || ((List) value).isEmpty()) {
throw new ConfigException(name, value, "Empty list");
}
}
|
@Test
public void testEmptyList() {
assertThrows(ConfigException.class,
() -> new NonEmptyListValidator().ensureValid("foo", Collections.emptyList()));
}
|
public DefaultIssue setStatus(String s) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(s), "Status must be set");
this.status = s;
return this;
}
|
@Test
void fail_on_empty_status() {
try {
issue.setStatus("");
fail();
} catch (IllegalArgumentException e) {
assertThat(e).hasMessage("Status must be set");
}
}
|
static byte eightBitCharacter(final String asciiCharacter)
{
Verify.notNull(asciiCharacter, "asciiCharacter");
final byte[] bytes = asciiCharacter.getBytes(StandardCharsets.US_ASCII);
if (bytes.length != 1)
{
throw new IllegalArgumentException(
"String value `" + asciiCharacter + "` did not fit into a single 8-bit character");
}
return bytes[0];
}
|
@Test
void happyPathEightBitCharacter()
{
final byte aByte = RustUtil.eightBitCharacter("a");
assertEquals('a', (char)aByte);
assertEquals("97", Byte.toString(aByte));
}
|
public static Socket acceptWithoutTimeout(ServerSocket serverSocket) throws IOException {
Preconditions.checkArgument(
serverSocket.getSoTimeout() == 0, "serverSocket SO_TIMEOUT option must be 0");
while (true) {
try {
return serverSocket.accept();
} catch (SocketTimeoutException exception) {
// A timeout should be impossible here because a socket timeout of zero
// indicates an infinite timeout, but it can still happen due to the
// underlying JDK-8237858 bug. We retry the accept call indefinitely to
// replicate the expected behavior.
}
}
}
|
@Test
void testAcceptWithoutTimeoutZeroTimeout() throws IOException {
// Explicitly sets a timeout of zero
final Socket expected = new Socket();
try (final ServerSocket serverSocket =
new ServerSocket(0) {
@Override
public Socket accept() {
return expected;
}
}) {
serverSocket.setSoTimeout(0);
assertThat(NetUtils.acceptWithoutTimeout(serverSocket)).isEqualTo(expected);
}
}
|
@Override
public Endpoint<Http2LocalFlowController> local() {
return localEndpoint;
}
|
@Test
public void newStreamBehindExpectedShouldThrow() throws Http2Exception {
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
server.local().createStream(0, true);
}
});
}
|
boolean isSqlMonitoringDisabled() {
return isMonitoringDisabled() || !sqlCounter.isDisplayed();
}
|
@Test
public void testIsSqlMonitoringDisabled() {
Utils.setProperty(Parameter.DISABLED, "false");
jdbcWrapper.getSqlCounter().setDisplayed(true);
assertFalse("isSqlMonitoringDisabled1", jdbcWrapper.isSqlMonitoringDisabled());
Utils.setProperty(Parameter.DISABLED, "true");
assertTrue("isSqlMonitoringDisabled2", jdbcWrapper.isSqlMonitoringDisabled());
Utils.setProperty(Parameter.DISABLED, "false");
jdbcWrapper.getSqlCounter().setDisplayed(false);
assertTrue("isSqlMonitoringDisabled3", jdbcWrapper.isSqlMonitoringDisabled());
jdbcWrapper.getSqlCounter().setDisplayed(true);
assertFalse("isSqlMonitoringDisabled4", jdbcWrapper.isSqlMonitoringDisabled());
}
|
@Override
public int getRemainingQueueCapacity() {
return taskQ.remainingCapacity();
}
|
@Test
public void getRemainingQueueCapacity_whenNoTasksSubmitted() {
int queueSize = 123;
assertEquals(queueSize, newManagedExecutorService(1, queueSize).getRemainingQueueCapacity());
}
|
@Override
public boolean apply(Collection<Member> members) {
if (members.size() < minimumClusterSize) {
return false;
}
int count = 0;
long timestamp = Clock.currentTimeMillis();
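// A member counts towards the minimum cluster size only if it responds to ICMP pings
// and is either the local member or considered alive by the failure detector.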
for (Member member : members) {
if (!isAlivePerIcmp(member)) {
continue;
}
if (member.localMember() || failureDetector.isAlive(member, timestamp)) {
count++;
}
}
return count >= minimumClusterSize;
}
|
@Test
public void testSplitBrainProtectionAbsent_whenIcmpAlive_andFewerThanSplitBrainProtectionPresent() {
splitBrainProtectionFunction = new ProbabilisticSplitBrainProtectionFunction(splitBrainProtectionSize, 10000, 10000, 200, 100, 10);
prepareSplitBrainProtectionFunctionForIcmpFDTest(splitBrainProtectionFunction);
// heartbeat each second for all members for 5 seconds
heartbeat(5, 1000);
pingSuccessfully();
assertFalse(splitBrainProtectionFunction.apply(subsetOfMembers(splitBrainProtectionSize - 1)));
}
|
@GetMapping(value = "/{id}")
public Mono<Post> get(@PathVariable(value = "id") Long id) {
return this.posts.findById(id);
}
|
@Test
public void getAllPostsWillBeOk() throws Exception {
this.rest
.get()
.uri("/posts")
.accept(APPLICATION_JSON)
.exchange()
.expectBody()
.jsonPath("$.length()")
.isEqualTo(2);
}
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
final AttributedList<Path> children = new AttributedList<Path>();
try (RemoteDirectory handle = session.sftp().openDir(directory.getAbsolute())) {
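// Scan all remote entries first, then convert and notify the listener in chunks
// of the configured sftp.listing.chunksize.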
for(List<RemoteResourceInfo> list : ListUtils.partition(handle.scan(new RemoteResourceFilter() {
@Override
public boolean accept(RemoteResourceInfo remoteResourceInfo) {
return true;
}
}),
new HostPreferences(session.getHost()).getInteger("sftp.listing.chunksize"))) {
for(RemoteResourceInfo f : list) {
final PathAttributes attr = attributes.toAttributes(f.getAttributes());
final EnumSet<Path.Type> type = EnumSet.noneOf(Path.Type.class);
switch(f.getAttributes().getType()) {
case DIRECTORY:
type.add(Path.Type.directory);
break;
case SYMLINK:
type.add(Path.Type.symboliclink);
break;
default:
type.add(Path.Type.file);
break;
}
final Path file = new Path(directory, f.getName(), type, attr);
if(this.post(file)) {
children.add(file);
listener.chunk(directory, children);
}
}
}
return children;
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
}
|
@Test(expected = NotfoundException.class)
public void testListFile() throws Exception {
final Path home = new SFTPHomeDirectoryService(session).find();
final Path f = new Path(home, "test", EnumSet.of(Path.Type.directory));
final SFTPListService service = new SFTPListService(session);
service.list(f, new DisabledListProgressListener());
}
|
public final void doesNotContain(@Nullable Object element) {
if (Iterables.contains(checkNotNull(actual), element)) {
failWithActual("expected not to contain", element);
}
}
|
@Test
public void iterableDoesNotContainFailure() {
expectFailureWhenTestingThat(asList(1, 2, 3)).doesNotContain(2);
assertFailureKeys("expected not to contain", "but was");
assertFailureValue("expected not to contain", "2");
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void loadingErrorFabric() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/loading_error_fabric.txt")),
CrashReportAnalyzer.Rule.LOADING_CRASHED_FABRIC);
assertEquals("test", result.getMatcher().group("id"));
}
|
public Order complete(Boolean complete) {
this.complete = complete;
return this;
}
|
@Test
public void completeTest() {
// Minimal check (assumes a static import of Assert.assertSame):
// the fluent setter should return the same instance to allow chaining.
Order order = new Order();
assertSame(order, order.complete(true));
}
|
@Asn1ObjectIdentifier("2.16.528.1.1003.10.9.1")
@Asn1Property(order = 71)
public PolymorphicInfo getPolymorphicInfo() {
return polymorphicInfo;
}
|
@Test
public void testEfCardAccess() {
final PcaSecurityInfos result = mapper.read(efCardAccess, PcaSecurityInfos.class);
assertEquals(1, result.getPolymorphicInfo().getPcaVersion());
assertEquals(0x6c, result.getPolymorphicInfo().getFlags().intValue());
}
|
public DiscardObject markAsDiscardedOnShutdown(JobStatus jobStatus) {
return shouldBeDiscardedOnShutdown(jobStatus) ? markAsDiscarded() : NOOP_DISCARD_OBJECT;
}
|
@Test
void testCleanUpOnShutdown() throws Exception {
JobStatus[] terminalStates =
new JobStatus[] {
JobStatus.FINISHED, JobStatus.CANCELED, JobStatus.FAILED, JobStatus.SUSPENDED
};
for (JobStatus status : terminalStates) {
OperatorState state = mock(OperatorState.class);
Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
operatorStates.put(new OperatorID(), state);
EmptyStreamStateHandle retainedHandle = new EmptyStreamStateHandle();
TestCompletedCheckpointStorageLocation retainedLocation =
new TestCompletedCheckpointStorageLocation(retainedHandle, "ptr");
// Keep
CheckpointProperties retainProps =
new CheckpointProperties(
false,
CheckpointType.CHECKPOINT,
false,
false,
false,
false,
false,
false);
CompletedCheckpoint checkpoint =
new CompletedCheckpoint(
new JobID(),
0,
0,
1,
new HashMap<>(operatorStates),
Collections.emptyList(),
retainProps,
retainedLocation,
null);
checkpoint.markAsDiscardedOnShutdown(status).discard();
verify(state, times(0)).discardState();
assertThat(retainedLocation.isDisposed()).isFalse();
assertThat(retainedHandle.isDisposed()).isFalse();
// Discard
EmptyStreamStateHandle discardHandle = new EmptyStreamStateHandle();
TestCompletedCheckpointStorageLocation discardLocation =
new TestCompletedCheckpointStorageLocation(discardHandle, "ptr");
// Properties that request the checkpoint be discarded on shutdown
CheckpointProperties discardProps =
new CheckpointProperties(
false, CheckpointType.CHECKPOINT, true, true, true, true, true, false);
checkpoint =
new CompletedCheckpoint(
new JobID(),
0,
0,
1,
new HashMap<>(operatorStates),
Collections.emptyList(),
discardProps,
discardLocation,
null);
checkpoint.markAsDiscardedOnShutdown(status).discard();
verify(state, times(1)).discardState();
assertThat(discardLocation.isDisposed()).isTrue();
assertThat(discardHandle.isDisposed()).isTrue();
}
}
|
public DefaultHeaders<K, V, T> copy() {
DefaultHeaders<K, V, T> copy = new DefaultHeaders<K, V, T>(
hashingStrategy, valueConverter, nameValidator, entries.length);
copy.addImpl(this);
return copy;
}
|
@Test
public void testCopy() throws Exception {
TestDefaultHeaders headers = newInstance();
headers.addBoolean(of("boolean"), true);
headers.addLong(of("long"), Long.MAX_VALUE);
headers.addInt(of("int"), Integer.MIN_VALUE);
headers.addShort(of("short"), Short.MAX_VALUE);
headers.addChar(of("char"), Character.MAX_VALUE);
headers.addByte(of("byte"), Byte.MAX_VALUE);
headers.addDouble(of("double"), Double.MAX_VALUE);
headers.addFloat(of("float"), Float.MAX_VALUE);
long millis = System.currentTimeMillis();
headers.addTimeMillis(of("millis"), millis);
headers.addObject(of("object"), "Hello World");
headers.add(of("name"), of("value"));
headers = newInstance().add(headers);
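// add() transfers every entry from the source headers, exercising the same
// addImpl(...) path that copy() uses internally.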
assertTrue(headers.containsBoolean(of("boolean"), true));
assertFalse(headers.containsBoolean(of("boolean"), false));
assertTrue(headers.containsLong(of("long"), Long.MAX_VALUE));
assertFalse(headers.containsLong(of("long"), Long.MIN_VALUE));
assertTrue(headers.containsInt(of("int"), Integer.MIN_VALUE));
assertFalse(headers.containsInt(of("int"), Integer.MAX_VALUE));
assertTrue(headers.containsShort(of("short"), Short.MAX_VALUE));
assertFalse(headers.containsShort(of("short"), Short.MIN_VALUE));
assertTrue(headers.containsChar(of("char"), Character.MAX_VALUE));
assertFalse(headers.containsChar(of("char"), Character.MIN_VALUE));
assertTrue(headers.containsByte(of("byte"), Byte.MAX_VALUE));
assertFalse(headers.containsLong(of("byte"), Byte.MIN_VALUE));
assertTrue(headers.containsDouble(of("double"), Double.MAX_VALUE));
assertFalse(headers.containsDouble(of("double"), Double.MIN_VALUE));
assertTrue(headers.containsFloat(of("float"), Float.MAX_VALUE));
assertFalse(headers.containsFloat(of("float"), Float.MIN_VALUE));
assertTrue(headers.containsTimeMillis(of("millis"), millis));
// This test doesn't work on midnight, January 1, 1970 UTC
assertFalse(headers.containsTimeMillis(of("millis"), 0));
assertTrue(headers.containsObject(of("object"), "Hello World"));
assertFalse(headers.containsObject(of("object"), ""));
assertTrue(headers.contains(of("name"), of("value")));
assertFalse(headers.contains(of("name"), of("value1")));
}
|
@Override
public DataSourceProvenance getProvenance() {
return new AggregateDataSourceProvenance(this);
}
|
@Test
public void testADSIterationOrder() {
MockOutputFactory factory = new MockOutputFactory();
String[] featureNames = new String[] {"X1","X2"};
double[] featureValues = new double[] {1.0, 2.0};
List<Example<MockOutput>> first = new ArrayList<>();
first.add(new ArrayExample<>(new MockOutput("A"),featureNames,featureValues));
first.add(new ArrayExample<>(new MockOutput("B"),featureNames,featureValues));
first.add(new ArrayExample<>(new MockOutput("C"),featureNames,featureValues));
first.add(new ArrayExample<>(new MockOutput("D"),featureNames,featureValues));
first.add(new ArrayExample<>(new MockOutput("E"),featureNames,featureValues));
ListDataSource<MockOutput> firstSource = new ListDataSource<>(first,factory,new SimpleDataSourceProvenance("First",factory));
List<Example<MockOutput>> second = new ArrayList<>();
second.add(new ArrayExample<>(new MockOutput("F"),featureNames,featureValues));
second.add(new ArrayExample<>(new MockOutput("G"),featureNames,featureValues));
ListDataSource<MockOutput> secondSource = new ListDataSource<>(second,factory,new SimpleDataSourceProvenance("Second",factory));
List<Example<MockOutput>> third = new ArrayList<>();
third.add(new ArrayExample<>(new MockOutput("H"),featureNames,featureValues));
third.add(new ArrayExample<>(new MockOutput("I"),featureNames,featureValues));
third.add(new ArrayExample<>(new MockOutput("J"),featureNames,featureValues));
third.add(new ArrayExample<>(new MockOutput("K"),featureNames,featureValues));
ListDataSource<MockOutput> thirdSource = new ListDataSource<>(third,factory,new SimpleDataSourceProvenance("Third",factory));
List<DataSource<MockOutput>> sources = new ArrayList<>();
sources.add(firstSource);
sources.add(secondSource);
sources.add(thirdSource);
AggregateDataSource<MockOutput> adsSeq = new AggregateDataSource<>(sources, AggregateDataSource.IterationOrder.SEQUENTIAL);
String[] expectedSeq = new String[] {"A","B","C","D","E","F","G","H","I","J","K"};
String[] actualSeq = StreamSupport.stream(adsSeq.spliterator(), false).map(Example::getOutput).map(MockOutput::toString).toArray(String[]::new);
Assertions.assertArrayEquals(expectedSeq,actualSeq);
Helpers.testProvenanceMarshalling(adsSeq.getProvenance());
AggregateDataSource<MockOutput> adsRR = new AggregateDataSource<>(sources, AggregateDataSource.IterationOrder.ROUNDROBIN);
String[] expectedRR = new String[] {"A","F","H","B","G","I","C","J","D","K","E"};
String[] actualRR = StreamSupport.stream(adsRR.spliterator(), false).map(Example::getOutput).map(MockOutput::toString).toArray(String[]::new);
Assertions.assertArrayEquals(expectedRR,actualRR);
Helpers.testProvenanceMarshalling(adsRR.getProvenance());
}
|
@Override
public ShardingTableRuleConfiguration swapToObject(final YamlTableRuleConfiguration yamlConfig) {
ShardingSpherePreconditions.checkNotNull(yamlConfig.getLogicTable(), () -> new MissingRequiredShardingConfigurationException("Sharding Logic table"));
ShardingTableRuleConfiguration result = new ShardingTableRuleConfiguration(yamlConfig.getLogicTable(), yamlConfig.getActualDataNodes());
if (null != yamlConfig.getDatabaseStrategy()) {
result.setDatabaseShardingStrategy(shardingStrategySwapper.swapToObject(yamlConfig.getDatabaseStrategy()));
}
if (null != yamlConfig.getTableStrategy()) {
result.setTableShardingStrategy(shardingStrategySwapper.swapToObject(yamlConfig.getTableStrategy()));
}
if (null != yamlConfig.getKeyGenerateStrategy()) {
result.setKeyGenerateStrategy(keyGenerateStrategySwapper.swapToObject(yamlConfig.getKeyGenerateStrategy()));
}
if (null != yamlConfig.getAuditStrategy()) {
result.setAuditStrategy(auditStrategySwapper.swapToObject(yamlConfig.getAuditStrategy()));
}
return result;
}
|
@Test
void assertSwapToObject() {
YamlShardingTableRuleConfigurationSwapper swapper = new YamlShardingTableRuleConfigurationSwapper();
ShardingTableRuleConfiguration actual = swapper.swapToObject(createYamlTableRuleConfiguration());
assertThat(actual.getDatabaseShardingStrategy().getShardingAlgorithmName(), is("standard"));
assertThat(actual.getTableShardingStrategy().getShardingAlgorithmName(), is("standard"));
assertThat(actual.getKeyGenerateStrategy().getKeyGeneratorName(), is("auto_increment"));
assertThat(actual.getAuditStrategy().getAuditorNames(), is(Collections.singletonList("audit_algorithm")));
}
|
public void addPackageDescr(Resource resource, PackageDescr packageDescr) {
if (!getNamespace().equals(packageDescr.getNamespace())) {
throw new RuntimeException("Composing PackageDescr (" + packageDescr.getName()
+ ") in different namespaces (namespace=" + getNamespace()
+ " packageDescr=" + packageDescr.getNamespace() + ")" );
}
internalAdd(resource, packageDescr);
}
|
@Test(expected = RuntimeException.class)
public void addPackageDescrDifferentPkgUUID() {
String pkgUUID = generateUUID();
PackageDescr first = new PackageDescr(NAMESPACE);
first.setPreferredPkgUUID(pkgUUID);
assertThat(first.getPreferredPkgUUID().isPresent()).isTrue();
compositePackageDescr.addPackageDescr(new ByteArrayResource(), first);
assertThat(compositePackageDescr.getPreferredPkgUUID().isPresent()).isTrue();
assertThat(compositePackageDescr.getPreferredPkgUUID().get()).isEqualTo(pkgUUID);
pkgUUID = generateUUID();
PackageDescr second = new PackageDescr(NAMESPACE);
second.setPreferredPkgUUID(pkgUUID);
assertThat(second.getPreferredPkgUUID().isPresent()).isTrue();
assertThat(second.getPreferredPkgUUID().get()).isNotEqualTo(first.getPreferredPkgUUID().get());
compositePackageDescr.addPackageDescr(new ByteArrayResource(), second);
}
|
public ClientTelemetrySender telemetrySender() {
return clientTelemetrySender;
}
|
@Test
public void testCreateRequestInvalidState() {
ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender();
telemetrySender.updateSubscriptionResult(subscription, time.milliseconds());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS));
assertFalse(telemetrySender.createRequest().isPresent());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED));
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS));
assertFalse(telemetrySender.createRequest().isPresent());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATING_PUSH_NEEDED));
assertFalse(telemetrySender.createRequest().isPresent());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATING_PUSH_IN_PROGRESS));
assertFalse(telemetrySender.createRequest().isPresent());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATED));
assertFalse(telemetrySender.createRequest().isPresent());
}
|
public static int hash32(byte[] data) {
int len = data.length;
if (len <= 24) {
return len <= 12 ?
(len <= 4 ? hash32Len0to4(data) : hash32Len5to12(data)) :
hash32Len13to24(data);
}
// len > 24
int h = len, g = c1 * len, f = g;
int a0 = rotate32(fetch32(data, len - 4) * c1, 17) * c2;
int a1 = rotate32(fetch32(data, len - 8) * c1, 17) * c2;
int a2 = rotate32(fetch32(data, len - 16) * c1, 17) * c2;
int a3 = rotate32(fetch32(data, len - 12) * c1, 17) * c2;
int a4 = rotate32(fetch32(data, len - 20) * c1, 17) * c2;
h ^= a0;
h = rotate32(h, 19);
h = h * 5 + 0xe6546b64;
h ^= a2;
h = rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= a1;
g = rotate32(g, 19);
g = g * 5 + 0xe6546b64;
g ^= a3;
g = rotate32(g, 19);
g = g * 5 + 0xe6546b64;
f += a4;
f = rotate32(f, 19);
f = f * 5 + 0xe6546b64;
int iters = (len - 1) / 20;
int pos = 0;
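// Mix the remaining input in 20-byte blocks, cyclically permuting the three
// state lanes (f, g, h) after each block.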
do {
a0 = rotate32(fetch32(data, pos) * c1, 17) * c2;
a1 = fetch32(data, pos + 4);
a2 = rotate32(fetch32(data, pos + 8) * c1, 17) * c2;
a3 = rotate32(fetch32(data, pos + 12) * c1, 17) * c2;
a4 = fetch32(data, pos + 16);
h ^= a0;
h = rotate32(h, 18);
h = h * 5 + 0xe6546b64;
f += a1;
f = rotate32(f, 19);
f = f * c1;
g += a2;
g = rotate32(g, 18);
g = g * 5 + 0xe6546b64;
h ^= a3 + a1;
h = rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= a4;
g = Integer.reverseBytes(g) * 5;
h += a4 * 5;
h = Integer.reverseBytes(h);
f += a0;
int swapValue = f;
f = g;
g = h;
h = swapValue;
pos += 20;
} while (--iters != 0);
g = rotate32(g, 11) * c1;
g = rotate32(g, 17) * c1;
f = rotate32(f, 11) * c1;
f = rotate32(f, 17) * c1;
h = rotate32(h + g, 19);
h = h * 5 + 0xe6546b64;
h = rotate32(h, 17) * c1;
h = rotate32(h + f, 19);
h = h * 5 + 0xe6546b64;
h = rotate32(h, 17) * c1;
return h;
}
|
@Test
public void hash32Test() {
int hv = CityHash.hash32(StrUtil.utf8Bytes("你"));
assertEquals(1290029860, hv);
hv = CityHash.hash32(StrUtil.utf8Bytes("你好"));
assertEquals(1374181357, hv);
hv = CityHash.hash32(StrUtil.utf8Bytes("见到你很高兴"));
assertEquals(1475516842, hv);
hv = CityHash.hash32(StrUtil.utf8Bytes("我们将通过生成一个大的文件的方式来检验各种方法的执行效率因为这种方式在结束的时候需要执行文件"));
assertEquals(0x51020cae, hv);
}
|
@Override
public AppResponse process(Flow flow, ActivateWithCodeRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
Map<String, Object> result = digidClient.activateAccountWithCode(appSession.getAccountId(), request.getActivationCode());
if (result.get(lowerUnderscore(STATUS)).equals("OK")) {
appAuthenticator.setIssuerType((String) result.get(lowerUnderscore(ISSUER_TYPE)));
return new OkResponse();
}
if (result.get(lowerUnderscore(STATUS)).equals("NOK") && result.get(ERROR) != null ) {
final var error = result.get(ERROR);
if (ERROR_CODE_NOT_CORRECT.equals(error)) {
// Log code 88 is already logged in x; can be changed when switching to the account microservice
return new EnterActivationResponse(ERROR_CODE_NOT_CORRECT, Map.of(REMAINING_ATTEMPTS, result.get(lowerUnderscore(REMAINING_ATTEMPTS))));
} else if (ERROR_CODE_BLOCKED.equals(error)) {
digidClient.remoteLog("87", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
return new NokResponse((String) result.get(ERROR));
} else if (ERROR_CODE_INVALID.equals(error)) {
digidClient.remoteLog("90", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
return new EnterActivationResponse(ERROR_CODE_INVALID, Map.of(DAYS_VALID, result.get(lowerUnderscore(DAYS_VALID))));
}
}
return new NokResponse();
}
|
@Test
public void responseNotCorrectTest() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
//given
when(digidClientMock.activateAccountWithCode(anyLong(), any())).thenReturn(Map.of(
lowerUnderscore(STATUS), "NOK",
lowerUnderscore(ERROR), "activation_code_not_correct",
lowerUnderscore(ERROR_CODE_BLOCKED), "activation_code_not_correct",
lowerUnderscore(REMAINING_ATTEMPTS), 2
));
//when
AppResponse result = activationCodeChecked.process(mockedFlow, activateWithCodeRequest);
//then
assertTrue(result instanceof EnterActivationResponse);
assertEquals("activation_code_not_correct", ((NokResponse) result).getError());
}
|
@Override
protected Drive connect(final ProxyFinder proxy, final HostKeyCallback callback, final LoginCallback prompt, final CancelCallback cancel) throws HostParserException, ConnectionCanceledException {
final HttpClientBuilder configuration = builder.build(proxy, this, prompt);
authorizationService = new OAuth2RequestInterceptor(builder.build(proxy, this, prompt).build(), host, prompt)
.withRedirectUri(host.getProtocol().getOAuthRedirectUrl());
configuration.addInterceptorLast(authorizationService);
configuration.setServiceUnavailableRetryStrategy(new CustomServiceUnavailableRetryStrategy(host,
new ExecutionCountServiceUnavailableRetryStrategy(new OAuth2ErrorResponseInterceptor(host, authorizationService))));
if(new HostPreferences(host).getBoolean("googledrive.limit.requests.enable")) {
configuration.addInterceptorLast(new RateLimitingHttpRequestInterceptor(new DefaultHttpRateLimiter(
new HostPreferences(host).getInteger("googledrive.limit.requests.second")
)));
}
transport = new ApacheHttpTransport(configuration.build());
final UseragentProvider ua = new PreferencesUseragentProvider();
return new Drive.Builder(transport, new GsonFactory(), new UserAgentHttpRequestInitializer(ua))
.setApplicationName(ua.get())
.build();
}
|
@Test
public void testConnect() throws Exception {
assertTrue(session.isConnected());
session.close();
assertFalse(session.isConnected());
}
|
public static ByteBuf copyFloat(float value) {
ByteBuf buf = buffer(4);
buf.writeFloat(value);
return buf;
}
|
@Test
public void testWrapSingleFloat() {
ByteBuf buffer = copyFloat(42);
assertEquals(4, buffer.capacity());
assertEquals(42, buffer.readFloat(), 0.01);
assertFalse(buffer.isReadable());
buffer.release();
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
QueryEntry that = (QueryEntry) o;
if (!key.equals(that.key)) {
return false;
}
return true;
}
|
@Test
@SuppressWarnings("EqualsWithItself")
public void test_equality_same() {
QueryableEntry entry = createEntry();
assertTrue(entry.equals(entry));
}
|
public List<KiePMMLDroolsType> declareTypes(final List<DerivedField> derivedFields) {
return derivedFields.stream().map(this::declareType).collect(Collectors.toList());
}
|
@Test
void declareTypes() {
List<DerivedField> derivedFields = IntStream.range(0, 5)
.mapToObj(value -> getDerivedField("FieldName-" + value))
.collect(Collectors.toList());
List<KiePMMLDroolsType> retrieved = fieldASTFactory.declareTypes(derivedFields);
assertThat(retrieved).hasSameSizeAs(derivedFields);
for (int i = 0; i < derivedFields.size(); i++) {
commonValidateKiePMMLDroolsType(retrieved.get(i), derivedFields.get(i));
}
}
|
@Override
protected Map<String, Object> toJsonMap(ILoggingEvent event) {
final MapBuilder mapBuilder = new MapBuilder(timestampFormatter, customFieldNames, additionalFields, includes.size())
.addTimestamp("timestamp", isIncluded(EventAttribute.TIMESTAMP), event.getTimeStamp())
.add("level", isIncluded(EventAttribute.LEVEL), () -> String.valueOf(event.getLevel()))
.add("thread", isIncluded(EventAttribute.THREAD_NAME), event::getThreadName)
.add("marker", isIncluded(EventAttribute.MARKER) && event.getMarker() != null, () -> event.getMarker().getName())
.add("logger", isIncluded(EventAttribute.LOGGER_NAME), event::getLoggerName)
.add("message", isIncluded(EventAttribute.MESSAGE), event::getFormattedMessage)
.add("context", isIncluded(EventAttribute.CONTEXT_NAME), () -> event.getLoggerContextVO().getName())
.add("version", jsonProtocolVersion != null, jsonProtocolVersion)
.add("exception", isIncluded(EventAttribute.EXCEPTION) && event.getThrowableProxy() != null,
() -> throwableProxyConverter.convert(event));
final boolean includeMdc = isIncluded(EventAttribute.MDC);
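// MDC entries are either flattened into top-level fields or nested under a single "mdc" key.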
if (flattenMdc) {
filterMdc(event.getMDCPropertyMap()).forEach((k,v) -> mapBuilder.add(k, includeMdc, v));
} else {
mapBuilder.addMap("mdc", includeMdc, () -> filterMdc(event.getMDCPropertyMap()));
}
final boolean includeCallerData = isIncluded(EventAttribute.CALLER_DATA);
final StackTraceElement[] callerData = event.getCallerData();
if (includeCallerData && callerData.length >= 1) {
final StackTraceElement stackTraceElement = callerData[0];
mapBuilder.add("caller_class_name", includeCallerData, stackTraceElement.getClassName());
mapBuilder.add("caller_method_name", includeCallerData, stackTraceElement.getMethodName());
mapBuilder.add("caller_file_name", includeCallerData, stackTraceElement.getFileName());
mapBuilder.addNumber("caller_line_number", includeCallerData, stackTraceElement.getLineNumber());
}
return mapBuilder.build();
}
|
@Test
void testProducesDefaultMap() {
Map<String, Object> map = eventJsonLayout.toJsonMap(event);
final HashMap<String, Object> expectedFields = new HashMap<>(defaultExpectedFields);
assertThat(map).isEqualTo(expectedFields);
}
|
public static boolean prefixEquals(ChannelBuffer bufferA, ChannelBuffer bufferB, int count) {
final int aLen = bufferA.readableBytes();
final int bLen = bufferB.readableBytes();
if (aLen < count || bLen < count) {
return false;
}
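// Compare the first count bytes with absolute gets so neither buffer's reader index moves.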
int aIndex = bufferA.readerIndex();
int bIndex = bufferB.readerIndex();
for (int i = count; i > 0; i--) {
if (bufferA.getByte(aIndex) != bufferB.getByte(bIndex)) {
return false;
}
aIndex++;
bIndex++;
}
return true;
}
|
@Test
void testPrefixEquals() {
ChannelBuffer bufA = ChannelBuffers.wrappedBuffer("abcedfaf".getBytes());
ChannelBuffer bufB = ChannelBuffers.wrappedBuffer("abcedfaa".getBytes());
Assertions.assertFalse(ChannelBuffers.equals(bufA, bufB));
Assertions.assertTrue(ChannelBuffers.prefixEquals(bufA, bufB, 7));
Assertions.assertFalse(ChannelBuffers.prefixEquals(bufA, bufB, 8));
}
|
@Override
public PostgreSQLIdentifierTag getIdentifier() {
return PostgreSQLMessagePacketType.ROW_DESCRIPTION;
}
|
@Test
void assertGetIdentifier() {
PostgreSQLRowDescriptionPacket packet = new PostgreSQLRowDescriptionPacket(Collections.emptyList());
assertThat(packet.getIdentifier(), is(PostgreSQLMessagePacketType.ROW_DESCRIPTION));
}
|
@Override
public Set<K8sNode> completeNodes() {
Set<K8sNode> nodes = nodeStore.nodes().stream()
.filter(node -> node.state() == COMPLETE)
.collect(Collectors.toSet());
return ImmutableSet.copyOf(nodes);
}
|
@Test
public void testGetCompleteNodes() {
assertEquals(ERR_SIZE, 1, target.completeNodes().size());
assertTrue(ERR_NOT_FOUND, target.completeNodes().contains(MINION_3));
}
|
public <
M extends MessageHeaders<EmptyRequestBody, P, EmptyMessageParameters>,
P extends ResponseBody>
CompletableFuture<P> sendRequest(String targetAddress, int targetPort, M messageHeaders)
throws IOException {
return sendRequest(
targetAddress,
targetPort,
messageHeaders,
EmptyMessageParameters.getInstance(),
EmptyRequestBody.getInstance());
}
|
@Test
void testConnectionClosedHandling() throws Exception {
final Configuration config = new Configuration();
config.set(RestOptions.IDLENESS_TIMEOUT, Duration.ofMillis(5000L));
try (final ServerSocket serverSocket = new ServerSocket(0);
final RestClient restClient =
new RestClient(config, EXECUTOR_EXTENSION.getExecutor())) {
final String targetAddress = "localhost";
final int targetPort = serverSocket.getLocalPort();
// start server
final CompletableFuture<Socket> socketCompletableFuture =
CompletableFuture.supplyAsync(
CheckedSupplier.unchecked(
() -> NetUtils.acceptWithoutTimeout(serverSocket)));
final CompletableFuture<EmptyResponseBody> responseFuture =
restClient.sendRequest(
targetAddress,
targetPort,
new TestMessageHeaders(),
EmptyMessageParameters.getInstance(),
EmptyRequestBody.getInstance(),
Collections.emptyList());
Socket connectionSocket = null;
try {
connectionSocket = socketCompletableFuture.get(TIMEOUT, TimeUnit.SECONDS);
} catch (TimeoutException ignored) {
// could not establish a server connection --> see that the response failed
socketCompletableFuture.cancel(true);
}
if (connectionSocket != null) {
// close connection
connectionSocket.close();
}
FlinkAssertions.assertThatFuture(responseFuture)
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(IOException.class);
}
}
|
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap) {
// Skip this test if NOSASL
if (mConf.get(PropertyKey.SECURITY_AUTHENTICATION_TYPE)
.equals(AuthType.NOSASL)) {
return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(),
String.format("Impersonation validation is skipped for NOSASL"), "");
}
ValidationTaskResult loadConfig = loadHdfsConfig();
if (loadConfig.getState() != ValidationUtils.State.OK) {
mAdvice.insert(0, "Validating the proxy user requires additional HDFS "
+ "configuration. ");
return loadConfig.setAdvice(mAdvice.toString());
}
// TODO(jiacheng): validate proxyuser.hosts for the cluster
// Validate proxyuser config for the current Alluxio user
try {
String alluxioUser = getCurrentUser();
return validateProxyUsers(alluxioUser);
} catch (UnauthenticatedException e) {
mMsg.append("Failed to authenticate in Alluxio: ");
mMsg.append(ExceptionUtils.asPlainText(e));
mAdvice.append("Please fix the authentication issue.");
return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
mMsg.toString(), mAdvice.toString());
}
}
|
@Test
public void skipped() {
mConf.set(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.NOSASL);
HdfsProxyUserValidationTask task =
new HdfsProxyUserValidationTask("hdfs://namenode:9000/alluxio", mConf);
ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
assertEquals(ValidationUtils.State.SKIPPED, result.getState());
}
|
long importPhotos(
Collection<PhotoModel> photos,
GPhotosUpload gPhotosUpload)
throws Exception {
return gPhotosUpload.uploadItemsViaBatching(
photos,
this::importPhotoBatch);
}
|
@Test
public void importTwoPhotosWithFailure() throws Exception {
PhotoModel photoModel1 =
new PhotoModel(
PHOTO_TITLE,
IMG_URI,
PHOTO_DESCRIPTION,
JPEG_MEDIA_TYPE,
"oldPhotoID1",
OLD_ALBUM_ID,
false);
PhotoModel photoModel2 =
new PhotoModel(
PHOTO_TITLE,
IMG_URI,
PHOTO_DESCRIPTION,
JPEG_MEDIA_TYPE,
"oldPhotoID2",
OLD_ALBUM_ID,
false);
Mockito.when(googlePhotosInterface.uploadMediaContent(any(), eq(null)))
.thenReturn("token1", "token2");
BatchMediaItemResponse batchMediaItemResponse =
new BatchMediaItemResponse(
new NewMediaItemResult[]{
buildMediaItemResult("token1", Code.OK_VALUE),
buildMediaItemResult("token2", Code.UNAUTHENTICATED_VALUE)
});
Mockito.when(googlePhotosInterface.createPhotos(any(NewMediaItemUpload.class)))
.thenReturn(batchMediaItemResponse);
long length = googleMediaImporter.importPhotos(Lists.newArrayList(photoModel1, photoModel2),
new GPhotosUpload(UUID.randomUUID(), executor, mock(TokensAndUrlAuthData.class)));
// Only the first photo was imported successfully, so only its 32 bytes count towards the total
assertEquals(32L, length);
assertTrue(executor.isKeyCached(String.format("%s-%s", OLD_ALBUM_ID, "oldPhotoID1")));
String failedDataId = String.format("%s-%s", OLD_ALBUM_ID, "oldPhotoID2");
assertFalse(executor.isKeyCached(failedDataId));
ErrorDetail errorDetail = executor.getErrors().iterator().next();
assertEquals(failedDataId, errorDetail.id());
assertThat(
errorDetail.exception(), CoreMatchers.containsString("Media item could not be created."));
}
|
public int validate(
final ServiceContext serviceContext,
final List<ParsedStatement> statements,
final SessionProperties sessionProperties,
final String sql
) {
requireSandbox(serviceContext);
final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext));
final Injector injector = injectorFactory.apply(ctx, serviceContext);
final KsqlConfig ksqlConfig = ctx.getKsqlConfig();
int numPersistentQueries = 0;
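// Prepare and validate each statement against the sandboxed context, failing fast as soon
// as the accumulated persistent queries would exceed the configured capacity.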
for (final ParsedStatement parsed : statements) {
final PreparedStatement<?> prepared = ctx.prepare(
parsed,
(isVariableSubstitutionEnabled(sessionProperties, ksqlConfig)
? sessionProperties.getSessionVariables()
: Collections.emptyMap())
);
final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared,
SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties())
);
final int currNumPersistentQueries = validate(
serviceContext,
configured,
sessionProperties,
ctx,
injector
);
numPersistentQueries += currNumPersistentQueries;
if (currNumPersistentQueries > 0
&& QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) {
QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql);
}
}
return numPersistentQueries;
}
|
@Test
public void shouldThrowIfSnapshotSupplierReturnsNonSandbox() {
// Given:
executionContext = mock(KsqlExecutionContext.class);
givenRequestValidator(ImmutableMap.of());
// When:
final Exception e = assertThrows(
IllegalArgumentException.class,
() -> validator.validate(serviceContext, ImmutableList.of(), sessionProperties, "sql")
);
// Then:
assertThat(e.getMessage(), containsString(
"Expected sandbox"));
}
|
@Override
public boolean supports(Job job) {
JobDetails jobDetails = job.getJobDetails();
return !jobDetails.hasStaticFieldName() && Modifier.isStatic(getJobMethod(jobDetails).getModifiers());
}
|
@Test
void supportsJobIfJobClassHasPrivateConstructorButStaticJobMethod() {
Job job = anEnqueuedJob()
.withJobDetails(() -> TestServiceForIoC.doWorkInStaticMethod(UUID.randomUUID()))
.build();
assertThat(backgroundStaticJobWithoutIocRunner.supports(job)).isTrue();
}
|