focal_method | test_case
@Override
public Num calculate(BarSeries series, Position position) {
if (position == null || position.getEntry() == null || position.getExit() == null) {
return series.zero();
}
Returns returns = new Returns(series, position, Returns.ReturnType.LOG);
return calculateES(returns, confidence);
}
|
@Test
public void calculateWithNoBarsShouldReturn0() {
series = new MockBarSeries(numFunction, 100d, 95d, 100d, 80d, 85d, 70d);
AnalysisCriterion varCriterion = getCriterion();
assertNumEquals(numOf(0), varCriterion.calculate(series, new BaseTradingRecord()));
}
|
public static CompletableFuture<Map<String, String>> labelFailure(
final Throwable cause,
final Context context,
final Executor mainThreadExecutor,
final Collection<FailureEnricher> failureEnrichers) {
// list of CompletableFutures to enrich failure with labels from each enricher
final Collection<CompletableFuture<Map<String, String>>> enrichFutures = new ArrayList<>();
for (final FailureEnricher enricher : failureEnrichers) {
enrichFutures.add(
enricher.processFailure(cause, context)
.thenApply(
enricherLabels -> {
final Map<String, String> validLabels = new HashMap<>();
enricherLabels.forEach(
(k, v) -> {
if (!enricher.getOutputKeys().contains(k)) {
LOG.warn(
"Ignoring label with key {} from enricher {}"
+ " violating contract, keys allowed {}.",
k,
enricher.getClass(),
enricher.getOutputKeys());
} else {
validLabels.put(k, v);
}
});
return validLabels;
})
.exceptionally(
t -> {
LOG.warn(
"Enricher {} threw an exception.",
enricher.getClass(),
t);
return Collections.emptyMap();
}));
}
// combine all CompletableFutures into a single CompletableFuture containing a Map of labels
return FutureUtils.combineAll(enrichFutures)
.thenApplyAsync(
labelsToMerge -> {
final Map<String, String> mergedLabels = new HashMap<>();
for (Map<String, String> labels : labelsToMerge) {
labels.forEach(
(k, v) ->
// merge label with existing, throwing an exception
// if there is a key conflict
mergedLabels.merge(
k,
v,
(first, second) -> {
throw new FlinkRuntimeException(
String.format(
MERGE_EXCEPTION_MSG,
k));
}));
}
return mergedLabels;
},
mainThreadExecutor);
}
|
@Test
public void testLabelFailureWithValidAndThrowingEnricher() {
// A failing enricher shouldn't affect remaining enrichers with valid labels
final Throwable cause = new RuntimeException("test exception");
final FailureEnricher validEnricher = new TestEnricher("enricherKey");
final FailureEnricher throwingEnricher = new ThrowingEnricher("throwingKey");
final Set<FailureEnricher> enrichers =
new HashSet<FailureEnricher>() {
{
add(validEnricher);
add(throwingEnricher);
}
};
final CompletableFuture<Map<String, String>> result =
FailureEnricherUtils.labelFailure(
cause,
null,
ComponentMainThreadExecutorServiceAdapter.forMainThread(),
enrichers);
assertThatFuture(result)
.eventuallySucceeds()
.satisfies(
labels -> {
assertThat(labels).hasSize(1);
assertThat(labels).containsKey("enricherKey");
assertThat(labels).containsValue("enricherKeyValue");
});
}
|
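Aside: the key-conflict check in labelFailure relies on Map.merge invoking its remapping function only when the key is already present. A minimal standalone sketch of that behavior (plain java.util, no Flink types; the exception message is illustrative):

import java.util.HashMap;
import java.util.Map;

class MergeConflictSketch {
    public static void main(String[] args) {
        Map<String, String> merged = new HashMap<>();
        // First merge: key absent, so the value is simply inserted.
        merged.merge("type", "SYSTEM",
                (a, b) -> { throw new IllegalStateException("duplicate label key: type"); });
        // Second merge: key present, so the remapping function runs and throws.
        try {
            merged.merge("type", "USER",
                    (a, b) -> { throw new IllegalStateException("duplicate label key: type"); });
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
|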
@Nonnull
public static <K, V> Sink<ChangeRecord> map(
@Nonnull String mapName,
@Nonnull FunctionEx<? super ChangeRecord, ? extends K> keyFn,
@Nonnull FunctionEx<? super ChangeRecord, ? extends V> valueFn
) {
String name = "mapCdcSink(" + mapName + ')';
return sink(name, mapName, null, keyFn, valueFn);
}
|
@Test
public void deleteFromLocalMap_ViaValueProjection() {
p.readFrom(items(SYNC1, INSERT2))
.writeTo(localSync());
execute().join();
p = Pipeline.create();
p.readFrom(items(UPDATE1))
.writeTo(CdcSinks.map(MAP,
r -> (Integer) r.key().toMap().get(ID),
r -> null
));
execute().join();
assertMap(hz(), null, "[email protected]");
hz().getMap(MAP).destroy();
}
|
public static int read(final AtomicBuffer buffer, final ErrorConsumer consumer)
{
return read(buffer, consumer, 0);
}
|
@Test
void readShouldNotReadIfRemainingSpaceIsLessThanOneErrorPrefix()
{
final UnsafeBuffer buffer = new UnsafeBuffer(new byte[64]);
final long lastTimestamp = 543495734L;
final long firstTimestamp = lastTimestamp - 1000;
final int count = 123;
final int totalLength = 45;
buffer.putInt(LENGTH_OFFSET, totalLength);
buffer.putLong(LAST_OBSERVATION_TIMESTAMP_OFFSET, lastTimestamp);
buffer.putLong(FIRST_OBSERVATION_TIMESTAMP_OFFSET, firstTimestamp);
buffer.putInt(OBSERVATION_COUNT_OFFSET, count);
buffer.putStringWithoutLengthAscii(ENCODED_ERROR_OFFSET, "abcdefghijklmnopqrstuvwxyz");
buffer.putInt(totalLength + LENGTH_OFFSET, 12);
final ErrorConsumer errorConsumer = mock(ErrorConsumer.class);
assertEquals(1, ErrorLogReader.read(buffer, errorConsumer, 0));
verify(errorConsumer).accept(count, firstTimestamp, lastTimestamp, "abcdefghijklmnopqrstu");
}
|
public void markStale() {
stale = true;
}
|
@Test
void stale() {
final Account account = AccountsHelper.generateTestAccount("+14151234567", UUID.randomUUID(), UUID.randomUUID(), Collections.emptyList(),
new byte[0]);
assertDoesNotThrow(account::getNumber);
account.markStale();
assertThrows(AssertionError.class, account::getNumber);
assertDoesNotThrow(account::getUuid);
}
|
@Override
protected TableSchema transformTableSchema() {
tryOpen();
List<String> inputColumnsMapping = new ArrayList<>();
SeaTunnelRowType outRowType = sqlEngine.typeMapping(inputColumnsMapping);
List<String> outputColumns = Arrays.asList(outRowType.getFieldNames());
TableSchema.Builder builder = TableSchema.builder();
if (inputCatalogTable.getTableSchema().getPrimaryKey() != null
&& outputColumns.containsAll(
inputCatalogTable.getTableSchema().getPrimaryKey().getColumnNames())) {
builder.primaryKey(inputCatalogTable.getTableSchema().getPrimaryKey().copy());
}
List<ConstraintKey> outputConstraintKeys =
inputCatalogTable.getTableSchema().getConstraintKeys().stream()
.filter(
key -> {
List<String> constraintColumnNames =
key.getColumnNames().stream()
.map(
ConstraintKey.ConstraintKeyColumn
::getColumnName)
.collect(Collectors.toList());
return outputColumns.containsAll(constraintColumnNames);
})
.map(ConstraintKey::copy)
.collect(Collectors.toList());
builder.constraintKey(outputConstraintKeys);
String[] fieldNames = outRowType.getFieldNames();
SeaTunnelDataType<?>[] fieldTypes = outRowType.getFieldTypes();
List<Column> columns = new ArrayList<>(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) {
Column simpleColumn = null;
String inputColumnName = inputColumnsMapping.get(i);
if (inputColumnName != null) {
for (Column inputColumn : inputCatalogTable.getTableSchema().getColumns()) {
if (inputColumnName.equals(inputColumn.getName())) {
simpleColumn = inputColumn;
break;
}
}
}
Column column;
if (simpleColumn != null) {
column =
new PhysicalColumn(
fieldNames[i],
fieldTypes[i],
simpleColumn.getColumnLength(),
simpleColumn.getScale(),
simpleColumn.isNullable(),
simpleColumn.getDefaultValue(),
simpleColumn.getComment(),
simpleColumn.getSourceType(),
simpleColumn.getOptions());
} else {
column = PhysicalColumn.of(fieldNames[i], fieldTypes[i], 0, true, null, null);
}
columns.add(column);
}
return builder.columns(columns).build();
}
|
@Test
public void testNotLoseSourceTypeAndOptions() {
SQLTransform sqlTransform = new SQLTransform(READONLY_CONFIG, getCatalogTable());
TableSchema tableSchema = sqlTransform.transformTableSchema();
tableSchema
.getColumns()
.forEach(
column -> {
if (!column.getName().equals(GENERATE_PARTITION_KEY)) {
Assertions.assertEquals(
"source_" + column.getDataType(), column.getSourceType());
Assertions.assertEquals(
"testInSQL", column.getOptions().get("context"));
}
});
}
|
public static Map<String, Object> getAnnotationValues(Annotation annotation) throws NoSuchFieldException {
InvocationHandler h = Proxy.getInvocationHandler(annotation);
return getFieldValue(h, "memberValues");
}
|
@Test
@EnabledOnJre({JRE.JAVA_8, JRE.JAVA_11}) // `ReflectionUtil.modifyStaticFinalField` is not supported on Java 17 and above
public void testGetAnnotationValues() throws NoSuchMethodException, NoSuchFieldException {
Assertions.assertEquals(new LinkedHashMap<>(), ReflectionUtil
.getAnnotationValues(this.getClass().getMethod("testGetAnnotationValues").getAnnotation(Test.class)));
}
|
public void isEqualTo(@Nullable Object expected) {
standardIsEqualTo(expected);
}
|
@Test
public void isEqualToStringWithNullVsNull() {
expectFailure.whenTesting().that("null").isEqualTo(null);
assertFailureKeys("expected", "an instance of", "but was", "an instance of");
assertFailureValue("expected", "null");
assertFailureValueIndexed("an instance of", 0, "(null reference)");
assertFailureValue("but was", "(non-equal value with same string representation)");
assertFailureValueIndexed("an instance of", 1, "java.lang.String");
}
|
public static boolean backgroundRemoval(String inputPath, String outputPath, int tolerance) {
return BackgroundRemoval.backgroundRemoval(inputPath, outputPath, tolerance);
}
|
@Test
@Disabled
public void backgroundRemovalTest() {
// change the image background to transparent
ImgUtil.backgroundRemoval(
"d:/test/617180969474805871.jpg",
"d:/test/2.jpg", 10);
// change the image background to red
ImgUtil.backgroundRemoval(new File(
"d:/test/617180969474805871.jpg"),
new File("d:/test/3.jpg"),
new Color(200, 0, 0), 10);
}
|
public synchronized void register(String id, MapInterceptor interceptor) {
assert !(Thread.currentThread() instanceof PartitionOperationThread);
if (id2InterceptorMap.containsKey(id)) {
return;
}
Map<String, MapInterceptor> tmpMap = new HashMap<>(id2InterceptorMap);
tmpMap.put(id, interceptor);
id2InterceptorMap = unmodifiableMap(tmpMap);
List<MapInterceptor> tmpInterceptors = new ArrayList<>(interceptors);
tmpInterceptors.add(interceptor);
interceptors = unmodifiableList(tmpInterceptors);
}
|
@Test
public void testRegister() {
registry.register(interceptor.id, interceptor);
assertInterceptorRegistryContainsInterceptor();
}
|
@SuppressWarnings("squid:S1181")
// Yes we really do want to catch Throwable
@Override
public V apply(U input) {
int retryAttempts = 0;
while (true) {
try {
return baseFunction.apply(input);
} catch (Throwable t) {
if (!exceptionClass.isAssignableFrom(t.getClass()) || retryAttempts == maxRetries) {
Throwables.throwIfUnchecked(t);
throw new RetriesExceededException(t);
}
Tools.randomDelay(maxDelayBetweenRetries);
retryAttempts++;
}
}
}
|
@Test
public void testSuccessAfterOneRetry() {
new RetryingFunction<>(this::succeedAfterOneFailure, RetryableException.class, 1, 10).apply(null);
}
|
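A self-contained sketch of the retry shape used above, under stated assumptions: Tools.randomDelay is not shown here, so a ThreadLocalRandom sleep stands in, and RuntimeException stands in for the configurable exceptionClass.

import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Function;

final class SimpleRetry {
    // Hypothetical standalone retry wrapper mirroring the structure above.
    static <U, V> V applyWithRetries(Function<U, V> fn, U input, int maxRetries, long maxDelayMs) {
        int attempts = 0;
        while (true) {
            try {
                return fn.apply(input);
            } catch (RuntimeException e) {
                if (attempts == maxRetries) {
                    throw e; // retries exhausted, rethrow the last failure
                }
                try {
                    // Random delay between attempts, bounded by maxDelayMs.
                    Thread.sleep(ThreadLocalRandom.current().nextLong(maxDelayMs + 1));
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw e;
                }
                attempts++;
            }
        }
    }
}
|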
public static double cor(int[] x, int[] y) {
if (x.length != y.length) {
throw new IllegalArgumentException("Arrays have different length.");
}
if (x.length < 3) {
throw new IllegalArgumentException("array length has to be at least 3.");
}
double Sxy = cov(x, y);
double Sxx = var(x);
double Syy = var(y);
if (Sxx == 0 || Syy == 0) {
return Double.NaN;
}
return Sxy / sqrt(Sxx * Syy);
}
|
@Test
public void testCor_doubleArr_doubleArr() {
System.out.println("cor");
double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
double[] y = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300};
assertEquals(0.4686847, MathEx.cor(x, y), 1E-7);
}
|
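For reference, the formula being tested is the Pearson correlation r = Sxy / sqrt(Sxx * Syy). A minimal sketch with the covariance and variance sums inlined (direct loops stand in for MathEx.cov/var; this is not smile's implementation):

// Minimal Pearson correlation mirroring cor() above; assumes x.length == y.length >= 2.
// The 1/(n-1) factors in cov/var cancel in the ratio, so raw sums suffice.
static double pearson(double[] x, double[] y) {
    int n = x.length;
    double mx = 0, my = 0;
    for (int i = 0; i < n; i++) { mx += x[i]; my += y[i]; }
    mx /= n; my /= n;
    double sxy = 0, sxx = 0, syy = 0;
    for (int i = 0; i < n; i++) {
        sxy += (x[i] - mx) * (y[i] - my);
        sxx += (x[i] - mx) * (x[i] - mx);
        syy += (y[i] - my) * (y[i] - my);
    }
    return (sxx == 0 || syy == 0) ? Double.NaN : sxy / Math.sqrt(sxx * syy);
}
|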
static String resolveCluster(AwsConfig awsConfig, AwsMetadataApi metadataApi, Environment environment) {
if (!isNullOrEmptyAfterTrim(awsConfig.getCluster())) {
return awsConfig.getCluster();
}
if (environment.isRunningOnEcs()) {
String cluster = metadataApi.clusterEcs();
LOGGER.info("No ECS cluster defined, using current cluster: " + cluster);
return cluster;
}
throw new InvalidConfigurationException("You must define the 'cluster' property if not running inside an ECS cluster");
}
|
@Test
public void resolveClusterAwsEcsMetadata() {
// given
String cluster = "service-name";
AwsConfig config = AwsConfig.builder().build();
AwsMetadataApi metadataApi = mock(AwsMetadataApi.class);
given(metadataApi.clusterEcs()).willReturn(cluster);
Environment environment = mock(Environment.class);
given(environment.isRunningOnEcs()).willReturn(true);
// when
String result = AwsClientConfigurator.resolveCluster(config, metadataApi, environment);
// then
assertEquals(cluster, result);
}
|
public byte[] readAll() throws IOException {
if (pos == 0 && count == buf.length) {
pos = count;
return buf;
}
byte[] ret = new byte[count - pos];
super.read(ret);
return ret;
}
|
@Test
public void testReadAllAfterReadPartial() throws IOException {
assertNotEquals(-1, exposedStream.read());
byte[] ret = exposedStream.readAll();
assertArrayEquals("ello World!".getBytes(StandardCharsets.UTF_8), ret);
}
|
public String getServerStatus() {
try {
NamingService namingService = nacosServiceManager.getNamingService();
return namingService.getServerStatus();
} catch (NacosException e) {
LOGGER.log(Level.SEVERE, "failed to get nacos server status", e);
}
return STATUS_UNKNOW;
}
|
@Test
public void testGetServerStatus() throws NacosException {
mockNamingService();
Assert.assertNull(nacosClient.getServerStatus());
}
|
public void setWriteTimeout(int writeTimeout) {
this.writeTimeout = writeTimeout;
}
|
@Test
public void testRetryableError() throws IOException {
MockLowLevelHttpResponse[] mockResponses =
createMockResponseWithStatusCode(
503, // Retryable
429, // We also retry on 429 Too Many Requests.
200);
when(mockLowLevelRequest.execute())
.thenReturn(mockResponses[0], mockResponses[1], mockResponses[2]);
Storage.Buckets.Get result = storage.buckets().get("test");
HttpResponse response = result.executeUnparsed();
assertNotNull(response);
verify(mockHttpResponseInterceptor).interceptResponse(any(HttpResponse.class));
verify(mockLowLevelRequest, atLeastOnce()).addHeader(anyString(), anyString());
verify(mockLowLevelRequest, times(3)).setTimeout(anyInt(), anyInt());
verify(mockLowLevelRequest, times(3)).setWriteTimeout(anyInt());
verify(mockLowLevelRequest, times(3)).execute();
// It reads the status code of all responses
for (MockLowLevelHttpResponse mockResponse : mockResponses) {
verify(mockResponse, atLeastOnce()).getStatusCode();
}
expectedLogs.verifyDebug("Request failed with code 503");
}
|
@Override
public void warn(String msg) {
logger.warn(msg);
}
|
@Test
public void testWarnWithException() {
Log mockLog = mock(Log.class);
InternalLogger logger = new CommonsLogger(mockLog, "foo");
logger.warn("a", e);
verify(mockLog).warn("a", e);
}
|
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
|
@Test
public void setEnabled() {
properties.setEnabled(false);
assertThat(properties.isEnabled()).isEqualTo(false);
}
|
public static int scan(final UnsafeBuffer termBuffer, final int termOffset, final int limitOffset)
{
int offset = termOffset;
while (offset < limitOffset)
{
final int frameLength = frameLengthVolatile(termBuffer, offset);
if (frameLength <= 0)
{
break;
}
final int alignedFrameLength = align(frameLength, FRAME_ALIGNMENT);
if (isPaddingFrame(termBuffer, offset))
{
if (termOffset == offset)
{
offset += alignedFrameLength;
}
break;
}
if (offset + alignedFrameLength > limitOffset)
{
break;
}
offset += alignedFrameLength;
}
return offset;
}
|
@Test
void shouldReadBlockOfTwoMessages()
{
final int offset = 0;
final int limit = termBuffer.capacity();
final int messageLength = 50;
final int alignedMessageLength = BitUtil.align(messageLength, FRAME_ALIGNMENT);
when(termBuffer.getIntVolatile(lengthOffset(offset))).thenReturn(messageLength);
when(termBuffer.getShort(typeOffset(offset))).thenReturn((short)HDR_TYPE_DATA);
when(termBuffer.getIntVolatile(lengthOffset(alignedMessageLength))).thenReturn(messageLength);
when(termBuffer.getShort(typeOffset(alignedMessageLength))).thenReturn((short)HDR_TYPE_DATA);
final int newOffset = TermBlockScanner.scan(termBuffer, offset, limit);
assertEquals(alignedMessageLength * 2, newOffset);
}
|
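The scanner above steps through the buffer in FRAME_ALIGNMENT-sized multiples. A sketch of that power-of-two alignment (Agrona's BitUtil.align is equivalent for power-of-two alignments; FRAME_ALIGNMENT of 32 is assumed here, consistent with alignedMessageLength in the test):

// Round value up to the next multiple of a power-of-two alignment.
static int align(int value, int alignment) {
    return (value + (alignment - 1)) & -alignment;
}
// align(50, 32) == 64, so two 50-byte messages scan to offset 128.
|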
public void isNotNull() {
standardIsNotEqualTo(null);
}
|
@Test
public void isNotNullBadEqualsImplementation() {
assertThat(new ThrowsOnEqualsNull()).isNotNull();
}
|
public static boolean isRetryOrDlqTopic(String topic) {
if (StringUtils.isBlank(topic)) {
return false;
}
return topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX) || topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX);
}
|
@Test
public void testIsRetryOrDlqTopicWithNonRetryOrDlqTopic() {
String topic = "NormalTopic";
boolean result = BrokerMetricsManager.isRetryOrDlqTopic(topic);
assertThat(result).isFalse();
}
|
public static boolean hasModifier(Class<?> clazz, ModifierType... modifierTypes) {
if (null == clazz || ArrayUtil.isEmpty(modifierTypes)) {
return false;
}
return 0 != (clazz.getModifiers() & modifiersToInt(modifierTypes));
}
|
@Test
public void hasModifierTest() throws NoSuchMethodException {
Method method = ModifierUtilTest.class.getDeclaredMethod("ddd");
assertTrue(ModifierUtil.hasModifier(method, ModifierUtil.ModifierType.PRIVATE));
assertTrue(ModifierUtil.hasModifier(method,
ModifierUtil.ModifierType.PRIVATE,
ModifierUtil.ModifierType.STATIC)
);
}
|
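Note the any-of semantics above: hasModifier returns true if at least one requested modifier bit is set. A small sketch using java.lang.reflect.Modifier directly (assuming ModifierType values map onto these standard constants, as modifiersToInt suggests):

import java.lang.reflect.Member;
import java.lang.reflect.Modifier;

// True if the member carries at least one of the requested modifier bits.
static boolean hasAnyModifier(Member member, int... flags) {
    int mask = 0;
    for (int f : flags) {
        mask |= f;
    }
    return 0 != (member.getModifiers() & mask);
}
// e.g. hasAnyModifier(method, Modifier.PRIVATE, Modifier.STATIC)
|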
public static Builder newBuilder(List<ReservedPort> reservedPorts) {
return new Builder(reservedPorts);
}
|
@Test
public void zookeeper() throws Exception {
mCluster = MultiProcessCluster.newBuilder(PortCoordination.MULTI_PROCESS_ZOOKEEPER)
.setClusterName("zookeeper")
.setNumMasters(3)
.setNumWorkers(2)
.addProperty(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS)
.build();
clusterVerification();
}
|
@Override
public List<KubernetesPod> getPodsWithLabels(Map<String, String> labels) {
final List<Pod> podList =
this.internalClient
.pods()
.withLabels(labels)
.list(
new ListOptionsBuilder()
.withResourceVersion(KUBERNETES_ZERO_RESOURCE_VERSION)
.build())
.getItems();
if (podList == null || podList.isEmpty()) {
return new ArrayList<>();
}
return podList.stream().map(KubernetesPod::new).collect(Collectors.toList());
}
|
@Test
void testGetPodsWithLabels() {
final String podName = "pod-with-labels";
final Pod pod =
new PodBuilder()
.editOrNewMetadata()
.withName(podName)
.withLabels(TESTING_LABELS)
.endMetadata()
.editOrNewSpec()
.endSpec()
.build();
this.kubeClient.pods().inNamespace(NAMESPACE).create(pod);
List<KubernetesPod> kubernetesPods = this.flinkKubeClient.getPodsWithLabels(TESTING_LABELS);
assertThat(kubernetesPods)
.satisfiesExactly(
kubernetesPod -> assertThat(kubernetesPod.getName()).isEqualTo(podName));
}
|
public static List<String> computeLangFromLocale(Locale locale) {
final List<String> resourceNames = new ArrayList<>(5);
if (StringUtils.isBlank(locale.getLanguage())) {
throw new IllegalArgumentException(
"Locale \"" + locale + "\" "
+ "cannot be used as it does not specify a language.");
}
resourceNames.add("default");
resourceNames.add(locale.getLanguage());
if (StringUtils.isNotBlank(locale.getCountry())) {
resourceNames.add(locale.getLanguage() + "_" + locale.getCountry());
}
if (StringUtils.isNotBlank(locale.getVariant())) {
resourceNames.add(
locale.getLanguage() + "_" + locale.getCountry() + "-" + locale.getVariant());
}
return resourceNames;
}
|
@Test
void computeLangFromLocale() {
List<String> languages = LanguageUtils.computeLangFromLocale(Locale.CHINA);
assertThat(languages).isEqualTo(List.of("default", "zh", "zh_CN"));
languages = LanguageUtils.computeLangFromLocale(Locale.CHINESE);
assertThat(languages).isEqualTo(List.of("default", "zh"));
languages = LanguageUtils.computeLangFromLocale(Locale.TAIWAN);
assertThat(languages).isEqualTo(List.of("default", "zh", "zh_TW"));
languages = LanguageUtils.computeLangFromLocale(Locale.ENGLISH);
assertThat(languages).isEqualTo(List.of("default", "en"));
languages = LanguageUtils.computeLangFromLocale(Locale.US);
assertThat(languages).isEqualTo(List.of("default", "en", "en_US"));
languages =
LanguageUtils.computeLangFromLocale(Locale.forLanguageTag("en-US-x-lvariant-POSIX"));
assertThat(languages).isEqualTo(List.of("default", "en", "en_US", "en_US-POSIX"));
}
|
public Quantity<U> add(Quantity<U> second) {
if (unit == second.unit) {
return new Quantity<U>(value + second.value, unit);
} else {
final double sum = value + second.in(unit).value;
return new Quantity<U>(sum, unit);
}
}
|
@Test
public void addQuantitiesGivenAsPrimitives() throws Exception {
Quantity<Metrics> first = new Quantity<Metrics>(100, Metrics.cm);
assertThat(first.add(2, Metrics.m)).isEqualTo(new Quantity<Metrics>(300, Metrics.cm));
}
|
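Quantity.add only assumes its unit type can convert values onto a common scale via in(unit). A hypothetical reconstruction of the Metrics enum that would satisfy the test (the real enum is not shown; the conversion factors are an assumption):

// Hypothetical unit enum: each unit knows its factor to a base unit (cm).
enum Metrics {
    cm(1.0), m(100.0);
    final double factorToCm;
    Metrics(double factorToCm) { this.factorToCm = factorToCm; }

    // Convert a value expressed in this unit into the target unit's scale.
    double convert(double value, Metrics target) {
        return value * factorToCm / target.factorToCm;
    }
}
// Metrics.m.convert(2, Metrics.cm) == 200, so 100 cm + 2 m == 300 cm.
|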
@Override
public String format(Object value) {
return value == null ? EMPTY : nonNullFormat(value);
}
|
@Test
public void nonNullInput() {
assertEquals("what?", MOCK_OUTPUT, frm.format(1));
}
|
static MinMax findMinMax(MinMax minMax, List<Statement> statements, EncodedValueLookup lookup) {
List<List<Statement>> groups = CustomModelParser.splitIntoGroup(statements);
for (List<Statement> group : groups) findMinMaxForGroup(minMax, group, lookup);
return minMax;
}
|
@Test
public void testBlock() {
List<Statement> statements = Arrays.asList(
If("road_class == TERTIARY",
List.of(If("max_speed > 100", LIMIT, "100"),
Else(LIMIT, "30"))),
ElseIf("road_class == SECONDARY", LIMIT, "25"),
Else(MULTIPLY, "0.8")
);
assertEquals(100, findMinMax(new MinMax(0, 120), statements, lookup).max);
statements = Arrays.asList(
If("road_class == TERTIARY",
List.of(If("max_speed > 100", LIMIT, "90"),
Else(LIMIT, "30"))),
ElseIf("road_class == SECONDARY", LIMIT, "25"),
Else(MULTIPLY, "0.8")
);
assertEquals(96, findMinMax(new MinMax(0, 120), statements, lookup).max);
}
|
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
final boolean satisfied = first.getValue(index).isEqual(second.getValue(index));
traceIsSatisfied(index, satisfied);
return satisfied;
}
|
@Test
public void isSatisfied() {
assertTrue(rule.isSatisfied(0));
assertFalse(rule.isSatisfied(1));
assertFalse(rule.isSatisfied(2));
assertFalse(rule.isSatisfied(3));
}
|
public static boolean isNotEmpty(String value) {
return !isEmpty(value);
}
|
@Test
void testIsNotEmpty() throws Exception {
assertThat(ConfigUtils.isNotEmpty("abc"), is(true));
}
|
public static void register(Observer observer) {
register(SubjectType.SPRING_CONTENT_REFRESHED.name(), observer);
}
|
@Test
public void testDefaultRegister() {
AbstractSubjectCenter.register(subjectNotifyListener);
List<Observer> list = OBSERVERS_MAP.get(AbstractSubjectCenter.SubjectType.SPRING_CONTENT_REFRESHED.name());
Assert.assertNotNull(list);
Assert.assertEquals(1, list.size());
Assert.assertSame(subjectNotifyListener, list.get(0));
OBSERVERS_MAP.clear();
}
|
@Override
public SchemaVersion versionFromBytes(byte[] version) {
// The schema storage converts the version from bytes to a long,
// so it handles both cases: 1) the version is 64 bytes long (pre 2.4.0);
// 2) the version is 8 bytes long (post 2.4.0).
//
// NOTE: if you are planning to change the logic here, you should consider
// both the 64-byte and 8-byte cases.
ByteBuffer bb = ByteBuffer.wrap(version);
return new LongSchemaVersion(bb.getLong());
}
|
@Test
public void testVersionFromBytes() {
long version = System.currentTimeMillis();
ByteBuffer bbPre240 = ByteBuffer.allocate(Long.SIZE);
bbPre240.putLong(version);
byte[] versionBytesPre240 = bbPre240.array();
ByteBuffer bbPost240 = ByteBuffer.allocate(Long.BYTES);
bbPost240.putLong(version);
byte[] versionBytesPost240 = bbPost240.array();
PulsarService mockPulsarService = mock(PulsarService.class);
when(mockPulsarService.getLocalMetadataStore()).thenReturn(mock(MetadataStoreExtended.class));
BookkeeperSchemaStorage schemaStorage = new BookkeeperSchemaStorage(mockPulsarService);
assertEquals(new LongSchemaVersion(version), schemaStorage.versionFromBytes(versionBytesPre240));
assertEquals(new LongSchemaVersion(version), schemaStorage.versionFromBytes(versionBytesPost240));
}
|
@Nullable
public Buffer requestBuffer() {
return bufferManager.requestBuffer();
}
|
@Test
void testRequestBuffer() throws Exception {
BufferPool bufferPool = new TestBufferPool();
SingleInputGate inputGate = createSingleInputGate(bufferPool);
RemoteInputChannel remoteChannel1 = createRemoteInputChannel(inputGate, 0, 2);
RemoteInputChannel remoteChannel2 = createRemoteInputChannel(inputGate, 1, 0);
inputGate.setup();
remoteChannel1.requestSubpartitions();
remoteChannel2.requestSubpartitions();
remoteChannel1.onSenderBacklog(2);
remoteChannel2.onSenderBacklog(2);
for (int i = 4; i >= 0; --i) {
assertThat(remoteChannel1.getNumberOfRequiredBuffers()).isEqualTo(i);
remoteChannel1.requestBuffer();
}
for (int i = 2; i >= 0; --i) {
assertThat(remoteChannel2.getNumberOfRequiredBuffers()).isEqualTo(i);
remoteChannel2.requestBuffer();
}
}
|
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
}
|
@Test
void shouldTimeoutSubscription()
{
driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_1);
driverConductor.doWork();
final ArgumentCaptor<ReceiveChannelEndpoint> captor = ArgumentCaptor.forClass(ReceiveChannelEndpoint.class);
verify(receiverProxy).registerReceiveChannelEndpoint(captor.capture());
receiveChannelEndpoint = captor.getValue();
verify(receiverProxy).addSubscription(eq(receiveChannelEndpoint), eq(STREAM_ID_1));
doWorkUntil(() -> nanoClock.nanoTime() >= CLIENT_LIVENESS_TIMEOUT_NS * 2);
verify(receiverProxy, times(1))
.removeSubscription(eq(receiveChannelEndpoint), eq(STREAM_ID_1));
verify(receiverProxy).closeReceiveChannelEndpoint(receiveChannelEndpoint);
}
|
public void setValue(PDSignature value) throws IOException
{
getCOSObject().setItem(COSName.V, value);
applyChange();
}
|
@Test
void setValueForAbstractedSignatureField()
{
PDSignatureField sigField = new PDSignatureField(acroForm);
sigField.setPartialName("SignatureField");
assertThrows(UnsupportedOperationException.class, () -> {
sigField.setValue("Can't set value using String");
});
}
|
@VisibleForTesting
@Override
public boolean allocateSlot(
int index, JobID jobId, AllocationID allocationId, Duration slotTimeout) {
return allocateSlot(index, jobId, allocationId, defaultSlotResourceProfile, slotTimeout);
}
|
@Test
void testAllocatedSlotTimeout() throws Exception {
final CompletableFuture<AllocationID> timeoutFuture = new CompletableFuture<>();
final TestingSlotActions testingSlotActions =
new TestingSlotActionsBuilder()
.setTimeoutSlotConsumer(
(allocationID, uuid) -> timeoutFuture.complete(allocationID))
.build();
try (final TaskSlotTableImpl<TaskSlotPayload> taskSlotTable =
createTaskSlotTableAndStart(1, testingSlotActions)) {
final AllocationID allocationId = new AllocationID();
assertThat(
taskSlotTable.allocateSlot(
0, new JobID(), allocationId, Duration.ofMillis(1L)))
.isTrue();
assertThatFuture(timeoutFuture).eventuallySucceeds().isEqualTo(allocationId);
}
}
|
public static void assignFieldParams(Object bean, Map<String, Param> params)
throws TikaConfigException {
Class<?> beanClass = bean.getClass();
if (!PARAM_INFO.containsKey(beanClass)) {
synchronized (TikaConfig.class) {
if (!PARAM_INFO.containsKey(beanClass)) {
List<AccessibleObject> aObjs =
collectInfo(beanClass, org.apache.tika.config.Field.class);
List<ParamField> fields = new ArrayList<>(aObjs.size());
for (AccessibleObject aObj : aObjs) {
fields.add(new ParamField(aObj));
}
PARAM_INFO.put(beanClass, fields);
}
}
}
List<ParamField> fields = PARAM_INFO.get(beanClass);
for (ParamField field : fields) {
Param<?> param = params.get(field.getName());
if (param != null) {
if (field.getType().isAssignableFrom(param.getType())) {
try {
field.assignValue(bean, param.getValue());
} catch (InvocationTargetException e) {
LOG.error("Error assigning value '{}' to '{}'", param.getValue(), param.getName());
final Throwable cause = e.getCause() == null ? e : e.getCause();
throw new TikaConfigException(cause.getMessage(), cause);
} catch (IllegalAccessException e) {
LOG.error("Error assigning value '{}' to '{}'", param.getValue(), param.getName());
throw new TikaConfigException(e.getMessage(), e);
}
} else {
String msg = String.format(Locale.ROOT,
"Value '%s' of type '%s' can't be" +
" assigned to field '%s' of defined type '%s'",
param.getValue(),
param.getValue().getClass(), field.getName(), field.getType());
throw new TikaConfigException(msg);
}
} else if (field.isRequired()) {
//param not supplied but field is declared as required?
String msg = String.format(Locale.ROOT,
"Param %s is required for %s," + " but it is not given in config.",
field.getName(), bean.getClass().getName());
throw new TikaConfigException(msg);
} else {
LOG.debug("Param not supplied, field is not mandatory");
}
}
}
|
@Test
public void testMisMatchType() {
class MyParser extends Configurable {
@Field(required = true)
int config;
}
Map<String, Param> params = new HashMap<>();
try {
params.put("config", new Param<>("config", 1));
MyParser bean = new MyParser();
AnnotationUtils.assignFieldParams(bean, params);
assertEquals(bean.config, 1);
} catch (TikaConfigException e) {
e.printStackTrace();
fail("Exception Not expected");
}
params.clear();
try {
params.put("config", new Param<>("config", "a string value"));
AnnotationUtils.assignFieldParams(new MyParser(), params);
fail("Exception expected");
} catch (TikaConfigException e) {
//expected
}
}
|
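The PARAM_INFO population in assignFieldParams is a double-checked cache: check, lock, re-check, compute once, publish. A minimal sketch of that pattern (a ConcurrentHashMap stands in for the original map guarded by the TikaConfig class lock; describe() is a hypothetical stand-in for the expensive reflection scan):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class ReflectionCache {
    private static final Map<Class<?>, String> CACHE = new ConcurrentHashMap<>();

    // check -> lock -> re-check -> compute once, as in assignFieldParams above
    static String describe(Class<?> type) {
        String cached = CACHE.get(type);
        if (cached == null) {
            synchronized (ReflectionCache.class) {
                cached = CACHE.get(type);
                if (cached == null) {
                    cached = type.getName(); // stand-in for the expensive reflection scan
                    CACHE.put(type, cached);
                }
            }
        }
        return cached;
    }
}
|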
@Override
public int read(ByteBuffer dst) throws IOException {
checkNotNull(dst);
checkOpen();
checkReadable();
int read = 0; // will definitely either be assigned or an exception will be thrown
synchronized (this) {
boolean completed = false;
try {
if (!beginBlocking()) {
return 0; // AsynchronousCloseException will be thrown
}
file.readLock().lockInterruptibly();
try {
read = file.read(position, dst);
if (read != -1) {
position += read;
}
file.setLastAccessTime(fileSystemState.now());
completed = true;
} finally {
file.readLock().unlock();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
endBlocking(completed);
}
}
return read;
}
|
@Test
public void testReadNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
try {
channel.read(buffer("111"), -1);
fail();
} catch (IllegalArgumentException expected) {
}
ByteBuffer[] bufs = {buffer("111"), buffer("111")};
try {
channel.read(bufs, -1, 10);
fail();
} catch (IndexOutOfBoundsException expected) {
}
try {
channel.read(bufs, 0, -1);
fail();
} catch (IndexOutOfBoundsException expected) {
}
}
|
@Override
public Optional<NativeEntity<DataAdapterDto>> findExisting(Entity entity, Map<String, ValueReference> parameters) {
if (entity instanceof EntityV1) {
return findExisting((EntityV1) entity, parameters);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
}
|
@Test
@MongoDBFixtures("LookupDataAdapterFacadeTest.json")
public void findExisting() {
final Entity entity = EntityV1.builder()
.id(ModelId.of("1"))
.type(ModelTypes.LOOKUP_ADAPTER_V1)
.data(objectMapper.convertValue(LookupDataAdapterEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("http-dsv"),
ValueReference.of("HTTP DSV"),
ValueReference.of("HTTP DSV"),
ReferenceMapUtils.toReferenceMap(Collections.emptyMap())
), JsonNode.class))
.build();
final NativeEntity<DataAdapterDto> nativeEntity = facade.findExisting(entity, Collections.emptyMap()).orElseThrow(AssertionError::new);
assertThat(nativeEntity.descriptor().id()).isEqualTo(ModelId.of("5adf24a04b900a0fdb4e52c8"));
assertThat(nativeEntity.descriptor().type()).isEqualTo(ModelTypes.LOOKUP_ADAPTER_V1);
assertThat(nativeEntity.entity().name()).isEqualTo("http-dsv");
assertThat(nativeEntity.entity().title()).isEqualTo("HTTP DSV");
assertThat(nativeEntity.entity().description()).isEqualTo("HTTP DSV");
}
|
@Override
public String evaluate(EvaluationContext evaluationContext, String... args) {
if (args != null && args.length == 1) {
int maxLength = DEFAULT_LENGTH;
try {
maxLength = Integer.parseInt(args[0]);
} catch (NumberFormatException nfe) {
// Ignore, we'll stick to the default.
}
return generateString(getRandom(), maxLength);
}
return generateString(getRandom(), DEFAULT_LENGTH);
}
|
@Test
void testCustomSizeEvaluation() {
// Compute evaluation.
RandomStringELFunction function = new RandomStringELFunction();
String result = function.evaluate(null, "64");
assertEquals(64, result.length());
}
|
public void watch(final String key, final Consumer<Object> consumer) {
watchUpstreamListener.put(key, consumer);
}
|
@Test
public void testWatch() {
this.applicationConfigCache.watch(selector.getName(), Assertions::assertNotNull);
}
|
@Override protected void customAnalyze( XMLOutputMeta meta, IMetaverseNode node ) throws MetaverseAnalyzerException {
super.customAnalyze( meta, node );
node.setProperty( "parentnode", meta.getMainElement() );
node.setProperty( "rownode", meta.getRepeatElement() );
}
|
@Test
public void testCustomAnalyze() throws Exception {
when( meta.getMainElement() ).thenReturn( "main" );
when( meta.getRepeatElement() ).thenReturn( "repeat" );
analyzer.customAnalyze( meta, node );
verify( node ).setProperty( "parentnode", "main" );
verify( node ).setProperty( "rownode", "repeat" );
}
|
@Override
public Result apply(ApplyNode applyNode, Captures captures, Context context)
{
if (applyNode.getSubqueryAssignments().size() != 1) {
return Result.empty();
}
RowExpression expression = getOnlyElement(applyNode.getSubqueryAssignments().getExpressions());
if (!(expression instanceof InSubqueryExpression)) {
return Result.empty();
}
InSubqueryExpression inPredicate = (InSubqueryExpression) expression;
VariableReferenceExpression semiJoinVariable = getOnlyElement(applyNode.getSubqueryAssignments().getVariables());
SemiJoinNode replacement = new SemiJoinNode(
applyNode.getSourceLocation(),
context.getIdAllocator().getNextId(),
applyNode.getInput(),
applyNode.getSubquery(),
inPredicate.getValue(),
inPredicate.getSubquery(),
semiJoinVariable,
Optional.empty(),
Optional.empty(),
Optional.empty(),
ImmutableMap.of());
return Result.ofPlanNode(replacement);
}
|
@Test
public void testFiresForInPredicate()
{
tester().assertThat(new TransformUncorrelatedInPredicateSubqueryToSemiJoin())
.on(p -> p.apply(
assignment(
p.variable("x"),
inSubquery(p.variable("y"), p.variable("z"))),
emptyList(),
p.values(p.variable("y")),
p.values(p.variable("z"))))
.matches(node(SemiJoinNode.class, values("y"), values("z")));
}
|
@SuppressWarnings("unchecked")
public <T> T convert(DocString docString, Type targetType) {
if (DocString.class.equals(targetType)) {
return (T) docString;
}
List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType);
if (docStringTypes.isEmpty()) {
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for %s",
targetType.getTypeName()));
}
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for '%s' or %s",
docString.getContentType(),
targetType.getTypeName()));
}
if (docStringTypes.size() > 1) {
List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes);
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, add one of the following content types to your docstring %s",
targetType.getTypeName(),
suggestedContentTypes));
}
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'",
targetType.getTypeName(),
docString.getContentType(),
suggestedContentTypes,
docString.getContentType()));
}
return (T) docStringTypes.get(0).transform(docString.getContent());
}
|
@Test
void throws_when_uses_doc_string_type_but_downcast_conversion() {
registry.defineDocStringType(jsonNodeForJson);
DocString docString = DocString.create("{\"hello\":\"world\"}", "json");
CucumberDocStringException exception = assertThrows(
CucumberDocStringException.class,
() -> converter.convert(docString, Object.class));
assertThat(exception.getMessage(), is("" +
"It appears you did not register docstring type for 'json' or java.lang.Object"));
}
|
private static void handleSetTabletEnablePersistentIndex(long backendId, Map<Long, TTablet> backendTablets) {
List<Pair<Long, Boolean>> tabletToEnablePersistentIndex = Lists.newArrayList();
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex();
for (TTablet backendTablet : backendTablets.values()) {
for (TTabletInfo tabletInfo : backendTablet.tablet_infos) {
if (!tabletInfo.isSetEnable_persistent_index()) {
continue;
}
long tabletId = tabletInfo.getTablet_id();
boolean beEnablePersistentIndex = tabletInfo.enable_persistent_index;
TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
long dbId = tabletMeta != null ? tabletMeta.getDbId() : TabletInvertedIndex.NOT_EXIST_VALUE;
long tableId = tabletMeta != null ? tabletMeta.getTableId() : TabletInvertedIndex.NOT_EXIST_VALUE;
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
continue;
}
OlapTable olapTable = (OlapTable) db.getTable(tableId);
if (olapTable == null) {
continue;
}
Locker locker = new Locker();
locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
try {
boolean feEnablePersistentIndex = olapTable.enablePersistentIndex();
if (beEnablePersistentIndex != feEnablePersistentIndex) {
tabletToEnablePersistentIndex.add(new Pair<>(tabletId, feEnablePersistentIndex));
}
} finally {
locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
}
}
}
if (!tabletToEnablePersistentIndex.isEmpty()) {
LOG.info("find [{}] tablet(s) which need to be set with persistent index enabled",
tabletToEnablePersistentIndex.size());
AgentBatchTask batchTask = new AgentBatchTask();
TabletMetadataUpdateAgentTask task = TabletMetadataUpdateAgentTaskFactory
.createEnablePersistentIndexUpdateTask(backendId, tabletToEnablePersistentIndex);
batchTask.addTask(task);
if (FeConstants.runningUnitTest) {
AgentTaskExecutor.submit(batchTask);
}
}
}
|
@Test
public void testHandleSetTabletEnablePersistentIndex() {
Database db = GlobalStateMgr.getCurrentState().getDb("test");
long dbId = db.getId();
long backendId = 10001L;
List<Long> tabletIds = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getTabletIdsByBackendId(10001);
Assert.assertFalse(tabletIds.isEmpty());
Map<Long, TTablet> backendTablets = new HashMap<Long, TTablet>();
List<TTabletInfo> tabletInfos = Lists.newArrayList();
TTablet tablet = new TTablet(tabletInfos);
for (Long tabletId : tabletIds) {
TTabletInfo tabletInfo = new TTabletInfo();
tabletInfo.setTablet_id(tabletId);
tabletInfo.setSchema_hash(60000);
tabletInfo.setEnable_persistent_index(true);
tablet.tablet_infos.add(tabletInfo);
}
backendTablets.put(backendId, tablet);
ReportHandler handler = new ReportHandler();
handler.testHandleSetTabletEnablePersistentIndex(backendId, backendTablets);
}
|
public Integer doCall() throws Exception {
// Operator id must be set
if (ObjectHelper.isEmpty(operatorId)) {
printer().println("Operator id must be set");
return -1;
}
List<String> integrationSources
= Stream.concat(Arrays.stream(Optional.ofNullable(filePaths).orElseGet(() -> new String[] {})),
Arrays.stream(Optional.ofNullable(sources).orElseGet(() -> new String[] {}))).toList();
Integration integration = new Integration();
integration.setSpec(new IntegrationSpec());
integration.getMetadata()
.setName(getIntegrationName(integrationSources));
if (dependencies != null && dependencies.length > 0) {
List<String> deps = new ArrayList<>();
for (String dependency : dependencies) {
String normalized = normalizeDependency(dependency);
validateDependency(normalized, printer());
deps.add(normalized);
}
integration.getSpec().setDependencies(deps);
}
if (kit != null) {
IntegrationKit integrationKit = new IntegrationKit();
integrationKit.setName(kit);
integration.getSpec().setIntegrationKit(integrationKit);
}
if (traitProfile != null) {
TraitProfile p = TraitProfile.valueOf(traitProfile.toUpperCase(Locale.US));
integration.getSpec().setProfile(p.name().toLowerCase(Locale.US));
}
if (repositories != null && repositories.length > 0) {
integration.getSpec().setRepositories(List.of(repositories));
}
if (annotations != null && annotations.length > 0) {
integration.getMetadata().setAnnotations(Arrays.stream(annotations)
.filter(it -> it.contains("="))
.map(it -> it.split("="))
.filter(it -> it.length == 2)
.collect(Collectors.toMap(it -> it[0].trim(), it -> it[1].trim())));
}
if (integration.getMetadata().getAnnotations() == null) {
integration.getMetadata().setAnnotations(new HashMap<>());
}
// --operator-id={id} is syntactic sugar for '--annotation camel.apache.org/operator.id={id}'
integration.getMetadata().getAnnotations().put(CamelKCommand.OPERATOR_ID_LABEL, operatorId);
// --integration-profile={id} is syntactic sugar for '--annotation camel.apache.org/integration-profile.id={id}'
if (integrationProfile != null) {
if (integrationProfile.contains("/")) {
String[] namespacedName = integrationProfile.split("/", 2);
integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_NAMESPACE_ANNOTATION,
namespacedName[0]);
integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_ANNOTATION, namespacedName[1]);
} else {
integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_ANNOTATION,
integrationProfile);
}
}
if (labels != null && labels.length > 0) {
integration.getMetadata().setLabels(Arrays.stream(labels)
.filter(it -> it.contains("="))
.map(it -> it.split("="))
.filter(it -> it.length == 2)
.collect(Collectors.toMap(it -> it[0].trim(), it -> it[1].trim())));
}
Traits traitsSpec = TraitHelper.parseTraits(traits);
if (image != null) {
TraitHelper.configureContainerImage(traitsSpec, image, null, null, null, null);
} else {
List<Source> resolvedSources = SourceHelper.resolveSources(integrationSources, compression);
List<Flows> flows = new ArrayList<>();
List<Sources> sources = new ArrayList<>();
for (Source source : resolvedSources) {
if (useFlows && source.isYaml() && !source.compressed()) {
JsonNode json = KubernetesHelper.json().convertValue(
KubernetesHelper.yaml().load(source.content()), JsonNode.class);
if (json.isArray()) {
for (JsonNode item : json) {
Flows flowSpec = new Flows();
flowSpec.setAdditionalProperties(KubernetesHelper.json().readerFor(Map.class).readValue(item));
flows.add(flowSpec);
}
} else {
Flows flowSpec = new Flows();
flowSpec.setAdditionalProperties(KubernetesHelper.json().readerFor(Map.class).readValue(json));
flows.add(flowSpec);
}
} else {
Sources sourceSpec = new Sources();
sourceSpec.setName(source.name());
sourceSpec.setLanguage(source.language());
sourceSpec.setContent(source.content());
sourceSpec.setCompression(source.compressed());
sources.add(sourceSpec);
}
}
if (!flows.isEmpty()) {
integration.getSpec().setFlows(flows);
}
if (!sources.isEmpty()) {
integration.getSpec().setSources(sources);
}
}
if (podTemplate != null) {
Source templateSource = SourceHelper.resolveSource(podTemplate);
if (!templateSource.isYaml()) {
throw new RuntimeCamelException(
("Unsupported pod template %s - " +
"please use proper YAML source").formatted(templateSource.extension()));
}
Spec podSpec = KubernetesHelper.yaml().loadAs(templateSource.content(), Spec.class);
Template template = new Template();
template.setSpec(podSpec);
integration.getSpec().setTemplate(template);
}
convertOptionsToTraits(traitsSpec);
integration.getSpec().setTraits(traitsSpec);
if (serviceAccount != null) {
integration.getSpec().setServiceAccountName(serviceAccount);
}
if (output != null) {
switch (output) {
case "k8s" -> {
List<Source> sources = SourceHelper.resolveSources(integrationSources);
TraitContext context
= new TraitContext(integration.getMetadata().getName(), "1.0-SNAPSHOT", printer(), sources);
TraitHelper.configureContainerImage(traitsSpec, image, "quay.io", null, integration.getMetadata().getName(),
"1.0-SNAPSHOT");
new TraitCatalog().apply(traitsSpec, context, traitProfile);
printer().println(
context.buildItems().stream().map(KubernetesHelper::dumpYaml).collect(Collectors.joining("---")));
}
case "yaml" -> printer().println(KubernetesHelper.dumpYaml(integration));
case "json" -> printer().println(
JSonHelper.prettyPrint(KubernetesHelper.json().writer().writeValueAsString(integration), 2));
default -> {
printer().printf("Unsupported output format '%s' (supported: yaml, json)%n", output);
return -1;
}
}
return 0;
}
final AtomicBoolean updated = new AtomicBoolean(false);
client(Integration.class).resource(integration).createOr(it -> {
updated.set(true);
return it.update();
});
if (updated.get()) {
printer().printf("Integration %s updated%n", integration.getMetadata().getName());
} else {
printer().printf("Integration %s created%n", integration.getMetadata().getName());
}
if (wait || logs) {
client(Integration.class).withName(integration.getMetadata().getName())
.waitUntilCondition(it -> "Running".equals(it.getStatus().getPhase()), 10, TimeUnit.MINUTES);
}
if (logs) {
IntegrationLogs logsCommand = new IntegrationLogs(getMain());
logsCommand.withClient(client());
logsCommand.withName(integration.getMetadata().getName());
logsCommand.doCall();
}
return 0;
}
|
@Test
public void shouldRunIntegration() throws Exception {
IntegrationRun command = createCommand();
command.filePaths = new String[] { "classpath:route.yaml" };
command.doCall();
Assertions.assertEquals("Integration route created", printer.getOutput());
Integration created = kubernetesClient.resources(Integration.class).withName("route").get();
Assertions.assertEquals("camel-k", created.getMetadata().getAnnotations().get(CamelKCommand.OPERATOR_ID_LABEL));
}
|
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
boolean satisfied = false;
// No trading history or no position opened, no loss
if (tradingRecord != null) {
Position currentPosition = tradingRecord.getCurrentPosition();
if (currentPosition.isOpened()) {
Num currentPrice = priceIndicator.getValue(index);
int positionIndex = currentPosition.getEntry().getIndex();
if (currentPosition.getEntry().isBuy()) {
satisfied = isBuySatisfied(currentPrice, index, positionIndex);
} else {
satisfied = isSellSatisfied(currentPrice, index, positionIndex);
}
}
}
traceIsSatisfied(index, satisfied);
return satisfied;
}
|
@Test
public void isSatisfiedForBuyForBarCount() {
BaseTradingRecord tradingRecord = new BaseTradingRecord(TradeType.BUY);
ClosePriceIndicator closePrice = new ClosePriceIndicator(
new MockBarSeries(numFunction, 100, 110, 120, 130, 120, 117.00, 117.00, 130, 116.99));
// 10% trailing-stop-loss
TrailingStopLossRule rule = new TrailingStopLossRule(closePrice, numOf(10), 3);
assertFalse(rule.isSatisfied(0, null));
assertFalse(rule.isSatisfied(1, tradingRecord));
// Enter at 114
tradingRecord.enter(2, numOf(114), numOf(1));
assertFalse(rule.isSatisfied(2, tradingRecord));
assertFalse(rule.isSatisfied(3, tradingRecord));
assertFalse(rule.isSatisfied(4, tradingRecord));
assertTrue(rule.isSatisfied(5, tradingRecord));
assertFalse(rule.isSatisfied(6, tradingRecord));
// Exit
tradingRecord.exit(7);
// Enter at 128
tradingRecord.enter(7, numOf(128), numOf(1));
assertFalse(rule.isSatisfied(7, tradingRecord));
assertTrue(rule.isSatisfied(8, tradingRecord));
}
|
@Override
public void shutdown() throws NacosException {
NAMING_LOGGER.info("Shutdown naming grpc client proxy for uuid->{}", uuid);
redoService.shutdown();
shutDownAndRemove(uuid);
NotifyCenter.deregisterSubscriber(this);
}
|
@Test
void testShutdown() throws Exception {
client.shutdown();
assertNull(RpcClientFactory.getClient(uuid));
//verify(this.rpcClient, times(1)).shutdown();
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
public void testBuilderDoesNotBuildInvalidRequests() {
assertThatThrownBy(
() ->
RenameTableRequest.builder()
.withSource(null)
.withDestination(TAX_PAID_RENAMED)
.build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid source table identifier: null");
assertThatThrownBy(
() -> RenameTableRequest.builder().withSource(TAX_PAID).withDestination(null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid destination table identifier: null");
}
|
void validateSnapshotsUpdate(
TableMetadata metadata, List<Snapshot> addedSnapshots, List<Snapshot> deletedSnapshots) {
if (metadata.currentSnapshot() == null) {
// no need to verify attempt to delete current snapshot if it doesn't exist
// deletedSnapshots is necessarily empty when original snapshots list is empty
return;
}
if (!addedSnapshots.isEmpty()) {
// latest snapshot can be deleted if new snapshots are added.
return;
}
long latestSnapshotId = metadata.currentSnapshot().snapshotId();
if (!deletedSnapshots.isEmpty()
&& deletedSnapshots.get(deletedSnapshots.size() - 1).snapshotId() == latestSnapshotId) {
throw new InvalidIcebergSnapshotException(
String.format("Cannot delete the latest snapshot %s", latestSnapshotId));
}
}
|
@Test
void testValidateSnapshotsUpdateWithNoSnapshotMetadata() throws IOException {
List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
// No exception since added as well as deleted snapshots are allowed, to support the replication
// use case, which performs a table commit with both added and deleted snapshots.
Assertions.assertDoesNotThrow(
() ->
snapshotInspector.validateSnapshotsUpdate(
noSnapshotsMetadata, testSnapshots.subList(0, 1), testSnapshots.subList(1, 4)));
Assertions.assertDoesNotThrow(
() ->
snapshotInspector.validateSnapshotsUpdate(
noSnapshotsMetadata, testSnapshots, Collections.emptyList()));
Assertions.assertDoesNotThrow(
() ->
snapshotInspector.validateSnapshotsUpdate(
noSnapshotsMetadata, Collections.emptyList(), testSnapshots));
}
|
@Override
public AttributeResource getAttrValue(ResName resName) {
AttributeResource attributeResource = items.get(resName);
// This hack allows us to look up attributes from downstream dependencies, see comment in
// org.robolectric.shadows.ShadowThemeTest.obtainTypedArrayFromDependencyLibrary()
// for an explanation. TODO(jongerrish): Make Robolectric use a more realistic resource merging
// scheme.
if (attributeResource == null
&& !"android".equals(resName.packageName)
&& !"android".equals(packageName)) {
attributeResource = items.get(resName.withPackageName(packageName));
if (attributeResource != null && !"android".equals(attributeResource.contextPackageName)) {
attributeResource =
new AttributeResource(resName, attributeResource.value, resName.packageName);
}
}
return attributeResource;
}
|
@Test
public void getAttrValue_willNotFindFrameworkResourcesWithSameName() {
StyleData styleData =
new StyleData(
"android",
"Theme_Material",
"Theme",
asList(new AttributeResource(androidSearchViewStyle, "android_value", "android")));
assertThat(styleData.getAttrValue(androidSearchViewStyle).value).isEqualTo("android_value");
assertThat(styleData.getAttrValue(myAppSearchViewStyle)).isNull();
assertThat(styleData.getAttrValue(myLibSearchViewStyle)).isNull();
}
|
@POST
@Consumes({ MediaTypeRestconf.APPLICATION_YANG_DATA_JSON, MediaType.APPLICATION_JSON })
@Produces(MediaTypeRestconf.APPLICATION_YANG_DATA_JSON)
@Path("data/{identifier : .+}")
public Response handlePostRequest(@PathParam("identifier") String uriString,
InputStream stream) {
log.debug("handlePostRequest: {}", uriString);
URI uri = uriInfo.getRequestUri();
try {
ObjectNode rootNode = readTreeFromStream(mapper(), stream);
service.runPostOperationOnDataResource(uri, rootNode);
return Response.created(uriInfo.getRequestUri()).build();
} catch (JsonProcessingException e) {
log.error("ERROR: handlePostRequest ", e);
RestconfError error = RestconfError
.builder(RestconfError.ErrorType.APPLICATION, RestconfError.ErrorTag.MALFORMED_MESSAGE)
.errorMessage(e.getMessage()).errorAppTag("handlePostRequest").build();
return Response.status(BAD_REQUEST)
.entity(RestconfError.wrapErrorAsJson(Arrays.asList(error))).build();
} catch (RestconfException e) {
log.error("ERROR: handlePostRequest: {}", e.getMessage());
log.debug("Exception in handlePostRequest:", e);
return Response.status(e.getResponse().getStatus())
.entity(e.toRestconfErrorJson()).build();
} catch (IOException ex) {
log.error("ERROR: handlePostRequest ", ex);
RestconfError error = RestconfError
.builder(RestconfError.ErrorType.APPLICATION, RestconfError.ErrorTag.OPERATION_FAILED)
.errorMessage(ex.getMessage()).errorAppTag("handlePostRequest").build();
return Response.status(INTERNAL_SERVER_ERROR)
.entity(RestconfError.wrapErrorAsJson(Arrays.asList(error))).build();
}
}
|
@Test
public void testHandlePostRequest() {
ObjectMapper mapper = new ObjectMapper();
ObjectNode ietfSystemSubNode = mapper.createObjectNode();
ietfSystemSubNode.put("contact", "Open Networking Foundation");
ietfSystemSubNode.put("hostname", "host1");
ietfSystemSubNode.put("location", "The moon");
ObjectNode ietfSystemNode = mapper.createObjectNode();
ietfSystemNode.put("ietf-system:system", ietfSystemSubNode);
WebTarget wt = target();
Response response = wt.path("/" + DATA_IETF_SYSTEM_SYSTEM)
.request()
.post(Entity.json(ietfSystemNode.toString()));
assertEquals(201, response.getStatus());
}
|
public static boolean deleteQuietly(@Nullable File file) {
if (file == null) {
return false;
}
try {
if (file.isDirectory()) {
deleteDirectory(file);
if (file.exists()) {
LOG.warn("Unable to delete directory '{}'", file);
}
} else {
Files.delete(file.toPath());
}
return true;
} catch (IOException | SecurityException ignored) {
return false;
}
}
|
@Test
public void deleteDirectory_deletes_directory_and_content() throws IOException {
Path target = temporaryFolder.newFolder().toPath();
Path childFile1 = Files.createFile(target.resolve("file1.txt"));
Path childDir1 = Files.createDirectory(target.resolve("subDir1"));
Path childFile2 = Files.createFile(childDir1.resolve("file2.txt"));
Path childDir2 = Files.createDirectory(childDir1.resolve("subDir2"));
assertThat(target).isDirectory();
assertThat(childFile1).isRegularFile();
assertThat(childDir1).isDirectory();
assertThat(childFile2).isRegularFile();
assertThat(childDir2).isDirectory();
FileUtils2.deleteQuietly(target.toFile());
assertThat(target).doesNotExist();
assertThat(childFile1).doesNotExist();
assertThat(childDir1).doesNotExist();
assertThat(childFile2).doesNotExist();
assertThat(childDir2).doesNotExist();
}
|
@Override
public synchronized void close() {
super.close();
notifyAll();
}
|
@Test
public void testMetadataAwaitAfterClose() throws InterruptedException {
long time = 0;
metadata.updateWithCurrentRequestVersion(responseWithCurrentTopics(), false, time);
assertTrue(metadata.timeToNextUpdate(time) > 0, "No update needed.");
metadata.requestUpdate(true);
assertTrue(metadata.timeToNextUpdate(time) > 0, "Still no update needed due to backoff");
time += (long) (refreshBackoffMs * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
assertEquals(0, metadata.timeToNextUpdate(time), "Update needed now that backoff time expired");
String topic = "my-topic";
metadata.close();
Thread t1 = asyncFetch(topic, 500);
t1.join();
assertEquals(KafkaException.class, backgroundError.get().getClass());
assertTrue(backgroundError.get().toString().contains("Requested metadata update after close"));
clearBackgroundError();
}
|
public static boolean seemDuplicates(FeedItem item1, FeedItem item2) {
if (sameAndNotEmpty(item1.getItemIdentifier(), item2.getItemIdentifier())) {
return true;
}
FeedMedia media1 = item1.getMedia();
FeedMedia media2 = item2.getMedia();
if (media1 == null || media2 == null) {
return false;
}
if (sameAndNotEmpty(media1.getStreamUrl(), media2.getStreamUrl())) {
return true;
}
return titlesLookSimilar(item1, item2)
&& datesLookSimilar(item1, item2)
&& durationsLookSimilar(media1, media2)
&& mimeTypeLooksSimilar(media1, media2);
}
|
@Test
public void testOtherAttributes() {
assertTrue(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 10, 5 * MINUTES, "audio/*"),
item("id2", "Title", "example.com/episode2", 10, 5 * MINUTES, "audio/*")));
assertTrue(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 10, 5 * MINUTES, "audio/*"),
item("id2", "Title", "example.com/episode2", 20, 6 * MINUTES, "audio/*")));
assertFalse(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 10, 5 * MINUTES, "audio/*"),
item("id2", "Title", "example.com/episode2", 10, 5 * MINUTES, "video/*")));
assertTrue(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 10, 5 * MINUTES, "audio/mpeg"),
item("id2", "Title", "example.com/episode2", 10, 5 * MINUTES, "audio/mp3")));
assertFalse(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 5 * DAYS, 5 * MINUTES, "audio/*"),
item("id2", "Title", "example.com/episode2", 2 * DAYS, 5 * MINUTES, "audio/*")));
}
|
boolean matchesNonValueField(final Optional<SourceName> source, final ColumnName column) {
if (!source.isPresent()) {
return sourceSchemas.values().stream()
.anyMatch(schema ->
SystemColumns.isPseudoColumn(column) || schema.isKeyColumn(column));
}
final SourceName sourceName = source.get();
final LogicalSchema sourceSchema = sourceSchemas.get(sourceName);
if (sourceSchema == null) {
throw new IllegalArgumentException("Unknown source: " + sourceName);
}
return sourceSchema.isKeyColumn(column) || SystemColumns.isPseudoColumn(column);
}
|
@Test
public void shouldNotMatchOtherFields() {
assertThat(sourceSchemas.matchesNonValueField(Optional.of(ALIAS_2), V2), is(false));
}
|
public static CrypticClue forText(String text)
{
for (CrypticClue clue : CLUES)
{
if (text.equalsIgnoreCase(clue.text) || text.equalsIgnoreCase(clue.questionText))
{
return clue;
}
}
return null;
}
|
@Test
public void forTextEmptyString()
{
assertNull(CrypticClue.forText(""));
}
|
@Override
public Set<KubevirtSecurityGroup> securityGroups() {
return sgStore.securityGroups();
}
|
@Test
public void testGetSecurityGroups() {
createBasicSecurityGroups();
assertEquals("Number of security group did not match",
2, target.securityGroups().size());
}
|
public Path setDistance(double distance) {
this.distance = distance;
return this;
}
|
@Test
public void testFindInstruction() {
BaseGraph g = new BaseGraph.Builder(carManager).create();
NodeAccess na = g.getNodeAccess();
na.setNode(0, 0.0, 0.0);
na.setNode(1, 5.0, 0.0);
na.setNode(2, 5.0, 0.5);
na.setNode(3, 10.0, 0.5);
na.setNode(4, 7.5, 0.25);
na.setNode(5, 5.0, 1.0);
EdgeIteratorState edge1 = g.edge(0, 1).setDistance(1000).set(carAvSpeedEnc, 50.0, 50.0);
edge1.setWayGeometry(Helper.createPointList());
edge1.setKeyValues(Map.of(STREET_NAME, new KValue( "Street 1")));
EdgeIteratorState edge2 = g.edge(1, 2).setDistance(1000).set(carAvSpeedEnc, 50.0, 50.0);
edge2.setWayGeometry(Helper.createPointList());
edge2.setKeyValues(Map.of(STREET_NAME, new KValue( "Street 2")));
EdgeIteratorState edge3 = g.edge(2, 3).setDistance(1000).set(carAvSpeedEnc, 50.0, 50.0);
edge3.setWayGeometry(Helper.createPointList());
edge3.setKeyValues(Map.of(STREET_NAME, new KValue( "Street 3")));
EdgeIteratorState edge4 = g.edge(3, 4).setDistance(500).set(carAvSpeedEnc, 50.0, 50.0);
edge4.setWayGeometry(Helper.createPointList());
edge4.setKeyValues(Map.of(STREET_NAME, new KValue( "Street 4")));
g.edge(1, 5).setDistance(10000).set(carAvSpeedEnc, 50.0, 50.0);
g.edge(2, 5).setDistance(10000).set(carAvSpeedEnc, 50.0, 50.0);
g.edge(3, 5).setDistance(100000).set(carAvSpeedEnc, 50.0, 50.0);
SPTEntry e1 =
new SPTEntry(edge4.getEdge(), 4, 1,
new SPTEntry(edge3.getEdge(), 3, 1,
new SPTEntry(edge2.getEdge(), 2, 1,
new SPTEntry(edge1.getEdge(), 1, 1,
new SPTEntry(0, 1)
))));
Weighting weighting = new SpeedWeighting(carAvSpeedEnc);
Path path = extractPath(g, weighting, e1);
InstructionList il = InstructionsFromEdges.calcInstructions(path, path.graph, weighting, carManager, tr);
assertEquals(5, il.size());
assertEquals(Instruction.CONTINUE_ON_STREET, il.get(0).getSign());
assertEquals(Instruction.TURN_RIGHT, il.get(1).getSign());
assertEquals(Instruction.TURN_LEFT, il.get(2).getSign());
assertEquals(Instruction.TURN_SHARP_LEFT, il.get(3).getSign());
assertEquals(Instruction.FINISH, il.get(4).getSign());
}
|
static byte[] generateRandomPayload(Integer recordSize, List<byte[]> payloadByteList, byte[] payload,
SplittableRandom random, boolean payloadMonotonic, long recordValue) {
if (!payloadByteList.isEmpty()) {
payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
} else if (recordSize != null) {
for (int j = 0; j < payload.length; ++j)
payload[j] = (byte) (random.nextInt(26) + 65);
} else if (payloadMonotonic) {
payload = Long.toString(recordValue).getBytes(StandardCharsets.UTF_8);
} else {
throw new IllegalArgumentException("no payload File Path or record Size or payload-monotonic option provided");
}
return payload;
}
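A hedged usage sketch of the three payload modes selected by the method above (the seed, sizes, and demo wrapper are illustrative, not part of ProducerPerformance):
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.SplittableRandom;
static void demoPayloadModes() {
SplittableRandom random = new SplittableRandom(42);
// mode 1: payloads pre-loaded from a file; one entry is picked at random
List<byte[]> fromFile = Collections.singletonList("hello".getBytes(StandardCharsets.UTF_8));
byte[] p1 = ProducerPerformance.generateRandomPayload(null, fromFile, null, random, false, 0L);
// mode 2: fixed record size; the pre-allocated buffer is filled with A..Z
byte[] p2 = ProducerPerformance.generateRandomPayload(64, Collections.emptyList(), new byte[64], random, false, 0L);
// mode 3: monotonic; the payload is the record counter rendered as UTF-8 text
byte[] p3 = ProducerPerformance.generateRandomPayload(null, Collections.emptyList(), null, random, true, 7L);
}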
|
@Test
public void testGenerateRandomPayloadByPayloadFile() {
Integer recordSize = null;
String inputString = "Hello Kafka";
byte[] byteArray = inputString.getBytes(StandardCharsets.UTF_8);
List<byte[]> payloadByteList = new ArrayList<>();
payloadByteList.add(byteArray);
byte[] payload = null;
SplittableRandom random = new SplittableRandom(0);
payload = ProducerPerformance.generateRandomPayload(recordSize, payloadByteList, payload, random, false, 0L);
assertEquals(inputString, new String(payload));
}
|
@Override
public Object clone() {
try {
ValueString retval = (ValueString) super.clone();
return retval;
} catch ( CloneNotSupportedException e ) {
return null;
}
}
|
@Test
public void testClone() {
ValueString vs = new ValueString( "Boden" );
ValueString vs1 = (ValueString) vs.clone();
assertFalse( vs.equals( vs1 ) ); // not the same object; equals is not implemented
assertTrue( vs != vs1 ); // not the same object
assertEquals( vs.getString(), vs1.getString() );
}
|
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
}
|
@Test
public void testThrottlingDiscovery() throws Exception {
// create 10 splits
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 10, 1);
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
// discover one snapshot at a time
.maxPlanningSnapshotCount(1)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register reader-2, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// add splits[0] to the planner for next discovery
splitPlanner.addSplits(Arrays.asList(splits.get(0)));
enumeratorContext.triggerAllActions();
// because the discovered split was assigned to the reader, pending splits should be empty
assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty();
// split assignment to reader-2 should contain splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// add the remaining 9 splits (one for every snapshot)
// run discovery cycles while reader-2 is still processing splits[0]
for (int i = 1; i < 10; ++i) {
splitPlanner.addSplits(Arrays.asList(splits.get(i)));
enumeratorContext.triggerAllActions();
}
// can only discover up to 3 snapshots/splits
assertThat(enumerator.snapshotState(2).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// now reader-2 finished splits[0]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(0).splitId())));
enumeratorContext.triggerAllActions();
// still 3 pending splits: after splits[1] was assigned to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(3).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// run 3 more split discovery cycles
for (int i = 0; i < 3; ++i) {
enumeratorContext.triggerAllActions();
}
// no more splits are discovered due to throttling
assertThat(enumerator.snapshotState(4).pendingSplits()).hasSize(3);
// split assignment to reader-2 should still be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// now reader-2 finished splits[1]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(1).splitId())));
enumeratorContext.triggerAllActions();
// still 3 pending splits: after splits[2] was assigned to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(5).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 3)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 3));
}
|
public List<String> build() {
switch (dialect.getId()) {
case PostgreSql.ID:
StringBuilder sql = new StringBuilder().append(ALTER_TABLE).append(tableName).append(" ");
dropColumns(sql, "DROP COLUMN ", columns);
return Collections.singletonList(sql.toString());
case MsSql.ID:
return Collections.singletonList(getMsSQLStatement(columns));
case Oracle.ID:
return Collections.singletonList(getOracleStatement());
case H2.ID:
return Arrays.stream(columns).map(this::getMsSQLStatement).toList();
default:
throw new IllegalStateException(String.format("Unsupported database '%s'", dialect.getId()));
}
}
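A hedged sketch of the per-dialect output, inferred from the branches above and the MsSql test below; the exact PostgreSQL and H2 strings are inferences from dropColumns() and getMsSQLStatement(), not verified output:
// PostgreSQL: a single ALTER TABLE with one DROP COLUMN clause per column (inferred)
// new DropColumnsBuilder(new PostgreSql(), "issues", "date_in_ms", "name").build()
// -> ["ALTER TABLE issues DROP COLUMN date_in_ms, DROP COLUMN name"]
// H2: one ALTER TABLE statement per column (inferred)
// new DropColumnsBuilder(new H2(), "issues", "date_in_ms", "name").build()
// -> ["ALTER TABLE issues DROP COLUMN date_in_ms", "ALTER TABLE issues DROP COLUMN name"]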
|
@Test
public void drop_columns_on_mssql() {
assertThat(new DropColumnsBuilder(new MsSql(), "issues", "date_in_ms", "name").build())
.containsOnly("ALTER TABLE issues DROP COLUMN date_in_ms, name");
}
|
@Override
protected FetchData getFetchData(@NonNull BlueUrlTokenizer blueUrl) {
if (!blueUrl.lastPartIs(BlueUrlTokenizer.UrlPart.PIPELINE_RUN_DETAIL_TAB, "changes")) {
// Not interested in it
return null;
}
BluePipeline pipeline = getPipeline(blueUrl);
if (pipeline == null) {
// Not interested in it
return null;
}
// It's a pipeline page. Let's prefetch the pipeline activity and add them to the page,
// saving the frontend the overhead of requesting them.
Container<BlueRun> activitiesContainer = pipeline.getRuns();
if (activitiesContainer == null) {
return null;
}
BlueRun run = activitiesContainer.get(blueUrl.getPart(BlueUrlTokenizer.UrlPart.PIPELINE_RUN_DETAIL_ID));
Container<BlueChangeSetEntry> containerChangeSets = run.getChangeSet();
return getFetchData(containerChangeSets);
}
|
@Test
public void prefetchUrlIsRight() throws IOException, ExecutionException, InterruptedException {
FreeStyleProject project = j.createFreeStyleProject("project");
Run run = j.waitForCompletion(project.scheduleBuild2(0).waitForStart());
FreeStylePipeline freeStylePipeline = (FreeStylePipeline) BluePipelineFactory.resolve(project);
assertNotNull(freeStylePipeline);
BlueRun blueRun = freeStylePipeline.getLatestRun();
assertNotNull(blueRun);
BlueOrganization organization = OrganizationFactory.getInstance().getContainingOrg(Jenkins.get());
ChangeSetContainerImpl container = new ChangeSetContainerImpl(
organization,
blueRun,
run
);
BlueRunChangesetPreloader preloader = new BlueRunChangesetPreloader();
RESTFetchPreloader.FetchData fetchData = preloader.getFetchData(container);
assertEquals("/blue/rest/organizations/jenkins/pipelines/project/runs/1/changeSet/?start=0&limit=101", fetchData.getRestUrl());
}
|
@Override
public long stopDebug() {
return doStopWithoutMessage(LoggerLevel.DEBUG);
}
|
@Test
public void fail_if_stop_without_start() {
try {
underTest.stopDebug("foo");
fail();
} catch (IllegalStateException e) {
assertThat(e).hasMessage("Profiler must be started before being stopped");
}
}
|
public static String getRemoteIp(HttpServletRequest request) {
String xForwardedFor = request.getHeader(X_FORWARDED_FOR);
if (!StringUtils.isBlank(xForwardedFor)) {
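// X-Forwarded-For may carry a proxy chain such as "client, proxy1, proxy2"; the first entry is the originating client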
return xForwardedFor.split(X_FORWARDED_FOR_SPLIT_SYMBOL)[0].trim();
}
String nginxHeader = request.getHeader(X_REAL_IP);
return StringUtils.isBlank(nginxHeader) ? request.getRemoteAddr() : nginxHeader;
}
|
@Test
void testGetRemoteIp() {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
assertEquals("127.0.0.1", WebUtils.getRemoteIp(request));
Mockito.when(request.getHeader(eq(X_REAL_IP))).thenReturn("127.0.0.2");
assertEquals("127.0.0.2", WebUtils.getRemoteIp(request));
Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn("127.0.0.3");
assertEquals("127.0.0.3", WebUtils.getRemoteIp(request));
Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn("127.0.0.3, 127.0.0.4");
assertEquals("127.0.0.3", WebUtils.getRemoteIp(request));
Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn("");
assertEquals("127.0.0.2", WebUtils.getRemoteIp(request));
Mockito.when(request.getHeader(eq(X_REAL_IP))).thenReturn("");
assertEquals("127.0.0.1", WebUtils.getRemoteIp(request));
}
|
@Override
public void setStoragePolicy(Path src, String policyName) throws IOException {
super.setStoragePolicy(fullPath(src), policyName);
}
|
@Test(timeout = 30000)
public void testSetStoragePolicy() throws Exception {
Path storagePolicyPath = new Path("/storagePolicy");
Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.setStoragePolicy(storagePolicyPath, "HOT");
verify(mockFs).setStoragePolicy(chRootedStoragePolicyPath, "HOT");
}
|
@Udf
public <T> String join(
@UdfParameter(description = "the array to join using the default delimiter '"
+ DEFAULT_DELIMITER + "'") final List<T> array
) {
return join(array, DEFAULT_DELIMITER);
}
|
@Test
public void shouldReturnEmptyStringForEmptyArrays() {
assertThat(arrayJoinUDF.join(Collections.emptyList()).isEmpty(), is(true));
assertThat(arrayJoinUDF.join(Collections.emptyList(), CUSTOM_DELIMITER).isEmpty(), is(true));
}
|
public static long ordinalOf(double value) {
if (value == Double.POSITIVE_INFINITY) {
return 0xFFFFFFFFFFFFFFFFL;
}
if (value == Double.NEGATIVE_INFINITY || Double.isNaN(value)) {
return 0;
}
long bits = Double.doubleToLongBits(value);
// need negatives to come before positives
if ((bits & Long.MIN_VALUE) == Long.MIN_VALUE) {
// conflate 0/-0, or reverse order of negatives
bits = bits == Long.MIN_VALUE ? Long.MIN_VALUE : ~bits;
} else {
// positives after negatives
bits ^= Long.MIN_VALUE;
}
return bits;
}
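A minimal sketch of the ordering property this mapping provides: Long.compareUnsigned over the ordinals matches the natural order of doubles, with 0.0 and -0.0 conflated (sample values and the demo wrapper are illustrative only):
static void demoOrdering() {
double[] ascending = {Double.NEGATIVE_INFINITY, -1e300, -1.0, -0.0, 0.0, 1.0, 1e300, Double.POSITIVE_INFINITY};
for (int i = 0; i + 1 < ascending.length; i++) {
long a = FPOrdering.ordinalOf(ascending[i]);
long b = FPOrdering.ordinalOf(ascending[i + 1]);
// <= 0, not < 0, because -0.0 and 0.0 map to the same ordinal
if (Long.compareUnsigned(a, b) > 0) {
throw new AssertionError("ordering violated at index " + i);
}
}
}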
|
@Test
public void testZeroes() {
assertEquals(FPOrdering.ordinalOf(0D), 0x8000000000000000L);
assertEquals(FPOrdering.ordinalOf(0F), 0x80000000L);
assertEquals(FPOrdering.ordinalOf(-0D), 0x8000000000000000L);
assertEquals(FPOrdering.ordinalOf(-0F), 0x80000000L);
assertTrue(Long.compareUnsigned(FPOrdering.ordinalOf(0D),
FPOrdering.ordinalOf(-1D)) > 0);
assertTrue(Long.compareUnsigned(FPOrdering.ordinalOf(0F),
FPOrdering.ordinalOf(-1F)) > 0);
assertTrue(Long.compareUnsigned(FPOrdering.ordinalOf(0D),
FPOrdering.ordinalOf(-1e-200D)) > 0);
assertTrue(Long.compareUnsigned(FPOrdering.ordinalOf(0F),
FPOrdering.ordinalOf(-1e-20F)) > 0);
}
|
public String getSource() {
return source;
}
|
@Test
public void testExchangeDone() throws Exception {
// START SNIPPET: e2
// register the NotificationListener
ObjectName on = getCamelObjectName(TYPE_EVENT_NOTIFIER, "JmxEventNotifier");
MyNotificationListener listener = new MyNotificationListener();
context.getManagementStrategy().getManagementAgent().getMBeanServer().addNotificationListener(on,
listener,
new NotificationFilter() {
private static final long serialVersionUID = 1L;
public boolean isNotificationEnabled(Notification notification) {
return notification.getSource().equals("MyCamel");
}
}, null);
// END SNIPPET: e2
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
assertEquals(8, listener.getEventCounter(), "Get a wrong number of events");
context.stop();
}
|
@Override
public void accept(final MeterEntity entity, final BucketedValues value) {
if (dataset.size() > 0) {
if (!value.isCompatible(dataset)) {
throw new IllegalArgumentException(
"Incompatible BucketedValues [" + value + "] for current HistogramFunction[" + dataset + "]");
}
}
this.entityId = entity.id();
final long[] values = value.getValues();
for (int i = 0; i < values.length; i++) {
final long bucket = value.getBuckets()[i];
String bucketName = bucket == Long.MIN_VALUE ? Bucket.INFINITE_NEGATIVE : String.valueOf(bucket);
final long bucketValue = values[i];
dataset.valueAccumulation(bucketName, bucketValue);
}
}
|
@Test
public void testFunctionWithInfinite() {
HistogramFunctionInst inst = new HistogramFunctionInst();
inst.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
new BucketedValues(
INFINITE_BUCKETS, new long[] {
0,
4,
10,
10
})
);
inst.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
new BucketedValues(
INFINITE_BUCKETS, new long[] {
1,
2,
3,
4
})
);
Assertions.assertEquals(1L, inst.getDataset().get(Bucket.INFINITE_NEGATIVE).longValue());
}
|
public static String serialize(Object obj) throws JsonProcessingException {
return MAPPER.writeValueAsString(obj);
}
|
@Test
void serializeMeterWithoutHost() throws JsonProcessingException {
DSeries series = new DSeries();
series.add(new DMeter(new TestMeter(0, 1), METRIC, null, tags, () -> MOCKED_SYSTEM_MILLIS));
assertSerialization(
DatadogHttpClient.serialize(series),
new MetricAssertion(MetricType.gauge, false, "1.0"));
}
|
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final S3Object object = this.getDetails(file, status);
final DelayedHttpEntityCallable<StorageObject> command = new DelayedHttpEntityCallable<StorageObject>(file) {
@Override
public StorageObject call(final HttpEntity entity) throws BackgroundException {
try {
final RequestEntityRestStorageService client = session.getClient();
final Path bucket = containerService.getContainer(file);
client.putObjectWithRequestEntityImpl(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object, entity, status.getParameters());
if(log.isDebugEnabled()) {
log.debug(String.format("Saved object %s with checksum %s", file, object.getETag()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
}
return object;
}
@Override
public long getContentLength() {
return status.getLength();
}
};
return this.write(file, status, command);
}
|
@Test
public void testWriteCustomTimestamp() throws Exception {
final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus().withModified(1630305150672L).withCreated(1695159781972L);
final byte[] content = RandomUtils.nextBytes(1033);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
status.setLength(content.length);
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final HttpResponseOutputStream<StorageObject> out = new S3WriteFeature(session, acl).write(test, status, new DisabledConnectionCallback());
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
test.withAttributes(new S3AttributesAdapter(session.getHost()).toAttributes(out.getStatus()));
assertTrue(new S3FindFeature(session, acl).find(test));
final PathAttributes attributes = new S3AttributesFinderFeature(session, acl).find(test);
assertEquals(1630305150000L, attributes.getModificationDate());
assertEquals(1695159781000L, attributes.getCreationDate());
assertEquals(1630305150000L, new S3ObjectListService(session, acl, true).list(container,
new DisabledListProgressListener()).find(new DefaultPathPredicate(test)).attributes().getModificationDate());
assertEquals(1630305150000L, new S3VersionedObjectListService(session, acl, 50, true).list(container,
new DisabledListProgressListener()).find(new DefaultPathPredicate(test)).attributes().getModificationDate());
assertNotEquals(1630305150000L, new S3ObjectListService(session, acl, false).list(container,
new DisabledListProgressListener()).find(new SimplePathPredicate(test)).attributes().getModificationDate());
assertNotEquals(1630305150000L, new S3VersionedObjectListService(session, acl, 50, false).list(container,
new DisabledListProgressListener()).find(new SimplePathPredicate(test)).attributes().getModificationDate());
final Path moved = new S3MoveFeature(session, acl).move(test, new Path(container,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertEquals(1630305150000L, new S3AttributesFinderFeature(session, acl).find(moved).getModificationDate());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(moved), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public ContentInfo verify(ContentInfo signedMessage, Date date) {
final SignedData signedData = SignedData.getInstance(signedMessage.getContent());
final X509Certificate cert = certificate(signedData);
certificateVerifier.verify(cert, date);
final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded());
try {
final CMSSignedData cms = new CMSSignedData(signedMessage);
cms.verifySignatures(signerId -> {
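// a signature is only accepted when its SignerId matches the issuer and serial of the embedded certificate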
if (!name.equals(signerId.getIssuer())) {
throw new VerificationException("Issuer does not match certificate");
}
if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) {
throw new VerificationException("Serial number does not match certificate");
}
return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert);
});
} catch (CMSException e) {
throw new VerificationException("Could not verify CMS", e);
}
return signedData.getEncapContentInfo();
}
|
@Test
public void verifyValidPcaRvigCms() throws Exception {
final ContentInfo signedMessage = ContentInfo.getInstance(fixture("pca-rvig"));
final ContentInfo message = new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
assertEquals(LdsSecurityObject.OID, message.getContentType().getId());
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", Hex.toHexString(
DigestUtils.digest("SHA1").digest(((ASN1OctetString) message.getContent()).getOctets())
));
}
|
public List<Modification> parse(String svnLogOutput, String path, SAXBuilder builder) {
try {
Document document = builder.build(new StringReader(svnLogOutput));
return parseDOMTree(document, path);
} catch (Exception e) {
throw bomb("Unable to parse svn log output: " + svnLogOutput, e);
}
}
|
@Test
public void shouldGetAllModifiedFilesUnderRootPath() {
SvnLogXmlParser parser = new SvnLogXmlParser();
List<Modification> materialRevisions = parser.parse(MULTIPLE_FILES, "", new SAXBuilder());
Modification mod = materialRevisions.get(0);
List<ModifiedFile> files = mod.getModifiedFiles();
assertThat(files.size()).isEqualTo(2);
ModifiedFile file = files.get(0);
assertThat(file.getFileName()).isEqualTo("/trunk/revision3.txt");
assertThat(file.getAction()).isEqualTo(ModifiedAction.added);
file = files.get(1);
assertThat(file.getFileName()).isEqualTo("/branch/1.1/readme.txt");
assertThat(file.getAction()).isEqualTo(ModifiedAction.deleted);
}
|
static boolean isTableUsingInstancePoolAndReplicaGroup(@Nonnull TableConfig tableConfig) {
boolean status = true;
Map<String, InstanceAssignmentConfig> instanceAssignmentConfigMap = tableConfig.getInstanceAssignmentConfigMap();
if (instanceAssignmentConfigMap != null) {
for (InstanceAssignmentConfig instanceAssignmentConfig : instanceAssignmentConfigMap.values()) {
if (instanceAssignmentConfig != null) {
status &= (instanceAssignmentConfig.getTagPoolConfig().isPoolBased()
&& instanceAssignmentConfig.getReplicaGroupPartitionConfig().isReplicaGroupBased());
} else {
status = false;
}
}
} else {
status = false;
}
return status;
}
|
@Test
public void testValidIGnRGOfflineTable() {
InstanceAssignmentConfig config =
new InstanceAssignmentConfig(new InstanceTagPoolConfig("DefaultTenant", true, 0, null), null,
new InstanceReplicaGroupPartitionConfig(true, 0, 0, 0, 0, 0, false, null), null, false);
TableConfig tableConfig =
new TableConfig("table", TableType.OFFLINE.name(), new SegmentsValidationAndRetentionConfig(),
new TenantConfig("DefaultTenant", "DefaultTenant", null), new IndexingConfig(), new TableCustomConfig(null),
null, null, null, null, Map.of("OFFLINE", config), null, null, null, null, null, null, false, null, null,
null);
Assert.assertTrue(TableConfigUtils.isTableUsingInstancePoolAndReplicaGroup(tableConfig));
}
|
@Override
public ShardingRule build(final ShardingRuleConfiguration ruleConfig, final String databaseName, final DatabaseType protocolType,
final ResourceMetaData resourceMetaData, final Collection<ShardingSphereRule> builtRules, final ComputeNodeInstanceContext computeNodeInstanceContext) {
ShardingSpherePreconditions.checkNotEmpty(resourceMetaData.getDataSourceMap(), () -> new MissingRequiredShardingConfigurationException("Data source", databaseName));
return new ShardingRule(ruleConfig, resourceMetaData.getDataSourceMap(), computeNodeInstanceContext);
}
|
@SuppressWarnings("unchecked")
@Test
void assertBuild() {
assertThat(builder.build(ruleConfig, "sharding_db", new MySQLDatabaseType(),
mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), Collections.emptyList(), mock(ComputeNodeInstanceContext.class)), instanceOf(ShardingRule.class));
}
|
static void validateJobParameters(List<String> jobParameters) {
// Check that parameter is not null
if (Objects.isNull(jobParameters)) {
throw new JetException("jobParameters can not be null");
}
}
|
@Test
public void testValidateJobParameters() {
assertThatThrownBy(() -> JarOnClientValidator.validateJobParameters(null))
.isInstanceOf(JetException.class)
.hasMessageContaining("jobParameters can not be null");
}
|
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
new EueAttributesFinderFeature(session, fileid).find(file);
return true;
}
catch(NotfoundException e) {
return false;
}
}
|
@Test
public void testFindDirectory() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path folder = new EueDirectoryFeature(session, fileid).mkdir(
new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new EueFindFeature(session, fileid).find(folder));
assertFalse(new EueFindFeature(session, fileid).find(new Path(folder.getAbsolute(), EnumSet.of(Path.Type.file))));
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public String toString() {
return "ID: " + this.id +
"\tSequence: " + (this.sequenceNumber + 1) + // +1 for readability: 1/2 not 0/2
"/" + this.sequenceCount +
"\tArrival: " + this.arrival +
"\tData size: " + this.payload.readableBytes();
}
|
@Test
public void testToString() throws Exception {
assertNotNull(buildChunk().toString());
}
|
public Optional<Boolean> getHelp() {
return Optional.ofNullable(mHelp);
}
|
@Test
public void testGetHelp() {
mJCommander.parse("--help");
assertEquals(Optional.of(true), mOptions.getHelp());
}
|
@Operation(summary = "create", description = "CREATE_WORKFLOWS_NOTES")
@PostMapping(consumes = {"application/json"})
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_PROCESS_DEFINITION_ERROR)
public Result<ProcessDefinition> createWorkflow(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestBody WorkflowCreateRequest workflowCreateRequest) {
ProcessDefinition processDefinition =
processDefinitionService.createSingleProcessDefinition(loginUser, workflowCreateRequest);
return Result.success(processDefinition);
}
|
@Test
public void testCreateWorkflow() {
WorkflowCreateRequest workflowCreateRequest = new WorkflowCreateRequest();
workflowCreateRequest.setName(name);
workflowCreateRequest.setReleaseState(releaseState);
workflowCreateRequest.setProjectCode(projectCode);
workflowCreateRequest.setDescription(description);
workflowCreateRequest.setGlobalParams(EMPTY_STRING);
workflowCreateRequest.setTimeout(timeout);
workflowCreateRequest.setWarningGroupId(warningGroupId);
workflowCreateRequest.setExecutionType(executionType);
Mockito.when(processDefinitionService.createSingleProcessDefinition(user, workflowCreateRequest))
.thenReturn(this.getProcessDefinition(name));
Result<ProcessDefinition> resourceResponse = workflowV2Controller.createWorkflow(user, workflowCreateRequest);
Assertions.assertEquals(this.getProcessDefinition(name), resourceResponse.getData());
}
|
public Node parse() throws ScanException {
if (tokenList == null || tokenList.isEmpty())
return null;
return E();
}
|
@Test
public void literalWithTwoAccolades() throws ScanException {
Tokenizer tokenizer = new Tokenizer("%x{y} %a{b} c");
Parser parser = new Parser(tokenizer.tokenize());
Node node = parser.parse();
Node witness = new Node(Node.Type.LITERAL, "%x");
Node t = witness.next = new Node(Node.Type.LITERAL, "{");
t.next = new Node(Node.Type.LITERAL, "y");
t = t.next;
t.next = new Node(Node.Type.LITERAL, "}");
t = t.next;
t.next = new Node(Node.Type.LITERAL, " %a");
t = t.next;
t.next = new Node(Node.Type.LITERAL, "{");
t = t.next;
t.next = new Node(Node.Type.LITERAL, "b");
t = t.next;
t.next = new Node(Node.Type.LITERAL, "}");
t = t.next;
t.next = new Node(Node.Type.LITERAL, " c");
node.dump();
System.out.println("");
assertEquals(witness, node);
}
|
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
public void isEqualTo_WithoutToleranceParameter_Success() {
assertThat(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY, 0.0, -0.0))
.isEqualTo(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY, 0.0, -0.0));
}
|
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
throws Exception {
Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
if (columnOperationsMap.isEmpty()) {
return;
}
for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
String column = entry.getKey();
List<Operation> operations = entry.getValue();
for (Operation operation : operations) {
switch (operation) {
case DISABLE_FORWARD_INDEX:
// Deletion of the forward index will be handled outside the index handler to ensure that other index
// handlers that need the forward index to construct their own indexes will have it available.
_tmpForwardIndexColumns.add(column);
break;
case ENABLE_FORWARD_INDEX:
ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
if (columnMetadata.hasDictionary()) {
if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(String.format(
"Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
}
} else {
if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(
String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
column));
}
}
break;
case DISABLE_DICTIONARY:
Set<String> newForwardIndexDisabledColumns =
FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
_fieldIndexConfigs);
if (newForwardIndexDisabledColumns.contains(column)) {
removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(
String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
}
} else {
disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
}
break;
case ENABLE_DICTIONARY:
createDictBasedForwardIndex(column, segmentWriter);
if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
}
break;
case CHANGE_INDEX_COMPRESSION_TYPE:
rewriteForwardIndexForCompressionChange(column, segmentWriter);
break;
default:
throw new IllegalStateException("Unsupported operation for column " + column);
}
}
}
}
|
@Test
public void testEnableForwardIndexForDictionaryDisabledColumns()
throws Exception {
Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
SegmentDirectory segmentLocalFSDirectory =
new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
forwardIndexDisabledColumns.remove(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER);
forwardIndexDisabledColumns.remove(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER);
indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
validateIndexMap(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER, false, true);
validateIndexesForForwardIndexDisabledColumns(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER);
validateIndexMap(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER, false, true);
validateIndexesForForwardIndexDisabledColumns(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER);
ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
fwdIndexHandler.updateIndices(writer);
fwdIndexHandler.postUpdateIndicesCleanup(writer);
// Tear down before validation, because columns.psf and index map cleanup happen at segmentDirectory.close()
segmentLocalFSDirectory.close();
// Validate nothing has changed
validateIndexMap(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER, false, true);
validateIndexesForForwardIndexDisabledColumns(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER);
validateIndexMap(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER, false, true);
validateIndexesForForwardIndexDisabledColumns(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER);
// In column metadata, nothing should change.
ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER);
validateMetadataProperties(DIM_RAW_SV_FORWARD_INDEX_DISABLED_INTEGER, metadata.hasDictionary(),
metadata.getColumnMaxLength(), metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(),
metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(),
metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(),
false);
metadata = existingSegmentMetadata.getColumnMetadataFor(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER);
validateMetadataProperties(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER, metadata.hasDictionary(),
metadata.getColumnMaxLength(), metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(),
metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(),
metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(),
false);
}
|
@Override
public void close() throws IOException {
if (mClosed.getAndSet(true)) {
LOG.warn("OBSOutputStream is already closed");
return;
}
mLocalOutputStream.close();
try {
BufferedInputStream in = new BufferedInputStream(
new FileInputStream(mFile));
ObjectMetadata objMeta = new ObjectMetadata();
objMeta.setContentLength(mFile.length());
if (mHash != null) {
byte[] hashBytes = mHash.digest();
objMeta.setContentMd5(new String(Base64.encodeBase64(hashBytes)));
}
mContentHash = mObsClient.putObject(mBucketName, mKey, in, objMeta).getEtag();
} catch (ObsException e) {
LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath());
throw new IOException(e);
} finally {
// Delete the temporary file on the local machine whether the OBS client completed the
// upload or the upload failed.
if (!mFile.delete()) {
LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
}
}
}
|
@Test
@PrepareForTest(OBSOutputStream.class)
public void testConstructor() throws Exception {
PowerMockito.whenNew(File.class).withArguments(Mockito.anyString()).thenReturn(mFile);
String errorMessage = "protocol doesn't support output";
PowerMockito.whenNew(FileOutputStream.class).withArguments(mFile)
.thenThrow(new IOException(errorMessage));
mThrown.expect(IOException.class);
mThrown.expectMessage(errorMessage);
new OBSOutputStream("testBucketName", "testKey", mObsClient,
sConf.getList(PropertyKey.TMP_DIRS)).close();
}
|
public static <K, V> Read<K, V> read() {
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
}
|
@Test
public void testUnboundedSourceWithPattern() {
int numElements = 1000;
List<String> topics =
ImmutableList.of(
"best", "gest", "hest", "jest", "lest", "nest", "pest", "rest", "test", "vest", "west",
"zest");
String bootStrapServer = "none";
KafkaIO.Read<byte[], Long> reader =
KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPattern("[a-z]est")
.withConsumerFactoryFn(
new ConsumerFactoryFn(topics, 10, numElements, OffsetResetStrategy.EARLIEST))
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements);
PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());
addCountingAsserts(input, numElements);
PipelineResult result = p.run();
String[] expect =
topics.stream()
.map(topic -> String.format("kafka:%s.%s", bootStrapServer, topic))
.toArray(String[]::new);
assertThat(Lineage.query(result.metrics(), Lineage.Type.SOURCE), containsInAnyOrder(expect));
}
|
private BatchOperationResult suspendAll(RestApi.RequestContext context) {
String parentHostnameString = context.pathParameters().getStringOrThrow("hostname");
List<String> hostnamesAsStrings = context.queryParameters().getStringList("hostname");
HostName parentHostname = new HostName(parentHostnameString);
List<HostName> hostnames = hostnamesAsStrings.stream().map(HostName::new).toList();
try {
orchestrator.suspendAll(parentHostname, hostnames);
} catch (BatchHostStateChangeDeniedException | UncheckedTimeoutException e) {
log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname);
throw createRestApiException(e.getMessage(), Response.Status.CONFLICT, e);
} catch (BatchHostNameNotFoundException e) {
log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname);
// Note that we're returning BAD_REQUEST instead of NOT_FOUND because the resource identified
// by the URL path was found; what could not be found was one of the hostnames in the request.
throw createRestApiException(e.getMessage(), Response.Status.BAD_REQUEST, e);
} catch (BatchInternalErrorException e) {
log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname);
throw createRestApiException(e.getMessage(), Response.Status.INTERNAL_SERVER_ERROR, e);
}
log.log(Level.FINE, () -> "Suspended " + hostnames + " with parent " + parentHostname);
return BatchOperationResult.successResult();
}
|
@Test
void throws_409_on_suspendAll_timeout() throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
Orchestrator orchestrator = mock(Orchestrator.class);
doThrow(new UncheckedTimeoutException("Timeout Message")).when(orchestrator).suspendAll(any(), any());
RestApiTestDriver testDriver = createTestDriver(orchestrator);
HttpResponse response = executeSuspendAllRequest(testDriver, "parenthost", List.of("h1", "h2", "h3"));
assertEquals(409, response.getStatus());
}
|
@Udf
public Long trunc(@UdfParameter final Long val) {
return val;
}
|
@Test
public void shouldTruncateDoubleWithDecimalPlacesNegative() {
assertThat(udf.trunc(-1.0d, 0), is(-1.0d));
assertThat(udf.trunc(-1.1d, 0), is(-1.0d));
assertThat(udf.trunc(-1.5d, 0), is(-1.0d));
assertThat(udf.trunc(-1.75d, 0), is(-1.0d));
assertThat(udf.trunc(-100.1d, 0), is(-100.0d));
assertThat(udf.trunc(-100.5d, 0), is(-100.0d));
assertThat(udf.trunc(-100.75d, 0), is(-100.0d));
assertThat(udf.trunc(-100.10d, 1), is(-100.1d));
assertThat(udf.trunc(-100.11d, 1), is(-100.1d));
assertThat(udf.trunc(-100.15d, 1), is(-100.1d));
assertThat(udf.trunc(-100.17d, 1), is(-100.1d));
assertThat(udf.trunc(-100.110d, 2), is(-100.11d));
assertThat(udf.trunc(-100.111d, 2), is(-100.11d));
assertThat(udf.trunc(-100.115d, 2), is(-100.11d));
assertThat(udf.trunc(-100.117d, 2), is(-100.11d));
assertThat(udf.trunc(-100.1110d, 3), is(-100.111d));
assertThat(udf.trunc(-100.1111d, 3), is(-100.111d));
assertThat(udf.trunc(-100.1115d, 3), is(-100.111d));
assertThat(udf.trunc(-100.1117d, 3), is(-100.111d));
assertThat(udf.trunc(-1.0d, 3), is(-1.0d));
assertThat(udf.trunc(-1.1d, 3), is(-1.1d));
assertThat(udf.trunc(-1.5d, 3), is(-1.5d));
assertThat(udf.trunc(-1.7d, 3), is(-1.7d));
assertThat(udf.trunc(-12345.67d, -1), is(-12340d));
assertThat(udf.trunc(-12345.67d, -2), is(-12300d));
assertThat(udf.trunc(-12345.67d, -3), is(-12000d));
assertThat(udf.trunc(-12345.67d, -4), is(-10000d));
assertThat(udf.trunc(-12345.67d, -5), is(0d));
}
|
public static FieldType fieldTypeForJavaType(TypeDescriptor typeDescriptor) {
// TODO: Convert for registered logical types.
if (typeDescriptor.isArray()
|| typeDescriptor.isSubtypeOf(TypeDescriptor.of(Collection.class))) {
return getArrayFieldType(typeDescriptor);
} else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Map.class))) {
return getMapFieldType(typeDescriptor);
} else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Iterable.class))) {
return getIterableFieldType(typeDescriptor);
} else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Row.class))) {
throw new IllegalArgumentException(
"Cannot automatically determine a field type from a Row class"
+ " as we cannot determine the schema. You should set a field type explicitly.");
} else {
TypeName typeName = PRIMITIVE_MAPPING.inverse().get(typeDescriptor);
if (typeName == null) {
throw new RuntimeException("Couldn't find field type for " + typeDescriptor);
}
return FieldType.of(typeName);
}
}
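A hedged usage sketch for the mapping above; TypeDescriptors.lists/maps and FieldType.array/map are the Beam helpers assumed here, and the INT32 result for Integer follows the unshown PRIMITIVE_MAPPING:
FieldType listOfStrings = FieldTypeDescriptors.fieldTypeForJavaType(
TypeDescriptors.lists(TypeDescriptors.strings())); // FieldType.array(FieldType.STRING)
FieldType stringToInt = FieldTypeDescriptors.fieldTypeForJavaType(
TypeDescriptors.maps(TypeDescriptors.strings(), TypeDescriptors.integers()));
// presumably FieldType.map(FieldType.STRING, FieldType.INT32)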
|
@Test
public void testArrayTypeToFieldType() {
assertEquals(
FieldType.array(FieldType.STRING),
FieldTypeDescriptors.fieldTypeForJavaType(
TypeDescriptors.lists(TypeDescriptors.strings())));
assertEquals(
FieldType.array(FieldType.array(FieldType.STRING)),
FieldTypeDescriptors.fieldTypeForJavaType(
TypeDescriptors.lists(TypeDescriptors.lists(TypeDescriptors.strings()))));
assertEquals(
FieldType.array(FieldType.STRING),
FieldTypeDescriptors.fieldTypeForJavaType(
TypeDescriptor.of(new ArrayList<String>() {}.getClass())));
}
|
public void succeededGetResourceProfileRetrieved(long duration) {
totalSucceededGetResourceProfileRetrieved.add(duration);
getResourceProfileLatency.add(duration);
}
|
@Test
public void testSucceededGetResourceProfileRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetResourceProfileRetrieved();
goodSubCluster.getResourceProfileRetrieved(150);
Assert.assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetResourceProfileRetrieved());
Assert.assertEquals(150,
metrics.getLatencySucceededGetResourceProfileRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getResourceProfileRetrieved(300);
Assert.assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetResourceProfileRetrieved());
Assert.assertEquals(225,
metrics.getLatencySucceededGetResourceProfileRetrieved(), ASSERT_DOUBLE_DELTA);
}
|
@Override
public boolean hasNext() {
return innerIterator.hasNext();
}
|
@Test
public void shouldForwardHasNext() {
when(mockedKeyValueIterator.hasNext()).thenReturn(true).thenReturn(false);
assertTrue(keyValueIteratorFacade.hasNext());
assertFalse(keyValueIteratorFacade.hasNext());
}
|
public boolean toBoolean(String name) {
return toBoolean(name, false);
}
|
@Test
public void testToBoolean_String() {
System.out.println("toBoolean");
boolean expResult;
boolean result;
Properties props = new Properties();
props.put("value1", "true");
props.put("value2", "false");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = true;
result = instance.toBoolean("value1");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("value2");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("empty");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("str");
assertEquals(expResult, result);
expResult = true;
result = instance.toBoolean("boolean");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("float");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("int");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("char");
assertEquals(expResult, result);
expResult = false;
result = instance.toBoolean("nonexistent");
assertEquals(expResult, result);
}
|
@Override
public void loadGlue(Glue glue, List<URI> gluePaths) {
gluePaths.stream()
.filter(gluePath -> CLASSPATH_SCHEME.equals(gluePath.getScheme()))
.map(ClasspathSupport::packageName)
.map(classFinder::scanForClassesInPackage)
.flatMap(Collection::stream)
.filter(InjectorSource.class::isAssignableFrom)
.distinct()
.forEach(container::addClass);
}
|
@Test
void finds_injector_source_impls_once_by_classpath_url() {
GuiceBackend backend = new GuiceBackend(factory, classLoader);
backend.loadGlue(glue, asList(URI.create("classpath:io/cucumber/guice/integration"),
URI.create("classpath:io/cucumber/guice/integration")));
verify(factory, times(1)).addClass(YourInjectorSource.class);
}
|
public double currentErrorRate() {
return collectorMap.values().stream()
.mapToDouble(MetricCollector::errorRate)
.sum();
}
|
@Test
public void shouldAggregateDeserializationErrors() {
final MetricCollectors metricCollectors = new MetricCollectors();
final StreamsErrorCollector streamsErrorCollector = StreamsErrorCollector.create(
"test-application",
metricCollectors
);
for (int i = 0; i < 2000; i++) {
streamsErrorCollector.recordError(TEST_TOPIC);
}
// we have 2000 errors in one sample out of 100, so the effective error rate computed
// should be 20 for this run.
assertEquals(20.0, Math.floor(metricCollectors.currentErrorRate()), 0.1);
}
|
public static <T> DequeCoder<T> of(Coder<T> elemCoder) {
return new DequeCoder<>(elemCoder);
}
|
@Test
public void testCoderIsSerializableWithWellKnownCoderType() throws Exception {
CoderProperties.coderSerializable(DequeCoder.of(GlobalWindow.Coder.INSTANCE));
}
|
@Override
public Integer convertStringToValue(final String value) {
return Integer.valueOf(value);
}
|
@Test
void testConvertStringToValue() {
assertThat(parallelismQueryParameter.convertValueToString(42)).isEqualTo("42");
}
|
@Override
public boolean shouldWait() {
RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
if (resultSet == null) {
resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter);
sequence = startSequence;
}
if (ringbuffer == null) {
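// no ringbuffer container exists yet, so wait only if the caller requires at least one item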
return minSize > 0;
}
sequence = ringbuffer.clampReadSequenceToBounds(sequence);
if (minSize == 0) {
if (sequence < ringbuffer.tailSequence() + 1) {
readMany(ringbuffer);
}
return false;
}
if (resultSet.isMinSizeReached()) {
// enough items have been read, we are done.
return false;
}
if (sequence == ringbuffer.tailSequence() + 1) {
// the sequence is not readable
return true;
}
readMany(ringbuffer);
return !resultSet.isMinSizeReached();
}
|
@Test
public void whenOnTailAndBufferEmpty() {
ReadManyOperation op = getReadManyOperation(ringbuffer.tailSequence(), 1, 1, null);
// the buffer is empty, so the operation has to wait for an item
boolean shouldWait = op.shouldWait();
assertTrue(shouldWait);
ReadResultSetImpl response = getReadResultSet(op);
assertEquals(0, response.readCount());
assertEquals(0, response.getNextSequenceToReadFrom());
}
|
@Override
public InstancePort instancePort(MacAddress macAddress) {
checkNotNull(macAddress, ERR_NULL_MAC_ADDRESS);
return instancePortStore.instancePorts().stream()
.filter(port -> port.macAddress().equals(macAddress))
.findFirst().orElse(null);
}
|
@Test
public void testGetInstancePortById() {
createBasicInstancePorts();
assertNotNull("Instance port did not match", target.instancePort(PORT_ID_1));
assertNotNull("Instance port did not match", target.instancePort(PORT_ID_2));
assertNull("Instance port did not match", target.instancePort(UNKNOWN_ID));
}
|
public static LoginManager acquireLoginManager(JaasContext jaasContext, String saslMechanism,
Class<? extends Login> defaultLoginClass,
Map<String, ?> configs) throws LoginException {
Class<? extends Login> loginClass = configuredClassOrDefault(configs, jaasContext,
saslMechanism, SaslConfigs.SASL_LOGIN_CLASS, defaultLoginClass);
Class<? extends AuthenticateCallbackHandler> defaultLoginCallbackHandlerClass = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM
.equals(saslMechanism) ? OAuthBearerUnsecuredLoginCallbackHandler.class
: AbstractLogin.DefaultLoginCallbackHandler.class;
Class<? extends AuthenticateCallbackHandler> loginCallbackClass = configuredClassOrDefault(configs, jaasContext,
saslMechanism, SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, defaultLoginCallbackHandlerClass);
synchronized (LoginManager.class) {
LoginManager loginManager;
Password jaasConfigValue = jaasContext.dynamicJaasConfig();
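// dynamic JAAS configs (sasl.jaas.config) are cached per config value; static contexts below are cached per context name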
if (jaasConfigValue != null) {
LoginMetadata<Password> loginMetadata = new LoginMetadata<>(jaasConfigValue, loginClass, loginCallbackClass, configs);
loginManager = DYNAMIC_INSTANCES.get(loginMetadata);
if (loginManager == null) {
loginManager = new LoginManager(jaasContext, saslMechanism, configs, loginMetadata);
DYNAMIC_INSTANCES.put(loginMetadata, loginManager);
}
} else {
LoginMetadata<String> loginMetadata = new LoginMetadata<>(jaasContext.name(), loginClass, loginCallbackClass, configs);
loginManager = STATIC_INSTANCES.get(loginMetadata);
if (loginManager == null) {
loginManager = new LoginManager(jaasContext, saslMechanism, configs, loginMetadata);
STATIC_INSTANCES.put(loginMetadata, loginManager);
}
}
SecurityUtils.addConfiguredSecurityProviders(configs);
return loginManager.acquire();
}
}
|
@Test
public void testShouldReThrowExceptionOnErrorLoginAttempt() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put(SaslConfigs.SASL_JAAS_CONFIG, dynamicPlainContext);
config.put(SaslConfigs.SASL_LOGIN_CLASS, Login.class);
config.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, AuthenticateCallbackHandler.class);
JaasContext dynamicContext = JaasContext.loadClientContext(config);
Login mockLogin = mock(Login.class);
AuthenticateCallbackHandler mockHandler = mock(AuthenticateCallbackHandler.class);
doThrow(new LoginException("Expecting LoginException")).when(mockLogin).login();
try (MockedStatic<Utils> mockedUtils = mockStatic(Utils.class, Mockito.CALLS_REAL_METHODS)) {
mockedUtils.when(() -> Utils.newInstance(Login.class)).thenReturn(mockLogin);
mockedUtils.when(() -> Utils.newInstance(AuthenticateCallbackHandler.class)).thenReturn(mockHandler);
assertThrows(LoginException.class, () ->
LoginManager.acquireLoginManager(dynamicContext, "PLAIN", DefaultLogin.class, config)
);
verify(mockLogin).close();
verify(mockHandler).close();
}
}
|