focal_method | test_case
---|---|
@Override
public Optional<IntentProcessPhase> execute() {
try {
List<Intent> compiled = processor.compile(data.intent(),
//TODO consider passing an optional here in the future
stored.map(IntentData::installables).orElse(null));
return Optional.of(new Installing(processor, IntentData.compiled(data, compiled), stored));
} catch (IntentException e) {
log.warn("Unable to compile intent {} due to:", data.intent(), e);
if (stored.filter(x -> !x.installables().isEmpty()).isPresent()) {
// removing orphaned flows and deallocating resources
return Optional.of(new Withdrawing(processor, IntentData.compiled(data, stored.get().installables())));
} else {
return Optional.of(new Failed(data));
}
}
}
|
@Test
public void testWhenIntentCompilationExceptionOccurs() {
IntentData pending = new IntentData(input, INSTALL_REQ, version);
expect(processor.compile(input, null)).andThrow(new IntentCompilationException());
replay(processor);
Compiling sut = new Compiling(processor, pending, Optional.empty());
Optional<IntentProcessPhase> output = sut.execute();
verify(processor);
assertThat(output.get(), is(instanceOf(Failed.class)));
}
|
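The test above covers the Failed branch; a hedged sketch of a companion test for the Withdrawing branch follows, reusing the same EasyMock fixtures (processor, input, INSTALL_REQ, version). The stored IntentData and the compiledIntents list used to build it are illustrative assumptions, not part of the original suite.
@Test
public void testRecompileFailureWithStoredInstallablesWithdraws() {
IntentData pending = new IntentData(input, INSTALL_REQ, version);
// hypothetical stored data with non-empty installables
IntentData storedData = IntentData.compiled(pending, compiledIntents);
expect(processor.compile(input, storedData.installables()))
.andThrow(new IntentCompilationException());
replay(processor);
Compiling sut = new Compiling(processor, pending, Optional.of(storedData));
Optional<IntentProcessPhase> output = sut.execute();
verify(processor);
// with orphaned installables present, the phase should move to Withdrawing
// to remove orphaned flows and deallocate resources
assertThat(output.get(), is(instanceOf(Withdrawing.class)));
}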
public Method methodDefinition() {
return method;
}
|
@Test
public void server_dispatches_log_messages_from_log_request() {
List<LogMessage> messages = List.of(MESSAGE_1, MESSAGE_2);
LogDispatcher logDispatcher = mock(LogDispatcher.class);
try (RpcServer server = new RpcServer(0)) {
server.addMethod(new ArchiveLogMessagesMethod(logDispatcher).methodDefinition());
server.start();
try (TestClient client = new TestClient(server.listenPort())) {
client.logMessages(messages);
}
}
verify(logDispatcher).handle(new ArrayList<>(messages));
verify(logDispatcher).flush();
}
|
@Override
public String pluginNamed() {
return PluginEnum.LOGGING_PULSAR.getName();
}
|
@Test
public void testPluginNamed() {
Assertions.assertEquals("loggingPulsar", loggingPulsarPluginDataHandler.pluginNamed());
}
|
@Override
public List<Feature> get() {
List<URI> featurePaths = featureOptions.getFeaturePaths();
List<Feature> features = loadFeatures(featurePaths);
if (features.isEmpty()) {
if (featurePaths.isEmpty()) {
log.warn(() -> "Got no path to feature directory or feature file");
} else {
log.warn(
() -> "No features found at " + featurePaths.stream().map(URI::toString).collect(joining(", ")));
}
}
return features;
}
|
@Test
void logs_message_if_no_features_are_found(LogRecordListener logRecordListener) {
Options featureOptions = () -> singletonList(FeaturePath.parse("classpath:io/cucumber/core/options"));
FeaturePathFeatureSupplier supplier = new FeaturePathFeatureSupplier(classLoader, featureOptions, parser);
supplier.get();
assertThat(logRecordListener.getLogRecords().get(1).getMessage(),
equalTo("No features found at classpath:io/cucumber/core/options"));
}
|
public static ValueReference createParameter(String value) {
return ValueReference.builder()
.valueType(ValueType.PARAMETER)
.value(value)
.build();
}
|
@Test
public void deserializeParameter() throws IOException {
assertThat(objectMapper.readValue("{\"@type\":\"parameter\",\"@value\":\"Test\"}", ValueReference.class)).isEqualTo(ValueReference.createParameter("Test"));
assertThatThrownBy(() -> objectMapper.readValue("{\"@type\":\"parameter\",\"@value\":\"\"}", ValueReference.class))
.isInstanceOf(JsonMappingException.class)
.hasCauseInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> objectMapper.readValue("{\"@type\":\"parameter\",\"@value\":\" \"}", ValueReference.class))
.isInstanceOf(JsonMappingException.class)
.hasCauseInstanceOf(IllegalArgumentException.class);
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the TIMESTAMP value."
+ " Single quotes in the timestamp format can be escaped with '',"
+ " for example: 'yyyy-MM-dd''T''HH:mm:ssX'.")
public Timestamp parseTimestamp(
@UdfParameter(
description = "The string representation of a date.") final String formattedTimestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return parseTimestamp(formattedTimestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void shouldSupportEmbeddedChars() throws ParseException {
// When:
final Object result = udf.parseTimestamp("2021-12-01T12:10:11.123Fred",
"yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
// Then:
final Timestamp expectedResult = Timestamp.from(sdf.parse("2021-12-01T12:10:11.123Fred").toInstant());
assertThat(result, is(expectedResult));
}
|
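The UDF documents DateTimeFormatter pattern semantics, so the embedded-literal behaviour the test exercises can be shown with a self-contained sketch (the demo class name is illustrative):
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class EmbeddedCharsDemo {
public static void main(String[] args) {
// single-quoted literals such as 'T' and 'Fred' are matched verbatim
DateTimeFormatter f = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");
System.out.println(LocalDateTime.parse("2021-12-01T12:10:11.123Fred", f));
// prints: 2021-12-01T12:10:11.123
}
}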
@Override
public Boolean update(List<ModifyRequest> modifyRequests, BiConsumer<Boolean, Throwable> consumer) {
return update(transactionTemplate, jdbcTemplate, modifyRequests, consumer);
}
|
@Test
void testUpdate4() {
List<ModifyRequest> modifyRequests = new ArrayList<>();
ModifyRequest modifyRequest1 = new ModifyRequest();
String sql = "UPDATE config_info SET data_id = 'test' WHERE id = ?;";
modifyRequest1.setSql(sql);
Object[] args = new Object[] {1};
modifyRequest1.setArgs(args);
modifyRequests.add(modifyRequest1);
when(transactionTemplate.execute(any(TransactionCallback.class))).thenReturn(true);
assertTrue(operate.update(transactionTemplate, jdbcTemplate, modifyRequests, biConsumer));
}
|
public static String camelCaseToUnderScore(String key) {
if (key.isEmpty())
return key;
StringBuilder sb = new StringBuilder(key.length());
for (int i = 0; i < key.length(); i++) {
char c = key.charAt(i);
if (Character.isUpperCase(c))
sb.append("_").append(Character.toLowerCase(c));
else
sb.append(c);
}
return sb.toString();
}
|
@Test
public void testCamelCaseToUnderscore() {
assertEquals("test_case", Helper.camelCaseToUnderScore("testCase"));
assertEquals("test_case_t_b_d", Helper.camelCaseToUnderScore("testCaseTBD"));
assertEquals("_test_case", Helper.camelCaseToUnderScore("TestCase"));
assertEquals("_test_case", Helper.camelCaseToUnderScore("_test_case"));
}
|
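A minimal sketch of the observable behaviour, matching the assertions above and assuming Helper is the utility class holding camelCaseToUnderScore (as the test implies; the demo class name is illustrative):
public class CamelCaseDemo {
public static void main(String[] args) {
// each uppercase char becomes "_" + lowercase, so a leading capital
// yields a leading underscore and runs like "TBD" are split per letter
System.out.println(Helper.camelCaseToUnderScore("testCase"));    // test_case
System.out.println(Helper.camelCaseToUnderScore("TestCase"));    // _test_case
System.out.println(Helper.camelCaseToUnderScore("testCaseTBD")); // test_case_t_b_d
}
}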
public static HttpRequest toNettyRequest(RestRequest request) throws Exception
{
HttpMethod nettyMethod = HttpMethod.valueOf(request.getMethod());
URL url = new URL(request.getURI().toString());
String path = url.getFile();
// RFC 2616, section 5.1.2:
// Note that the absolute path cannot be empty; if none is present in the original URI,
// it MUST be given as "/" (the server root).
if (path.isEmpty())
{
path = "/";
}
ByteBuf content = Unpooled.wrappedBuffer(request.getEntity().asByteBuffer());
HttpRequest nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, nettyMethod, path, content);
nettyRequest.headers().set(HttpConstants.CONTENT_LENGTH, request.getEntity().length());
setHttpHeadersAndCookies(request, url, nettyRequest);
return nettyRequest;
}
|
@Test
public void testStreamToNettyRequestContentLengthIgnoreCase() throws Exception
{
StreamRequestBuilder streamRequestBuilder = new StreamRequestBuilder(new URI(ANY_URI));
streamRequestBuilder.setHeader("CONTENT-LENGTH", Integer.toString(ANY_ENTITY.length()));
StreamRequest streamRequest = streamRequestBuilder.build(
EntityStreams.newEntityStream(new ByteStringWriter(ByteString.copy(ANY_ENTITY.getBytes()))));
HttpRequest nettyRequest = NettyRequestAdapter.toNettyRequest(streamRequest);
Assert.assertNull(nettyRequest.headers().get("Content-Length"));
}
|
static ColumnExtractor create(final Column column) {
final int index = column.index();
Preconditions.checkArgument(index >= 0, "negative index: " + index);
return column.namespace() == Namespace.KEY
? new KeyColumnExtractor(index)
: new ValueColumnExtractor(index);
}
|
@Test
public void shouldExtractKeyColumn() {
// Given:
when(column.namespace()).thenReturn(Namespace.KEY);
when(column.index()).thenReturn(0);
when(key.get(0)).thenReturn("some value");
final ColumnExtractor extractor = TimestampColumnExtractors.create(column);
// When:
final Object result = extractor.extract(key, value);
// Then:
assertThat(result, is("some value"));
}
|
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
@Test
public void editMessageText() {
String text = "Update " + System.currentTimeMillis();
BaseResponse response = bot.execute(new EditMessageText(chatId, 925, text)
.parseMode(ParseMode.Markdown)
.disableWebPagePreview(true)
.replyMarkup(new InlineKeyboardMarkup()));
assertTrue(response.isOk());
assertNotNull(((SendResponse) response).message().editDate());
response = bot.execute(new EditMessageText(channelName, 306, text));
assertTrue(response.isOk());
response = bot.execute(new EditMessageText("AgAAAN3wAQCj_Q4DjX4ok5VEUZU", text));
if (!response.isOk()) {
assertEquals(400, response.errorCode());
assertEquals("Bad Request: MESSAGE_ID_INVALID", response.description());
}
}
|
@Override
public String toString() {
return "QJM to " + loggers;
}
|
@Test
public void testToString() throws Exception {
GenericTestUtils.assertMatches(
qjm.toString(),
"QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof NiciraSetNshSpi) {
NiciraSetNshSpi that = (NiciraSetNshSpi) obj;
return Objects.equals(nshSpi, that.nshSpi);
}
return false;
}
|
@Test
public void testEquals() {
new EqualsTester().addEqualityGroup(nshSpi1, sameAsNshSpi1).addEqualityGroup(nshSpi2).testEquals();
}
|
@Override
public Object toConnectRow(final Object ksqlData) {
if (!(ksqlData instanceof Struct)) {
return ksqlData;
}
final Schema schema = getSchema();
final Struct struct = new Struct(schema);
Struct originalData = (Struct) ksqlData;
Schema originalSchema = originalData.schema();
if (originalSchema.name() == null && schema.name() != null) {
originalSchema = AvroSchemas.getAvroCompatibleConnectSchema(
originalSchema, schema.name()
);
originalData = ConnectSchemas.withCompatibleRowSchema(originalData, originalSchema);
}
validate(originalSchema, schema);
copyStruct(originalData, originalSchema, struct, schema);
return struct;
}
|
@Test
public void shouldThrowIfExtraFieldNotOptionalOrDefault() {
// Given:
final Schema schema = SchemaBuilder.struct()
.field("f1", SchemaBuilder.OPTIONAL_STRING_SCHEMA)
.field("f2", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
.field("f3", SchemaBuilder.OPTIONAL_INT64_SCHEMA)
.field("f4", SchemaBuilder.STRING_SCHEMA)
.build();
final Struct struct = new Struct(ORIGINAL_SCHEMA)
.put("f1", "abc")
.put("f2", 12);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> new AvroSRSchemaDataTranslator(schema).toConnectRow(struct)
);
// Then:
assertThat(e.getMessage(), is("Missing default value for required Avro field: [f4]. "
+ "This field appears in Avro schema in Schema Registry"));
}
|
@VisibleForTesting
static List<Set<PlanFragmentId>> extractPhases(Collection<PlanFragment> fragments)
{
// Build a graph where the plan fragments are vertexes and the edges represent
// a before -> after relationship. For example, a join hash build has an edge
// to the join probe.
Graph<PlanFragmentId, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
fragments.forEach(fragment -> graph.addVertex(fragment.getId()));
Visitor visitor = new Visitor(fragments, graph);
for (PlanFragment fragment : fragments) {
visitor.processFragment(fragment.getId());
}
// Computes all the strongly connected components of the directed graph.
// These are the "phases" which hold the set of fragments that must be started
// at the same time to avoid deadlock.
List<Set<PlanFragmentId>> components = new KosarajuStrongConnectivityInspector<>(graph).stronglyConnectedSets();
Map<PlanFragmentId, Set<PlanFragmentId>> componentMembership = new HashMap<>();
for (Set<PlanFragmentId> component : components) {
for (PlanFragmentId planFragmentId : component) {
componentMembership.put(planFragmentId, component);
}
}
// build graph of components (phases)
Graph<Set<PlanFragmentId>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
components.forEach(componentGraph::addVertex);
for (DefaultEdge edge : graph.edgeSet()) {
PlanFragmentId source = graph.getEdgeSource(edge);
PlanFragmentId target = graph.getEdgeTarget(edge);
Set<PlanFragmentId> from = componentMembership.get(source);
Set<PlanFragmentId> to = componentMembership.get(target);
if (!from.equals(to)) { // the topological order iterator below doesn't include vertices that have self-edges, so don't add them
componentGraph.addEdge(from, to);
}
}
List<Set<PlanFragmentId>> schedulePhases = ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
return schedulePhases;
}
|
@Test
public void testJoinWithDeepSources()
{
PlanFragment buildSourceFragment = createTableScanPlanFragment("buildSource");
PlanFragment buildMiddleFragment = createExchangePlanFragment("buildMiddle", buildSourceFragment);
PlanFragment buildTopFragment = createExchangePlanFragment("buildTop", buildMiddleFragment);
PlanFragment probeSourceFragment = createTableScanPlanFragment("probeSource");
PlanFragment probeMiddleFragment = createExchangePlanFragment("probeMiddle", probeSourceFragment);
PlanFragment probeTopFragment = createExchangePlanFragment("probeTop", probeMiddleFragment);
PlanFragment joinFragment = createJoinPlanFragment(INNER, "join", buildTopFragment, probeTopFragment);
List<Set<PlanFragmentId>> phases = PhasedExecutionSchedule.extractPhases(ImmutableList.of(
joinFragment,
buildTopFragment,
buildMiddleFragment,
buildSourceFragment,
probeTopFragment,
probeMiddleFragment,
probeSourceFragment));
assertEquals(phases, ImmutableList.of(
ImmutableSet.of(joinFragment.getId()),
ImmutableSet.of(buildTopFragment.getId()),
ImmutableSet.of(buildMiddleFragment.getId()),
ImmutableSet.of(buildSourceFragment.getId()),
ImmutableSet.of(probeTopFragment.getId()),
ImmutableSet.of(probeMiddleFragment.getId()),
ImmutableSet.of(probeSourceFragment.getId())));
}
|
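The deadlock-avoidance idea is that mutually dependent fragments collapse into a single phase; a small JGraphT sketch of the same SCC-then-condense pattern, with illustrative vertex names:
import org.jgrapht.Graph;
import org.jgrapht.alg.connectivity.KosarajuStrongConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class SccPhasesDemo {
public static void main(String[] args) {
Graph<String, DefaultEdge> g = new DefaultDirectedGraph<>(DefaultEdge.class);
for (String v : new String[] {"buildA", "buildB", "probe"}) {
g.addVertex(v);
}
g.addEdge("buildA", "buildB"); // buildA before buildB
g.addEdge("buildB", "buildA"); // ...and vice versa: a cycle
g.addEdge("buildB", "probe");  // both before probe
// the cycle collapses into one strongly connected component,
// printing something like [[buildA, buildB], [probe]]
System.out.println(new KosarajuStrongConnectivityInspector<>(g).stronglyConnectedSets());
}
}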
@Override
public DataNodeDto startNode(String nodeId) throws NodeNotFoundException {
final DataNodeDto node = nodeService.byNodeId(nodeId);
if (node.getDataNodeStatus() != DataNodeStatus.UNAVAILABLE && node.getDataNodeStatus() != DataNodeStatus.PREPARED) {
throw new IllegalArgumentException("Only stopped data nodes can be started.");
}
DataNodeLifecycleEvent e = DataNodeLifecycleEvent.create(node.getNodeId(), DataNodeLifecycleTrigger.START);
clusterEventBus.post(e);
return node;
}
|
@Test
public void startNodeFailsWhenNodeNotStopped() throws NodeNotFoundException {
final String testNodeId = "node";
nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.AVAILABLE));
Exception e = assertThrows(IllegalArgumentException.class, () -> {
classUnderTest.startNode(testNodeId);
});
assertEquals("Only stopped data nodes can be started.", e.getMessage());
verifyNoMoreInteractions(clusterEventBus);
}
|
@Override
public Predicate negate() {
return new GreaterLessPredicate(attributeName, value, !equal, !less);
}
|
@Test
public void negate_whenEqualsFalseAndLessTrue_thenReturnNewInstanceWithEqualsTrueAndLessFalse() {
String attribute = "attribute";
Comparable value = 1;
GreaterLessPredicate original = new GreaterLessPredicate(attribute, value, false, true);
GreaterLessPredicate negate = (GreaterLessPredicate) original.negate();
assertThat(negate).isNotSameAs(original);
assertThat(negate.attributeName).isEqualTo(attribute);
assertThat(negate.equal).isTrue();
assertThat(negate.less).isFalse();
}
|
public static String getAddress(ECKeyPair ecKeyPair) {
return getAddress(ecKeyPair.getPublicKey());
}
|
@Test
public void testGetAddressString() {
assertEquals(SampleKeys.ADDRESS_NO_PREFIX, Keys.getAddress(SampleKeys.PUBLIC_KEY_STRING));
}
|
public synchronized T getConfig(String configId) {
try (ConfigSubscriber subscriber = new ConfigSubscriber()) {
ConfigHandle<T> handle = subscriber.subscribe(clazz, configId);
subscriber.nextConfig(true);
return handle.getConfig();
}
}
|
@Test
public void testsStaticGetConfig() {
int times = 11;
String message = "testGetConfig";
String a0 = "a0";
String configId = "raw:times " + times + "\nmessage " + message + "\na[1]\na[0].name " + a0;
AppConfig config = ConfigGetter.getConfig(AppConfig.class, configId);
assertEquals(times, config.times());
assertEquals(message, config.message());
assertEquals(1, config.a().size());
assertEquals(a0, config.a(0).name());
AppService service = new AppService(configId, sourceSet);
AppConfig serviceConfig = service.getConfig();
assertTrue(service.isConfigured());
assertEquals(config, serviceConfig);
service.cancelSubscription();
}
|
public static String generateDatabaseId(String baseString) {
checkArgument(baseString.length() != 0, "baseString cannot be empty!");
String databaseId =
generateResourceId(
baseString,
ILLEGAL_DATABASE_CHARS,
REPLACE_DATABASE_CHAR,
MAX_DATABASE_ID_LENGTH,
DATABASE_TIME_FORMAT);
// replace hyphen with underscore, so there's no need for backticks
String trimmed = CharMatcher.is('_').trimTrailingFrom(databaseId);
checkArgument(
trimmed.length() > 0,
"Database id is empty after removing illegal characters and trailing underscores");
// if first char is not a letter, replace with a padding letter, so it doesn't
// violate spanner's database naming rules
char padding = generatePadding();
if (!Character.isLetter(trimmed.charAt(0))) {
trimmed = padding + trimmed.substring(1);
}
return trimmed;
}
|
@Test
public void testGenerateDatabaseIdShouldReplaceNonLetterFirstCharWithLetter() {
String testBaseString = "0_database";
String actual = generateDatabaseId(testBaseString);
assertThat(actual).matches("[a-z]_datab_\\d{8}_\\d{6}_\\d{6}");
}
|
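The trailing-underscore trim above is plain Guava; a one-method sketch (the demo class name is illustrative):
import com.google.common.base.CharMatcher;

public class TrimTrailingDemo {
public static void main(String[] args) {
// removes only trailing '_' characters, leaving interior ones intact
System.out.println(CharMatcher.is('_').trimTrailingFrom("my_db__")); // my_db
}
}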
@Override
public CurrentStateInformation trigger(MigrationStep step, Map<String, Object> args) {
context.setCurrentStep(step);
if (Objects.nonNull(args) && !args.isEmpty()) {
context.addActionArguments(step, args);
}
String errorMessage = null;
try {
stateMachine.fire(step);
} catch (Exception e) {
errorMessage = Objects.nonNull(e.getMessage()) ? e.getMessage() : e.toString();
}
persistenceService.saveStateMachineContext(context);
return new CurrentStateInformation(getState(), nextSteps(), errorMessage, context.getResponse());
}
|
@Test
public void smThrowsErrorOnWrongArgumentType() {
StateMachine<MigrationState, MigrationStep> stateMachine = testStateMachineWithAction((context) -> {
context.getActionArgument("k1", Integer.class);
});
migrationStateMachine = new MigrationStateMachineImpl(stateMachine, persistenceService, context);
CurrentStateInformation context = migrationStateMachine.trigger(MIGRATION_STEP, Map.of("k1", "v1"));
assertThat(context.hasErrors()).isTrue();
assertThat(context.errorMessage()).isEqualTo("Argument k1 must be of type class java.lang.Integer");
}
|
public void createPartitionMetadataTable() {
List<String> ddl = new ArrayList<>();
if (this.isPostgres()) {
// Quotes need to be added around identifiers to preserve casing.
ddl.add(
"CREATE TABLE \""
+ tableName
+ "\"(\""
+ COLUMN_PARTITION_TOKEN
+ "\" text NOT NULL,\""
+ COLUMN_PARENT_TOKENS
+ "\" text[] NOT NULL,\""
+ COLUMN_START_TIMESTAMP
+ "\" timestamptz NOT NULL,\""
+ COLUMN_END_TIMESTAMP
+ "\" timestamptz NOT NULL,\""
+ COLUMN_HEARTBEAT_MILLIS
+ "\" BIGINT NOT NULL,\""
+ COLUMN_STATE
+ "\" text NOT NULL,\""
+ COLUMN_WATERMARK
+ "\" timestamptz NOT NULL,\""
+ COLUMN_CREATED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP NOT NULL,\""
+ COLUMN_SCHEDULED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,\""
+ COLUMN_RUNNING_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,\""
+ COLUMN_FINISHED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,"
+ " PRIMARY KEY (\""
+ COLUMN_PARTITION_TOKEN
+ "\")"
+ ")"
+ " TTL INTERVAL '"
+ TTL_AFTER_PARTITION_FINISHED_DAYS
+ " days' ON \""
+ COLUMN_FINISHED_AT
+ "\"");
ddl.add(
"CREATE INDEX \""
+ WATERMARK_INDEX
+ "\" on \""
+ tableName
+ "\" (\""
+ COLUMN_WATERMARK
+ "\") INCLUDE (\""
+ COLUMN_STATE
+ "\")");
ddl.add(
"CREATE INDEX \""
+ CREATED_AT_START_TIMESTAMP_INDEX
+ "\" ON \""
+ tableName
+ "\" (\""
+ COLUMN_CREATED_AT
+ "\",\""
+ COLUMN_START_TIMESTAMP
+ "\")");
} else {
ddl.add(
"CREATE TABLE "
+ tableName
+ " ("
+ COLUMN_PARTITION_TOKEN
+ " STRING(MAX) NOT NULL,"
+ COLUMN_PARENT_TOKENS
+ " ARRAY<STRING(MAX)> NOT NULL,"
+ COLUMN_START_TIMESTAMP
+ " TIMESTAMP NOT NULL,"
+ COLUMN_END_TIMESTAMP
+ " TIMESTAMP NOT NULL,"
+ COLUMN_HEARTBEAT_MILLIS
+ " INT64 NOT NULL,"
+ COLUMN_STATE
+ " STRING(MAX) NOT NULL,"
+ COLUMN_WATERMARK
+ " TIMESTAMP NOT NULL,"
+ COLUMN_CREATED_AT
+ " TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_SCHEDULED_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_RUNNING_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_FINISHED_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ ") PRIMARY KEY ("
+ COLUMN_PARTITION_TOKEN
+ "),"
+ " ROW DELETION POLICY (OLDER_THAN("
+ COLUMN_FINISHED_AT
+ ", INTERVAL "
+ TTL_AFTER_PARTITION_FINISHED_DAYS
+ " DAY))");
ddl.add(
"CREATE INDEX "
+ WATERMARK_INDEX
+ " on "
+ tableName
+ " ("
+ COLUMN_WATERMARK
+ ") STORING ("
+ COLUMN_STATE
+ ")");
ddl.add(
"CREATE INDEX "
+ CREATED_AT_START_TIMESTAMP_INDEX
+ " ON "
+ tableName
+ " ("
+ COLUMN_CREATED_AT
+ ","
+ COLUMN_START_TIMESTAMP
+ ")");
}
OperationFuture<Void, UpdateDatabaseDdlMetadata> op =
databaseAdminClient.updateDatabaseDdl(instanceId, databaseId, ddl, null);
try {
// Initiate the request which returns an OperationFuture.
op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES);
} catch (ExecutionException | TimeoutException e) {
// If the operation failed or timed out during execution, expose the cause.
if (e.getCause() != null) {
throw (SpannerException) e.getCause();
} else {
throw SpannerExceptionFactory.asSpannerException(e);
}
} catch (InterruptedException e) {
// Throw when a thread is waiting, sleeping, or otherwise occupied,
// and the thread is interrupted, either before or during the activity.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
}
|
@Test
public void testCreatePartitionMetadataTableWithTimeoutException() throws Exception {
when(op.get(10, TimeUnit.MINUTES)).thenThrow(new TimeoutException(TIMED_OUT));
try {
partitionMetadataAdminDao.createPartitionMetadataTable();
fail();
} catch (SpannerException e) {
assertTrue(e.getMessage().contains(TIMED_OUT));
}
}
|
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
// When bufferedPreadDisabled = true, this API does not use any shared buffer,
// cursor position etc. So this is implemented as NOT synchronized. HBase
// kind of random reads on a shared file input stream will greatly get
// benefited by such implementation.
// Strict close check at the begin of the API only not for the entire flow.
synchronized (this) {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
}
LOG.debug("pread requested offset = {} len = {} bufferedPreadDisabled = {}",
offset, length, bufferedPreadDisabled);
if (!bufferedPreadDisabled) {
return super.read(position, buffer, offset, length);
}
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return 0;
}
if (streamStatistics != null) {
streamStatistics.readOperationStarted();
}
int bytesRead = readRemote(position, buffer, offset, length, tracingContext);
if (statistics != null) {
statistics.incrementBytesRead(bytesRead);
}
if (streamStatistics != null) {
streamStatistics.bytesRead(bytesRead);
}
return bytesRead;
}
|
@Test
public void testOlderReadAheadFailure() throws Exception {
AbfsClient client = getMockAbfsClient();
AbfsRestOperation successOp = getMockRestOp();
// Stub :
// First Read request leads to 3 readahead calls: Fail all 3 readahead-client.read()
// A second read request will see that readahead had failed for data in
// the requested offset range and also that it is an older readahead request.
// So attempt a new read only for the requested range.
doThrow(new TimeoutException("Internal Server error for RAH-X"))
.doThrow(new TimeoutException("Internal Server error for RAH-Y"))
.doThrow(new TimeoutException("Internal Server error for RAH-Z"))
.doReturn(successOp) // pass the read for second read request
.doReturn(successOp) // pass success for post eviction test
.when(client)
.read(any(String.class), any(Long.class), any(byte[].class),
any(Integer.class), any(Integer.class), any(String.class),
any(String.class), any(), any(TracingContext.class));
AbfsInputStream inputStream = getAbfsInputStream(client, "testOlderReadAheadFailure.txt");
// First read request that fails as the readahead triggered from this request failed.
intercept(IOException.class,
() -> inputStream.read(new byte[ONE_KB]));
// Only the 3 readAhead threads should have triggered client.read
verifyReadCallCount(client, 3);
// Sleep for thresholdAgeMs so that the read ahead buffer qualifies for being old.
Thread.sleep(ReadBufferManager.getBufferManager().getThresholdAgeMilliseconds());
// Second read request should retry the read (and not issue any new readaheads)
inputStream.read(ONE_KB, new byte[ONE_KB], 0, ONE_KB);
// Once created, mock will remember all interactions. So total number of read
// calls will be one more than earlier (resetting the mock would clear the
// count, but it would also erase the stub, requiring the AbfsInputStream to be
// recreated, which defeats the purpose)
verifyReadCallCount(client, 4);
// Stub returns success for the 5th read request, if ReadBuffers still
// persisted request would have failed for position 0.
checkEvictedStatus(inputStream, 0, false);
}
|
@Override
public HttpHeaders set(HttpHeaders headers) {
if (headers instanceof DefaultHttpHeaders) {
this.headers.set(((DefaultHttpHeaders) headers).headers);
return this;
} else {
return super.set(headers);
}
}
|
@Test
public void testSetNullHeaderValueNotValidate() {
final HttpHeaders headers = new DefaultHttpHeaders(false);
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
headers.set(of("test"), (CharSequence) null);
}
});
}
|
@Override
public long position() throws IOException {
return delegate.position();
}
|
@Test
public void testPosition() throws IOException {
int newPosition = 5;
channelUnderTest.position(newPosition);
assertEquals(newPosition, delegate.position());
assertEquals(newPosition, channelUnderTest.position());
}
|
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(config, MigrationsUtil::getKsqlClient);
}
|
@Test
public void shouldCleanMigrationsStreamAndTable() {
// When:
final int status = command.command(config, cfg -> client);
// Then:
assertThat(status, is(0));
verify(client).executeStatement("TERMINATE " + CTAS_QUERY_ID + ";");
verify(client).executeStatement("DROP TABLE " + MIGRATIONS_TABLE + " DELETE TOPIC;");
verify(client).executeStatement("DROP STREAM " + MIGRATIONS_STREAM + " DELETE TOPIC;");
}
|
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
&& (node.has("minLength") || node.has("maxLength"))
&& isApplicableType(field)) {
final Class<? extends Annotation> sizeClass
= ruleFactory.getGenerationConfig().isUseJakartaValidation()
? Size.class
: javax.validation.constraints.Size.class;
JAnnotationUse annotation = field.annotate(sizeClass);
if (node.has("minLength")) {
annotation.param("min", node.get("minLength").asInt());
}
if (node.has("maxLength")) {
annotation.param("max", node.get("maxLength").asInt());
}
}
return field;
}
|
@Test
public void testMaxAndMinLengthGenericsOnType() {
when(config.isIncludeJsr303Annotations()).thenReturn(true);
final int minValue = new Random().nextInt();
final int maxValue = new Random().nextInt();
JsonNode maxSubNode = Mockito.mock(JsonNode.class);
when(subNode.asInt()).thenReturn(minValue);
when(maxSubNode.asInt()).thenReturn(maxValue);
when(node.get("minLength")).thenReturn(subNode);
when(node.get("maxLength")).thenReturn(maxSubNode);
when(fieldVar.annotate(sizeClass)).thenReturn(annotation);
when(node.has("minLength")).thenReturn(true);
when(node.has("maxLength")).thenReturn(true);
when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName() + "<String>");
JFieldVar result = rule.apply("node", node, null, fieldVar, null);
assertSame(fieldVar, result);
verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(sizeClass);
verify(annotation, times(isApplicable ? 1 : 0)).param("min", minValue);
verify(annotation, times(isApplicable ? 1 : 0)).param("max", maxValue);
}
|
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch)
{
trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
|
@Test
public void testOverrideMask() throws CloneNotSupportedException
{
RecordBar bar = new RecordBar();
bar.setLocation("mountain view");
bar.data().put("SF", "CA");
RecordBar expected = bar.clone();
MaskTree maskTree = new MaskTree();
maskTree.addOperation(new PathSpec("SF"), MaskOperation.POSITIVE_MASK_OP);
RestUtils.trimRecordTemplate(bar, maskTree, false);
Assert.assertEquals(bar, expected);
}
|
public boolean checkPermission(String user, List<String> groups, AclAction action) {
return getPermission(user, groups).contains(action);
}
|
@Test
public void checkPermission() {
AccessControlList acl = new AccessControlList();
setPermissions(acl);
assertTrue(checkMode(acl, OWNING_USER, Collections.emptyList(), Mode.Bits.ALL));
assertTrue(checkMode(acl, NAMED_USER, Collections.emptyList(), Mode.Bits.READ_EXECUTE));
assertFalse(checkMode(acl, NAMED_USER, Collections.emptyList(), Mode.Bits.WRITE));
assertTrue(checkMode(acl, OTHER_USER, Lists.newArrayList(OWNING_GROUP),
Mode.Bits.READ_EXECUTE));
assertFalse(checkMode(acl, OTHER_USER, Lists.newArrayList(OWNING_GROUP),
Mode.Bits.WRITE));
assertTrue(checkMode(acl, OTHER_USER, Lists.newArrayList(NAMED_GROUP), Mode.Bits.READ));
assertFalse(checkMode(acl, OTHER_USER, Lists.newArrayList(NAMED_GROUP),
Mode.Bits.WRITE));
assertFalse(checkMode(acl, OTHER_USER, Lists.newArrayList(NAMED_GROUP),
Mode.Bits.EXECUTE));
assertTrue(checkMode(acl, OTHER_USER, Lists.newArrayList(OTHER_GROUP),
Mode.Bits.EXECUTE));
assertFalse(checkMode(acl, OTHER_USER, Lists.newArrayList(OTHER_GROUP),
Mode.Bits.READ));
assertFalse(checkMode(acl, OTHER_USER, Lists.newArrayList(OTHER_GROUP),
Mode.Bits.WRITE));
}
|
public double parseDouble(String name) {
return Double.parseDouble(getProperties().getProperty(name));
}
|
@Test
public void testParseDouble() {
System.out.println("parseDouble");
double expResult;
double result;
Properties props = new Properties();
props.put("value1", "12345.6789");
props.put("value2", "-9000.001");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = 12345.6789;
result = instance.parseDouble("value1");
assertEquals(expResult, result, 0);
expResult = -9000.001;
result = instance.parseDouble("value2");
assertEquals(expResult, result, 0);
try {
instance.parseDouble("empty");
fail("no exception");
} catch (IllegalArgumentException e) {
}
try {
instance.parseDouble("str");
fail("no exception");
} catch (IllegalArgumentException e) {
}
try {
instance.parseDouble("boolean");
fail("no exception");
} catch (IllegalArgumentException e) {
}
expResult = 24.98;
result = instance.parseDouble("float");
assertEquals(expResult, result, 0);
expResult = 12;
result = instance.parseDouble("int");
assertEquals(expResult, result, 0);
try {
instance.parseDouble("char");
fail("no exception");
} catch (IllegalArgumentException e) {
}
try {
instance.parseDouble("nonexistent");
fail("no exception");
} catch (NullPointerException e) {
}
}
|
public ApiClient createApiClient(@NonNull String baseUrl, String token, String truststoreLocation)
throws MalformedURLException, SSLException {
WebClient webClient = createWebClient(baseUrl, token, truststoreLocation);
ApiClient apiClient = new ApiClient(webClient);
if (token != null && !token.isEmpty()) {
apiClient.addDefaultHeader(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", token));
}
apiClient.setBasePath(baseUrl);
return apiClient;
}
|
@Test
public void testSetClientNameCalled() throws Exception {
ArgumentCaptor<String> clientNameCapture = ArgumentCaptor.forClass(String.class);
tablesApiClientFactorySpy.setClientName("trino");
tablesApiClientFactorySpy.createApiClient(
"https://test.openhouse.com", "", tmpCert.getAbsolutePath());
Mockito.verify(tablesApiClientFactorySpy, Mockito.times(1))
.setClientName(clientNameCapture.capture());
assertEquals("trino", clientNameCapture.getValue());
}
|
void handleFinish(HttpResponse response, Span span) {
if (response == null) throw new NullPointerException("response == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
if (response.error() != null) {
span.error(response.error()); // Ensures MutableSpan.error() for SpanHandler
}
try {
parseResponse(response, span);
} catch (Throwable t) {
propagateIfFatal(t);
Platform.get().log("error parsing response {0}", response, t);
} finally {
long finishTimestamp = response.finishTimestamp();
if (finishTimestamp == 0L) {
span.finish();
} else {
span.finish(finishTimestamp);
}
}
}
|
@Test void handleFinish_nothingOnNoop() {
when(span.isNoop()).thenReturn(true);
handler.handleFinish(response, span);
verify(span, never()).finish();
}
|
public int getListReservationFailedRetrieved() {
return numListReservationFailedRetrieved.value();
}
|
@Test
public void testGetListReservationRetrievedFailed() {
long totalBadBefore = metrics.getListReservationFailedRetrieved();
badSubCluster.getListReservationFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getListReservationFailedRetrieved());
}
|
public static String unescape(String str) {
if (str == null) {
return null;
}
int len = str.length();
StringWriter writer = new StringWriter(len);
StringBuilder unicode = new StringBuilder(4);
boolean hadSlash = false;
boolean inUnicode = false;
for (int i = 0; i < len; i++) {
char ch = str.charAt(i);
if (inUnicode) {
unicode.append(ch);
if (unicode.length() == 4) {
try {
int value = Integer.parseInt(unicode.toString(), 16);
writer.write((char) value);
unicode.setLength(0);
inUnicode = false;
hadSlash = false;
} catch (NumberFormatException nfe) {
throw new JsonPathException("Unable to parse unicode value: " + unicode, nfe);
}
}
continue;
}
if (hadSlash) {
hadSlash = false;
switch (ch) {
case '\\':
writer.write('\\');
break;
case '\'':
writer.write('\'');
break;
case '\"':
writer.write('"');
break;
case 'r':
writer.write('\r');
break;
case 'f':
writer.write('\f');
break;
case 't':
writer.write('\t');
break;
case 'n':
writer.write('\n');
break;
case 'b':
writer.write('\b');
break;
case 'u':
{
inUnicode = true;
break;
}
default :
writer.write(ch);
break;
}
continue;
} else if (ch == '\\') {
hadSlash = true;
continue;
}
writer.write(ch);
}
if (hadSlash) {
writer.write('\\');
}
return writer.toString();
}
|
@Test
public void testUnescapeThrow() {
Assertions.assertThrows(JsonPathException.class, () -> Utils.unescape("\\uuuuu"));
}
|
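A short sketch of the escape handling above; Utils is the class holding unescape, per the test (the demo class name is illustrative):
public class UnescapeDemo {
public static void main(String[] args) {
System.out.println(Utils.unescape("a\\tb"));      // a and b separated by a tab
System.out.println(Utils.unescape("\\u0041"));    // A (exactly 4 hex digits after \u)
System.out.println(Utils.unescape("trailing\\")); // trailing\ (lone trailing slash kept)
}
}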
@Override
public ConfigOperateResult insertOrUpdateBeta(final ConfigInfo configInfo, final String betaIps, final String srcIp,
final String srcUser) {
if (findConfigInfo4BetaState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()) == null) {
return addConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
} else {
return updateConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
}
}
|
@Test
void testInsertOrUpdateBetaOfUpdate() {
String dataId = "betaDataId113";
String group = "group";
String tenant = "tenant";
//mock an existing beta config
ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper();
mockedConfigInfoStateWrapper.setDataId(dataId);
mockedConfigInfoStateWrapper.setGroup(group);
mockedConfigInfoStateWrapper.setTenant(tenant);
mockedConfigInfoStateWrapper.setId(123456L);
mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis());
Mockito.when(
databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
.thenReturn(mockedConfigInfoStateWrapper, mockedConfigInfoStateWrapper);
//execute
String betaIps = "betaips...";
String srcIp = "srcUp...";
String srcUser = "srcUser...";
String appName = "appname";
String content = "content111";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setEncryptedDataKey("key34567");
ConfigOperateResult configOperateResult = embeddedConfigInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp,
srcUser);
//assert the returned object
assertEquals(mockedConfigInfoStateWrapper.getId(), configOperateResult.getId());
assertEquals(mockedConfigInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
//verify that update was invoked
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(configInfo.getContent()),
eq(configInfo.getMd5()), eq(betaIps), eq(srcIp), eq(srcUser), eq(configInfo.getAppName()),
eq(configInfo.getEncryptedDataKey()), eq(dataId), eq(group), eq(tenant)), times(1));
}
|
public TypeDescriptor<T> getEncodedTypeDescriptor() {
return (TypeDescriptor<T>)
TypeDescriptor.of(getClass()).resolveType(new TypeDescriptor<T>() {}.getType());
}
|
@Test
public void testTypeIsPreserved() throws Exception {
assertThat(VoidCoder.of().getEncodedTypeDescriptor(), equalTo(TypeDescriptor.of(Void.class)));
}
|
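getEncodedTypeDescriptor works because a concrete coder subclass fixes the binding for T in its generic supertype; the same mechanism shown with plain reflection (class names here are illustrative):
import java.lang.reflect.ParameterizedType;

public class ResolveTypeDemo {
abstract static class Coder<T> {}

static class StringCoder extends Coder<String> {}

public static void main(String[] args) {
ParameterizedType supertype =
(ParameterizedType) StringCoder.class.getGenericSuperclass();
// the actual type argument bound to T is recoverable at runtime
System.out.println(supertype.getActualTypeArguments()[0]); // class java.lang.String
}
}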
public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
if (table.getPartitionKeys().size() != 0) {
// add partition keys to table schema
// NOTE : this assumes that we do not ever have ptn keys as columns
// inside the table schema as well!
for (FieldSchema fs : table.getPartitionKeys()) {
tableSchema.append(HCatSchemaUtils.getHCatFieldSchema(fs));
}
}
return tableSchema;
}
|
@Test
public void testGetTableSchemaWithPtnColsApi() throws IOException {
// Check the schema of a table with one field & no partition keys.
StorageDescriptor sd = new StorageDescriptor(
Lists.newArrayList(new FieldSchema("username", serdeConstants.STRING_TYPE_NAME, null)),
"location", "org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
org.apache.hadoop.hive.metastore.api.Table apiTable =
new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
"viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
Table table = new Table(apiTable);
List<HCatFieldSchema> expectedHCatSchema =
Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
Assert.assertEquals(new HCatSchema(expectedHCatSchema),
HCatUtil.getTableSchemaWithPtnCols(table));
// Add a partition key & ensure its reflected in the schema.
List<FieldSchema> partitionKeys =
Lists.newArrayList(new FieldSchema("dt", serdeConstants.STRING_TYPE_NAME, null));
table.getTTable().setPartitionKeys(partitionKeys);
expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
Assert.assertEquals(new HCatSchema(expectedHCatSchema),
HCatUtil.getTableSchemaWithPtnCols(table));
}
|
public static CurlOption parse(String cmdLine) {
List<String> args = ShellWords.parse(cmdLine);
URI url = null;
HttpMethod method = HttpMethod.PUT;
List<Entry<String, String>> headers = new ArrayList<>();
Proxy proxy = NO_PROXY;
while (!args.isEmpty()) {
String arg = args.remove(0);
if (arg.equals("-X")) {
String methodArg = removeArgFor(arg, args);
method = HttpMethod.parse(methodArg);
} else if (arg.equals("-H")) {
String headerArg = removeArgFor(arg, args);
SimpleEntry<String, String> e = parseHeader(headerArg);
headers.add(e);
} else if (arg.equals("-x")) {
String proxyArg = removeArgFor(arg, args);
proxy = parseProxy(proxyArg);
} else {
if (url != null) {
throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command");
}
url = parseUrl(arg);
}
}
if (url == null) {
throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command");
}
return new CurlOption(proxy, method, url, headers);
}
|
@Test
public void must_provide_a_valid_uri() {
String uri = "'https://example.com/path with spaces'";
IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> CurlOption.parse(uri));
assertThat(exception.getCause(), instanceOf(URISyntaxException.class));
}
|
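A hedged usage sketch of the parser above, exercising only the parse entry point shown; the exact header quoting relies on ShellWords handling single quotes, as the test suggests:
public class CurlOptionDemo {
public static void main(String[] args) {
// flags mirror the loop above: -X method, -H header, -x proxy;
// the single bare argument is taken as the URL
CurlOption.parse("-X POST -H 'Content-Type: application/json' https://example.com/api");
// a second bare argument is rejected
try {
CurlOption.parse("https://example.com/a https://example.com/b");
} catch (IllegalArgumentException expected) {
System.out.println(expected.getMessage()); // ... was not a valid curl command
}
}
}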
@Override
public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId,
Http2Headers headers, int padding, ChannelPromise promise) {
try {
if (connection.goAwayReceived()) {
throw connectionError(PROTOCOL_ERROR, "Sending PUSH_PROMISE after GO_AWAY received.");
}
Http2Stream stream = requireStream(streamId);
// Reserve the promised stream.
connection.local().reservePushStream(promisedStreamId, stream);
promise = promise.unvoid();
ChannelFuture future = frameWriter.writePushPromise(ctx, streamId, promisedStreamId, headers, padding,
promise);
// Writing headers may fail during the encode state if they violate HPACK limits.
Throwable failureCause = future.cause();
if (failureCause == null) {
// This just sets internal stream state which is used elsewhere in the codec and doesn't
// necessarily mean the write will complete successfully.
stream.pushPromiseSent();
if (!future.isSuccess()) {
// Either the future is not done or failed in the meantime.
notifyLifecycleManagerOnError(future, ctx);
}
} else {
lifecycleManager.onError(ctx, true, failureCause);
}
return future;
} catch (Throwable t) {
lifecycleManager.onError(ctx, true, t);
promise.tryFailure(t);
return promise;
}
}
|
@Test
public void pushPromiseWriteShouldReserveStream() throws Exception {
createStream(STREAM_ID, false);
ChannelPromise promise = newPromise();
encoder.writePushPromise(ctx, STREAM_ID, PUSH_STREAM_ID, EmptyHttp2Headers.INSTANCE, 0, promise);
assertEquals(RESERVED_LOCAL, stream(PUSH_STREAM_ID).state());
verify(writer).writePushPromise(eq(ctx), eq(STREAM_ID), eq(PUSH_STREAM_ID),
eq(EmptyHttp2Headers.INSTANCE), eq(0), eq(promise));
}
|
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
@Test
void testNonForwardedInvalidString() {
String[] nonForwardedFields = {"notValid"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
assertThatThrownBy(
() ->
SemanticPropUtil.getSemanticPropsSingleFromString(
sp,
null,
nonForwardedFields,
null,
threeIntTupleType,
threeIntTupleType))
.isInstanceOf(InvalidSemanticAnnotationException.class);
}
|
@ScalarOperator(GREATER_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThanOrEqual(@SqlType(StandardTypes.TINYINT) long left, @SqlType(StandardTypes.TINYINT) long right)
{
return left >= right;
}
|
@Test
public void testGreaterThanOrEqual()
{
assertFunction("TINYINT'37' >= TINYINT'37'", BOOLEAN, true);
assertFunction("TINYINT'37' >= TINYINT'17'", BOOLEAN, true);
assertFunction("TINYINT'17' >= TINYINT'37'", BOOLEAN, false);
assertFunction("TINYINT'17' >= TINYINT'17'", BOOLEAN, true);
}
|
@Override protected String propagationField(String keyName) {
if (keyName == null) throw new NullPointerException("keyName == null");
Key<String> key = nameToKey.get(keyName);
if (key == null) {
assert false : "We currently don't support getting headers except propagation fields";
return null;
}
return headers.get(key);
}
|
@Test void propagationField_lastValue() {
headers.put(b3Key, "0");
headers.put(b3Key, "1");
assertThat(request.propagationField("b3")).isEqualTo("1");
}
|
public static <UserT, DestinationT, OutputT> WriteFiles<UserT, DestinationT, OutputT> to(
FileBasedSink<UserT, DestinationT, OutputT> sink) {
checkArgument(sink != null, "sink can not be null");
return new AutoValue_WriteFiles.Builder<UserT, DestinationT, OutputT>()
.setSink(sink)
.setComputeNumShards(null)
.setNumShardsProvider(null)
.setWindowedWrites(false)
.setWithAutoSharding(false)
.setMaxNumWritersPerBundle(DEFAULT_MAX_NUM_WRITERS_PER_BUNDLE)
.setSideInputs(sink.getDynamicDestinations().getSideInputs())
.setSkipIfEmpty(false)
.setBadRecordErrorHandler(new DefaultErrorHandler<>())
.setBadRecordRouter(BadRecordRouter.THROWING_ROUTER)
.build();
}
|
@Test
@Category(NeedsRunner.class)
public void testUnboundedNeedsWindowed() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage(
"Must use windowed writes when applying WriteFiles to an unbounded PCollection");
SimpleSink<Void> sink = makeSimpleSink();
p.apply(Create.of("foo")).setIsBoundedInternal(IsBounded.UNBOUNDED).apply(WriteFiles.to(sink));
p.run();
}
|
@Override
public Rule getByKey(RuleKey key) {
verifyKeyArgument(key);
ensureInitialized();
Rule rule = rulesByKey.get(key);
checkArgument(rule != null, "Can not find rule for key %s. This rule does not exist in DB", key);
return rule;
}
|
@Test
public void first_call_to_getByKey_triggers_call_to_db_and_any_subsequent_get_or_find_call_does_not() {
underTest.getByKey(AB_RULE.getKey());
verify(ruleDao, times(1)).selectAll(any(DbSession.class));
verifyNoMethodCallTriggersCallToDB();
}
|
@VisibleForTesting
void validateMenu(Long parentId, String name, Long id) {
MenuDO menu = menuMapper.selectByParentIdAndName(parentId, name);
if (menu == null) {
return;
}
// If id is null, there is no need to check whether an existing menu has the same id
if (id == null) {
throw exception(MENU_NAME_DUPLICATE);
}
if (!menu.getId().equals(id)) {
throw exception(MENU_NAME_DUPLICATE);
}
}
|
@Test
public void testValidateMenu_success() {
// mock parent and child menus
MenuDO sonMenu = createParentAndSonMenu();
// prepare parameters
Long parentId = sonMenu.getParentId();
Long otherSonMenuId = randomLongId();
String otherSonMenuName = randomString();
// invoke; no assertion needed, not throwing means success
menuService.validateMenu(parentId, otherSonMenuName, otherSonMenuId);
}
|
@Override
public URL getUrl() {
return channel.getUrl();
}
|
@Test
void getUrlTest() {
Assertions.assertEquals(URL.valueOf("dubbo://localhost:20880"), header.getUrl());
}
|
public static int checkLessThan(int n, int expected, String name)
{
if (n >= expected)
{
throw new IllegalArgumentException(name + ": " + n + " (expected: < " + expected + ')');
}
return n;
}
|
@Test
public void checkLessThanMustPassIfArgumentIsLessThanExpected()
{
final int n = 0;
final int actual = RangeUtil.checkLessThan(n, 1, "var");
assertThat(actual, is(equalTo(n)));
}
|
protected void initializeXulMenu( Document doc, List<StepMeta> selection, StepMeta stepMeta ) throws KettleException {
XulMenuitem item = (XulMenuitem) doc.getElementById( "trans-graph-entry-newhop" );
int sels = selection.size();
item.setDisabled( sels != 2 );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-align-snap" );
item.setAcceltext( "ALT-HOME" );
item.setLabel( BaseMessages.getString( PKG, "TransGraph.PopupMenu.SnapToGrid" ) );
item.setAccesskey( "alt-home" );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-open-mapping" );
XulMenu men = (XulMenu) doc.getElementById( TRANS_GRAPH_ENTRY_SNIFF );
men.setDisabled( trans == null || !trans.isRunning() );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-sniff-input" );
item.setDisabled( trans == null || !trans.isRunning() );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-sniff-output" );
item.setDisabled( trans == null || !trans.isRunning() );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-sniff-error" );
item.setDisabled( !( stepMeta.supportsErrorHandling() && stepMeta.getStepErrorMeta() != null
&& stepMeta.getStepErrorMeta().getTargetStep() != null && trans != null && trans.isRunning() ) );
XulMenu aMenu = (XulMenu) doc.getElementById( TRANS_GRAPH_ENTRY_AGAIN );
if ( aMenu != null ) {
aMenu.setDisabled( sels < 2 );
}
// item = (XulMenuitem) doc.getElementById("trans-graph-entry-data-movement-distribute");
// item.setSelected(stepMeta.isDistributes());
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-partitioning" );
item.setDisabled( spoon.getPartitionSchemasNames( transMeta ).isEmpty() );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-data-movement-copy" );
item.setSelected( !stepMeta.isDistributes() );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-hide" );
item.setDisabled( !( stepMeta.isDrawn() && !transMeta.isAnySelectedStepUsedInTransHops() ) );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-detach" );
item.setDisabled( !transMeta.isStepUsedInTransHops( stepMeta ) );
item = (XulMenuitem) doc.getElementById( "trans-graph-entry-errors" );
item.setDisabled( !stepMeta.supportsErrorHandling() );
}
|
@SuppressWarnings( "unchecked" )
@Test
public void testInitializeXulMenu() throws KettleException {
StepMeta stepMeta = mock( StepMeta.class );
TransGraph transGraph = mock( TransGraph.class );
TransMeta transMeta = mock( TransMeta.class );
Document document = mock( Document.class );
XulMenuitem xulItem = mock( XulMenuitem.class );
XulMenu xulMenu = mock( XulMenu.class );
StepErrorMeta stepErrorMeta = mock( StepErrorMeta.class );
Spoon spoon = mock( Spoon.class );
List<StepMeta> selection = Arrays.asList( new StepMeta(), stepMeta, new StepMeta() );
doCallRealMethod().when( transGraph ).setTransMeta( any( TransMeta.class ) );
doCallRealMethod().when( transGraph ).setSpoon( any( Spoon.class ) );
transGraph.setTransMeta( transMeta );
transGraph.setSpoon( spoon );
when( stepMeta.getStepErrorMeta() ).thenReturn( stepErrorMeta );
when( stepMeta.isDrawn() ).thenReturn( true );
when( document.getElementById( any( String.class ) ) ).thenReturn( xulItem );
when( document.getElementById( TransGraph.TRANS_GRAPH_ENTRY_AGAIN ) ).thenReturn( xulMenu );
when( document.getElementById( TransGraph.TRANS_GRAPH_ENTRY_SNIFF ) ).thenReturn( xulMenu );
doCallRealMethod().when( transGraph ).initializeXulMenu( any( Document.class ),
any( List.class ), any( StepMeta.class ) );
transGraph.initializeXulMenu( document, selection, stepMeta );
verify( transMeta ).isAnySelectedStepUsedInTransHops();
}
|
@Override
public VersionedRecord<V> get(final K key) {
final ValueAndTimestamp<V> valueAndTimestamp = internal.get(key);
return valueAndTimestamp == null
? null
: new VersionedRecord<>(valueAndTimestamp.value(), valueAndTimestamp.timestamp());
}
|
@Test
public void shouldThrowNullPointerOnGetIfKeyIsNull() {
assertThrows(NullPointerException.class, () -> store.get(null));
}
|
@Override
public Addresses loadAddresses(ClientConnectionProcessListenerRegistry listenerRunner)
throws Exception {
privateToPublic = getAddresses.call();
Set<Address> addresses = privateToPublic.keySet();
listenerRunner.onPossibleAddressesCollected(addresses);
return new Addresses(addresses);
}
|
@Test
public void testLoadAddresses() throws Exception {
RemoteAddressProvider provider = new RemoteAddressProvider(() -> expectedAddresses, true);
Collection<Address> addresses = provider.loadAddresses(createConnectionProcessListenerRunner()).primary();
assertEquals(3, addresses.size());
for (Address address : expectedAddresses.keySet()) {
addresses.remove(address);
}
assertTrue(addresses.isEmpty());
}
|
public static UnifiedDiff parseUnifiedDiff(InputStream stream) throws IOException, UnifiedDiffParserException {
UnifiedDiffReader parser = new UnifiedDiffReader(new BufferedReader(new InputStreamReader(stream)));
return parser.parse();
}
|
@Test
public void testParseIssue85() throws IOException {
UnifiedDiff diff = UnifiedDiffReader.parseUnifiedDiff(
UnifiedDiffReaderTest.class.getResourceAsStream("problem_diff_issue85.diff"));
assertEquals(1, diff.getFiles().size());
final UnifiedDiffFile file1 = diff.getFiles().get(0);
assertEquals("diff -r 83e41b73d115 -r a4438263b228 tests/test-check-pyflakes.t",
file1.getDiffCommand());
assertEquals("tests/test-check-pyflakes.t", file1.getFromFile());
assertEquals("tests/test-check-pyflakes.t", file1.getToFile());
assertEquals(1, file1.getPatch().getDeltas().size());
assertNull(diff.getTail());
}
|
@Override
public SocialUserDO getSocialUser(Long id) {
return socialUserMapper.selectById(id);
}
|
@Test
public void testGetSocialUser_id() {
// mock data
SocialUserDO socialUserDO = randomPojo(SocialUserDO.class);
socialUserMapper.insert(socialUserDO);
// prepare parameters
Long id = socialUserDO.getId();
// invoke
SocialUserDO dbSocialUserDO = socialUserService.getSocialUser(id);
// assert
assertPojoEquals(socialUserDO, dbSocialUserDO);
}
|
@Override
public void reset() {
super.reset();
this.minDeltaInCurrentBlock = Long.MAX_VALUE;
}
|
@Test
public void shouldReset() throws IOException {
shouldReadWriteWhenDataIsNotAlignedWithBlock();
long[] data = new long[5 * blockSize];
for (int i = 0; i < blockSize * 5; i++) {
data[i] = i * 2;
}
writer.reset();
shouldWriteAndRead(data);
}
|
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
if (!(statement.getStatement() instanceof CreateSource)
&& !(statement.getStatement() instanceof CreateAsSelect)) {
return statement;
}
try {
if (statement.getStatement() instanceof CreateSource) {
final ConfiguredStatement<CreateSource> createStatement =
(ConfiguredStatement<CreateSource>) statement;
return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
} else {
final ConfiguredStatement<CreateAsSelect> createStatement =
(ConfiguredStatement<CreateAsSelect>) statement;
return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
createStatement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
}
|
@Test
public void shouldReturnStatementUnchangedIfHasKeySchemaAndValueFormatNotSupported() {
// Given:
givenKeyButNotValueInferenceSupported();
when(cs.getElements()).thenReturn(SOME_KEY_ELEMENTS_STREAM);
// When:
final ConfiguredStatement<?> result = injector.inject(csStatement);
// Then:
assertThat(result, is(sameInstance(csStatement)));
}
|
public void runOnStateAppliedFilters(Job job) {
new JobPerformingFilters(job, jobDefaultFilters).runOnStateAppliedFilters();
}
|
@Test
void jobFiltersAreNotAppliedIfJobHasNoStateChange() {
// GIVEN
Job aJob = anEnqueuedJob().build();
aJob.startProcessingOn(backgroundJobServer);
aJob.getStateChangesForJobFilters(); // clear
// WHEN
aJob.updateProcessing();
jobFilterUtils.runOnStateAppliedFilters(List.of(aJob));
// THEN
assertThat(logAllStateChangesFilter.getStateChanges(aJob)).isEmpty();
}
|
@Override
public HttpRestResult<String> httpGet(String path, Map<String, String> headers, Map<String, String> paramValues,
String encode, long readTimeoutMs) throws Exception {
final long endTime = System.currentTimeMillis() + readTimeoutMs;
String currentServerAddr = serverListMgr.getCurrentServerAddr();
int maxRetry = this.maxRetry;
HttpClientConfig httpConfig = HttpClientConfig.builder()
.setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
.setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(100)).build();
do {
try {
Header newHeaders = Header.newInstance();
if (headers != null) {
newHeaders.addAll(headers);
}
Query query = Query.newInstance().initParams(paramValues);
HttpRestResult<String> result = nacosRestTemplate.get(getUrl(currentServerAddr, path), httpConfig,
newHeaders, query, String.class);
if (isFail(result)) {
LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}",
serverListMgr.getCurrentServerAddr(), result.getCode());
} else {
// Update the currently available server addr
serverListMgr.updateCurrentServerAddr(currentServerAddr);
return result;
}
} catch (ConnectException connectException) {
LOGGER.error("[NACOS ConnectException httpGet] currentServerAddr:{}, err : {}",
serverListMgr.getCurrentServerAddr(), connectException.getMessage());
} catch (SocketTimeoutException socketTimeoutException) {
LOGGER.error("[NACOS SocketTimeoutException httpGet] currentServerAddr:{}, err : {}",
serverListMgr.getCurrentServerAddr(), socketTimeoutException.getMessage());
} catch (Exception ex) {
LOGGER.error("[NACOS Exception httpGet] currentServerAddr: " + serverListMgr.getCurrentServerAddr(),
ex);
throw ex;
}
if (serverListMgr.getIterator().hasNext()) {
currentServerAddr = serverListMgr.getIterator().next();
} else {
maxRetry--;
if (maxRetry < 0) {
throw new ConnectException(
"[NACOS HTTP-GET] The maximum number of tolerable server reconnection errors has been reached");
}
serverListMgr.refreshCurrentServerAddr();
}
} while (System.currentTimeMillis() <= endTime);
LOGGER.error("no available server");
throw new ConnectException("no available server");
}
|
@Test
void testHttpGetFailed() throws Exception {
assertThrows(ConnectException.class, () -> {
when(nacosRestTemplate.<String>get(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class),
any(Header.class), any(Query.class), eq(String.class))).thenReturn(mockResult);
when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_NOT_FOUND);
serverHttpAgent.httpGet("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000);
});
}
|
@Udf(description = "Converts a number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String timestampToString(
@UdfParameter(
description = "Milliseconds since"
+ " January 1, 1970, 00:00:00 UTC/GMT.") final long epochMilli,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (formatPattern == null) {
return null;
}
try {
final Timestamp timestamp = new Timestamp(epochMilli);
final DateTimeFormatter formatter = formatters.get(formatPattern);
return timestamp.toInstant()
.atZone(ZoneId.systemDefault())
.format(formatter);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format timestamp " + epochMilli
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void testTimeZoneInUniversalTime() {
// Given:
final long timestamp = 1534353043000L;
// When:
final String universalTime = udf.timestampToString(timestamp,
"yyyy-MM-dd HH:mm:ss zz", "UTC");
// Then:
assertThat(universalTime, is("2018-08-15 17:10:43 UTC"));
}
|
public B loadbalance(String loadbalance) {
this.loadbalance = loadbalance;
return getThis();
}
|
@Test
void loadbalance() {
MethodBuilder builder = new MethodBuilder();
builder.loadbalance("mockloadbalance");
Assertions.assertEquals("mockloadbalance", builder.build().getLoadbalance());
}
|
public static Comparator<Object[]> getComparator(List<OrderByExpressionContext> orderByExpressions,
ColumnContext[] orderByColumnContexts, boolean nullHandlingEnabled) {
return getComparator(orderByExpressions, orderByColumnContexts, nullHandlingEnabled, 0, orderByExpressions.size());
}
|
@Test
public void testAscNullsLast() {
List<OrderByExpressionContext> orderBys =
Collections.singletonList(new OrderByExpressionContext(COLUMN1, ASC, NULLS_LAST));
setUpSingleColumnRows();
_rows.sort(OrderByComparatorFactory.getComparator(orderBys, ENABLE_NULL_HANDLING));
assertEquals(extractColumn(_rows, COLUMN1_INDEX), Arrays.asList(1, 2, null));
}
|
@ApiOperation(value = "Get Tenant Entity Views (getTenantEntityViews)",
notes = "Returns a page of entity views owned by tenant. " + ENTITY_VIEW_DESCRIPTION +
PAGE_DATA_PARAMETERS + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/tenant/entityViews", params = {"pageSize", "page"}, method = RequestMethod.GET)
@ResponseBody
public PageData<EntityView> getTenantEntityViews(
@Parameter(description = PAGE_SIZE_DESCRIPTION, required = true)
@RequestParam int pageSize,
@Parameter(description = PAGE_NUMBER_DESCRIPTION, required = true)
@RequestParam int page,
@Parameter(description = ENTITY_VIEW_TYPE)
@RequestParam(required = false) String type,
@Parameter(description = ENTITY_VIEW_TEXT_SEARCH_DESCRIPTION)
@RequestParam(required = false) String textSearch,
@Parameter(description = SORT_PROPERTY_DESCRIPTION, schema = @Schema(allowableValues = {"createdTime", "name", "type"}))
@RequestParam(required = false) String sortProperty,
@Parameter(description = SORT_ORDER_DESCRIPTION, schema = @Schema(allowableValues = {"ASC", "DESC"}))
@RequestParam(required = false) String sortOrder) throws ThingsboardException {
TenantId tenantId = getCurrentUser().getTenantId();
PageLink pageLink = createPageLink(pageSize, page, textSearch, sortProperty, sortOrder);
if (type != null && type.trim().length() > 0) {
return checkNotNull(entityViewService.findEntityViewByTenantIdAndType(tenantId, pageLink, type));
} else {
return checkNotNull(entityViewService.findEntityViewByTenantId(tenantId, pageLink));
}
}
|
@Test
public void testGetTenantEntityViews() throws Exception {
List<ListenableFuture<EntityViewInfo>> entityViewInfoFutures = new ArrayList<>(178);
for (int i = 0; i < 178; i++) {
ListenableFuture<EntityView> entityViewFuture = getNewSavedEntityViewAsync("Test entity view" + i);
entityViewInfoFutures.add(Futures.transform(entityViewFuture,
view -> new EntityViewInfo(view, null, false),
MoreExecutors.directExecutor()));
}
List<EntityViewInfo> entityViewInfos = Futures.allAsList(entityViewInfoFutures).get(TIMEOUT, SECONDS);
List<EntityViewInfo> loadedViews = loadListOfInfo(new PageLink(23), "/api/tenant/entityViewInfos?");
assertThat(entityViewInfos).containsExactlyInAnyOrderElementsOf(loadedViews);
}
|
public static int fromLogical(Schema schema, java.util.Date value) {
if (!(LOGICAL_NAME.equals(schema.name())))
throw new DataException("Requested conversion of Time object but the schema does not match.");
Calendar calendar = Calendar.getInstance(UTC);
calendar.setTime(value);
long unixMillis = calendar.getTimeInMillis();
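        // A Time value must represent a time of day only, i.e. fall within a single day of the epoch.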
if (unixMillis < 0 || unixMillis > MILLIS_PER_DAY) {
throw new DataException("Kafka Connect Time type should not have any date fields set to non-zero values.");
}
return (int) unixMillis;
}
|
@Test
public void testFromLogicalInvalidHasDateComponents() {
assertThrows(DataException.class,
() -> Time.fromLogical(Time.SCHEMA, EPOCH_PLUS_DATE_COMPONENT.getTime()));
}
|
public Expression rewrite(final Expression expression) {
return new ExpressionTreeRewriter<>(new OperatorPlugin()::process)
.rewrite(expression, null);
}
|
@Test
public void shouldReplaceComparisonOfWindowStartAndString() {
// Given:
final Expression predicate = getPredicate(
"SELECT * FROM orders where WINDOWSTART > '2017-01-01T00:00:00.000';");
// When:
final Expression rewritten = rewriter.rewrite(predicate);
// Then:
assertThat(rewritten.toString(), is(String.format("(WINDOWSTART > %d)", A_TIMESTAMP)));
}
|
public NearCacheConfig setInMemoryFormat(InMemoryFormat inMemoryFormat) {
this.inMemoryFormat = isNotNull(inMemoryFormat, "inMemoryFormat");
return this;
}
|
@Test(expected = IllegalArgumentException.class)
public void testSetInMemoryFormat_withString_whenNull() {
config.setInMemoryFormat((String) null);
}
|
public static Long validateIssuedAt(String claimName, Long claimValue) throws ValidateException {
if (claimValue != null && claimValue < 0)
throw new ValidateException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue));
return claimValue;
}
|
@Test
public void testValidateIssuedAt() {
Long expected = 1L;
Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected);
assertEquals(expected, actual);
}
|
@EventListener
@Async
void onApplicationEvent(HaloDocumentRebuildRequestEvent event) {
getSearchEngine()
.doOnNext(SearchEngine::deleteAll)
.flatMap(searchEngine -> extensionGetter.getExtensions(HaloDocumentsProvider.class)
.flatMap(HaloDocumentsProvider::fetchAll)
.buffer(this.bufferSize)
.doOnNext(searchEngine::addOrUpdate)
.then())
.blockOptional(Duration.ofMinutes(1));
}
|
@Test
void shouldDeleteDocsWhenReceivingDeleteRequestEvent() {
var searchEngine = mock(SearchEngine.class);
when(searchEngine.available()).thenReturn(true);
when(extensionGetter.getEnabledExtension(SearchEngine.class))
.thenReturn(Mono.just(searchEngine));
var docIds = List.of("1", "2", "3");
listener.onApplicationEvent(new HaloDocumentDeleteRequestEvent(this, docIds));
verify(searchEngine).deleteDocument(docIds);
}
|
@Override
public void renameTable(TableIdentifier from, TableIdentifier to) {
throw new UnsupportedOperationException("Cannot rename Hadoop tables");
}
|
@Test
public void testRenameTable() throws Exception {
HadoopCatalog catalog = hadoopCatalog();
TableIdentifier testTable = TableIdentifier.of("db", "tbl1");
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
assertThatThrownBy(() -> catalog.renameTable(testTable, TableIdentifier.of("db", "tbl2")))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessage("Cannot rename Hadoop tables");
}
|
static String strip(final String line) {
return new Parser(line).parse();
}
|
@Test
public void shouldReturnLineWithCommentInBackQuotesAsIs() {
// Given:
final String line = "no comment here `-- even this is not a comment`...";
// Then:
assertThat(CommentStripper.strip(line), is(sameInstance(line)));
}
|
public LinkedHashMap<String, String> getKeyPropertyList(ObjectName mbeanName) {
LinkedHashMap<String, String> keyProperties = keyPropertiesPerBean.get(mbeanName);
if (keyProperties == null) {
keyProperties = new LinkedHashMap<>();
String properties = mbeanName.getKeyPropertyListString();
Matcher match = PROPERTY_PATTERN.matcher(properties);
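            // Consume the property string one key=value pair per iteration, preserving declaration order.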
while (match.lookingAt()) {
keyProperties.put(match.group(1), match.group(2));
properties = properties.substring(match.end());
if (properties.startsWith(",")) {
properties = properties.substring(1);
}
match.reset(properties);
}
keyPropertiesPerBean.put(mbeanName, keyProperties);
}
return keyProperties;
}
|
@Test
public void testSingleObjectName() throws Throwable {
JmxMBeanPropertyCache testCache = new JmxMBeanPropertyCache();
LinkedHashMap<String, String> parameterList =
testCache.getKeyPropertyList(new ObjectName("com.organisation:name=value"));
assertSameElementsAndOrder(parameterList, "name", "value");
}
|
public boolean isValid() {
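        // The overlay is readable only if the text color differs from both the primary and primary-dark colors.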
return (mPrimaryColor != mPrimaryTextColor) && (mPrimaryDarkColor != mPrimaryTextColor);
}
|
@Test
public void isValidIfTextColorIsDifferentThanBackground() {
Assert.assertTrue(overlay(Color.GRAY, Color.GRAY, Color.BLACK).isValid());
Assert.assertTrue(overlay(Color.GRAY, Color.BLACK, Color.BLUE).isValid());
}
|
@Override
public boolean contains(int i) {
return false;
}
|
@Test
public void testContains1() throws Exception {
assertFalse(es.contains(Integer.valueOf(5)));
assertFalse(es.contains(Integer.valueOf(3)));
}
|
public static java.util.Date convertToTimestamp(Schema schema, Object value) {
if (value == null) {
throw new DataException("Unable to convert a null value to a schema that requires a value");
}
return convertToTimestamp(Timestamp.SCHEMA, schema, value);
}
|
@Test
public void shouldConvertTimestampValues() {
java.util.Date current = new java.util.Date();
long currentMillis = current.getTime() % MILLIS_PER_DAY;
// java.util.Date - just copy
java.util.Date ts1 = Values.convertToTimestamp(Timestamp.SCHEMA, current);
assertEquals(current, ts1);
// java.util.Date as a Timestamp - discard the day's milliseconds and keep the date
java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis);
ts1 = Values.convertToTimestamp(Date.SCHEMA, currentDate);
assertEquals(currentDate, ts1);
// java.util.Date as a Time - discard the date and keep the day's milliseconds
ts1 = Values.convertToTimestamp(Time.SCHEMA, currentMillis);
assertEquals(new java.util.Date(currentMillis), ts1);
// ISO8601 strings - currently broken because tokenization breaks at colon
// Millis as string
java.util.Date ts3 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime()));
assertEquals(current, ts3);
// Millis as long
java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime());
assertEquals(current, ts4);
}
|
@Nonnull
public static <T> AggregateOperation1<T, LongDoubleAccumulator, Double> averagingDouble(
@Nonnull ToDoubleFunctionEx<? super T> getDoubleValueFn
) {
checkSerializable(getDoubleValueFn, "getDoubleValueFn");
// count == accumulator.value1
// sum == accumulator.value2
return AggregateOperation
.withCreate(LongDoubleAccumulator::new)
.andAccumulate((LongDoubleAccumulator a, T item) -> {
// a bit faster check than in addExact, specialized for increment
if (a.getLong() == Long.MAX_VALUE) {
throw new ArithmeticException("Counter overflow");
}
a.setLong(a.getLong() + 1);
a.setDouble(a.getDouble() + getDoubleValueFn.applyAsDouble(item));
})
.andCombine((a1, a2) -> {
a1.setLong(Math.addExact(a1.getLong(), a2.getLong()));
a1.setDouble(a1.getDouble() + a2.getDouble());
})
.andDeduct((a1, a2) -> {
a1.setLong(Math.subtractExact(a1.getLong(), a2.getLong()));
a1.setDouble(a1.getDouble() - a2.getDouble());
})
.andExportFinish(a -> a.getDouble() / a.getLong());
}
|
@Test
public void when_averagingDouble_noInput_then_NaN() {
// Given
AggregateOperation1<Double, LongDoubleAccumulator, Double> aggrOp = averagingDouble(Double::doubleValue);
LongDoubleAccumulator acc = aggrOp.createFn().get();
// When
double result = aggrOp.finishFn().apply(acc);
// Then
assertEquals(Double.NaN, result, 0.0);
}
|
BackgroundJobRunner getBackgroundJobRunner(Job job) {
assertJobExists(job.getJobDetails());
return backgroundJobRunners.stream()
.filter(jobRunner -> jobRunner.supports(job))
.findFirst()
.orElseThrow(() -> problematicConfigurationException("Could not find a BackgroundJobRunner: either no JobActivator is registered, your Background Job Class is not registered within the IoC container or your Job does not have a default no-arg constructor."));
}
|
@Test
void getBackgroundJobRunnerForNonIoCJobWithInstance() {
jobActivator.clear();
final Job job = anEnqueuedJob()
.withJobDetails(() -> testService.doWork())
.build();
assertThat(backgroundJobServer.getBackgroundJobRunner(job))
.isNotNull()
.isInstanceOf(BackgroundJobWithoutIocRunner.class);
}
|
Object getEventuallyWeightedResult(Object rawObject, MULTIPLE_MODEL_METHOD multipleModelMethod,
double weight) {
switch (multipleModelMethod) {
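            // These methods consume the raw prediction as-is; no weighting is applied.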
case MAJORITY_VOTE:
case MODEL_CHAIN:
case SELECT_ALL:
case SELECT_FIRST:
return rawObject;
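            // Numeric aggregation methods pair the value with its weight for later combination.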
case MAX:
case SUM:
case MEDIAN:
case AVERAGE:
case WEIGHTED_SUM:
case WEIGHTED_MEDIAN:
case WEIGHTED_AVERAGE:
if (!(rawObject instanceof Number)) {
throw new KiePMMLException("Expected a number, retrieved " + rawObject.getClass().getName());
}
return new KiePMMLValueWeight(((Number) rawObject).doubleValue(), weight);
case WEIGHTED_MAJORITY_VOTE:
throw new KiePMMLException(multipleModelMethod + " not implemented, yet");
default:
throw new KiePMMLException("Unrecognized MULTIPLE_MODEL_METHOD " + multipleModelMethod);
}
}
|
@Test
void getEventuallyWeightedResultValueWeightNoNumber() {
VALUE_WEIGHT_METHODS.forEach(multipleModelMethod -> {
try {
evaluator.getEventuallyWeightedResult("OBJ", multipleModelMethod, 34.2);
fail(multipleModelMethod + " is supposed to throw exception because raw object is not a number");
} catch (KiePMMLException e) {
// expected
}
});
}
|
@Transactional
public ChecklistQuestionsResponse readChecklistQuestions(User user) {
List<CustomChecklistQuestion> customChecklistQuestions = customChecklistQuestionRepository.findAllByUser(user);
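        // Group the user's custom questions by category, preserving encounter order via LinkedHashMap.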
Map<Category, List<Question>> categoryQuestions = customChecklistQuestions.stream()
.map(CustomChecklistQuestion::getQuestion)
.collect(Collectors.groupingBy(Question::getCategory, LinkedHashMap::new, Collectors.toList()));
List<CategoryQuestionsResponse> categoryQuestionsResponses = categoryQuestions.entrySet().stream()
.map(categoryQuestionEntry -> CategoryQuestionsResponse.of(
categoryQuestionEntry.getKey(),
categoryQuestionEntry.getValue().stream()
.map(QuestionResponse::new)
.toList()))
.toList();
return new ChecklistQuestionsResponse(categoryQuestionsResponses);
}
|
@DisplayName("Successfully reads checklist questions")
@Test
void readChecklistQuestions() {
// given
customChecklistQuestionRepository.saveAll(CustomChecklistFixture.CUSTOM_CHECKLIST_QUESTION_DEFAULT);
    // when
ChecklistQuestionsResponse checklistQuestionsResponse = checklistService.readChecklistQuestions(USER1);
// then
List<Integer> defaultQuestionsIds = CUSTOM_CHECKLIST_QUESTION_DEFAULT.stream()
.map(CustomChecklistQuestion::getQuestion)
.map(Question::getId)
.toList();
List<Integer> responseQuestionsIds = checklistQuestionsResponse.categories().stream()
.map(CategoryQuestionsResponse::questions)
.flatMap(Collection::stream)
.map(QuestionResponse::getQuestionId)
.toList();
assertThat(responseQuestionsIds).containsExactlyElementsOf(defaultQuestionsIds);
}
|
public static List<String> filterMatches(@Nullable List<String> candidates,
@Nullable Pattern[] positivePatterns,
@Nullable Pattern[] negativePatterns) {
if (candidates == null || candidates.isEmpty()) {
return Collections.emptyList();
}
final Pattern[] positive = (positivePatterns == null || positivePatterns.length == 0) ?
MATCH_ALL_PATTERN : positivePatterns;
final Pattern[] negative = negativePatterns == null ? EMPTY_PATTERN : negativePatterns;
return candidates.stream()
.filter(c -> Arrays.stream(positive).anyMatch(p -> p.matcher(c).matches()))
.filter(c -> Arrays.stream(negative).noneMatch(p -> p.matcher(c).matches()))
.collect(Collectors.toList());
}
|
@Test
public void filterMatchesNegative() {
List<String> candidates = ImmutableList.of("a", "b");
List<String> expected = ImmutableList.of("a");
assertThat(filterMatches(candidates, null, new Pattern[]{Pattern.compile("b")}),
is(expected));
}
|
@Override
public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
final KafkaFutureImpl<Collection<Node>> describeClusterFuture = new KafkaFutureImpl<>();
final KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
final KafkaFutureImpl<String> clusterIdFuture = new KafkaFutureImpl<>();
final KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
runnable.call(new Call("listNodes", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedBrokerOrActiveKController()) {
private boolean useMetadataRequest = false;
@Override
AbstractRequest.Builder createRequest(int timeoutMs) {
if (!useMetadataRequest) {
return new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
.setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations())
.setEndpointType(metadataManager.usingBootstrapControllers() ?
EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()));
} else {
// Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it
// simplifies communication with older brokers)
return new MetadataRequest.Builder(new MetadataRequestData()
.setTopics(Collections.emptyList())
.setAllowAutoTopicCreation(true)
.setIncludeClusterAuthorizedOperations(
options.includeAuthorizedOperations()));
}
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
if (!useMetadataRequest) {
DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;
Errors error = Errors.forCode(response.data().errorCode());
if (error != Errors.NONE) {
ApiError apiError = new ApiError(error, response.data().errorMessage());
handleFailure(apiError.exception());
return;
}
Map<Integer, Node> nodes = response.nodes();
describeClusterFuture.complete(nodes.values());
// Controller is null if controller id is equal to NO_CONTROLLER_ID
controllerFuture.complete(nodes.get(response.data().controllerId()));
clusterIdFuture.complete(response.data().clusterId());
authorizedOperationsFuture.complete(
validAclOperations(response.data().clusterAuthorizedOperations()));
} else {
MetadataResponse response = (MetadataResponse) abstractResponse;
describeClusterFuture.complete(response.brokers());
controllerFuture.complete(controller(response));
clusterIdFuture.complete(response.clusterId());
authorizedOperationsFuture.complete(
validAclOperations(response.clusterAuthorizedOperations()));
}
}
private Node controller(MetadataResponse response) {
if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
return null;
return response.controller();
}
@Override
void handleFailure(Throwable throwable) {
describeClusterFuture.completeExceptionally(throwable);
controllerFuture.completeExceptionally(throwable);
clusterIdFuture.completeExceptionally(throwable);
authorizedOperationsFuture.completeExceptionally(throwable);
}
@Override
boolean handleUnsupportedVersionException(final UnsupportedVersionException exception) {
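                // Fall back to a MetadataRequest (at most once) if the broker does not support DescribeCluster.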
if (metadataManager.usingBootstrapControllers()) {
return false;
}
if (useMetadataRequest) {
return false;
}
useMetadataRequest = true;
return true;
}
}, now);
return new DescribeClusterResult(describeClusterFuture, controllerFuture, clusterIdFuture,
authorizedOperationsFuture);
}
|
@Test
public void testDescribeCluster() throws Exception {
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0),
AdminClientConfig.RETRIES_CONFIG, "2")) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Prepare the describe cluster response used for the first describe cluster
env.kafkaClient().prepareResponse(
prepareDescribeClusterResponse(0,
env.cluster().nodes(),
env.cluster().clusterResource().clusterId(),
2,
MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED));
// Prepare the describe cluster response used for the second describe cluster
env.kafkaClient().prepareResponse(
prepareDescribeClusterResponse(0,
env.cluster().nodes(),
env.cluster().clusterResource().clusterId(),
3,
1 << AclOperation.DESCRIBE.code() | 1 << AclOperation.ALTER.code()));
// Test DescribeCluster with the authorized operations omitted.
final DescribeClusterResult result = env.adminClient().describeCluster();
assertEquals(env.cluster().clusterResource().clusterId(), result.clusterId().get());
assertEquals(new HashSet<>(env.cluster().nodes()), new HashSet<>(result.nodes().get()));
assertEquals(2, result.controller().get().id());
assertNull(result.authorizedOperations().get());
// Test DescribeCluster with the authorized operations included.
final DescribeClusterResult result2 = env.adminClient().describeCluster();
assertEquals(env.cluster().clusterResource().clusterId(), result2.clusterId().get());
assertEquals(new HashSet<>(env.cluster().nodes()), new HashSet<>(result2.nodes().get()));
assertEquals(3, result2.controller().get().id());
assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)),
result2.authorizedOperations().get());
}
}
|
Properties getProperties() {
return m_properties;
}
|
@Test
public void checkDefaultOptions() {
Properties props = m_parser.getProperties();
// verifyDefaults(props);
assertTrue(props.isEmpty());
}
|
public T send() throws IOException {
return web3jService.send(this, responseType);
}
|
@Test
public void testEthGetBlockByNumber() throws Exception {
web3j.ethGetBlockByNumber(DefaultBlockParameter.valueOf(Numeric.toBigInt("0x1b4")), true)
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\","
+ "\"params\":[\"0x1b4\",true],\"id\":1}");
}
|
public static Map<String, String> validate(Map<String, String> configs) {
Map<String, String> invalidConfigs = new HashMap<>();
// No point to validate when connector is disabled.
if ("false".equals(configs.getOrDefault(ENABLED, "true"))) {
return invalidConfigs;
}
if ("false".equals(configs.get(EMIT_CHECKPOINTS_ENABLED))) {
invalidConfigs.putIfAbsent(EMIT_CHECKPOINTS_ENABLED, "MirrorCheckpointConnector can't run with " +
EMIT_CHECKPOINTS_ENABLED + " set to false");
}
if ("false".equals(configs.get(EMIT_OFFSET_SYNCS_ENABLED))) {
invalidConfigs.putIfAbsent(EMIT_OFFSET_SYNCS_ENABLED, "MirrorCheckpointConnector can't run without offset syncs");
}
return invalidConfigs;
}
|
@Test
public void testSkipValidationIfConnectorDisabled() {
Map<String, String> configValues = MirrorCheckpointConfig.validate(makeProps(
MirrorConnectorConfig.ENABLED, "false",
MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "false",
MirrorCheckpointConfig.SYNC_GROUP_OFFSETS_ENABLED, "false"));
assertTrue(configValues.isEmpty());
configValues = MirrorCheckpointConfig.validate(makeProps(
MirrorConnectorConfig.ENABLED, "false",
MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "true",
MirrorCheckpointConfig.EMIT_OFFSET_SYNCS_ENABLED, "false"));
assertTrue(configValues.isEmpty());
}
|
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final java.nio.file.Path p = session.toPath(file);
final Set<OpenOption> options = new HashSet<>();
options.add(StandardOpenOption.WRITE);
if(status.isAppend()) {
if(!status.isExists()) {
options.add(StandardOpenOption.CREATE);
}
}
else {
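                // Overwrite: replace symlinks with a fresh file, truncate regular files, or create a new one.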
if(status.isExists()) {
if(file.isSymbolicLink()) {
Files.delete(p);
options.add(StandardOpenOption.CREATE);
}
else {
options.add(StandardOpenOption.TRUNCATE_EXISTING);
}
}
else {
options.add(StandardOpenOption.CREATE_NEW);
}
}
            final FileChannel channel = FileChannel.open(p, options.toArray(new OpenOption[0]));
channel.position(status.getOffset());
return new VoidStatusOutputStream(Channels.newOutputStream(channel));
}
catch(IOException e) {
throw new LocalExceptionMappingService().map("Upload {0} failed", e, file);
}
}
|
@Test
public void testWriteContentRange() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final LocalWriteFeature feature = new LocalWriteFeature(session);
final Path workdir = new LocalHomeFinderFeature().find();
final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(64000);
{
final TransferStatus status = new TransferStatus();
status.setLength(1024L);
status.setOffset(0L);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
// Write first 1024
new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.flush();
out.close();
}
assertTrue(new DefaultFindFeature(session).find(test));
assertEquals(1024L, new DefaultAttributesFinderFeature(session).find(test).getSize());
{
// Remaining chunked transfer with offset
final TransferStatus status = new TransferStatus().exists(true);
status.setLength(content.length - 1024L);
status.setOffset(1024L);
status.setAppend(true);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.flush();
out.close();
}
final ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
IOUtils.copy(new LocalReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), out);
assertArrayEquals(content, out.toByteArray());
new LocalDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public String getId() {
return codec.getId();
}
|
@Test
public void delegatesGetId() {
Mockito.when(codec.getId()).thenReturn("MyLogstashPluginId");
final JavaCodecDelegator codecDelegator = constructCodecDelegator();
assertEquals("MyLogstashPluginId", codecDelegator.getId());
}
|
@Override
public String getExtraNameCharacters() {
return null;
}
|
@Test
void assertGetExtraNameCharacters() {
assertNull(metaData.getExtraNameCharacters());
}
|
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
@Test
public void shouldNotAllowNullValueJoinerOnJoinWithGlobalTable() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.join(testGlobalTable, MockMapper.selectValueMapper(), (ValueJoiner<? super String, ? super String, ?>) null));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
public static double convertToSeconds(long duration, TimeUnit timeUnit) {
return timeUnit.toNanos(duration) / NANOS_IN_SECOND;
}
|
@Test
public void testConvertToSeconds() {
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.HOURS)).isEqualTo(3600.0);
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.MINUTES)).isEqualTo(60.0);
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.SECONDS)).isEqualTo(1.0);
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.MILLISECONDS)).isEqualTo(0.001);
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.MICROSECONDS)).isEqualTo(0.000_001);
assertThat(MetricsUtil.convertToSeconds(1, TimeUnit.NANOSECONDS)).isEqualTo(0.000_000_001);
}
|
@Override
public String getName() {
return this.name;
}
|
@Test
public void allCircuitBreakerStatesAllowTransitionToItsOwnState() {
for (final CircuitBreaker.State state : CircuitBreaker.State.values()) {
assertThatNoException().isThrownBy(() -> CircuitBreaker.StateTransition.transitionBetween(circuitBreaker.getName(), state, state));
}
}
|
@Override
public void start() {
this.all = registry.meter(name(getName(), "all"));
this.trace = registry.meter(name(getName(), "trace"));
this.debug = registry.meter(name(getName(), "debug"));
this.info = registry.meter(name(getName(), "info"));
this.warn = registry.meter(name(getName(), "warn"));
this.error = registry.meter(name(getName(), "error"));
super.start();
}
|
@Test
public void usesSharedRegistries() {
String registryName = "registry";
SharedMetricRegistries.add(registryName, registry);
final InstrumentedAppender shared = new InstrumentedAppender(registryName);
shared.start();
when(event.getLevel()).thenReturn(Level.INFO);
shared.doAppend(event);
assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount())
.isEqualTo(1);
}
|
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
return parse(properties::get);
}
|
@Test
void should_parse_plugin_publish_token() {
properties.put(Constants.PLUGIN_PUBLISH_TOKEN_PROPERTY_NAME, "some/value");
RuntimeOptions options = cucumberPropertiesParser
.parse(properties)
.enablePublishPlugin()
.build();
assertThat(options.plugins().get(0).pluginString(),
equalTo("io.cucumber.core.plugin.PublishFormatter:some/value"));
}
|
static List<ClassLoader> selectClassLoaders(ClassLoader classLoader) {
// list prevents reordering!
List<ClassLoader> classLoaders = new ArrayList<>();
if (classLoader != null) {
classLoaders.add(classLoader);
}
// check if TCCL is same as given classLoader
ClassLoader tccl = Thread.currentThread().getContextClassLoader();
if (tccl != null && tccl != classLoader) {
classLoaders.add(tccl);
}
// Hazelcast core classLoader
ClassLoader coreClassLoader = ServiceLoader.class.getClassLoader();
if (coreClassLoader != classLoader && coreClassLoader != tccl) {
classLoaders.add(coreClassLoader);
}
// Hazelcast client classLoader
try {
Class<?> hzClientClass = Class.forName("com.hazelcast.client.HazelcastClient");
ClassLoader clientClassLoader = hzClientClass.getClassLoader();
if (clientClassLoader != classLoader && clientClassLoader != tccl && clientClassLoader != coreClassLoader) {
classLoaders.add(clientClassLoader);
}
} catch (ClassNotFoundException ignore) {
// ignore since we may not have the HazelcastClient in the classpath
ignore(ignore);
}
return classLoaders;
}
|
@Test
public void selectingSimpleGivenClassLoader() {
List<ClassLoader> classLoaders = ServiceLoader.selectClassLoaders(new URLClassLoader(new URL[0]));
assertEquals(2, classLoaders.size());
}
|
public static boolean shouldLoadInIsolation(String name) {
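    // Isolate the class unless it matches EXCLUDE without also being explicitly re-included by INCLUDE.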
return !(EXCLUDE.matcher(name).matches() && !INCLUDE.matcher(name).matches());
}
|
@Test
public void testAllowedJsonConverterClasses() {
List<String> jsonConverterClasses = Arrays.asList(
"org.apache.kafka.connect.json.",
"org.apache.kafka.connect.json.DecimalFormat",
"org.apache.kafka.connect.json.JsonConverter",
"org.apache.kafka.connect.json.JsonConverterConfig",
"org.apache.kafka.connect.json.JsonDeserializer",
"org.apache.kafka.connect.json.JsonSchema",
"org.apache.kafka.connect.json.JsonSerializer"
);
for (String clazz : jsonConverterClasses) {
assertTrue(PluginUtils.shouldLoadInIsolation(clazz),
clazz + " from 'json' is not loaded in isolation but should be");
}
}
|
public List<QueuePath> getWildcardedQueuePaths(int maxAutoCreatedQueueDepth) {
List<QueuePath> wildcardedPaths = new ArrayList<>();
// Start with the most explicit format (without wildcard)
wildcardedPaths.add(this);
String[] pathComponents = getPathComponents();
int supportedWildcardLevel = getSupportedWildcardLevel(maxAutoCreatedQueueDepth);
// Collect all template entries
for (int wildcardLevel = 1; wildcardLevel <= supportedWildcardLevel; wildcardLevel++) {
int wildcardedComponentIndex = pathComponents.length - wildcardLevel;
pathComponents[wildcardedComponentIndex] = WILDCARD_QUEUE;
QueuePath wildcardedPath = createFromQueues(pathComponents);
wildcardedPaths.add(wildcardedPath);
}
return wildcardedPaths;
}
|
@Test
public void testWildcardedQueuePathsWithTwoLevelWildCard() {
int maxAutoCreatedQueueDepth = 2;
List<QueuePath> expectedPaths = new ArrayList<>();
expectedPaths.add(TEST_QUEUE_PATH);
expectedPaths.add(ONE_LEVEL_WILDCARDED_TEST_PATH);
expectedPaths.add(TWO_LEVEL_WILDCARDED_TEST_PATH);
List<QueuePath> wildcardedPaths = TEST_QUEUE_PATH
.getWildcardedQueuePaths(maxAutoCreatedQueueDepth);
Assert.assertEquals(expectedPaths, wildcardedPaths);
}
|
public MutableTree<K> beginWrite() {
return new MutableTree<>(this);
}
|
@Test
public void tailReverseIterationTest() {
Random random = new Random(239786);
Persistent23Tree.MutableTree<Integer> tree = new Persistent23Tree<Integer>().beginWrite();
int[] p = genPermutation(random);
TreeSet<Integer> added = new TreeSet<>();
for (int i = 0; i < ENTRIES_TO_ADD; i++) {
int size = tree.size();
Assert.assertEquals(i, size);
if ((size & 1023) == 0 || size < 100) {
if (i > 0) {
checkTailReverseIteration(tree, added, added.first());
checkTailReverseIteration(tree, added, added.first() - 1);
checkTailReverseIteration(tree, added, added.last());
checkTailReverseIteration(tree, added, added.last() + 1);
}
checkTailReverseIteration(tree, added, Integer.MAX_VALUE);
checkTailReverseIteration(tree, added, Integer.MIN_VALUE);
for (int j = 0; j < 10; j++) {
checkTailReverseIteration(tree, added, p[i * j / 10]);
}
}
tree.add(p[i]);
added.add(p[i]);
}
}
|
public static void main(String[] args) throws IOException {
runSqlLine(args, null, System.out, System.err);
}
|
@Test
public void classLoader_readFile() throws Exception {
File simpleTable = folder.newFile();
BeamSqlLine.main(
new String[] {
"-e",
"CREATE EXTERNAL TABLE test (id INTEGER) TYPE 'text' LOCATION '"
+ simpleTable.getAbsolutePath()
+ "';",
"-e",
"SELECT * FROM test;",
"-e",
"DROP TABLE test;"
});
}
|
long remove(final long recordingId)
{
ensurePositive(recordingId, "recordingId");
final long[] index = this.index;
final int lastPosition = lastPosition();
final int position = find(index, recordingId, lastPosition);
if (position < 0)
{
return NULL_VALUE;
}
final long recordingDescriptorOffset = index[position + 1];
count--;
// Shift data to the left
for (int i = position; i < lastPosition; i += 2)
{
index[i] = index[i + 2];
index[i + 1] = index[i + 3];
}
// Reset last copied element
index[lastPosition] = 0;
index[lastPosition + 1] = 0;
return recordingDescriptorOffset;
}
|
@Test
void removeReturnsNullValueWhenIndexIsEmpty()
{
assertEquals(NULL_VALUE, catalogIndex.remove(1));
}
|
static Expression getResultUpdaterExpression(final RegressionModel.NormalizationMethod normalizationMethod) {
if (UNSUPPORTED_NORMALIZATION_METHODS.contains(normalizationMethod)) {
return new NullLiteralExpr();
} else {
return getResultUpdaterSupportedExpression(normalizationMethod);
}
}
|
@Test
void getResultUpdaterExpression() {
UNSUPPORTED_NORMALIZATION_METHODS.forEach(normalizationMethod -> {
Expression retrieved =
KiePMMLRegressionTableFactory.getResultUpdaterExpression(normalizationMethod);
assertThat(retrieved).isInstanceOf(NullLiteralExpr.class);
});
}
|
public static DateTime convertToDateTime(@Nonnull Object value) {
if (value instanceof DateTime) {
return (DateTime) value;
}
if (value instanceof Date) {
return new DateTime(value, DateTimeZone.UTC);
} else if (value instanceof ZonedDateTime) {
final DateTimeZone dateTimeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(((ZonedDateTime) value).getZone()));
return new DateTime(Date.from(((ZonedDateTime) value).toInstant()), dateTimeZone);
} else if (value instanceof OffsetDateTime) {
return new DateTime(Date.from(((OffsetDateTime) value).toInstant()), DateTimeZone.UTC);
} else if (value instanceof LocalDateTime) {
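            // LocalDateTime carries no zone information; interpret it in the system default zone.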
final LocalDateTime localDateTime = (LocalDateTime) value;
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof LocalDate) {
final LocalDate localDate = (LocalDate) value;
final LocalDateTime localDateTime = localDate.atStartOfDay();
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof Instant) {
return new DateTime(Date.from((Instant) value), DateTimeZone.UTC);
} else if (value instanceof String) {
return ES_DATE_FORMAT_FORMATTER.parseDateTime((String) value);
} else {
throw new IllegalArgumentException("Value of invalid type <" + value.getClass().getSimpleName() + "> provided");
}
}
|
@Test
void convertFromOffsetDateTime() {
final OffsetDateTime input = OffsetDateTime.of(2021, 11, 20, 14, 50, 10, 0, ZoneOffset.UTC);
final DateTime output = DateTimeConverter.convertToDateTime(input);
final DateTime expectedOutput = new DateTime(2021, 11, 20, 14, 50, 10, DateTimeZone.UTC);
assertThat(output).isEqualTo(expectedOutput);
}
|
@Override
public Thread newThread(Runnable runnable) {
String name = mPrefix + mThreadNum.getAndIncrement();
Thread ret = new Thread(mGroup, runnable, name, 0);
ret.setDaemon(mDaemon);
return ret;
}
|
@Test
void testNewThread() {
NamedThreadFactory factory = new NamedThreadFactory();
Thread t = factory.newThread(Mockito.mock(Runnable.class));
assertThat(t.getName(), allOf(containsString("pool-"), containsString("-thread-")));
assertFalse(t.isDaemon());
// since security manager is not installed.
assertSame(t.getThreadGroup(), Thread.currentThread().getThreadGroup());
}
|
public Choice<T> or(Choice<T> other) {
checkNotNull(other);
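    // The empty choice contributes nothing, so skip the wrapper and return this instance as-is.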
if (other == none()) {
return this;
} else {
Choice<T> thisChoice = this;
return new Choice<T>() {
@Override
protected Iterator<T> iterator() {
return Iterators.concat(thisChoice.iterator(), other.iterator());
}
@Override
public String toString() {
return String.format("%s.or(%s)", thisChoice, other);
}
};
}
}
|
@Test
public void or() {
assertThat(Choice.of(2).or(Choice.from(ImmutableList.of(1, 3))).asIterable())
.containsExactly(2, 1, 3)
.inOrder();
}
|
@VisibleForTesting
static Properties extractCommonsHikariProperties(Properties properties) {
Properties result = new Properties();
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
String key = (String) entry.getKey();
if (!ALLOWED_SONAR_PROPERTIES.contains(key)) {
if (DEPRECATED_SONAR_PROPERTIES.contains(key)) {
LOG.warn("Property [{}] has no effect as pool connection implementation changed, check 9.7 upgrade notes.", key);
}
continue;
}
if (StringUtils.startsWith(key, SONAR_JDBC)) {
String resolvedKey = toHikariPropertyKey(key);
        String existingValue = (String) result.setProperty(resolvedKey, (String) entry.getValue());
        checkState(existingValue == null || existingValue.equals(entry.getValue()),
          "Duplicate property declaration for resolved jdbc key '%s': conflicting values are '%s' and '%s'", resolvedKey, existingValue, entry.getValue());
}
}
return result;
}
|
@Test
public void logWarningIfDeprecatedPropertyUsed() {
Properties props = new Properties();
props.setProperty("sonar.jdbc.maxIdle", "5");
props.setProperty("sonar.jdbc.minEvictableIdleTimeMillis", "300000");
props.setProperty("sonar.jdbc.timeBetweenEvictionRunsMillis", "1000");
props.setProperty("sonar.jdbc.connectionTimeout", "8000");
DefaultDatabase.extractCommonsHikariProperties(props);
assertThat(logTester.logs())
.contains("Property [sonar.jdbc.maxIdle] has no effect as pool connection implementation changed, check 9.7 upgrade notes.")
.contains("Property [sonar.jdbc.minEvictableIdleTimeMillis] has no effect as pool connection implementation changed, check 9.7 upgrade notes.")
.contains("Property [sonar.jdbc.timeBetweenEvictionRunsMillis] has no effect as pool connection implementation changed, check 9.7 upgrade notes.");
}
|
public static FingerprintTrustManagerFactoryBuilder builder(String algorithm) {
return new FingerprintTrustManagerFactoryBuilder(algorithm);
}
|
@Test
public void testFingerprintWithUnexpectedCharacters() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
FingerprintTrustManagerFactory.builder("SHA-256").fingerprints("00:00:00\n").build();
}
});
}
|