focal_method (string, lengths 13 to 60.9k) | test_case (string, lengths 25 to 109k)
---|---|
public SerializableFunction<Row, T> getFromRowFunction() {
return fromRowFunction;
}
|
@Test
public void testEnumRowToProto() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(EnumMessage.getDescriptor());
SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
assertEquals(ENUM_PROTO.toString(), fromRow.apply(ENUM_ROW).toString());
}
|
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback)
{
//This code path cannot accept content types or accept types that contain
//multipart/related. This is because these types of requests will usually have very large payloads and therefore
//would degrade server performance since RestRequest reads everything into memory.
if (!isMultipart(request, requestContext, callback))
{
_restRestLiServer.handleRequest(request, requestContext, callback);
}
}
|
@SuppressWarnings({"unchecked"})
@Test(dataProvider = "restOrStream")
public void testAsyncServer(final RestOrStream restOrStream) throws Exception
{
final AsyncStatusCollectionResource statusResource = getMockResource(AsyncStatusCollectionResource.class);
statusResource.get(eq(1L), EasyMock.<Callback<Status>> anyObject());
EasyMock.expectLastCall().andAnswer(new IAnswer<Object>()
{
@Override
public Object answer() throws Throwable {
Callback<Status> callback = (Callback<Status>) EasyMock.getCurrentArguments()[1];
Status stat = buildStatusRecord();
callback.onSuccess(stat);
return null;
}
});
replay(statusResource);
final Callback<RestResponse> restResponseCallback = new Callback<RestResponse>()
{
@Override
public void onSuccess(RestResponse restResponse)
{
assertEquals(restResponse.getStatus(), 200);
assertTrue(restResponse.getEntity().length() > 0);
EasyMock.verify(statusResource);
EasyMock.reset(statusResource);
}
@Override
public void onError(Throwable e)
{
fail();
}
};
if (restOrStream == RestOrStream.REST)
{
final RestRequest request = new RestRequestBuilder(new URI("/asyncstatuses/1"))
.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()).build();
_server.handleRequest(request, new RequestContext(), restResponseCallback);
}
else
{
final StreamRequest streamRequest = new StreamRequestBuilder(new URI("/asyncstatuses/1"))
.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()).build(EntityStreams.emptyStream());
final Callback<StreamResponse> callback = new Callback<StreamResponse>()
{
@Override
public void onSuccess(StreamResponse streamResponse)
{
Messages.toRestResponse(streamResponse, new Callback<RestResponse>()
{
@Override
public void onError(Throwable e)
{
Assert.fail();
}
@Override
public void onSuccess(RestResponse result)
{
restResponseCallback.onSuccess(result);
}
});
}
@Override
public void onError(Throwable e)
{
fail();
}
};
_server.handleRequest(streamRequest, new RequestContext(), callback);
}
}
|
public FieldType calcFieldType( String content ) {
final SnippetBuilder.SnippetType snippetType = SnippetBuilder.getType( content );
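// FORALL snippets become FORALL fields; any other multi-parameter snippet is a plain field.
// Single-parameter cells are classified further below (operator, question, or single field).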
if ( snippetType == SnippetBuilder.SnippetType.FORALL ) {
return FieldType.FORALL_FIELD;
} else if ( snippetType != SnippetBuilder.SnippetType.SINGLE ) {
return FieldType.NORMAL_FIELD;
}
for ( String op : operators ) {
if ( content.endsWith( op ) ) {
return FieldType.OPERATOR_FIELD;
}
}
return content.endsWith( "?" ) ? FieldType.QUESTION_FIELD : FieldType.SINGLE_FIELD;
}
|
@Test
public void testIdentifyFieldTypes() {
builder = new LhsBuilder(9, 1, "");
assertThat(builder.calcFieldType("age")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("age <")).isEqualTo(OPERATOR_FIELD);
assertThat(builder.calcFieldType("age < $param")).isEqualTo(NORMAL_FIELD);
assertThat(builder.calcFieldType("forall(||){age < $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("forall(&&){age < $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("forall(,){age < $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("forall(){age < $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("forall(){age < $} && forall(){age == $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("x && forall(){age < $} && forall(){age == $}")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("x && forall(){age < $} && forall(){age == $} && y")).isEqualTo(FORALL_FIELD);
assertThat(builder.calcFieldType("age < $para")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall{||}{age < $}")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall(){}")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall(){age < $")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall(){,")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall({})")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall({}){test})")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall(&&){{}})")).isEqualTo(SINGLE_FIELD);
assertThat(builder.calcFieldType("forall(&&){{})")).isEqualTo(SINGLE_FIELD);
}
|
public <T extends AbstractMessageListenerContainer> T decorateMessageListenerContainer(T container) {
Advice[] advice = prependTracingMessageContainerAdvice(container);
if (advice != null) {
container.setAdviceChain(advice);
}
return container;
}
|
@Test void decorateDirectMessageListenerContainer_prepends_as_first_when_absent() {
DirectMessageListenerContainer listenerContainer = new DirectMessageListenerContainer();
listenerContainer.setAdviceChain(new CacheInterceptor());
assertThat(rabbitTracing.decorateMessageListenerContainer(listenerContainer))
.extracting("adviceChain")
.asInstanceOf(array(Advice[].class))
.hasSize(2)
.matches(adviceArray -> adviceArray[0] instanceof TracingRabbitListenerAdvice);
}
|
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() != 2) {
onInvalidDataReceived(device, data);
return;
}
final int interval = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
onMeasurementIntervalReceived(device, interval);
}
|
@Test
public void onMeasurementIntervalReceived() {
final ProfileReadResponse response = new MeasurementIntervalDataCallback() {
@Override
public void onMeasurementIntervalReceived(@NonNull final BluetoothDevice device, final int interval) {
called = true;
assertEquals("Interval", 60, interval);
}
};
called = false;
final Data data = new Data(new byte[] { 60, 0 });
response.onDataReceived(null, data);
assertTrue(response.isValid());
assertTrue(called);
}
|
@Override
public String toString() {
return topicId + ":" + topic() + "-" + partition();
}
|
@Test
public void testToString() {
assertEquals("vDiRhkpVQgmtSLnsAZx7lA:a_topic_name-1", topicIdPartition0.toString());
assertEquals("vDiRhkpVQgmtSLnsAZx7lA:null-1", topicIdPartitionWithNullTopic0.toString());
}
|
@Override
public Flux<ReactiveRedisConnection.BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getNewName(), "New name must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] newKeyBuf = toByteArray(command.getNewName());
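// Keys that hash to the same cluster slot can be renamed with the native RENAMENX command.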
if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
return super.renameNX(commands);
}
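// Cross-slot rename is emulated: proceed only if the new key does not exist, DUMP the source
// value, RESTORE it under the new key with the remaining TTL, then delete the source key
// once the copy has succeeded.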
return exists(command.getNewName())
.zipWith(read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf))
.filter(newKeyExistsAndDump -> !newKeyExistsAndDump.getT1() && Objects.nonNull(newKeyExistsAndDump.getT2()))
.map(Tuple2::getT2)
.zipWhen(value ->
pTtl(command.getKey())
.filter(Objects::nonNull)
.map(ttl -> Math.max(0, ttl))
.switchIfEmpty(Mono.just(0L))
)
.flatMap(valueAndTtl -> write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1())
.then(Mono.just(true)))
.switchIfEmpty(Mono.just(false))
.doOnSuccess(didRename -> {
if (didRename) {
del(command.getKey());
}
})
.map(didRename -> new BooleanResponse<>(command, didRename));
});
}
|
@Test
public void testRenameNX() {
connection.stringCommands().set(originalKey, value).block();
if (hasTtl) {
connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
}
Integer originalSlot = getSlotForKey(originalKey);
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
Boolean result = connection.keyCommands().renameNX(originalKey, newKey).block();
assertThat(result).isTrue();
assertThat(connection.stringCommands().get(newKey).block()).isEqualTo(value);
if (hasTtl) {
assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
} else {
assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
}
connection.stringCommands().set(originalKey, value).block();
result = connection.keyCommands().renameNX(originalKey, newKey).block();
assertThat(result).isFalse();
}
|
@Override
public CompletableFuture<Boolean> triggerCheckpointAsync(
CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) {
if (!externallyInducedCheckpoints) {
if (isSynchronousSavepoint(checkpointOptions.getCheckpointType())) {
return triggerStopWithSavepointAsync(checkpointMetaData, checkpointOptions);
} else {
return super.triggerCheckpointAsync(checkpointMetaData, checkpointOptions);
}
} else if (checkpointOptions.getCheckpointType().equals(CheckpointType.FULL_CHECKPOINT)) {
// see FLINK-25256
throw new IllegalStateException(
"Using externally induced sources, we can not enforce taking a full checkpoint."
+ "If you are restoring from a snapshot in NO_CLAIM mode, please use"
+ " CLAIM mode.");
} else {
// we do not trigger checkpoints here, we simply state whether we can trigger them
synchronized (lock) {
return CompletableFuture.completedFuture(isRunning());
}
}
}
|
@Test
void testTriggeringStopWithSavepointWithDrain() throws Exception {
SourceFunction<String> testSource = new EmptySource();
CompletableFuture<Boolean> checkpointCompleted = new CompletableFuture<>();
CheckpointResponder checkpointResponder =
new TestCheckpointResponder() {
@Override
public void acknowledgeCheckpoint(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointMetrics checkpointMetrics,
TaskStateSnapshot subtaskState) {
super.acknowledgeCheckpoint(
jobID,
executionAttemptID,
checkpointId,
checkpointMetrics,
subtaskState);
checkpointCompleted.complete(null);
}
};
try (StreamTaskMailboxTestHarness<String> harness =
new StreamTaskMailboxTestHarnessBuilder<>(SourceStreamTask::new, STRING_TYPE_INFO)
.setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
.setCheckpointResponder(checkpointResponder)
.setupOutputForSingletonOperatorChain(new StreamSource<>(testSource))
.build()) {
CompletableFuture<Boolean> triggerResult =
harness.streamTask.triggerCheckpointAsync(
new CheckpointMetaData(2, 2),
CheckpointOptions.alignedNoTimeout(
SavepointType.terminate(SavepointFormatType.CANONICAL),
CheckpointStorageLocationReference.getDefault()));
checkpointCompleted.whenComplete(
(ignored, exception) -> harness.streamTask.notifyCheckpointCompleteAsync(2));
// Run mailbox till the source thread finished and suspend the mailbox
harness.streamTask.runMailboxLoop();
harness.finishProcessing();
assertThat(triggerResult.get()).isTrue();
assertThat(checkpointCompleted.isDone()).isTrue();
}
}
|
public static <T> Global<T> globally() {
return new Global<>();
}
|
@Test
@Category(NeedsRunner.class)
public void testGloballyWithSchemaAggregateFn() {
Collection<Aggregate> elements =
ImmutableList.of(
Aggregate.of(1, 1, 2),
Aggregate.of(2, 1, 3),
Aggregate.of(3, 2, 4),
Aggregate.of(4, 2, 5));
PCollection<Row> aggregate =
pipeline
.apply(Create.of(elements))
.apply(
Group.<Aggregate>globally()
.aggregateField("field1", Sum.ofLongs(), "field1_sum")
.aggregateField("field3", Sum.ofIntegers(), "field3_sum")
.aggregateField("field1", Top.largestLongsFn(1), "field1_top"));
Schema aggregateSchema =
Schema.builder()
.addInt64Field("field1_sum")
.addInt32Field("field3_sum")
.addArrayField("field1_top", FieldType.INT64)
.build();
Row expectedRow = Row.withSchema(aggregateSchema).addValues(10L, 14).addArray(4L).build();
PAssert.that(aggregate).containsInAnyOrder(expectedRow);
pipeline.run();
}
|
static final String generateForFragment(RuleBuilderStep step, Configuration configuration) {
final String fragmentName = step.function();
try {
Template template = configuration.getTemplate(fragmentName);
StringWriter writer = new StringWriter();
Map<String, Object> filteredParams = new HashMap<>();
if (step.parameters() != null) {
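// Blank string values are dropped, values starting with "$" are passed through as variable
// references (with the "$" stripped), all other strings are quoted as literals, and
// non-string values are passed through unchanged.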
for (Map.Entry<String, Object> val : step.parameters().entrySet()) {
if (val.getValue() instanceof String s) {
if (StringUtils.isBlank(s)) {
} else if (s.startsWith("$")) {
filteredParams.put(val.getKey(), s.substring(1));
} else {
filteredParams.put(val.getKey(), "\"" + s + "\"");
}
} else {
filteredParams.put(val.getKey(), val.getValue());
}
}
}
template.process(filteredParams, writer);
writer.close();
return writer.toString();
} catch (TemplateNotFoundException e) {
throw new IllegalArgumentException(f("No template found for fragment %s", fragmentName));
} catch (Exception e) {
throw new IllegalArgumentException("Error converting fragment template to fragment.", e);
}
}
|
@Test
public void generateForFragmentConvertsFreemarkerTemplate() {
RuleBuilderStep step = mock(RuleBuilderStep.class);
when(step.function()).thenReturn("test_fragment1");
Map<String, Object> params = Map.of("field", "my_field");
when(step.parameters()).thenReturn(params);
assertThat(ParserUtil.generateForFragment(step, configuration))
.isEqualTo("let gl2_fragmentvar_v1 = $message.\"my_field\";");
}
|
public static CsvIOParse<Row> parseRows(Schema schema, CSVFormat csvFormat) {
CsvIOParseHelpers.validateCsvFormat(csvFormat);
CsvIOParseHelpers.validateCsvFormatWithSchema(csvFormat, schema);
RowCoder coder = RowCoder.of(schema);
CsvIOParseConfiguration.Builder<Row> builder = CsvIOParseConfiguration.builder();
builder.setCsvFormat(csvFormat).setSchema(schema).setCoder(coder).setFromRowFn(row -> row);
return CsvIOParse.<Row>builder().setConfigBuilder(builder).build();
}
|
@Test
public void givenStringToRecordError_RecordToObjectError_emits() {
Pipeline pipeline = Pipeline.create();
PCollection<String> input =
pipeline.apply(
Create.of("true,\"1.1,3.141592,1,5,foo", "true,1.1,3.141592,this_is_an_error,5,foo"));
Schema schema =
Schema.builder()
.addBooleanField("aBoolean")
.addDoubleField("aDouble")
.addFloatField("aFloat")
.addInt32Field("anInteger")
.addInt64Field("aLong")
.addStringField("aString")
.build();
CsvIOParse<Row> underTest = CsvIO.parseRows(schema, csvFormat().withQuote('"'));
CsvIOParseResult<Row> result = input.apply(underTest);
PAssert.thatSingleton(result.getErrors().apply(Count.globally())).isEqualTo(2L);
PAssert.thatSingleton(
stackTraceContains(result.getErrors(), CsvIOStringToCsvRecord.class.getName()))
.isEqualTo(1L);
PAssert.thatSingleton(
stackTraceContains(result.getErrors(), CsvIORecordToObjects.class.getName()))
.isEqualTo(1L);
pipeline.run();
}
|
public boolean verifyTopicCleanupPolicyOnlyCompact(String topic, String workerTopicConfig,
String topicPurpose) {
Set<String> cleanupPolicies = topicCleanupPolicy(topic);
if (cleanupPolicies.isEmpty()) {
log.info("Unable to use admin client to verify the cleanup policy of '{}' "
+ "topic is '{}', either because the broker is an older "
+ "version or because the Kafka principal used for Connect "
+ "internal topics does not have the required permission to "
+ "describe topic configurations.", topic, TopicConfig.CLEANUP_POLICY_COMPACT);
return false;
}
Set<String> expectedPolicies = Collections.singleton(TopicConfig.CLEANUP_POLICY_COMPACT);
if (!cleanupPolicies.equals(expectedPolicies)) {
String expectedPolicyStr = String.join(",", expectedPolicies);
String cleanupPolicyStr = String.join(",", cleanupPolicies);
String msg = String.format("Topic '%s' supplied via the '%s' property is required "
+ "to have '%s=%s' to guarantee consistency and durability of "
+ "%s, but found the topic currently has '%s=%s'. Continuing would likely "
+ "result in eventually losing %s and problems restarting this Connect "
+ "cluster in the future. Change the '%s' property in the "
+ "Connect worker configurations to use a topic with '%s=%s'.",
topic, workerTopicConfig, TopicConfig.CLEANUP_POLICY_CONFIG, expectedPolicyStr,
topicPurpose, TopicConfig.CLEANUP_POLICY_CONFIG, cleanupPolicyStr, topicPurpose,
workerTopicConfig, TopicConfig.CLEANUP_POLICY_CONFIG, expectedPolicyStr);
throw new ConfigException(msg);
}
return true;
}
|
@Test
public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPolicy() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Collections.singletonMap("cleanup.policy", "delete,compact");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"));
assertTrue(e.getMessage().contains("to guarantee consistency and durability"));
}
}
|
public static String extractMulti(Pattern pattern, CharSequence content, String template) {
if (null == content || null == pattern || null == template) {
return null;
}
// Extract the group numbers referenced in the template
final TreeSet<Integer> varNums = new TreeSet<>((o1, o2) -> ObjectUtil.compare(o2, o1));
final Matcher matcherForTemplate = PatternPool.GROUP_VAR.matcher(template);
while (matcherForTemplate.find()) {
varNums.add(Integer.parseInt(matcherForTemplate.group(1)));
}
final Matcher matcher = pattern.matcher(content);
if (matcher.find()) {
for (Integer group : varNums) {
template = template.replace("$" + group, matcher.group(group));
}
return template;
}
return null;
}
|
@Test
public void extractMultiTest() {
// Extract multiple groups and join them together
final String resultExtractMulti = ReUtil.extractMulti("(\\w)aa(\\w)", content, "$1-$2");
assertEquals("Z-a", resultExtractMulti);
}
|
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@TestTemplate
public void testUnpartitionedHours() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
HoursFunction.TimestampToHoursFunction function = new HoursFunction.TimestampToHoursFunction();
UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
Predicate predicate =
new Predicate(
">=",
expressions(
udf, intLit(timestampStrToHourOrdinal("2017-11-22T06:02:09.243857+00:00"))));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT GTEQ
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
|
public SerializableFunction<Row, T> getFromRowFunction() {
return fromRowFunction;
}
|
@Test
public void testOneOfRowToProto() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(OneOf.getDescriptor());
SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
assertEquals(ONEOF_PROTO_INT32.toString(), fromRow.apply(ONEOF_ROW_INT32).toString());
assertEquals(ONEOF_PROTO_BOOL.toString(), fromRow.apply(ONEOF_ROW_BOOL).toString());
assertEquals(ONEOF_PROTO_STRING.toString(), fromRow.apply(ONEOF_ROW_STRING).toString());
assertEquals(ONEOF_PROTO_PRIMITIVE.toString(), fromRow.apply(ONEOF_ROW_PRIMITIVE).toString());
}
|
@Override
public void unregister(final Collection<TopicPartition> revokedChangelogs) {
// Only changelogs that are initialized have been added to the restore consumer's assignment
final List<TopicPartition> revokedInitializedChangelogs = new ArrayList<>();
for (final TopicPartition partition : revokedChangelogs) {
final ChangelogMetadata changelogMetadata = changelogs.remove(partition);
if (changelogMetadata != null) {
// if the changelog is still in REGISTERED, it means it has not initialized and started
// restoring yet, and hence we should not try to remove the changelog partition
if (!changelogMetadata.state().equals(ChangelogState.REGISTERED)) {
revokedInitializedChangelogs.add(partition);
// if the changelog is not in RESTORING, it means
// the corresponding onRestoreStart was not called; in this case
// we should not call onRestoreSuspended either
if (changelogMetadata.state().equals(ChangelogState.RESTORING)) {
final String storeName = changelogMetadata.storeMetadata.store().name();
if (changelogMetadata.stateManager.taskType() == Task.TaskType.ACTIVE) {
try {
stateRestoreListener.onRestoreSuspended(partition, storeName, changelogMetadata.totalRestored);
} catch (final Exception e) {
throw new StreamsException("State restore listener failed on restore paused", e);
}
} else if (changelogMetadata.stateManager.taskType() == TaskType.STANDBY) {
final StateStoreMetadata storeMetadata = changelogMetadata.stateManager.storeMetadata(partition);
// endOffset and storeOffset may be unknown at this point
final long storeOffset = storeMetadata.offset() != null ? storeMetadata.offset() : -1;
final long endOffset = storeMetadata.endOffset() != null ? storeMetadata.endOffset() : -1;
// Unregistering running standby tasks means the task has been promoted to active.
final StandbyUpdateListener.SuspendReason suspendReason =
changelogMetadata.stateManager.taskState() == Task.State.RUNNING
? StandbyUpdateListener.SuspendReason.PROMOTED
: StandbyUpdateListener.SuspendReason.MIGRATED;
try {
standbyUpdateListener.onUpdateSuspended(partition, storeName, storeOffset, endOffset, suspendReason);
} catch (final Exception e) {
throw new StreamsException("Standby updater listener failed on update suspended", e);
}
}
}
}
changelogMetadata.clear();
} else {
log.debug("Changelog partition {} could not be found," +
" it could be already cleaned up during the handling" +
" of task corruption and never restore again", partition);
}
}
removeChangelogsFromRestoreConsumer(revokedInitializedChangelogs);
if (changelogs.isEmpty()) {
state = ChangelogReaderState.ACTIVE_RESTORING;
}
}
|
@Test
public void shouldNotThrowOnUnknownRevokedPartition() {
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StoreChangelogReader.class)) {
appender.setClassLogger(StoreChangelogReader.class, Level.DEBUG);
changelogReader.unregister(Collections.singletonList(new TopicPartition("unknown", 0)));
assertThat(
appender.getMessages(),
hasItem("test-reader Changelog partition unknown-0 could not be found," +
" it could be already cleaned up during the handling of task corruption and never restore again")
);
}
}
|
@Override
public ServletRequest getRequest(javax.servlet.ServletRequest servletRequest) {
return new JettyRequest(servletRequest);
}
|
@Test
public void shouldGetJettyRequest() {
ServletRequest request = new JettyServletHelper().getRequest(mock(Request.class));
assertThat(request instanceof JettyRequest, is(true));
}
|
public static boolean isNotBlank(String str) {
return !isBlank(str);
}
|
@Test
public void testIsNotBlank() {
Assert.assertFalse(EagleEyeCoreUtils.isNotBlank(""));
Assert.assertFalse(EagleEyeCoreUtils.isNotBlank(" "));
Assert.assertFalse(EagleEyeCoreUtils.isNotBlank(null));
Assert.assertTrue(EagleEyeCoreUtils.isNotBlank("foo"));
}
|
public static int toInt(final String str, final int defaultValue) {
if (str == null) {
return defaultValue;
}
try {
return Integer.parseInt(str);
} catch (NumberFormatException nfe) {
return defaultValue;
}
}
|
@Test
public void testToInReturnDefaultValueWithFormatIsInvalid() {
Assertions.assertEquals(10, NumberUtils.toInt("nine", 10));
}
|
public static void main(String[] args) {
try {
FSConfigToCSConfigArgumentHandler fsConfigConversionArgumentHandler =
new FSConfigToCSConfigArgumentHandler();
int exitCode =
fsConfigConversionArgumentHandler.parseAndConvert(args);
if (exitCode != 0) {
LOG.error(FATAL,
"Error while starting FS configuration conversion, " +
"see previous error messages for details!");
}
exitFunction.accept(exitCode);
} catch (Throwable t) {
LOG.error(FATAL,
"Error while starting FS configuration conversion!", t);
exitFunction.accept(-1);
}
}
|
@Test
public void testConvertFSConfigurationWithConsoleParam()
throws Exception {
setupFSConfigConversionFiles();
FSConfigToCSConfigConverterMain.main(new String[] {
"-p",
"-e",
"-y", YARN_SITE_XML,
"-f", FS_ALLOC_FILE,
"-r", CONVERSION_RULES_FILE});
String stdout = converterTestCommons.getStdOutContent().toString();
assertTrue("Stdout doesn't contain yarn-site.xml",
stdout.contains("======= yarn-site.xml ======="));
assertTrue("Stdout doesn't contain capacity-scheduler.xml",
stdout.contains("======= capacity-scheduler.xml ======="));
assertTrue("Stdout doesn't contain mapping-rules.json",
stdout.contains("======= mapping-rules.json ======="));
assertEquals("Exit code", 0, exitFunc.exitCode);
}
|
public synchronized long nextId() {
long timestamp = genTime();
if (timestamp < this.lastTimestamp) {
if (this.lastTimestamp - timestamp < timeOffset) {
// Tolerate the configured amount of clock drift to avoid issues caused by NTP time adjustments
timestamp = lastTimestamp;
} else {
// The server clock has moved backwards beyond the tolerated offset; refuse to generate an id
throw new IllegalStateException(StrUtil.format("Clock moved backwards. Refusing to generate id for {}ms", lastTimestamp - timestamp));
}
}
if (timestamp == this.lastTimestamp) {
final long sequence = (this.sequence + 1) & SEQUENCE_MASK;
if (sequence == 0) {
timestamp = tilNextMillis(lastTimestamp);
}
this.sequence = sequence;
} else {
// issue#I51EJY
if (randomSequenceLimit > 1) {
sequence = RandomUtil.randomLong(randomSequenceLimit);
} else {
sequence = 0L;
}
}
lastTimestamp = timestamp;
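// Compose the 64-bit id: timestamp delta | data center id | worker id | sequence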
return ((timestamp - twepoch) << TIMESTAMP_LEFT_SHIFT)
| (dataCenterId << DATA_CENTER_ID_SHIFT)
| (workerId << WORKER_ID_SHIFT)
| sequence;
}
|
@Test
public void snowflakeTest1(){
// Build a Snowflake instance, providing the worker ID and data center ID
final Snowflake idWorker = new Snowflake(0, 0);
final long nextId = idWorker.nextId();
assertTrue(nextId > 0);
}
|
@Override
public double getWeight(int source, int target) {
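// Scan the adjacency list of the source vertex; in an undirected graph either endpoint of an
// edge may match the target. Returns 0.0 when no such edge exists.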
if (digraph) {
for (Edge edge : graph[source]) {
if (edge.v2 == target) {
return edge.weight;
}
}
} else {
for (Edge edge : graph[source]) {
if ((edge.v1 == source && edge.v2 == target) || (edge.v2 == source && edge.v1 == target)) {
return edge.weight;
}
}
}
return 0.0;
}
|
@Test
public void testGetWeight() {
System.out.println("getWeight");
assertEquals(0.0, g1.getWeight(1, 2), 1E-10);
assertEquals(0.0, g1.getWeight(1, 1), 1E-10);
assertEquals(1.0, g2.getWeight(1, 2), 1E-10);
assertEquals(1.0, g2.getWeight(2, 1), 1E-10);
assertEquals(1.0, g3.getWeight(1, 2), 1E-10);
assertEquals(1.0, g3.getWeight(2, 1), 1E-10);
assertEquals(1.0, g3.getWeight(3, 2), 1E-10);
assertEquals(1.0, g3.getWeight(2, 3), 1E-10);
assertEquals(1.0, g3.getWeight(1, 3), 1E-10);
assertEquals(1.0, g3.getWeight(3, 1), 1E-10);
assertEquals(0.0, g3.getWeight(4, 2), 1E-10);
assertEquals(0.0, g4.getWeight(1, 4), 1E-10);
g4.addEdge(1, 4);
assertEquals(1.0, g4.getWeight(1, 4), 1E-10);
assertEquals(0.0, g5.getWeight(1, 2), 1E-10);
assertEquals(0.0, g5.getWeight(1, 1), 1E-10);
assertEquals(1.0, g6.getWeight(1, 2), 1E-10);
assertEquals(1.0, g6.getWeight(2, 1), 1E-10);
assertEquals(1.0, g7.getWeight(1, 2), 1E-10);
assertEquals(1.0, g7.getWeight(2, 1), 1E-10);
assertEquals(1.0, g7.getWeight(3, 2), 1E-10);
assertEquals(1.0, g7.getWeight(2, 3), 1E-10);
assertEquals(1.0, g7.getWeight(1, 3), 1E-10);
assertEquals(1.0, g7.getWeight(3, 1), 1E-10);
assertEquals(0.0, g7.getWeight(4, 2), 1E-10);
assertEquals(1.0, g8.getWeight(1, 4), 1E-10);
}
|
public Object stringToKey(String s) {
char type = s.charAt(0);
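// Keys are encoded as "<type-char>:<value>"; the leading character selects the type and the
// remainder after the delimiter is parsed accordingly.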
switch (type) {
case 'S':
// this is a String, NOT a Short. For Short see case 'X'.
return s.substring(2);
case 'I':
// This is an Integer
return Integer.valueOf(s.substring(2));
case 'Y':
// This is a BYTE
return Byte.valueOf(s.substring(2));
case 'L':
// This is a Long
return Long.valueOf(s.substring(2));
case 'X':
// This is a SHORT
return Short.valueOf(s.substring(2));
case 'D':
// This is a Double
return Double.valueOf(s.substring(2));
case 'F':
// This is a Float
return Float.valueOf(s.substring(2));
case 'B':
// This is a Boolean, NOT a Byte. For Byte see case 'Y'.
return Boolean.valueOf(s.substring(2));
case 'C':
// This is a Character
return Character.valueOf(s.charAt(2));
case 'U':
// This is a java.util.UUID
return UUID.fromString(s.substring(2));
case 'A':
// This is an array of bytes encoded as a Base64 string
return Base64.getDecoder().decode(s.substring(2));
case 'T':
// this is a custom Transformable or a type with a registered Transformer
int indexOfSecondDelimiter = s.indexOf(':', 2);
String keyClassName = s.substring(2, indexOfSecondDelimiter);
String keyAsString = s.substring(indexOfSecondDelimiter + 1);
Transformer t = getTransformer(keyClassName);
if (t != null) {
return t.fromString(keyAsString);
} else {
throw CONTAINER.noTransformerForKey(keyClassName);
}
}
throw new CacheException("Unknown key type metadata: " + type);
}
|
@Test(expectedExceptions = CacheException.class)
public void testStringToKeyWithInvalidTransformer() {
keyTransformationHandler.stringToKey("T:org.infinispan.InexistentTransformer:key1");
}
|
public void transmit(final int msgTypeId, final DirectBuffer srcBuffer, final int srcIndex, final int length)
{
checkTypeId(msgTypeId);
checkMessageLength(length);
final AtomicBuffer buffer = this.buffer;
long currentTail = buffer.getLong(tailCounterIndex);
int recordOffset = (int)currentTail & (capacity - 1);
final int recordLength = HEADER_LENGTH + length;
final int recordLengthAligned = BitUtil.align(recordLength, RECORD_ALIGNMENT);
final long newTail = currentTail + recordLengthAligned;
final int toEndOfBuffer = capacity - recordOffset;
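// If the record would not fit before the end of the buffer, insert a padding record and wrap
// the new record around to the start of the buffer.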
if (toEndOfBuffer < recordLengthAligned)
{
signalTailIntent(buffer, newTail + toEndOfBuffer);
insertPaddingRecord(buffer, recordOffset, toEndOfBuffer);
currentTail += toEndOfBuffer;
recordOffset = 0;
}
else
{
signalTailIntent(buffer, newTail);
}
buffer.putInt(lengthOffset(recordOffset), recordLength);
buffer.putInt(typeOffset(recordOffset), msgTypeId);
buffer.putBytes(msgOffset(recordOffset), srcBuffer, srcIndex, length);
buffer.putLongOrdered(latestCounterIndex, currentTail);
buffer.putLongOrdered(tailCounterIndex, currentTail + recordLengthAligned);
}
|
@Test
void shouldThrowExceptionWhenMessageTypeIdInvalid()
{
final int invalidMsgId = -1;
final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[1024]);
assertThrows(IllegalArgumentException.class, () ->
broadcastTransmitter.transmit(invalidMsgId, srcBuffer, 0, 32));
}
|
@Override
public Collection<PluginInfo> getPluginInfos() {
checkState(started.get(), NOT_STARTED_YET);
return Set.copyOf(pluginInfosByKeys.values());
}
|
@Test
public void getPluginInfos_throws_ISE_if_repo_is_not_started() {
assertThatThrownBy(() -> underTest.getPluginInfos())
.isInstanceOf(IllegalStateException.class)
.hasMessage("not started yet");
}
|
public void contains(@Nullable Object rowKey, @Nullable Object columnKey) {
if (!checkNotNull(actual).contains(rowKey, columnKey)) {
/*
* TODO(cpovirk): Consider including information about whether any cell with the given row
* *or* column was present.
*/
failWithActual(
simpleFact("expected to contain mapping for row-column key pair"),
fact("row key", rowKey),
fact("column key", columnKey));
}
}
|
@Test
public void contains() {
ImmutableTable<String, String, String> table = ImmutableTable.of("row", "col", "val");
assertThat(table).contains("row", "col");
}
|
@Override
public int chown(String path, long uid, long gid) {
return AlluxioFuseUtils.call(LOG, () -> chownInternal(path, uid, gid),
FuseConstants.FUSE_CHOWN, "path=%s,uid=%d,gid=%d", path, uid, gid);
}
|
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu",
comment = "waiting on security metadata to be implemented in Dora")
@Ignore
public void chown() throws Exception {
Optional<Long> uid = AlluxioFuseUtils.getUid(System.getProperty("user.name"));
// avoid using the launch user
if (uid.isPresent() && uid.get().equals(AlluxioFuseUtils.getSystemUid())) {
uid = Optional.of(uid.get() + 1);
}
assertTrue(uid.isPresent());
Optional<String> userName = AlluxioFuseUtils.getUserName(uid.get());
if (!userName.isPresent()) {
// skip this case for such an environment
return;
}
Optional<Long> gid = AlluxioFuseUtils.getGidFromUserName(userName.get());
assertTrue(gid.isPresent());
URIStatus status = mock(URIStatus.class);
when(status.getOwner()).thenReturn("user");
when(status.getGroup()).thenReturn("group");
when(mFileSystem.getStatus(any(AlluxioURI.class))).thenReturn(status);
mFuseFs.chown("/foo/bar", uid.get(), gid.get());
Optional<String> groupName = AlluxioFuseUtils.getGroupName(gid.get());
assertTrue(groupName.isPresent());
AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
SetAttributePOptions options =
SetAttributePOptions.newBuilder().setGroup(groupName.get())
.setOwner(userName.get()).build();
verify(mFileSystem).setAttribute(expectedPath, options);
}
|
public static Slime jsonToSlimeOrThrow(String json) {
return jsonToSlimeOrThrow(json.getBytes(StandardCharsets.UTF_8));
}
|
@Test
public void test_json_to_slime_or_throw() {
Slime slime = SlimeUtils.jsonToSlimeOrThrow("{\"foo\":\"foobie\",\"bar\":{}}");
assertEquals("foobie", slime.get().field("foo").asString());
assertTrue(slime.get().field("bar").valid());
}
|
@Udf
public String concatWS(
@UdfParameter(description = "Separator string and values to join") final String... inputs) {
if (inputs == null || inputs.length < 2) {
throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
}
final String separator = inputs[0];
if (separator == null) {
return null;
}
return Arrays.stream(inputs, 1,
inputs.length)
.filter(Objects::nonNull)
.collect(Collectors.joining(separator));
}
|
@Test
public void shouldFailIfOnlySeparatorStringInput() {
// When:
final KsqlException e = assertThrows(KsqlFunctionException.class, () -> udf.concatWS("SEP"));
// Then:
assertThat(e.getMessage(), containsString("expects at least two input arguments"));
}
|
@ApiOperation(value = "List engine properties", tags = { "Engine" })
@ApiResponses(value = {
@ApiResponse(code = 200, message = "Indicates the properties are returned."),
})
@GetMapping(value = "/management/properties", produces = "application/json")
public Map<String, String> getProperties() {
if (restApiInterceptor != null) {
restApiInterceptor.accessManagementInfo();
}
return managementService.getProperties();
}
|
@Test
public void testGetProperties() throws Exception {
CloseableHttpResponse response = executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROPERTIES_COLLECTION)),
HttpStatus.SC_OK);
Map<String, String> properties = managementService.getProperties();
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThat(responseNode).hasSize(properties.size());
Iterator<Map.Entry<String, JsonNode>> nodes = responseNode.fields();
Map.Entry<String, JsonNode> node = null;
while (nodes.hasNext()) {
node = nodes.next();
String propValue = properties.get(node.getKey());
assertThat(propValue).isNotNull();
assertThat(node.getValue().textValue()).isEqualTo(propValue);
}
}
|
@ApiOperation(value = "Assign dashboard to edge (assignDashboardToEdge)",
notes = "Creates assignment of an existing dashboard to an instance of The Edge. " +
EDGE_ASSIGN_ASYNC_FIRST_STEP_DESCRIPTION +
"Second, remote edge service will receive a copy of assignment dashboard " +
EDGE_ASSIGN_RECEIVE_STEP_DESCRIPTION +
"Third, once dashboard will be delivered to edge service, it's going to be available for usage on remote edge instance." +
TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/edge/{edgeId}/dashboard/{dashboardId}", method = RequestMethod.POST)
@ResponseBody
public Dashboard assignDashboardToEdge(@PathVariable("edgeId") String strEdgeId,
@PathVariable(DASHBOARD_ID) String strDashboardId) throws ThingsboardException {
checkParameter("edgeId", strEdgeId);
checkParameter(DASHBOARD_ID, strDashboardId);
EdgeId edgeId = new EdgeId(toUUID(strEdgeId));
Edge edge = checkEdgeId(edgeId, Operation.READ);
DashboardId dashboardId = new DashboardId(toUUID(strDashboardId));
checkDashboardId(dashboardId, Operation.READ);
return tbDashboardService.asignDashboardToEdge(getTenantId(), dashboardId, edge, getCurrentUser());
}
|
@Test
public void testAssignDashboardToEdge() throws Exception {
Edge edge = constructEdge("My edge", "default");
Edge savedEdge = doPost("/api/edge", edge, Edge.class);
Dashboard dashboard = new Dashboard();
dashboard.setTitle("My dashboard");
Dashboard savedDashboard = doPost("/api/dashboard", dashboard, Dashboard.class);
Mockito.reset(tbClusterService, auditLogService);
doPost("/api/edge/" + savedEdge.getId().getId().toString()
+ "/dashboard/" + savedDashboard.getId().getId().toString(), Dashboard.class);
testNotifyEntityAllOneTime(savedDashboard, savedDashboard.getId(), savedDashboard.getId(), savedTenant.getId(),
tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.ASSIGNED_TO_EDGE,
savedDashboard.getId().getId().toString(), savedEdge.getId().getId().toString(), savedEdge.getName());
PageData<Dashboard> pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId().toString() + "/dashboards?",
new TypeReference<PageData<Dashboard>>() {
}, new PageLink(100));
Assert.assertEquals(1, pageData.getData().size());
doDelete("/api/edge/" + savedEdge.getId().getId().toString()
+ "/dashboard/" + savedDashboard.getId().getId().toString(), Dashboard.class);
pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId().toString() + "/dashboards?",
new TypeReference<PageData<Dashboard>>() {
}, new PageLink(100));
Assert.assertEquals(0, pageData.getData().size());
}
|
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
@Test
public void shouldNotAllowNullValueJoinerOnJoinWithGlobalTableWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.join(
testGlobalTable,
MockMapper.selectValueMapper(),
(ValueJoiner<? super String, ? super String, ?>) null,
Named.as("name")));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
public synchronized @Nullable WorkItemServiceState reportSuccess() throws IOException {
checkState(!finalStateSent, "cannot reportSuccess after sending a final state");
checkState(worker != null, "setWorker should be called before reportSuccess");
if (wasAskedToAbort) {
LOG.info("Service already asked to abort work item, not reporting ignored progress.");
return null;
}
WorkItemStatus status = createStatusUpdate(true);
if (worker instanceof SourceOperationExecutor) {
// TODO: Find out a generic way for the DataflowWorkExecutor to report work-specific results
// into the work update.
SourceOperationResponse response = ((SourceOperationExecutor) worker).getResponse();
if (response != null) {
status.setSourceOperationResponse(response);
}
}
LOG.info("Success processing work item {}", uniqueWorkId());
return execute(status);
}
|
@Test
public void reportSuccessBeforeSetWorker() throws IOException {
thrown.expect(IllegalStateException.class);
thrown.expectMessage("setWorker");
thrown.expectMessage("reportSuccess");
statusClient.reportSuccess();
}
|
@Override
public ScannerReport.Metadata readMetadata() {
ensureInitialized();
if (this.metadata == null) {
this.metadata = delegate.readMetadata();
}
return this.metadata;
}
|
@Test
public void readMetadata_throws_ISE_if_no_metadata() {
assertThatThrownBy(() -> underTest.readMetadata())
.isInstanceOf(IllegalStateException.class);
}
|
public Column getColumn(String value) {
Matcher m = PATTERN.matcher(value);
if (!m.matches()) {
throw new IllegalArgumentException("value " + value + " is not a valid column definition");
}
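// Group 1 holds the column name and group 6 the optional cell type (defaults to "String");
// groups 4 and 7 mark array syntax, in which case the column is wrapped in an ArrayColumn.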
String name = m.group(1);
String type = m.group(6);
type = type == null ? "String" : type;
boolean array = (m.group(4) != null) || (m.group(7) != null);
if (array) {
return new ArrayColumn(name,
createColumn(name,
type));
}
return createColumn(name,
type);
}
|
@Test
public void testGetDollarColumn() {
ColumnFactory f = new ColumnFactory();
Column column = f.getColumn("$column");
assertThat(column instanceof StringColumn).isTrue();
assertThat(column.getName()).isEqualTo("$column");
assertThat(column.getCellType()).isEqualTo("StringCell");
}
|
@Override
public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs,
ListConsumerGroupOffsetsOptions options) {
SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> future =
ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet());
ListConsumerGroupOffsetsHandler handler =
new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListConsumerGroupOffsetsResult(future.all());
}
|
@Test
public void testBatchedListConsumerGroupOffsetsWithNoFindCoordinatorBatching() throws Exception {
Cluster cluster = mockCluster(1, 0);
Time time = new MockTime();
Map<String, ListConsumerGroupOffsetsSpec> groupSpecs = batchedListConsumerGroupOffsetsSpec();
ApiVersion findCoordinatorV3 = new ApiVersion()
.setApiKey(ApiKeys.FIND_COORDINATOR.id)
.setMinVersion((short) 0)
.setMaxVersion((short) 3);
ApiVersion offsetFetchV7 = new ApiVersion()
.setApiKey(ApiKeys.OFFSET_FETCH.id)
.setMinVersion((short) 0)
.setMaxVersion((short) 7);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, offsetFetchV7)));
env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs);
// Fail the first request in order to ensure that the group is not batched when retried.
sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.COORDINATOR_LOAD_IN_PROGRESS);
sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE);
sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE);
verifyListOffsetsForMultipleGroups(groupSpecs, result);
}
}
|
boolean isCausedBySecurity(Throwable e) {
if (e == null) {
return false;
}
return (e instanceof GeneralSecurityException) || isCausedBySecurity(e.getCause());
}
|
@Test
void shouldReturnFalseIfNotCausedBySecurity() {
Exception exception = new Exception(new IOException());
assertThat(agentController.isCausedBySecurity(exception)).isFalse();
}
|
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
carMaxSpeedEnc.setDecimal(false, edgeId, edgeIntAccess, getMaxSpeed(way, false));
carMaxSpeedEnc.setDecimal(true, edgeId, edgeIntAccess, getMaxSpeed(way, true));
}
|
@Test
void countryRule() {
DecimalEncodedValue maxSpeedEnc = MaxSpeed.create();
maxSpeedEnc.init(new EncodedValue.InitializerConfig());
OSMMaxSpeedParser parser = new OSMMaxSpeedParser(maxSpeedEnc);
IntsRef relFlags = new IntsRef(2);
ReaderWay way = new ReaderWay(29L);
way.setTag("highway", "primary");
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
int edgeId = 0;
way.setTag("maxspeed", "30");
parser.handleWayTags(edgeId, edgeIntAccess, way, relFlags);
assertEquals(30, maxSpeedEnc.getDecimal(false, edgeId, edgeIntAccess), .1);
// different direction
edgeIntAccess = new ArrayEdgeIntAccess(1);
way = new ReaderWay(29L);
way.setTag("highway", "primary");
way.setTag("maxspeed:forward", "30");
way.setTag("maxspeed:backward", "40");
parser.handleWayTags(edgeId, edgeIntAccess, way, relFlags);
assertEquals(30, maxSpeedEnc.getDecimal(false, edgeId, edgeIntAccess), .1);
assertEquals(40, maxSpeedEnc.getDecimal(true, edgeId, edgeIntAccess), .1);
}
|
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
|
@Test
public void shouldWarnAddDuplicateTableWithoutReplace() {
// Given:
givenCreateTable();
cmdExec.execute(SQL_TEXT, createTable, false, NO_QUERY_SOURCES);
// When:
givenCreateTable();
final DdlCommandResult result = cmdExec.execute(SQL_TEXT, createTable,
false, NO_QUERY_SOURCES);
// Then:
assertThat("Expected successful execution", result.isSuccess());
assertThat(result.getMessage(), containsString("A table with the same name already exists"));
}
|
public boolean createTable(CreateTableStmt stmt, List<Column> partitionColumns) throws DdlException {
String dbName = stmt.getDbName();
String tableName = stmt.getTableName();
Map<String, String> properties = stmt.getProperties() != null ? stmt.getProperties() : new HashMap<>();
Path tablePath = null;
boolean tableLocationExists = false;
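// Managed tables either use the default warehouse location or a user-supplied location that
// must point at an empty (or not yet existing) directory; external tables reuse an existing
// location as-is.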
if (!stmt.isExternal()) {
checkLocationProperties(properties);
if (!Strings.isNullOrEmpty(properties.get(LOCATION_PROPERTY))) {
String tableLocationWithUserAssign = properties.get(LOCATION_PROPERTY);
tablePath = new Path(tableLocationWithUserAssign);
if (pathExists(tablePath, hadoopConf)) {
tableLocationExists = true;
if (!isEmpty(tablePath, hadoopConf)) {
throw new StarRocksConnectorException("not support creating table under non-empty directory: %s",
tableLocationWithUserAssign);
}
}
} else {
tablePath = getDefaultLocation(dbName, tableName);
}
} else {
// checkExternalLocationProperties(properties);
if (properties.containsKey(EXTERNAL_LOCATION_PROPERTY)) {
tablePath = new Path(properties.get(EXTERNAL_LOCATION_PROPERTY));
} else if (properties.containsKey(LOCATION_PROPERTY)) {
tablePath = new Path(properties.get(LOCATION_PROPERTY));
}
tableLocationExists = true;
}
HiveStorageFormat.check(properties);
List<String> partitionColNames;
if (partitionColumns.isEmpty()) {
partitionColNames = stmt.getPartitionDesc() != null ?
((ListPartitionDesc) stmt.getPartitionDesc()).getPartitionColNames() : new ArrayList<>();
} else {
partitionColNames = partitionColumns.stream().map(Column::getName).collect(Collectors.toList());
}
// default is managed table
HiveTable.HiveTableType tableType = HiveTable.HiveTableType.MANAGED_TABLE;
if (stmt.isExternal()) {
tableType = HiveTable.HiveTableType.EXTERNAL_TABLE;
}
HiveTable.Builder builder = HiveTable.builder()
.setId(ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt())
.setTableName(tableName)
.setCatalogName(catalogName)
.setResourceName(toResourceName(catalogName, "hive"))
.setHiveDbName(dbName)
.setHiveTableName(tableName)
.setPartitionColumnNames(partitionColNames)
.setDataColumnNames(stmt.getColumns().stream()
.map(Column::getName)
.collect(Collectors.toList()).subList(0, stmt.getColumns().size() - partitionColNames.size()))
.setFullSchema(stmt.getColumns())
.setTableLocation(tablePath == null ? null : tablePath.toString())
.setProperties(stmt.getProperties())
.setStorageFormat(HiveStorageFormat.get(properties.getOrDefault(FILE_FORMAT, "parquet")))
.setCreateTime(System.currentTimeMillis())
.setHiveTableType(tableType);
Table table = builder.build();
try {
if (!tableLocationExists) {
createDirectory(tablePath, hadoopConf);
}
metastore.createTable(dbName, table);
} catch (Exception e) {
LOG.error("Failed to create table {}.{}", dbName, tableName);
boolean shouldDelete;
try {
if (tableExists(dbName, tableName)) {
LOG.warn("Table {}.{} already exists. But some error occur such as accessing meta service timeout",
dbName, table, e);
return true;
}
FileSystem fileSystem = FileSystem.get(URI.create(tablePath.toString()), hadoopConf);
shouldDelete = !fileSystem.listLocatedStatus(tablePath).hasNext() && !tableLocationExists;
if (shouldDelete) {
fileSystem.delete(tablePath);
}
} catch (Exception e1) {
LOG.error("Failed to delete table location {}", tablePath, e);
}
throw new DdlException(String.format("Failed to create table %s.%s. msg: %s", dbName, tableName, e.getMessage()));
}
return true;
}
|
@Test
public void testCreateTableWithLocation() throws DdlException {
new MockUp<HiveWriteUtils>() {
@Mock
public void createDirectory(Path path, Configuration conf) {
}
@Mock
public boolean pathExists(Path path, Configuration conf) {
return true;
}
@Mock
public boolean isEmpty(Path path, Configuration conf) {
return true;
}
};
HiveMetastoreOperations mockedHmsOps = new HiveMetastoreOperations(cachingHiveMetastore, true,
new Configuration(), MetastoreType.HMS, "hive_catalog") {
@Override
public Path getDefaultLocation(String dbName, String tableName) {
return new Path("mytable_locatino");
}
};
Map<String, String> properties = Maps.newHashMap();
properties.put("location", "hdfs://path_to_file/file_name");
CreateTableStmt stmt = new CreateTableStmt(
false,
false,
new TableName("hive_catalog", "hive_db", "hive_table"),
Lists.newArrayList(
new ColumnDef("c1", TypeDef.create(PrimitiveType.INT)),
new ColumnDef("p1", TypeDef.create(PrimitiveType.INT))),
"hive",
null,
new ListPartitionDesc(Lists.newArrayList("p1"), new ArrayList<>()),
null,
properties,
new HashMap<>(),
"my table comment");
List<Column> columns = stmt.getColumnDefs().stream().map(def -> def.toColumn(null)).collect(Collectors.toList());
stmt.setColumns(columns);
Assert.assertTrue(mockedHmsOps.createTable(stmt));
}
|
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.0");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
} else {
boolean handledByExtension = false;
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.importExtensionData(name, reader);
handledByExtension = true;
break;
}
}
if (!handledByExtension) {
// unknown token, skip it
reader.skipValue();
}
}
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
continue;
}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
}
|
@Test
public void testImportBlacklistedSites() throws IOException {
BlacklistedSite site1 = new BlacklistedSite();
site1.setId(1L);
site1.setUri("http://foo.com");
BlacklistedSite site2 = new BlacklistedSite();
site2.setId(2L);
site2.setUri("http://bar.com");
BlacklistedSite site3 = new BlacklistedSite();
site3.setId(3L);
site3.setUri("http://baz.com");
String configJson = "{" +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [" +
"{\"id\":1,\"uri\":\"http://foo.com\"}," +
"{\"id\":2,\"uri\":\"http://bar.com\"}," +
"{\"id\":3,\"uri\":\"http://baz.com\"}" +
" ]" +
"}";
System.err.println(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
dataService.importData(reader);
verify(blSiteRepository, times(3)).save(capturedBlacklistedSites.capture());
List<BlacklistedSite> savedSites = capturedBlacklistedSites.getAllValues();
assertThat(savedSites.size(), is(3));
assertThat(savedSites.get(0).getUri(), equalTo(site1.getUri()));
assertThat(savedSites.get(1).getUri(), equalTo(site2.getUri()));
assertThat(savedSites.get(2).getUri(), equalTo(site3.getUri()));
}
|
@Override
public void resolveArtifacts(
ArtifactApi.ResolveArtifactsRequest request,
StreamObserver<ArtifactApi.ResolveArtifactsResponse> responseObserver) {
// Trying out artifact services in order till one succeeds.
// If all services fail, re-raises the last error.
// TODO: when all services fail, return an aggregated error with errors from all services.
RuntimeException lastError = null;
for (Endpoints.ApiServiceDescriptor endpoint : endpoints) {
ArtifactResolver artifactResolver =
this.artifactResolver != null
? this.artifactResolver
: new EndpointBasedArtifactResolver(endpoint.getUrl());
try {
responseObserver.onNext(artifactResolver.resolveArtifacts(request));
responseObserver.onCompleted();
return;
} catch (RuntimeException exn) {
lastError = exn;
} finally {
if (this.artifactResolver == null) {
artifactResolver.shutdown();
}
}
}
if (lastError == null) {
lastError =
new RuntimeException(
"Could not successfully resolve the artifact for the request " + request);
}
throw lastError;
}
|
@Test
public void testArtifactResolveFirstEndpoint() {
Path path = Paths.get("dummypath");
RunnerApi.ArtifactInformation fileArtifact =
RunnerApi.ArtifactInformation.newBuilder()
.setTypeUrn(ArtifactRetrievalService.FILE_ARTIFACT_URN)
.setTypePayload(
RunnerApi.ArtifactFilePayload.newBuilder()
.setPath(path.toString())
.build()
.toByteString())
.setRoleUrn("")
.build();
ArtifactApi.ResolveArtifactsRequest request =
ArtifactApi.ResolveArtifactsRequest.newBuilder().addArtifacts(fileArtifact).build();
Mockito.when(artifactResolver.resolveArtifacts(request))
.thenReturn(ArtifactApi.ResolveArtifactsResponse.newBuilder().build());
StreamObserver<ResolveArtifactsResponse> responseObserver = Mockito.mock(StreamObserver.class);
artifactService.resolveArtifacts(request, responseObserver);
Mockito.verify(artifactResolver, Mockito.times(1)).resolveArtifacts(request);
}
|
public CapacityScheduler() {
super(CapacityScheduler.class.getName());
this.maxRunningEnforcer = new CSMaxRunningAppsEnforcer(this);
}
|
@Test
public void testCapacityScheduler() throws Exception {
LOG.info("--- START: testCapacityScheduler ---");
NodeStatus mockNodeStatus = createMockNodeStatus();
// Register node1
String host_0 = "host_0";
NodeManager nm_0 =
registerNode(resourceManager, host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK,
Resources.createResource(4 * GB, 1), mockNodeStatus);
// Register node2
String host_1 = "host_1";
NodeManager nm_1 =
registerNode(resourceManager, host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK,
Resources.createResource(2 * GB, 1), mockNodeStatus);
// ResourceRequest priorities
Priority priority_0 = Priority.newInstance(0);
Priority priority_1 = Priority.newInstance(1);
// Submit an application
Application application_0 = new Application("user_0", "a1", resourceManager);
application_0.submit();
application_0.addNodeManager(host_0, 1234, nm_0);
application_0.addNodeManager(host_1, 1234, nm_1);
Resource capability_0_0 = Resources.createResource(1 * GB, 1);
application_0.addResourceRequestSpec(priority_1, capability_0_0);
Resource capability_0_1 = Resources.createResource(2 * GB, 1);
application_0.addResourceRequestSpec(priority_0, capability_0_1);
Task task_0_0 = new Task(application_0, priority_1,
new String[] {host_0, host_1});
application_0.addTask(task_0_0);
// Submit another application
Application application_1 = new Application("user_1", "b2", resourceManager);
application_1.submit();
application_1.addNodeManager(host_0, 1234, nm_0);
application_1.addNodeManager(host_1, 1234, nm_1);
Resource capability_1_0 = Resources.createResource(3 * GB, 1);
application_1.addResourceRequestSpec(priority_1, capability_1_0);
Resource capability_1_1 = Resources.createResource(2 * GB, 1);
application_1.addResourceRequestSpec(priority_0, capability_1_1);
Task task_1_0 = new Task(application_1, priority_1,
new String[] {host_0, host_1});
application_1.addTask(task_1_0);
// Send resource requests to the scheduler
application_0.schedule();
application_1.schedule();
// Send a heartbeat to kick the tires on the Scheduler
LOG.info("Kick!");
// task_0_0 and task_1_0 allocated, used=4G
nodeUpdate(resourceManager, nm_0);
// nothing allocated
nodeUpdate(resourceManager, nm_1);
// Get allocations from the scheduler
application_0.schedule(); // task_0_0
checkApplicationResourceUsage(1 * GB, application_0);
application_1.schedule(); // task_1_0
checkApplicationResourceUsage(3 * GB, application_1);
checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G)
checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available
LOG.info("Adding new tasks...");
Task task_1_1 = new Task(application_1, priority_0,
new String[] {ResourceRequest.ANY});
application_1.addTask(task_1_1);
application_1.schedule();
Task task_0_1 = new Task(application_0, priority_0,
new String[] {host_0, host_1});
application_0.addTask(task_0_1);
application_0.schedule();
// Send a heartbeat to kick the tires on the Scheduler
LOG.info("Sending hb from " + nm_0.getHostName());
// nothing new, used=4G
nodeUpdate(resourceManager, nm_0);
LOG.info("Sending hb from " + nm_1.getHostName());
// task_0_1 is preferred due to locality, used=2G
nodeUpdate(resourceManager, nm_1);
// Get allocations from the scheduler
LOG.info("Trying to allocate...");
application_0.schedule();
checkApplicationResourceUsage(1 * GB, application_0);
application_1.schedule();
checkApplicationResourceUsage(5 * GB, application_1);
nodeUpdate(resourceManager, nm_0);
nodeUpdate(resourceManager, nm_1);
checkNodeResourceUsage(4*GB, nm_0);
checkNodeResourceUsage(2*GB, nm_1);
LOG.info("--- END: testCapacityScheduler ---");
}
|
@Override
public boolean isInputConsumable(
SchedulingExecutionVertex executionVertex,
Set<ExecutionVertexID> verticesToDeploy,
Map<ConsumedPartitionGroup, Boolean> consumableStatusCache) {
for (ConsumedPartitionGroup consumedPartitionGroup :
executionVertex.getConsumedPartitionGroups()) {
if (!consumableStatusCache.computeIfAbsent(
consumedPartitionGroup, this::isConsumableBasedOnFinishedProducers)) {
return false;
}
}
return true;
}
|
@Test
void testNotFinishedBlockingInput() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producers =
topology.addExecutionVertices().withParallelism(2).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(2).finish();
topology.connectAllToAll(producers, consumer)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.BLOCKING)
.finish();
AllFinishedInputConsumableDecider inputConsumableDecider =
createAllFinishedInputConsumableDecider();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(0), Collections.emptySet(), new HashMap<>()))
.isFalse();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(1), Collections.emptySet(), new HashMap<>()))
.isFalse();
}
|
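// Returns the parent of the given path: "/" when the path has a single element,
// otherwise the path with its last element removed. Throws PathNotFoundException
// when the path has no elements (i.e. it is the root).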
public static String parentOf(String path) throws PathNotFoundException {
List<String> elements = split(path);
int size = elements.size();
if (size == 0) {
throw new PathNotFoundException("No parent of " + path);
}
if (size == 1) {
return "/";
}
elements.remove(size - 1);
StringBuilder parent = new StringBuilder(path.length());
for (String element : elements) {
parent.append("/");
parent.append(element);
}
return parent.toString();
}
|
@Test
public void testParentOf() throws Throwable {
assertEquals("/", parentOf("/a"));
assertEquals("/", parentOf("/a/"));
assertEquals("/a", parentOf("/a/b"));
assertEquals("/a/b", parentOf("/a/b/c"));
}
|
@Override
public @Nullable State waitUntilFinish() {
return waitUntilFinish(Duration.millis(-1));
}
|
@Test
public void testCumulativeTimeOverflow() throws Exception {
Dataflow.Projects.Locations.Jobs.Get statusRequest =
mock(Dataflow.Projects.Locations.Jobs.Get.class);
Job statusResponse = new Job();
statusResponse.setCurrentState("JOB_STATE_RUNNING");
when(mockJobs.get(eq(PROJECT_ID), eq(REGION_ID), eq(JOB_ID))).thenReturn(statusRequest);
when(statusRequest.execute()).thenReturn(statusResponse);
FastNanoClockAndFuzzySleeper clock = new FastNanoClockAndFuzzySleeper();
DataflowPipelineJob job =
new DataflowPipelineJob(DataflowClient.create(options), JOB_ID, options, ImmutableMap.of());
long startTime = clock.nanoTime();
State state = job.waitUntilFinish(Duration.millis(4), null, clock, clock);
assertEquals(null, state);
long timeDiff = TimeUnit.NANOSECONDS.toMillis(clock.nanoTime() - startTime);
// Should only have slept for the 4 ms allowed.
assertThat(timeDiff, lessThanOrEqualTo(4L));
}
|
@PutMapping()
@TpsControl(pointName = "NamingServiceUpdate", name = "HttpNamingServiceUpdate")
@Secured(action = ActionTypes.WRITE)
public Result<String> update(ServiceForm serviceForm) throws Exception {
serviceForm.validate();
Map<String, String> metadata = UtilsAndCommons.parseMetadata(serviceForm.getMetadata());
ServiceMetadata serviceMetadata = new ServiceMetadata();
serviceMetadata.setProtectThreshold(serviceForm.getProtectThreshold());
serviceMetadata.setExtendData(metadata);
serviceMetadata.setSelector(parseSelector(serviceForm.getSelector()));
Service service = Service.newService(serviceForm.getNamespaceId(), serviceForm.getGroupName(),
serviceForm.getServiceName());
serviceOperatorV2.update(service, serviceMetadata);
NotifyCenter.publishEvent(new UpdateServiceTraceEvent(System.currentTimeMillis(), serviceForm.getNamespaceId(),
serviceForm.getGroupName(), serviceForm.getServiceName(), metadata));
return Result.success("ok");
}
|
@Test
void testUpdate() throws Exception {
ServiceForm serviceForm = new ServiceForm();
serviceForm.setNamespaceId(Constants.DEFAULT_NAMESPACE_ID);
serviceForm.setGroupName(Constants.DEFAULT_GROUP);
serviceForm.setServiceName("service");
serviceForm.setProtectThreshold(0.0f);
serviceForm.setMetadata("");
serviceForm.setSelector("");
Result<String> actual = serviceController.update(serviceForm);
verify(serviceOperatorV2).update(eq(Service.newService(Constants.DEFAULT_NAMESPACE_ID, Constants.DEFAULT_GROUP, "service")),
any(ServiceMetadata.class));
assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
assertEquals("ok", actual.getData());
TimeUnit.SECONDS.sleep(1);
assertEquals(UpdateServiceTraceEvent.class, eventReceivedClass);
}
|
@Override
public int publish(TopicPath topic, List<OutgoingMessage> outgoingMessages) throws IOException {
List<PubsubMessage> pubsubMessages = new ArrayList<>(outgoingMessages.size());
for (OutgoingMessage outgoingMessage : outgoingMessages) {
PubsubMessage pubsubMessage =
new PubsubMessage().encodeData(outgoingMessage.getMessage().getData().toByteArray());
pubsubMessage.setAttributes(getMessageAttributes(outgoingMessage));
if (!outgoingMessage.getMessage().getOrderingKey().isEmpty()) {
pubsubMessage.setOrderingKey(outgoingMessage.getMessage().getOrderingKey());
}
// N.B. publishTime and messageId are intentionally not set on the message that is published
pubsubMessages.add(pubsubMessage);
}
PublishRequest request = new PublishRequest().setMessages(pubsubMessages);
PublishResponse response =
pubsub.projects().topics().publish(topic.getPath(), request).execute();
return response.getMessageIds().size();
}
|
@Test
public void publishOneMessageWithOnlyTimestampAndIdAttributes() throws IOException {
String expectedTopic = TOPIC.getPath();
PubsubMessage expectedPubsubMessage =
new PubsubMessage()
.encodeData(DATA.getBytes(StandardCharsets.UTF_8))
.setAttributes(
ImmutableMap.<String, String>builder()
.put(TIMESTAMP_ATTRIBUTE, String.valueOf(MESSAGE_TIME))
.put(ID_ATTRIBUTE, RECORD_ID)
.build());
PublishRequest expectedRequest =
new PublishRequest().setMessages(ImmutableList.of(expectedPubsubMessage));
PublishResponse expectedResponse =
new PublishResponse().setMessageIds(ImmutableList.of(MESSAGE_ID));
when((Object) mockPubsub.projects().topics().publish(expectedTopic, expectedRequest).execute())
.thenReturn(expectedResponse);
OutgoingMessage actualMessage =
OutgoingMessage.of(
com.google.pubsub.v1.PubsubMessage.newBuilder()
.setData(ByteString.copyFromUtf8(DATA))
.build(),
MESSAGE_TIME,
RECORD_ID,
null);
int n = client.publish(TOPIC, ImmutableList.of(actualMessage));
assertEquals(1, n);
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldChooseCorrectlyInComplicatedTopology() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING, INT, STRING, INT), function(OTHER, 0, STRING_VARARGS),
function("two", 1, STRING, STRING_VARARGS),
function("three", 2, STRING, INT, STRING_VARARGS),
function("four", 4, STRING, INT, STRING, INT, STRING_VARARGS),
function("five", 4, INT, INT, STRING, INT, STRING_VARARGS)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(Arrays.asList(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(INTEGER), null, SqlArgument.of(INTEGER)));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
@Deprecated
public static void addFileToClassPath(Path file, Configuration conf) throws IOException {
Job.addFileToClassPath(file, conf, file.getFileSystem(conf));
}
|
@Test
public void testAddFileToClassPath() throws Exception {
Configuration conf = new Configuration(false);
// Test first with 2 args
try {
DistributedCache.addFileToClassPath(null, conf);
fail("Accepted null archives argument");
} catch (NullPointerException ex) {
// Expected
}
DistributedCache.addFileToClassPath(new Path("file:///a"), conf);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a", conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a", conf.get(MRJobConfig.CACHE_FILES));
DistributedCache.addFileToClassPath(new Path("file:///b"), conf);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a,file:/b",
conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a,file:///b",
conf.get(MRJobConfig.CACHE_FILES));
// Now test with 3 args
FileSystem fs = FileSystem.newInstance(conf);
conf.clear();
try {
DistributedCache.addFileToClassPath(null, conf, fs);
fail("Accepted null archives argument");
} catch (NullPointerException ex) {
// Expected
}
DistributedCache.addFileToClassPath(new Path("file:///a"), conf, fs);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a", conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a", conf.get(MRJobConfig.CACHE_FILES));
DistributedCache.addFileToClassPath(new Path("file:///b"), conf, fs);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a,file:/b",
conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a,file:///b",
conf.get(MRJobConfig.CACHE_FILES));
// Now test with 4th arg true
conf.clear();
try {
DistributedCache.addFileToClassPath(null, conf, fs, true);
fail("Accepted null archives argument");
} catch (NullPointerException ex) {
// Expected
}
DistributedCache.addFileToClassPath(new Path("file:///a"), conf, fs, true);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a", conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a", conf.get(MRJobConfig.CACHE_FILES));
DistributedCache.addFileToClassPath(new Path("file:///b"), conf, fs, true);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a,file:/b",
conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "file:///a,file:///b",
conf.get(MRJobConfig.CACHE_FILES));
// And finally with 4th arg false
conf.clear();
try {
DistributedCache.addFileToClassPath(null, conf, fs, false);
fail("Accepted null archives argument");
} catch (NullPointerException ex) {
// Expected
}
DistributedCache.addFileToClassPath(new Path("file:///a"), conf, fs, false);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a", conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "", conf.get(MRJobConfig.CACHE_FILES, ""));
DistributedCache.addFileToClassPath(new Path("file:///b"), conf, fs, false);
assertEquals("The mapreduce.job.classpath.files property was not "
+ "set correctly", "file:/a,file:/b",
conf.get(MRJobConfig.CLASSPATH_FILES));
assertEquals("The mapreduce.job.cache.files property was not set "
+ "correctly", "", conf.get(MRJobConfig.CACHE_FILES, ""));
}
|
public static String initEndpoint(final NacosClientProperties properties) {
if (properties == null) {
return "";
}
// Whether to enable domain name resolution rules
String isUseEndpointRuleParsing = properties.getProperty(PropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE,
properties.getProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE,
String.valueOf(ParamUtil.USE_ENDPOINT_PARSING_RULE_DEFAULT_VALUE)));
boolean isUseEndpointParsingRule = Boolean.parseBoolean(isUseEndpointRuleParsing);
String endpointUrl;
if (isUseEndpointParsingRule) {
// Get the set domain name information
endpointUrl = ParamUtil.parsingEndpointRule(properties.getProperty(PropertyKeyConst.ENDPOINT));
if (StringUtils.isBlank(endpointUrl)) {
return "";
}
} else {
endpointUrl = properties.getProperty(PropertyKeyConst.ENDPOINT);
}
if (StringUtils.isBlank(endpointUrl)) {
return "";
}
String endpointPort = TemplateUtils
.stringEmptyAndThenExecute(properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_ENDPOINT_PORT),
() -> properties.getProperty(PropertyKeyConst.ENDPOINT_PORT));
endpointPort = TemplateUtils.stringEmptyAndThenExecute(endpointPort, () -> DEFAULT_END_POINT_PORT);
return endpointUrl + ":" + endpointPort;
}
|
@Test
void testInitEndpointFromDefaultWithCloudParsing() {
System.setProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE, "true");
final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
String actual = InitUtils.initEndpoint(properties);
assertEquals("", actual);
}
|
public boolean isFound() {
return found;
}
|
@Test
public void testCalcInstructionsForSlightTurnWithOtherSlightTurn() {
// Test for a fork with two slight turns. Since there are two slight turns, show the turn instruction
Weighting weighting = new SpeedWeighting(mixedCarSpeedEnc);
Path p = new Dijkstra(roundaboutGraph.g, weighting, TraversalMode.NODE_BASED)
.calcPath(12, 16);
assertTrue(p.isFound());
InstructionList wayList = InstructionsFromEdges.calcInstructions(p, p.graph, weighting, mixedEncodingManager, tr);
// Contains start, turn, and finish instructions
assertEquals(3, wayList.size());
// Assert keep right (sign 7)
assertEquals(7, wayList.get(1).getSign());
}
|
public static String simpleClassName(Object o) {
if (o == null) {
return "null_object";
} else {
return simpleClassName(o.getClass());
}
}
|
@Test
public void testSimpleClassName() throws Exception {
testSimpleClassName(String.class);
}
|
public void executor(final ConfigGroupEnum type, final String json, final String eventType) {
ENUM_MAP.get(type).handle(json, eventType);
}
|
@Test
public void testPluginRefreshExecutor() {
String json = getJson();
websocketDataHandler.executor(ConfigGroupEnum.PLUGIN, json, DataEventTypeEnum.REFRESH.name());
List<PluginData> pluginDataList = new PluginDataHandler(pluginDataSubscriber).convert(json);
Mockito.verify(pluginDataSubscriber).refreshPluginDataSelf(pluginDataList);
}
|
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
// Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0;
NodeList allNodes = nodeRepository().nodes().list();
CapacityChecker capacityChecker = new CapacityChecker(allNodes);
List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null);
retireOvercommitedHosts(allNodes, overcommittedHosts);
boolean success = true;
Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
if (spareHostCapacity == 0) {
List<Move> mitigation = findMitigation(failurePath.get());
if (execute(mitigation, failurePath.get())) {
// We succeeded or are in the process of taking a step to mitigate.
// Report with the assumption this will eventually succeed to avoid alerting before we're stuck
spareHostCapacity++;
}
else {
success = false;
}
}
metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null);
}
return success ? 1.0 : 0.0;
}
|
@Test
public void testTooManyIterationsAreNeeded() {
// 6 nodes must move to the next host, which is more than the max limit
var tester = new SpareCapacityMaintainerTester(5);
tester.addHosts(2, new NodeResources(10, 100, 1000, 1));
tester.addHosts(1, new NodeResources(9, 90, 900, 0.9));
tester.addHosts(1, new NodeResources(8, 80, 800, 0.8));
tester.addHosts(1, new NodeResources(7, 70, 700, 0.7));
tester.addHosts(1, new NodeResources(6, 60, 600, 0.6));
tester.addHosts(1, new NodeResources(5, 50, 500, 0.5));
tester.addHosts(1, new NodeResources(4, 40, 400, 0.4));
tester.addNodes(0, 1, new NodeResources(10, 100, 1000, 1.0), 0);
tester.addNodes(1, 1, new NodeResources( 9, 90, 900, 0.9), 1);
tester.addNodes(2, 1, new NodeResources( 8, 80, 800, 0.8), 2);
tester.addNodes(3, 1, new NodeResources( 7, 70, 700, 0.7), 3);
tester.addNodes(4, 1, new NodeResources( 6, 60, 600, 0.6), 4);
tester.addNodes(5, 1, new NodeResources( 5, 50, 500, 0.5), 5);
tester.addNodes(6, 1, new NodeResources( 4, 40, 400, 0.4), 6);
tester.maintainer.maintain();
assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
|
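// Delegates to the configured SQL recognizer factory, which parses the SQL text
// for the given database type and returns the matching recognizers.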
public static List<SQLRecognizer> get(String sql, String dbType) {
return SQL_RECOGNIZER_FACTORY.create(sql, dbType);
}
|
@Test
public void testSqlRecognizerLoading() {
List<SQLRecognizer> recognizers = SQLVisitorFactory.get("update t1 set name = 'test' where id = '1'", JdbcConstants.MYSQL);
Assertions.assertNotNull(recognizers);
Assertions.assertEquals(recognizers.size(), 1);
SQLRecognizer recognizer = recognizers.get(0);
Assertions.assertEquals(SQLType.UPDATE, recognizer.getSQLType());
Assertions.assertEquals("t1", recognizer.getTableName());
recognizers = SQLVisitorFactory.get("update t1 set name = 'test' where id = '1'", JdbcConstants.MARIADB);
Assertions.assertNotNull(recognizers);
Assertions.assertEquals(recognizers.size(), 1);
recognizer = recognizers.get(0);
Assertions.assertEquals(SQLType.UPDATE, recognizer.getSQLType());
Assertions.assertEquals("t1", recognizer.getTableName());
}
|
public String delayedServiceResponse() {
try {
return this.delayedService.attemptRequest();
} catch (RemoteServiceException e) {
return e.getMessage();
}
}
|
@Test
void testDelayedRemoteResponseFailure() {
var delayedService = new DelayedRemoteService(System.nanoTime(), 2);
var delayedServiceCircuitBreaker = new DefaultCircuitBreaker(delayedService, 3000,
1,
2 * 1000 * 1000 * 1000);
var monitoringService = new MonitoringService(delayedServiceCircuitBreaker,null);
// With the current time as the start time, the service is initially failing
var response = monitoringService.delayedServiceResponse();
assertEquals(response, "Delayed service is down");
}
|
private static void addQueue(
QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
Map<String, String> confUpdate) throws IOException {
if (addInfo == null) {
return;
}
QueuePath queuePath = new QueuePath(addInfo.getQueue());
String queueName = queuePath.getLeafName();
if (queuePath.isRoot() || queuePath.isInvalid()) {
throw new IOException("Can't add invalid queue " + queuePath);
} else if (getSiblingQueues(queuePath, proposedConf).contains(
queueName)) {
throw new IOException("Can't add existing queue " + queuePath);
}
QueuePath parentPath = queuePath.getParentObject();
List<String> siblingQueues = proposedConf.getQueues(parentPath);
siblingQueues.add(queueName);
proposedConf.setQueues(parentPath,
siblingQueues.toArray(new String[0]));
confUpdate.put(getQueuesConfig(parentPath),
Joiner.on(',').join(siblingQueues));
String keyPrefix = QueuePrefixes.getQueuePrefix(queuePath);
for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
String keyValue = kv.getValue();
if (keyValue == null || keyValue.isEmpty()) {
proposedConf.unset(keyPrefix + kv.getKey());
confUpdate.put(keyPrefix + kv.getKey(), null);
} else {
proposedConf.set(keyPrefix + kv.getKey(), keyValue);
confUpdate.put(keyPrefix + kv.getKey(), keyValue);
}
}
// Unset the ordering policy of a parent queue that was converted from
// a leaf queue by this addQueue call
String queueOrderingPolicy = getOrderingPolicyConfig(parentPath);
if (siblingQueues.size() == 1) {
proposedConf.unset(queueOrderingPolicy);
confUpdate.put(queueOrderingPolicy, null);
}
}
|
@Test
public void testAddQueue() throws Exception {
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> updateMap = new HashMap<>();
updateMap.put(CONFIG_NAME, C_CONFIG_VALUE);
QueueConfigInfo queueConfigInfo = new QueueConfigInfo(C_PATH, updateMap);
updateInfo.getAddQueueInfo().add(queueConfigInfo);
Map<String, String> configurationUpdate =
ConfigurationUpdateAssembler.constructKeyValueConfUpdate(csConfig, updateInfo);
assertEquals(C_CONFIG_VALUE, configurationUpdate.get(C_CONFIG_PATH));
assertEquals("a,b,c", configurationUpdate.get(ROOT_QUEUES_PATH));
}
|
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> stream,
final StreamSelectKey<K> selectKey,
final RuntimeBuildContext buildContext
) {
return build(stream, selectKey, buildContext, PartitionByParamsFactory::build);
}
|
@Test
public void shouldReturnCorrectSerdeFactory() {
// When:
final KStreamHolder<GenericKey> result = StreamSelectKeyBuilder
.build(stream, selectKey, buildContext, paramBuilder);
// Then:
result.getExecutionKeyFactory().buildKeySerde(
FormatInfo.of(FormatFactory.JSON.name()),
PhysicalSchema.from(SOURCE_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of()),
queryContext
);
verify(buildContext).buildKeySerde(
FormatInfo.of(FormatFactory.JSON.name()),
PhysicalSchema.from(SOURCE_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of()),
queryContext);
}
|
public static JobConsoleLogger getConsoleLogger() {
if (context == null) throw new RuntimeException("context is null");
return new JobConsoleLogger();
}
|
@Test
public void shouldFailGetLoggerIfContextIsNotSet() {
ReflectionUtil.setStaticField(JobConsoleLogger.class, "context", null);
try {
JobConsoleLogger.getConsoleLogger();
fail("expected this to fail");
} catch (Exception e) {
assertThat(e.getMessage(), is("context is null"));
}
}
|
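// Resolves the comma-separated class names stored under the given property into
// Class objects, returning defaultValue when the property is unset. Any
// ClassNotFoundException is rethrown as a RuntimeException.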
public Class<?>[] getClasses(String name, Class<?> ... defaultValue) {
String valueString = getRaw(name);
if (null == valueString) {
return defaultValue;
}
String[] classnames = getTrimmedStrings(name);
try {
Class<?>[] classes = new Class<?>[classnames.length];
for(int i = 0; i < classnames.length; i++) {
classes[i] = getClassByName(classnames[i]);
}
return classes;
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
|
@Test
public void testGetClasses() throws IOException {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("test.classes1", "java.lang.Integer,java.lang.String");
appendProperty("test.classes2", " java.lang.Integer , java.lang.String ");
endConfig();
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
String[] expectedNames = {"java.lang.Integer", "java.lang.String"};
Class<?>[] defaultClasses = {};
Class<?>[] classes1 = conf.getClasses("test.classes1", defaultClasses);
Class<?>[] classes2 = conf.getClasses("test.classes2", defaultClasses);
assertArrayEquals(expectedNames, extractClassNames(classes1));
assertArrayEquals(expectedNames, extractClassNames(classes2));
}
|
@Override
public void executeUpdate(final RegisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
checkDataSource(sqlStatement, contextManager);
Map<String, DataSourcePoolProperties> propsMap = DataSourceSegmentsConverter.convert(database.getProtocolType(), sqlStatement.getStorageUnits());
if (sqlStatement.isIfNotExists()) {
Collection<String> currentStorageUnits = getCurrentStorageUnitNames(contextManager);
Collection<String> logicalDataSourceNames = getLogicalDataSourceNames();
propsMap.keySet().removeIf(currentStorageUnits::contains);
propsMap.keySet().removeIf(logicalDataSourceNames::contains);
}
if (propsMap.isEmpty()) {
return;
}
validateHandler.validate(propsMap, getExpectedPrivileges(sqlStatement));
try {
contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().registerStorageUnits(database.getName(), propsMap);
} catch (final SQLException | ShardingSphereExternalException ex) {
throw new StorageUnitsOperateException("register", propsMap.keySet(), ex);
}
}
|
@Test
void assertExecuteUpdateWithDuplicateStorageUnitNamesInStatement() {
assertThrows(DuplicateStorageUnitException.class, () -> executor.executeUpdate(createRegisterStorageUnitStatementWithDuplicateStorageUnitNames(), mock(ContextManager.class)));
}
|
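// Claims space in the ring buffer and publishes the record in two phases: the
// length is first stored as a negative value (record in progress), the payload
// and type id are copied in, and finally the positive length is written with an
// ordered store so the record becomes visible to the consumer.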
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length)
{
checkTypeId(msgTypeId);
checkMsgLength(length);
final AtomicBuffer buffer = this.buffer;
final int recordLength = length + HEADER_LENGTH;
final int recordIndex = claimCapacity(buffer, recordLength);
if (INSUFFICIENT_CAPACITY == recordIndex)
{
return false;
}
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
MemoryAccess.releaseFence();
buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
buffer.putInt(typeOffset(recordIndex), msgTypeId);
buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);
return true;
}
|
@Test
void shouldInsertPaddingRecordPlusMessageOnBufferWrap()
{
final int length = 200;
final int recordLength = length + HEADER_LENGTH;
final int alignedRecordLength = align(recordLength, ALIGNMENT);
final long tail = CAPACITY - HEADER_LENGTH;
final long head = tail - (ALIGNMENT * 4);
when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head);
when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn(tail);
when(buffer.compareAndSetLong(TAIL_COUNTER_INDEX, tail, tail + alignedRecordLength + ALIGNMENT))
.thenReturn(TRUE);
final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(1024));
final int srcIndex = 0;
assertTrue(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length));
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), -HEADER_LENGTH);
inOrder.verify(buffer).putInt(typeOffset((int)tail), PADDING_MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), HEADER_LENGTH);
inOrder.verify(buffer).putIntOrdered(lengthOffset(0), -recordLength);
inOrder.verify(buffer).putBytes(encodedMsgOffset(0), srcBuffer, srcIndex, length);
inOrder.verify(buffer).putInt(typeOffset(0), MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset(0), recordLength);
}
|
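// Parses the string with Boolean.parseBoolean, falling back to defaultValue
// when the value is null or blank.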
public static boolean toBoolean(String val, boolean defaultValue) {
if (StringUtils.isBlank(val)) {
return defaultValue;
}
return Boolean.parseBoolean(val);
}
|
@Test
void testToBoolean() {
// ConvertUtils.toBoolean(String)
assertTrue(ConvertUtils.toBoolean("true"));
assertTrue(ConvertUtils.toBoolean("True"));
assertTrue(ConvertUtils.toBoolean("TRUE"));
assertFalse(ConvertUtils.toBoolean("false"));
assertFalse(ConvertUtils.toBoolean("False"));
assertFalse(ConvertUtils.toBoolean("FALSE"));
assertFalse(ConvertUtils.toBoolean(null));
assertFalse(ConvertUtils.toBoolean("notBoolean"));
// ConvertUtils.toBoolean(String, boolean)
assertFalse(ConvertUtils.toBoolean("", false));
assertFalse(ConvertUtils.toBoolean(null, false));
assertFalse(ConvertUtils.toBoolean("notBoolean", false));
assertTrue(ConvertUtils.toBoolean("true", false));
}
|
@GET
@TreeResponse
public ExportedToolDescriptor[] doToolMetadata() {
List<ExportedToolDescriptor> models = new ArrayList<>();
for (ToolDescriptor<? extends ToolInstallation> d : ToolInstallation.all()) {
ExportedToolDescriptor descriptor = new ExportedToolDescriptor(d.getDisplayName(), symbolForObject(d), d.getClass());
models.add(descriptor);
for (ToolInstallation installation : d.getInstallations()) {
descriptor.addInstallation(new ExportedToolDescriptor.ExportedToolInstallation(installation.getName(), installation.getClass()));
}
}
return models.toArray(new ExportedToolDescriptor[0]);
}
|
@Test
public void toolMetadata() throws Exception {
PipelineMetadataService svc = new PipelineMetadataService();
List<ExportedToolDescriptor> tools = new ArrayList<>(Arrays.asList(svc.doToolMetadata()));
assertFalse(tools.isEmpty());
ExportedToolDescriptor t = null;
for (ExportedToolDescriptor a : tools) {
if (a.getType().equals(JDK.DescriptorImpl.class.getName())) {
t = a;
}
}
assertNotNull(t);
assertEquals("jdk", t.getSymbol());
}
|
@Override
public Optional<EncryptColumnExistedReviser> getColumnExistedReviser(final EncryptRule rule, final String tableName) {
return rule.findEncryptTable(tableName).map(EncryptColumnExistedReviser::new);
}
|
@Test
void assertGetColumnExistedReviser() {
Optional<EncryptColumnExistedReviser> columnExistedReviser = new EncryptMetaDataReviseEntry().getColumnExistedReviser(createEncryptRule(), TABLE_NAME);
assertTrue(columnExistedReviser.isPresent());
assertThat(columnExistedReviser.get().getClass(), is(EncryptColumnExistedReviser.class));
}
|
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
}
|
@Test
public void testSchemaDeserialize() {
StandardStructObjectInspector schemaObjectInspector =
ObjectInspectorFactory.getStandardStructObjectInspector(
Arrays.asList("0:col1", "1:col2"),
Arrays.asList(
PrimitiveObjectInspectorFactory.writableLongObjectInspector,
PrimitiveObjectInspectorFactory.writableStringObjectInspector
));
Deserializer deserializer = new Deserializer.Builder()
.schema(CUSTOMER_SCHEMA)
.writerInspector((StructObjectInspector) IcebergObjectInspector.create(CUSTOMER_SCHEMA))
.sourceInspector(schemaObjectInspector)
.build();
Record expected = GenericRecord.create(CUSTOMER_SCHEMA);
expected.set(0, 1L);
expected.set(1, "Bob");
Record actual = deserializer.deserialize(new Object[] { new LongWritable(1L), new Text("Bob") });
Assert.assertEquals(expected, actual);
}
|
@Override public SpanCustomizer tag(String key, String value) {
return tracer.currentSpanCustomizer().tag(key, value);
}
|
@Test void tag_when_no_current_span() {
spanCustomizer.tag("foo", "bar");
}
|
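// A P2SH output script has the form OP_HASH160 <20-byte script hash> OP_EQUAL,
// so the script hash is the data of the second chunk (index 1).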
public static byte[] extractHashFromP2SH(Script script) {
return script.chunks().get(1).data;
}
|
@Test
public void p2shScriptHashFromKeys() {
// import some keys from this example: https://gist.github.com/gavinandresen/3966071
ECKey key1 = DumpedPrivateKey.fromBase58(MAINNET, "5JaTXbAUmfPYZFRwrYaALK48fN6sFJp4rHqq2QSXs8ucfpE4yQU").getKey();
key1 = ECKey.fromPrivate(key1.getPrivKeyBytes());
ECKey key2 = DumpedPrivateKey.fromBase58(MAINNET, "5Jb7fCeh1Wtm4yBBg3q3XbT6B525i17kVhy3vMC9AqfR6FH2qGk").getKey();
key2 = ECKey.fromPrivate(key2.getPrivKeyBytes());
ECKey key3 = DumpedPrivateKey.fromBase58(MAINNET, "5JFjmGo5Fww9p8gvx48qBYDJNAzR9pmH5S389axMtDyPT8ddqmw").getKey();
key3 = ECKey.fromPrivate(key3.getPrivKeyBytes());
List<ECKey> keys = Arrays.asList(key1, key2, key3);
Script p2shScript = ScriptBuilder.createP2SHOutputScript(2, keys);
byte[] p2shScriptHash = ScriptPattern.extractHashFromP2SH(p2shScript);
assertEquals("defdb71910720a2c854529019189228b4245eddd", ByteUtils.formatHex(p2shScriptHash));
}
|
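// Applies the filter to the top-level OpenAPI object and, if it survives, builds
// a filtered copy: path items and webhooks are filtered and cloned, tags that end
// up unreferenced are dropped, component schemas are filtered, and broken
// references are removed at the end when the filter requests it.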
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
if (filteredOpenAPI == null) {
return filteredOpenAPI;
}
OpenAPI clone = new OpenAPI();
clone.info(filteredOpenAPI.getInfo());
clone.openapi(filteredOpenAPI.getOpenapi());
clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
clone.setExtensions(filteredOpenAPI.getExtensions());
clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
clone.setSecurity(filteredOpenAPI.getSecurity());
clone.setServers(filteredOpenAPI.getServers());
clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags()));
final Set<String> allowedTags = new HashSet<>();
final Set<String> filteredTags = new HashSet<>();
Paths clonedPaths = new Paths();
if (filteredOpenAPI.getPaths() != null) {
for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clonedPaths.addPathItem(resourcePath, clonedPathItem);
}
}
}
clone.paths(clonedPaths);
}
filteredTags.removeAll(allowedTags);
final List<Tag> tags = clone.getTags();
if (tags != null && !filteredTags.isEmpty()) {
tags.removeIf(tag -> filteredTags.contains(tag.getName()));
if (clone.getTags().isEmpty()) {
clone.setTags(null);
}
}
if (filteredOpenAPI.getWebhooks() != null) {
for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clone.addWebhooks(resourcePath, clonedPathItem);
}
}
}
}
if (filteredOpenAPI.getComponents() != null) {
clone.components(new Components());
clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
}
if (filter.isRemovingUnreferencedDefinitions()) {
clone = removeBrokenReferenceDefinitions(clone);
}
return clone;
}
|
@Test(description = "it should filter any Pet Ref in Schemas")
public void filterAwayPetRefInSchemas() throws IOException {
final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH);
final OpenAPI filtered = new SpecFilter().filter(openAPI, new NoPetRefSchemaFilter(), null, null, null);
validateSchemasInComponents(filtered.getComponents(), PET_MODEL);
}
|
@VisibleForTesting
public Set<NodeAttribute> parseAttributes(String config)
throws IOException {
if (Strings.isNullOrEmpty(config)) {
return ImmutableSet.of();
}
Set<NodeAttribute> attributeSet = new HashSet<>();
// Configuration value should be in one line, format:
// "ATTRIBUTE_NAME,ATTRIBUTE_TYPE,ATTRIBUTE_VALUE",
// multiple node-attributes are delimited by ":".
// Each attribute string should not contain any spaces.
String[] attributeStrs = config.split(NODE_ATTRIBUTES_DELIMITER);
for (String attributeStr : attributeStrs) {
String[] fields = attributeStr.split(NODE_ATTRIBUTE_DELIMITER);
if (fields.length != 3) {
throw new IOException("Invalid value for "
+ YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ "=" + config);
}
// We don't allow user config to overwrite our dist prefix,
// so disallow any prefix set in the configuration.
if (fields[0].contains("/")) {
throw new IOException("Node attribute set in "
+ YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ " should not contain any prefix.");
}
// Make sure attribute type is valid.
if (!EnumUtils.isValidEnum(NodeAttributeType.class, fields[1])) {
throw new IOException("Invalid node attribute type: "
+ fields[1] + ", valid values are "
+ Arrays.asList(NodeAttributeType.values()));
}
// Automatically setup prefix for collected attributes
NodeAttribute na = NodeAttribute.newInstance(
NodeAttribute.PREFIX_DISTRIBUTED,
fields[0],
NodeAttributeType.valueOf(fields[1]),
fields[2]);
// Two NodeAttributes are considered identical as long as their
// prefix and name are the same. To avoid an attribute being
// silently overwritten by an ambiguous one, fail in that case.
if (!attributeSet.add(na)) {
throw new IOException("Ambiguous node attribute is found: "
+ na.toString() + ", a same attribute already exists");
}
}
// Before updating the attributes to the provider,
// verify if they are valid
try {
NodeLabelUtil.validateNodeAttributes(attributeSet);
} catch (IOException e) {
throw new IOException("Node attributes set by configuration property: "
+ YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ " is not valid. Detail message: " + e.getMessage());
}
return attributeSet;
}
|
@Test(timeout=30000L)
public void testNodeAttributesFetchInterval()
throws IOException, InterruptedException {
Set<NodeAttribute> expectedAttributes1 = new HashSet<>();
expectedAttributes1.add(NodeAttribute
.newInstance("test.io", "host",
NodeAttributeType.STRING, "host1"));
Configuration conf = new Configuration();
// Set fetch interval to 1s for testing
conf.setLong(
YarnConfiguration.NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS, 1000);
ConfigurationNodeAttributesProvider spyProvider =
Mockito.spy(nodeAttributesProvider);
Mockito.when(spyProvider.parseAttributes(Mockito.any()))
.thenReturn(expectedAttributes1);
spyProvider.init(conf);
spyProvider.start();
// Verify init value is honored.
Assert.assertEquals(expectedAttributes1, spyProvider.getDescriptors());
// Configuration provider provides a different set of attributes.
Set<NodeAttribute> expectedAttributes2 = new HashSet<>();
expectedAttributes2.add(NodeAttribute
.newInstance("test.io", "os",
NodeAttributeType.STRING, "windows"));
Mockito.when(spyProvider.parseAttributes(Mockito.any()))
.thenReturn(expectedAttributes2);
// Since we set the fetch interval to 1s, it takes up to 1s before the
// updated attributes reach the provider. So we expect to see some old
// values for a short window.
ArrayList<String> keysMet = new ArrayList<>();
int numOfOldValue = 0;
int numOfNewValue = 0;
// Run 5 times in 500ms interval
int times=5;
while(times>0) {
Set<NodeAttribute> current = spyProvider.getDescriptors();
Assert.assertEquals(1, current.size());
String attributeName =
current.iterator().next().getAttributeKey().getAttributeName();
if ("host".equals(attributeName)){
numOfOldValue++;
} else if ("os".equals(attributeName)) {
numOfNewValue++;
}
Thread.sleep(500);
times--;
}
// We should either see the old value or the new value.
Assert.assertEquals(5, numOfNewValue + numOfOldValue);
// Both values should be more than 0.
Assert.assertTrue(numOfOldValue > 0);
Assert.assertTrue(numOfNewValue > 0);
}
|
@Override
public void createService(Service service, AbstractSelector selector) throws NacosException {
NAMING_LOGGER.info("[CREATE-SERVICE] {} creating service : {}", namespaceId, service);
final Map<String, String> params = new HashMap<>(16);
params.put(CommonParams.NAMESPACE_ID, namespaceId);
params.put(CommonParams.SERVICE_NAME, service.getName());
params.put(CommonParams.GROUP_NAME, service.getGroupName());
params.put(PROTECT_THRESHOLD_PARAM, String.valueOf(service.getProtectThreshold()));
params.put(META_PARAM, JacksonUtils.toJson(service.getMetadata()));
params.put(SELECTOR_PARAM, JacksonUtils.toJson(selector));
reqApi(UtilAndComs.nacosUrlService, params, HttpMethod.POST);
}
|
@Test
void testCreateService() throws Exception {
//given
NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
HttpRestResult<Object> a = new HttpRestResult<Object>();
a.setData("");
a.setCode(200);
when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(a);
final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
nacosRestTemplateField.setAccessible(true);
nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
//when
clientProxy.createService(new Service(), new NoneSelector());
//then
verify(nacosRestTemplate, times(1)).exchangeForm(endsWith(UtilAndComs.nacosUrlService), any(), any(), any(),
eq(HttpMethod.POST), any());
}
|
public LockResource lockExclusive(StateLockOptions lockOptions)
throws TimeoutException, InterruptedException, IOException {
return lockExclusive(lockOptions, null);
}
|
@Test
public void testGraceMode_Forced() throws Throwable {
// Enable interrupt-cycle with 100ms interval.
configureInterruptCycle(true, 100);
// The state-lock instance.
StateLockManager stateLockManager = new StateLockManager();
// Start a thread that owns the state-lock in shared mode.
StateLockingThread sharedHolderThread = new StateLockingThread(stateLockManager, false);
sharedHolderThread.start();
sharedHolderThread.waitUntilStateLockAcquired();
// Take the state-lock exclusively with FORCED grace mode.
try (LockResource lr = stateLockManager
.lockExclusive(new StateLockOptions(GraceMode.FORCED, 10, 0, 100))) {
// Holder should have been interrupted.
Assert.assertTrue(sharedHolderThread.lockInterrupted());
sharedHolderThread.join();
// Spawn a new thread that waits on the lock.
StateLockingThread sharedWaiterThread = new StateLockingThread(stateLockManager, false);
sharedWaiterThread.start();
// Wait until it's interrupted by the cycle too.
CommonUtils.waitFor("waiter interrupted", () -> sharedWaiterThread.lockInterrupted());
sharedWaiterThread.join();
}
}
|
@Nullable
public Object sanitize(String key, @Nullable Object value) {
for (Pattern pattern : sanitizeKeysPatterns) {
if (pattern.matcher(key).matches()) {
return SANITIZED_VALUE;
}
}
return value;
}
|
@Test
void notObfuscateNormalConfigs() {
final var sanitizer = new KafkaConfigSanitizer(true, List.of());
assertThat(sanitizer.sanitize("security.protocol", "SASL_SSL")).isEqualTo("SASL_SSL");
final String[] bootstrapServer = new String[] {"test1:9092", "test2:9092"};
assertThat(sanitizer.sanitize("bootstrap.servers", bootstrapServer)).isEqualTo(bootstrapServer);
}
|
@Override
public String getConfig(String dataId, String group, long timeoutMs) throws NacosException {
return getConfigInner(namespace, dataId, group, timeoutMs);
}
|
@Test
void testGetConfigFromLocalCache() throws NacosException {
final String dataId = "1localcache";
final String group = "2";
final String tenant = "";
MockedStatic<LocalConfigInfoProcessor> localConfigInfoProcessorMockedStatic = Mockito.mockStatic(LocalConfigInfoProcessor.class);
try {
String contentFailOver = "localCacheContent" + System.currentTimeMillis();
// failover content is null
localConfigInfoProcessorMockedStatic.when(() -> LocalConfigInfoProcessor.getFailover(any(), eq(dataId), eq(group), eq(tenant)))
.thenReturn(null);
//snapshot content
localConfigInfoProcessorMockedStatic.when(() -> LocalConfigInfoProcessor.getSnapshot(any(), eq(dataId), eq(group), eq(tenant)))
.thenReturn(contentFailOver);
// fetching the config from the server fails.
final int timeout = 3000;
Mockito.when(mockWoker.getServerConfig(dataId, group, "", timeout, false)).thenThrow(new NacosException());
final String config = nacosConfigService.getConfig(dataId, group, timeout);
assertEquals(contentFailOver, config);
} finally {
localConfigInfoProcessorMockedStatic.close();
}
}
|
@Override
public boolean isReadable(Class<?> type,
@Nullable Type genericType,
@Nullable Annotation[] annotations,
@Nullable MediaType mediaType) {
return isProvidable(type) && super.isReadable(type, genericType, annotations, mediaType);
}
|
@Test
void doesNotReadIgnoredTypes() {
assertThat(provider.isReadable(Ignorable.class, null, null, null))
.isFalse();
}
|
@Override
public Collection<RedisServer> masters() {
List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
return toRedisServersList(masters);
}
|
@Test
public void testMasters() {
Collection<RedisServer> masters = connection.masters();
assertThat(masters).hasSize(1);
}
|
@Override
public ByteBuf duplicate() {
return newSharedLeakAwareByteBuf(super.duplicate());
}
|
@Test
public void testWrapDuplicate() {
assertWrapped(newBuffer(8).duplicate());
}
|
public DurationConfParser durationConf() {
return new DurationConfParser();
}
|
@Test
public void testDurationConf() {
Map<String, String> writeOptions = ImmutableMap.of("write-prop", "111s");
ConfigOption<Duration> configOption =
ConfigOptions.key("conf-prop").durationType().noDefaultValue();
Configuration flinkConf = new Configuration();
flinkConf.setString(configOption.key(), "222s");
Table table = mock(Table.class);
when(table.properties()).thenReturn(ImmutableMap.of("table-prop", "333s"));
FlinkConfParser confParser = new FlinkConfParser(table, writeOptions, flinkConf);
Duration defaultVal = Duration.ofMillis(999);
Duration result =
confParser.durationConf().option("write-prop").defaultValue(defaultVal).parse();
assertThat(result).isEqualTo(Duration.ofSeconds(111));
result = confParser.durationConf().flinkConfig(configOption).defaultValue(defaultVal).parse();
assertThat(result).isEqualTo(Duration.ofSeconds(222));
result = confParser.durationConf().tableProperty("table-prop").defaultValue(defaultVal).parse();
assertThat(result).isEqualTo(Duration.ofSeconds(333));
}
|
static boolean apply(@Nullable HttpStatus httpStatus) {
if (Objects.isNull(httpStatus)) {
return false;
}
RpcEnhancementReporterProperties reportProperties;
try {
reportProperties = ApplicationContextAwareUtils.getApplicationContext()
.getBean(RpcEnhancementReporterProperties.class);
}
catch (BeansException e) {
LOG.error("get RpcEnhancementReporterProperties bean err", e);
reportProperties = new RpcEnhancementReporterProperties();
}
// statuses take precedence over series
List<HttpStatus> status = reportProperties.getStatuses();
if (status.isEmpty()) {
List<HttpStatus.Series> series = reportProperties.getSeries();
// Check INTERNAL_SERVER_ERROR (500) status.
if (reportProperties.isIgnoreInternalServerError() && Objects.equals(httpStatus, INTERNAL_SERVER_ERROR)) {
return false;
}
if (series.isEmpty()) {
return HTTP_STATUSES.contains(httpStatus);
}
return series.contains(httpStatus.series());
}
// Use the user-specified circuit-breaker status codes.
return status.contains(httpStatus);
}
|
@Test
public void testApplyWithoutSeries() {
RpcEnhancementReporterProperties properties = new RpcEnhancementReporterProperties();
// Mock Condition
properties.getStatuses().clear();
properties.getSeries().clear();
ApplicationContext applicationContext = mock(ApplicationContext.class);
doReturn(properties)
.when(applicationContext).getBean(RpcEnhancementReporterProperties.class);
mockedApplicationContextAwareUtils.when(ApplicationContextAwareUtils::getApplicationContext)
.thenReturn(applicationContext);
// Assert
assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.OK)).isEqualTo(false);
assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.INTERNAL_SERVER_ERROR)).isEqualTo(false);
assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.BAD_GATEWAY)).isEqualTo(true);
}
|
public RoleDO getRole() {
return (RoleDO) getSource();
}
|
@Test
public void testGetRole() {
assertEquals(roleDO, roleUpdatedEvent.getRole());
}
|
public static String getDescWithoutMethodName(Method m) {
StringBuilder ret = new StringBuilder();
ret.append('(');
Class<?>[] parameterTypes = m.getParameterTypes();
for (int i = 0; i < parameterTypes.length; i++) {
ret.append(getDesc(parameterTypes[i]));
}
ret.append(')').append(getDesc(m.getReturnType()));
return ret.toString();
}
|
@Test
void testGetDescWithoutMethodName() throws Exception {
assertThat(
ReflectUtils.getDescWithoutMethodName(Foo2.class.getDeclaredMethod("hello", int[].class)),
equalTo("([I)Ljava/util/List;"));
}
|
public static ResourceId matchNewResource(String singleResourceSpec, boolean isDirectory) {
return getFileSystemInternal(parseScheme(singleResourceSpec))
.matchNewResource(singleResourceSpec, isDirectory);
}
|
@Test
public void testValidMatchNewResourceForLocalFileSystem() {
assertEquals("file", FileSystems.matchNewResource("/tmp/f1", false).getScheme());
assertEquals("file", FileSystems.matchNewResource("tmp/f1", false).getScheme());
assertEquals("file", FileSystems.matchNewResource("c:\\tmp\\f1", false).getScheme());
}
|
public RandomForest merge(RandomForest other) {
if (!formula.equals(other.formula)) {
throw new IllegalArgumentException("RandomForest have different model formula");
}
Model[] forest = new Model[models.length + other.models.length];
System.arraycopy(models, 0, forest, 0, models.length);
System.arraycopy(other.models, 0, forest, models.length, other.models.length);
// rough estimation
RegressionMetrics mergedMetrics = new RegressionMetrics(
metrics.fitTime * other.metrics.fitTime,
metrics.scoreTime * other.metrics.scoreTime,
metrics.size,
(metrics.rss * other.metrics.rss) / 2,
(metrics.mse * other.metrics.mse) / 2,
(metrics.rmse * other.metrics.rmse) / 2,
(metrics.mad * other.metrics.mad) / 2,
(metrics.r2 * other.metrics.r2) / 2
);
double[] mergedImportance = importance.clone();
for (int i = 0; i < importance.length; i++) {
mergedImportance[i] += other.importance[i];
}
return new RandomForest(formula, forest, mergedMetrics, mergedImportance);
}
|
@Test
public void testMerge() {
System.out.println("merge");
RandomForest forest1 = RandomForest.fit(Abalone.formula, Abalone.train, 50, 3, 20, 100, 5, 1.0, Arrays.stream(seeds));
RandomForest forest2 = RandomForest.fit(Abalone.formula, Abalone.train, 50, 3, 20, 100, 5, 1.0, Arrays.stream(seeds).skip(50));
RandomForest forest = forest1.merge(forest2);
double rmse1 = RMSE.of(Abalone.testy, forest1.predict(Abalone.test));
double rmse2 = RMSE.of(Abalone.testy, forest2.predict(Abalone.test));
double rmse = RMSE.of(Abalone.testy, forest.predict(Abalone.test));
System.out.format("Forest 1 RMSE = %.4f%n", rmse1);
System.out.format("Forest 2 RMSE = %.4f%n", rmse2);
System.out.format("Merged RMSE = %.4f%n", rmse);
assertEquals(2.0858, rmse1, 1E-4);
assertEquals(2.0633, rmse2, 1E-4);
assertEquals(2.0693, rmse, 1E-4);
}
|
public void onClose()
{
if (asyncTaskExecutor instanceof ExecutorService)
{
try
{
final ExecutorService executor = (ExecutorService)asyncTaskExecutor;
executor.shutdownNow();
if (!executor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS))
{
ctx.errorHandler().onError(new AeronEvent("failed to shutdown async task executor"));
}
}
catch (final Exception e)
{
ctx.errorHandler().onError(e);
}
}
CloseHelper.close(ctx.errorHandler(), nameResolver);
publicationImages.forEach(PublicationImage::free);
networkPublications.forEach(NetworkPublication::free);
ipcPublications.forEach(IpcPublication::free);
freeEndOfLifeResources(Integer.MAX_VALUE);
toDriverCommands.consumerHeartbeatTime(Aeron.NULL_VALUE);
ctx.cncByteBuffer().force();
ctx.close();
}
|
@Test
void onCloseHandlesExceptionFromClosingAsyncExecutor(@TempDir final Path dir)
{
final ExecutorService asyncTaskExecutor = mock(ExecutorService.class);
final IllegalStateException closeException = new IllegalStateException("executor failed");
doThrow(closeException).when(asyncTaskExecutor).shutdownNow();
final DriverConductor conductor = new DriverConductor(ctx.clone()
.cncByteBuffer(IoUtil.mapNewFile(dir.resolve("some.txt").toFile(), 64))
.asyncTaskExecutor(asyncTaskExecutor));
conductor.onClose();
final InOrder inOrder = inOrder(asyncTaskExecutor, mockErrorHandler);
inOrder.verify(asyncTaskExecutor).shutdownNow();
inOrder.verify(mockErrorHandler).onError(closeException);
inOrder.verifyNoMoreInteractions();
}
|
@VisibleForTesting
static InstructionOutput forInstructionOutput(
InstructionOutput input, boolean replaceWithByteArrayCoder) throws Exception {
InstructionOutput cloudOutput = clone(input, InstructionOutput.class);
cloudOutput.setCodec(forCodec(cloudOutput.getCodec(), replaceWithByteArrayCoder));
return cloudOutput;
}
|
@Test
public void testLengthPrefixInstructionOutputCoder() throws Exception {
InstructionOutput output = new InstructionOutput();
output.setCodec(CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null));
output.setFactory(new GsonFactory());
InstructionOutput prefixedOutput = forInstructionOutput(output, false);
assertEqualsAsJson(
CloudObjects.asCloudObject(prefixedWindowedValueCoder, /*sdkComponents=*/ null),
prefixedOutput.getCodec());
// Should not mutate the instruction.
assertEqualsAsJson(
output.getCodec(), CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null));
}
|
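// Rejects lockAtMostFor values shorter than the minimal keep-alive window and
// otherwise wraps the lock obtained from the delegate provider in a
// KeepAliveLock driven by the shared executorService (presumably to extend the
// lock periodically while it is held).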
@Override
public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) {
if (lockConfiguration.getLockAtMostFor().compareTo(minimalLockAtMostFor) < 0) {
throw new IllegalArgumentException(
"Can not use KeepAliveLockProvider with lockAtMostFor shorter than " + minimalLockAtMostFor);
}
Optional<SimpleLock> lock = wrapped.lock(lockConfiguration);
return lock.map(simpleLock -> new KeepAliveLock(lockConfiguration, simpleLock, executorService));
}
|
@Test
void shouldScheduleKeepAliveTask() {
mockExtension(originalLock, Optional.of(originalLock));
Optional<SimpleLock> lock = provider.lock(lockConfiguration);
assertThat(lock).isNotNull();
tickMs(1_500);
verify(originalLock).extend(lockConfiguration.getLockAtMostFor(), ofMillis(500));
lock.get().unlock();
verify(originalLock).unlock();
tickMs(10_000);
verifyNoMoreInteractions(originalLock);
}
|
public static List<Import> getImportList(final List<String> importCells) {
final List<Import> importList = new ArrayList<>();
if ( importCells == null ) {
return importList;
}
for( String importCell: importCells ){
final StringTokenizer tokens = new StringTokenizer( importCell, "," );
while ( tokens.hasMoreTokens() ) {
final Import imp = new Import();
imp.setClassName( tokens.nextToken().trim() );
importList.add( imp );
}
}
return importList;
}
|
@Test
public void getImportList_nullValue() {
assertThat(getImportList(null)).isNotNull().isEmpty();
}
|
@VisibleForTesting
void validateUsernameUnique(Long id, String username) {
if (StrUtil.isBlank(username)) {
return;
}
AdminUserDO user = userMapper.selectByUsername(username);
if (user == null) {
return;
}
// If id is null, there is no need to check whether the existing user has the same id
if (id == null) {
throw exception(USER_USERNAME_EXISTS);
}
if (!user.getId().equals(id)) {
throw exception(USER_USERNAME_EXISTS);
}
}
|
@Test
public void testValidateUsernameUnique_usernameExistsForUpdate() {
// Prepare parameters
Long id = randomLongId();
String username = randomString();
// Mock data
userMapper.insert(randomAdminUserDO(o -> o.setUsername(username)));
// Invoke and verify the expected exception
assertServiceException(() -> userService.validateUsernameUnique(id, username),
USER_USERNAME_EXISTS);
}
|
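// Implements ZSCAN as a key-bound cursor: each iteration issues ZSCAN with the
// current cursor id plus optional MATCH/COUNT arguments, and the RedisClient
// returned by the reply is reused so the whole scan stays on the same node.
// Not supported in pipeline / transaction mode.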
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
return new KeyBoundCursor<Tuple>(key, 0, options) {
private RedisClient client;
@Override
protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
}
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
ListScanResult<Tuple> res = syncFuture(f);
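                // remember the node that served this iteration so the next SCAN call (with the returned cursor) is routed to the same Redis instance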
client = res.getRedisClient();
return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
}
}.open();
}
|
@Test
public void testZScan() {
connection.zAdd("key".getBytes(), 1, "value1".getBytes());
connection.zAdd("key".getBytes(), 2, "value2".getBytes());
Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
}
|
public static SqlType fromValue(final BigDecimal value) {
// SqlDecimal does not support negative scale:
final BigDecimal decimal = value.scale() < 0
? value.setScale(0, BigDecimal.ROUND_UNNECESSARY)
: value;
/* We can't use BigDecimal.precision() directly for all cases, since it defines
* precision differently from SQL Decimal.
* In particular, if the decimal is between -0.1 and 0.1, BigDecimal precision can be
* lower than scale, which is disallowed in SQL Decimal. For example, 0.005 in
* BigDecimal has a precision,scale of 1,3; whereas we expect 4,3.
* If the decimal is in (-1,1) but outside (-0.1,0.1), the code doesn't throw, but
* gives lower precision than expected (e.g., 0.8 has precision 1 instead of 2).
* To account for this edge case, we just take the scale and add one and use that
* for the precision instead. This works since BigDecimal defines scale as the
* number of digits to the right of the period; which is one lower than the precision for
* anything in the range (-1, 1).
* This covers the case where BigDecimal has a value of 0.
* Note: This solution differs from the SQL definition in that it returns (4, 3) for
* both "0.005" and ".005", whereas SQL expects (3, 3) for the latter. This is unavoidable
* if we use BigDecimal as an intermediate representation, since the two strings are parsed
* identically by it to have precision 1.
*/
if (decimal.compareTo(BigDecimal.ONE) < 0 && decimal.compareTo(BigDecimal.ONE.negate()) > 0) {
return SqlTypes.decimal(decimal.scale() + 1, decimal.scale());
}
return SqlTypes.decimal(decimal.precision(), Math.max(decimal.scale(), 0));
}
|
@Test
public void shouldGetSchemaFromDecimal1_0() {
// When:
final SqlType schema = DecimalUtil.fromValue(new BigDecimal("0"));
// Then:
assertThat(schema, is(SqlTypes.decimal(1, 0)));
}
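
The edge cases described in the comment inside fromValue can be illustrated with a few more assertions in the same style (a sketch, assuming the same DecimalUtil, SqlTypes, and Hamcrest imports as the test above):

    @Test
    public void shouldGetSchemaFromDecimalEdgeCases() {
        // values in (-1, 1): precision is derived as scale + 1, e.g. 0.005 -> DECIMAL(4, 3)
        assertThat(DecimalUtil.fromValue(new BigDecimal("0.005")), is(SqlTypes.decimal(4, 3)));
        // values outside (-1, 1): BigDecimal precision and scale are used directly
        assertThat(DecimalUtil.fromValue(new BigDecimal("10.01")), is(SqlTypes.decimal(4, 2)));
        // negative scale is normalised to 0 before the type is computed: 1.2E+3 -> DECIMAL(4, 0)
        assertThat(DecimalUtil.fromValue(new BigDecimal("1.2E+3")), is(SqlTypes.decimal(4, 0)));
    }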
|
@Override
public void deregisterInstance(String serviceName, String ip, int port) throws NacosException {
deregisterInstance(serviceName, ip, port, Constants.DEFAULT_CLUSTER_NAME);
}
|
@Test
void testDeregisterInstance1() throws NacosException {
//given
String serviceName = "service1";
String ip = "1.1.1.1";
int port = 10000;
//when
client.deregisterInstance(serviceName, ip, port);
//then
verify(proxy, times(1)).deregisterService(eq(serviceName), eq(Constants.DEFAULT_GROUP),
argThat(instance -> instance.getIp().equals(ip) && instance.getPort() == port
&& Math.abs(instance.getWeight() - 1.0) < 0.01f && instance.getClusterName()
.equals(Constants.DEFAULT_CLUSTER_NAME)));
}
|
@Udf(description = "Returns the inverse (arc) tangent of an INT value")
public Double atan(
@UdfParameter(
value = "value",
description = "The value to get the inverse tangent of."
) final Integer value
) {
return atan(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandleLessThanNegativeOne() {
assertThat(udf.atan(-1.1), closeTo(-0.8329812666744317, 0.000000000000001));
assertThat(udf.atan(-6.0), closeTo(-1.4056476493802699, 0.000000000000001));
assertThat(udf.atan(-2), closeTo(-1.1071487177940904, 0.000000000000001));
assertThat(udf.atan(-2L), closeTo(-1.1071487177940904, 0.000000000000001));
}
|
public static BigInteger factorial(BigInteger n) {
if (n.equals(BigInteger.ZERO)) {
return BigInteger.ONE;
}
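        // delegate to the two-argument overload: factorial(start, end) multiplies start * (start - 1) * ... * (end + 1), so an end of ZERO yields n!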
return factorial(n, BigInteger.ZERO);
}
|
@Test
public void factorialTest(){
long factorial = NumberUtil.factorial(0);
assertEquals(1, factorial);
assertEquals(1L, NumberUtil.factorial(1));
assertEquals(1307674368000L, NumberUtil.factorial(15));
assertEquals(2432902008176640000L, NumberUtil.factorial(20));
factorial = NumberUtil.factorial(5, 0);
assertEquals(120, factorial);
factorial = NumberUtil.factorial(5, 1);
assertEquals(120, factorial);
assertEquals(5, NumberUtil.factorial(5, 4));
assertEquals(2432902008176640000L, NumberUtil.factorial(20, 0));
}
|
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
Set<Long> roleIds = StrUtils.splitToLongSet(param);
return permissionApi.getUserRoleIdListByRoleIds(roleIds);
}
|
@Test
public void testCalculateUsers() {
        // prepare parameters
String param = "1,2";
        // mock the method
when(permissionApi.getUserRoleIdListByRoleIds(eq(asSet(1L, 2L))))
.thenReturn(asSet(11L, 22L));
        // invoke
Set<Long> results = strategy.calculateUsers(null, param);
        // assert
assertEquals(asSet(11L, 22L), results);
}
|
@VisibleForTesting
ClientConfiguration createBkClientConfiguration(MetadataStoreExtended store, ServiceConfiguration conf) {
ClientConfiguration bkConf = new ClientConfiguration();
if (conf.getBookkeeperClientAuthenticationPlugin() != null
&& conf.getBookkeeperClientAuthenticationPlugin().trim().length() > 0) {
bkConf.setClientAuthProviderFactoryClass(conf.getBookkeeperClientAuthenticationPlugin());
bkConf.setProperty(conf.getBookkeeperClientAuthenticationParametersName(),
conf.getBookkeeperClientAuthenticationParameters());
}
if (conf.isBookkeeperTLSClientAuthentication()) {
bkConf.setTLSClientAuthentication(true);
bkConf.setTLSCertificatePath(conf.getBookkeeperTLSCertificateFilePath());
bkConf.setTLSKeyStore(conf.getBookkeeperTLSKeyFilePath());
bkConf.setTLSKeyStoreType(conf.getBookkeeperTLSKeyFileType());
bkConf.setTLSKeyStorePasswordPath(conf.getBookkeeperTLSKeyStorePasswordPath());
bkConf.setTLSProviderFactoryClass(conf.getBookkeeperTLSProviderFactoryClass());
bkConf.setTLSTrustStore(conf.getBookkeeperTLSTrustCertsFilePath());
bkConf.setTLSTrustStoreType(conf.getBookkeeperTLSTrustCertTypes());
bkConf.setTLSTrustStorePasswordPath(conf.getBookkeeperTLSTrustStorePasswordPath());
bkConf.setTLSCertFilesRefreshDurationSeconds(conf.getBookkeeperTlsCertFilesRefreshDurationSeconds());
}
bkConf.setBusyWaitEnabled(conf.isEnableBusyWait());
bkConf.setNumWorkerThreads(conf.getBookkeeperClientNumWorkerThreads());
bkConf.setThrottleValue(conf.getBookkeeperClientThrottleValue());
bkConf.setAddEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
bkConf.setReadEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
bkConf.setSpeculativeReadTimeout(conf.getBookkeeperClientSpeculativeReadTimeoutInMillis());
bkConf.setNumChannelsPerBookie(conf.getBookkeeperNumberOfChannelsPerBookie());
bkConf.setUseV2WireProtocol(conf.isBookkeeperUseV2WireProtocol());
bkConf.setEnableDigestTypeAutodetection(true);
bkConf.setStickyReadsEnabled(conf.isBookkeeperEnableStickyReads());
bkConf.setNettyMaxFrameSizeBytes(conf.getMaxMessageSize() + Commands.MESSAGE_SIZE_FRAME_PADDING);
bkConf.setDiskWeightBasedPlacementEnabled(conf.isBookkeeperDiskWeightBasedPlacementEnabled());
bkConf.setMetadataServiceUri(conf.getBookkeeperMetadataStoreUrl());
bkConf.setLimitStatsLogging(conf.isBookkeeperClientLimitStatsLogging());
if (!conf.isBookkeeperMetadataStoreSeparated()) {
// If we're connecting to the same metadata service, with same config, then
// let's share the MetadataStore instance
bkConf.setProperty(AbstractMetadataDriver.METADATA_STORE_INSTANCE, store);
}
if (conf.isBookkeeperClientHealthCheckEnabled()) {
bkConf.enableBookieHealthCheck();
bkConf.setBookieHealthCheckInterval((int) conf.getBookkeeperClientHealthCheckIntervalSeconds(),
TimeUnit.SECONDS);
bkConf.setBookieErrorThresholdPerInterval(conf.getBookkeeperClientHealthCheckErrorThresholdPerInterval());
bkConf.setBookieQuarantineTime((int) conf.getBookkeeperClientHealthCheckQuarantineTimeInSeconds(),
TimeUnit.SECONDS);
bkConf.setBookieQuarantineRatio(conf.getBookkeeperClientQuarantineRatio());
}
bkConf.setReorderReadSequenceEnabled(conf.isBookkeeperClientReorderReadSequenceEnabled());
bkConf.setExplictLacInterval(conf.getBookkeeperExplicitLacIntervalInMills());
bkConf.setGetBookieInfoIntervalSeconds(
conf.getBookkeeperClientGetBookieInfoIntervalSeconds(), TimeUnit.SECONDS);
bkConf.setGetBookieInfoRetryIntervalSeconds(
conf.getBookkeeperClientGetBookieInfoRetryIntervalSeconds(), TimeUnit.SECONDS);
bkConf.setNumIOThreads(conf.getBookkeeperClientNumIoThreads());
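        // forward any broker properties prefixed with "bookkeeper_" directly to the BookKeeper client configuration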
PropertiesUtils.filterAndMapProperties(conf.getProperties(), "bookkeeper_")
.forEach((key, value) -> {
log.info("Applying BookKeeper client configuration setting {}={}", key, value);
bkConf.setProperty(key, value);
});
return bkConf;
}
|
@Test
public void testSetExplicitLacInterval() {
BookKeeperClientFactoryImpl factory = new BookKeeperClientFactoryImpl();
ServiceConfiguration conf = new ServiceConfiguration();
conf.setMetadataStoreUrl("zk:localhost:2181");
assertEquals(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf).getExplictLacInterval(),
0);
conf.setBookkeeperExplicitLacIntervalInMills(5);
assertEquals(
factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf).getExplictLacInterval(),
5);
}
|
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
InputStream in = Files.newInputStream(dataFile.toPath());
try {
MessageDigest digester = MD5Hash.getDigester();
DigestInputStream dis = new DigestInputStream(in, digester);
IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);
return new MD5Hash(digester.digest());
} finally {
IOUtils.closeStream(in);
}
}
|
@Test
public void testComputeMd5ForFile() throws Exception {
MD5Hash computedDigest = MD5FileUtils.computeMd5ForFile(TEST_FILE);
assertEquals(TEST_MD5, computedDigest);
}
|
static Set<String> parseStaleDataNodeList(String liveNodeJsonString,
final int blockThreshold, final Logger log) throws IOException {
final Set<String> dataNodesToReport = new HashSet<>();
JsonFactory fac = JacksonUtil.createBasicJsonFactory();
JsonParser parser = fac.createParser(IOUtils
.toInputStream(liveNodeJsonString, StandardCharsets.UTF_8.name()));
int objectDepth = 0;
String currentNodeAddr = null;
for (JsonToken tok = parser.nextToken(); tok != null; tok = parser
.nextToken()) {
if (tok == JsonToken.START_OBJECT) {
objectDepth++;
} else if (tok == JsonToken.END_OBJECT) {
objectDepth--;
} else if (tok == JsonToken.FIELD_NAME) {
if (objectDepth == 1) {
// This is where the Datanode identifiers are stored
currentNodeAddr = parser.getCurrentName();
} else if (objectDepth == 2) {
if (parser.getCurrentName().equals("numBlocks")) {
JsonToken valueToken = parser.nextToken();
if (valueToken != JsonToken.VALUE_NUMBER_INT
|| currentNodeAddr == null) {
throw new IOException(String.format("Malformed LiveNodes JSON; "
+ "got token = %s; currentNodeAddr = %s: %s",
valueToken, currentNodeAddr, liveNodeJsonString));
}
int numBlocks = parser.getIntValue();
if (numBlocks < blockThreshold) {
log.debug(String.format(
"Queueing Datanode <%s> for block report; numBlocks = %d",
currentNodeAddr, numBlocks));
dataNodesToReport.add(currentNodeAddr);
} else {
log.debug(String.format(
"Not queueing Datanode <%s> for block report; numBlocks = %d",
currentNodeAddr, numBlocks));
}
}
}
}
}
return dataNodesToReport;
}
|
@Test
public void testParseStaleDatanodeListMultipleDatanodes() throws Exception {
String json = "{"
+ "\"1.2.3.4:1\": {\"numBlocks\": 0}, "
+ "\"1.2.3.4:2\": {\"numBlocks\": 15}, "
+ "\"1.2.3.4:3\": {\"numBlocks\": 5}, "
+ "\"1.2.3.4:4\": {\"numBlocks\": 10} "
+ "}";
Set<String> out = DynoInfraUtils.parseStaleDataNodeList(json, 10, LOG);
assertEquals(2, out.size());
assertTrue(out.contains("1.2.3.4:1"));
assertTrue(out.contains("1.2.3.4:3"));
}
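
A hedged sketch of the malformed-input branch, in the style of the test above: a non-numeric numBlocks value should surface as an IOException. It uses a plain try/catch rather than assuming a particular assertThrows helper, and assumes the usual JUnit static imports.

    @Test
    public void testParseStaleDatanodeListMalformedJson() throws Exception {
        String json = "{\"1.2.3.4:1\": {\"numBlocks\": \"oops\"}}";
        try {
            DynoInfraUtils.parseStaleDataNodeList(json, 10, LOG);
            fail("Expected IOException for a non-numeric numBlocks value");
        } catch (IOException expected) {
            // expected: numBlocks must be an integer token
        }
    }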
|
public String convert(ILoggingEvent le) {
List<Marker> markers = le.getMarkerList();
if (markers == null || markers.isEmpty()) {
return EMPTY;
}
int size = markers.size();
if (size == 1)
return markers.get(0).toString();
StringBuffer buf = new StringBuffer(32);
for (int i = 0; i < size; i++) {
if (i != 0)
buf.append(' ');
Marker m = markers.get(i);
buf.append(m.toString());
}
return buf.toString();
}
|
@Test
public void testWithOneChildMarker() {
Marker marker = markerFactory.getMarker("test");
marker.add(markerFactory.getMarker("child"));
String result = converter.convert(createLoggingEvent(marker));
assertEquals("test [ child ]", result);
}
|
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
}
|
@Test
public void testDeterminismSet() {
assertDeterministic(AvroCoder.of(StringSortedSet.class));
assertDeterministic(AvroCoder.of(StringTreeSet.class));
assertNonDeterministic(
AvroCoder.of(StringHashSet.class),
reasonField(
StringHashSet.class,
"stringCollection",
"java.util.HashSet<java.lang.String> may not be deterministically ordered"));
}
|