focal_method | test_case
---|---|
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
InvokeMode invokeMode = RpcUtils.getInvokeMode(invoker.getUrl(), invocation);
if (InvokeMode.SYNC == invokeMode) {
return syncInvoke(invoker, invocation);
} else {
return asyncInvoke(invoker, invocation);
}
}
|
@Test
public void testInvokeSync() {
Invocation invocation = DubboTestUtil.getDefaultMockInvocationOne();
Invoker invoker = DubboTestUtil.getDefaultMockInvoker();
final Result result = mock(Result.class);
when(result.hasException()).thenReturn(false);
when(invoker.invoke(invocation)).thenAnswer(invocationOnMock -> {
verifyInvocationStructure(invoker, invocation);
return result;
});
consumerFilter.invoke(invoker, invocation);
verify(invoker).invoke(invocation);
Context context = ContextUtil.getContext();
assertNull(context);
}
|
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
}
|
@Test
void shouldNotRemoveCounterOnClientKeepalive()
{
final long registrationId = driverProxy.addCounter(
COUNTER_TYPE_ID,
counterKeyAndLabel,
COUNTER_KEY_OFFSET,
COUNTER_KEY_LENGTH,
counterKeyAndLabel,
COUNTER_LABEL_OFFSET,
COUNTER_LABEL_LENGTH);
driverConductor.doWork();
final ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
verify(mockClientProxy).onCounterReady(eq(registrationId), captor.capture());
final AtomicCounter heartbeatCounter = clientHeartbeatCounter(spyCountersManager);
doWorkUntil(() ->
{
heartbeatCounter.setOrdered(epochClock.time());
return (CLIENT_LIVENESS_TIMEOUT_NS * 2) - nanoClock.nanoTime() <= 0;
});
verify(spyCountersManager, never()).free(captor.getValue());
}
|
@Override
public CRMaterial deserialize(JsonElement json, Type type, JsonDeserializationContext context) throws JsonParseException {
return determineJsonElementForDistinguishingImplementers(json, context, TYPE, ARTIFACT_ORIGIN);
}
|
@Test
public void shouldDeserializePluggableScmMaterialType() {
JsonObject jsonObject = new JsonObject();
jsonObject.addProperty("type", "plugin");
materialTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);
verify(jsonDeserializationContext).deserialize(jsonObject, CRPluggableScmMaterial.class);
}
|
public String getRestartStepId() {
return getCurrentNode(restartConfig).getStepId();
}
|
@Test
public void testGetRestartStepId() {
RestartConfig config = RestartConfig.builder().addRestartNode("foo", 1, "bar").build();
RunRequest runRequest =
RunRequest.builder()
.initiator(new ManualInitiator())
.currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
.restartConfig(config)
.build();
Assert.assertEquals("bar", runRequest.getRestartStepId());
}
|
public SymmetricEncryptionConfig setKey(byte[] key) {
this.key = cloneKey(key);
return this;
}
|
@Test
public void testSetKey() {
byte[] key = new byte[]{23, 42};
config.setKey(key);
assertEquals(key[0], config.getKey()[0]);
assertEquals(key[1], config.getKey()[1]);
}
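A hedged companion sketch for the pair above: setKey stores the result of cloneKey, which is assumed to be a defensive copy of the array. The existing test passes either way because it only compares element values; a test like the following would also pin down the copy semantics.
@Test
public void setKeyKeepsADefensiveCopy() {
    byte[] key = new byte[]{23, 42};
    config.setKey(key);
    key[0] = 0;                            // caller mutates its own array afterwards
    assertEquals(23, config.getKey()[0]);  // the stored copy is assumed to be unaffected
}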
|
public static <T> Inner<T> fields(String... fields) {
return fields(FieldAccessDescriptor.withFieldNames(fields));
}
|
@Test
@Category(NeedsRunner.class)
public void testMaintainsOriginalSchemaOrder() {
Schema expectedSchema =
Schema.builder()
.addFields(intFieldsRange(1, 10))
.addFields(intFieldsRange(11, 19))
.addFields(intFieldsRange(21, 55))
.addFields(intFieldsRange(56, 100))
.build();
PCollection<Row> result =
pipeline
.apply(
Create.of(
multipleIntRow(MULTIPLE_INT_SCHEMA, 1),
multipleIntRow(MULTIPLE_INT_SCHEMA, 2))
.withRowSchema(MULTIPLE_INT_SCHEMA))
.apply(DropFields.fields("field0", "field10", "field19", "field20", "field55"));
assertEquals(expectedSchema, result.getSchema());
List<Row> expectedRows =
Lists.newArrayList(multipleIntRow(expectedSchema, 1), multipleIntRow(expectedSchema, 2));
PAssert.that(result).containsInAnyOrder(expectedRows);
pipeline.run();
}
|
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
}
|
@Test
void convert_to_single_object__single_cell() {
DataTable table = parse("| ♝ |");
registry.defineDataTableType(new DataTableType(Piece.class, PIECE_TABLE_CELL_TRANSFORMER));
assertEquals(Piece.BLACK_BISHOP, converter.convert(table, Piece.class));
}
|
public void setMessage(AbstractMessage message) {
this.message = message;
}
|
@Test
public void setMessage() {
nettyPoolKey.setMessage(MSG2);
Assertions.assertEquals(nettyPoolKey.getMessage(), MSG2);
}
|
int parseAndConvert(String[] args) throws Exception {
Options opts = createOptions();
int retVal = 0;
try {
if (args.length == 0) {
LOG.info("Missing command line arguments");
printHelp(opts);
return 0;
}
CommandLine cliParser = new GnuParser().parse(opts, args);
if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
printHelp(opts);
return 0;
}
FSConfigToCSConfigConverter converter =
prepareAndGetConverter(cliParser);
converter.convert(converterParams);
String outputDir = converterParams.getOutputDirectory();
boolean skipVerification =
cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
if (outputDir != null && !skipVerification) {
validator.validateConvertedConfig(
converterParams.getOutputDirectory());
}
} catch (ParseException e) {
String msg = "Options parsing failed: " + e.getMessage();
logAndStdErr(e, msg);
printHelp(opts);
retVal = -1;
} catch (PreconditionException e) {
String msg = "Cannot start FS config conversion due to the following"
+ " precondition error: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (UnsupportedPropertyException e) {
String msg = "Unsupported property/setting encountered during FS config "
+ "conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (ConversionException | IllegalArgumentException e) {
String msg = "Fatal error during FS config conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (VerificationException e) {
Throwable cause = e.getCause();
String msg = "Verification failed: " + e.getCause().getMessage();
conversionOptions.handleVerificationFailure(cause, msg);
retVal = -1;
}
conversionOptions.handleParsingFinished();
return retVal;
}
|
@Test
public void testDisabledAsyncScheduling() throws Exception {
setupFSConfigConversionFiles(true);
FSConfigToCSConfigArgumentHandler argumentHandler =
new FSConfigToCSConfigArgumentHandler(conversionOptions, mockValidator);
String[] args = getArgumentsAsArrayWithDefaults("-f",
FSConfigConverterTestCommons.FS_ALLOC_FILE, "-p");
argumentHandler.parseAndConvert(args);
assertFalse("-a switch wasn't provided but async scheduling option is true",
conversionOptions.isEnableAsyncScheduler());
}
|
@Override
public String getLongDescription() {
return String.format("%s [ %s ]", CaseInsensitiveString.str(pipelineName), CaseInsensitiveString.str(stageName));
}
|
@Test
void shouldSetLongDescriptionAsCombinationOfPipelineAndStageName() {
DependencyMaterial material = new DependencyMaterial(new CaseInsensitiveString("pipeline-name"), new CaseInsensitiveString("stage-name"));
assertThat(material.getLongDescription()).isEqualTo("pipeline-name [ stage-name ]");
}
|
@Override
public GenericRecordBuilder newRecordBuilder() {
return new AvroRecordBuilderImpl(this);
}
|
@Test
public void testDecodeWithMultiVersioningSupport() {
MultiVersionSchemaInfoProvider provider = mock(MultiVersionSchemaInfoProvider.class);
readerSchema.setSchemaInfoProvider(provider);
when(provider.getSchemaByVersion(any(byte[].class)))
.thenReturn(CompletableFuture.completedFuture(writerSchema.getSchemaInfo()));
GenericRecord dataForWriter = writerSchema.newRecordBuilder()
.set("field1", SchemaTestUtils.TEST_MULTI_VERSION_SCHEMA_STRING)
.set("field3", 0)
.build();
GenericRecord record = readerSchema.decode(writerSchema.encode(dataForWriter), new byte[10]);
Assert.assertEquals(SchemaTestUtils.TEST_MULTI_VERSION_SCHEMA_STRING, record.getField("field1"));
Assert.assertEquals(0, record.getField("field3"));
Assert.assertEquals(SchemaTestUtils.TEST_MULTI_VERSION_SCHEMA_DEFAULT_STRING, record.getField("fieldUnableNull"));
}
|
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
return new UnbindChangeProcessor();
}
|
@Test
public void leftJoinShouldPropagateChangeOfFKFromNonNullToNullValue() {
final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
leftJoinProcessor.init(context);
context.setRecordMetadata("topic", 0, 0);
final LeftValue leftRecordValue = new LeftValue(null);
leftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(fk1)), 0));
assertThat(context.forwarded().size(), greaterThan(0));
assertThat(
context.forwarded().get(0).record(),
is(new Record<>(fk1, new SubscriptionWrapper<>(hash(leftRecordValue), DELETE_KEY_AND_PROPAGATE, pk, 0), 0))
);
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void openj9() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/openj9.txt")),
CrashReportAnalyzer.Rule.OPENJ9);
}
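The analyzer above scans the whole log against a fixed set of regex rules and records every rule that matches. A self-contained sketch of that idea follows; the rule names and patterns here are made up for illustration and are not the analyzer's actual rules.
import java.util.EnumSet;
import java.util.Set;
import java.util.regex.Pattern;

public class LogRuleSketch {
    enum Rule {
        OUT_OF_MEMORY(Pattern.compile("java\\.lang\\.OutOfMemoryError")),
        STACK_OVERFLOW(Pattern.compile("java\\.lang\\.StackOverflowError"));

        final Pattern pattern;
        Rule(Pattern pattern) { this.pattern = pattern; }
    }

    static Set<Rule> analyze(String log) {
        Set<Rule> hits = EnumSet.noneOf(Rule.class);
        for (Rule rule : Rule.values()) {
            if (rule.pattern.matcher(log).find()) { // a rule matches anywhere in the log
                hits.add(rule);
            }
        }
        return hits;
    }

    public static void main(String[] args) {
        // prints [OUT_OF_MEMORY]
        System.out.println(analyze("Caused by: java.lang.OutOfMemoryError: Java heap space"));
    }
}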
|
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
}
|
@Test
public void getRecordsUsingExactValueAscending() {
var expectedOrder = List.of(1, 4, 7);
var actual = store.getSqlRecordIteratorBatch(1, false);
assertResult(expectedOrder, actual);
}
|
@Config("hive.metastore")
public MetastoreConfig setMetastoreType(String metastoreType)
{
this.metastoreType = metastoreType;
return this;
}
|
@Test
public void testDefaults()
{
assertRecordedDefaults(recordDefaults(MetastoreConfig.class)
.setMetastoreType("thrift"));
}
|
@Override
public boolean getTcpKeepAlive() {
return clientConfig.getPropertyAsBoolean(TCP_KEEP_ALIVE, false);
}
|
@Test
void testGetTcpKeepAlive() {
assertFalse(connectionPoolConfig.getTcpKeepAlive());
}
|
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
}
|
@Test
public void testIntegerGtEq() {
boolean shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE + 6))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id range above upper bound (85 > 79)").isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE + 1))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id range above upper bound (80 > 79)").isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE)).eval(FILE);
assertThat(shouldRead).as("Should read: one possible id").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE - 4))
.eval(FILE);
assertThat(shouldRead).as("Should read: many possible ids").isTrue();
}
|
public static long getObjectSize(Object obj) throws UnsupportedOperationException {
// JDK versions 16 or later enforce strong encapsulation and block illegal reflective access.
// In effect, we cannot calculate object size by deep reflection and invoking `setAccessible` on a field,
// especially when the `isAccessible` is false. More details in JEP 403. While integrating Hudi with other
// software packages that compile against JDK 16 or later (e.g. Trino), the IllegalAccessException will be thrown.
// In that case, we use Java Object Layout (JOL) to estimate the object size.
//
// NOTE: We cannot derive the object size from the number of serialized bytes, because there is no guarantee
// that the incoming object is serializable. We could have used Java's Instrumentation API, but it
// needs an instrumentation agent that can be hooked to the JVM. In lieu of that, we are using JOL.
// GraphLayout gives the deep size of an object, including the size of objects that are referenced from the given object.
return obj == null ? 0 : GraphLayout.parseInstance(obj).totalSize();
}
|
@Test
public void testGetObjectSize() {
EmptyClass emptyClass = new EmptyClass();
StringClass stringClass = new StringClass();
PayloadClass payloadClass = new PayloadClass();
String emptyString = "";
String string = "hello";
String[] stringArray = {emptyString, string, " world"};
String[] anotherStringArray = new String[100];
List<String> stringList = new ArrayList<>();
StringBuilder stringBuilder = new StringBuilder(100);
int maxIntPrimitive = Integer.MAX_VALUE;
int minIntPrimitive = Integer.MIN_VALUE;
Integer maxInteger = Integer.MAX_VALUE;
Integer minInteger = Integer.MIN_VALUE;
long zeroLong = 0L;
double zeroDouble = 0.0;
boolean booleanField = true;
Object object = new Object();
String name = "Alice Bob";
Person person = new Person(name);
if (getJavaVersion() == 11 || getJavaVersion() == 17) {
assertEquals(48, getObjectSize(string));
assertEquals(168, getObjectSize(stringArray));
assertEquals(144, getObjectSize(stringBuilder));
assertEquals(72, getObjectSize(DayOfWeek.TUESDAY));
assertEquals(HoodieAvroUtils.gteqAvro1_9() ? 1256 : 1176,
getObjectSize(Schema.create(Schema.Type.STRING)));
assertEquals(96, getObjectSize(person));
} else {
assertEquals(56, getObjectSize(string));
assertEquals(184, getObjectSize(stringArray));
assertEquals(240, getObjectSize(stringBuilder));
assertEquals(80, getObjectSize(DayOfWeek.TUESDAY));
// Since avro 1.9, Schema use ConcurrentHashMap instead of LinkedHashMap to
// implement props, which will change the size of the object.
assertEquals(HoodieAvroUtils.gteqAvro1_9() ? 1320 : 1240,
getObjectSize(Schema.create(Schema.Type.STRING)));
assertEquals(104, getObjectSize(person));
}
assertEquals(40, getObjectSize(emptyString));
assertEquals(416, getObjectSize(anotherStringArray));
assertEquals(40, getObjectSize(stringList));
assertEquals(16, getObjectSize(maxIntPrimitive));
assertEquals(16, getObjectSize(minIntPrimitive));
assertEquals(16, getObjectSize(maxInteger));
assertEquals(16, getObjectSize(minInteger));
assertEquals(24, getObjectSize(zeroLong));
assertEquals(24, getObjectSize(zeroDouble));
assertEquals(16, getObjectSize(booleanField));
assertEquals(16, getObjectSize(object));
assertEquals(32, getObjectSize(emptyClass));
assertEquals(40, getObjectSize(stringClass));
assertEquals(40, getObjectSize(payloadClass));
}
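getObjectSize delegates to JOL's GraphLayout, so a minimal usage sketch looks like the following; the exact number reported depends on the JDK version and other JVM settings such as compressed oops and string compaction, which is why the test above branches on getJavaVersion().
import org.openjdk.jol.info.GraphLayout;

public class ObjectSizeSketch {
    public static void main(String[] args) {
        // GraphLayout walks the whole reference graph, so the size of "hello"
        // includes the String header and fields plus its backing array.
        long deepSize = GraphLayout.parseInstance("hello").totalSize();
        System.out.println("deep size of \"hello\": " + deepSize + " bytes");
    }
}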
|
@ProcessElement
public void processElement(OutputReceiver<PartitionMetadata> receiver) {
PartitionMetadataDao partitionMetadataDao = daoFactory.getPartitionMetadataDao();
if (!partitionMetadataDao.tableExists()) {
daoFactory.getPartitionMetadataAdminDao().createPartitionMetadataTable();
createFakeParentPartition();
}
final PartitionMetadata initialPartition =
Optional.ofNullable(partitionMetadataDao.getPartition(InitialPartition.PARTITION_TOKEN))
.map(mapperFactory.partitionMetadataMapper()::from)
.orElseThrow(
() -> new IllegalStateException("Initial partition not found in metadata table."));
receiver.output(initialPartition);
}
|
@Test
public void testInitializeWithNoPartition() {
when(daoFactory.getPartitionMetadataDao()).thenReturn(partitionMetadataDao);
when(partitionMetadataDao.tableExists()).thenReturn(false);
when(daoFactory.getPartitionMetadataAdminDao()).thenReturn(partitionMetadataAdminDao);
doNothing().when(partitionMetadataAdminDao).createPartitionMetadataTable();
when(partitionMetadataDao.insert(any())).thenReturn(Timestamp.ofTimeMicroseconds(1L));
when(mapperFactory.partitionMetadataMapper()).thenReturn(partitionMetadataMapper);
when(partitionMetadataMapper.from(any())).thenReturn(mock(PartitionMetadata.class));
try {
initializeDoFn.processElement(receiver);
fail();
} catch (IllegalStateException e) {
assertEquals("Initial partition not found in metadata table.", e.getMessage());
}
}
|
public void resolveFields(SearchContext searchContext, String indexMapping) throws StarRocksConnectorException {
JSONObject jsonObject = new JSONObject(indexMapping);
// if the index name is an alias, take the first mapping
Iterator<String> keys = jsonObject.keys();
String docKey = keys.next();
JSONObject docData = jsonObject.optJSONObject(docKey);
JSONObject mappings = docData.optJSONObject("mappings");
JSONObject rootSchema = mappings.optJSONObject(searchContext.type());
JSONObject properties;
// Elasticsearch 7.x, type was removed from ES mapping, default type is `_doc`
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/removal-of-types.html
// From Elasticsearch 8.x, Specifying types in requests is no longer supported,
// The include_type_name parameter is removed
// https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html
if (rootSchema == null) {
// 1. before 7.0, if the `type` does not exist in index, rootSchema is null
// this can throw exception within the `properties == null` predicate
// 2. after or equal 8.x, type is removed from mappings
properties = mappings.optJSONObject("properties");
} else {
properties = rootSchema.optJSONObject("properties");
}
if (properties == null) {
throw new StarRocksConnectorException("index[" + searchContext.sourceIndex() + "] type[" + searchContext.type() +
"] mapping not found for the ES Cluster");
}
for (Column col : searchContext.columns()) {
String colName = col.getName();
// if a column exists in the StarRocks table but is not found in the ES mapping, we choose to ignore it
if (!properties.has(colName)) {
continue;
}
JSONObject fieldObject = properties.optJSONObject(colName);
resolveKeywordFields(searchContext, fieldObject, colName);
resolveDocValuesFields(searchContext, fieldObject, colName);
}
}
|
@Test
public void testMultTextFields() throws Exception {
MappingPhase mappingPhase = new MappingPhase(null);
EsTable esTableAfter7X = fakeEsTable("fake", "test", "_doc", columns);
SearchContext searchContext = new SearchContext(esTableAfter7X);
mappingPhase
.resolveFields(searchContext, loadJsonFromFile("data/es/test_index_mapping_field_mult_analyzer.json"));
assertFalse(searchContext.docValueFieldsContext().containsKey("k3"));
}
|
public static boolean isFastStatsSame(Partition oldPart, Partition newPart) {
// stats need to be recalculated if the new and old partitions have different fast stats
if ((oldPart != null) && oldPart.isSetParameters() && newPart != null && newPart.isSetParameters()) {
for (String stat : StatsSetupConst.FAST_STATS) {
if (oldPart.getParameters().containsKey(stat) && newPart.getParameters().containsKey(stat)) {
Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
String newStat = newPart.getParameters().get(stat);
if (newStat == null || !oldStat.equals(Long.parseLong(newStat))) {
return false;
}
} else {
return false;
}
}
return true;
}
return false;
}
|
@Test
public void isFastStatsSameMatchingButOnlyOneStat() {
Partition oldPartition = new Partition();
Partition newPartition = new Partition();
Map<String, String> randomParams = new HashMap<String, String>();
randomParams.put("randomParam1", "randomVal1");
newPartition.setParameters(randomParams);
assertFalse(MetaStoreServerUtils.isFastStatsSame(oldPartition, newPartition));
}
|
protected CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageFromRemoteAsync(String topic, long offset, int queueId, String brokerName) {
try {
String brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
if (null == brokerAddr) {
this.brokerController.getTopicRouteInfoManager().updateTopicRouteInfoFromNameServer(topic, true, false);
brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
if (null == brokerAddr) {
LOG.warn("can't find broker address for topic {}, {}", topic, brokerName);
return CompletableFuture.completedFuture(Triple.of(null, "brokerAddress not found", true)); // maybe offline temporarily, so need retry
}
}
return this.brokerController.getBrokerOuterAPI().pullMessageFromSpecificBrokerAsync(brokerName,
brokerAddr, this.innerConsumerGroupName, topic, queueId, offset, 1, DEFAULT_PULL_TIMEOUT_MILLIS)
.thenApply(pullResult -> {
if (pullResult.getLeft() != null
&& PullStatus.FOUND.equals(pullResult.getLeft().getPullStatus())
&& CollectionUtils.isNotEmpty(pullResult.getLeft().getMsgFoundList())) {
return Triple.of(pullResult.getLeft().getMsgFoundList().get(0), "", false);
}
return Triple.of(null, pullResult.getMiddle(), pullResult.getRight());
});
} catch (Exception e) {
LOG.error("Get message from remote failed. {}, {}, {}, {}", topic, offset, queueId, brokerName, e);
}
return CompletableFuture.completedFuture(Triple.of(null, "Get message from remote failed", true)); // need retry
}
|
@Test
public void getMessageFromRemoteAsyncTest_brokerAddressNotFound() throws Exception {
when(topicRouteInfoManager.findBrokerAddressInSubscribe(anyString(), anyLong(), anyBoolean())).thenReturn(null);
Triple<MessageExt, String, Boolean> rst = escapeBridge.getMessageFromRemoteAsync(TEST_TOPIC, 1, DEFAULT_QUEUE_ID, BROKER_NAME).join();
Assert.assertNull(rst.getLeft());
Assert.assertEquals("brokerAddress not found", rst.getMiddle());
Assert.assertTrue(rst.getRight()); // need retry
}
|
@Override
public ConnectionProperties parse(final String url, final String username, final String catalog) {
List<Matcher> matchers = Arrays.asList(THIN_URL_PATTERN.matcher(url), CONNECT_DESCRIPTOR_URL_PATTERN.matcher(url));
Matcher matcher = matchers.stream().filter(Matcher::find).findAny().orElseThrow(() -> new UnrecognizedDatabaseURLException(url, THIN_URL_PATTERN.pattern()));
int groupCount = matcher.groupCount();
return THIN_MATCH_GROUP_COUNT == groupCount ? getThinConnectionProperties(username, matcher) : getStandardConnectionProperties(username, matcher);
}
|
@Test
void assertNewConstructorFailure() {
assertThrows(UnrecognizedDatabaseURLException.class, () -> parser.parse("jdbc:oracle:xxxxxxxx", "test", null));
}
|
public static void getTables( DatabaseMeta databaseMeta, String schema, Consumer<String[]> tablesConsumer ) {
executeAction( databaseMeta, database -> {
try {
tablesConsumer.accept( database.getTablenames( schema, false ) );
} catch ( KettleDatabaseException | NullPointerException e ) {
logError( databaseMeta, e );
tablesConsumer.accept( new String[ 0 ] );
}
} );
}
|
@Test
public void getTables() throws InterruptedException, ExecutionException, TimeoutException {
AsyncDatabaseAction.getTables( dbMeta, "PUBLIC", completion::complete );
String[] tables = completion.get( COMPLETION_TIMEOUT, TimeUnit.MILLISECONDS );
assertThat( tables.length, equalTo( 3 ) );
assertThat( sorted( tables ), equalTo( new String[] { "BAR", "BAZ", "FOO" } ) );
}
|
@Override
public boolean hasParam(String key) {
return source.getParameterMap().containsKey(key);
}
|
@Test
public void has_param_from_source() {
when(source.getParameterMap()).thenReturn(Map.of("param", new String[] {"value"}));
ServletRequest request = new ServletRequest(new JavaxHttpRequest(source));
assertThat(request.hasParam("param")).isTrue();
}
|
public static CharSequence unescapeCsv(CharSequence value) {
int length = checkNotNull(value, "value").length();
if (length == 0) {
return value;
}
int last = length - 1;
boolean quoted = isDoubleQuote(value.charAt(0)) && isDoubleQuote(value.charAt(last)) && length != 1;
if (!quoted) {
validateCsvFormat(value);
return value;
}
StringBuilder unescaped = InternalThreadLocalMap.get().stringBuilder();
for (int i = 1; i < last; i++) {
char current = value.charAt(i);
if (current == DOUBLE_QUOTE) {
if (isDoubleQuote(value.charAt(i + 1)) && (i + 1) != last) {
// Followed by a double-quote but not the last character
// Just skip the next double-quote
i++;
} else {
// Not followed by a double-quote or the following double-quote is the last character
throw newInvalidEscapedCsvFieldException(value, i);
}
}
unescaped.append(current);
}
return unescaped.toString();
}
|
@Test
public void unescapeCsvWithOddQuote() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
unescapeCsv("\"\"\"");
}
});
}
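A hedged companion to the failure case above, assuming the same static import of unescapeCsv used by the test: the happy paths show the unquoted pass-through, the stripping of surrounding quotes, and the collapsing of doubled quotes handled in the loop.
@Test
public void unescapeCsvHappyPaths() {
    assertEquals("plain", unescapeCsv("plain").toString());                   // unquoted value is returned as-is
    assertEquals("foo,bar", unescapeCsv("\"foo,bar\"").toString());           // surrounding quotes are removed
    assertEquals("say \"hi\"", unescapeCsv("\"say \"\"hi\"\"\"").toString()); // "" collapses to a single quote
}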
|
static List<String> parse(String cmdline) {
List<String> matchList = new ArrayList<>();
Matcher shellwordsMatcher = SHELLWORDS_PATTERN.matcher(cmdline);
while (shellwordsMatcher.find()) {
if (shellwordsMatcher.group(1) != null) {
matchList.add(shellwordsMatcher.group(1));
} else {
String shellword = shellwordsMatcher.group();
if (shellword.startsWith("\"")
&& shellword.endsWith("\"")
&& shellword.length() > 2) {
shellword = shellword.substring(1, shellword.length() - 1);
}
matchList.add(shellword);
}
}
return matchList;
}
|
@Test
void parses_double_quoted_strings() {
assertThat(ShellWords.parse("--name \"The Fox\""), is(equalTo(asList("--name", "The Fox"))));
}
|
@Override
public void processElement(StreamRecord<IN> element) throws Exception {
IN value = element.getValue();
IN currentValue = values.value();
if (currentValue == null) {
// register a timer for emitting the result at the end when this is the
// first input for this key
timerService.registerEventTimeTimer(VoidNamespace.INSTANCE, Long.MAX_VALUE);
} else {
// otherwise, reduce things
value = userFunction.reduce(currentValue, value);
}
values.update(value);
}
|
@Test
void noIncrementalResults() throws Exception {
KeyedOneInputStreamOperatorTestHarness<String, String, String> testHarness =
createTestHarness();
testHarness.processElement(new StreamRecord<>("hello"));
testHarness.processElement(new StreamRecord<>("hello"));
testHarness.processElement(new StreamRecord<>("ciao"));
testHarness.processElement(new StreamRecord<>("ciao"));
assertThat(testHarness.getOutput()).isEmpty();
}
|
public void in(MetaInAlarm meta, Metrics metrics) {
if (!includeMetrics.contains(meta.getMetricsName())) {
//Don't match rule, exit.
if (log.isTraceEnabled()) {
log.trace("Metric name not in the expression, {}-{}", expression, meta.getMetricsName());
}
return;
}
final String metaName = meta.getName();
if (!validate(metaName, includeNames, excludeNames, includeNamesRegex, excludeNamesRegex)) {
return;
}
AlarmEntity entity = new AlarmEntity(
meta.getScope(), meta.getScopeId(), meta.getName(), meta.getId0(), meta.getId1());
Window window = windows.computeIfAbsent(entity, ignored -> new Window(this.period, this.additionalPeriod));
window.add(meta.getMetricsName(), metrics);
}
|
@Test
public void testInitAndStart() throws IllegalExpressionException {
AlarmRule alarmRule = new AlarmRule();
alarmRule.setAlarmRuleName("mix_rule");
alarmRule.setExpression("sum((increase(endpoint_cpm,5) + increase(endpoint_percent,2)) > 0) >= 1");
alarmRule.getIncludeMetrics().add("endpoint_percent");
alarmRule.getIncludeMetrics().add("endpoint_cpm");
alarmRule.setPeriod(10);
alarmRule.setTags(new HashMap<String, String>() {{
put("key", "value");
}});
RunningRule runningRule = new RunningRule(alarmRule);
DateTime startTime = DateTime.now();
long timeInPeriod1 = TimeBucket.getMinuteTimeBucket(startTime.getMillis());
DateTime targetTime = new DateTime(TimeBucket.getTimestamp(timeInPeriod1));
runningRule.in(getMetaInAlarm(123), getMetrics(timeInPeriod1, 70));
Map<AlarmEntity, RunningRule.Window> windows = Whitebox.getInternalState(runningRule, "windows");
RunningRule.Window window = windows.get(getAlarmEntity(123));
LocalDateTime endTime = Whitebox.getInternalState(window, "endTime");
int additionalPeriod = Whitebox.getInternalState(window, "additionalPeriod");
LinkedList<Metrics> metricsBuffer = Whitebox.getInternalState(window, "values");
Assertions.assertTrue(targetTime.equals(endTime.toDateTime()));
Assertions.assertEquals(5, additionalPeriod);
Assertions.assertEquals(15, metricsBuffer.size());
}
|
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
@Test
void testNonForwardedSpaces() {
String[] nonForwardedFields = {" f1 ; f2"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, nonForwardedFields, null, threeIntTupleType, threeIntTupleType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 1)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 2)).isEmpty();
}
|
public static Map<String, StepTransition> computeDag(
Workflow workflow, List<String> startStepIds, List<String> endStepIds) {
Map<String, Step> stepMap =
workflow.getSteps().stream()
.collect(
Collectors.toMap(
Step::getId,
Function.identity(),
(step1, step2) -> {
throw new IllegalArgumentException(
String.format(
"Invalid definition of workflow [%s], where two steps have the same id [%s]",
workflow.getId(), step1.getId()));
}));
if (startStepIds != null) {
Map<String, Step> visited = new HashMap<>();
Queue<Step> queue = new ArrayDeque<>();
for (String stepId : startStepIds) {
Step step =
Checks.notNull(
stepMap.get(stepId),
"Cannot start the graph from step id [%s] as workflow does not contain it.",
stepId);
step.getTransition().getPredecessors().clear();
visited.put(step.getId(), step);
queue.add(step);
}
if (!ObjectHelper.isCollectionEmptyOrNull(endStepIds)) {
for (String stepId : endStepIds) {
Step step =
Checks.notNull(
stepMap.get(stepId),
"Cannot end the graph with step id [%s] as workflow does not contain it.",
stepId);
step.getTransition().getSuccessors().clear();
visited.put(step.getId(), step);
}
}
while (!queue.isEmpty()) {
Step step = queue.remove();
for (String successor : step.getTransition().getSuccessors().keySet()) {
if (!visited.containsKey(successor)) {
Step toAdd = stepMap.get(successor);
queue.add(toAdd);
visited.put(toAdd.getId(), toAdd);
}
}
}
stepMap = visited;
}
Map<String, GraphNode> nodeMap = computeNodeMap(workflow.getId(), stepMap);
for (GraphNode node : nodeMap.values()) {
// add predecessors if empty
if (stepMap.get(node.stepId).getTransition().getPredecessors().isEmpty()) {
stepMap.get(node.stepId).getTransition().getPredecessors().addAll(node.parents.keySet());
}
}
Checks.checkTrue(
!containsCycleInDag(nodeMap),
"Invalid workflow definition [%s], where DAG contains cycle",
workflow.getId());
return stepMap.values().stream().collect(MapHelper.toListMap(Step::getId, Step::getTransition));
}
|
@Test
public void testComputeDAGPathWithCycle() throws Exception {
WorkflowCreateRequest request =
loadObject(
"fixtures/workflows/request/sample-dag-with-cycle-wf.json",
WorkflowCreateRequest.class);
AssertHelper.assertThrows(
"Invalid workflow definition [sample-dag-test-1-wf], where DAG contains cycle",
IllegalArgumentException.class,
"Invalid workflow definition [sample-dag-test-1-wf], where DAG contains cycle",
() -> WorkflowGraph.computeDag(request.getWorkflow(), null, null));
}
|
public ProviderBuilder buffer(Integer buffer) {
this.buffer = buffer;
return getThis();
}
|
@Test
void buffer() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.buffer(1024);
Assertions.assertEquals(1024, builder.build().getBuffer());
}
|
public ShardingSphereDatabase getDatabase(final String name) {
ShardingSpherePreconditions.checkNotEmpty(name, NoDatabaseSelectedException::new);
ShardingSphereMetaData metaData = getMetaDataContexts().getMetaData();
ShardingSpherePreconditions.checkState(metaData.containsDatabase(name), () -> new UnknownDatabaseException(name));
return metaData.getDatabase(name);
}
|
@Test
void assertGetDatabase() {
assertNotNull(contextManager.getDatabase("foo_db"));
}
|
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/{executionId}")
@Operation(tags = {"Executions"}, summary = "Get an execution")
public Execution get(
@Parameter(description = "The execution id") @PathVariable String executionId
) {
return executionRepository
.findById(tenantService.resolveTenant(), executionId)
.orElse(null);
}
|
@SuppressWarnings("unchecked")
@Test
void getFlowFromNamespace() {
List<FlowForExecution> result = client.toBlocking().retrieve(
GET("/api/v1/executions/namespaces/io.kestra.tests/flows"),
Argument.of(List.class, FlowForExecution.class)
);
assertThat(result.size(), greaterThan(100));
}
|
public boolean isProcessing() {
return isProcessing.get();
}
|
@Test
public void testSetLifecycleUninitialized() throws Exception {
assertFalse(status.isProcessing());
verify(eventBus, never()).post(Lifecycle.UNINITIALIZED);
}
|
public Optional<Projection> createProjection(final ProjectionSegment projectionSegment) {
if (projectionSegment instanceof ShorthandProjectionSegment) {
return Optional.of(createProjection((ShorthandProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ColumnProjectionSegment) {
return Optional.of(createProjection((ColumnProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ExpressionProjectionSegment) {
return Optional.of(createProjection((ExpressionProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof AggregationDistinctProjectionSegment) {
return Optional.of(createProjection((AggregationDistinctProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof AggregationProjectionSegment) {
return Optional.of(createProjection((AggregationProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof SubqueryProjectionSegment) {
return Optional.of(createProjection((SubqueryProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ParameterMarkerExpressionSegment) {
return Optional.of(createProjection((ParameterMarkerExpressionSegment) projectionSegment));
}
return Optional.empty();
}
|
@Test
void assertCreateProjectionWhenProjectionSegmentNotMatched() {
assertFalse(new ProjectionEngine(databaseType).createProjection(null).isPresent());
}
|
public static FieldScope fromSetFields(Message message) {
return fromSetFields(
message, AnyUtils.defaultTypeRegistry(), AnyUtils.defaultExtensionRegistry());
}
|
@Test
public void testFromSetFields_unknownFields() throws InvalidProtocolBufferException {
// Make sure that merging of repeated fields, separation by tag number, and separation by
// unknown field type all work.
Message scopeMessage =
fromUnknownFields(
UnknownFieldSet.newBuilder()
.addField(333, Field.newBuilder().addFixed32(1).addFixed64(1).build())
.addField(
444,
Field.newBuilder()
.addVarint(1)
.addLengthDelimited(ByteString.copyFrom("1", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(1).build())
.build())
.addGroup(
UnknownFieldSet.newBuilder()
.addField(2, Field.newBuilder().addFixed64(1).build())
.build())
.build())
.build());
// 1 = compared, [2, 3] = ignored, 4 = compared and fails
Message message =
fromUnknownFields(
UnknownFieldSet.newBuilder()
.addField(222, Field.newBuilder().addFixed32(2).addFixed64(2).build())
.addField(
333,
Field.newBuilder()
.addFixed32(1)
.addFixed64(1)
.addVarint(2)
.addLengthDelimited(ByteString.copyFrom("2", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(2).build())
.build())
.build())
.addField(
444,
Field.newBuilder()
.addFixed32(2)
.addFixed64(2)
.addVarint(1)
.addLengthDelimited(ByteString.copyFrom("1", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(1).addFixed64(2).build())
.addField(2, Field.newBuilder().addFixed32(2).addFixed64(1).build())
.addField(3, Field.newBuilder().addFixed32(2).build())
.build())
.build())
.build());
Message diffMessage =
fromUnknownFields(
UnknownFieldSet.newBuilder()
.addField(222, Field.newBuilder().addFixed32(3).addFixed64(3).build())
.addField(
333,
Field.newBuilder()
.addFixed32(4)
.addFixed64(4)
.addVarint(3)
.addLengthDelimited(ByteString.copyFrom("3", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(3).build())
.build())
.build())
.addField(
444,
Field.newBuilder()
.addFixed32(3)
.addFixed64(3)
.addVarint(4)
.addLengthDelimited(ByteString.copyFrom("4", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(4).addFixed64(3).build())
.addField(2, Field.newBuilder().addFixed32(3).addFixed64(4).build())
.addField(3, Field.newBuilder().addFixed32(3).build())
.build())
.build())
.build());
Message eqMessage =
fromUnknownFields(
UnknownFieldSet.newBuilder()
.addField(222, Field.newBuilder().addFixed32(3).addFixed64(3).build())
.addField(
333,
Field.newBuilder()
.addFixed32(1)
.addFixed64(1)
.addVarint(3)
.addLengthDelimited(ByteString.copyFrom("3", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(3).build())
.build())
.build())
.addField(
444,
Field.newBuilder()
.addFixed32(3)
.addFixed64(3)
.addVarint(1)
.addLengthDelimited(ByteString.copyFrom("1", UTF_8))
.addGroup(
UnknownFieldSet.newBuilder()
.addField(1, Field.newBuilder().addFixed32(1).addFixed64(3).build())
.addField(2, Field.newBuilder().addFixed32(3).addFixed64(1).build())
.addField(3, Field.newBuilder().addFixed32(3).build())
.build())
.build())
.build());
expectThat(diffMessage).isNotEqualTo(message);
expectThat(eqMessage).isNotEqualTo(message);
expectThat(diffMessage)
.withPartialScope(FieldScopes.fromSetFields(scopeMessage))
.isNotEqualTo(message);
expectThat(eqMessage)
.withPartialScope(FieldScopes.fromSetFields(scopeMessage))
.isEqualTo(message);
expectFailureWhenTesting().that(diffMessage).isEqualTo(message);
expectIsEqualToFailed();
expectThatFailure().hasMessageThat().contains("1 -> 4");
expectThatFailure().hasMessageThat().contains("\"1\" -> \"4\"");
expectThatFailure().hasMessageThat().contains("2 -> 3");
expectThatFailure().hasMessageThat().contains("\"2\" -> \"3\"");
expectFailureWhenTesting()
.that(diffMessage)
.withPartialScope(FieldScopes.fromSetFields(scopeMessage))
.isEqualTo(message);
expectIsEqualToFailed();
expectThatFailure().hasMessageThat().contains("1 -> 4");
expectThatFailure().hasMessageThat().contains("\"1\" -> \"4\"");
expectThatFailure().hasMessageThat().doesNotContain("2 -> 3");
expectThatFailure().hasMessageThat().doesNotContain("\"2\" -> \"3\"");
expectFailureWhenTesting()
.that(eqMessage)
.withPartialScope(FieldScopes.fromSetFields(scopeMessage))
.isNotEqualTo(message);
expectIsNotEqualToFailed();
expectThatFailure().hasMessageThat().doesNotContain("2 -> 3");
expectThatFailure().hasMessageThat().doesNotContain("\"2\" -> \"3\"");
}
|
@Override
@Nullable
public Object convert(@Nullable String value) {
if (isNullOrEmpty(value)) {
return null;
}
LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
final DateTimeFormatter formatter;
if (containsTimeZone) {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale);
} else {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale)
.withZone(timeZone);
}
return DateTime.parse(value, formatter);
}
|
@Test
public void convertUsesEtcUTCIfTimeZoneSettingIsEmpty() throws Exception {
final Converter c = new DateConverter(config("YYYY-MM-dd HH:mm:ss", "", null));
final DateTime dateTime = (DateTime) c.convert("2014-03-12 10:00:00");
assertThat(dateTime).isEqualTo("2014-03-12T10:00:00.000Z");
}
|
@Override
@GuardedBy("getLock().writeLock()")
public void commitFile(String fileId, String newFileId) throws PageNotFoundException {
Set<PageInfo> pages = mPages.getByField(INDEX_FILE_ID, fileId);
if (pages.size() == 0) {
throw new PageNotFoundException(
String.format("No Pages found for file %s when committing", fileId));
}
for (PageInfo oldPageInfo : pages) {
PageId newPageId = new PageId(newFileId, oldPageInfo.getPageId().getPageIndex());
PageInfo newPageInfo = new PageInfo(newPageId, oldPageInfo.getPageSize(),
oldPageInfo.getScope(), oldPageInfo.getLocalCacheDir());
mPages.remove(oldPageInfo);
mPages.add(newPageInfo);
}
}
|
@Test
public void commitFile() throws PageNotFoundException {
String newTempFile = "newTempFile";
long pageIndex = 2L;
PageId newTempPage = new PageId(newTempFile, pageIndex);
mMetaStore.addPage(mPage, mPageInfo);
mMetaStore.commitFile(mPage.getFileId(), newTempFile);
assertEquals(mPageStoreDir, mMetaStore.getPageInfo(newTempPage).getLocalCacheDir());
assertEquals(newTempFile, mMetaStore.getPageInfo(newTempPage).getPageId().getFileId());
assertEquals(pageIndex, mMetaStore.getPageInfo(newTempPage).getPageId().getPageIndex());
}
|
public static DateTime endOfQuarter(Date date) {
return new DateTime(endOfQuarter(calendar(date)));
}
|
@Test
public void endOfQuarterTest() {
final Date date = DateUtil.endOfQuarter(
DateUtil.parse("2020-05-31 00:00:00"));
assertEquals("2020-06-30 23:59:59", DateUtil.format(date, "yyyy-MM-dd HH:mm:ss"));
}
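For comparison with the pair above, the same end-of-quarter arithmetic can be written with java.time; this is only an equivalent sketch, not the library's implementation.
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.temporal.TemporalAdjusters;

public class EndOfQuarterSketch {
    public static void main(String[] args) {
        LocalDate date = LocalDate.of(2020, 5, 31);
        int quarter = (date.getMonthValue() - 1) / 3 + 1;          // May -> Q2
        LocalDate lastDay = LocalDate.of(date.getYear(), quarter * 3, 1)
                .with(TemporalAdjusters.lastDayOfMonth());         // 2020-06-30
        LocalDateTime endOfQuarter = lastDay.atTime(23, 59, 59);
        System.out.println(endOfQuarter);                          // 2020-06-30T23:59:59
    }
}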
|
public void run() {
runner = newJsonRunnerWithSetting(
globalSettings.stream()
.filter(byEnv(this.env))
.map(toRunnerSetting())
.collect(toList()), startArgs);
runner.run();
}
|
@Test
public void should_run_with_setting_with_context() throws IOException {
stream = getResourceAsStream("settings/context-settings.json");
runner = new SettingRunner(stream, createStartArgs(12306));
runner.run();
assertThat(helper.get(remoteUrl("/foo/foo")), is("foo"));
assertThat(helper.get(remoteUrl("/bar/bar")), is("bar"));
}
|
@GetMapping(params = "show=all")
public Namespace getNamespace(@RequestParam("namespaceId") String namespaceId) throws NacosException {
return namespaceOperationService.getNamespace(namespaceId);
}
|
@Test
void testGetNamespaceByNamespaceId() throws Exception {
Namespace namespace = new Namespace("", "public", "", 0, 0, 0);
when(namespaceOperationService.getNamespace("")).thenReturn(namespace);
assertEquals(namespace, namespaceController.getNamespace(""));
}
|
public static <T> T getOrDefault(Object obj, int index, T defaultValue) {
try {
return (T) get(obj, index);
} catch (IndexOutOfBoundsException e) {
return defaultValue;
}
}
|
@Test
void testGetOrDefault() {
assertEquals("default", CollectionUtils.getOrDefault(Collections.emptyList(), 1, "default"));
assertEquals("element", CollectionUtils.getOrDefault(Collections.singletonList("element"), 0, "default"));
}
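The helper above reaches for a generic get(Object, int) and converts an IndexOutOfBoundsException into the default value. A bounds-checked alternative for List inputs is sketched below; catching the exception is presumably what lets the original handle more input types than just List.
import java.util.List;

// Sketch only: explicit bounds check instead of exception-driven fallback.
static <T> T getOrDefaultByIndex(List<T> list, int index, T defaultValue) {
    return (index >= 0 && index < list.size()) ? list.get(index) : defaultValue;
}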
|
static Optional<ExecutorService> lookupExecutorServiceRef(
CamelContext camelContext, String name, Object source, String executorServiceRef) {
ExecutorServiceManager manager = camelContext.getExecutorServiceManager();
ObjectHelper.notNull(manager, ESM_NAME);
ObjectHelper.notNull(executorServiceRef, "executorServiceRef");
// lookup in registry first and use existing thread pool if exists,
// or create a new thread pool, assuming that the executor service ref is a thread pool ID
return lookupByNameAndType(camelContext, executorServiceRef, ExecutorService.class)
.or(() -> Optional.ofNullable(manager.newThreadPool(source, name, executorServiceRef)));
}
|
@Test
void testLookupExecutorServiceRef() {
String name = "ThreadPool";
Object source = new Object();
String executorServiceRef = "ThreadPoolRef";
when(camelContext.getRegistry()).thenReturn(mockRegistry);
when(camelContext.getExecutorServiceManager()).thenReturn(manager);
when(mockRegistry.lookupByNameAndType(executorServiceRef, ExecutorService.class)).thenReturn(existingThreadPool);
Optional<ExecutorService> executorService
= DynamicRouterRecipientListHelper.lookupExecutorServiceRef(camelContext, name, source, executorServiceRef);
Assertions.assertTrue(executorService.isPresent());
}
|
@Override
public Data getKeyData() {
return keyData;
}
|
@Override
@Test
public void getKeyData_caching() {
QueryableEntry entry = createEntry("key", "value");
assertSame(entry.getKeyData(), entry.getKeyData());
}
|
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getProducerConfigs(final String clientId) {
final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
checkIfUnexpectedUserSpecifiedConsumerConfig(clientProvidedProps, NON_CONFIGURABLE_PRODUCER_EOS_CONFIGS);
// generate producer configs from original properties and overridden maps
final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES);
props.putAll(getClientCustomProps());
props.putAll(clientProvidedProps);
// When using EOS alpha, stream should auto-downgrade the transactional commit protocol to be compatible with older brokers.
if (StreamsConfigUtils.processingMode(this) == StreamsConfigUtils.ProcessingMode.EXACTLY_ONCE_ALPHA) {
props.put("internal.auto.downgrade.txn.commit", true);
}
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG));
// add client id with stream client id prefix
props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
return props;
}
|
@Test
public void shouldNotSetInternalAutoDowngradeTxnCommitToTrueInProducerForEosDisabled() {
final Map<String, Object> producerConfigs = streamsConfig.getProducerConfigs(clientId);
assertThat(producerConfigs.get("internal.auto.downgrade.txn.commit"), is(nullValue()));
}
|
public static String highLevelDecode(boolean[] correctedBits) throws FormatException {
return getEncodedData(correctedBits);
}
|
@Test
public void testHighLevelDecode() throws FormatException {
// no ECI codes
testHighLevelDecodeString("A. b.",
// 'A' P/S '. ' L/L b D/L '.'
"...X. ..... ...XX XXX.. ...XX XXXX. XX.X");
// initial ECI code 26 (switch to UTF-8)
testHighLevelDecodeString("Ça",
// P/S FLG(n) 2 '2' '6' B/S 2 0xc3 0x87 L/L 'a'
"..... ..... .X. .X.. X... XXXXX ...X. XX....XX X....XXX XXX.. ...X.");
// initial character without ECI (must be interpreted as ISO_8859_1)
// followed by ECI code 26 (= UTF-8) and UTF-8 text
testHighLevelDecodeString("±Ça",
// B/S 1 0xb1 P/S FLG(n) 2 '2' '6' B/S 2 0xc3 0x87 L/L 'a'
"XXXXX ....X X.XX...X ..... ..... .X. .X.. X... XXXXX ...X. XX....XX X....XXX XXX.. ...X.");
// GS1 data
testHighLevelDecodeString("101233742",
// P/S FLG(n) 0 D/L 1 0 1 2 3 P/S FLG(n) 0 3 7 4 2
"..... ..... ... XXXX. ..XX ..X. ..XX .X.. .X.X .... ..... ... .X.X X..X .XX. .X..");
}
|
@Override
public void deletePrefix(String prefix) {
internalDeleteFiles(
Streams.stream(listPrefix(prefix))
.map(fileInfo -> BlobId.fromGsUtilUri(fileInfo.location())));
}
|
@Test
public void testDeletePrefix() {
String prefix = "del/path/";
String path1 = prefix + "data1.dat";
storage.create(BlobInfo.newBuilder(TEST_BUCKET, path1).build());
String path2 = prefix + "data2.dat";
storage.create(BlobInfo.newBuilder(TEST_BUCKET, path2).build());
String path3 = "del/skip/data3.dat";
storage.create(BlobInfo.newBuilder(TEST_BUCKET, path3).build());
assertThat(StreamSupport.stream(io.listPrefix(gsUri("del/")).spliterator(), false).count())
.isEqualTo(3);
io.deletePrefix(gsUri(prefix));
assertThat(StreamSupport.stream(io.listPrefix(gsUri("del/")).spliterator(), false).count())
.isEqualTo(1);
}
|
@Override
public void createNetwork(K8sNetwork network) {
checkNotNull(network, ERR_NULL_NETWORK);
checkArgument(!Strings.isNullOrEmpty(network.networkId()), ERR_NULL_NETWORK_ID);
k8sNetworkStore.createNetwork(network);
log.info(String.format(MSG_NETWORK, network.name(), MSG_CREATED));
}
|
@Test(expected = NullPointerException.class)
public void testCreateNullNetwork() {
target.createNetwork(null);
}
|
@NotNull @Override
public Optional<Version> parse(
@Nullable String str, @NotNull DetectionLocation detectionLocation) {
if (str == null) {
return Optional.empty();
}
Pattern pattern = Pattern.compile("tlsv(\\d+(\\.\\d+)?)");
Matcher matcher = pattern.matcher(str.toLowerCase());
if (matcher.find()) {
String number = matcher.group(1);
if (number.equals("1")) {
number = "1.0";
}
return Optional.of(new Version(number, detectionLocation));
}
return Optional.empty();
}
|
@Test
public void test2() {
DetectionLocation testDetectionLocation =
new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
final SSLVersionMapper mapper = new SSLVersionMapper();
final Optional<? extends INode> version = mapper.parse("TLSv1", testDetectionLocation);
assertThat(version).isPresent();
assertThat(version.get().is(Version.class)).isTrue();
assertThat(version.get().asString()).isEqualTo("1.0");
}
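The mapper above extracts the numeric part of a TLS protocol name with a small regex and then normalises "1" to "1.0". A standalone sketch of just the regex behaviour:
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TlsVersionRegexSketch {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("tlsv(\\d+(\\.\\d+)?)");
        for (String s : new String[]{"TLSv1", "TLSv1.2", "SSLv3"}) {
            Matcher m = pattern.matcher(s.toLowerCase());
            System.out.println(s + " -> " + (m.find() ? m.group(1) : "no match"));
        }
        // TLSv1 -> 1, TLSv1.2 -> 1.2, SSLv3 -> no match
    }
}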
|
public ContentInfo verify(ContentInfo signedMessage, Date date) {
final SignedData signedData = SignedData.getInstance(signedMessage.getContent());
final X509Certificate cert = certificate(signedData);
certificateVerifier.verify(cert, date);
final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded());
try {
final CMSSignedData cms = new CMSSignedData(signedMessage);
cms.verifySignatures(signerId -> {
if (!name.equals(signerId.getIssuer())) {
throw new VerificationException("Issuer does not match certificate");
}
if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) {
throw new VerificationException("Serial number does not match certificate");
}
return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert);
});
} catch (CMSException e) {
throw new VerificationException("Could not verify CMS", e);
}
return signedData.getEncapContentInfo();
}
|
@Test
public void verifyValidDl1Cms() throws Exception {
final ContentInfo signedMessage = ContentInfo.getInstance(fixture("dl1"));
final ContentInfo message = new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
assertEquals(LdsSecurityObject.OID, message.getContentType().getId());
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", Hex.toHexString(
DigestUtils.digest("SHA1").digest(((ASN1OctetString) message.getContent()).getOctets())
));
}
|
public void removeModelBuildListener(OnModelBuildFinishedListener listener) {
adapter.removeModelBuildListener(listener);
}
|
@Test
public void testRemoveModelBuildListener() {
OnModelBuildFinishedListener observer = mock(OnModelBuildFinishedListener.class);
EpoxyController controller = new EpoxyController() {
@Override
protected void buildModels() {
new TestModel()
.addTo(this);
}
};
controller.addModelBuildListener(observer);
controller.removeModelBuildListener(observer);
controller.requestModelBuild();
verify(observer, never()).onModelBuildFinished(any(DiffResult.class));
}
|
public PluginDescriptor getDescriptor() {
Iterator<PluginInfo> iterator = this.iterator();
if (!iterator.hasNext()) {
throw new RuntimeException("Cannot get descriptor. Could not find any plugin information.");
}
return iterator.next().getDescriptor();
}
|
@Test
public void shouldFailWhenThereIsNoPluginInfoToGetTheDescriptorFrom() {
CombinedPluginInfo pluginInfo = new CombinedPluginInfo();
try {
pluginInfo.getDescriptor();
fail("Should have failed since there are no plugins found.");
} catch (RuntimeException e) {
assertThat(e.getMessage(), containsString("Cannot get descriptor"));
}
}
|
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Ignore
@Test
public void testDescribe() throws DdlException {
ctx.setGlobalStateMgr(globalStateMgr);
ctx.setQualifiedUser("testUser");
DescribeStmt stmt = (DescribeStmt) com.starrocks.sql.parser.SqlParser.parse("desc testTbl",
ctx.getSessionVariable().getSqlMode()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, ctx);
ShowResultSet resultSet;
try {
resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertFalse(resultSet.next());
} catch (SemanticException e) {
e.printStackTrace();
Assert.fail();
}
}
|
@Override
public Flux<ProductReview> findProductReviewsByProduct(int productId) {
return this.productReviewRepository.findAllByProductId(productId);
}
|
@Test
void findProductReviewsByProduct_ReturnsProductReviews() {
// given
doReturn(Flux.fromIterable(List.of(
new ProductReview(UUID.fromString("bd7779c2-cb05-11ee-b5f3-df46a1249898"), 1, 1,
"Отзыв №1", "user-1"),
new ProductReview(UUID.fromString("be424abc-cb05-11ee-ab16-2b747e61f570"), 1, 3,
"Отзыв №2", "user-2"),
new ProductReview(UUID.fromString("be77f95a-cb05-11ee-91a3-1bdc94fa9de4"), 1, 5,
"Отзыв №3", "user-3")
))).when(this.productReviewRepository).findAllByProductId(1);
// when
StepVerifier.create(this.service.findProductReviewsByProduct(1))
// then
.expectNext(
new ProductReview(UUID.fromString("bd7779c2-cb05-11ee-b5f3-df46a1249898"), 1, 1,
"Отзыв №1", "user-1"),
new ProductReview(UUID.fromString("be424abc-cb05-11ee-ab16-2b747e61f570"), 1, 3,
"Отзыв №2", "user-2"),
new ProductReview(UUID.fromString("be77f95a-cb05-11ee-91a3-1bdc94fa9de4"), 1, 5,
"Отзыв №3", "user-3")
)
.verifyComplete();
}
|
public static <K, V> V getOrDefault(final Map<K, V> map, final K key, final Function<K, V> supplier)
{
V value = map.get(key);
if (value == null)
{
value = supplier.apply(key);
map.put(key, value);
}
return value;
}
|
@Test
void getOrDefaultDoesNotCreateNewValueWhenOneExists()
{
final Map<Integer, Integer> values = new HashMap<>();
values.put(0, 0);
final Integer result = CollectionUtil.getOrDefault(
values,
0,
(x) ->
{
fail("Shouldn't be called");
return x + 1;
});
assertThat(result, is(0));
}
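For comparison with the helper and test above, java.util.Map#computeIfAbsent provides the same create-on-miss behaviour and likewise skips the factory when a mapping already exists; one difference is that computeIfAbsent will not store a null result.
import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultSketch {
    public static void main(String[] args) {
        Map<Integer, Integer> values = new HashMap<>();
        values.put(0, 0);
        Integer result = values.computeIfAbsent(0, x -> x + 1); // factory is not invoked
        System.out.println(result);                             // 0
    }
}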
|
ApolloNotificationMessages transformMessages(String messagesAsString) {
ApolloNotificationMessages notificationMessages = null;
if (!Strings.isNullOrEmpty(messagesAsString)) {
try {
notificationMessages = gson.fromJson(messagesAsString, ApolloNotificationMessages.class);
} catch (Throwable ex) {
Tracer.logError(ex);
}
}
return notificationMessages;
}
|
@Test
public void testTransformInvalidMessages() throws Exception {
String someInvalidMessages = "someInvalidMessages";
assertNull(configController.transformMessages(someInvalidMessages));
}
|
@Override
public HttpResponseOutputStream<FileEntity> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final String uploadUri;
FileUploadPartEntity uploadPartEntity = null;
if(StringUtils.isBlank(status.getUrl())) {
uploadPartEntity = new BrickUploadFeature(session, this).startUpload(file);
uploadUri = uploadPartEntity.getUploadUri();
}
else {
uploadUri = status.getUrl();
}
final HttpResponseOutputStream<FileEntity> stream = this.write(file, status, new DelayedHttpEntityCallable<FileEntity>(file) {
@Override
public FileEntity call(final HttpEntity entity) throws BackgroundException {
try {
final HttpPut request = new HttpPut(uploadUri);
request.setEntity(entity);
request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE);
final HttpResponse response = session.getClient().execute(request);
// Validate response
try {
switch(response.getStatusLine().getStatusCode()) {
case HttpStatus.SC_OK:
if(log.isInfoEnabled()) {
log.info(String.format("Received response %s for part number %d", response, status.getPart()));
}
// Upload complete
if(response.containsHeader("ETag")) {
if(file.getType().contains(Path.Type.encrypted)) {
log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
}
else {
if(HashAlgorithm.md5.equals(status.getChecksum().algorithm)) {
final Checksum etag = Checksum.parse(StringUtils.remove(response.getFirstHeader("ETag").getValue(), '"'));
if(!status.getChecksum().equals(etag)) {
throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
MessageFormat.format("Mismatch between {0} hash {1} of uploaded data and ETag {2} returned by the server",
etag.algorithm.toString(), status.getChecksum().hash, etag.hash));
}
}
}
}
else {
if(log.isDebugEnabled()) {
log.debug("No ETag header in response available");
}
}
return null;
default:
EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed",
new HttpResponseException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
}
}
finally {
EntityUtils.consume(response.getEntity());
}
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
}
@Override
public long getContentLength() {
return status.getLength();
}
});
if(StringUtils.isBlank(status.getUrl())) {
final String ref = uploadPartEntity.getRef();
return new HttpResponseOutputStream<FileEntity>(new ProxyOutputStream(stream),
new BrickAttributesFinderFeature(session), status) {
private final AtomicBoolean close = new AtomicBoolean();
@Override
public FileEntity getStatus() throws BackgroundException {
return stream.getStatus();
}
@Override
public void close() throws IOException {
if(close.get()) {
log.warn(String.format("Skip double close of stream %s", this));
return;
}
super.close();
try {
new BrickUploadFeature(session, BrickWriteFeature.this)
.completeUpload(file, ref, status, Collections.singletonList(status));
}
catch(BackgroundException e) {
throw new IOException(e.getMessage(), e);
}
finally {
close.set(true);
}
}
};
}
return stream;
}
|
@Test
public void testWriteSmallPart() throws Exception {
final BrickWriteFeature feature = new BrickWriteFeature(session);
final Path container = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(56);
final TransferStatus status = new TransferStatus().withLength(content.length);
final HttpResponseOutputStream<FileEntity> out = feature.write(file, status, new DisabledConnectionCallback());
final ByteArrayInputStream in = new ByteArrayInputStream(content);
final TransferStatus progress = new TransferStatus();
final BytecountStreamListener count = new BytecountStreamListener();
new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
assertEquals(content.length, count.getSent());
in.close();
out.close();
assertNull(out.getStatus());
assertTrue(new BrickFindFeature(session).find(file));
final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(file);
assertEquals(content.length, attributes.getSize());
final byte[] compare = new byte[content.length];
final InputStream stream = new BrickReadFeature(session).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new BrickDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public SendResult send(
Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
msg.setTopic(withNamespace(msg.getTopic()));
if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
return sendByAccumulator(msg, null, null);
} else {
return sendDirect(msg, null, null);
}
}
|
@Test
public void assertSendByQueueSelector() throws MQBrokerException, RemotingException, InterruptedException, MQClientException, NoSuchFieldException, IllegalAccessException {
setDefaultMQProducerImpl();
MessageQueueSelector selector = mock(MessageQueueSelector.class);
SendResult send = producer.send(message, selector, 1);
assertNull(send);
send = producer.send(message, selector, 1, defaultTimeout);
assertNull(send);
}
|
protected ReferenceManager createReferenceManager() {
return new ReferenceManager();
}
|
@Test
void shouldSupportNullReference() {
// GC could happen during restructure so we must be able to create a reference
// for a null entry
map.createReferenceManager().createReference(null, 1234, null);
}
|
@Override
public Job save(Job jobToSave) {
try (final Connection conn = dataSource.getConnection(); final Transaction transaction = new Transaction(conn)) {
final Job savedJob = jobTable(conn).save(jobToSave);
transaction.commit();
notifyJobStatsOnChangeListeners();
return savedJob;
} catch (SQLException e) {
throw new StorageException(e);
}
}
|
@Test
void saveJobs_WhenSqlExceptionOccursAJobStorageExceptionIsThrown() throws SQLException {
doThrow(new SQLException("Boem")).when(preparedStatement).executeBatch();
assertThatThrownBy(() -> jobStorageProvider.save(singletonList(anEnqueuedJob().build()))).isInstanceOf(StorageException.class);
}
|
@Override
public Result invoke(Invocation invocation) throws RpcException {
if (invocation instanceof RpcInvocation) {
((RpcInvocation) invocation).setInvoker(this);
}
String mock = getUrl().getMethodParameter(invocation.getMethodName(), MOCK_KEY);
if (StringUtils.isBlank(mock)) {
throw new RpcException(new IllegalAccessException("mock can not be null. url :" + url));
}
mock = normalizeMock(URL.decode(mock));
if (mock.startsWith(RETURN_PREFIX)) {
mock = mock.substring(RETURN_PREFIX.length()).trim();
try {
Type[] returnTypes = RpcUtils.getReturnTypes(invocation);
Object value = parseMockValue(mock, returnTypes);
return AsyncRpcResult.newDefaultAsyncResult(value, invocation);
} catch (Exception ew) {
throw new RpcException(
"mock return invoke error. method :" + invocation.getMethodName() + ", mock:" + mock + ", url: "
+ url,
ew);
}
} else if (mock.startsWith(THROW_PREFIX)) {
mock = mock.substring(THROW_PREFIX.length()).trim();
if (StringUtils.isBlank(mock)) {
throw new RpcException("mocked exception for service degradation.");
} else { // user customized class
Throwable t = getThrowable(mock);
throw new RpcException(RpcException.BIZ_EXCEPTION, t);
}
} else { // impl mock
try {
Invoker<T> invoker = getInvoker(mock);
return invoker.invoke(invocation);
} catch (Throwable t) {
throw new RpcException("Failed to create mock implementation class " + mock, t);
}
}
}
|
@Test
void testInvokeThrowsRpcException2() {
URL url = URL.valueOf("remote://1.2.3.4/" + String.class.getName());
url = url.addParameter(MOCK_KEY, "fail");
MockInvoker mockInvoker = new MockInvoker(url, String.class);
RpcInvocation invocation = new RpcInvocation();
invocation.setMethodName("getSomething");
Assertions.assertThrows(RpcException.class, () -> mockInvoker.invoke(invocation));
}
|
@Deprecated
public static int MapSize(final int levelOfDetail) {
return (int) Math.round(MapSize((double) levelOfDetail));
}
|
@Test
public void test_MapSize() {
for (int zoomLevel = mMinZoomLevel; zoomLevel <= mMaxZoomLevel; zoomLevel++) {
Assert.assertEquals(256L << zoomLevel, (long) TileSystem.MapSize((double) zoomLevel));
}
}
|
public static <T> T toObj(byte[] json, Class<T> cls) {
try {
return mapper.readValue(json, cls);
} catch (Exception e) {
throw new NacosDeserializationException(cls, e);
}
}
|
@Test
void testToObject9() {
assertNull(JacksonUtils.toObj("null".getBytes(), new TypeReference<Object>() {
}));
assertEquals("string", JacksonUtils.toObj("\"string\"".getBytes(), new TypeReference<String>() {
}));
assertEquals(new BigDecimal(30), JacksonUtils.toObj("30".getBytes(), new TypeReference<BigDecimal>() {
}));
assertEquals(Collections.singletonMap("key", "value"),
JacksonUtils.toObj("{\"key\":\"value\"}".getBytes(), new TypeReference<Map<String, String>>() {
}));
assertEquals(Collections.singletonList(Collections.singletonMap("key", "value")),
JacksonUtils.toObj("[{\"key\":\"value\"}]".getBytes(), new TypeReference<List<Map<String, String>>>() {
}));
assertEquals(new TestOfAtomicObject(), JacksonUtils.toObj("{\"aLong\":0,\"aInteger\":1,\"aBoolean\":false}".getBytes(),
new TypeReference<TestOfAtomicObject>() {
}));
assertEquals(new TestOfDate(), JacksonUtils.toObj("{\"date\":1626192000000}".getBytes(), new TypeReference<TestOfDate>() {
}));
assertEquals(new TestOfAccessModifier(),
JacksonUtils.toObj("{\"publicAccessModifier\":\"public\"}".getBytes(), new TypeReference<TestOfAccessModifier>() {
}));
assertEquals(new TestOfGetter(),
JacksonUtils.toObj("{\"value\":\"value\",\"key\":\"key\"}".getBytes(), new TypeReference<TestOfGetter>() {
}));
assertEquals(new TestOfAnnotationSub(), JacksonUtils.toObj(
("{\"@type\":\"JacksonUtilsTest$TestOfAnnotationSub\",\"date\":\"2021-07-14\","
+ "\"subField\":\"subField\",\"camelCase\":\"value\"}").getBytes(),
new TypeReference<TestOfAnnotation>() {
}));
}
|
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
return this.processRequest(ctx.channel(), request, true);
}
|
@Test
public void testProcessRequest_SubscriptionGroupNotExist() throws RemotingCommandException {
when(subscriptionGroupManager.findSubscriptionGroupConfig(anyString())).thenReturn(null);
RemotingCommand request = createPeekMessageRequest("group","topic",0);
RemotingCommand response = peekMessageProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
}
|
public static void putTokenCacheNode(long tokenId, TokenCacheNode cacheNode) {
TOKEN_CACHE_NODE_MAP.put(tokenId, cacheNode);
}
|
@Test
public void testPutTokenCacheNode() throws InterruptedException {
try (MockedStatic<TimeUtil> mocked = super.mockTimeUtil()) {
setCurrentMillis(mocked, System.currentTimeMillis());
for (long i = 0; i < 100; i++) {
final TokenCacheNode node = new TokenCacheNode();
node.setTokenId(i);
node.setFlowId(111L);
node.setResourceTimeout(10000L);
node.setClientTimeout(10000L);
node.setClientAddress("localhost");
if (TokenCacheNodeManager.validToken(node)) {
TokenCacheNodeManager.putTokenCacheNode(node.getTokenId(), node);
}
}
Assert.assertEquals(100, TokenCacheNodeManager.getSize());
for (int i = 0; i < 100; i++) {
TokenCacheNodeManager.getTokenCacheNode((long) (Math.random() * 100));
}
List<Long> keyList = new ArrayList<>(TokenCacheNodeManager.getCacheKeySet());
for (int i = 0; i < 100; i++) {
Assert.assertEquals(i, (long) keyList.get(i));
TokenCacheNodeManager.removeTokenCacheNode(i);
}
}
}
|
@Override
public void replay(
long offset,
long producerId,
short producerEpoch,
CoordinatorRecord record
) throws RuntimeException {
ApiMessageAndVersion key = record.key();
ApiMessageAndVersion value = record.value();
switch (key.version()) {
case 0:
case 1:
offsetMetadataManager.replay(
offset,
producerId,
(OffsetCommitKey) key.message(),
(OffsetCommitValue) Utils.messageOrNull(value)
);
break;
case 2:
groupMetadataManager.replay(
(GroupMetadataKey) key.message(),
(GroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 3:
groupMetadataManager.replay(
(ConsumerGroupMetadataKey) key.message(),
(ConsumerGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 4:
groupMetadataManager.replay(
(ConsumerGroupPartitionMetadataKey) key.message(),
(ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 5:
groupMetadataManager.replay(
(ConsumerGroupMemberMetadataKey) key.message(),
(ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 6:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMetadataKey) key.message(),
(ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 7:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMemberKey) key.message(),
(ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 8:
groupMetadataManager.replay(
(ConsumerGroupCurrentMemberAssignmentKey) key.message(),
(ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
case 9:
groupMetadataManager.replay(
(ShareGroupPartitionMetadataKey) key.message(),
(ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 10:
groupMetadataManager.replay(
(ShareGroupMemberMetadataKey) key.message(),
(ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 11:
groupMetadataManager.replay(
(ShareGroupMetadataKey) key.message(),
(ShareGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 12:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMetadataKey) key.message(),
(ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 13:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMemberKey) key.message(),
(ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 14:
groupMetadataManager.replay(
(ShareGroupCurrentMemberAssignmentKey) key.message(),
(ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
default:
throw new IllegalStateException("Received an unknown record type " + key.version()
+ " in " + record);
}
}
|
@Test
public void testReplayConsumerGroupPartitionMetadata() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
ConsumerGroupPartitionMetadataKey key = new ConsumerGroupPartitionMetadataKey();
ConsumerGroupPartitionMetadataValue value = new ConsumerGroupPartitionMetadataValue();
coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
new ApiMessageAndVersion(key, (short) 4),
new ApiMessageAndVersion(value, (short) 0)
));
verify(groupMetadataManager, times(1)).replay(key, value);
}
|
@DELETE
@Path("{netId}/{ip}")
public Response releaseIp(@PathParam("netId") String netId,
@PathParam("ip") String ip) {
log.trace("Received IP release request of network " + netId);
K8sNetwork network =
nullIsNotFound(networkService.network(netId), NETWORK_ID_NOT_FOUND);
ipamService.releaseIp(network.networkId(), IpAddress.valueOf(ip));
return Response.noContent().build();
}
|
@Test
public void testReleaseIpWithCorrectNetIdAndIp() {
expect(mockNetworkService.network(anyObject())).andReturn(k8sNetwork);
expect(mockIpamService.releaseIp(anyObject(), anyObject())).andReturn(true);
replay(mockNetworkService);
replay(mockIpamService);
final WebTarget wt = target();
Response response = wt.path(IPAM + "/sona-network/10.10.10.2").request().delete();
final int status = response.getStatus();
assertEquals(204, status);
verify(mockNetworkService);
verify(mockIpamService);
}
|
@PublicAPI(usage = ACCESS)
public JavaClasses importPackagesOf(Class<?>... classes) {
return importPackagesOf(ImmutableSet.copyOf(classes));
}
|
@Test
public void imports_jdk_packages() {
JavaClasses classes = new ClassFileImporter().importPackagesOf(File.class);
assertThatTypes(classes).contain(File.class);
}
|
public static ObjectNode json(Highlights highlights) {
ObjectNode payload = objectNode();
ArrayNode devices = arrayNode();
ArrayNode hosts = arrayNode();
ArrayNode links = arrayNode();
payload.set(DEVICES, devices);
payload.set(HOSTS, hosts);
payload.set(LINKS, links);
highlights.devices().forEach(dh -> devices.add(json(dh)));
highlights.hosts().forEach(hh -> hosts.add(json(hh)));
highlights.links().forEach(lh -> links.add(json(lh)));
Highlights.Amount toSubdue = highlights.subdueLevel();
if (!toSubdue.equals(Highlights.Amount.ZERO)) {
payload.put(SUBDUE, toSubdue.toString());
}
int delay = highlights.delayMs();
if (delay > 0) {
payload.put(DELAY, delay);
}
return payload;
}
|
@Test
public void subdueMaximalHighlights() {
Highlights h = new Highlights().subdueAllElse(Amount.MAXIMALLY);
payload = TopoJson.json(h);
checkEmptyArrays();
String subdue = JsonUtils.string(payload, TopoJson.SUBDUE);
assertEquals("not max", "max", subdue);
}
|
public static <InputT, OutputT> MapElements<InputT, OutputT> via(
final InferableFunction<InputT, OutputT> fn) {
return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor());
}
|
@Test
public void testInferableFunctionClassDisplayData() {
InferableFunction<?, ?> inferableFn =
new InferableFunction<Integer, Integer>() {
@Override
public Integer apply(Integer input) throws Exception {
return input;
}
};
MapElements<?, ?> inferableMap = MapElements.via(inferableFn);
assertThat(DisplayData.from(inferableMap), hasDisplayItem("class", inferableFn.getClass()));
}
|
public static String getShortestTableStringFormat(List<List<String>> table)
{
if (table.isEmpty()) {
throw new IllegalArgumentException("Table must include at least one row");
}
int tableWidth = table.get(0).size();
int[] lengthTracker = new int[tableWidth];
for (List<String> row : table) {
if (row.size() != tableWidth) {
String errorString = format("All rows in the table are expected to have exactly same number of columns: %s != %s",
tableWidth, row.size());
throw new IllegalArgumentException(errorString);
}
for (int i = 0; i < row.size(); i++) {
lengthTracker[i] = max(row.get(i).length(), lengthTracker[i]);
}
}
StringBuilder sb = new StringBuilder();
sb.append('|');
for (int maxLen : lengthTracker) {
sb.append(" %-")
.append(maxLen)
.append("s |");
}
return sb.toString();
}
|
@Test
public void testGetShortestTableStringFormatBadInput()
{
List<List<String>> table = Arrays.asList(
Arrays.asList("Header1", "Header2", "Headr3"),
Arrays.asList("Value1", "Value2"),
Arrays.asList("LongValue1", "SVal2", "SVal3"));
assertThrows(
IllegalArgumentException.class,
() -> StringTableUtils.getShortestTableStringFormat(table));
}
|
@Override
public long searchOffset(MessageQueue mq, long timestamp) throws MQClientException {
return defaultMQAdminExtImpl.searchOffset(mq, timestamp);
}
|
@Test
@Ignore
public void testSearchOffset() throws Exception {
when(mQClientAPIImpl.searchOffset(anyString(), any(MessageQueue.class), anyLong(), anyLong())).thenReturn(101L);
assertThat(defaultMQAdminExt.searchOffset(new MessageQueue(TOPIC1, BROKER1_NAME, 0), System.currentTimeMillis())).isEqualTo(101L);
}
|
public void statusUpdate(TaskUmbilicalProtocol umbilical)
throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
if (!umbilical.statusUpdate(getTaskID(), taskStatus).getTaskFound()) {
if (uberized) {
LOG.warn("Task no longer available: " + taskId);
break;
} else {
LOG.warn("Parent died. Exiting " + taskId);
ExitUtil.terminate(66);
}
}
taskStatus.clearStatus();
return;
} catch (InterruptedException ie) {
Thread.currentThread().interrupt(); // interrupt ourself
} catch (IOException ie) {
LOG.warn("Failure sending status update: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
throw ie;
}
}
}
}
|
@Test(expected = ExitException.class)
public void testStatusUpdateExitsInNonUberMode() throws Exception {
setupTest(false);
task.statusUpdate(umbilical);
}
|
@Override
public MailAccountDO getMailAccount(Long id) {
return mailAccountMapper.selectById(id);
}
|
@Test
public void testGetMailAccount() {
// mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount);// @Sql: insert an existing record first
// prepare parameters
Long id = dbMailAccount.getId();
// invoke
MailAccountDO mailAccount = mailAccountService.getMailAccount(id);
// assert
assertPojoEquals(dbMailAccount, mailAccount);
}
|
public Host get(final String url) throws HostParserException {
final StringReader reader = new StringReader(url);
final Protocol parsedProtocol, protocol;
if((parsedProtocol = findProtocol(reader, factory)) != null) {
protocol = parsedProtocol;
}
else {
protocol = defaultScheme;
}
final Consumer<HostParserException> parsedProtocolDecorator = e -> e.withProtocol(parsedProtocol);
final Host host = new Host(protocol);
final URITypes uriType = findURIType(reader);
if(uriType == URITypes.Undefined) {
// scheme:
if(StringUtils.isBlank(protocol.getDefaultHostname())) {
throw decorate(new HostParserException(String.format("Missing hostname in URI %s", url)), parsedProtocolDecorator);
}
return host;
}
if(uriType == URITypes.Authority) {
if(host.getProtocol().isHostnameConfigurable()) {
parseAuthority(reader, host, parsedProtocolDecorator);
}
else {
parseRootless(reader, host, parsedProtocolDecorator);
}
}
else if(uriType == URITypes.Rootless) {
parseRootless(reader, host, parsedProtocolDecorator);
}
else if(uriType == URITypes.Absolute) {
parseAbsolute(reader, host, parsedProtocolDecorator);
}
if(log.isDebugEnabled()) {
log.debug(String.format("Parsed %s as %s", url, host));
}
return host;
}
|
@Test
public void parseDefaultHostnameWithUserAbsolutePath() throws Exception {
final Host host = new HostParser(new ProtocolFactory(Collections.singleton(new TestProtocol(Scheme.https) {
@Override
public String getDefaultHostname() {
return "defaultHostname";
}
@Override
public boolean isHostnameConfigurable() {
return false;
}
}))).get("https://user@/folder/file");
assertEquals("defaultHostname", host.getHostname());
assertEquals("user", host.getCredentials().getUsername());
assertEquals("/folder/file", host.getDefaultPath());
}
|
public boolean validate(final CommandLine input) {
for(Option o : input.getOptions()) {
if(Option.UNINITIALIZED == o.getArgs()) {
continue;
}
if(o.hasOptionalArg()) {
continue;
}
if(o.getArgs() != o.getValuesList().size()) {
console.printf("Missing argument for option %s%n", o.getLongOpt());
return false;
}
}
final TerminalAction action = TerminalActionFinder.get(input);
if(null == action) {
console.printf("%s%n", "Missing argument");
return false;
}
if(input.hasOption(TerminalOptionsBuilder.Params.existing.name())) {
final String arg = input.getOptionValue(TerminalOptionsBuilder.Params.existing.name());
if(null == TransferAction.forName(arg)) {
final Set<TransferAction> actions = new HashSet<TransferAction>(TransferAction.forTransfer(Transfer.Type.download));
actions.add(TransferAction.cancel);
console.printf("Invalid argument '%s' for option %s. Must be one of %s%n",
arg, TerminalOptionsBuilder.Params.existing.name(), Arrays.toString(actions.toArray()));
return false;
}
switch(action) {
case download:
if(!validate(arg, Transfer.Type.download)) {
return false;
}
break;
case upload:
if(!validate(arg, Transfer.Type.upload)) {
return false;
}
break;
case synchronize:
if(!validate(arg, Transfer.Type.sync)) {
return false;
}
break;
case copy:
if(!validate(arg, Transfer.Type.copy)) {
return false;
}
break;
}
}
// Validate arguments
switch(action) {
case list:
case download:
if(!validate(input.getOptionValue(action.name()))) {
return false;
}
break;
case upload:
case copy:
case synchronize:
if(!validate(input.getOptionValue(action.name()))) {
return false;
}
break;
}
return true;
}
|
@Test
public void testValidate() {
assertTrue(new TerminalOptionsInputValidator(new ProtocolFactory(Collections.singleton(new FTPProtocol() {
@Override
public boolean isEnabled() {
return true;
}
})))
.validate("ftp://cdn.duck.sh/"));
assertTrue(new TerminalOptionsInputValidator(new ProtocolFactory(Collections.singleton(new FTPProtocol() {
@Override
public boolean isEnabled() {
return true;
}
})))
.validate("ftp://cdn.duck.sh/%%~nc"));
}
|
static Schema schemaWithName(final Schema schema, final String schemaName) {
if (schemaName == null || schema.type() != Schema.Type.STRUCT) {
return schema;
}
final SchemaBuilder builder = SchemaBuilder.struct();
for (final Field f : schema.fields()) {
builder.field(f.name(), f.schema());
}
if (schema.parameters() != null) {
builder.parameters(schema.parameters());
}
if (schema.isOptional()) {
builder.optional();
}
if (schema.defaultValue() != null) {
builder.defaultValue(schema.defaultValue());
}
builder.doc(schema.doc());
builder.version(schema.version());
return builder.name(schemaName).build();
}
|
@Test
public void shouldReplaceSchemaName() {
// Given
final Schema namedSchema = SchemaBuilder.struct()
.field("field1", Schema.INT32_SCHEMA)
.field("field2",
SchemaBuilder.struct()
.field("product_id", Schema.INT32_SCHEMA)
.build())
.name("Ole")
.build();
// When
final Schema schemaWithNewName = ProtobufSchemas.schemaWithName(namedSchema, CUSTOM_FULL_SCHEMA_NAME);
// Then
assertThat(schemaWithNewName, is(SchemaBuilder.struct()
.field("field1", Schema.INT32_SCHEMA)
.field("field2",
SchemaBuilder.struct()
.field("product_id", Schema.INT32_SCHEMA)
.build())
.name(CUSTOM_FULL_SCHEMA_NAME)
.build()));
}
|
@Override
public ExecuteContext before(ExecuteContext context) {
Object object = context.getObject();
if (object instanceof BaseLoadBalancer) {
List<Object> serverList = getServerList(context.getMethod().getName(), object);
if (CollectionUtils.isEmpty(serverList)) {
return context;
}
BaseLoadBalancer loadBalancer = (BaseLoadBalancer) object;
String name = loadBalancer.getName();
RequestData requestData = getRequestData().orElse(null);
List<Object> targetInstances = loadBalancerService.getTargetInstances(name, serverList, requestData);
context.skip(Collections.unmodifiableList(targetInstances));
}
return context;
}
|
@Test
public void testBeforeWithEmptyServers() {
loadBalancer.setServersList(Collections.emptyList());
ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
interceptor.before(context);
BaseLoadBalancer loadBalancer = (BaseLoadBalancer) context.getObject();
List<Server> servers = loadBalancer.getAllServers();
Assert.assertNotNull(servers);
Assert.assertEquals(0, servers.size());
}
|
public int filterEntriesForConsumer(List<? extends Entry> entries, EntryBatchSizes batchSizes,
SendMessageInfo sendMessageInfo, EntryBatchIndexesAcks indexesAcks,
ManagedCursor cursor, boolean isReplayRead, Consumer consumer) {
return filterEntriesForConsumer(null, 0, entries, batchSizes,
sendMessageInfo, indexesAcks, cursor,
isReplayRead, consumer);
}
|
@Test
public void testFilterEntriesForConsumerOfServerOnlyMarker() {
List<Entry> entries = new ArrayList<>();
ByteBuf markerMessage =
Markers.newReplicatedSubscriptionsSnapshotRequest("testSnapshotId", "testSourceCluster");
entries.add(EntryImpl.create(1, 1, markerMessage));
SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal();
EntryBatchSizes batchSizes = EntryBatchSizes.get(entries.size());
int size = this.helper.filterEntriesForConsumer(entries, batchSizes, sendMessageInfo, null, null, false, null);
assertEquals(size, 0);
}
|
@Override
public void writeAll(Collection<Cache.Entry<? extends K, ? extends V>> collection) throws CacheWriterException {
long startNanos = Timer.nanos();
try {
delegate.get().writeAll(collection);
} finally {
writeAllProbe.recordValue(Timer.nanosElapsed(startNanos));
}
}
|
@Test
public void writeAll() {
Collection<Cache.Entry<? extends Integer, ? extends String>> c = new LinkedList<>();
cacheWriter.writeAll(c);
verify(delegate).writeAll(c);
assertProbeCalledOnce("writeAll");
}
|
public String getDiscriminatingValue(ILoggingEvent event) {
// http://jira.qos.ch/browse/LBCLASSIC-213
Map<String, String> mdcMap = event.getMDCPropertyMap();
if (mdcMap == null) {
return defaultValue;
}
String mdcValue = mdcMap.get(key);
if (mdcValue == null) {
return defaultValue;
} else {
return mdcValue;
}
}
|
@Test
public void smoke() {
MDC.put(key, value);
event = new LoggingEvent("a", logger, Level.DEBUG, "", null, null);
String discriminatorValue = discriminator.getDiscriminatingValue(event);
assertEquals(value, discriminatorValue);
}
|
@Override
public void createIngress(Ingress ingress) {
checkNotNull(ingress, ERR_NULL_INGRESS);
checkArgument(!Strings.isNullOrEmpty(ingress.getMetadata().getUid()),
ERR_NULL_INGRESS_UID);
k8sIngressStore.createIngress(ingress);
log.info(String.format(MSG_INGRESS, ingress.getMetadata().getName(), MSG_CREATED));
}
|
@Test(expected = NullPointerException.class)
public void testCreateNullIngress() {
target.createIngress(null);
}
|
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
}
|
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationWillAutomaticallyBeRegistered() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN
recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringJob(), "not important");
// THEN
verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), jobDetailsArgumentCaptor.capture(), eq(CronExpression.create("0 0/15 * * *")), any(ZoneId.class));
final JobDetails actualJobDetails = jobDetailsArgumentCaptor.getValue();
assertThat(actualJobDetails)
.isCacheable()
.hasClassName(MyServiceWithRecurringJob.class.getName())
.hasMethodName("myRecurringMethod")
.hasNoArgs();
}
|
@GET
@Path("/tasks/subtask/progress")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Get finer grained task progress tracked in memory for the given subtasks")
@ApiResponses(value = {
@ApiResponse(code = 200, message = "Success"), @ApiResponse(code = 500, message = "Internal server error")
})
public String getSubtaskProgress(
@ApiParam(value = "Sub task names separated by comma") @QueryParam("subtaskNames") String subtaskNames) {
try {
LOGGER.debug("Getting progress for subtasks: {}", subtaskNames);
Map<String, Object> progress = new HashMap<>();
for (String subtaskName : StringUtils.split(subtaskNames, CommonConstants.Minion.TASK_LIST_SEPARATOR)) {
MinionEventObserver observer = MinionEventObservers.getInstance().getMinionEventObserver(subtaskName);
if (observer != null) {
progress.put(subtaskName, observer.getProgress());
}
}
LOGGER.debug("Got subtasks progress: {}", progress);
return JsonUtils.objectToString(progress);
} catch (Exception e) {
throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(
String.format("Failed to get task progress for subtasks: %s due to error: %s", subtaskNames, e.getMessage()))
.build());
}
}
|
@Test
public void testGetGivenSubtaskOrStateProgress()
throws IOException {
MinionEventObserver observer1 = new MinionProgressObserver();
observer1.notifyTaskStart(null);
MinionEventObservers.getInstance().addMinionEventObserver("t01", observer1);
MinionEventObserver observer2 = new MinionProgressObserver();
observer2.notifyProgress(null, "");
MinionEventObservers.getInstance().addMinionEventObserver("t02", observer2);
MinionEventObserver observer3 = new MinionProgressObserver();
observer3.notifyTaskSuccess(null, "");
MinionEventObservers.getInstance().addMinionEventObserver("t03", observer3);
PinotTaskProgressResource pinotTaskProgressResource = new PinotTaskProgressResource();
// get all sub task progress
String allSubTaskProgress = pinotTaskProgressResource.getSubtaskProgress(null, null);
Map<String, Object> subtaskProgressMap = JsonUtils.stringToObject(allSubTaskProgress, Map.class);
assertEquals(subtaskProgressMap.size(), 3);
// get subtasks with given state
String subtaskWithInProgressState =
pinotTaskProgressResource.getSubtaskProgress(null, MinionTaskState.IN_PROGRESS.toString());
assertInProgressSubtasks(subtaskWithInProgressState);
String subtaskWithUndefinedState =
pinotTaskProgressResource.getSubtaskProgress(null, "Undefined");
assertInProgressSubtasks(subtaskWithUndefinedState);
String subtaskWithSucceededState =
pinotTaskProgressResource.getSubtaskProgress(null, MinionTaskState.SUCCEEDED.toString());
subtaskProgressMap = JsonUtils.stringToObject(subtaskWithSucceededState, Map.class);
assertEquals(subtaskProgressMap.size(), 1);
String subtaskWithUnknownState =
pinotTaskProgressResource.getSubtaskProgress(null, MinionTaskState.UNKNOWN.toString());
assertNoSubtaskWithTheGivenState(subtaskWithUnknownState);
String subtaskWithCancelledState =
pinotTaskProgressResource.getSubtaskProgress(null, MinionTaskState.CANCELLED.toString());
assertNoSubtaskWithTheGivenState(subtaskWithCancelledState);
String subtaskWithErrorState =
pinotTaskProgressResource.getSubtaskProgress(null, MinionTaskState.ERROR.toString());
assertNoSubtaskWithTheGivenState(subtaskWithErrorState);
// get subtasks with given name
String subTasksWithGivenNamesProgress = pinotTaskProgressResource.getSubtaskProgress(" t01 , t02 ", null);
assertInProgressSubtasks(subTasksWithGivenNamesProgress);
// get subtasks with given names and state
assertThrows(WebApplicationException.class,
() -> pinotTaskProgressResource.getSubtaskProgress(" t01 , t02 ", MinionTaskState.IN_PROGRESS.toString()));
}
|
public static Select select(String fieldName) {
    return new Select(fieldName);
}
|
@Test
void sub_expression_annotations() {
String q = Q.select("*")
.from("sd1")
.where("f1").contains("v1").annotate(A.a("ak1", "av1"))
.build();
assertEquals(q, "yql=select * from sd1 where ([{\"ak1\":\"av1\"}](f1 contains \"v1\"))");
}
|
public static StringBuilder leftAlign(StringBuilder in, int len) {
int sfx = len - in.length();
if (sfx <= 0) {
return in;
}
if (sfx > SPACES_LEN) {
sfx = SPACES_LEN;
}
in.append(SPACES_CHARS, 0, sfx);
return in;
}
|
@Test
public void testLeftAlign() {
assertEquals("foo ",
JOrphanUtils.leftAlign(new StringBuilder("foo"), 5).toString());
assertEquals("foo",
JOrphanUtils.leftAlign(new StringBuilder("foo"), 2).toString());
assertEquals("foo ",
JOrphanUtils.leftAlign(new StringBuilder("foo"), 39).toString());
}
|
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
MigrationDecisionCallback callback) {
assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
+ Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
}
initState(oldReplicas);
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
// fix cyclic partition replica movements
if (fixCycle(oldReplicas, newReplicas)) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
Arrays.toString(newReplicas));
}
}
int currentIndex = 0;
while (currentIndex < oldReplicas.length) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
Arrays.toString(state));
}
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
if (newReplicas[currentIndex] == null) {
if (state[currentIndex] != null) {
// replica owner is removed and no one will own this replica
logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
state[currentIndex] = null;
}
currentIndex++;
continue;
}
if (state[currentIndex] == null) {
int i = getReplicaIndex(state, newReplicas[currentIndex]);
if (i == -1) {
// fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (i > currentIndex) {
// SHIFT UP replica from i to currentIndex, copy data from partition owner
logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
state[i], i, currentIndex);
callback.migrate(null, -1, -1, state[i], i, currentIndex);
state[currentIndex] = state[i];
state[i] = null;
continue;
}
throw new AssertionError("partitionId=" + partitionId
        + ", Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
        + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
}
if (newReplicas[currentIndex].equals(state[currentIndex])) {
// no change, no action needed
currentIndex++;
continue;
}
if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
&& getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
// MOVE partition replica from its old owner to new owner
logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
assert newIndex > currentIndex : "partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
+ Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
+ ", FINAL: " + Arrays.toString(newReplicas);
if (state[newIndex] == null) {
// it is a SHIFT DOWN
logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
state[newIndex] = state[currentIndex];
} else {
logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
}
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
}
assert Arrays.equals(state, newReplicas)
: "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
+ " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
}
|
@Test
public void test_SHIFT_UP_toReplicaIndexWithExistingOwner() throws UnknownHostException {
final PartitionReplica[] oldReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
null,
null,
null,
};
final PartitionReplica[] newReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
null,
null,
null,
null,
};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), 3, 1);
}
|
@Override
public SqlRequest refactor(QueryParamEntity entity, Object... args) {
if (injector == null) {
initInjector();
}
return injector.refactor(entity, args);
}
|
@Test
void testParenthesisFrom() {
QueryAnalyzerImpl analyzer = new QueryAnalyzerImpl(
database,
"select * from (s_test) t");
SqlRequest request = analyzer
.refactor(QueryParamEntity.of().and("t.id", "eq", "test"), 1);
System.out.println(request);
}
|
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig,
Deserializers<K, V> deserializers,
int maxRecords) {
// Error when fetching the next record before deserialization.
if (corruptLastRecord)
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", cachedRecordException);
if (isConsumed)
return Collections.emptyList();
List<ConsumerRecord<K, V>> records = new ArrayList<>();
try {
for (int i = 0; i < maxRecords; i++) {
// Only move to next record if there was no exception in the last fetch. Otherwise, we should
// use the last record to do deserialization again.
if (cachedRecordException == null) {
corruptLastRecord = true;
lastRecord = nextFetchedRecord(fetchConfig);
corruptLastRecord = false;
}
if (lastRecord == null)
break;
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord);
records.add(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
nextFetchOffset = lastRecord.offset() + 1;
// In some cases, the deserialization may have thrown an exception and the retry may succeed,
// we allow user to move forward in this case.
cachedRecordException = null;
}
} catch (SerializationException se) {
cachedRecordException = se;
if (records.isEmpty())
throw se;
} catch (KafkaException e) {
cachedRecordException = e;
if (records.isEmpty())
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", e);
}
return records;
}
|
@Test
public void testCorruptedMessage() {
// Create one good record and then one "corrupted" record.
try (final MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, 0);
final UUIDSerializer serializer = new UUIDSerializer()) {
builder.append(new SimpleRecord(serializer.serialize(TOPIC_NAME, UUID.randomUUID())));
builder.append(0L, "key".getBytes(), "value".getBytes());
builder.append(new SimpleRecord(serializer.serialize(TOPIC_NAME, UUID.randomUUID())));
Headers headers = new RecordHeaders();
headers.add("hkey", "hvalue".getBytes());
builder.append(10L, serializer.serialize("key", UUID.randomUUID()), "otherValue".getBytes(), headers.toArray());
Records records = builder.build();
FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(10)
.setLastStableOffset(20)
.setLogStartOffset(0)
.setRecords(records);
try (final Deserializers<UUID, UUID> deserializers = newUuidDeserializers()) {
FetchConfig fetchConfig = newFetchConfig(IsolationLevel.READ_COMMITTED, false);
CompletedFetch completedFetch = newCompletedFetch(0, partitionData);
completedFetch.fetchRecords(fetchConfig, deserializers, 10);
RecordDeserializationException thrown = assertThrows(RecordDeserializationException.class,
() -> completedFetch.fetchRecords(fetchConfig, deserializers, 10));
assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.KEY, thrown.origin());
assertEquals(1, thrown.offset());
assertEquals(TOPIC_NAME, thrown.topicPartition().topic());
assertEquals(0, thrown.topicPartition().partition());
assertEquals(0, thrown.timestamp());
assertArrayEquals("key".getBytes(), Utils.toNullableArray(thrown.keyBuffer()));
assertArrayEquals("value".getBytes(), Utils.toNullableArray(thrown.valueBuffer()));
assertEquals(0, thrown.headers().toArray().length);
CompletedFetch completedFetch2 = newCompletedFetch(2, partitionData);
completedFetch2.fetchRecords(fetchConfig, deserializers, 10);
RecordDeserializationException valueThrown = assertThrows(RecordDeserializationException.class,
() -> completedFetch2.fetchRecords(fetchConfig, deserializers, 10));
assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, valueThrown.origin());
assertEquals(3, valueThrown.offset());
assertEquals(TOPIC_NAME, valueThrown.topicPartition().topic());
assertEquals(0, valueThrown.topicPartition().partition());
assertEquals(10L, valueThrown.timestamp());
assertNotNull(valueThrown.keyBuffer());
assertArrayEquals("otherValue".getBytes(), Utils.toNullableArray(valueThrown.valueBuffer()));
assertEquals(headers, valueThrown.headers());
}
}
}
|
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
final Statement statement,
final KsqlExecutionContext ksqlEngine,
final KsqlConfig ksqlConfig,
final Map<String, Object> overrides
) {
if (!isPushV2Enabled(ksqlConfig, overrides)) {
return false;
}
if (! (statement instanceof Query)) {
return false;
}
final Query query = (Query) statement;
final SourceFinder sourceFinder = new SourceFinder();
sourceFinder.process(query.getFrom(), null);
// It will be present if it's not a join, which we don't handle
if (!sourceFinder.getSourceName().isPresent()) {
return false;
}
// Find all of the writers to this particular source.
final SourceName sourceName = sourceFinder.getSourceName().get();
final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
// See if the config or override have set the stream to be "latest"
final boolean isLatest = isLatest(ksqlConfig, overrides);
// Cannot be a pull query, i.e. must be a push
return !query.isPullQuery()
// Group by is not supported
&& !query.getGroupBy().isPresent()
// Windowing is not supported
&& !query.getWindow().isPresent()
// Having clause is not supported
&& !query.getHaving().isPresent()
// Partition by is not supported
&& !query.getPartitionBy().isPresent()
// There must be an EMIT CHANGES clause
&& (query.getRefinement().isPresent()
&& query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
// Must be reading from "latest"
&& isLatest
// We only handle a single sink source at the moment from a CTAS/CSAS
&& upstreamQueries.size() == 1
// ROWPARTITION and ROWOFFSET are not currently supported in SPQs
&& !containsDisallowedColumns(query);
}
|
@Test
public void isScalablePushQuery_true_streamsOverride() {
try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
// When:
expectIsSPQ(ColumnName.of("foo"), columnExtractor);
// Then:
assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
ImmutableMap.of(
KsqlConfig.KSQL_STREAMS_PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")),
equalTo(true));
}
}
|
@Override
public boolean put(K key, V value) {
return get(putAsync(key, value));
}
|
@Test
public void testContainsValue() {
RListMultimap<SimpleKey, SimpleValue> map = redisson.getListMultimap("{1}test1");
map.put(new SimpleKey("0"), new SimpleValue("1"));
assertThat(map.containsValue(new SimpleValue("1"))).isTrue();
assertThat(map.containsValue(new SimpleValue("0"))).isFalse();
}
|
public int runInteractively() {
displayWelcomeMessage();
RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
boolean eof = false;
while (!eof) {
try {
handleLine(nextNonCliCommand());
} catch (final EndOfFileException exception) {
// EOF is fine, just terminate the REPL
terminal.writer().println("Exiting ksqlDB.");
eof = true;
} catch (final Exception exception) {
LOGGER.error("An error occurred while running a command. Error = "
+ exception.getMessage(), exception);
terminal.printError(ErrorMessageUtil.buildErrorMessage(exception),
exception.toString());
}
terminal.flush();
}
return NO_ERROR;
}
|
@Test
public void shouldFailOnUnsupportedStandaloneServerVersion() throws Exception {
givenRunInteractivelyWillExit();
final KsqlRestClient mockRestClient = givenMockRestClient("0.9.0-0");
assertThrows(
KsqlUnsupportedServerException.class,
() -> new Cli(1L, 1L, mockRestClient, console)
.runInteractively()
);
}
|
public BucketInfo addInsert(String partitionPath) {
// for new inserts, compute buckets depending on how many records we have for each partition
SmallFileAssign smallFileAssign = getSmallFileAssign(partitionPath);
// first try packing this into one of the smallFiles
if (smallFileAssign != null && smallFileAssign.assign()) {
return new BucketInfo(BucketType.UPDATE, smallFileAssign.getFileId(), partitionPath);
}
// if we have anything more, create new insert buckets, like normal
if (newFileAssignStates.containsKey(partitionPath)) {
NewFileAssignState newFileAssignState = newFileAssignStates.get(partitionPath);
if (newFileAssignState.canAssign()) {
newFileAssignState.assign();
final String key = StreamerUtil.generateBucketKey(partitionPath, newFileAssignState.fileId);
if (bucketInfoMap.containsKey(key)) {
// the newFileAssignStates is cleaned asynchronously when received the checkpoint success notification,
// the records processed within the time range:
// (start checkpoint, checkpoint success(and instant committed))
// should still be assigned to the small buckets of last checkpoint instead of new one.
// the bucketInfoMap is cleaned when checkpoint starts.
// A promotion: when the HoodieRecord can record whether it is an UPDATE or INSERT,
// we can always return an UPDATE BucketInfo here, and there is no need to record the
// UPDATE bucket through calling #addUpdate.
return bucketInfoMap.get(key);
}
return new BucketInfo(BucketType.UPDATE, newFileAssignState.fileId, partitionPath);
}
}
BucketInfo bucketInfo = new BucketInfo(BucketType.INSERT, createFileIdOfThisTask(), partitionPath);
final String key = StreamerUtil.generateBucketKey(partitionPath, bucketInfo.getFileIdPrefix());
bucketInfoMap.put(key, bucketInfo);
NewFileAssignState newFileAssignState = new NewFileAssignState(bucketInfo.getFileIdPrefix(), writeProfile.getRecordsPerBucket());
newFileAssignState.assign();
newFileAssignStates.put(partitionPath, newFileAssignState);
return bucketInfo;
}
|
@Test
public void testInsertOverBucketAssigned() {
conf.setInteger(HoodieCompactionConfig.COPY_ON_WRITE_INSERT_SPLIT_SIZE.key(), 2);
writeConfig = FlinkWriteClients.getHoodieClientConfig(conf);
MockBucketAssigner mockBucketAssigner = new MockBucketAssigner(context, writeConfig);
BucketInfo bucketInfo1 = mockBucketAssigner.addInsert("par1");
assertBucketEquals(bucketInfo1, "par1", BucketType.INSERT);
BucketInfo bucketInfo2 = mockBucketAssigner.addInsert("par1");
assertBucketEquals(bucketInfo2, "par1", BucketType.INSERT);
assertEquals(bucketInfo1, bucketInfo2);
BucketInfo bucketInfo3 = mockBucketAssigner.addInsert("par1");
assertBucketEquals(bucketInfo3, "par1", BucketType.INSERT);
assertNotEquals(bucketInfo1, bucketInfo3);
}
|
@Override
public AppResponse process(Flow flow, ActivationUsernamePasswordRequest body) throws SharedServiceClientException {
digidClient.remoteLog("1088", Map.of(lowerUnderscore(HIDDEN), true));
var result = digidClient.authenticate(body.getUsername(), body.getPassword());
if (result.get(lowerUnderscore(STATUS)).equals("NOK") && result.get(ERROR) != null ) {
final var error = (String) result.get(ERROR);
if (ERROR_DECEASED.equals(error)) {
digidClient.remoteLog("1482", Map.of(lowerUnderscore(ACCOUNT_ID), result.get(lowerUnderscore(ACCOUNT_ID)), "hidden", true));
} else if (ERROR_NO_BSN.equals(error)) {
digidClient.remoteLog("1074", Map.of(lowerUnderscore(ACCOUNT_ID), result.get(lowerUnderscore(ACCOUNT_ID))));
} else if (ERROR_ACCOUNT_BLOCKED.equals(error)) {
return new PasswordConfirmedResponse((String) result.get(ERROR), result);
}
return new NokResponse((String) result.get(ERROR));
}
return Optional.ofNullable(validateAmountOfApps(Long.valueOf((Integer) result.get(lowerUnderscore(ACCOUNT_ID))), body))
.orElseGet(() -> getActivationUsernamePasswordResponse(body, result));
}
|
@Test
void responseSuccessRemoveOldApp() throws SharedServiceClientException {
AppAuthenticator leastRecentApp = new AppAuthenticator();
leastRecentApp.setActivatedAt(ZonedDateTime.of(2022, 3, 30, 0, 0, 0, 0, ZoneId.systemDefault()));
leastRecentApp.setDeviceName("least-recent-app-name");
when(sharedServiceClientMock.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker")).thenReturn(2);
when(digidClientMock.authenticate(anyString(), anyString())).thenReturn(responseDigidClient);
when(appAuthenticatorServiceMock.countByAccountIdAndInstanceIdNot(anyLong(), anyString())).thenReturn(2);
when(appAuthenticatorServiceMock.findLeastRecentApp(anyLong())).thenReturn(leastRecentApp);
AppResponse result = passwordConfirmed.process(flow, request);
assertNotNull(result);
assertTrue(result instanceof TooManyAppsResponse);
TooManyAppsResponse response = (TooManyAppsResponse) result;
assertEquals("NOK", response.getStatus());
assertEquals("30-03-2022", response.getLatestDate());
assertEquals("least-recent-app-name", response.getDeviceName());
}
|
public static boolean exists(String name) {
return pool.exists(name);
}
|
@Test
public void testExists() {
String name = "test";
assertFalse(ChannelOption.exists(name));
ChannelOption<String> option = ChannelOption.valueOf(name);
assertTrue(ChannelOption.exists(name));
assertNotNull(option);
}
|
@Override
public void deregisterService(String serviceName, String groupName, Instance instance) throws NacosException {
getExecuteClientProxy(instance).deregisterService(serviceName, groupName, instance);
}
|
@Test
void testDeregisterPersistentServiceGrpc() throws NacosException {
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
instance.setServiceName(serviceName);
instance.setClusterName(groupName);
instance.setIp("1.1.1.1");
instance.setPort(1);
// persistent instance
instance.setEphemeral(false);
// when server support deregister persistent instance by grpc, will use grpc to deregister
when(mockGrpcClient.isAbilitySupportedByServer(
AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)).thenReturn(true);
delegate.deregisterService(serviceName, groupName, instance);
verify(mockGrpcClient, times(1)).deregisterService(serviceName, groupName, instance);
}
|
public void flush() {
while (!shortTermStorage.isEmpty()) {
T oldestRecord = shortTermStorage.poll();
outputMechanism.accept(oldestRecord);
}
}
|
@Test
public void testFlush() {
/**
* Confirm all points are emitted in the proper order upon calling "flush"
*/
Duration maxLag = Duration.ofMinutes(5);
TimeOrderVerifyingConsumer consumer = new TimeOrderVerifyingConsumer();
ApproximateTimeSorter<TimePojo> sorter = new ApproximateTimeSorter<>(
maxLag,
consumer
);
for (TimePojo commonPoint : testData()) {
sorter.accept(commonPoint);
}
assertEquals(0, consumer.size());
sorter.flush();
assertEquals(9, consumer.size());
}
|
public BigDecimal calculateTDEE(ActiveLevel activeLevel) {
if(activeLevel == null) return BigDecimal.valueOf(0);
BigDecimal multiplayer = BigDecimal.valueOf(activeLevel.getMultiplayer());
return multiplayer.multiply(BMR).setScale(2, RoundingMode.HALF_DOWN);
}
|
@Test
void calculateTDEE_SEDNTARY() {
BigDecimal TDEE = bmrCalculator.calculate(attributes).calculateTDEE(ActiveLevel.SEDENTARY);
assertEquals(new BigDecimal("2451.00"), TDEE);
}
|
public static IRubyObject deep(final Ruby runtime, final Object input) {
if (input == null) {
return runtime.getNil();
}
final Class<?> cls = input.getClass();
final Rubyfier.Converter converter = CONVERTER_MAP.get(cls);
if (converter != null) {
return converter.convert(runtime, input);
}
return fallbackConvert(runtime, input, cls);
}
|
@Test
public void testDeepListWithBigDecimal() throws Exception {
List<BigDecimal> data = new ArrayList<>();
data.add(new BigDecimal(1));
@SuppressWarnings("rawtypes")
RubyArray rubyArray = (RubyArray)Rubyfier.deep(RubyUtil.RUBY, data);
// toJavaArray does not newFromRubyArray inner elements to Java types \o/
assertEquals(RubyBigDecimal.class, rubyArray.toJavaArray()[0].getClass());
assertEquals(1.0D, ((RubyBigDecimal)rubyArray.toJavaArray()[0]).getDoubleValue(), 0);
}
|