focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k)
---|---|
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
lock.lock();
try {
checkArgument(!req.completed, () ->
"given SendRequest has already been completed");
log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());
// Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
// with the actual outputs that'll be used to gather the required amount of value. In this way, users
// can customize coin selection policies. The call below will ignore immature coinbases and outputs
// we don't have the keys for.
List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);
// Connect (add a value amount) unconnected inputs
List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
req.tx.clearInputs();
inputs.forEach(req.tx::addInput);
// Warn if there are remaining unconnected inputs whose value we do not know
// TODO: Consider throwing if there are inputs that we don't have a value for
if (req.tx.getInputs().stream()
.map(TransactionInput::getValue)
.anyMatch(Objects::isNull))
log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");
// If any inputs have already been added, we don't need to get their value from wallet
Coin totalInput = req.tx.getInputSum();
// Calculate the amount of value we need to import.
Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);
// Enforce the OP_RETURN limit
if (req.tx.getOutputs().stream()
.filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
.count() > 1) // Only 1 OP_RETURN per transaction allowed.
throw new MultipleOpReturnRequested();
// Check for dusty sends
if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
throw new DustySendRequested();
}
// Filter out candidates that are already included in the transaction inputs
List<TransactionOutput> candidates = prelimCandidates.stream()
.filter(output -> !alreadyIncluded(req.tx.getInputs(), output))
.collect(StreamUtils.toUnmodifiableList());
CoinSelection bestCoinSelection;
TransactionOutput bestChangeOutput = null;
List<Coin> updatedOutputValues = null;
if (!req.emptyWallet) {
// This can throw InsufficientMoneyException.
FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
bestCoinSelection = feeCalculation.bestCoinSelection;
bestChangeOutput = feeCalculation.bestChangeOutput;
updatedOutputValues = feeCalculation.updatedOutputValues;
} else {
// We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
// of the total value we can currently spend as determined by the selector, and then subtracting the fee.
checkState(req.tx.getOutputs().size() == 1, () ->
"empty wallet TX must have a single output only");
CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString());
}
bestCoinSelection.outputs()
.forEach(req.tx::addInput);
if (req.emptyWallet) {
if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
throw new CouldNotAdjustDownwards();
}
if (updatedOutputValues != null) {
for (int i = 0; i < updatedOutputValues.size(); i++) {
req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
}
}
if (bestChangeOutput != null) {
req.tx.addOutput(bestChangeOutput);
log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString());
}
// Now shuffle the outputs to obfuscate which is the change.
if (req.shuffleOutputs)
req.tx.shuffleOutputs();
// Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
if (req.signInputs)
signTransaction(req);
// Check size.
final int size = req.tx.messageSize();
if (size > Transaction.MAX_STANDARD_TX_SIZE)
throw new ExceededMaxTransactionSize();
// Label the transaction as being self created. We can use this later to spend its change output even before
// the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
// point - the user isn't interested in a confidence transition they made themselves.
getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
// Label the transaction as being a user requested payment. This can be used to render GUI wallet
// transaction lists more appropriately, especially when the wallet starts to generate transactions itself
// for internal purposes.
req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
// Record the exchange rate that was valid when the transaction was completed.
req.tx.setExchangeRate(req.exchangeRate);
req.tx.setMemo(req.memo);
req.completed = true;
log.info(" completed: {}", req.tx);
} finally {
lock.unlock();
}
}
|
@Test
public void opReturnMaxBytes() throws Exception {
receiveATransaction(wallet, myAddress);
Transaction tx = new Transaction();
Script script = ScriptBuilder.createOpReturnScript(new byte[80]);
tx.addOutput(Coin.ZERO, script);
SendRequest request = SendRequest.forTx(tx);
request.ensureMinRequiredFee = true;
wallet.completeTx(request);
}
|
@Override
public void run() {
// top-level command, do nothing
}
|
@Test
public void test_submit_server_cli_version_minor_mismatch_ignore() {
String serverVersion = "5.0.0";
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, serverVersion);
Config cfg = smallInstanceConfig();
cfg.getJetConfig().setResourceUploadEnabled(true);
String clusterName = randomName();
cfg.setClusterName(clusterName);
hz = createHazelcastInstance(cfg);
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, "5.1.0");
ClientConfig clientConfig = new ClientConfig();
clientConfig.setClusterName(clusterName);
client = createHazelcastClient(clientConfig);
run("submit", "--ignore-version-mismatch", testJobJarFile.toString());
Job job = hz.getJet().getJobs().get(0);
assertThat(job).eventuallyHasStatus(JobStatus.RUNNING);
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String mysqlDataType = typeDefine.getDataType().toUpperCase();
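// Normalize the raw type name: drop any ZEROFILL suffix and ensure unsigned types carry
// an explicit " UNSIGNED" suffix so the switch below matches the canonical type names.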
if (mysqlDataType.endsWith("ZEROFILL")) {
mysqlDataType =
mysqlDataType.substring(0, mysqlDataType.length() - "ZEROFILL".length()).trim();
}
if (typeDefine.isUnsigned() && !(mysqlDataType.endsWith(" UNSIGNED"))) {
mysqlDataType = mysqlDataType + " UNSIGNED";
}
switch (mysqlDataType) {
case MYSQL_NULL:
builder.dataType(BasicType.VOID_TYPE);
break;
case MYSQL_BIT:
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.dataType(BasicType.BOOLEAN_TYPE);
} else if (typeDefine.getLength() == 1) {
builder.dataType(BasicType.BOOLEAN_TYPE);
} else {
builder.dataType(PrimitiveByteArrayType.INSTANCE);
// BIT(M) -> BYTE(M/8)
long byteLength = typeDefine.getLength() / 8;
byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0;
builder.columnLength(byteLength);
}
break;
case MYSQL_TINYINT:
if (typeDefine.getColumnType().equalsIgnoreCase("tinyint(1)")) {
builder.dataType(BasicType.BOOLEAN_TYPE);
} else {
builder.dataType(BasicType.BYTE_TYPE);
}
break;
case MYSQL_TINYINT_UNSIGNED:
case MYSQL_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case MYSQL_SMALLINT_UNSIGNED:
case MYSQL_MEDIUMINT:
case MYSQL_MEDIUMINT_UNSIGNED:
case MYSQL_INT:
case MYSQL_INTEGER:
case MYSQL_YEAR:
builder.dataType(BasicType.INT_TYPE);
break;
case MYSQL_INT_UNSIGNED:
case MYSQL_INTEGER_UNSIGNED:
case MYSQL_BIGINT:
builder.dataType(BasicType.LONG_TYPE);
break;
case MYSQL_BIGINT_UNSIGNED:
DecimalType intDecimalType = new DecimalType(20, 0);
builder.dataType(intDecimalType);
builder.columnLength(Long.valueOf(intDecimalType.getPrecision()));
builder.scale(intDecimalType.getScale());
break;
case MYSQL_FLOAT:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case MYSQL_FLOAT_UNSIGNED:
log.warn("{} will probably cause value overflow.", MYSQL_FLOAT_UNSIGNED);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case MYSQL_DOUBLE:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case MYSQL_DOUBLE_UNSIGNED:
log.warn("{} will probably cause value overflow.", MYSQL_DOUBLE_UNSIGNED);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case MYSQL_DECIMAL:
Preconditions.checkArgument(typeDefine.getPrecision() > 0);
DecimalType decimalType;
if (typeDefine.getPrecision() > DEFAULT_PRECISION) {
log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL);
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
} else {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(),
typeDefine.getScale() == null
? 0
: typeDefine.getScale().intValue());
}
builder.dataType(decimalType);
builder.columnLength(Long.valueOf(decimalType.getPrecision()));
builder.scale(decimalType.getScale());
break;
case MYSQL_DECIMAL_UNSIGNED:
Preconditions.checkArgument(typeDefine.getPrecision() > 0);
log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL_UNSIGNED);
DecimalType decimalUnsignedType =
new DecimalType(
typeDefine.getPrecision().intValue() + 1,
typeDefine.getScale() == null
? 0
: typeDefine.getScale().intValue());
builder.dataType(decimalUnsignedType);
builder.columnLength(Long.valueOf(decimalUnsignedType.getPrecision()));
builder.scale(decimalUnsignedType.getScale());
break;
case MYSQL_ENUM:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(100L);
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case MYSQL_CHAR:
case MYSQL_VARCHAR:
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
} else {
builder.columnLength(typeDefine.getLength());
}
builder.dataType(BasicType.STRING_TYPE);
break;
case MYSQL_TINYTEXT:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_8 - 1);
break;
case MYSQL_TEXT:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_16 - 1);
break;
case MYSQL_MEDIUMTEXT:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_24 - 1);
break;
case MYSQL_LONGTEXT:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_32 - 1);
break;
case MYSQL_JSON:
builder.dataType(BasicType.STRING_TYPE);
break;
case MYSQL_BINARY:
case MYSQL_VARBINARY:
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(1L);
} else {
builder.columnLength(typeDefine.getLength());
}
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case MYSQL_TINYBLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_8 - 1);
break;
case MYSQL_BLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_16 - 1);
break;
case MYSQL_MEDIUMBLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_24 - 1);
break;
case MYSQL_LONGBLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_32 - 1);
break;
case MYSQL_GEOMETRY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case MYSQL_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case MYSQL_TIME:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case MYSQL_DATETIME:
case MYSQL_TIMESTAMP:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.MYSQL, mysqlDataType, typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertBlob() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("tinyblob")
.dataType("tinyblob")
.build();
Column column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(255, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder().name("test").columnType("blob").dataType("blob").build();
column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(65535, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("mediumblob")
.dataType("mediumblob")
.build();
column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(16777215, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("longblob")
.dataType("longblob")
.build();
column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(4294967295L, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
|
@Override
public ParseResult parsePath(String path) {
String original = path;
path = path.replace('/', '\\');
if (WORKING_DIR_WITH_DRIVE.matcher(path).matches()) {
throw new InvalidPathException(
original,
"Jimfs does not currently support the Windows syntax for a relative path "
+ "on a specific drive (e.g. \"C:foo\\bar\")");
}
String root;
if (path.startsWith("\\\\")) {
root = parseUncRoot(path, original);
} else if (path.startsWith("\\")) {
throw new InvalidPathException(
original,
"Jimfs does not currently support the Windows syntax for an absolute path "
+ "on the current drive (e.g. \"\\foo\\bar\")");
} else {
root = parseDriveRoot(path);
}
// check for root.length() > 3 because only "C:\" type roots are allowed to have :
int startIndex = root == null || root.length() > 3 ? 0 : root.length();
for (int i = startIndex; i < path.length(); i++) {
char c = path.charAt(i);
if (isReserved(c)) {
throw new InvalidPathException(original, "Illegal char <" + c + ">", i);
}
}
Matcher trailingSpaceMatcher = TRAILING_SPACES.matcher(path);
if (trailingSpaceMatcher.find()) {
throw new InvalidPathException(original, "Trailing char < >", trailingSpaceMatcher.start());
}
if (root != null) {
path = path.substring(root.length());
if (!root.endsWith("\\")) {
root = root + "\\";
}
}
return new ParseResult(root, splitter().split(path));
}
|
@Test
public void testWindows_relativePathsWithDriveRoot_unsupported() {
try {
windows().parsePath("C:");
fail();
} catch (InvalidPathException expected) {
}
try {
windows().parsePath("C:foo\\bar");
fail();
} catch (InvalidPathException expected) {
}
}
|
public static byte[] readFileBytes(File file) {
if (file.exists()) {
String result = readFile(file);
if (result != null) {
return ByteUtils.toBytes(result);
}
}
return null;
}
|
@Test
void testReadFileBytes() {
assertNotNull(DiskUtils.readFileBytes(testFile));
}
|
@POST
@ApiOperation("Get all views that match given parameter value")
@NoAuditEvent("Only returning matching views, not changing any data")
public Collection<ViewParameterSummaryDTO> forParameter(@Context SearchUser searchUser) {
return qualifyingViewsService.forValue()
.stream()
.filter(searchUser::canReadView)
.collect(Collectors.toSet());
}
|
@Test
public void returnsNoViewsIfNoneArePresent() {
final SearchUser searchUser = TestSearchUser.builder().build();
QualifyingViewsService service = mockViewsService();
final QualifyingViewsResource resource = new QualifyingViewsResource(service);
final Collection<ViewParameterSummaryDTO> result = resource.forParameter(searchUser);
assertThat(result).isEmpty();
}
|
public static CertificateMetadata parseMetadata(String certificateValue, boolean isIndividual) throws LibCertificateException {
SecurityProviderInitializer.initKalkanProvider();
try {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
dbf.setFeature(XML_DISALLOW_DOCTYPE_DECL_FEATURE, true);
dbf.setFeature(XML_LOAD_EXTERNAL_DTD_FEATURE, false);
Element rootEl = getRootElement(dbf, certificateValue);
return extractDataFromSignature(rootEl, isIndividual);
} catch (LibCertificateException lce) {
throw lce;
} catch (Exception e) {
throw new LibCertificateException("Failed to parse certificate", e);
}
}
|
@Test
public void testParseMetadataCorporate() throws LibCertificateException {
try (MockedStatic<DateUtils> dateUtilsMock = Mockito.mockStatic(DateUtils.class)) {
String certificateValue = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><login><timeTicket>1714814361503</timeTicket><sessionid>86ed9a1d-c753-444b-a5a0-d0370ef32a5a</sessionid><ds:Signature xmlns:ds=\"http://www.w3.org/2000/09/xmldsig#\">\n" +
"<ds:SignedInfo>\n" +
"<ds:CanonicalizationMethod Algorithm=\"http://www.w3.org/TR/2001/REC-xml-c14n-20010315\"/>\n" +
"<ds:SignatureMethod Algorithm=\"http://www.w3.org/2001/04/xmldsig-more#rsa-sha256\"/>\n" +
"<ds:Reference URI=\"\">\n" +
"<ds:Transforms>\n" +
"<ds:Transform Algorithm=\"http://www.w3.org/2000/09/xmldsig#enveloped-signature\"/>\n" +
"<ds:Transform Algorithm=\"http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments\"/>\n" +
"</ds:Transforms>\n" +
"<ds:DigestMethod Algorithm=\"http://www.w3.org/2001/04/xmlenc#sha256\"/>\n" +
"<ds:DigestValue>/Bg+fR7MjDLjd6cSq+PC/d5tKBlN5OJwXwBgv3FOCQU=</ds:DigestValue>\n" +
"</ds:Reference>\n" +
"</ds:SignedInfo>\n" +
"<ds:SignatureValue>\n" +
"fJnVStCdI3iYWxy4mQaVt+xcy67FUjRl4V9N1wlge6q67CR11Q1UaCpvaAN/Fgb8GvTmeGJXCScJ\n" +
"s/t8suCep08AGwxDNOJbBuZzBPGS/Cm3gBG8j8QiXFzzsBj+sxGHUA1kZAXG4KTJ492+X+5fsfKZ\n" +
"rjZCm9TJim7NBj+ts08v0GvZOywbs5/N49EKG8QgnKv2gOh8bnMc1Mlfe9e9r479FHiEcdoJVs3S\n" +
"RwzemzFqxp6lSkXT7UDgBamDvarYHJAmFvBG9lpSEE5fLYHePVFM4WPyV7MjUYVTqsKRuAmzp9f0\n" +
"eeyCk2RzvrDBLzlFb/kXAW4vMdW8PCL5WZ40hg==\n" +
"</ds:SignatureValue>\n" +
"<ds:KeyInfo>\n" +
"<ds:X509Data>\n" +
"<ds:X509Certificate>\n" +
"MIIGVDCCBDygAwIBAgIUfxaIMGG09/JaVUyUY1vNFc0gg38wDQYJKoZIhvcNAQELBQAwLTELMAkG\n" +
"A1UEBhMCS1oxHjAcBgNVBAMMFdKw0JrQniAzLjAgKFJTQSBURVNUKTAeFw0yMzAyMDExMDQ0Mjha\n" +
"Fw0yNDAyMDExMDQ0MjhaMIGtMR4wHAYDVQQDDBXQotCV0KHQotCe0JIg0KLQldCh0KIxFTATBgNV\n" +
"BAQMDNCi0JXQodCi0J7QkjEYMBYGA1UEBRMPSUlOMTIzNDU2Nzg5MDExMQswCQYDVQQGEwJLWjEY\n" +
"MBYGA1UECgwP0JDQniAi0KLQldCh0KIiMRgwFgYDVQQLDA9CSU4xMjM0NTY3ODkwMjExGTAXBgNV\n" +
"BCoMENCi0JXQodCi0J7QktCY0KcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsrfZW\n" +
"m/1c6mvwrn8xxeU6GqKLUfVcxO5p330lSPT3iCkQgXK93DAPnB2FEhQHWIOo8w7exGpvxzXUNQfL\n" +
"oWod/8o0Gcx+WhogZ8jSUwnkftDUD/LkgJkw6fp5r/ZNFTbZmpEakIWJiHmRgN26STvZAHFRsad6\n" +
"LfrzlpKjlE+v8Lw1gCZRbQ+yh2hG+1l33AzZDM1tV+rzeX9AL5gzelt6oLWLvZgSofol8CoPYy02\n" +
"9eEmY6vCqd1SJ3608Hqa0jwxUNd5oCPWEUwtphncawI5Y5Tvy1NTOCTOd/N4BJpALV0w1HFU7sec\n" +
"daFbSl6Ho/c7ZKVYlHalUdxaENi0/nlFAgMBAAGjggHpMIIB5TAOBgNVHQ8BAf8EBAMCBaAwKAYD\n" +
"VR0lBCEwHwYIKwYBBQUHAwIGCCqDDgMDBAECBgkqgw4DAwQBAgEwXgYDVR0gBFcwVTBTBgcqgw4D\n" +
"AwICMEgwIQYIKwYBBQUHAgEWFWh0dHA6Ly9wa2kuZ292Lmt6L2NwczAjBggrBgEFBQcCAjAXDBVo\n" +
"dHRwOi8vcGtpLmdvdi5rei9jcHMwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL3Rlc3QucGtpLmdv\n" +
"di5rei9jcmwvbmNhX3JzYV90ZXN0LmNybDA+BgNVHS4ENzA1MDOgMaAvhi1odHRwOi8vdGVzdC5w\n" +
"a2kuZ292Lmt6L2NybC9uY2FfZF9yc2FfdGVzdC5jcmwwcwYIKwYBBQUHAQEEZzBlMDkGCCsGAQUF\n" +
"BzAChi1odHRwOi8vdGVzdC5wa2kuZ292Lmt6L2NlcnQvbmNhX2dvc3RfdGVzdC5jZXIwKAYIKwYB\n" +
"BQUHMAGGHGh0dHA6Ly90ZXN0LnBraS5nb3Yua3ovb2NzcC8wHQYDVR0OBBYEFH8WiDBhtPfyWlVM\n" +
"lGNbzRXNIIN/MB8GA1UdIwQYMBaAFKaMFjN8uOg1ZwY+XkFXVaKvNFBoMBYGBiqDDgMDBQQMMAoG\n" +
"CCqDDgMDBQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCSfrHvobShE6ZZVuzf3QL2E6j9+m5vUOVk/eCP\n" +
"wkkCPcEANRT9gUHxM1ZAv01lR2lkOwDOPUWoBlmXxtBhqY7Aeg48HA1vfDR4YH1fRLu9EX9n2ctx\n" +
"vWN8IOU8ZtELCKumwkwESy8c5URC+N9xtpzdMIm95Q9+hDjG6Fs533/EtdUsq1kE0RjKYQ7AFa31\n" +
"tPzrVgW94QPqFhKvq3okDeAoX+mmE4cN+HC0fqWubS+5tedTDRNQuzQmFJOueo1nmWFE1oJWAg4P\n" +
"PljM+Kbp8pZPq59IDfkcYuWmUJ+B6VDgN2N09xrZ+zUyvhdm/RJ5ky5qKVD6kStpq7SCceBrC1pE\n" +
"fgZRrtCSuTRweWWf+i0eJhFnhcUDORBugDkitymPU5Oz3FtGG7+dv6l1zd5Doh7TNNaZe6aWFTKq\n" +
"56jUIMamanOJMM2SKTiF8aWcACAlKc3TOKfw0Sx9dO4Df/xnSudxhDMq62f4uxL4juQ0jFwufs+z\n" +
"O9KkRf2r6UHPtKfQJCEzLDUdt3zw8XX3P2Yy26sjYDGVnv/Eor0x5hjeH9iyL/JamfqHpYXjXR1R\n" +
"VRRzvMNUH2s/3PZsTc/UaHOJYPbr+WHIa/ywKmFq8Yfe07ElvyjrjdAYBn4/24vszTdEn2qAxBHD\n" +
"/HWTyQm1aXbVaPbd7ZF/kVt04gDSVE2wz2G/Tg==\n" +
"</ds:X509Certificate>\n" +
"</ds:X509Data>\n" +
"</ds:KeyInfo>\n" +
"</ds:Signature></login>";
Date expireDateDayBefore = Date.from(LocalDate.of(2023, 11, 16).atStartOfDay().atZone(ZoneId.systemDefault()).toInstant());
dateUtilsMock.when(() -> DateUtils.getCurrentDate()).thenReturn(expireDateDayBefore);
CertificateMetadata metadata = CertificateParser.parseMetadata(certificateValue, false);
assertNotNull(metadata, "Failed to parse certificate metadata.");
assertEquals("123456789011", metadata.getTaxCode(), "Tax code not matched.");
assertEquals("ТЕСТ", metadata.getName(), "Name not matched.");
assertEquals("ТЕСТОВ", metadata.getSurname(), "Surname not matched.");
assertEquals("ТЕСТОВИЧ", metadata.getPatronymic(), "Patronymic not matched.");
assertEquals("", metadata.getEmail(), "Email not matched.");
assertEquals("", metadata.getBirtDate(), "Birth date not matched.");
assertEquals("KZ", metadata.getCountry(), "Country not matched.");
assertEquals("", metadata.getGender(), "Gender not matched.");
assertEquals("", metadata.getNotBefore(), "Not before date not matched.");
assertEquals("", metadata.getNotAfter(), "Not after date not matched.");
assertEquals("CN=ТЕСТОВ ТЕСТ,SURNAME=ТЕСТОВ,SERIALNUMBER=IIN123456789011,C=KZ,O=АО \\\"ТЕСТ\\\",OU=BIN123456789021,G=ТЕСТОВИЧ", metadata.getDn(), "DN not matched.");
assertEquals("АО \"ТЕСТ\"", metadata.getOrganization(), "Organization not matched.");
assertEquals("123456789021", metadata.getBin(), "BIN not matched.");
}
}
|
public static <T> ProcessorMetaSupplier metaSupplier(
@Nonnull String directoryName,
@Nonnull FunctionEx<? super T, ? extends String> toStringFn,
@Nonnull String charset,
@Nullable String datePattern,
long maxFileSize,
boolean exactlyOnce
) {
return metaSupplier(directoryName, toStringFn, charset, datePattern, maxFileSize, exactlyOnce, SYSTEM_CLOCK);
}
|
@Test
public void test_rollByFileSize() throws Exception {
int numItems = 10;
DAG dag = new DAG();
Vertex src = dag.newVertex("src", () -> new SlowSourceP(semaphore, numItems)).localParallelism(1);
Vertex map = dag.newVertex("map", mapP((Integer i) -> i + 100));
// maxFileSize is always large enough for 1 item but never for 2, both with windows and linux newlines
long maxFileSize = 6L;
Vertex sink = dag.newVertex("sink", WriteFileP.metaSupplier(
directory.toString(), Objects::toString, "utf-8", null, maxFileSize, true));
dag.edge(between(src, map));
dag.edge(between(map, sink));
Job job = instance().getJet().newJob(dag);
// Then
for (int i = 0; i < numItems; i++) {
semaphore.release();
int finalI = i;
assertTrueEventually(() -> checkFileContents(100, finalI + 101, false, false, true));
}
for (int i = 0, j = 100; i < numItems / 2; i++) {
Path file = directory.resolve("0-" + i);
assertEquals((j++) + System.lineSeparator() + (j++) + System.lineSeparator(),
Files.readString(file));
}
job.join();
}
|
public Optional<Object> evaluate(final Map<String, Object> columnPairsMap, final String outputColumn, final String regexField) {
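// Return the first row that yields a value for the given column pairs and output column.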
return rows.stream()
.map(row -> row.evaluate(columnPairsMap, outputColumn, regexField))
.filter(Optional::isPresent)
.findFirst()
.map(Optional::get);
}
|
@Test
void evaluateKeyNotFound() {
KiePMMLInlineTable kiePMMLInlineTable = new KiePMMLInlineTable("name", Collections.emptyList(), ROWS);
Optional<Object> retrieved = kiePMMLInlineTable.evaluate(Collections.singletonMap("NOT-KEY", 0), "KEY-0-0",
null);
assertThat(retrieved).isNotPresent();
}
|
@Override
public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final String location = new StoregateWriteFeature(session, fileid).start(file, status);
final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status);
return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy,
new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")),
new StoregateAttributesFinderFeature(session, fileid), status) {
@Override
public File getStatus() {
return proxy.getResult();
}
};
}
|
@Test(expected = TransferStatusCanceledException.class)
public void testWriteCancel() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final byte[] content = RandomUtils.nextBytes(32769);
final Path test = new Path(room, String.format("{%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
final BytecountStreamListener listener = new BytecountStreamListener();
final TransferStatus status = new TransferStatus() {
@Override
public void validate() throws ConnectionCanceledException {
if(listener.getSent() >= 32768) {
throw new TransferStatusCanceledException();
}
super.validate();
}
};
status.setLength(content.length);
final StoregateMultipartWriteFeature writer = new StoregateMultipartWriteFeature(session, nodeid);
final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).withListener(listener).transfer(new ByteArrayInputStream(content), out);
assertFalse(new DefaultFindFeature(session).find(test));
out.getStatus();
}
|
@Override
public void toJson(Map<String, Object> json, Revision revision) {
json.put("folder", getFolder() == null ? "" : getFolder());
json.put("scmType", "Dependency");
json.put("location", pipelineName + "/" + stageName);
json.put("action", "Completed");
if (!CaseInsensitiveString.isBlank(getName())) {
json.put("materialName", CaseInsensitiveString.str(getName()));
}
}
|
@Test
void shouldReturnJson() {
Map<String, Object> json = new LinkedHashMap<>();
dependencyMaterial.toJson(json, create("pipeline", 10, "1.0.123", "stage", 1));
assertThat(json.get("location")).isEqualTo("pipeline/stage");
assertThat(json.get("scmType")).isEqualTo("Dependency");
assertThat(json.get("folder")).isEqualTo("");
assertThat(json.get("action")).isEqualTo("Completed");
}
|
public static CsvReader getReader(CsvReadConfig config) {
return new CsvReader(config);
}
|
@Test
@Disabled
public void readLfTest(){
final CsvReader reader = CsvUtil.getReader();
String path = FileUtil.isWindows() ? "d:/test/rw_test.csv" : "~/test/rw_test.csv";
final CsvData read = reader.read(FileUtil.file(path));
for (CsvRow row : read) {
Console.log(row);
}
}
|
public static void bindEnvironment(ScriptEngine engine, String requestContent, Map<String, Object> requestContext,
StateStore stateStore) {
// Build a map of header values.
bindEnvironment(engine, requestContent, requestContext, stateStore, null);
}
|
@Test
void testMicrocksXmlHolder() {
String body = """
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header/>
<soapenv:Body>
<hel:sayHello xmlns:hel="http://www.example.com/hello">
<name>Andrew</name>
</hel:sayHello>
</soapenv:Body>
</soapenv:Envelope>
""";
String script = """
import io.github.microcks.util.soapui.XmlHolder
def holder = new XmlHolder( mockRequest.requestContent )
def name = holder["//name"]
if (name == "Andrew"){
return "Andrew Response"
} else if (name == "Karla"){
return "Karla Response"
} else {
return "World Response"
}
""";
ScriptEngineManager sem = new ScriptEngineManager();
Map<String, Object> context = new HashMap<>();
try {
// Evaluating request with script coming from operation dispatcher rules.
ScriptEngine se = sem.getEngineByExtension("groovy");
ScriptEngineBinder.bindEnvironment(se, body, context, null);
String result = (String) se.eval(script);
assertEquals("Andrew Response", result);
} catch (Exception e) {
fail("Exception should no be thrown");
}
}
|
public List<String> getOnUpdateColumnsOnlyName() {
return allColumns.values().stream().filter(ColumnMeta::isOnUpdate).map(ColumnMeta::getColumnName).collect(Collectors.toList());
}
|
@Test
public void testGetOnUpdateColumnsOnlyName() {
List<String> onUpdateColumns = tableMeta.getOnUpdateColumnsOnlyName();
List<String> expected = Arrays.asList("col1");
assertEquals(expected.size(), onUpdateColumns.size());
assertTrue(onUpdateColumns.containsAll(expected));
}
|
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
}
|
@Test
public void withDefaultValue() throws ScanException {
String input = "${k67:-b}c";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
Assertions.assertEquals("bc", nodeToStringTransformer.transform());
}
|
@Override
public T load(long sequence) {
long startNanos = Timer.nanos();
try {
return delegate.load(sequence);
} finally {
loadProbe.recordValue(Timer.nanosElapsed(startNanos));
}
}
|
@Test
public void load() {
long sequence = 1L;
String value = "someValue";
when(delegate.load(sequence)).thenReturn(value);
String result = ringbufferStore.load(sequence);
assertEquals(value, result);
assertProbeCalledOnce("load");
}
|
public KVTable getBrokerRuntimeInfo(final String addr, final long timeoutMillis) throws RemotingConnectException,
RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_RUNTIME_INFO, null);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return KVTable.decode(response.getBody(), KVTable.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
|
@Test
public void assertGetBrokerRuntimeInfo() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
KVTable responseBody = new KVTable();
responseBody.getTable().put("key", "value");
setResponseBody(responseBody);
KVTable actual = mqClientAPI.getBrokerRuntimeInfo(defaultBrokerAddr, defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.getTable().size());
}
|
@Override
public GoViewDataRespVO getDataBySQL(String sql) {
// 1. Execute the query
SqlRowSet sqlRowSet = jdbcTemplate.queryForRowSet(sql);
// 2. Build the response
GoViewDataRespVO respVO = new GoViewDataRespVO();
// 2.1 Parse the metadata
SqlRowSetMetaData metaData = sqlRowSet.getMetaData();
String[] columnNames = metaData.getColumnNames();
respVO.setDimensions(Arrays.asList(columnNames));
// 2.2 Parse the data rows
respVO.setSource(new LinkedList<>()); // The amount of data is unknown, so use a LinkedList: it uses a bit more memory but avoids resize-and-copy
while (sqlRowSet.next()) {
Map<String, Object> data = Maps.newHashMapWithExpectedSize(columnNames.length);
for (String columnName : columnNames) {
data.put(columnName, sqlRowSet.getObject(columnName));
}
respVO.getSource().add(data);
}
return respVO;
}
|
@Test
public void testGetDataBySQL() {
// Prepare parameters
String sql = "SELECT id, name FROM system_users";
// Mock the method
SqlRowSet sqlRowSet = mock(SqlRowSet.class);
when(jdbcTemplate.queryForRowSet(eq(sql))).thenReturn(sqlRowSet);
// Mock the metadata
SqlRowSetMetaData metaData = mock(SqlRowSetMetaData.class);
when(sqlRowSet.getMetaData()).thenReturn(metaData);
when(metaData.getColumnNames()).thenReturn(new String[]{"id", "name"});
// Mock the data rows
when(sqlRowSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
when(sqlRowSet.getObject("id")).thenReturn(1L).thenReturn(2L);
when(sqlRowSet.getObject("name")).thenReturn("芋道源码").thenReturn("芋道");
// Invoke
GoViewDataRespVO dataBySQL = goViewDataService.getDataBySQL(sql);
// Assert
assertEquals(Arrays.asList("id", "name"), dataBySQL.getDimensions());
assertEquals(2, dataBySQL.getDimensions().size());
assertEquals(2, dataBySQL.getSource().get(0).size());
assertEquals(1L, dataBySQL.getSource().get(0).get("id"));
assertEquals("芋道源码", dataBySQL.getSource().get(0).get("name"));
assertEquals(2, dataBySQL.getSource().get(1).size());
assertEquals(2L, dataBySQL.getSource().get(1).get("id"));
assertEquals("芋道", dataBySQL.getSource().get(1).get("name"));
}
|
private void sendResponse(Response response) {
try {
((GrpcConnection) this.currentConnection).sendResponse(response);
} catch (Exception e) {
LOGGER.error("[{}]Error to send ack response, ackId->{}", this.currentConnection.getConnectionId(),
response.getRequestId());
}
}
|
@Test
void testBindRequestStreamOnNextNoRequest()
throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
BiRequestStreamGrpc.BiRequestStreamStub stub = mock(BiRequestStreamGrpc.BiRequestStreamStub.class);
GrpcConnection grpcConnection = mock(GrpcConnection.class);
when(stub.requestBiStream(any())).thenAnswer((Answer<StreamObserver<Payload>>) invocationOnMock -> {
((StreamObserver<Payload>) invocationOnMock.getArgument(0)).onNext(GrpcUtils.convert(new ConnectResetRequest()));
return null;
});
grpcClient.registerServerRequestHandler((request, connection) -> null);
setCurrentConnection(grpcConnection, grpcClient);
invokeBindRequestStream(grpcClient, stub, grpcConnection);
verify(grpcConnection, never()).sendResponse(any(Response.class));
}
|
@Override
public void reportCompletedCheckpoint(CompletedCheckpointStats completed) {
statsReadWriteLock.lock();
try {
latestCompletedCheckpoint = completed;
counts.incrementCompletedCheckpoints();
history.replacePendingCheckpointById(completed);
summary.updateSummary(completed);
dirty = true;
logCheckpointStatistics(completed);
if (checkpointStatsListener != null) {
checkpointStatsListener.onCompletedCheckpoint();
}
} finally {
statsReadWriteLock.unlock();
}
}
|
@Test
void testCheckpointStatsListenerOnCompletedCheckpoint() {
testCheckpointStatsListener(
(checkpointStatsTracker, pendingCheckpointStats) ->
checkpointStatsTracker.reportCompletedCheckpoint(
pendingCheckpointStats.toCompletedCheckpointStats(
"random-external-pointer")),
1,
0);
}
|
public static BigDecimal pow(Number number, int n) {
return pow(toBigDecimal(number), n);
}
|
@Test
public void testPowNegative() {
BigDecimal number = new BigDecimal("2.5");
int exponent = -2;
BigDecimal expected = new BigDecimal("0.16");
assertEquals(expected, NumberUtil.pow(number, exponent));
}
|
void generate(MessageSpec message) throws Exception {
if (message.struct().versions().contains(Short.MAX_VALUE)) {
throw new RuntimeException("Message " + message.name() + " does " +
"not specify a maximum version.");
}
structRegistry.register(message);
schemaGenerator.generateSchemas(message);
messageFlexibleVersions = message.flexibleVersions();
generateClass(Optional.of(message),
message.dataClassName(),
message.struct(),
message.struct().versions());
headerGenerator.generate();
}
|
@Test
public void testNullDefaultsWithDeprecatedVersions() throws Exception {
MessageSpec testMessageSpec = MessageGenerator.JSON_SERDE.readValue(String.join("", Arrays.asList(
"{",
" \"type\": \"request\",",
" \"name\": \"FooBar\",",
" \"validVersions\": \"0-4\",",
" \"deprecatedVersions\": \"0-1\",",
" \"flexibleVersions\": \"none\",",
" \"fields\": [",
" { \"name\": \"field1\", \"type\": \"int32\", \"versions\": \"0+\" },",
" { \"name\": \"field2\", \"type\": \"[]TestStruct\", \"versions\": \"1+\", ",
" \"nullableVersions\": \"1+\", \"default\": \"null\", \"fields\": [",
" { \"name\": \"field1\", \"type\": \"int32\", \"versions\": \"0+\" }",
" ]},",
" { \"name\": \"field3\", \"type\": \"bytes\", \"versions\": \"2+\", ",
" \"nullableVersions\": \"2+\", \"default\": \"null\" }",
" ]",
"}")), MessageSpec.class);
new MessageDataGenerator("org.apache.kafka.common.message").generate(testMessageSpec);
}
|
@Override
public void setKeyboardTheme(@NonNull KeyboardTheme theme) {
super.setKeyboardTheme(theme);
mExtensionKeyboardYDismissPoint = getThemedKeyboardDimens().getNormalKeyHeight();
mGestureDrawingHelper =
GestureTypingPathDrawHelper.create(
this::invalidate,
GestureTrailTheme.fromThemeResource(
getContext(),
theme.getPackageContext(),
theme.getResourceMapping(),
theme.getGestureTrailThemeResId()));
}
|
@Test
public void testDisregardIfSameTheme() {
final KeyboardThemeFactory keyboardThemeFactory =
AnyApplication.getKeyboardThemeFactory(getApplicationContext());
Assert.assertTrue(mThemeWasSet);
mThemeWasSet = false;
mViewUnderTest.setKeyboardTheme(keyboardThemeFactory.getAllAddOns().get(2));
Assert.assertTrue(mThemeWasSet);
mThemeWasSet = false;
mViewUnderTest.setKeyboardTheme(keyboardThemeFactory.getAllAddOns().get(2));
Assert.assertFalse(mThemeWasSet);
mViewUnderTest.setKeyboardTheme(keyboardThemeFactory.getAllAddOns().get(3));
Assert.assertTrue(mThemeWasSet);
}
|
@Override
public void transform(Message message, DataType fromType, DataType toType) {
if (message.getHeaders().containsKey(Ddb2Constants.ITEM) ||
message.getHeaders().containsKey(Ddb2Constants.KEY)) {
return;
}
JsonNode jsonBody = getBodyAsJsonNode(message);
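// Resolve the operation: the default comes from the JSON body (falling back to PutItem),
// an exchange property overrides it, and an explicit message header takes final precedence.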
String operation
= Optional.ofNullable(jsonBody.get("operation")).map(JsonNode::asText).orElse(Ddb2Operations.PutItem.name());
if (message.getExchange().hasProperties() && message.getExchange().getProperty("operation", String.class) != null) {
operation = message.getExchange().getProperty("operation", String.class);
}
if (message.getHeaders().containsKey(Ddb2Constants.OPERATION)) {
operation = message.getHeader(Ddb2Constants.OPERATION, Ddb2Operations.class).name();
}
JsonNode key = jsonBody.get("key");
JsonNode item = jsonBody.get("item");
Map<String, Object> keyProps;
if (key != null) {
keyProps = dataFormat.getObjectMapper().convertValue(key, new TypeReference<>() {
});
} else {
keyProps = dataFormat.getObjectMapper().convertValue(jsonBody, new TypeReference<>() {
});
}
Map<String, Object> itemProps;
if (item != null) {
itemProps = dataFormat.getObjectMapper().convertValue(item, new TypeReference<>() {
});
} else {
itemProps = keyProps;
}
final Map<String, AttributeValue> keyMap = getAttributeValueMap(keyProps);
switch (Ddb2Operations.valueOf(operation)) {
case PutItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem);
message.setHeader(Ddb2Constants.ITEM, getAttributeValueMap(itemProps));
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
break;
case UpdateItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.UpdateItem);
message.setHeader(Ddb2Constants.KEY, keyMap);
message.setHeader(Ddb2Constants.UPDATE_VALUES, getAttributeValueUpdateMap(itemProps));
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_NEW.toString(), message);
break;
case DeleteItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.DeleteItem);
message.setHeader(Ddb2Constants.KEY, keyMap);
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
break;
default:
throw new UnsupportedOperationException(String.format("Unsupported operation '%s'", operation));
}
}
|
@Test
void shouldFailForUnsupportedOperation() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.getMessage().setBody(Json.mapper().readTree("{}"));
exchange.setProperty("operation", Ddb2Operations.BatchGetItems.name());
Assertions.assertThrows(UnsupportedOperationException.class, () -> transformer.transform(exchange.getMessage(),
DataType.ANY, new DataType(AWS_2_DDB_APPLICATION_JSON_TRANSFORMER)));
}
|
@Override
public String toString() {
StringBuilder builder = new StringBuilder("AfterEach.inOrder(");
Joiner.on(", ").appendTo(builder, subTriggers);
builder.append(")");
return builder.toString();
}
|
@Test
public void testToString() {
TriggerStateMachine trigger =
AfterEachStateMachine.inOrder(
StubTriggerStateMachine.named("t1"),
StubTriggerStateMachine.named("t2"),
StubTriggerStateMachine.named("t3"));
assertEquals("AfterEach.inOrder(t1, t2, t3)", trigger.toString());
}
|
@Override
public void onConnected(Connection connection) {
connected = true;
LogUtils.NAMING_LOGGER.info("Grpc connection connect");
}
|
@Test
void testOnConnected() {
assertFalse(redoService.isConnected());
redoService.onConnected(new TestConnection(new RpcClient.ServerInfo()));
assertTrue(redoService.isConnected());
}
|
@Override
public MastershipRole getRole(DeviceId deviceId) {
checkPermission(DEVICE_READ);
checkNotNull(deviceId, DEVICE_ID_NULL);
return mastershipService.getLocalRole(deviceId);
}
|
@Test
public void getRole() {
connectDevice(DID1, SW1);
assertEquals("incorrect role", MastershipRole.MASTER, service.getRole(DID1));
}
|
public static LinkedHashSet<Class<?>> listBeansRecursiveInclusive(Class<?> beanClass) {
return listBeansRecursiveInclusive(beanClass, new LinkedHashSet<>());
}
|
@Test
public void listBeansRecursiveInclusiveTest() {
LinkedHashSet<Class<?>> classes = TypeUtils.listBeansRecursiveInclusive(BeanA.class);
// System.out.println(classes);
assertEquals(classes.size(), 2);
}
|
public static List<String> splitStatementsAcrossBlocks(CharSequence string) {
List<String> statements = codeAwareSplitOnChar(string, false, true, ';', '\n', '{', '}');
return statements.stream()
.filter(stmt -> !(stmt.isEmpty()))
.filter(stmt -> !(stmt.startsWith("//")))
.collect(Collectors.toList());
}
|
@Test
public void splitStatementsAcrossBlocksFor() {
String text = "for (int i = 0; i < 1; i++) {\n" +
" $fact.value1 = 2;\n" +
" drools.update($fact);\n" +
"}";
List<String> statements = splitStatementsAcrossBlocks(text);
assertThat(statements.get(0)).isEqualTo("for (int i = 0; i < 1; i++)");
assertThat(statements.get(1)).isEqualTo("$fact.value1 = 2");
assertThat(statements.get(2)).isEqualTo("drools.update($fact)");
}
|
@Override
public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(
String groupId,
Map<TopicPartition, OffsetAndMetadata> offsets,
AlterConsumerGroupOffsetsOptions options
) {
SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future =
AlterConsumerGroupOffsetsHandler.newFuture(groupId);
AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)));
}
|
@Test
public void testAlterConsumerGroupOffsetsRetriableErrors() throws Exception {
// Retriable errors should be retried
final TopicPartition tp1 = new TopicPartition("foo", 0);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
prepareOffsetCommitResponse(tp1, Errors.COORDINATOR_NOT_AVAILABLE));
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
prepareOffsetCommitResponse(tp1, Errors.COORDINATOR_LOAD_IN_PROGRESS));
env.kafkaClient().prepareResponse(
prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR));
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
prepareOffsetCommitResponse(tp1, Errors.REBALANCE_IN_PROGRESS));
env.kafkaClient().prepareResponse(
prepareOffsetCommitResponse(tp1, Errors.NONE));
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
offsets.put(tp1, new OffsetAndMetadata(123L));
final AlterConsumerGroupOffsetsResult result1 = env.adminClient()
.alterConsumerGroupOffsets(GROUP_ID, offsets);
assertNull(result1.all().get());
assertNull(result1.partitionResult(tp1).get());
}
}
|
@Override public SlotAssignmentResult ensure(long key1, long key2) {
assert key1 != unassignedSentinel : "ensure() called with key1 == nullKey1 (" + unassignedSentinel + ')';
return super.ensure0(key1, key2);
}
|
@Test
public void testCursor_advance() {
hsa.ensure(randomKey(), randomKey());
HashSlotCursor16byteKey cursor = hsa.cursor();
assertTrue(cursor.advance());
assertFalse(cursor.advance());
}
|
public static PDImageXObject createFromStream(PDDocument document, InputStream stream)
throws IOException
{
return createFromByteArray(document, stream.readAllBytes());
}
|
@Test
void testCreateFromStreamCMYK() throws IOException
{
PDDocument document = new PDDocument();
InputStream stream = JPEGFactoryTest.class.getResourceAsStream("jpegcmyk.jpg");
PDImageXObject ximage = JPEGFactory.createFromStream(document, stream);
validate(ximage, 8, 343, 287, "jpg", PDDeviceCMYK.INSTANCE.getName());
doWritePDF(document, ximage, TESTRESULTSDIR, "jpegcmykstream.pdf");
checkJpegStream(TESTRESULTSDIR, "jpegcmykstream.pdf", JPEGFactoryTest.class.getResourceAsStream("jpegcmyk.jpg"));
}
|
public Set<String> allPermissions() {
return allPermissions;
}
|
@Test
public void testAllPermissions() throws Exception {
assertThat(permissions.allPermissions())
.containsOnlyElementsOf(restPermissions.permissions()
.stream()
.map(Permission::permission)
.collect(Collectors.toSet()));
}
|
public Map<ExecNode<?>, Integer> calculate() {
createTopologyGraph();
// some boundaries node may be connected from the outside of the sub-graph,
// which we cannot deduce by the above process,
// so we need to check each pair of boundaries and see if they're related
dealWithPossiblyRelatedBoundaries();
Map<ExecNode<?>, Integer> distances = graph.calculateMaximumDistance();
// extract only the distances of the boundaries and renumber the distances
// so that the smallest value starts from 0
// the smaller the distance, the higher the priority
Set<Integer> boundaryDistanceSet = new HashSet<>();
for (ExecNode<?> boundary : boundaries) {
boundaryDistanceSet.add(distances.getOrDefault(boundary, 0));
}
List<Integer> boundaryDistanceList = new ArrayList<>(boundaryDistanceSet);
Collections.sort(boundaryDistanceList);
Map<ExecNode<?>, Integer> results = new HashMap<>();
for (ExecNode<?> boundary : boundaries) {
results.put(boundary, boundaryDistanceList.indexOf(distances.get(boundary)));
}
return results;
}
|
@Test
void testCalculateInputOrder() {
// P = InputProperty.DamBehavior.PIPELINED, B = InputProperty.DamBehavior.BLOCKING
// P1 = PIPELINED + priority 1
//
// 0 -(P1)-> 3 -(B0)-\
// 6 -(B0)-\
// /-(P1)-/ \
// 1 -(P1)-> 4 8
// \-(B0)-\ /
// 7 -(P1)-/
// 2 -(P1)-> 5 -(P1)-/
TestingBatchExecNode[] nodes = new TestingBatchExecNode[9];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
}
nodes[3].addInput(nodes[0], InputProperty.builder().priority(1).build());
nodes[4].addInput(nodes[1], InputProperty.builder().priority(1).build());
nodes[5].addInput(nodes[2], InputProperty.builder().priority(1).build());
nodes[6].addInput(
nodes[3],
InputProperty.builder()
.damBehavior(InputProperty.DamBehavior.BLOCKING)
.priority(0)
.build());
nodes[6].addInput(nodes[4], InputProperty.builder().priority(1).build());
nodes[7].addInput(
nodes[4],
InputProperty.builder()
.damBehavior(InputProperty.DamBehavior.BLOCKING)
.priority(0)
.build());
nodes[7].addInput(nodes[5], InputProperty.builder().priority(1).build());
nodes[8].addInput(
nodes[6],
InputProperty.builder()
.damBehavior(InputProperty.DamBehavior.BLOCKING)
.priority(0)
.build());
nodes[8].addInput(nodes[7], InputProperty.builder().priority(1).build());
InputOrderCalculator calculator =
new InputOrderCalculator(
nodes[8],
new HashSet<>(Arrays.asList(nodes[1], nodes[3], nodes[5])),
InputProperty.DamBehavior.BLOCKING);
Map<ExecNode<?>, Integer> result = calculator.calculate();
assertThat(result).hasSize(3);
assertThat(result.get(nodes[3]).intValue()).isEqualTo(0);
assertThat(result.get(nodes[1]).intValue()).isEqualTo(1);
assertThat(result.get(nodes[5]).intValue()).isEqualTo(2);
}
|
@Override
public RexNode visit(CallExpression call) {
boolean isBatchMode = unwrapContext(relBuilder).isBatchMode();
for (CallExpressionConvertRule rule : getFunctionConvertChain(isBatchMode)) {
Optional<RexNode> converted = rule.convert(call, newFunctionContext());
if (converted.isPresent()) {
return converted.get();
}
}
throw new RuntimeException("Unknown call expression: " + call);
}
|
@Test
void testIntervalYearMonth() {
Period value = Period.of(999, 3, 1);
RexNode rex = converter.visit(valueLiteral(value));
assertThat(((RexLiteral) rex).getValueAs(BigDecimal.class))
.isEqualTo(BigDecimal.valueOf(value.toTotalMonths()));
// TODO planner ignores the precision
assertThat(rex.getType().getSqlTypeName()).isEqualTo(SqlTypeName.INTERVAL_YEAR_MONTH);
assertThat(rex.getType().getPrecision())
.isEqualTo(2); // year precision, should actually be 3
}
|
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
@Test(description = "Parameter with ref")
public void testParameterWithRef() {
Components components = new Components();
components.addParameters("id", new Parameter()
.description("Id Description")
.schema(new IntegerSchema())
.in(ParameterIn.QUERY.toString())
.example(1)
.required(true));
OpenAPI oas = new OpenAPI()
.info(new Info().description("info"))
.components(components);
Reader reader = new Reader(oas);
OpenAPI openAPI = reader.read(RefParameterResource.class);
String yaml = "openapi: 3.0.1\n" +
"info:\n" +
" description: info\n" +
"paths:\n" +
" /:\n" +
" get:\n" +
" summary: Simple get operation\n" +
" description: Defines a simple get operation with a payload complex input object\n" +
" operationId: sendPayload\n" +
" parameters:\n" +
" - $ref: '#/components/parameters/id'\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n" +
" deprecated: true\n" +
"components:\n" +
" parameters: \n" +
" id:\n" +
" in: query\n" +
" description: Id Description\n" +
" required: true\n" +
" schema:\n" +
" type: integer\n" +
" format: int32\n" +
" example: 1\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
public static <T> AggregateOperation1<T, Set<T>, Set<T>> toSet() {
return toCollection(HashSet::new);
}
|
@Test
public void when_toSet() {
validateOpWithoutDeduct(
toSet(), identity(), 1, 2, singleton(1), new HashSet<>(asList(1, 2)), new HashSet<>(asList(1, 2)));
}
|
public static BitSetCoder of() {
return INSTANCE;
}
|
@Test
public void testEncodedTypeDescriptor() throws Exception {
assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(TypeDescriptor.of(BitSet.class)));
}
|
@Override
public long getPendingCount() {
try (DbSession dbSession = dbClient.openSession(false)) {
return dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.PENDING);
}
}
|
@Test
public void count_Pending_from_database() {
when(getDbClient().ceQueueDao().countByStatus(any(), eq(CeQueueDto.Status.PENDING))).thenReturn(42);
assertThat(underTest.getPendingCount()).isEqualTo(42);
}
|
public static boolean hasCollisionChars(String topic) {
return topic.contains("_") || topic.contains(".");
}
|
@Test
public void testTopicHasCollisionChars() {
List<String> falseTopics = Arrays.asList("start", "end", "middle", "many");
List<String> trueTopics = Arrays.asList(
".start", "end.", "mid.dle", ".ma.ny.",
"_start", "end_", "mid_dle", "_ma_ny."
);
for (String topic : falseTopics)
assertFalse(Topic.hasCollisionChars(topic));
for (String topic : trueTopics)
assertTrue(Topic.hasCollisionChars(topic));
}
|
public double vincentyDistance(LatLong other) {
return LatLongUtils.vincentyDistance(this, other);
}
|
@Test
public void vincentyDistance_southPoleToNorthPole_returnTwiceOfDistanceFromPoleToEquator() {
// Calculate the distance between the south pole and the north pole
LatLong northPole = new LatLong(90d, 0d);
// Check if the distance from pole to pole works as well in the vincentyDistance
LatLong southPole = new LatLong(-90d, 0d);
double vincenty = LatLongUtils.vincentyDistance(southPole, northPole);
assertEquals(2 * DISTANCE_POLE_TO_EQUATOR, vincenty, 1);
}
|
public StreamGraph generate() {
streamGraph =
new StreamGraph(
configuration, executionConfig, checkpointConfig, savepointRestoreSettings);
shouldExecuteInBatchMode = shouldExecuteInBatchMode();
configureStreamGraph(streamGraph);
alreadyTransformed = new IdentityHashMap<>();
for (Transformation<?> transformation : transformations) {
transform(transformation);
}
streamGraph.setSlotSharingGroupResource(slotSharingGroupResources);
setFineGrainedGlobalStreamExchangeMode(streamGraph);
LineageGraph lineageGraph = LineageGraphUtils.convertToLineageGraph(transformations);
streamGraph.setLineageGraph(lineageGraph);
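// If any in-edge of a node requires aligned checkpoints, disable unaligned checkpoints on all of that node's in-edges.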
for (StreamNode node : streamGraph.getStreamNodes()) {
if (node.getInEdges().stream().anyMatch(this::shouldDisableUnalignedCheckpointing)) {
for (StreamEdge edge : node.getInEdges()) {
edge.setSupportsUnalignedCheckpoints(false);
}
}
}
final StreamGraph builtStreamGraph = streamGraph;
alreadyTransformed.clear();
alreadyTransformed = null;
streamGraph = null;
return builtStreamGraph;
}
|
@Test
void testSettingSavepointRestoreSettings() {
Configuration config = new Configuration();
config.set(StateRecoveryOptions.SAVEPOINT_PATH, "/tmp/savepoint");
final StreamGraph streamGraph =
new StreamGraphGenerator(
Collections.emptyList(),
new ExecutionConfig(),
new CheckpointConfig(),
config)
.generate();
SavepointRestoreSettings savepointRestoreSettings =
streamGraph.getSavepointRestoreSettings();
assertThat(savepointRestoreSettings)
.isEqualTo(SavepointRestoreSettings.forPath("/tmp/savepoint"));
}
|
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#updateReqVO.id")
public void updateMailAccount(MailAccountSaveReqVO updateReqVO) {
// Validate that the mail account exists
validateMailAccountExists(updateReqVO.getId());
// Update
MailAccountDO updateObj = BeanUtils.toBean(updateReqVO, MailAccountDO.class);
mailAccountMapper.updateById(updateObj);
}
|
@Test
public void testUpdateMailAccount_notExists() {
// Prepare parameters
MailAccountSaveReqVO reqVO = randomPojo(MailAccountSaveReqVO.class);
// Invoke and assert that the exception is thrown
assertServiceException(() -> mailAccountService.updateMailAccount(reqVO), MAIL_ACCOUNT_NOT_EXISTS);
}
|
@Override
public V fetch(final K key, final long time) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
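// Query each underlying store in order and return the first non-null value found.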
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final V result = windowStore.fetch(key, time);
if (result != null) {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return null;
}
|
@Test
public void emptyIteratorPeekNextKeyShouldThrowNoSuchElementException() {
final StateStoreProvider storeProvider = mock(StateStoreProvider.class);
when(storeProvider.stores(anyString(), any())).thenReturn(emptyList());
final CompositeReadOnlyWindowStore<Object, Object> store = new CompositeReadOnlyWindowStore<>(
storeProvider,
QueryableStoreTypes.windowStore(),
"foo"
);
try (final WindowStoreIterator<Object> windowStoreIterator =
store.fetch("key", ofEpochMilli(1), ofEpochMilli(10))) {
assertThrows(NoSuchElementException.class, windowStoreIterator::peekNextKey);
}
}
|
RuleChange findChangesAndUpdateRule(RulesDefinition.Rule ruleDef, RuleDto ruleDto) {
RuleChange ruleChange = new RuleChange(ruleDto);
boolean ruleMerged = mergeRule(ruleDef, ruleDto, ruleChange);
boolean debtDefinitionsMerged = mergeDebtDefinitions(ruleDef, ruleDto);
boolean tagsMerged = mergeTags(ruleDef, ruleDto);
boolean securityStandardsMerged = mergeSecurityStandards(ruleDef, ruleDto);
boolean educationPrinciplesMerged = mergeEducationPrinciples(ruleDef, ruleDto);
ruleChange.ruleDefinitionChanged = ruleMerged || debtDefinitionsMerged || tagsMerged || securityStandardsMerged || educationPrinciplesMerged;
return ruleChange;
}
|
@Test
public void findChangesAndUpdateRule_whenNoCleanCodeTaxonomyChanged_thenPluginRuleChangeShouldBeNull() {
RulesDefinition.Rule ruleDef = getDefaultRuleDef();
when(ruleDef.cleanCodeAttribute()).thenReturn(CleanCodeAttribute.COMPLETE);
Map<SoftwareQuality, Severity> newImpacts = Map.of(SoftwareQuality.MAINTAINABILITY, Severity.LOW);
when(ruleDef.defaultImpacts()).thenReturn(newImpacts);
RuleDto rule = getDefaultRuleDto();
when(rule.getCleanCodeAttribute()).thenReturn(CleanCodeAttribute.COMPLETE);
Set<ImpactDto> oldImpacts = Set.of(new ImpactDto(SoftwareQuality.MAINTAINABILITY, Severity.LOW));
when(rule.getDefaultImpacts()).thenReturn(oldImpacts);
StartupRuleUpdater.RuleChange changesAndUpdateRule = underTest.findChangesAndUpdateRule(ruleDef, rule);
assertTrue(changesAndUpdateRule.hasRuleDefinitionChanged());
assertThat(changesAndUpdateRule.getPluginRuleUpdate()).isNull();
}
|
public static List<FileNode> listDirByPath(String path) {
List<FileNode> dirList = new ArrayList<>();
File logDir = new File(path);
if (logDir.isFile()) {
throw new BusException(StrUtil.format("Directory path {} is a file.", path));
}
File[] files = logDir.listFiles();
if (Asserts.isNull(files)) {
throw new BusException(StrUtil.format("Directory path {} does not exist.", path));
}
for (File file : files) {
FileNode fileNode = new FileNode(file.getName(), file.isDirectory(), 0, file.getAbsolutePath());
if (!fileNode.isDir()) {
fileNode.setSize(file.length());
}
dirList.add(fileNode);
}
return dirList;
}
|
@Ignore
@Test
public void testListDirByPath() {
List<FileNode> dirList = DirUtil.listDirByPath(DirConstant.getRootLogsPath());
Assertions.assertThat(dirList).isNotNull();
}
|
public Optional<Instant> getTimestamp() {
return Optional.ofNullable(timestamp);
}
|
@Test
public void testFilePropertiesSpec_timestampSpecIso8601() throws JsonProcessingException {
String data = "timestamp: 2020-06-08T14:54:36+00:00";
FilePropertiesSpec parsed = mapper.readValue(data, FilePropertiesSpec.class);
assertThat(parsed.getTimestamp().get()).isEqualTo(Instant.parse("2020-06-08T14:54:36Z"));
}
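// Illustrative sketch (not part of FilePropertiesSpec): the test above relies on Jackson to parse the ISO-8601
// timestamp, but the same "+00:00" offset handling can be reproduced with plain java.time. Note that
// Instant.parse only accepts the trailing 'Z' form, so offset variants go through OffsetDateTime first.
static java.time.Instant parseIso8601ToInstant(String text) {
    return java.time.OffsetDateTime.parse(text).toInstant();
}
// parseIso8601ToInstant("2020-06-08T14:54:36+00:00").equals(java.time.Instant.parse("2020-06-08T14:54:36Z")) is true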
|
static boolean objectIsAcyclic(Object object)
{
if (object == null)
{
return true;
}
Class<?> klass = object.getClass();
if (isPrimitiveClass(klass))
{
return true;
}
else if (isComplexClass(klass))
{
DataComplex complex = (DataComplex) object;
try
{
Data.traverse(complex, new TraverseCallback() {});
return true;
}
catch (IOException e)
{
return false;
}
}
else
{
throw new IllegalStateException("Object of unknown type: " + object);
}
}
|
@Test
public void testDataListNoCyclesOnAdd()
{
// test with DataList
DataList a = new DataList();
DataList b = new DataList();
DataList c = new DataList();
a.add(b);
a.add(c);
assertTrue(Data.objectIsAcyclic(a));
DataList d = new DataList();
b.add(d);
c.add(d);
assertTrue(Data.objectIsAcyclic(a));
DataList e = new DataList();
d.add(e);
assertTrue(Data.objectIsAcyclic(a));
// cyclic due to edge from e to e.
addAndExpectIllegalArgumentException(e, e);
assertTrue(Data.objectIsAcyclic(a));
// cyclic due to edge from e to d.
addAndExpectIllegalArgumentException(e, d);
assertTrue(Data.objectIsAcyclic(a));
// cyclic due to edge from e to c
addAndExpectIllegalArgumentException(e, c);
assertTrue(Data.objectIsAcyclic(a));
// cyclic due to edge from e to b
addAndExpectIllegalArgumentException(e, b);
assertTrue(Data.objectIsAcyclic(a));
// cyclic due to edge from e to a
addAndExpectIllegalArgumentException(e, a);
assertTrue(Data.objectIsAcyclic(a));
}
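// Illustrative sketch only (not the Data.traverse implementation): the acyclicity the test above relies on can
// be checked with a depth-first walk that tracks the nodes currently on the traversal path. The helper name
// isAcyclic and the identity-set usage below are hypothetical, for illustration.
static boolean isAcyclic(java.util.List<?> node, java.util.Set<Object> onPath) {
    if (!onPath.add(node)) {
        return false; // node is already on the current path, so there is a cycle
    }
    for (Object child : node) {
        if (child instanceof java.util.List && !isAcyclic((java.util.List<?>) child, onPath)) {
            return false;
        }
    }
    onPath.remove(node); // backtrack before returning to the caller
    return true;
}
// Usage: isAcyclic(list, java.util.Collections.newSetFromMap(new java.util.IdentityHashMap<>()))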
|
public static URI getInfoServer(InetSocketAddress namenodeAddr,
Configuration conf, String scheme) throws IOException {
String[] suffixes = null;
if (namenodeAddr != null) {
// if non-default namenode, try reverse look up
// the nameServiceID if it is available
suffixes = getSuffixIDs(conf, namenodeAddr,
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
}
String authority;
if ("http".equals(scheme)) {
authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
} else if ("https".equals(scheme)) {
authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
} else {
throw new IllegalArgumentException("Invalid scheme:" + scheme);
}
if (namenodeAddr != null) {
authority = substituteForWildcardAddress(authority,
namenodeAddr.getHostName());
}
return URI.create(scheme + "://" + authority);
}
|
@Test
public void testGetInfoServer() throws IOException, URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
assertEquals(new URI("https", null, "0.0.0.0",
DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
URI httpport = DFSUtil.getInfoServer(null, conf, "http");
assertEquals(new URI("http", null, "0.0.0.0",
DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
"localhost", 8020), conf, "http");
assertEquals(
URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
httpAddress);
}
|
@Override
public Map<String, List<String>> getRequestTag(String path, String methodName, Map<String, List<String>> headers,
Map<String, List<String>> parameters) {
Set<String> injectTags = configService.getInjectTags();
if (CollectionUtils.isEmpty(injectTags)) {
            // The inject tags are empty, which means there are no dyeing (lane) rules, so return directly
LOGGER.fine("Lane tags are empty.");
return Collections.emptyMap();
}
        // Tags transparently passed through from upstream
Map<String, List<String>> tags = getRequestTag(headers, injectTags);
        // Tags produced by this request's dyeing (lane matching)
Map<String, List<String>> laneTag = laneService.getLaneByParameterList(path, methodName, headers, parameters);
if (CollectionUtils.isEmpty(laneTag)) {
LOGGER.fine("Lane is empty.");
return tags;
}
        // If a tag passed through from upstream has the same key as one produced by this dyeing,
        // the upstream value takes precedence
laneTag.forEach(tags::putIfAbsent);
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("Lane is " + tags);
}
return tags;
}
|
@Test
public void testGetRequestTag() {
// Test matchTags as null
configService.setReturnEmptyWhenGetMatchTags(true);
Map<String, List<String>> requestTag = handler.getRequestTag("", "", null, null);
Assert.assertEquals(requestTag, Collections.emptyMap());
// Test getLane returns null
configService.setReturnEmptyWhenGetMatchTags(false);
laneService.setReturnEmpty(true);
Map<String, List<String>> headers = new HashMap<>();
headers.put("bar", Collections.singletonList("bar1"));
headers.put("foo", Collections.singletonList("foo1"));
requestTag = handler.getRequestTag("", "", headers, null);
Assert.assertEquals(2, requestTag.size());
Assert.assertEquals("bar1", requestTag.get("bar").get(0));
Assert.assertEquals("foo1", requestTag.get("foo").get(0));
// Test getLane is not empty
configService.setReturnEmptyWhenGetMatchTags(false);
laneService.setReturnEmpty(false);
requestTag = handler.getRequestTag("", "", headers, null);
Assert.assertEquals(3, requestTag.size());
Assert.assertEquals("bar1", requestTag.get("bar").get(0));
Assert.assertEquals("foo1", requestTag.get("foo").get(0));
Assert.assertEquals("flag1", requestTag.get("sermant-flag").get(0));
}
|
@Override
public boolean wasNull() throws SQLException {
return mergeResultSet.wasNull();
}
|
@Test
void assertWasNull() throws SQLException {
assertFalse(shardingSphereResultSet.wasNull());
}
|
@Override
public Map<String, InterpreterClient> restore() throws IOException {
Map<String, InterpreterClient> clients = new HashMap<>();
File[] recoveryFiles = recoveryDir.listFiles(file -> file.getName().endsWith(".recovery"));
for (File recoveryFile : recoveryFiles) {
String fileName = recoveryFile.getName();
String interpreterSettingName = fileName.substring(0,
fileName.length() - ".recovery".length());
String recoveryData = org.apache.zeppelin.util.FileUtils.readFromFile(recoveryFile);
clients.putAll(RecoveryUtils.restoreFromRecoveryData(
recoveryData, interpreterSettingName, interpreterSettingManager, zConf));
}
return clients;
}
|
@Test
void testSingleInterpreterProcess() throws InterpreterException, IOException {
InterpreterSetting interpreterSetting = interpreterSettingManager.getByName("test");
interpreterSetting.getOption().setPerUser(InterpreterOption.SHARED);
Interpreter interpreter1 = interpreterSetting.getDefaultInterpreter("user1", note1Id);
RemoteInterpreter remoteInterpreter1 = (RemoteInterpreter) interpreter1;
InterpreterContext context1 = InterpreterContext.builder()
.setNoteId("noteId")
.setParagraphId("paragraphId")
.build();
remoteInterpreter1.interpret("hello", context1);
assertEquals(1, interpreterSettingManager.getRecoveryStorage().restore().size());
interpreterSetting.close();
assertEquals(0, interpreterSettingManager.getRecoveryStorage().restore().size());
}
|
public Path back() {
int size = back.size();
if(size > 1) {
forward.add(back.get(size - 1));
Path p = back.get(size - 2);
//delete the fetched path - otherwise we produce a loop
back.remove(size - 1);
back.remove(size - 2);
return p;
}
else if(1 == size) {
forward.add(back.get(0));
return back.get(0);
}
return null;
}
|
@Test
public void testBack() {
Navigation n = new Navigation();
assertNull(n.back());
n.add(new Path("a", EnumSet.of(Path.Type.directory)));
n.add(new Path("b", EnumSet.of(Path.Type.directory)));
assertEquals("a", n.back().getName());
assertEquals("b", n.forward().getName());
}
|
@Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testRetainAll() {
queue.retainAll(emptyList);
}
|
@Override
public void execute() throws CommandExecuteException, ConfigCheckException {
if (abstractCommandArgs.isDecrypt()) {
log.warn(
"When both --decrypt and --encrypt are specified, only --encrypt will take effect");
}
String encryptConfigFile = abstractCommandArgs.getConfigFile();
Path configPath = Paths.get(encryptConfigFile);
checkConfigExist(configPath);
Config config =
ConfigFactory.parseFile(configPath.toFile())
.resolve(ConfigResolveOptions.defaults().setAllowUnresolved(true));
if (abstractCommandArgs.getVariables() != null) {
abstractCommandArgs.getVariables().stream()
.filter(Objects::nonNull)
.map(variable -> variable.split("=", 2))
.filter(pair -> pair.length == 2)
.forEach(pair -> System.setProperty(pair[0], pair[1]));
config =
config.resolveWith(
ConfigFactory.systemProperties(),
ConfigResolveOptions.defaults().setAllowUnresolved(true));
}
Config encryptConfig = ConfigShadeUtils.encryptConfig(config);
log.info(
"Encrypt config: \n{}",
encryptConfig
.root()
.render(ConfigRenderOptions.defaults().setOriginComments(false)));
}
|
@Test
public void testEncrypt() throws URISyntaxException {
TestCommandArgs testCommandArgs = new TestCommandArgs();
Path filePath = getFilePath("/origin.conf");
testCommandArgs.setEncrypt(true);
testCommandArgs.setConfigFile(filePath.toString());
ConfEncryptCommand confEncryptCommand = new ConfEncryptCommand(testCommandArgs);
confEncryptCommand.execute();
}
|
public MessageType convert(Class<? extends TBase<?, ?>> thriftClass) {
return convert(toStructType(thriftClass));
}
|
@Test
public void testLogicalTypeConvertion() throws Exception {
String expected = "message ParquetSchema {\n" + " required int32 test_i16 (INTEGER(16,true)) = 1;\n"
+ " required int32 test_i8 (INTEGER(8,true)) = 2;\n"
+ "}\n";
ThriftSchemaConverter schemaConverter = new ThriftSchemaConverter();
final MessageType converted = schemaConverter.convert(TestLogicalType.class);
assertEquals(MessageTypeParser.parseMessageType(expected), converted);
}
|
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
try {
return serializer.serialize(topic, value == null ? null : cast(value));
} catch (ClassCastException e) {
throw new DataException("Failed to serialize to " + typeName + " (was " + value.getClass() + "): ", e);
} catch (SerializationException e) {
throw new DataException("Failed to serialize to " + typeName + ": ", e);
}
}
|
@Test
public void testNullToBytes() {
assertNull(converter.fromConnectData(TOPIC, schema, null));
}
|
@Override
public Void handleResponse(Response response) throws IOException, UnexpectedBlobDigestException {
blobSizeListener.accept(response.getContentLength());
try (OutputStream outputStream =
new NotifyingOutputStream(destinationOutputStream, writtenByteCountListener)) {
BlobDescriptor receivedBlobDescriptor =
Digests.computeDigest(response.getBody(), outputStream);
if (!blobDigest.equals(receivedBlobDescriptor.getDigest())) {
throw new UnexpectedBlobDigestException(
"The pulled BLOB has digest '"
+ receivedBlobDescriptor.getDigest()
+ "', but the request digest was '"
+ blobDigest
+ "'");
}
}
return null;
}
|
@Test
public void testHandleResponse() throws IOException, UnexpectedBlobDigestException {
InputStream blobContent =
new ByteArrayInputStream("some BLOB content".getBytes(StandardCharsets.UTF_8));
DescriptorDigest testBlobDigest = Digests.computeDigest(blobContent).getDigest();
blobContent.reset();
Response mockResponse = Mockito.mock(Response.class);
Mockito.when(mockResponse.getContentLength()).thenReturn((long) "some BLOB content".length());
Mockito.when(mockResponse.getBody()).thenReturn(blobContent);
LongAdder byteCount = new LongAdder();
BlobPuller blobPuller =
new BlobPuller(
fakeRegistryEndpointRequestProperties,
testBlobDigest,
layerOutputStream,
size -> Assert.assertEquals("some BLOB content".length(), size.longValue()),
byteCount::add);
blobPuller.handleResponse(mockResponse);
Assert.assertEquals(
"some BLOB content",
new String(layerContentOutputStream.toByteArray(), StandardCharsets.UTF_8));
Assert.assertEquals(testBlobDigest, layerOutputStream.computeDigest().getDigest());
Assert.assertEquals("some BLOB content".length(), byteCount.sum());
}
|
@Override
public void toPB(EncryptionKeyPB pb, KeyMgr mgr) {
super.toPB(pb, mgr);
pb.type = EncryptionKeyTypePB.NORMAL_KEY;
pb.algorithm = algorithm;
if (encryptedKey == null && plainKey != null) {
// it's a plain master key
pb.plainKey = plainKey;
} else {
pb.encryptedKey = encryptedKey;
}
}
|
@Test
public void testToPB() {
EncryptionKeyPB pb = new EncryptionKeyPB();
KeyMgr mgr = new KeyMgr();
normalKey.toPB(pb, mgr);
assertEquals(normalKey.getId(), pb.id.longValue());
assertEquals(normalKey.getCreateTime(), pb.createTime.longValue());
assertEquals(normalKey.getAlgorithm(), pb.algorithm);
assertArrayEquals(normalKey.getEncryptedKey(), pb.encryptedKey);
}
|
@Override
public ParSeqBasedCompletionStage<Void> thenAccept(Consumer<? super T> action)
{
return nextStageByComposingTask(_task.flatMap("thenAccept", (t) -> Task.action(() -> action.accept(t))));
}
|
@Test
public void testThenAccept() throws Exception
{
Consumer<String> consumer = mock(Consumer.class);
finish(createTestStage(TESTVALUE1).thenAccept(consumer));
verify(consumer, times(1)).accept(TESTVALUE1);
}
|
public TransactionRole getTransactionRole() {
return transactionRole;
}
|
@Test
public void getTransactionRole() {
Assertions.assertEquals(nettyPoolKey.getTransactionRole(), RM_ROLE);
}
|
public void validate(CreateReviewAnswerRequest request) {
validateNotContainingText(request);
Question question = questionRepository.findById(request.questionId())
.orElseThrow(() -> new SubmittedQuestionNotFoundException(request.questionId()));
OptionGroup optionGroup = optionGroupRepository.findByQuestionId(question.getId())
.orElseThrow(() -> new OptionGroupNotFoundByQuestionIdException(question.getId()));
validateRequiredQuestion(request, question);
validateOnlyIncludingProvidedOptionItem(request, optionGroup);
validateCheckedOptionItemCount(request, optionGroup);
}
|
@Test
void 저장되지_않은_질문에_대한_응답이면_예외가_발생한다() {
// given
long notSavedQuestionId = 100L;
CreateReviewAnswerRequest request = new CreateReviewAnswerRequest(
notSavedQuestionId, List.of(1L), null
);
// when, then
assertThatCode(() -> createCheckBoxAnswerRequestValidator.validate(request))
.isInstanceOf(SubmittedQuestionNotFoundException.class);
}
|
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
if (LOG.isTraceEnabled()) {
LOG.trace("Calculating min percentage used by. Used Mem: {} Total Mem: {}"
+ " Used Normalized Resources: {} Total Normalized Resources: {}", totalMemoryMb, usedMemoryMb,
toNormalizedMap(), used.toNormalizedMap());
}
double min = 1.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
min = Math.min(min, usedMemoryMb / totalMemoryMb);
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > totalCpu) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
min = Math.min(min, used.getTotalCpu() / totalCpu);
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
if (otherResources[i] == 0.0) {
//Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
continue;
}
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
return 0;
}
if (used.otherResources[i] > otherResources[i]) {
String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]);
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info);
}
min = Math.min(min, used.otherResources[i] / otherResources[i]);
}
return min * 100.0;
}
|
@Test
public void testCalculateMinThrowsIfTotalIsMissingCpu() {
NormalizedResources resources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 2)));
NormalizedResources usedResources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 5)));
assertThrows(IllegalArgumentException.class, () ->
resources.calculateMinPercentageUsedBy(usedResources, 0, 0));
}
|
public static Map<TopicPartition, Long> parseSinkConnectorOffsets(Map<Map<String, ?>, Map<String, ?>> partitionOffsets) {
Map<TopicPartition, Long> parsedOffsetMap = new HashMap<>();
for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : partitionOffsets.entrySet()) {
Map<String, ?> partitionMap = partitionOffset.getKey();
if (partitionMap == null) {
throw new BadRequestException("The partition for a sink connector offset cannot be null or missing");
}
if (!partitionMap.containsKey(KAFKA_TOPIC_KEY) || !partitionMap.containsKey(KAFKA_PARTITION_KEY)) {
throw new BadRequestException(String.format("The partition for a sink connector offset must contain the keys '%s' and '%s'",
KAFKA_TOPIC_KEY, KAFKA_PARTITION_KEY));
}
if (partitionMap.get(KAFKA_TOPIC_KEY) == null) {
throw new BadRequestException("Kafka topic names must be valid strings and may not be null");
}
if (partitionMap.get(KAFKA_PARTITION_KEY) == null) {
throw new BadRequestException("Kafka partitions must be valid numbers and may not be null");
}
String topic = String.valueOf(partitionMap.get(KAFKA_TOPIC_KEY));
int partition;
try {
// We parse it this way because both "10" and 10 should be accepted as valid partition values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
partition = Integer.parseInt(String.valueOf(partitionMap.get(KAFKA_PARTITION_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka partition value in the provided offsets: '" +
partitionMap.get(KAFKA_PARTITION_KEY) + "'. Partition values for sink connectors need " +
"to be integers.", e);
}
TopicPartition tp = new TopicPartition(topic, partition);
Map<String, ?> offsetMap = partitionOffset.getValue();
if (offsetMap == null) {
// represents an offset reset
parsedOffsetMap.put(tp, null);
} else {
if (!offsetMap.containsKey(KAFKA_OFFSET_KEY)) {
throw new BadRequestException(String.format("The offset for a sink connector should either be null or contain " +
"the key '%s'", KAFKA_OFFSET_KEY));
}
long offset;
try {
// We parse it this way because both "1000" and 1000 should be accepted as valid offset values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
offset = Long.parseLong(String.valueOf(offsetMap.get(KAFKA_OFFSET_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka offset value in the provided offsets: '" +
offsetMap.get(KAFKA_OFFSET_KEY) + "'. Offset values for sink connectors need " +
"to be integers.", e);
}
parsedOffsetMap.put(tp, offset);
}
}
return parsedOffsetMap;
}
|
@Test
public void testValidateAndParseStringOffsetValue() {
Map<Map<String, ?>, Map<String, ?>> partitionOffsets = createPartitionOffsetMap("topic", "10", "100");
Map<TopicPartition, Long> parsedOffsets = SinkUtils.parseSinkConnectorOffsets(partitionOffsets);
assertEquals(1, parsedOffsets.size());
Long offsetValue = parsedOffsets.values().iterator().next();
assertEquals(100L, offsetValue.longValue());
}
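// Illustrative sketch (plain JDK, no Connect classes): the lenient parsing exercised by the test above accepts
// both JSON strings ("100") and JSON numbers (100) because String.valueOf normalizes the value before the
// numeric parse; a NumberFormatException then signals a genuinely malformed value.
static long parseLenientLong(Object jsonValue) {
    return Long.parseLong(String.valueOf(jsonValue)); // "1000" and 1000 both yield 1000L
}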
|
@Override
public void setQuarantine(final Local file, final String originUrl, final String dataUrl) throws LocalAccessDeniedException {
if(StringUtils.isEmpty(originUrl)) {
log.warn("No origin url given for quarantine");
return;
}
if(StringUtils.isEmpty(dataUrl)) {
log.warn("No data url given for quarantine");
return;
}
synchronized(lock) {
if(!this.setQuarantine(file.getAbsolute(), originUrl, dataUrl)) {
throw new LocalAccessDeniedException(file.getAbsolute());
}
}
}
|
@Test
public void testSetQuarantine() throws Exception {
final QuarantineService q = new LaunchServicesQuarantineService();
Callable<Local> c = new Callable<Local>() {
@Override
public Local call() throws Exception {
final NullLocal l = new NullLocal(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
LocalTouchFactory.get().touch(l);
q.setQuarantine(l,
"http://cyberduck.ch", "http://cyberduck.ch");
l.delete();
return l;
}
};
c.call();
}
|
public void process()
throws Exception {
if (_segmentMetadata.getTotalDocs() == 0) {
LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName());
return;
}
// Segment processing has to be done with a local directory.
File indexDir = new File(_indexDirURI);
// This fixes the issue of temporary files not getting deleted after creating new inverted indexes.
removeInvertedIndexTempFiles(indexDir);
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Update default columns according to the schema.
if (_schema != null) {
DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory
.getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter);
defaultColumnHandler.updateDefaultColumns();
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
} else {
LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName());
}
// Update single-column indices, like inverted index, json index etc.
List<IndexHandler> indexHandlers = new ArrayList<>();
      // We cannot just create all the index handlers in an arbitrary order.
      // Specifically, ForwardIndexHandler needs to be executed first, because it modifies the segment metadata
      // while rewriting the forward index to create a dictionary. Some other handlers (like the range index
      // handler) assume that the metadata has already been updated by ForwardIndexHandler.
IndexHandler forwardHandler = createHandler(StandardIndexes.forward());
indexHandlers.add(forwardHandler);
forwardHandler.updateIndices(segmentWriter);
      // Now that ForwardIndexHandler.updateIndices has run, all other index handlers can be executed in any order
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) {
if (type != StandardIndexes.forward()) {
IndexHandler handler = createHandler(type);
indexHandlers.add(handler);
handler.updateIndices(segmentWriter);
// Other IndexHandler classes may modify the segment metadata while creating a temporary forward
// index to generate their respective indexes from if the forward index was disabled. This new metadata is
// needed to construct other indexes like RangeIndex.
_segmentMetadata = _segmentDirectory.getSegmentMetadata();
}
}
// Perform post-cleanup operations on the index handlers.
for (IndexHandler handler : indexHandlers) {
handler.postUpdateIndicesCleanup(segmentWriter);
}
// Add min/max value to column metadata according to the prune mode.
ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode =
_indexLoadingConfig.getColumnMinMaxValueGeneratorMode();
if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) {
ColumnMinMaxValueGenerator columnMinMaxValueGenerator =
new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode);
columnMinMaxValueGenerator.addColumnMinMaxValue();
// NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line.
// _segmentMetadata = new SegmentMetadataImpl(indexDir);
}
segmentWriter.save();
}
// Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure
// that the other required indices (e.g. forward index) are up-to-date.
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Create/modify/remove star-trees if required.
processStarTrees(indexDir);
_segmentDirectory.reloadMetadata();
segmentWriter.save();
}
}
|
@Test
public void testColumnMinMaxValue()
throws Exception {
constructV1Segment(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
Collections.emptyList());
// Remove min/max value from the metadata
removeMinMaxValuesFromMetadataFile(_indexDir);
IndexLoadingConfig indexLoadingConfig = getDefaultIndexLoadingConfig();
indexLoadingConfig.setColumnMinMaxValueGeneratorMode(ColumnMinMaxValueGeneratorMode.NONE);
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) {
processor.process();
}
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir);
ColumnMetadata timeColumnMetadata = segmentMetadata.getColumnMetadataFor("daysSinceEpoch");
ColumnMetadata dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column1");
ColumnMetadata metricColumnMetadata = segmentMetadata.getColumnMetadataFor("count");
assertNull(timeColumnMetadata.getMinValue());
assertNull(timeColumnMetadata.getMaxValue());
assertNull(dimensionColumnMetadata.getMinValue());
assertNull(dimensionColumnMetadata.getMaxValue());
assertNull(metricColumnMetadata.getMinValue());
assertNull(metricColumnMetadata.getMaxValue());
indexLoadingConfig.setColumnMinMaxValueGeneratorMode(ColumnMinMaxValueGeneratorMode.TIME);
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) {
processor.process();
}
segmentMetadata = new SegmentMetadataImpl(_indexDir);
timeColumnMetadata = segmentMetadata.getColumnMetadataFor("daysSinceEpoch");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column5");
metricColumnMetadata = segmentMetadata.getColumnMetadataFor("count");
assertEquals(timeColumnMetadata.getMinValue(), 1756015683);
assertEquals(timeColumnMetadata.getMaxValue(), 1756015683);
assertNull(dimensionColumnMetadata.getMinValue());
assertNull(dimensionColumnMetadata.getMaxValue());
assertNull(metricColumnMetadata.getMinValue());
assertNull(metricColumnMetadata.getMaxValue());
indexLoadingConfig.setColumnMinMaxValueGeneratorMode(ColumnMinMaxValueGeneratorMode.NON_METRIC);
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) {
processor.process();
}
segmentMetadata = new SegmentMetadataImpl(_indexDir);
timeColumnMetadata = segmentMetadata.getColumnMetadataFor("daysSinceEpoch");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column5");
metricColumnMetadata = segmentMetadata.getColumnMetadataFor("count");
assertEquals(timeColumnMetadata.getMinValue(), 1756015683);
assertEquals(timeColumnMetadata.getMaxValue(), 1756015683);
assertEquals(dimensionColumnMetadata.getMinValue(), "AKXcXcIqsqOJFsdwxZ");
assertEquals(dimensionColumnMetadata.getMaxValue(), "yQkJTLOQoOqqhkAClgC");
assertNull(metricColumnMetadata.getMinValue());
assertNull(metricColumnMetadata.getMaxValue());
indexLoadingConfig.setColumnMinMaxValueGeneratorMode(ColumnMinMaxValueGeneratorMode.ALL);
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) {
processor.process();
}
segmentMetadata = new SegmentMetadataImpl(_indexDir);
timeColumnMetadata = segmentMetadata.getColumnMetadataFor("daysSinceEpoch");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column5");
metricColumnMetadata = segmentMetadata.getColumnMetadataFor("count");
assertEquals(timeColumnMetadata.getMinValue(), 1756015683);
assertEquals(timeColumnMetadata.getMaxValue(), 1756015683);
assertEquals(dimensionColumnMetadata.getMinValue(), "AKXcXcIqsqOJFsdwxZ");
assertEquals(dimensionColumnMetadata.getMaxValue(), "yQkJTLOQoOqqhkAClgC");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column14");
assertEquals(dimensionColumnMetadata.getMaxValue(), -9223372036854775808L);
assertEquals(dimensionColumnMetadata.getMinValue(), -9223372036854775808L);
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column15");
assertEquals(dimensionColumnMetadata.getMaxValue(), Float.NEGATIVE_INFINITY);
assertEquals(dimensionColumnMetadata.getMinValue(), Float.NEGATIVE_INFINITY);
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column16");
assertEquals(dimensionColumnMetadata.getMaxValue(), Double.NEGATIVE_INFINITY);
assertEquals(dimensionColumnMetadata.getMinValue(), Double.NEGATIVE_INFINITY);
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column17");
assertEquals(dimensionColumnMetadata.getMaxValue(), new BigDecimal("0"));
assertEquals(dimensionColumnMetadata.getMinValue(), new BigDecimal("0"));
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column18");
assertEquals(dimensionColumnMetadata.getMaxValue(), 0);
assertEquals(dimensionColumnMetadata.getMinValue(), 0);
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column19");
assertEquals(dimensionColumnMetadata.getMaxValue().toString(), "0");
assertEquals(dimensionColumnMetadata.getMinValue().toString(), "0");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column20");
assertEquals(dimensionColumnMetadata.getMaxValue(), "null");
assertEquals(dimensionColumnMetadata.getMinValue(), "null");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column21");
assertEquals(dimensionColumnMetadata.getMaxValue(), "null");
assertEquals(dimensionColumnMetadata.getMinValue(), "null");
dimensionColumnMetadata = segmentMetadata.getColumnMetadataFor("column22");
assertEquals(dimensionColumnMetadata.getMaxValue().toString(), "");
assertEquals(dimensionColumnMetadata.getMinValue().toString(), "");
assertEquals(metricColumnMetadata.getMinValue(), 890662862);
assertEquals(metricColumnMetadata.getMaxValue(), 890662862);
}
|
public static void info(final Logger logger, final String format, final Supplier<Object> supplier) {
if (logger.isInfoEnabled()) {
logger.info(format, supplier.get());
}
}
|
@Test
public void testNeverInfoWithFormat() {
when(logger.isInfoEnabled()).thenReturn(false);
LogUtils.info(logger, "testInfo: {}", supplier);
verify(supplier, never()).get();
}
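// Illustrative usage sketch for the Supplier overload above (assuming an SLF4J Logger, as the logger.info call
// in the focal method suggests): the supplier is only evaluated after the level check passes, so the costly
// Arrays.toString below is skipped entirely when INFO is disabled, which is what the mock verification asserts.
static void logLazily(org.slf4j.Logger logger) {
    LogUtils.info(logger, "large payload: {}", () -> java.util.Arrays.toString(new int[1_000_000]));
}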
|
int commit(final Collection<Task> tasksToCommit) {
int committed = 0;
final Set<TaskId> ids =
tasksToCommit.stream()
.map(Task::id)
.collect(Collectors.toSet());
maybeLockTasks(ids);
// We have to throw the first uncaught exception after locking the tasks, to not attempt to commit failure records.
maybeThrowTaskExceptionsFromProcessingThreads();
final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsAndMetadataPerTask = new HashMap<>();
try {
committed = commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, consumedOffsetsAndMetadataPerTask);
} catch (final TimeoutException timeoutException) {
consumedOffsetsAndMetadataPerTask
.keySet()
.forEach(t -> t.maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException));
}
maybeUnlockTasks(ids);
return committed;
}
|
@Test
public void shouldLockCommitableTasksOnCorruptionWithProcessingThreads() {
final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId00Partitions).build();
final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId01Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true, true);
final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
taskManager.commit(mkSet(activeTask1, activeTask2));
verify(schedulingTaskManager).lockTasks(mkSet(taskId00, taskId01));
verify(schedulingTaskManager).unlockTasks(mkSet(taskId00, taskId01));
}
|
public SharedPool getSharedPool() {
return sharedPool;
}
|
@Test
public void testSharedPool() {
ShenyuConfig.SharedPool sharedPool = config.getSharedPool();
sharedPool.setCorePoolSize(3);
sharedPool.setEnable(true);
sharedPool.setMaximumPoolSize(5);
sharedPool.setPrefix("test-");
sharedPool.setKeepAliveTime(1000L);
sharedPool.setMaxWorkQueueMemory(1024L);
notEmptyElements(sharedPool.getCorePoolSize(), sharedPool.getEnable(), sharedPool.getMaximumPoolSize(),
sharedPool.getPrefix(), sharedPool.getKeepAliveTime(), sharedPool.getMaxWorkQueueMemory());
}
|
@Override
public JMXReporter createMetricReporter(Properties properties) {
String portsConfig = properties.getProperty(ARG_PORT);
return new JMXReporter(portsConfig);
}
|
@Test
void testPortRangeArgument() {
Properties properties = new Properties();
properties.setProperty(JMXReporterFactory.ARG_PORT, "9000-9010");
JMXReporter metricReporter = new JMXReporterFactory().createMetricReporter(properties);
try {
assertThat(metricReporter.getPort())
.hasValueSatisfying(
port ->
assertThat(port)
.isGreaterThanOrEqualTo(9000)
.isLessThanOrEqualTo(9010));
} finally {
metricReporter.close();
}
}
|
public String toEnrichedRst() {
StringBuilder b = new StringBuilder();
String lastKeyGroupName = "";
for (ConfigKey key : sortedConfigs()) {
if (key.internalConfig) {
continue;
}
if (key.group != null) {
if (!lastKeyGroupName.equalsIgnoreCase(key.group)) {
b.append(key.group).append("\n");
char[] underLine = new char[key.group.length()];
Arrays.fill(underLine, '^');
b.append(new String(underLine)).append("\n\n");
}
lastKeyGroupName = key.group;
}
getConfigKeyRst(key, b);
if (key.dependents != null && key.dependents.size() > 0) {
int j = 0;
b.append(" * Dependents: ");
for (String dependent : key.dependents) {
b.append("``");
b.append(dependent);
if (++j == key.dependents.size())
b.append("``");
else
b.append("``, ");
}
b.append("\n");
}
b.append("\n");
}
return b.toString();
}
|
@Test
public void toEnrichedRst() {
final ConfigDef def = new ConfigDef()
.define("opt1.of.group1", Type.STRING, "a", ValidString.in("a", "b", "c"), Importance.HIGH, "Doc doc.",
"Group One", 0, Width.NONE, "..", Collections.emptyList())
.define("opt2.of.group1", Type.INT, ConfigDef.NO_DEFAULT_VALUE, Importance.MEDIUM, "Doc doc doc.",
"Group One", 1, Width.NONE, "..", Arrays.asList("some.option1", "some.option2"))
.define("opt2.of.group2", Type.BOOLEAN, false, Importance.HIGH, "Doc doc doc doc.",
"Group Two", 1, Width.NONE, "..", Collections.emptyList())
.define("opt1.of.group2", Type.BOOLEAN, false, Importance.HIGH, "Doc doc doc doc doc.",
"Group Two", 0, Width.NONE, "..", singletonList("some.option"))
.define("poor.opt", Type.STRING, "foo", Importance.HIGH, "Doc doc doc doc.");
final String expectedRst =
"``poor.opt``\n" +
" Doc doc doc doc.\n" +
"\n" +
" * Type: string\n" +
" * Default: foo\n" +
" * Importance: high\n" +
"\n" +
"Group One\n" +
"^^^^^^^^^\n" +
"\n" +
"``opt1.of.group1``\n" +
" Doc doc.\n" +
"\n" +
" * Type: string\n" +
" * Default: a\n" +
" * Valid Values: [a, b, c]\n" +
" * Importance: high\n" +
"\n" +
"``opt2.of.group1``\n" +
" Doc doc doc.\n" +
"\n" +
" * Type: int\n" +
" * Importance: medium\n" +
" * Dependents: ``some.option1``, ``some.option2``\n" +
"\n" +
"Group Two\n" +
"^^^^^^^^^\n" +
"\n" +
"``opt1.of.group2``\n" +
" Doc doc doc doc doc.\n" +
"\n" +
" * Type: boolean\n" +
" * Default: false\n" +
" * Importance: high\n" +
" * Dependents: ``some.option``\n" +
"\n" +
"``opt2.of.group2``\n" +
" Doc doc doc doc.\n" +
"\n" +
" * Type: boolean\n" +
" * Default: false\n" +
" * Importance: high\n" +
"\n";
assertEquals(expectedRst, def.toEnrichedRst());
}
|
public static String initCacheDir(String namespace, NacosClientProperties properties) {
String jmSnapshotPath = properties.getProperty(JM_SNAPSHOT_PATH_PROPERTY);
String namingCacheRegistryDir = "";
if (properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR) != null) {
namingCacheRegistryDir =
File.separator + properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR);
}
if (!StringUtils.isBlank(jmSnapshotPath)) {
cacheDir = jmSnapshotPath + File.separator + FILE_PATH_NACOS + namingCacheRegistryDir + File.separator
+ FILE_PATH_NAMING + File.separator + namespace;
} else {
cacheDir =
properties.getProperty(USER_HOME_PROPERTY) + File.separator + FILE_PATH_NACOS + namingCacheRegistryDir
+ File.separator + FILE_PATH_NAMING + File.separator + namespace;
}
return cacheDir;
}
|
@Test
void testInitCacheDirWithDefaultRootAndWithoutCache2() {
NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
properties.setProperty("user.home", "/home/test");
String actual = CacheDirUtil.initCacheDir("test", properties);
assertEquals("/home/test/nacos/naming/test", actual);
}
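// Illustrative sketch of the path composition checked above: with no jm.snapshot.path and no
// NAMING_CACHE_REGISTRY_DIR configured, the cache dir is ${user.home}/nacos/naming/${namespace}. The literal
// "nacos" and "naming" segments are assumptions implied by the asserted value, and '/' as the separator only
// holds on Unix-like systems.
String cacheDir = "/home/test" + java.io.File.separator + "nacos" + java.io.File.separator
    + "naming" + java.io.File.separator + "test"; // "/home/test/nacos/naming/test" on Unix-like systems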
|
public List<String> getLiveBrokers() {
List<String> brokerUrls = new ArrayList<>();
try {
byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
JsonNode brokerResourceNode = jsonObject.get("mapFields");
Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
while (resourceEntries.hasNext()) {
JsonNode resource = resourceEntries.next().getValue();
Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
while (brokerEntries.hasNext()) {
Entry<String, JsonNode> brokerEntry = brokerEntries.next();
String brokerName = brokerEntry.getKey();
if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
brokerUrls.add(getHostPort(brokerName));
}
}
}
} catch (Exception e) {
LOGGER.warn("Exception while reading External view from zookeeper", e);
// ignore
}
return brokerUrls;
}
|
@Test
public void testGetBrokerListByInstanceConfigDefault() {
configureData(_instanceConfigPlain, false);
final List<String> brokers = _externalViewReaderUnderTest.getLiveBrokers();
assertEquals(brokers, Arrays.asList("first.pug-pinot-broker-headless:8099"));
}
|
public static Float toFloat(Object value, Float defaultValue) {
return convertQuietly(Float.class, value, defaultValue);
}
|
@Test
public void toFloatTest() {
// https://gitee.com/dromara/hutool/issues/I4M0E4
final String hex2 = "CD0CCB43";
final byte[] value = HexUtil.decodeHex(hex2);
final float f = Convert.toFloat(value);
assertEquals(406.1F, f, 0);
}
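// Illustrative sketch of why the hex "CD0CCB43" decodes to 406.1F (assumption: Hutool's Convert.toFloat reads
// the byte array in little-endian order, which is what the expected value implies). Plain JDK equivalent:
static float littleEndianBytesToFloat(byte[] bytes) {
    return java.nio.ByteBuffer.wrap(bytes).order(java.nio.ByteOrder.LITTLE_ENDIAN).getFloat();
}
// littleEndianBytesToFloat(new byte[]{(byte) 0xCD, 0x0C, (byte) 0xCB, 0x43}) == 406.1F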
|
@Override
public synchronized double getFractionConsumed() {
if (position == null) {
return 0;
} else if (done) {
return 1.0;
} else if (position.compareTo(range.getEndKey()) >= 0) {
return 1.0;
}
return range.estimateFractionForKey(position);
}
|
@Test
public void testGetFractionConsumed() {
ByteKeyRangeTracker tracker = ByteKeyRangeTracker.of(INITIAL_RANGE);
double delta = 0.00001;
assertEquals(0.0, tracker.getFractionConsumed(), delta);
tracker.tryReturnRecordAt(true, INITIAL_START_KEY);
assertEquals(0.0, tracker.getFractionConsumed(), delta);
tracker.tryReturnRecordAt(true, INITIAL_MIDDLE_KEY);
assertEquals(0.5, tracker.getFractionConsumed(), delta);
tracker.tryReturnRecordAt(true, BEFORE_END_KEY);
assertEquals(1 - 1 / INITIAL_RANGE_SIZE, tracker.getFractionConsumed(), delta);
}
|
@Override
public double getValue(double quantile) {
if (quantile < 0.0 || quantile > 1.0 || Double.isNaN( quantile )) {
throw new IllegalArgumentException(quantile + " is not in [0..1]");
}
if (values.length == 0) {
return 0.0;
}
final double pos = quantile * (values.length + 1);
final int index = (int) pos;
if (index < 1) {
return values[0];
}
if (index >= values.length) {
return values[values.length - 1];
}
final double lower = values[index - 1];
final double upper = values[index];
return lower + (pos - floor(pos)) * (upper - lower);
}
|
@Test
public void bigQuantilesAreTheLastValue() throws Exception {
assertThat(snapshot.getValue(1.0))
.isEqualTo(5, offset(0.1));
}
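// Standalone sketch mirroring the interpolation in getValue above, with a worked example: for the sorted sample
// {1, 2, 3, 4, 5} and quantile 0.75, pos = 0.75 * 6 = 4.5, so the result interpolates between values[3] = 4 and
// values[4] = 5 to give 4.5; for quantile 1.0, pos = 6 >= length and the last value is returned, which is the
// behaviour the test above relies on.
static double interpolatedQuantile(double[] sorted, double quantile) {
    final double pos = quantile * (sorted.length + 1);
    final int index = (int) pos;
    if (index < 1) {
        return sorted[0];
    }
    if (index >= sorted.length) {
        return sorted[sorted.length - 1];
    }
    return sorted[index - 1] + (pos - Math.floor(pos)) * (sorted[index] - sorted[index - 1]);
}
// interpolatedQuantile(new double[]{1, 2, 3, 4, 5}, 0.75) == 4.5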
|
static List<GeneratedResource> getGeneratedResources(EfestoClassesContainer finalOutput) {
List<GeneratedResource> toReturn = new ArrayList<>();
for (String key : finalOutput.getCompiledClassesMap().keySet()) {
toReturn.add(getGeneratedClassResource(key));
}
return toReturn;
}
|
@Test
void getGeneratedResources() {
List<GeneratedResource> retrieved = CompilationManagerUtils.getGeneratedResources(finalOutput);
commonEvaluateGeneratedIntermediateResources(retrieved);
}
|
@Override
public TTableDescriptor toThrift(List<DescriptorTable.ReferencedPartitionInfo> partitions) {
TJDBCTable tJDBCTable = new TJDBCTable();
if (!Strings.isNullOrEmpty(resourceName)) {
JDBCResource resource =
(JDBCResource) (GlobalStateMgr.getCurrentState().getResourceMgr().getResource(resourceName));
tJDBCTable.setJdbc_driver_name(resource.getName());
tJDBCTable.setJdbc_driver_url(resource.getProperty(JDBCResource.DRIVER_URL));
tJDBCTable.setJdbc_driver_checksum(resource.getProperty(JDBCResource.CHECK_SUM));
tJDBCTable.setJdbc_driver_class(resource.getProperty(JDBCResource.DRIVER_CLASS));
tJDBCTable.setJdbc_url(resource.getProperty(JDBCResource.URI));
tJDBCTable.setJdbc_table(jdbcTable);
tJDBCTable.setJdbc_user(resource.getProperty(JDBCResource.USER));
tJDBCTable.setJdbc_passwd(resource.getProperty(JDBCResource.PASSWORD));
} else {
String uri = properties.get(JDBCResource.URI);
String driverName = buildCatalogDriveName(uri);
tJDBCTable.setJdbc_driver_name(driverName);
tJDBCTable.setJdbc_driver_url(properties.get(JDBCResource.DRIVER_URL));
tJDBCTable.setJdbc_driver_checksum(properties.get(JDBCResource.CHECK_SUM));
tJDBCTable.setJdbc_driver_class(properties.get(JDBCResource.DRIVER_CLASS));
if (properties.get(JDBC_TABLENAME) != null) {
tJDBCTable.setJdbc_url(uri);
} else {
int delimiterIndex = uri.indexOf("?");
if (delimiterIndex > 0) {
String urlPrefix = uri.substring(0, delimiterIndex);
String urlSuffix = uri.substring(delimiterIndex + 1);
tJDBCTable.setJdbc_url(urlPrefix + "/" + dbName + "?" + urlSuffix);
} else {
tJDBCTable.setJdbc_url(uri + "/" + dbName);
}
}
tJDBCTable.setJdbc_table(jdbcTable);
tJDBCTable.setJdbc_user(properties.get(JDBCResource.USER));
tJDBCTable.setJdbc_passwd(properties.get(JDBCResource.PASSWORD));
}
TTableDescriptor tTableDescriptor = new TTableDescriptor(getId(), TTableType.JDBC_TABLE,
fullSchema.size(), 0, getName(), "");
tTableDescriptor.setJdbcTable(tJDBCTable);
return tTableDescriptor;
}
|
@Test
public void testToThrift(@Mocked GlobalStateMgr globalStateMgr,
@Mocked ResourceMgr resourceMgr) throws Exception {
new Expectations() {
{
GlobalStateMgr.getCurrentState();
result = globalStateMgr;
globalStateMgr.getResourceMgr();
result = resourceMgr;
resourceMgr.getResource("jdbc0");
result = getMockedJDBCResource(resourceName);
}
};
JDBCTable table = new JDBCTable(1000, "jdbc_table", columns, properties);
TTableDescriptor tableDescriptor = table.toThrift(null);
// build expected table descriptor
JDBCResource resource = (JDBCResource) getMockedJDBCResource("jdbc0");
TTableDescriptor expectedDesc =
new TTableDescriptor(1000, TTableType.JDBC_TABLE, columns.size(), 0, "jdbc_table", "");
TJDBCTable expectedTable = new TJDBCTable();
        // we do not compute the checksum in unit tests, so setJdbc_driver_checksum is skipped here
expectedTable.setJdbc_driver_name(resource.getName());
expectedTable.setJdbc_driver_url(resource.getProperty(JDBCResource.DRIVER_URL));
expectedTable.setJdbc_driver_class(resource.getProperty(JDBCResource.DRIVER_CLASS));
expectedTable.setJdbc_url(resource.getProperty(JDBCResource.URI));
expectedTable.setJdbc_table(this.table);
expectedTable.setJdbc_user(resource.getProperty(JDBCResource.USER));
expectedTable.setJdbc_passwd(resource.getProperty(JDBCResource.PASSWORD));
expectedDesc.setJdbcTable(expectedTable);
Assert.assertEquals(tableDescriptor, expectedDesc);
}
|
EncodedDiscreteResources add(EncodedDiscreteResources other) {
checkArgument(this.codec.getClass() == other.codec.getClass());
RangeSet<Integer> newRangeSet = TreeRangeSet.create(this.rangeSet);
newRangeSet.addAll(other.rangeSet);
return new EncodedDiscreteResources(newRangeSet, this.codec);
}
|
@Test
public void testAdd() {
DiscreteResource res1 = Resources.discrete(DID, PN, VID1).resource();
DiscreteResource res2 = Resources.discrete(DID, PN, VID2).resource();
DiscreteResource res3 = Resources.discrete(DID, PN, VID3).resource();
EncodedDiscreteResources sut = EncodedDiscreteResources.of(ImmutableSet.of(res1, res2), new VlanIdCodec());
EncodedDiscreteResources other = EncodedDiscreteResources.of(ImmutableSet.of(res1, res3), new VlanIdCodec());
assertThat(sut.add(other),
is(EncodedDiscreteResources.of(ImmutableSet.of(res1, res2, res3), new VlanIdCodec())));
}
|
public ActionResult apply(Agent agent, Map<String, String> request) {
log.debug("Writing content to file {}", request.get("filename"));
String filename = request.get("filename");
if (filename == null || filename.isEmpty()) {
return ActionResult.builder()
.status(ActionResult.Status.FAILURE)
.summary("The filename parameter is missing or has an empty value.")
.error("The filename parameter is missing or has an empty value.")
.build();
}
Document document = workspace.addDocument(agent.getId(), filename, request.get("body").getBytes(), new HashMap<>());
return ActionResult.builder()
.status(ActionResult.Status.SUCCESS)
.summary(String.format("The file %s has been written to the Agent workspace.", request.get("filename")))
.result(String.format("The file %s has been written to the Agent workspace.", request.get("filename")))
.documents(Arrays.asList(document))
.build();
}
|
@Test
void testApplyWithValidInput() {
String agentId = "agent1";
String filename = "test.txt";
String fileContent = "This is a test file.";
Map<String, String> request = new HashMap<>();
request.put("filename", filename);
request.put("body", fileContent);
Document mockDocument = Document.builder().name(filename).metadata(Map.of("content-type", "text/plain")).build();
when(agent.getId()).thenReturn(agentId);
when(workspace.addDocument(eq(agentId), eq(filename), eq(fileContent.getBytes()), any())).thenReturn(mockDocument);
ActionResult result = writeFileAction.apply(agent, request);
assertEquals(ActionResult.Status.SUCCESS, result.getStatus());
assertEquals("The file test.txt has been written to the Agent workspace.", result.getSummary());
assertEquals("The file test.txt has been written to the Agent workspace.", result.getResult());
assertEquals(List.of(mockDocument), result.getDocuments());
assertNull(result.getError());
verify(workspace).addDocument(eq(agentId), eq(filename), eq(fileContent.getBytes()), any());
}
|
public Set<EntityDescriptor> resolveEntities(Collection<EntityDescriptor> unresolvedEntities) {
final MutableGraph<EntityDescriptor> dependencyGraph = GraphBuilder.directed()
.allowsSelfLoops(false)
.nodeOrder(ElementOrder.insertion())
.build();
unresolvedEntities.forEach(dependencyGraph::addNode);
final HashSet<EntityDescriptor> resolvedEntities = new HashSet<>();
final MutableGraph<EntityDescriptor> finalDependencyGraph = resolveDependencyGraph(dependencyGraph, resolvedEntities);
LOG.debug("Final dependency graph: {}", finalDependencyGraph);
return finalDependencyGraph.nodes();
}
|
@Test
public void resolveEntitiesWithTransitiveDependencies() throws NotFoundException {
final StreamMock streamMock = new StreamMock(ImmutableMap.of(
"_id", "stream-1234",
StreamImpl.FIELD_TITLE, "Stream Title")) {
@Override
public Set<Output> getOutputs() {
return Collections.singleton(
OutputImpl.create(
"output-1234",
"Output Title",
"org.example.outputs.SomeOutput",
"admin",
Collections.emptyMap(),
new Date(0L),
null
)
);
}
};
when(streamService.load("stream-1234")).thenReturn(streamMock);
final ImmutableSet<EntityDescriptor> unresolvedEntities = ImmutableSet.of(
EntityDescriptor.create("stream-1234", ModelTypes.STREAM_V1)
);
final Set<EntityDescriptor> resolvedEntities = contentPackService.resolveEntities(unresolvedEntities);
assertThat(resolvedEntities).containsOnly(
EntityDescriptor.create("stream-1234", ModelTypes.STREAM_V1),
EntityDescriptor.create("output-1234", ModelTypes.OUTPUT_V1)
);
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testFetchResponseMetrics() {
buildFetcher();
String topic1 = "foo";
String topic2 = "bar";
TopicPartition tp1 = new TopicPartition(topic1, 0);
TopicPartition tp2 = new TopicPartition(topic2, 0);
subscriptions.assignFromUser(mkSet(tp1, tp2));
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put(topic1, 1);
partitionCounts.put(topic2, 1);
topicIds.put(topic1, Uuid.randomUuid());
topicIds.put(topic2, Uuid.randomUuid());
TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));
int expectedBytes = 0;
LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
subscriptions.seek(tp.topicPartition(), 0);
MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE,
TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
for (Record record : records.records())
expectedBytes += record.sizeInBytes();
fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.topicPartition().partition())
.setHighWatermark(15)
.setLogStartOffset(0)
.setRecords(records));
}
assertEquals(1, sendFetches());
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
networkClientDelegate.poll(time.timer(0));
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
assertEquals(3, fetchedRecords.get(tp1).size());
assertEquals(3, fetchedRecords.get(tp2).size());
Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
|
@Udf(description = "Converts a number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String timestampToString(
@UdfParameter(
description = "Milliseconds since"
+ " January 1, 1970, 00:00:00 UTC/GMT.") final long epochMilli,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (formatPattern == null) {
return null;
}
try {
final Timestamp timestamp = new Timestamp(epochMilli);
final DateTimeFormatter formatter = formatters.get(formatPattern);
return timestamp.toInstant()
.atZone(ZoneId.systemDefault())
.format(formatter);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format timestamp " + epochMilli
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldRoundTripWithStringToTimestamp() {
final String pattern = "yyyy-MM-dd HH:mm:ss.SSS'Freya'";
final StringToTimestamp stringToTimestamp = new StringToTimestamp();
IntStream.range(-10_000, 20_000)
.parallel()
.forEach(idx -> {
final long millis = 1538361611123L + idx;
final String result = udf.timestampToString(millis, pattern);
final String expectedResult = new SimpleDateFormat(pattern).format(new Date(millis));
assertThat(result, is(expectedResult));
final long roundtripMillis = stringToTimestamp.stringToTimestamp(result, pattern);
assertThat(roundtripMillis, is(millis));
});
}
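// Minimal java.time sketch of what timestampToString above does for a single value (system default zone); the
// UDF's formatter cache and KsqlFunctionException wrapping are omitted here.
static String formatEpochMillis(long epochMilli, String pattern) {
    return java.time.Instant.ofEpochMilli(epochMilli)
        .atZone(java.time.ZoneId.systemDefault())
        .format(java.time.format.DateTimeFormatter.ofPattern(pattern));
}
// formatEpochMillis(1538361611123L, "yyyy-MM-dd HH:mm:ss.SSS") renders the same instant the round-trip test uses.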
|
@CheckForNull
static String checkEventName(@Nullable String name) {
if (name == null) {
return null;
}
checkArgument(name.length() <= MAX_NAME_LENGTH, "Event name length (%s) is longer than the maximum authorized (%s). '%s' was provided.",
name.length(), MAX_NAME_LENGTH, name);
return name;
}
|
@Test
void fail_if_name_longer_than_400() {
assertThatThrownBy(() -> EventValidator.checkEventName(repeat("a", 400 + 1)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Event name length (401) is longer than the maximum authorized (400).");
}
|
public Object invokeMethod(String methodName) {
return invokeMethod(methodName, (Class []) null, (Object []) null);
}
|
@Test
void testInvokeMethod_shouldAbleToInvokeMethodWithClass() throws ClassNotFoundException,
NoSuchMethodException, InvocationTargetException, IllegalAccessException {
Resource r = new Resource(null, new ResourceId("pool1", "name1"), "object");
assertEquals(true, r.invokeMethod("startsWith", new Class[]{ java.lang.String.class }, new Object[]{"obj"}));
}
|
@Override
public GenericRow transform(GenericRow record) {
for (Map.Entry<String, FunctionEvaluator> entry : _expressionEvaluators.entrySet()) {
String column = entry.getKey();
FunctionEvaluator transformFunctionEvaluator = entry.getValue();
Object existingValue = record.getValue(column);
if (existingValue == null) {
try {
          // The column value does not exist yet, so evaluate the transform function to produce it.
          // NOTE: a column value might already exist for OFFLINE data, in which case the transform is skipped
          // (see the else branch below). For backward compatibility, the only exception is nested fields like
          // arrays, collections or maps, which are overridden since they were not included in record
          // transformation before.
record.putValue(column, transformFunctionEvaluator.evaluate(record));
} catch (Exception e) {
if (!_continueOnError) {
throw new RuntimeException("Caught exception while evaluation transform function for column: " + column, e);
} else {
LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e);
record.putValue(GenericRow.INCOMPLETE_RECORD_KEY, true);
}
}
      } else if (existingValue.getClass().isArray() || existingValue instanceof Collection
|| existingValue instanceof Map) {
try {
Object transformedValue = transformFunctionEvaluator.evaluate(record);
// For backward compatibility, The only exception here is that we will override nested field like array,
// collection or map since they were not included in the record transformation before.
if (!isTypeCompatible(existingValue, transformedValue)) {
record.putValue(column, transformedValue);
}
} catch (Exception e) {
LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e);
}
}
}
return record;
}
|
@Test
public void testTransformConfigsFromTableConfig() {
Schema pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("userId", FieldSpec.DataType.LONG)
.addSingleValueDimension("fullName", FieldSpec.DataType.STRING)
.addMultiValueDimension("bids", FieldSpec.DataType.INT)
.addSingleValueDimension("maxBid", FieldSpec.DataType.INT)
.addMultiValueDimension("map2_keys", FieldSpec.DataType.STRING)
.addMultiValueDimension("map2_values", FieldSpec.DataType.INT).addMetric("cost", FieldSpec.DataType.DOUBLE)
.addDateTime("hoursSinceEpoch", FieldSpec.DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
List<TransformConfig> transformConfigs = Arrays.asList(
new TransformConfig("userId", "Groovy({user_id}, user_id)"),
new TransformConfig("fullName", "Groovy({firstName+' '+lastName}, firstName, lastName)"),
new TransformConfig("maxBid", "Groovy({bids.max{ it.toBigDecimal() }}, bids)"),
new TransformConfig("map2_keys", "Groovy({map2.sort()*.key}, map2)"),
new TransformConfig("map2_values", "Groovy({map2.sort()*.value}, map2)"),
new TransformConfig("hoursSinceEpoch", "Groovy({timestamp/(1000*60*60)}, timestamp)"));
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setTransformConfigs(transformConfigs);
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTransformFunctions")
.setIngestionConfig(ingestionConfig).build();
ExpressionTransformer expressionTransformer = new ExpressionTransformer(tableConfig, pinotSchema);
DataTypeTransformer dataTypeTransformer = new DataTypeTransformer(tableConfig, pinotSchema);
// test functions from schema
GenericRow genericRow = new GenericRow();
genericRow.putValue("user_id", 1L);
genericRow.putValue("firstName", "John");
genericRow.putValue("lastName", "Denver");
genericRow.putValue("bids", Arrays.asList(10, 20));
    HashMap<String, String> map1 = new HashMap<>(); // keys in a Map coming from Avro are always STRINGs
map1.put("30", "foo");
map1.put("200", "bar");
genericRow.putValue("map1", map1);
HashMap<String, Integer> map2 = new HashMap<>();
map2.put("k1", 10);
map2.put("k2", 20);
genericRow.putValue("map2", map2);
genericRow.putValue("cost", 1000.0);
genericRow.putValue("timestamp", 1574000000000L);
genericRow.putValue("lon", 1.0);
genericRow.putValue("lat", 2.0);
// expression transformer
expressionTransformer.transform(genericRow);
// extract userId
Assert.assertEquals(genericRow.getValue("userId"), 1L);
// concat fullName
Assert.assertEquals(genericRow.getValue("fullName"), "John Denver");
Assert.assertTrue(((List) genericRow.getValue("bids")).containsAll(Arrays.asList(10, 20)));
// find max bid from bids
Assert.assertEquals(genericRow.getValue("maxBid"), 20);
// handle Map through transform functions
ArrayList map2Keys = (ArrayList) genericRow.getValue("map2_keys");
Assert.assertEquals(map2Keys.get(0), "k1");
Assert.assertEquals(map2Keys.get(1), "k2");
ArrayList map2Values = (ArrayList) genericRow.getValue("map2_values");
Assert.assertEquals(map2Values.get(0), 10);
Assert.assertEquals(map2Values.get(1), 20);
Assert.assertEquals(genericRow.getValue("cost"), 1000.0);
// calculate hoursSinceEpoch
Assert.assertEquals(genericRow.getValue("hoursSinceEpoch").toString(), "437222.2222222222");
// data type transformer
dataTypeTransformer.transform(genericRow);
Assert.assertEquals(genericRow.getValue("userId"), 1L);
Assert.assertEquals(genericRow.getValue("fullName"), "John Denver");
Assert.assertEquals(((Object[]) genericRow.getValue("bids")), new Integer[]{10, 20});
Assert.assertEquals(genericRow.getValue("maxBid"), 20);
// handle Map through transform functions
Object[] map2KeysObject = (Object[]) genericRow.getValue("map2_keys");
Assert.assertEquals(map2KeysObject[0], "k1");
Assert.assertEquals(map2KeysObject[1], "k2");
Object[] map2ValuesObject = (Object[]) genericRow.getValue("map2_values");
Assert.assertEquals(map2ValuesObject[0], 10);
Assert.assertEquals(map2ValuesObject[1], 20);
Assert.assertEquals(genericRow.getValue("cost"), 1000.0);
// convert to LONG
Assert.assertEquals(genericRow.getValue("hoursSinceEpoch"), 437222L);
}
|
@Override
public Object[] toArray() {
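        // Synchronously fetch the full score-ordered range (0 to -1) and return it as an array.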
List<Object> res = (List<Object>) get(valueRangeAsync(0, -1));
return res.toArray();
}
|
@Test
public void testToArray() {
RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
set.add(0, "1");
set.add(1, "4");
set.add(2, "2");
set.add(3, "5");
set.add(4, "3");
assertThat(Arrays.asList(set.toArray())).containsExactly("1", "4", "2", "5", "3");
String[] strs = set.toArray(new String[0]);
assertThat(Arrays.asList(strs)).containsExactly("1", "4", "2", "5", "3");
}
|
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
try {
final SoftwareVersionData version = session.softwareVersion();
final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(version.getRestApiVersion());
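            // Setting node timestamps requires the server to report REST API version 4.22 or later.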
if(matcher.matches()) {
if(new Version(matcher.group(1)).compareTo(new Version("4.22")) < 0) {
throw new UnsupportedException();
}
}
final Node latest;
if(containerService.isContainer(file)) {
latest = new NodesApi(session.getClient()).updateRoom(new UpdateRoomRequest()
.timestampCreation(null != status.getCreated() ? new DateTime(status.getCreated()) : null)
.timestampModification(null != status.getModified() ? new DateTime(status.getModified()) : null),
Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY, null);
}
else if(file.isDirectory()) {
latest = new NodesApi(session.getClient()).updateFolder(new UpdateFolderRequest()
.timestampCreation(null != status.getCreated() ? new DateTime(status.getCreated()) : null)
.timestampModification(null != status.getModified() ? new DateTime(status.getModified()) : null),
Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY, null);
}
else {
latest = new NodesApi(session.getClient()).updateFile(new UpdateFileRequest()
.timestampCreation(null != status.getCreated() ? new DateTime(status.getCreated()) : null)
.timestampModification(null != status.getModified() ? new DateTime(status.getModified()) : null),
Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY, null);
}
status.setResponse(new SDSAttributesAdapter(session).toAttributes(latest));
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Failure to write attributes of {0}", e, file);
}
}
|
@Test
public void testWriteTimestampFile() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Long revision = test.attributes().getRevision();
assertNotNull(revision);
final TransferStatus status = new TransferStatus().withModified(1599047952805L);
new SDSTimestampFeature(session, nodeid).setTimestamp(test, status);
final SDSAttributesFinderFeature f = new SDSAttributesFinderFeature(session, nodeid);
final PathAttributes attributes = f.find(test);
assertEquals(1599047952805L, attributes.getModificationDate());
assertEquals(status.getResponse(), attributes);
assertNotEquals(revision, attributes.getRevision());
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public String format() {
StringBuilder builder = new StringBuilder();
for (RestartActions.Entry entry : actions.getEntries()) {
builder.append("In cluster '" + entry.getClusterName() + "' of type '" + entry.getClusterType() + "':\n");
builder.append(" Restart services of type '" + entry.getServiceType() + "' because:\n");
int counter = 1;
for (String message : entry.getMessages()) {
builder.append(" " + counter++ + ") " + message + "\n");
}
}
return builder.toString();
}
|
@Test
public void formatting_of_multiple_actions() {
RestartActions actions = new ConfigChangeActionsBuilder().
restart(CHANGE_MSG, CLUSTER, CLUSTER_TYPE, SERVICE_TYPE, SERVICE_NAME).
restart(CHANGE_MSG_2, CLUSTER, CLUSTER_TYPE, SERVICE_TYPE, SERVICE_NAME).
restart(CHANGE_MSG, CLUSTER_2, CLUSTER_TYPE, SERVICE_TYPE, SERVICE_NAME).
build().getRestartActions();
assertThat(new RestartActionsFormatter(actions).format(),
equalTo("In cluster 'bar' of type 'search':\n" +
" Restart services of type 'searchnode' because:\n" +
" 1) change\n" +
"In cluster 'foo' of type 'search':\n" +
" Restart services of type 'searchnode' because:\n" +
" 1) change\n" +
" 2) other change\n"));
}
|
public RelDataType createRelDataTypeFromSchema(Schema schema) {
Builder builder = new Builder(this);
boolean enableNullHandling = schema.isEnableColumnBasedNullHandling();
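    // Map each Pinot field spec to a Calcite RelDataType field, honoring column-based null handling.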
for (Map.Entry<String, FieldSpec> entry : schema.getFieldSpecMap().entrySet()) {
builder.add(entry.getKey(), toRelDataType(entry.getValue(), enableNullHandling));
}
return builder.build();
}
|
@Test(dataProvider = "relDataTypeConversion")
public void testNotNullableScalarTypes(FieldSpec.DataType dataType, RelDataType scalarType, boolean columnNullMode) {
TypeFactory typeFactory = new TypeFactory();
Schema testSchema = new Schema.SchemaBuilder()
.addDimensionField("col", dataType, field -> field.setNullable(false))
.setEnableColumnBasedNullHandling(columnNullMode)
.build();
RelDataType relDataTypeFromSchema = typeFactory.createRelDataTypeFromSchema(testSchema);
List<RelDataTypeField> fieldList = relDataTypeFromSchema.getFieldList();
RelDataTypeField field = fieldList.get(0);
Assert.assertEquals(field.getType(), scalarType);
}
|
@Override
boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
Preconditions.checkArgument(BlockIdManager.isStripedBlockID(
reportedBlock.getBlockId()), "reportedBlock is not striped");
Preconditions.checkArgument(BlockIdManager.convertToStripedID(
reportedBlock.getBlockId()) == this.getBlockId(),
"reported blk_%s does not belong to the group of stored blk_%s",
reportedBlock.getBlockId(), this.getBlockId());
int blockIndex = BlockIdManager.getBlockIndex(reportedBlock);
int index = blockIndex;
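    // blockIndex is the block's position within the striped group (derived from the block ID);
    // index is the slot in the storage array, which is moved to a free slot when this group slot
    // is already occupied by a different storage (over-replication).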
DatanodeStorageInfo old = getStorageInfo(index);
if (old != null && !old.equals(storage)) { // over replicated
// check if the storage has been stored
int i = findStorageInfo(storage);
if (i == -1) {
index = findSlot();
} else {
return true;
}
}
addStorage(storage, index, blockIndex);
return true;
}
|
@Test
public void testAddStorage() {
// first add NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS storages, i.e., a complete
// group of blocks/storages
DatanodeStorageInfo[] storageInfos = DFSTestUtil.createDatanodeStorageInfos(
totalBlocks);
Block[] blocks = createReportedBlocks(totalBlocks);
int i = 0;
for (; i < storageInfos.length; i += 2) {
info.addStorage(storageInfos[i], blocks[i]);
Assert.assertEquals(i/2 + 1, info.numNodes());
}
i /= 2;
for (int j = 1; j < storageInfos.length; j += 2) {
Assert.assertTrue(info.addStorage(storageInfos[j], blocks[j]));
Assert.assertEquals(i + (j+1)/2, info.numNodes());
}
// check
byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
Assert.assertEquals(totalBlocks, info.getCapacity());
Assert.assertEquals(totalBlocks, indices.length);
i = 0;
for (DatanodeStorageInfo storage : storageInfos) {
int index = info.findStorageInfo(storage);
Assert.assertEquals(i++, index);
Assert.assertEquals(index, indices[index]);
}
// the same block is reported from the same storage twice
i = 0;
for (DatanodeStorageInfo storage : storageInfos) {
Assert.assertTrue(info.addStorage(storage, blocks[i++]));
}
Assert.assertEquals(totalBlocks, info.getCapacity());
Assert.assertEquals(totalBlocks, info.numNodes());
Assert.assertEquals(totalBlocks, indices.length);
i = 0;
for (DatanodeStorageInfo storage : storageInfos) {
int index = info.findStorageInfo(storage);
Assert.assertEquals(i++, index);
Assert.assertEquals(index, indices[index]);
}
// the same block is reported from another storage
DatanodeStorageInfo[] storageInfos2 =
DFSTestUtil.createDatanodeStorageInfos(totalBlocks * 2);
// only add the second half of info2
for (i = totalBlocks; i < storageInfos2.length; i++) {
info.addStorage(storageInfos2[i], blocks[i % totalBlocks]);
Assert.assertEquals(i + 1, info.getCapacity());
Assert.assertEquals(i + 1, info.numNodes());
indices = (byte[]) Whitebox.getInternalState(info, "indices");
Assert.assertEquals(i + 1, indices.length);
}
for (i = totalBlocks; i < storageInfos2.length; i++) {
int index = info.findStorageInfo(storageInfos2[i]);
      Assert.assertEquals(i, index);
Assert.assertEquals(index - totalBlocks, indices[index]);
}
}
|
@Override
public final void isEqualTo(@Nullable Object other) {
@SuppressWarnings("UndefinedEquals") // the contract of this method is to follow Multimap.equals
boolean isEqual = Objects.equal(actual, other);
if (isEqual) {
return;
}
// Fail but with a more descriptive message:
if ((actual instanceof ListMultimap && other instanceof SetMultimap)
|| (actual instanceof SetMultimap && other instanceof ListMultimap)) {
String actualType = (actual instanceof ListMultimap) ? "ListMultimap" : "SetMultimap";
String otherType = (other instanceof ListMultimap) ? "ListMultimap" : "SetMultimap";
failWithoutActual(
fact("expected", other),
fact("an instance of", otherType),
fact("but was", actualCustomStringRepresentationForPackageMembersToCall()),
fact("an instance of", actualType),
simpleFact(
lenientFormat(
"a %s cannot equal a %s if either is non-empty", actualType, otherType)));
} else if (actual instanceof ListMultimap) {
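      // Delegate to containsExactlyEntriesIn for an entry-level failure message; comparison
      // against a ListMultimap is order-sensitive, hence the inOrder() call.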
containsExactlyEntriesIn((Multimap<?, ?>) checkNotNull(other)).inOrder();
} else if (actual instanceof SetMultimap) {
containsExactlyEntriesIn((Multimap<?, ?>) checkNotNull(other));
} else {
super.isEqualTo(other);
}
}
|
@Test
public void setMultimapIsEqualToListMultimap_fails() {
ImmutableSetMultimap<String, String> multimapA =
ImmutableSetMultimap.<String, String>builder()
.putAll("kurt", "kluever", "russell", "cobain")
.build();
ImmutableListMultimap<String, String> multimapB =
ImmutableListMultimap.<String, String>builder()
.putAll("kurt", "kluever", "russell", "cobain")
.build();
expectFailureWhenTestingThat(multimapA).isEqualTo(multimapB);
assertFailureKeys(
"expected",
"an instance of",
"but was",
"an instance of",
"a SetMultimap cannot equal a ListMultimap if either is non-empty");
assertFailureValueIndexed("an instance of", 0, "ListMultimap");
assertFailureValueIndexed("an instance of", 1, "SetMultimap");
}
|
@Implementation
public static synchronized Context getRemoteContext(Context context) {
return googlePlayServicesUtilImpl.getRemoteContext(context);
}
|
@Test
public void getRemoteContext_defaultNotNull() {
assertNotNull(GooglePlayServicesUtil.getRemoteContext(RuntimeEnvironment.getApplication()));
}
|
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
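      // If a position bound was supplied, require the store to have caught up to it before serving the read.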
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldRangeQueryWithCorrectParams_noBounds() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getIteratorResult());
// When:
table.get(PARTITION, null, null);
// Then:
verify(kafkaStreams).query(queryTypeCaptor.capture());
StateQueryRequest request = queryTypeCaptor.getValue();
assertThat(request.getQuery(), instanceOf(RangeQuery.class));
RangeQuery rangeQuery = (RangeQuery)request.getQuery();
assertThat(rangeQuery.getLowerBound(), is(Optional.empty()));
assertThat(rangeQuery.getUpperBound(), is(Optional.empty()));
}
|
@Override
public String get(String name) {
checkKey(name);
String value = null;
String[] keyParts = splitKey(name);
String ns = registry.getNamespaceURI(keyParts[0]);
if (ns != null) {
try {
XMPProperty prop = xmpData.getProperty(ns, keyParts[1]);
if (prop != null && prop.getOptions().isSimple()) {
value = prop.getValue();
} else if (prop != null && prop.getOptions().isArray()) {
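                    // For array properties, fall back to the first item (XMP array indices are 1-based).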
prop = xmpData.getArrayItem(ns, keyParts[1], 1);
value = prop.getValue();
}
// in all other cases, null is returned
} catch (XMPException e) {
// Ignore
}
}
return value;
}
|
@Test
public void get_notQualifiedKey_throw() {
assertThrows(PropertyTypeException.class, () -> {
xmpMeta.get("wrongKey");
});
}
|
public static Map<TopicPartition, Long> parseSinkConnectorOffsets(Map<Map<String, ?>, Map<String, ?>> partitionOffsets) {
Map<TopicPartition, Long> parsedOffsetMap = new HashMap<>();
for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : partitionOffsets.entrySet()) {
Map<String, ?> partitionMap = partitionOffset.getKey();
if (partitionMap == null) {
throw new BadRequestException("The partition for a sink connector offset cannot be null or missing");
}
if (!partitionMap.containsKey(KAFKA_TOPIC_KEY) || !partitionMap.containsKey(KAFKA_PARTITION_KEY)) {
throw new BadRequestException(String.format("The partition for a sink connector offset must contain the keys '%s' and '%s'",
KAFKA_TOPIC_KEY, KAFKA_PARTITION_KEY));
}
if (partitionMap.get(KAFKA_TOPIC_KEY) == null) {
throw new BadRequestException("Kafka topic names must be valid strings and may not be null");
}
if (partitionMap.get(KAFKA_PARTITION_KEY) == null) {
throw new BadRequestException("Kafka partitions must be valid numbers and may not be null");
}
String topic = String.valueOf(partitionMap.get(KAFKA_TOPIC_KEY));
int partition;
try {
// We parse it this way because both "10" and 10 should be accepted as valid partition values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
partition = Integer.parseInt(String.valueOf(partitionMap.get(KAFKA_PARTITION_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka partition value in the provided offsets: '" +
partitionMap.get(KAFKA_PARTITION_KEY) + "'. Partition values for sink connectors need " +
"to be integers.", e);
}
TopicPartition tp = new TopicPartition(topic, partition);
Map<String, ?> offsetMap = partitionOffset.getValue();
if (offsetMap == null) {
// represents an offset reset
parsedOffsetMap.put(tp, null);
} else {
if (!offsetMap.containsKey(KAFKA_OFFSET_KEY)) {
throw new BadRequestException(String.format("The offset for a sink connector should either be null or contain " +
"the key '%s'", KAFKA_OFFSET_KEY));
}
long offset;
try {
// We parse it this way because both "1000" and 1000 should be accepted as valid offset values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
offset = Long.parseLong(String.valueOf(offsetMap.get(KAFKA_OFFSET_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka offset value in the provided offsets: '" +
offsetMap.get(KAFKA_OFFSET_KEY) + "'. Offset values for sink connectors need " +
"to be integers.", e);
}
parsedOffsetMap.put(tp, offset);
}
}
return parsedOffsetMap;
}
|
@Test
public void testValidateAndParseEmptyPartitionOffsetMap() {
// expect no exception to be thrown
Map<TopicPartition, Long> parsedOffsets = SinkUtils.parseSinkConnectorOffsets(new HashMap<>());
assertTrue(parsedOffsets.isEmpty());
}
|
void writeConfigToDisk() {
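        // Build the TLS configuration from the current Vespa TLS context (including insecure mixed
        // mode), falling back to a TLS-disabled configuration when no context is available.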
VespaTlsConfig config = VespaZookeeperTlsContextUtils.tlsContext()
.map(ctx -> new VespaTlsConfig(ctx, TransportSecurityUtils.getInsecureMixedMode()))
.orElse(VespaTlsConfig.tlsDisabled());
writeConfigToDisk(config);
}
|
@Test(expected = RuntimeException.class)
public void require_that_this_id_must_be_present_amongst_servers() {
ZookeeperServerConfig.Builder builder = new ZookeeperServerConfig.Builder();
builder.zooKeeperConfigFile(cfgFile.getAbsolutePath());
builder.server(newServer(1, "bar", 234, 432, false));
builder.server(newServer(2, "baz", 345, 543, false));
builder.myid(0);
new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
}
|
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);
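    // With no argument, report the current setting; with one argument, update it to ON or OFF.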
if (args.isEmpty()) {
final String setting = requestPipeliningSupplier.get() ? "ON" : "OFF";
terminal.printf("Current %s configuration: %s%n", NAME, setting);
} else {
final String newSetting = args.get(0);
switch (newSetting.toUpperCase()) {
case "ON":
requestPipeliningConsumer.accept(true);
break;
case "OFF":
requestPipeliningConsumer.accept(false);
break;
default:
terminal.printf("Invalid %s setting: %s. ", NAME, newSetting);
terminal.println("Valid options are 'ON' and 'OFF'.");
return;
}
terminal.println(NAME + " configuration is now " + newSetting.toUpperCase());
}
}
|
@Test
public void shouldRejectUpdateOnInvalidSetting() {
// When:
requestPipeliningCommand.execute(ImmutableList.of("bad"), terminal);
// Then:
verify(settingConsumer, never()).accept(anyBoolean());
assertThat(out.toString(),
containsString(String.format("Invalid %s setting: bad", RequestPipeliningCommand.NAME)));
assertThat(out.toString(), containsString("Valid options are 'ON' and 'OFF'"));
}
|
@Override
public ResponseHeader execute() throws SQLException {
check(sqlStatement);
ProxyContext.getInstance().getContextManager().getPersistServiceFacade().getMetaDataManagerPersistService().createDatabase(sqlStatement.getDatabaseName());
return new UpdateResponseHeader(sqlStatement);
}
|
@Test
void assertExecuteCreateExistDatabaseWithIfNotExists() throws SQLException {
when(statement.getDatabaseName()).thenReturn("foo_db");
when(statement.isIfNotExists()).thenReturn(true);
ContextManager contextManager = mockContextManager();
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
assertThat(handler.execute(), instanceOf(UpdateResponseHeader.class));
}
|
@Override
public void onWorkflowFinalized(Workflow workflow) {
WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
String reason = workflow.getReasonForIncompletion();
LOG.info(
"Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
summary.getIdentity(),
workflow.getWorkflowId(),
workflow.getStatus(),
reason);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"onWorkflowFinalized",
MetricConstants.STATUS_TAG,
workflow.getStatus().name());
if (reason != null
&& workflow.getStatus() == Workflow.WorkflowStatus.FAILED
&& reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
LOG.info(
"Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId());
return; // special case doing nothing
}
WorkflowInstance.Status instanceStatus =
instanceDao.getWorkflowInstanceStatus(
summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
if (instanceStatus == null
|| (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
LOG.info(
"Workflow {} with execution_id [{}] does not exist or already "
+ "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId(),
instanceStatus,
workflow.getStatus());
return;
}
Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
// cancel internally failed tasks
realTaskMap.values().stream()
.filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
.forEach(task -> maestroTask.cancel(workflow, task, null));
WorkflowRuntimeOverview overview =
TaskHelper.computeOverview(
objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
try {
validateAndUpdateOverview(overview, summary);
switch (workflow.getStatus()) {
case TERMINATED: // stopped due to stop request
if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
} else {
update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
}
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
Optional<Task.Status> done =
TaskHelper.checkProgress(realTaskMap, summary, overview, true);
switch (done.orElse(Task.Status.IN_PROGRESS)) {
/**
* This is a special status to indicate that the workflow has succeeded. Check {@link
* TaskHelper#checkProgress} for more details.
*/
case FAILED_WITH_TERMINAL_ERROR:
WorkflowInstance.Status nextStatus =
AggregatedViewHelper.deriveAggregatedStatus(
instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
if (!nextStatus.isTerminal()) {
throw new MaestroInternalError(
"Invalid status: [%s], expecting a terminal one", nextStatus);
}
update(workflow, nextStatus, summary, overview);
break;
case FAILED:
case CANCELED: // due to step failure
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
// all other status are invalid
default:
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"invalidStatusOnWorkflowFinalized");
throw new MaestroInternalError(
"Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
}
break;
}
} catch (MaestroInternalError | IllegalArgumentException e) {
// non-retryable error and still fail the instance
LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"nonRetryableErrorOnWorkflowFinalized");
update(
workflow,
WorkflowInstance.Status.FAILED,
summary,
overview,
Details.create(
e.getMessage(), "onWorkflowFinalized is failed with non-retryable error."));
}
}
|
@Test
public void testInconsistentStatsOnWorkflowFinalized() {
StepRuntimeState state = new StepRuntimeState();
state.setStatus(StepInstance.Status.FATALLY_FAILED);
when(stepInstanceDao.getAllStepStates(any(), anyLong(), anyLong()))
.thenReturn(singletonMap("foo", state));
when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TERMINATED);
when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
.thenReturn(WorkflowInstance.Status.IN_PROGRESS);
statusListener.onWorkflowFinalized(workflow);
Assert.assertEquals(
1L,
metricRepo
.getCounter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
MaestroWorkflowStatusListener.class,
"type",
"inconsistentStatsOnWorkflowFinalized")
.count());
ArgumentCaptor<Timeline> timelineCaptor = ArgumentCaptor.forClass(Timeline.class);
verify(instanceDao, times(1))
.updateWorkflowInstance(
any(), any(), timelineCaptor.capture(), eq(WorkflowInstance.Status.STOPPED), anyLong());
Timeline timeline = timelineCaptor.getValue();
Assert.assertEquals(1, timeline.getTimelineEvents().size());
Assert.assertEquals(
"Workflow instance status is updated to [STOPPED] due to [test-reason]",
timeline.getTimelineEvents().get(0).getMessage());
verify(publisher, times(1)).publishOrThrow(any(), any());
}
|