focal_method | test_case
---|---
public Member getMember() {
return member;
}
|
@Test
public void testGetMember() {
assertEquals(member, logEvent.getMember());
}
|
public static void saveConsanguinity(Consanguinity consanguinity) {
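// Merge the new providers into an existing entry for this service key; otherwise insert atomically.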
if (CONSANGUINITY_MAP.containsKey(consanguinity.getServiceKey())) {
Consanguinity consanguinityOld = CONSANGUINITY_MAP.get(consanguinity.getServiceKey());
consanguinityOld.setProviders(consanguinity.getProviders());
} else {
CONSANGUINITY_MAP.putIfAbsent(consanguinity.getServiceKey(), consanguinity);
}
}
|
@Test
public void saveConsanguinity() {
Consanguinity consanguinity = new Consanguinity();
List<Contract> contractList = new ArrayList<>();
Contract contract = new Contract();
contract.setIp(DEFAULT_IP);
contract.setPort(DEFAULT_PORT);
contractList.add(contract);
consanguinity.setProviders(contractList);
consanguinity.setServiceKey(CollectorCacheTest.class.getName());
CollectorCache.saveConsanguinity(consanguinity);
Assert.assertFalse(CollectorCache.CONSANGUINITY_MAP.isEmpty());
Assert.assertTrue(CollectorCache.CONSANGUINITY_MAP.containsKey(CollectorCacheTest.class.getName()));
}
|
List<ParsedTerm> identifyUnknownFields(final Set<String> availableFields, final List<ParsedTerm> terms) {
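// Keep only non-default terms whose real field name is neither searchable, reserved, nor available, then group duplicates by field name.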
final Map<String, List<ParsedTerm>> groupedByField = terms.stream()
.filter(t -> !t.isDefaultField())
.filter(term -> !SEARCHABLE_ES_FIELDS.contains(term.getRealFieldName()))
.filter(term -> !RESERVED_SETTABLE_FIELDS.contains(term.getRealFieldName()))
.filter(term -> !availableFields.contains(term.getRealFieldName()))
.distinct()
.collect(Collectors.groupingBy(ParsedTerm::getRealFieldName));
return unknownFieldsListLimiter.filterElementsContainingUsefulInformation(groupedByField);
}
|
@Test
void testDoesNotIdentifyGraylogReservedFieldsAsUnknown() {
final List<ParsedTerm> unknownFields = toTest.identifyUnknownFields(
Set.of("some_normal_field"),
RESERVED_SETTABLE_FIELDS.stream().map(f -> ParsedTerm.create(f, "buba")).collect(Collectors.toList())
);
assertTrue(unknownFields.isEmpty());
}
|
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@Test
public void testPartitionedOr() throws Exception {
createPartitionedTable(spark, tableName, "years(ts), bucket(5, id)");
SparkScanBuilder builder = scanBuilder();
YearsFunction.TimestampToYearsFunction tsToYears = new YearsFunction.TimestampToYearsFunction();
UserDefinedScalarFunc udf1 = toUDF(tsToYears, expressions(fieldRef("ts")));
Predicate predicate1 = new Predicate("=", expressions(udf1, intLit(2018 - 1970)));
BucketFunction.BucketLong bucketLong = new BucketFunction.BucketLong(DataTypes.LongType);
UserDefinedScalarFunc udf = toUDF(bucketLong, expressions(intLit(5), fieldRef("id")));
Predicate predicate2 = new Predicate(">=", expressions(udf, intLit(2)));
Predicate predicate = new Or(predicate1, predicate2);
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(6);
// NOT (years(ts) = 48 OR bucket(id, 5) >= 2)
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(4);
}
|
@Override
protected Future<KafkaMirrorMakerStatus> createOrUpdate(Reconciliation reconciliation, KafkaMirrorMaker assemblyResource) {
String namespace = reconciliation.namespace();
KafkaMirrorMakerCluster mirror;
KafkaMirrorMakerStatus kafkaMirrorMakerStatus = new KafkaMirrorMakerStatus();
try {
mirror = KafkaMirrorMakerCluster.fromCrd(reconciliation, assemblyResource, versions, sharedEnvironmentProvider);
} catch (Exception e) {
LOGGER.warnCr(reconciliation, e);
StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, e);
return Future.failedFuture(new ReconciliationException(kafkaMirrorMakerStatus, e));
}
Map<String, String> annotations = new HashMap<>(1);
KafkaClientAuthentication authConsumer = assemblyResource.getSpec().getConsumer().getAuthentication();
List<CertSecretSource> trustedCertificatesConsumer = assemblyResource.getSpec().getConsumer().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getConsumer().getTls().getTrustedCertificates();
KafkaClientAuthentication authProducer = assemblyResource.getSpec().getProducer().getAuthentication();
List<CertSecretSource> trustedCertificatesProducer = assemblyResource.getSpec().getProducer().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getProducer().getTls().getTrustedCertificates();
Promise<KafkaMirrorMakerStatus> createOrUpdatePromise = Promise.promise();
boolean mirrorHasZeroReplicas = mirror.getReplicas() == 0;
LOGGER.debugCr(reconciliation, "Updating Kafka Mirror Maker cluster");
mirrorMakerServiceAccount(reconciliation, namespace, mirror)
.compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
.compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, mirror.logging(), mirror.metrics()))
.compose(metricsAndLoggingCm -> {
ConfigMap logAndMetricsConfigMap = mirror.generateMetricsAndLogConfigMap(metricsAndLoggingCm);
annotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, Util.hashStub(logAndMetricsConfigMap.getData().get(mirror.logging().configMapKey())));
return configMapOperations.reconcile(reconciliation, namespace, KafkaMirrorMakerResources.metricsAndLogConfigMapName(reconciliation.name()), logAndMetricsConfigMap);
})
.compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generatePodDisruptionBudget()))
.compose(i -> Future.join(VertxUtil.authTlsHash(secretOperations, namespace, authConsumer, trustedCertificatesConsumer),
VertxUtil.authTlsHash(secretOperations, namespace, authProducer, trustedCertificatesProducer)))
.compose(hashFut -> {
if (hashFut != null) {
annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString((int) hashFut.resultAt(0) + (int) hashFut.resultAt(1)));
}
return Future.succeededFuture();
})
.compose(i -> deploymentOperations.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
.compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
.compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
.compose(i -> mirrorHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
.onComplete(reconciliationResult -> {
StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, reconciliationResult.cause());
// Add warning about Mirror Maker 1 being deprecated and removed soon
LOGGER.warnCr(reconciliation, "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.");
StatusUtils.addConditionsToStatus(kafkaMirrorMakerStatus, Set.of(StatusUtils.buildWarningCondition("MirrorMaker1Deprecation", "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.")));
kafkaMirrorMakerStatus.setReplicas(mirror.getReplicas());
kafkaMirrorMakerStatus.setLabelSelector(mirror.getSelectorLabels().toSelectorString());
if (reconciliationResult.succeeded()) {
createOrUpdatePromise.complete(kafkaMirrorMakerStatus);
} else {
createOrUpdatePromise.fail(new ReconciliationException(kafkaMirrorMakerStatus, reconciliationResult.cause()));
}
}
);
return createOrUpdatePromise.future();
}
|
@Test
public void testUpdateClusterNoDiff(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
CrdOperator<KubernetesClient, KafkaMirrorMaker, KafkaMirrorMakerList> mockMirrorOps = supplier.mirrorMakerOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
String kmmName = "foo";
String kmmNamespace = "test";
KafkaMirrorMakerConsumerSpec consumer = new KafkaMirrorMakerConsumerSpecBuilder()
.withBootstrapServers(consumerBootstrapServers)
.withGroupId(groupId)
.withNumStreams(numStreams)
.build();
KafkaMirrorMakerProducerSpec producer = new KafkaMirrorMakerProducerSpecBuilder()
.withBootstrapServers(producerBootstrapServers)
.build();
KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include);
KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS, SHARED_ENV_PROVIDER);
when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm);
when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm));
when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture());
when(mockDcOps.get(kmmNamespace, mirror.getComponentName())).thenReturn(mirror.generateDeployment(new HashMap<>(), true, null, null));
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDcOps.reconcile(any(), eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleUp(any(), eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleDown(any(), eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getComponentName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx,
new PlatformFeaturesAvailability(true, kubernetesVersion),
new MockCertManager(), new PasswordGenerator(10, "a", "a"),
supplier,
ResourceUtils.dummyClusterOperatorConfig(VERSIONS));
Checkpoint async = context.checkpoint();
ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm)
.onComplete(context.succeeding(v -> context.verify(() -> {
// Verify Deployment Config
List<Deployment> capturedDc = dcCaptor.getAllValues();
assertThat(capturedDc, hasSize(1));
// Verify PodDisruptionBudget
List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
assertThat(capturedPdb, hasSize(1));
PodDisruptionBudget pdb = capturedPdb.get(0);
assertThat(pdb.getMetadata().getName(), is(mirror.getComponentName()));
assertThat("PodDisruptionBudgets are not equal", pdb, is(mirror.generatePodDisruptionBudget()));
// Verify scaleDown / scaleUp were each invoked exactly once (a no-op scale to the same replica count)
assertThat(dcScaleDownNameCaptor.getAllValues(), hasSize(1));
assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1));
async.flag();
})));
}
|
public Token nextToken() throws IOException
{
Token curToken = aheadToken;
//System.out.println(curToken); // for debugging
aheadToken = readToken(curToken);
return curToken;
}
|
@Test
void testEmptyName() throws IOException
{
String s = "dup 127 / put";
Type1Lexer t1l = new Type1Lexer(s.getBytes(StandardCharsets.US_ASCII));
Token nextToken;
try
{
do
{
nextToken = t1l.nextToken();
}
while (nextToken != null);
Assertions.fail("DamagedFontException expected");
}
catch (DamagedFontException ex)
{
Assertions.assertEquals("Could not read token at position 9", ex.getMessage());
}
}
|
@Override
public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) {
Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:"
+ " URI path should not be null");
try {
return S3AUnderFileSystem.createInstance(new AlluxioURI(path), conf);
} catch (AmazonClientException e) {
throw Throwables.propagate(e);
}
}
|
@Test
public void createInstanceWithNullPath() {
Exception e = assertThrows(NullPointerException.class, () -> mFactory1.create(
null, mConf));
assertTrue(e.getMessage().contains("Unable to create UnderFileSystem instance: URI "
+ "path should not be null"));
}
|
public Predicate convert(List<ScalarOperator> operators, DeltaLakeContext context) {
DeltaLakeExprVisitor visitor = new DeltaLakeExprVisitor();
List<Predicate> predicates = Lists.newArrayList();
for (ScalarOperator operator : operators) {
Predicate predicate = operator.accept(visitor, context);
if (predicate != null) {
predicates.add(predicate);
}
}
Optional<Predicate> result = predicates.stream().reduce(And::new);
return result.orElse(ALWAYS_TRUE);
}
|
@Test
public void testConvertIsNullPredicate() {
ScalarOperationToDeltaLakeExpr converter = new ScalarOperationToDeltaLakeExpr();
ScalarOperationToDeltaLakeExpr.DeltaLakeContext context =
new ScalarOperationToDeltaLakeExpr.DeltaLakeContext(schema, new HashSet<>());
List<ScalarOperator> operators;
// is null
IsNullPredicateOperator isNullPredicateOperator = new IsNullPredicateOperator(false, cIntCol);
operators = new ArrayList<>(List.of(isNullPredicateOperator));
Predicate convertExpr = converter.convert(operators, context);
Predicate expectedExpr = new Predicate("IS_NULL", cDeltaIntCol);
Assert.assertEquals(convertExpr.toString(), expectedExpr.toString());
// is not null
isNullPredicateOperator = new IsNullPredicateOperator(true, cIntCol);
operators = new ArrayList<>(List.of(isNullPredicateOperator));
convertExpr = converter.convert(operators, context);
expectedExpr = new Predicate("IS_NOT_NULL", cDeltaIntCol);
Assert.assertEquals(convertExpr.toString(), expectedExpr.toString());
}
|
MatchResult matchPluginTemplate(String ownerTemplate, String template) {
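// Either name may carry a plugin prefix recognized by PLUGIN_TEMPLATE_PATTERN; a match on either strips the prefix and records the plugin name.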
boolean matches = false;
String pluginName = null;
String templateName = template;
String ownerTemplateName = ownerTemplate;
if (StringUtils.isNotBlank(ownerTemplate)) {
Matcher ownerTemplateMatcher = PLUGIN_TEMPLATE_PATTERN.matcher(ownerTemplate);
if (ownerTemplateMatcher.matches()) {
matches = true;
pluginName = ownerTemplateMatcher.group(1);
ownerTemplateName = ownerTemplateMatcher.group(2);
}
}
Matcher templateMatcher = PLUGIN_TEMPLATE_PATTERN.matcher(template);
if (templateMatcher.matches()) {
matches = true;
pluginName = templateMatcher.group(1);
templateName = templateMatcher.group(2);
}
return new MatchResult(pluginName, ownerTemplateName, templateName, matches);
}
|
@Test
void matchPluginTemplateWhenDoesNotMatch() {
var result =
templateResolver.matchPluginTemplate("doc", "modules/layout");
assertThat(result.matches()).isFalse();
}
|
public static ByteString dataMapToByteString(Map<String, String> headers, DataMap dataMap) throws MimeTypeParseException, IOException
{
return ByteString.unsafeWrap(getContentType(headers).getCodec().mapToBytes(dataMap));
}
|
@Test
public void testDataMapToJSONByteStringWithUnsupportedContentType() throws MimeTypeParseException, IOException
{
// unsupported content type should fall back to JSON
DataMap testDataMap = createTestDataMap();
byte[] expectedBytes = JACKSON_DATA_CODEC.mapToBytes(testDataMap);
Map<String, String> headers = Collections.singletonMap(RestConstants.HEADER_CONTENT_TYPE, "mysuperkool/xson");
ByteString byteString = DataMapConverter.dataMapToByteString(headers, testDataMap);
Assert.assertEquals(byteString.copyBytes(), expectedBytes);
}
|
@VisibleForTesting
static String convertProtoPropertyNameToJavaPropertyName(String input) {
boolean capitalizeNextLetter = true;
Preconditions.checkArgument(!Strings.isNullOrEmpty(input));
StringBuilder result = new StringBuilder(input.length());
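// Lower-case letters are capitalized at word starts, upper-case letters pass through, digits are kept but start a new word, and anything else is dropped and starts a new word.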
for (int i = 0; i < input.length(); i++) {
final char c = input.charAt(i);
if (Character.isLowerCase(c)) {
if (capitalizeNextLetter) {
result.append(Character.toUpperCase(c));
} else {
result.append(c);
}
capitalizeNextLetter = false;
} else if (Character.isUpperCase(c)) {
if (i == 0 && !capitalizeNextLetter) {
// Force first letter to lower-case unless explicitly told to
// capitalize it.
result.append(Character.toLowerCase(c));
} else {
// Capital letters after the first are left as-is.
result.append(c);
}
capitalizeNextLetter = false;
} else if ('0' <= c && c <= '9') {
result.append(c);
capitalizeNextLetter = true;
} else {
capitalizeNextLetter = true;
}
}
// Add a trailing "_" if the name should be altered.
if (input.charAt(input.length() - 1) == '#') {
result.append('_');
}
return result.toString();
}
|
@Test
public void testGetterNameCreationForProtoPropertyWithUnderscore() {
Assert.assertEquals(
JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_UNDERSCORE,
ProtoByteBuddyUtils.convertProtoPropertyNameToJavaPropertyName(
PROTO_PROPERTY_WITH_UNDERSCORE));
}
|
@Override
public DataNodeDto startNode(String nodeId) throws NodeNotFoundException {
final DataNodeDto node = nodeService.byNodeId(nodeId);
if (node.getDataNodeStatus() != DataNodeStatus.UNAVAILABLE && node.getDataNodeStatus() != DataNodeStatus.PREPARED) {
throw new IllegalArgumentException("Only stopped data nodes can be started.");
}
DataNodeLifecycleEvent e = DataNodeLifecycleEvent.create(node.getNodeId(), DataNodeLifecycleTrigger.START);
clusterEventBus.post(e);
return node;
}
|
@Test
public void startNodePublishesClusterEvent() throws NodeNotFoundException {
final String testNodeId = "node";
nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.UNAVAILABLE));
classUnderTest.startNode(testNodeId);
verify(clusterEventBus).post(DataNodeLifecycleEvent.create(testNodeId, DataNodeLifecycleTrigger.START));
}
|
static void validateCsvFormat(CSVFormat format) {
String[] header =
checkArgumentNotNull(format.getHeader(), "Illegal %s: header is required", CSVFormat.class);
checkArgument(header.length > 0, "Illegal %s: header cannot be empty", CSVFormat.class);
checkArgument(
!format.getAllowMissingColumnNames(),
"Illegal %s: cannot allow missing column names",
CSVFormat.class);
checkArgument(
!format.getIgnoreHeaderCase(), "Illegal %s: cannot ignore header case", CSVFormat.class);
checkArgument(
!format.getAllowDuplicateHeaderNames(),
"Illegal %s: cannot allow duplicate header names",
CSVFormat.class);
for (String columnName : header) {
checkArgument(
!Strings.isNullOrEmpty(columnName),
"Illegal %s: column name is required",
CSVFormat.class);
}
checkArgument(
!format.getSkipHeaderRecord(),
"Illegal %s: cannot skip header record because the header is already accounted for",
CSVFormat.class);
}
|
@Test
public void givenCSVFormatWithHeader_validates() {
CSVFormat format = csvFormatWithHeader();
CsvIOParseHelpers.validateCsvFormat(format);
}
|
public static DataflowRunner fromOptions(PipelineOptions options) {
DataflowPipelineOptions dataflowOptions =
PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
ArrayList<String> missing = new ArrayList<>();
if (dataflowOptions.getAppName() == null) {
missing.add("appName");
}
if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
&& isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
missing.add("region");
}
if (missing.size() > 0) {
throw new IllegalArgumentException(
"Missing required pipeline options: " + Joiner.on(',').join(missing));
}
validateWorkerSettings(
PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
PathValidator validator = dataflowOptions.getPathValidator();
String gcpTempLocation;
try {
gcpTempLocation = dataflowOptions.getGcpTempLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires gcpTempLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(gcpTempLocation);
String stagingLocation;
try {
stagingLocation = dataflowOptions.getStagingLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires stagingLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(stagingLocation);
if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
}
if (dataflowOptions.getFilesToStage() != null) {
// The user specifically requested these files, so fail now if they do not exist.
// (automatically detected classpath elements are permitted to not exist, so later
// staging will not fail on nonexistent files)
dataflowOptions.getFilesToStage().stream()
.forEach(
stagedFileSpec -> {
File localFile;
if (stagedFileSpec.contains("=")) {
String[] components = stagedFileSpec.split("=", 2);
localFile = new File(components[1]);
} else {
localFile = new File(stagedFileSpec);
}
if (!localFile.exists()) {
// should be FileNotFoundException, but for build-time backwards compatibility
// cannot add checked exception
throw new RuntimeException(
String.format("Non-existent files specified in filesToStage: %s", localFile));
}
});
} else {
dataflowOptions.setFilesToStage(
detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
if (dataflowOptions.getFilesToStage().isEmpty()) {
throw new IllegalArgumentException("No files to stage has been found.");
} else {
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
dataflowOptions.getFilesToStage().size());
LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
}
}
// Verify jobName according to service requirements, converting to lowercase if
// necessary.
String jobName = dataflowOptions.getJobName().toLowerCase();
checkArgument(
jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
"JobName invalid; the name must consist of only the characters "
+ "[-a-z0-9], starting with a letter and ending with a letter "
+ "or number");
if (!jobName.equals(dataflowOptions.getJobName())) {
LOG.info(
"PipelineOptions.jobName did not match the service requirements. "
+ "Using {} instead of {}.",
jobName,
dataflowOptions.getJobName());
}
dataflowOptions.setJobName(jobName);
// Verify project
String project = dataflowOptions.getProject();
if (project.matches("[0-9]*")) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project number.");
} else if (!project.matches(PROJECT_ID_REGEXP)) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project"
+ " description.");
}
DataflowPipelineDebugOptions debugOptions =
dataflowOptions.as(DataflowPipelineDebugOptions.class);
// Verify the number of worker threads is a valid value
if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
throw new IllegalArgumentException(
"Number of worker harness threads '"
+ debugOptions.getNumberOfWorkerHarnessThreads()
+ "' invalid. Please make sure the value is non-negative.");
}
// Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
if (dataflowOptions.getRecordJfrOnGcThrashing()
&& Environments.getJavaVersion() == Environments.JavaVersion.java8) {
throw new IllegalArgumentException(
"recordJfrOnGcThrashing is only supported on java 9 and up.");
}
if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
}
// Adding the Java version to the SDK name for users' and support convenience.
String agentJavaVer = "(JRE 8 environment)";
if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
agentJavaVer =
String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
}
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String userAgentName = dataflowRunnerInfo.getName();
Preconditions.checkArgument(
!userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
String userAgentVersion = dataflowRunnerInfo.getVersion();
Preconditions.checkArgument(
!userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
String userAgent =
String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
dataflowOptions.setUserAgent(userAgent);
return new DataflowRunner(dataflowOptions);
}
|
@Test
public void testFromOptionsWithUppercaseConvertsToLowercase() throws Exception {
String mixedCase = "ThisJobNameHasMixedCase";
DataflowPipelineOptions options = buildPipelineOptions();
options.setJobName(mixedCase);
DataflowRunner.fromOptions(options);
assertThat(options.getJobName(), equalTo(mixedCase.toLowerCase()));
}
|
public static Duration parseDuration(String text) {
checkNotNull(text);
final String trimmed = text.trim();
checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
int pos = 0;
char current;
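// Consume the leading run of digits; the remainder (trimmed, lower-cased) is the unit label.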
while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
pos++;
}
final String number = trimmed.substring(0, pos);
final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
if (number.isEmpty()) {
throw new NumberFormatException("text does not start with a number");
}
final BigInteger value;
try {
value = new BigInteger(number); // this throws a NumberFormatException
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"The value '" + number + "' cannot be represented as an integer number.", e);
}
final ChronoUnit unit;
if (unitLabel.isEmpty()) {
unit = ChronoUnit.MILLIS;
} else {
unit = LABEL_TO_UNIT_MAP.get(unitLabel);
}
if (unit == null) {
throw new IllegalArgumentException(
"Time interval unit label '"
+ unitLabel
+ "' does not match any of the recognized units: "
+ TimeUnit.getAllUnits());
}
try {
return convertBigIntToDuration(value, unit);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(
"The value '"
+ number
+ "' cannot be represented as java.time.Duration (numeric overflow).",
e);
}
}
|
@Test
void testParseDurationTrim() {
assertThat(TimeUtils.parseDuration(" 155 ").toMillis()).isEqualTo(155L);
assertThat(TimeUtils.parseDuration(" 155 ms ").toMillis()).isEqualTo(155L);
}
|
@Override
public Sensor addRateTotalSensor(final String scopeName,
final String entityName,
final String operationName,
final Sensor.RecordingLevel recordingLevel,
final String... tags) {
final String threadId = Thread.currentThread().getName();
final Map<String, String> tagMap = customizedTags(threadId, scopeName, entityName, tags);
return customInvocationRateAndCountSensor(
threadId,
groupNameFromScope(scopeName),
entityName,
operationName,
tagMap,
recordingLevel
);
}
|
@Test
public void shouldAddRateTotalSensorWithCustomTags() {
final Sensor sensor = streamsMetrics.addRateTotalSensor(
SCOPE_NAME,
ENTITY_NAME,
OPERATION_NAME,
RecordingLevel.DEBUG,
CUSTOM_TAG_KEY1,
CUSTOM_TAG_VALUE1,
CUSTOM_TAG_KEY2,
CUSTOM_TAG_VALUE2
);
final Map<String, String> tags = customTags(streamsMetrics);
shouldAddCustomSensorWithTags(
sensor,
Arrays.asList(
OPERATION_NAME + TOTAL_SUFFIX,
OPERATION_NAME + RATE_SUFFIX
),
tags
);
}
|
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
switch (request.getCode()) {
case RequestCode.QUERY_ASSIGNMENT:
return this.queryAssignment(ctx, request);
case RequestCode.SET_MESSAGE_REQUEST_MODE:
return this.setMessageRequestMode(ctx, request);
default:
break;
}
return null;
}
|
@Test
public void testSetMessageRequestMode_Success() throws Exception {
brokerController.getProducerManager().registerProducer(group, clientInfo);
final RemotingCommand request = createSetMessageRequestModeRequest(topic);
RemotingCommand responseToReturn = queryAssignmentProcessor.processRequest(handlerContext, request);
assertThat(responseToReturn.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
|
@Override
public ConsumeQueueInterface getConsumeQueue(String topic, int queueId) {
return findConsumeQueue(topic, queueId);
}
|
@Test
public void testGetStoreTime_EverythingIsOk() {
if (notExecuted()) {
return;
}
final int totalCount = 10;
int queueId = 0;
String topic = "FooBar";
AppendMessageResult[] appendMessageResults = putMessages(totalCount, topic, queueId, false);
//Thread.sleep(10);
StoreTestUtil.waitCommitLogReput((RocksDBMessageStore) messageStore);
ConsumeQueueInterface consumeQueue = messageStore.getConsumeQueue(topic, queueId);
for (int i = 0; i < totalCount; i++) {
CqUnit cqUnit = consumeQueue.get(i);
long storeTime = getStoreTime(cqUnit);
assertThat(storeTime).isEqualTo(appendMessageResults[i].getStoreTimestamp());
}
}
|
@Override
public ValidationResult validateSecretsConfig(String pluginId, final Map<String, String> configuration) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_VALIDATE_SECRETS_CONFIG,
new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return secretsMessageConverterV1.validatePluginConfigurationRequestBody(configuration);
}
@Override
public ValidationResult onSuccess(String responseBody, Map<String, String> responseHeaders,
String resolvedExtensionVersion) {
return secretsMessageConverterV1.getSecretsConfigValidationResultFromResponse(responseBody);
}
});
}
|
@Test
void shouldTalkToPlugin_toValidateSecretsConfig() {
String responseBody = "[{\"message\":\"Vault Url cannot be blank.\",\"key\":\"Url\"},{\"message\":\"Path cannot be blank.\",\"key\":\"Path\"}]";
when(pluginManager.submitTo(eq(PLUGIN_ID), eq(SECRETS_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(responseBody));
final ValidationResult result = secretsExtensionV1.validateSecretsConfig(PLUGIN_ID, Map.of("username", "some_name"));
assertThat(result.isSuccessful()).isFalse();
assertThat(result.getErrors()).contains(new ValidationError("Url", "Vault Url cannot be blank."), new ValidationError("Path", "Path cannot be blank."));
assertExtensionRequest(REQUEST_VALIDATE_SECRETS_CONFIG, "{\"username\":\"some_name\"}");
}
|
@Override
public void createFolder() throws FileSystemException {
requireResolvedFileObject().createFolder();
}
|
@Test
public void testDelegatesCreateFolder() throws FileSystemException {
fileObject.createFolder();
verify( resolvedFileObject, times( 1 ) ).createFolder();
}
|
public static JsonToRowWithErrFn withExceptionReporting(Schema rowSchema) {
return JsonToRowWithErrFn.forSchema(rowSchema);
}
|
@Test
@Category(NeedsRunner.class)
public void testParsesErrorWithErrorMsgRowsDeadLetter() throws Exception {
PCollection<String> jsonPersons =
pipeline.apply("jsonPersons", Create.of(JSON_PERSON_WITH_ERR));
ParseResult results =
jsonPersons.apply(JsonToRow.withExceptionReporting(PERSON_SCHEMA).withExtendedErrorInfo());
PCollection<Row> personRows = results.getResults();
PCollection<Row> errorsWithMsg = results.getFailedToParseLines();
PAssert.that(personRows).containsInAnyOrder(PERSON_ROWS);
PAssert.that(errorsWithMsg)
.containsInAnyOrder(
row(
JsonToRowWithErrFn.ERROR_ROW_WITH_ERR_MSG_SCHEMA,
"{}",
"Non-nullable field 'name' is not present in the JSON object."),
row(
JsonToRowWithErrFn.ERROR_ROW_WITH_ERR_MSG_SCHEMA,
"Is it 42?",
"Unable to parse Row"));
pipeline.run();
}
|
public static void setOutput(Job job, OutputJobInfo outputJobInfo) throws IOException {
setOutput(job.getConfiguration(), job.getCredentials(), outputJobInfo);
}
|
@Test
public void testGetTableSchema() throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "test getTableSchema");
HCatOutputFormat.setOutput(
job,
OutputJobInfo.create(
dbName,
tblName,
new HashMap<String, String>() {{put("colname", "col_value");}}
)
);
HCatSchema rowSchema = HCatOutputFormat.getTableSchema(job.getConfiguration());
assertEquals("Row-schema should have exactly one column.",
1, rowSchema.getFields().size());
assertEquals("Row-schema must contain the data column.",
"data_column", rowSchema.getFields().get(0).getName());
assertEquals("Data column should have been STRING type.",
serdeConstants.STRING_TYPE_NAME, rowSchema.getFields().get(0).getTypeString());
HCatSchema tableSchema = HCatOutputFormat.getTableSchemaWithPartitionColumns(job.getConfiguration());
assertEquals("Table-schema should have exactly 2 columns.",
2, tableSchema.getFields().size());
assertEquals("Table-schema must contain the data column.",
"data_column", tableSchema.getFields().get(0).getName());
assertEquals("Data column should have been STRING type.",
serdeConstants.STRING_TYPE_NAME, tableSchema.getFields().get(0).getTypeString());
assertEquals("Table-schema must contain the partition column.",
"colname", tableSchema.getFields().get(1).getName());
assertEquals("Partition column should have been STRING type.",
serdeConstants.STRING_TYPE_NAME, tableSchema.getFields().get(1).getTypeString());
}
|
public NewTopic configs(Map<String, String> configs) {
this.configs = configs;
return this;
}
|
@Test
public void testConfigsNotNull() {
NewTopic newTopic = new NewTopic(TEST_TOPIC, NUM_PARTITIONS, REPLICATION_FACTOR);
Map<String, String> configs = new HashMap<>();
configs.put(CLEANUP_POLICY_CONFIG_KEY, CLEANUP_POLICY_CONFIG_VALUE);
newTopic.configs(configs);
assertEquals(configs, newTopic.configs());
}
|
@Override
public Health check() {
if (isConnectedToDB()) {
return Health.GREEN;
}
return RED_HEALTH;
}
|
@Test
public void status_is_RED_with_single_cause_if_isAlive_does_not_return_1() {
when(isAliveMapper.isAlive()).thenReturn(12);
Health health = underTest.check();
verifyRedStatus(health);
}
|
@Override
public <V> MultiLabel generateOutput(V label) {
if (label instanceof Collection) {
Collection<?> c = (Collection<?>) label;
List<Pair<String,Boolean>> dimensions = new ArrayList<>();
for (Object o : c) {
dimensions.add(MultiLabel.parseElement(o.toString()));
}
return MultiLabel.createFromPairList(dimensions);
}
return MultiLabel.parseString(label.toString());
}
|
@Test
public void testGenerateOutput_set() {
MultiLabelFactory factory = new MultiLabelFactory();
MultiLabel output = factory.generateOutput(new HashSet<>(Arrays.asList("a=true", "b=true", "c=true")));
assertEquals(3, output.getLabelSet().size());
assertEquals("a,b,c", output.getLabelString());
output = factory.generateOutput(new HashSet<>(Arrays.asList("a", "b", "c")));
assertEquals(3, output.getLabelSet().size());
assertEquals("a,b,c", output.getLabelString());
}
|
public static <T, PartitionColumnT> ReadWithPartitions<T, PartitionColumnT> readWithPartitions(
TypeDescriptor<PartitionColumnT> partitioningColumnType) {
return new AutoValue_JdbcIO_ReadWithPartitions.Builder<T, PartitionColumnT>()
.setPartitionColumnType(partitioningColumnType)
.setNumPartitions(DEFAULT_NUM_PARTITIONS)
.setFetchSize(DEFAULT_FETCH_SIZE)
.setUseBeamSchema(false)
.build();
}
|
@Test
public void testIfNumPartitionsIsZero() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("numPartitions can not be less than 1");
pipeline.apply(
JdbcIO.<TestRow>readWithPartitions()
.withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION)
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withTable(READ_TABLE_NAME)
.withNumPartitions(0)
.withPartitionColumn("id")
.withLowerBound(0L)
.withUpperBound(1000L));
pipeline.run();
}
|
public static List<SessionInformations> getAllSessionsInformations() {
final Collection<HttpSession> sessions = SESSION_MAP_BY_ID.values();
final List<SessionInformations> sessionsInformations = new ArrayList<>(sessions.size());
for (final HttpSession session : sessions) {
try {
sessionsInformations.add(new SessionInformations(session, false));
} catch (final Exception e) {
// Tomcat can throw "java.lang.IllegalStateException: getLastAccessedTime: Session already invalidated"
continue;
}
}
sortSessions(sessionsInformations);
return Collections.unmodifiableList(sessionsInformations);
}
|
@Test
public void testGetAllSessionsInformations() {
final long now = System.currentTimeMillis();
sessionListener.sessionCreated(createSessionEvent("1", true, now));
sessionListener.sessionCreated(createSessionEvent("2", true, now + 2));
sessionListener.sessionCreated(createSessionEvent("3", true, now));
sessionListener.sessionCreated(createSessionEvent("4", true, now - 2));
sessionListener.sessionCreated(createSessionEvent("5", true, now));
if (SessionListener.getAllSessionsInformations().size() != 5) {
fail("getAllSessions");
}
}
|
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory(
final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration,
final Configuration jobConfiguration,
final Configuration clusterConfiguration,
final boolean isCheckpointingEnabled) {
checkNotNull(jobRestartStrategyConfiguration);
checkNotNull(jobConfiguration);
checkNotNull(clusterConfiguration);
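// Precedence: job-level restart configuration, then the job config, then the cluster config, then a default chosen by whether checkpointing is enabled.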
return getJobRestartStrategyFactory(jobRestartStrategyConfiguration)
.orElse(
getRestartStrategyFactoryFromConfig(jobConfiguration)
.orElse(
(getRestartStrategyFactoryFromConfig(clusterConfiguration)
.orElse(
getDefaultRestartStrategyFactory(
isCheckpointingEnabled)))));
}
|
@Test
void testInvalidStrategySpecifiedInClusterConfig() {
final Configuration conf = new Configuration();
conf.set(RestartStrategyOptions.RESTART_STRATEGY, "invalid-strategy");
assertThatThrownBy(
() ->
RestartBackoffTimeStrategyFactoryLoader
.createRestartBackoffTimeStrategyFactory(
DEFAULT_JOB_LEVEL_RESTART_CONFIGURATION,
new Configuration(),
conf,
false))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
protected int compareFirst(final Path p1, final Path p2) {
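// Matching kinds delegate to the kind comparator; otherwise directories order before files when ascending.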
if((p1.isDirectory() && p2.isDirectory())
|| (p1.isFile() && p2.isFile())) {
if(ascending) {
return impl.compare(descriptor.getKind(p1), descriptor.getKind(p2));
}
return -impl.compare(descriptor.getKind(p1), descriptor.getKind(p2));
}
if(p1.isFile()) {
return ascending ? 1 : -1;
}
return ascending ? -1 : 1;
}
|
@Test
public void testCompareFirst() {
assertEquals(0,
new FileTypeComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.file)), new Path("/a", EnumSet.of(Path.Type.file))));
assertEquals(0,
new FileTypeComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.directory)), new Path("/b", EnumSet.of(Path.Type.directory))));
assertEquals(1,
new FileTypeComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.file)), new Path("/b", EnumSet.of(Path.Type.directory))));
assertEquals(-1,
new FileTypeComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.directory)), new Path("/b", EnumSet.of(Path.Type.file))));
}
|
public void close() {
spillAndReleaseAllData();
spiller.close();
poolSizeChecker.shutdown();
}
|
@Test
void testResultPartitionClosed() throws Exception {
CompletableFuture<Void> resultPartitionReleaseFuture = new CompletableFuture<>();
HsSpillingStrategy spillingStrategy =
TestingSpillingStrategy.builder()
.setOnResultPartitionClosedFunction(
(ignore) -> {
resultPartitionReleaseFuture.complete(null);
return Decision.NO_ACTION;
})
.build();
HsMemoryDataManager memoryDataManager = createMemoryDataManager(spillingStrategy);
memoryDataManager.close();
assertThat(resultPartitionReleaseFuture).isCompleted();
}
|
public NamesrvConfig getNamesrvConfig() {
return namesrvConfig;
}
|
@Test
public void getNamesrvConfig() {
NamesrvConfig namesrvConfig = namesrvController.getNamesrvConfig();
Assert.assertNotNull(namesrvConfig);
}
|
public static void checkTenant(String tenant) {
if (StringUtils.isNotBlank(tenant)) {
if (!isValid(tenant.trim())) {
throw new IllegalArgumentException("invalid tenant");
}
if (tenant.length() > TENANT_MAX_LEN) {
throw new IllegalArgumentException("too long tenant, over 128");
}
}
}
|
@Test
void testCheckTenant() {
// invalid tenant
String tenant = "test!";
try {
ParamUtils.checkTenant(tenant);
fail();
} catch (IllegalArgumentException e) {
System.out.println(e.toString());
}
// tenant over max length
int tenantMaxLen = 128;
StringBuilder tenantBuilder = new StringBuilder();
for (int i = 0; i < tenantMaxLen + 1; i++) {
tenantBuilder.append("t");
}
tenant = tenantBuilder.toString();
try {
ParamUtils.checkTenant(tenant);
fail();
} catch (IllegalArgumentException e) {
System.out.println(e.toString());
}
}
|
public static <T> int getNullIndex(final T[] array) {
for (int i = 0; i < array.length; i++) {
if (array[i] == null) {
return i;
}
}
return -1;
}
|
@Test
public void shouldGetCorrectNullIndex() {
final Double[] doubles1 = new Double[]{10.0, null, null};
final Double[] doubles2 = new Double[]{null, null, null};
final Double[] doubles3 = new Double[]{10.0, 9.0, 8.0};
assertThat(ArrayUtil.getNullIndex(doubles1), equalTo(1));
assertThat(ArrayUtil.getNullIndex(doubles2), equalTo(0));
assertThat(ArrayUtil.getNullIndex(doubles3), equalTo(-1));
}
|
@Udf(description = "Returns a masked version of the input string. All characters except for the"
+ " last n will be replaced according to the default masking rules.")
@SuppressWarnings("MethodMayBeStatic") // Invoked via reflection
public String mask(
@UdfParameter("input STRING to be masked") final String input,
@UdfParameter("number of characters to keep unmasked at the end") final int numChars
) {
return doMask(new Masker(), input, numChars);
}
|
@Test
public void shouldApplyAllExplicitTypeMasks() {
final String result = udf.mask("AbCd#$123xy Z", 5, "Q", "q", "9", "@");
assertThat(result, is("QqQq@@993xy Z"));
}
|
public static String getClientIp(ServerHttpRequest request) {
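// Prefer the forwarding headers; each may hold a comma- or semicolon-separated chain, and the first non-"unknown" entry wins.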
for (String header : IP_HEADER_NAMES) {
String ipList = request.getHeaders().getFirst(header);
if (StringUtils.hasText(ipList) && !UNKNOWN.equalsIgnoreCase(ipList)) {
String[] ips = ipList.trim().split("[,;]");
for (String ip : ips) {
if (StringUtils.hasText(ip) && !UNKNOWN.equalsIgnoreCase(ip)) {
return ip;
}
}
}
}
var remoteAddress = request.getRemoteAddress();
return remoteAddress == null || remoteAddress.isUnresolved()
? UNKNOWN : remoteAddress.getAddress().getHostAddress();
}
|
@Test
void testGetIPAddressFromXRealIpHeader() {
var request = MockServerHttpRequest.get("/")
.header("X-Real-IP", "127.0.0.1")
.build();
var expected = "127.0.0.1";
var actual = IpAddressUtils.getClientIp(request);
assertEquals(expected, actual);
}
|
@Override
public void publish(ScannerReportWriter writer) {
List<Map.Entry<String, String>> properties = new ArrayList<>(cache.getAll().entrySet());
properties.add(constructScmInfo());
properties.add(constructCiInfo());
// properties that are automatically included in the report so that
// they can be included in webhook payloads
properties.addAll(config.getProperties().entrySet()
.stream()
.filter(e -> e.getKey().startsWith(CorePropertyDefinitions.SONAR_ANALYSIS))
.toList());
writer.writeContextProperties(properties
.stream()
.map(e -> ScannerReport.ContextProperty.newBuilder()
.setKey(e.getKey())
.setValue(e.getValue())
.build())
.toList());
}
|
@Test
public void publish_settings_prefixed_with_sonar_analysis_for_webhooks() {
props.put("foo", "should not be exported");
props.put("sonar.analysis.revision", "ab45b3");
props.put("sonar.analysis.build.number", "B123");
underTest.publish(writer);
List<ScannerReport.ContextProperty> expected = Arrays.asList(
newContextProperty("sonar.analysis.revision", "ab45b3"),
newContextProperty("sonar.analysis.build.number", "B123"),
newContextProperty("sonar.analysis.detectedscm", "undetected"),
newContextProperty("sonar.analysis.detectedci", "undetected"));
expectWritten(expected);
}
|
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
}
|
@Test
void durationExtendsFixed12() {
Schema durationSchema = LogicalTypes.duration().addToSchema(Schema.createFixed("f", null, null, 12));
assertEquals(LogicalTypes.duration(), durationSchema.getLogicalType());
assertThrows("Duration requires a fixed(12)", IllegalArgumentException.class,
"Duration can only be used with an underlying fixed type of size 12.",
() -> LogicalTypes.duration().addToSchema(Schema.create(Schema.Type.INT)));
assertThrows("Duration requires a fixed(12)", IllegalArgumentException.class,
"Duration can only be used with an underlying fixed type of size 12.",
() -> LogicalTypes.duration().addToSchema(Schema.createFixed("wrong", null, null, 42)));
}
|
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
}
|
@Test
public void shouldFailIfThereIsInvalidFieldNameInStructCall() {
// Given:
final Expression expression = new DereferenceExpression(
Optional.empty(),
new UnqualifiedColumnReferenceExp(ColumnName.of("COL6")),
"ZIP"
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> expressionTypeManager.getExpressionSqlType(expression)
);
// Then:
assertThat(e.getMessage(), containsString(
"Could not find field 'ZIP' in 'COL6'."));
}
|
@Override
public Mono<LookupUsernameHashResponse> lookupUsernameHash(final LookupUsernameHashRequest request) {
if (request.getUsernameHash().size() != AccountController.USERNAME_HASH_LENGTH) {
throw Status.INVALID_ARGUMENT
.withDescription(String.format("Illegal username hash length; expected %d bytes, but got %d bytes",
AccountController.USERNAME_HASH_LENGTH, request.getUsernameHash().size()))
.asRuntimeException();
}
return RateLimitUtil.rateLimitByRemoteAddress(rateLimiters.getUsernameLookupLimiter())
.then(Mono.fromFuture(() -> accountsManager.getByUsernameHash(request.getUsernameHash().toByteArray())))
.map(maybeAccount -> maybeAccount.orElseThrow(Status.NOT_FOUND::asRuntimeException))
.map(account -> LookupUsernameHashResponse.newBuilder()
.setServiceIdentifier(ServiceIdentifierUtil.toGrpcServiceIdentifier(new AciServiceIdentifier(account.getUuid())))
.build());
}
|
@Test
void lookupUsernameHashRateLimited() {
final Duration retryAfter = Duration.ofSeconds(13);
when(rateLimiter.validateReactive(anyString()))
.thenReturn(Mono.error(new RateLimitExceededException(retryAfter)));
//noinspection ResultOfMethodCallIgnored
GrpcTestUtils.assertRateLimitExceeded(retryAfter,
() -> unauthenticatedServiceStub().lookupUsernameHash(LookupUsernameHashRequest.newBuilder()
.setUsernameHash(ByteString.copyFrom(new byte[AccountController.USERNAME_HASH_LENGTH]))
.build()),
accountsManager);
}
|
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
if (StringUtils.isEmpty(spelExpression)) {
return spelExpression;
}
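// Three expression forms are handled in turn: property placeholders, method-based expressions, and bean references; anything else is returned unchanged.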
if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
return stringValueResolver.resolveStringValue(spelExpression);
}
if (spelExpression.matches(METHOD_SPEL_REGEX)) {
SpelRootObject rootObject = new SpelRootObject(method, arguments);
MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
return (String) evaluated;
}
if (spelExpression.matches(BEAN_SPEL_REGEX)) {
SpelRootObject rootObject = new SpelRootObject(method, arguments);
MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
return (String) evaluated;
}
return spelExpression;
}
|
@Test
public void beanMethodSpelTest() throws Exception {
String testExpression = "@dummySpelBean.getBulkheadName(#parameter)";
String testMethodArg = "argg";
String bulkheadName = "sgt. bulko";
DefaultSpelResolverTest target = new DefaultSpelResolverTest();
Method testMethod = target.getClass().getMethod("testMethod", String.class);
given(dummySpelBean.getBulkheadName(testMethodArg)).willReturn(bulkheadName);
String result = sut.resolve(testMethod, new Object[]{testMethodArg}, testExpression);
then(dummySpelBean).should(times(1)).getBulkheadName(testMethodArg);
assertThat(result).isEqualTo(bulkheadName);
}
|
@POST
@Path("run/{noteId}/{paragraphId}")
@ZeppelinApi
public Response runParagraphSynchronously(@PathParam("noteId") String noteId,
@PathParam("paragraphId") String paragraphId,
@QueryParam("sessionId") String sessionId,
String message)
throws IOException, IllegalArgumentException {
LOGGER.info("Run paragraph synchronously {} {} {}", noteId, paragraphId, message);
return notebook.processNote(noteId,
note -> {
checkIfNoteIsNotNull(note, noteId);
Paragraph paragraph = note.getParagraph(paragraphId);
checkIfParagraphIsNotNull(paragraph, paragraphId);
Map<String, Object> params = new HashMap<>();
if (!StringUtils.isEmpty(message)) {
ParametersRequest request = GSON.fromJson(message, ParametersRequest.class);
params = request.getParams();
}
if (notebookService.runParagraph(note, paragraphId, paragraph.getTitle(),
paragraph.getText(), params,
new HashMap<>(), sessionId, false, true, getServiceContext(), new RestServiceCallback<>())) {
return notebookService.getNote(noteId, getServiceContext(), new RestServiceCallback<>(),
noteRun -> {
Paragraph p = noteRun.getParagraph(paragraphId);
InterpreterResult result = p.getReturn();
return new JsonResponse<>(Status.OK, result).build();
});
} else {
return new JsonResponse<>(Status.INTERNAL_SERVER_ERROR, "Fail to run paragraph").build();
}
});
}
|
@Test
void testRunParagraphSynchronously() throws IOException {
LOG.info("Running testRunParagraphSynchronously");
String note1Id = null;
try {
note1Id = notebook.createNote("note1", anonymous);
Paragraph p = notebook.processNote(note1Id,
note1 -> {
return note1.addNewParagraph(AuthenticationInfo.ANONYMOUS);
});
// run non-blank paragraph
String title = "title";
String text = "%sh\n sleep 1";
p.setTitle(title);
p.setText(text);
CloseableHttpResponse post = httpPost("/notebook/run/" + note1Id + "/" + p.getId(), "");
assertThat(post, isAllowed());
Map<String, Object> resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {}.getType());
assertEquals("OK", resp.get("status"));
post.close();
assertNotEquals(Job.Status.READY, p.getStatus());
// Check that the paragraph was not emptied
assertEquals(title, p.getTitle());
assertEquals(text, p.getText());
// run invalid code
text = "%sh\n invalid_cmd";
p.setTitle(title);
p.setText(text);
post = httpPost("/notebook/run/" + note1Id + "/" + p.getId(), "");
assertEquals(200, post.getStatusLine().getStatusCode());
resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {}.getType());
assertEquals("OK", resp.get("status"));
Map stringMap = (Map) resp.get("body");
assertEquals("ERROR", stringMap.get("code"));
List<Map> interpreterResults = (List<Map>) stringMap.get("msg");
assertTrue(interpreterResults.get(0).get("data").toString()
.contains("invalid_cmd: "), interpreterResults.get(0).toString());
post.close();
assertNotEquals(Job.Status.READY, p.getStatus());
// Check that the paragraph was not emptied
assertEquals(title, p.getTitle());
assertEquals(text, p.getText());
} finally {
// cleanup
if (null != note1Id) {
notebook.removeNote(note1Id, anonymous);
}
}
}
|
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void power4() {
String inputExpression = "y ** ( 5 * 3 )";
BaseNode infix = parse( inputExpression, mapOf(entry("y", BuiltInType.NUMBER)) );
assertThat( infix).isInstanceOf(InfixOpNode.class);
assertThat( infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
assertThat( infix.getText()).isEqualTo(inputExpression);
InfixOpNode exp = (InfixOpNode) infix;
assertThat( exp.getLeft()).isInstanceOf(NameRefNode.class);
assertThat( exp.getLeft().getText()).isEqualTo("y");
assertThat( exp.getOperator()).isEqualTo(InfixOperator.POW);
assertThat( exp.getRight()).isInstanceOf(InfixOpNode.class);
assertThat( exp.getRight().getText()).isEqualTo( "5 * 3");
InfixOpNode mult = (InfixOpNode) exp.getRight();
assertThat( mult.getLeft()).isInstanceOf(NumberNode.class);
assertThat( mult.getLeft().getText()).isEqualTo("5");
assertThat( mult.getOperator()).isEqualTo(InfixOperator.MULT);
assertThat( mult.getRight()).isInstanceOf(NumberNode.class);
assertThat( mult.getRight().getText()).isEqualTo("3");
}
|
public Map<String, String> getProperties() {
return properties;
}
|
@Test
public void testInlinePropertiesUDF() throws Exception {
String createFunctionSql = "CREATE FUNCTION get_typeb(INT) RETURNS \n" +
"STRING\n" +
" type = 'Python'\n" +
" symbol = 'echo'\n" +
"AS \n" +
"$$ \n" +
"def echo(x):\n" +
" return str(type(x)) \n" +
"$$;";
CreateFunctionStmt stmt = (CreateFunctionStmt) com.starrocks.sql.parser.SqlParser.parse(
createFunctionSql, 32).get(0);
Assert.assertEquals("Python", stmt.getProperties().get("type"));
Assert.assertEquals("echo", stmt.getProperties().get("symbol"));
createFunctionSql = "CREATE FUNCTION get_type(INT) RETURNS\n" +
"STRING\n" +
" type = 'Python'\n" +
" symbol = 'echo'\n" +
" file = 'http://localhost:8000/echo.py.zip';";
stmt = (CreateFunctionStmt) com.starrocks.sql.parser.SqlParser.parse(
createFunctionSql, 32).get(0);
Assert.assertEquals(stmt.getProperties().get("file"), "http://localhost:8000/echo.py.zip");
}
|
public static SessionBytesStoreSupplier persistentSessionStore(final String name,
final Duration retentionPeriod) {
Objects.requireNonNull(name, "name cannot be null");
final String msgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod");
final long retentionPeriodMs = validateMillisecondDuration(retentionPeriod, msgPrefix);
if (retentionPeriodMs < 0) {
throw new IllegalArgumentException("retentionPeriod cannot be negative");
}
return new RocksDbSessionBytesStoreSupplier(name, retentionPeriodMs);
}
|
@Test
public void shouldThrowIfIPersistentSessionStoreStoreNameIsNull() {
final Exception e = assertThrows(NullPointerException.class, () -> Stores.persistentSessionStore(null, ofMillis(0)));
assertEquals("name cannot be null", e.getMessage());
}
|
@Override
public void setDefaultHandler(final Application application, final List<String> schemes) {
final WorkspaceSchemeHandlerProxy proxy = WorkspaceSchemeHandlerProxy.create();
for(String scheme : schemes) {
final String path = workspace.absolutePathForAppBundleWithIdentifier(application.getIdentifier());
if(null != path) {
final CountDownLatch lock = new CountDownLatch(1);
final Proxy callback = new Proxy(new WorkspaceSchemeHandlerProxy.CompletionHandler() {
@Override
public void didFinishWithError(final NSError error) {
if(error != null) {
log.warn(String.format("Setting scheme handler returned with error %s", error));
}
lock.countDown();
}
});
proxy.setDefaultHandler(NSURL.fileURLWithPath(path), scheme, callback.id());
if(log.isInfoEnabled()) {
log.info(String.format("Await result from %s", proxy));
}
Uninterruptibles.awaitUninterruptibly(lock);
}
}
}
|
@Test
@Ignore
public void testSetDefaultHandler() {
assumeTrue(Factory.Platform.osversion.matches("12\\..*"));
final Application application = new Application("com.apple.finder");
final WorkspaceSchemeHandler handler = new WorkspaceSchemeHandler(new LaunchServicesApplicationFinder());
final String scheme = new AlphanumericRandomStringService().random();
handler.setDefaultHandler(application, Collections.singletonList(scheme));
assertTrue(handler.getAllHandlers(scheme).contains(application));
assertEquals(application, handler.getDefaultHandler(scheme));
}
|
@VisibleForTesting
static Comparator<ActualProperties> streamingExecutionPreference(PreferredProperties preferred)
{
// Calculating the matches can be a bit expensive, so cache the results between comparisons
LoadingCache<List<LocalProperty<VariableReferenceExpression>>, List<Optional<LocalProperty<VariableReferenceExpression>>>> matchCache = CacheBuilder.newBuilder()
.build(CacheLoader.from(actualProperties -> LocalProperties.match(actualProperties, preferred.getLocalProperties())));
return (actual1, actual2) -> {
List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout1 = matchCache.getUnchecked(actual1.getLocalProperties());
List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout2 = matchCache.getUnchecked(actual2.getLocalProperties());
return ComparisonChain.start()
.compareTrueFirst(hasLocalOptimization(preferred.getLocalProperties(), matchLayout1), hasLocalOptimization(preferred.getLocalProperties(), matchLayout2))
.compareTrueFirst(meetsPartitioningRequirements(preferred, actual1), meetsPartitioningRequirements(preferred, actual2))
.compare(matchLayout1, matchLayout2, matchedLayoutPreference())
.result();
};
}
|
@Test
public void testPickLayoutAnyPreference()
{
Comparator<ActualProperties> preference = streamingExecutionPreference(PreferredProperties.any());
List<ActualProperties> input = ImmutableList.<ActualProperties>builder()
.add(builder()
.global(streamPartitionedOn("a", "b"))
.build())
.add(builder()
.global(singleStreamPartition())
.build())
.add(builder()
.global(arbitraryPartition())
.local(ImmutableList.of(grouped("a", "b")))
.build())
.add(builder()
.global(arbitraryPartition())
.build())
.add(builder()
.global(hashDistributedOn("a"))
.build())
.add(builder()
.global(singleStream())
.local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
.build())
.add(builder()
.global(singleStreamPartition())
.local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
.build())
.build();
// Given no preferences, the original input order should be maintained
assertEquals(stableSort(input, preference), input);
}
|
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext)
{
doEvaluateDisruptContext(request, requestContext);
return _client.sendRequest(request, requestContext);
}
|
@Test
public void testSendRequest11()
{
when(_controller.getDisruptContext(any(String.class))).thenReturn(_disrupt);
_client.sendRequest(_multiplexed);
verify(_underlying, times(1)).sendRequest(eq(_multiplexed), any(RequestContext.class), any(Callback.class));
}
|
@Udf
public List<String> keys(@UdfParameter final String jsonObj) {
if (jsonObj == null) {
return null;
}
final JsonNode node = UdfJsonMapper.parseJson(jsonObj);
if (node.isMissingNode() || !node.isObject()) {
return null;
}
final List<String> ret = new ArrayList<>();
node.fieldNames().forEachRemaining(ret::add);
return ret;
}
|
@Test
public void shouldReturnNullForString() {
assertNull(udf.keys("\"abc\""));
}
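// Hedged companion sketch (assumed, mirroring the UDF contract above): a JSON
// object yields its field names in document order; non-objects yield null.
@Test
public void shouldReturnKeysForObjectSketch() {
assertEquals(Arrays.asList("a", "b"), udf.keys("{\"a\": 1, \"b\": 2}"));
assertNull(udf.keys("[1, 2, 3]"));
}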
|
public static void negate(Slice decimal)
{
setNegative(decimal, !isNegative(decimal));
}
|
@Test
public void testUnscaledBigIntegerToDecimal()
{
assertConvertsUnscaledBigIntegerToDecimal(MAX_DECIMAL_UNSCALED_VALUE);
assertConvertsUnscaledBigIntegerToDecimal(MIN_DECIMAL_UNSCALED_VALUE);
assertConvertsUnscaledBigIntegerToDecimal(BigInteger.ZERO);
assertConvertsUnscaledBigIntegerToDecimal(BigInteger.ONE);
assertConvertsUnscaledBigIntegerToDecimal(BigInteger.ONE.negate());
}
|
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
}
|
@Test
void invokeParamRangeAndRange() {
FunctionTestUtil.assertResult( startsFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
Boolean.TRUE );
FunctionTestUtil.assertResult( startsFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "k", Range.RangeBoundary.CLOSED ) ),
Boolean.TRUE );
FunctionTestUtil.assertResult( startsFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ) ),
Boolean.FALSE );
FunctionTestUtil.assertResult( startsFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.OPEN, "a", "k", Range.RangeBoundary.CLOSED ) ),
Boolean.FALSE );
}
|
public synchronized void add(T node) {
add(node, defaultReplication);
}
|
@Test
public void usesCustomHash() {
final RuntimeException exception = new RuntimeException();
ConsistentHash.Hash<String> hashFunction = str -> {
throw exception;
};
ConsistentHash<String> hash = new ConsistentHash<>(hashFunction);
final RuntimeException e = assertThrows(RuntimeException.class, () -> hash.add("foo"));
assertSame(exception, e);
}
|
@Secured(resource = Commons.NACOS_CORE_CONTEXT_V2 + "/loader", action = ActionTypes.READ)
@GetMapping("/cluster")
public ResponseEntity<Map<String, Object>> loaderMetrics() {
Map<String, Object> serverLoadMetrics = getServerLoadMetrics();
return ResponseEntity.ok().body(serverLoadMetrics);
}
|
@Test
void testLoaderMetrics() throws NacosException {
EnvUtil.setEnvironment(new MockEnvironment());
Member member = new Member();
member.setIp("1.1.1.1");
member.setPort(8848);
ServerAbilities serverAbilities = new ServerAbilities();
ServerRemoteAbility serverRemoteAbility = new ServerRemoteAbility();
serverRemoteAbility.setSupportRemoteConnection(true);
serverAbilities.setRemoteAbility(serverRemoteAbility);
member.setAbilities(serverAbilities);
Mockito.when(serverMemberManager.allMembersWithoutSelf()).thenReturn(Collections.singletonList(member));
Map<String, String> metrics = new HashMap<>();
metrics.put("conCount", "1");
ServerLoaderInfoResponse serverLoaderInfoResponse = new ServerLoaderInfoResponse();
serverLoaderInfoResponse.setLoaderMetrics(metrics);
Mockito.when(serverLoaderInfoRequestHandler.handle(Mockito.any(), Mockito.any())).thenReturn(serverLoaderInfoResponse);
Mockito.when(serverMemberManager.getSelf()).thenReturn(member);
ResponseEntity<Map<String, Object>> result = serverLoaderController.loaderMetrics();
assertEquals(9, result.getBody().size());
}
|
public static ColumnIndex build(
PrimitiveType type,
BoundaryOrder boundaryOrder,
List<Boolean> nullPages,
List<Long> nullCounts,
List<ByteBuffer> minValues,
List<ByteBuffer> maxValues) {
return build(type, boundaryOrder, nullPages, nullCounts, minValues, maxValues, null, null);
}
|
@Test
public void testStaticBuildInt64() {
ColumnIndex columnIndex = ColumnIndexBuilder.build(
Types.required(INT64).named("test_int64"),
BoundaryOrder.UNORDERED,
asList(true, false, true, false, true, false),
asList(1L, 2L, 3L, 4L, 5L, 6L),
toBBList(null, 2L, null, 4L, null, 9L),
toBBList(null, 3L, null, 15L, null, 10L));
assertEquals(BoundaryOrder.UNORDERED, columnIndex.getBoundaryOrder());
assertCorrectNullCounts(columnIndex, 1, 2, 3, 4, 5, 6);
assertCorrectNullPages(columnIndex, true, false, true, false, true, false);
assertCorrectValues(columnIndex.getMaxValues(), null, 3L, null, 15L, null, 10L);
assertCorrectValues(columnIndex.getMinValues(), null, 2L, null, 4L, null, 9L);
}
|
public static byte[] serializeToByteArray(Serializable value) {
try {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (ObjectOutputStream oos = new ObjectOutputStream(new SnappyOutputStream(buffer))) {
oos.writeObject(value);
}
return buffer.toByteArray();
} catch (IOException exn) {
throw new IllegalArgumentException("unable to serialize " + value, exn);
}
}
|
@Test
public void testSerializationError() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("unable to serialize");
SerializableUtils.serializeToByteArray(new UnserializableByJava());
}
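// Hedged round-trip sketch: assumes the matching deserializeFromByteArray
// helper that SerializableUtils provides alongside the serializer shown above.
@Test
public void testSerializationRoundTripSketch() {
byte[] bytes = SerializableUtils.serializeToByteArray("hello");
assertEquals("hello", SerializableUtils.deserializeFromByteArray(bytes, "a greeting"));
}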
|
@Override
public String format(final Schema schema) {
final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
return options.contains(Option.AS_COLUMN_LIST)
? stripTopLevelStruct(converted)
: converted;
}
|
@Test
public void shouldFormatOptionalStruct() {
// Given:
final Schema structSchema = SchemaBuilder.struct()
.field("COL1", Schema.OPTIONAL_STRING_SCHEMA)
.field("COL4", SchemaBuilder
.array(Schema.OPTIONAL_FLOAT64_SCHEMA)
.optional()
.build())
.field("COL5", SchemaBuilder
.map(Schema.OPTIONAL_STRING_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA)
.optional()
.build())
.optional()
.build();
// Then:
assertThat(DEFAULT.format(structSchema), is(
"STRUCT<"
+ "COL1 VARCHAR, "
+ "COL4 ARRAY<DOUBLE>, "
+ "COL5 MAP<VARCHAR, DOUBLE>"
+ ">"));
assertThat(STRICT.format(structSchema), is(
"STRUCT<"
+ "COL1 VARCHAR, "
+ "COL4 ARRAY<DOUBLE>, "
+ "COL5 MAP<VARCHAR, DOUBLE>"
+ ">"));
}
|
public Random() {
real = new UniversalGenerator();
twister = new MersenneTwister();
}
|
@Test
public void testRandom() {
System.out.println("random");
smile.math.Random instance = new Random(System.currentTimeMillis());
for (int i = 0; i < 1000000; i++) {
double result = instance.nextDouble();
assertTrue(result >= 0.0);
assertTrue(result < 1.0);
}
}
|
void handleFinish(HttpResponse response, Span span) {
if (response == null) throw new NullPointerException("response == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
if (response.error() != null) {
span.error(response.error()); // Ensures MutableSpan.error() for SpanHandler
}
try {
parseResponse(response, span);
} catch (Throwable t) {
propagateIfFatal(t);
Platform.get().log("error parsing response {0}", response, t);
} finally {
long finishTimestamp = response.finishTimestamp();
if (finishTimestamp == 0L) {
span.finish();
} else {
span.finish(finishTimestamp);
}
}
}
|
@Test void handleFinish_parsesTagsWithCustomizer() {
when(span.customizer()).thenReturn(spanCustomizer);
handler.handleFinish(response, span);
verify(responseParser).parse(response, context, spanCustomizer);
}
|
@Override
public PlanNode optimize(
PlanNode maxSubplan,
ConnectorSession session,
VariableAllocator variableAllocator,
PlanNodeIdAllocator idAllocator)
{
return rewriteWith(new Rewriter(session, idAllocator), maxSubplan);
}
|
@Test
public void testJdbcComputePushdownBooleanOperations()
{
String table = "test_table";
String schema = "test_schema";
String expression = "(((c1 + c2) - c2 <> c2) OR c2 = c1) AND c1 <> c2";
TypeProvider typeProvider = TypeProvider.copyOf(ImmutableMap.of("c1", BIGINT, "c2", BIGINT));
RowExpression rowExpression = sqlToRowExpressionTranslator.translateAndOptimize(expression(expression), typeProvider);
Set<ColumnHandle> columns = Stream.of("c1", "c2").map(TestJdbcComputePushdown::integerJdbcColumnHandle).collect(Collectors.toSet());
PlanNode original = filter(jdbcTableScan(schema, table, BIGINT, "c1", "c2"), rowExpression);
JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table);
ConnectorSession session = new TestingConnectorSession(ImmutableList.of());
JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle(
session.getSqlFunctionProperties(),
jdbcTableHandle,
TupleDomain.none(),
Optional.of(new JdbcExpression("((((((('c1' + 'c2') - 'c2') <> 'c2')) OR (('c2' = 'c1')))) AND (('c1' <> 'c2')))")));
PlanNode actual = this.jdbcComputePushdown.optimize(original, session, null, ID_ALLOCATOR);
assertPlanMatch(
actual,
PlanMatchPattern.filter(expression, JdbcTableScanMatcher.jdbcTableScanPattern(jdbcTableLayoutHandle, columns)));
}
|
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
if (!"alerts".equals(notification.getType())) {
return null;
}
// Retrieve useful values
String projectId = notification.getFieldValue("projectId");
String projectKey = notification.getFieldValue("projectKey");
String projectName = notification.getFieldValue("projectName");
String projectVersion = notification.getFieldValue("projectVersion");
String branchName = notification.getFieldValue("branch");
String alertName = notification.getFieldValue("alertName");
String alertText = notification.getFieldValue("alertText");
String alertLevel = notification.getFieldValue("alertLevel");
String ratingMetricsInOneString = notification.getFieldValue("ratingMetrics");
boolean isNewAlert = Boolean.parseBoolean(notification.getFieldValue("isNewAlert"));
String fullProjectName = computeFullProjectName(projectName, branchName);
// Generate text
String subject = generateSubject(fullProjectName, alertLevel, isNewAlert);
String messageBody = generateMessageBody(projectName, projectKey, projectVersion, branchName, alertName, alertText, isNewAlert, ratingMetricsInOneString);
// And finally return the email that will be sent
return new EmailMessage()
.setMessageId("alerts/" + projectId)
.setSubject(subject)
.setPlainTextMessage(messageBody);
}
|
@UseDataProvider("alertTextAndFormattedText")
@Test
public void shouldFormatNewAlertWithThresholdProperlyFormatted(String alertText, String expectedFormattedAlertText) {
Notification notification = createNotification("Failed", alertText, "ERROR", "true");
EmailMessage message = template.format(notification);
assertThat(message.getMessageId(), is("alerts/45"));
assertThat(message.getSubject(), is("New quality gate threshold reached on \"Foo\""));
assertThat(message.getMessage(), is("" +
"Project: Foo\n" +
"Version: V1-SNAP\n" +
"Quality gate status: Failed\n" +
"\n" +
"New quality gate threshold: " + expectedFormattedAlertText + "\n" +
"\n" +
"More details at: http://nemo.sonarsource.org/dashboard?id=org.sonar.foo:foo"));
}
|
@Override
public Token redeem(@NonNull String code, String redirectUri, String clientId) {
var redeemed = codeRepo.remove(code).orElse(null);
if (redeemed == null) {
return null;
}
if (!validateCode(redeemed, redirectUri, clientId)) {
return null;
}
var accessTokenTtl = Duration.ofMinutes(5);
return new Token(
issueAccessToken(accessTokenTtl, redeemed.clientId()),
issueIdToken(redeemed.clientId(), redeemed.nonce(), redeemed.federatedIdToken()),
accessTokenTtl.getSeconds());
}
|
@Test
void redeem_twice() throws JOSEException {
var issuer = URI.create("https://idp.example.com");
var k = genKey();
var keyStore = mock(KeyStore.class);
when(keyStore.signingKey()).thenReturn(k);
var codeRepo = mock(CodeRepo.class);
var sut = new TokenIssuerImpl(issuer, keyStore, codeRepo);
var redirectUri = URI.create("https://myapp.example.com");
var clientId = "myapp";
var federatedIdToken =
new IdTokenJWS(
null,
new IdToken(
null, "tobias", null, 0, 0, 0, null, null, null, null, null, null, null, null, null,
null, null, null, null));
var id = UUID.randomUUID().toString();
var code =
new Code(
id, null, Instant.now().plusSeconds(10), redirectUri, null, clientId, federatedIdToken);
when(codeRepo.remove(id)).thenReturn(Optional.of(code), Optional.empty());
// when
var t1 = sut.redeem(id, redirectUri.toString(), clientId);
var t2 = sut.redeem(id, redirectUri.toString(), clientId);
// then
assertNotNull(t1);
assertNull(t2);
}
|
public long getWorkerTabletNum(String workerIpPort) {
try {
WorkerInfo workerInfo = client.getWorkerInfo(serviceId, workerIpPort);
return workerInfo.getTabletNum();
} catch (StarClientException e) {
LOG.info("Failed to get worker tablet num from starMgr, Error: {}.", e.getMessage());
}
return 0;
}
|
@Test
public void testGetWorkerTabletNumException() throws StarClientException {
String serviceId = "1";
String workerIpPort = "127.0.0.1:8093";
Deencapsulation.setField(starosAgent, "serviceId", serviceId);
new Expectations() {
{
client.getWorkerInfo(serviceId, anyString);
result = new StarClientException(
StarStatus.newBuilder().setStatusCode(StatusCode.INTERNAL).setErrorMsg("injected error")
.build());
minTimes = 1;
}
};
ExceptionChecker.expectThrowsNoException(() -> {
// no exception at all, return 0 instead
long tabletNum = starosAgent.getWorkerTabletNum(workerIpPort);
Assert.assertEquals(0, tabletNum);
});
}
|
public static boolean parse(final String str, ResTable_config out) {
return parse(str, out, true);
}
|
@Test
public void parse_density_mdpi() {
ResTable_config config = new ResTable_config();
ConfigDescription.parse("mdpi", config);
assertThat(config.density).isEqualTo(DENSITY_MEDIUM);
}
|
static Type inferIcebergType(Object value, IcebergSinkConfig config) {
return new SchemaGenerator(config).inferIcebergType(value);
}
|
@Test
public void testInferIcebergTypeEmpty() {
IcebergSinkConfig config = mock(IcebergSinkConfig.class);
// skip infer for null
assertThat(SchemaUtils.inferIcebergType(null, config)).isNull();
// skip infer for empty list
assertThat(SchemaUtils.inferIcebergType(ImmutableList.of(), config)).isNull();
// skip infer for list if first element is null
List<?> list = Lists.newArrayList();
list.add(null);
assertThat(SchemaUtils.inferIcebergType(list, config)).isNull();
// skip infer for list if first element is an empty object
assertThat(SchemaUtils.inferIcebergType(ImmutableList.of(ImmutableMap.of()), config)).isNull();
// skip infer for empty object
assertThat(SchemaUtils.inferIcebergType(ImmutableMap.of(), config)).isNull();
// skip infer for object if values are null
Map<String, ?> map = Maps.newHashMap();
map.put("col", null);
assertThat(SchemaUtils.inferIcebergType(map, config)).isNull();
// skip infer for object if values are empty objects
assertThat(SchemaUtils.inferIcebergType(ImmutableMap.of("nested", ImmutableMap.of()), config))
.isNull();
}
|
@Override
public void onWorkflowFinalized(Workflow workflow) {
WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
String reason = workflow.getReasonForIncompletion();
LOG.info(
"Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
summary.getIdentity(),
workflow.getWorkflowId(),
workflow.getStatus(),
reason);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"onWorkflowFinalized",
MetricConstants.STATUS_TAG,
workflow.getStatus().name());
if (reason != null
&& workflow.getStatus() == Workflow.WorkflowStatus.FAILED
&& reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
LOG.info(
"Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId());
return; // special case doing nothing
}
WorkflowInstance.Status instanceStatus =
instanceDao.getWorkflowInstanceStatus(
summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
if (instanceStatus == null
|| (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
LOG.info(
"Workflow {} with execution_id [{}] does not exist or already "
+ "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId(),
instanceStatus,
workflow.getStatus());
return;
}
Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
// cancel internally failed tasks
realTaskMap.values().stream()
.filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
.forEach(task -> maestroTask.cancel(workflow, task, null));
WorkflowRuntimeOverview overview =
TaskHelper.computeOverview(
objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
try {
validateAndUpdateOverview(overview, summary);
switch (workflow.getStatus()) {
case TERMINATED: // stopped due to stop request
if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
} else {
update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
}
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
Optional<Task.Status> done =
TaskHelper.checkProgress(realTaskMap, summary, overview, true);
switch (done.orElse(Task.Status.IN_PROGRESS)) {
/**
* This is a special status to indicate that the workflow has succeeded. Check {@link
* TaskHelper#checkProgress} for more details.
*/
case FAILED_WITH_TERMINAL_ERROR:
WorkflowInstance.Status nextStatus =
AggregatedViewHelper.deriveAggregatedStatus(
instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
if (!nextStatus.isTerminal()) {
throw new MaestroInternalError(
"Invalid status: [%s], expecting a terminal one", nextStatus);
}
update(workflow, nextStatus, summary, overview);
break;
case FAILED:
case CANCELED: // due to step failure
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
// all other status are invalid
default:
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"invalidStatusOnWorkflowFinalized");
throw new MaestroInternalError(
"Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
}
break;
}
} catch (MaestroInternalError | IllegalArgumentException e) {
// non-retryable error and still fail the instance
LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"nonRetryableErrorOnWorkflowFinalized");
update(
workflow,
WorkflowInstance.Status.FAILED,
summary,
overview,
Details.create(
e.getMessage(), "onWorkflowFinalized is failed with non-retryable error."));
}
}
|
@Test
public void testWorkflowFinalizedFailed() {
StepRuntimeState state = new StepRuntimeState();
state.setStatus(StepInstance.Status.FATALLY_FAILED);
when(stepInstanceDao.getAllStepStates(any(), anyLong(), anyLong()))
.thenReturn(singletonMap("foo", state));
when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.FAILED);
when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
.thenReturn(WorkflowInstance.Status.IN_PROGRESS);
statusListener.onWorkflowFinalized(workflow);
Assert.assertEquals(
1L,
metricRepo
.getCounter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
MaestroWorkflowStatusListener.class,
"type",
"onWorkflowFinalized",
"status",
"FAILED")
.count());
verify(instanceDao, times(1))
.updateWorkflowInstance(any(), any(), any(), eq(WorkflowInstance.Status.FAILED), anyLong());
verify(publisher, times(1)).publishOrThrow(any(), any());
}
|
public boolean subsumedBy(BlocksGroup other, int indexCorrection) {
return subsumedBy(this, other, indexCorrection);
}
|
@Test
public void testSubsumedBy() {
BlocksGroup group1 = newBlocksGroup(newBlock("a", 1), newBlock("b", 2));
BlocksGroup group2 = newBlocksGroup(newBlock("a", 2), newBlock("b", 3), newBlock("c", 4));
// block "c" from group2 does not have corresponding block in group1
assertThat(group2.subsumedBy(group1, 1), is(false));
}
|
@Override
public Authentication getAuthentication(String token) throws AccessException {
return getExecuteTokenManager().getAuthentication(token);
}
|
@Test
void testGetAuthentication() throws AccessException {
assertNotNull(tokenManagerDelegate.getAuthentication("token"));
}
|
public AggregateAnalysisResult analyze(
final ImmutableAnalysis analysis,
final List<SelectExpression> finalProjection
) {
if (!analysis.getGroupBy().isPresent()) {
throw new IllegalArgumentException("Not an aggregate query");
}
final AggAnalyzer aggAnalyzer = new AggAnalyzer(analysis, functionRegistry);
aggAnalyzer.process(finalProjection);
return aggAnalyzer.result();
}
|
@Test
public void shouldNotCaptureWindowEndAsRequiredColumn() {
// Given:
givenWindowExpression();
givenSelectExpression(new UnqualifiedColumnReferenceExp(SystemColumns.WINDOWEND_NAME));
// When:
final AggregateAnalysisResult result = analyzer.analyze(analysis, selects);
// Then:
final List<ColumnName> requiredColumnNames = result.getRequiredColumns().stream()
.map(ColumnReferenceExp::getColumnName)
.collect(Collectors.toList());
assertThat(requiredColumnNames, not(hasItem(SystemColumns.WINDOWEND_NAME)));
}
|
public MergePolicyConfig setBatchSize(int batchSize) {
this.batchSize = checkPositive("batchSize", batchSize);
return this;
}
|
@Test(expected = IllegalArgumentException.class)
public void setBatchSize_withZero() {
config.setBatchSize(0);
}
|
public boolean unsetLine(DefaultIssue issue, IssueChangeContext context) {
Integer currentValue = issue.line();
if (currentValue != null) {
issue.setFieldChange(context, LINE, currentValue, "");
issue.setLine(null);
issue.setChanged(true);
return true;
}
return false;
}
|
@Test
void unset_line_has_no_effect_if_line_is_already_null() {
issue.setLine(null);
boolean updated = underTest.unsetLine(issue, context);
assertThat(updated).isFalse();
assertThat(issue.line()).isNull();
assertThat(issue.isChanged()).isFalse();
assertThat(issue.currentChange()).isNull();
assertThat(issue.mustSendNotifications()).isFalse();
}
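// Hedged counterpart sketch, reusing the same fixtures by assumption: when a
// line is present, unsetLine records the field change and clears the value.
@Test
void unset_line_clears_value_and_records_change_sketch() {
issue.setLine(42);
boolean updated = underTest.unsetLine(issue, context);
assertThat(updated).isTrue();
assertThat(issue.line()).isNull();
assertThat(issue.isChanged()).isTrue();
}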
|
public static void prepareFilesForStaging(FileStagingOptions options) {
List<String> filesToStage = options.getFilesToStage();
if (filesToStage == null || filesToStage.isEmpty()) {
filesToStage = detectClassPathResourcesToStage(ReflectHelpers.findClassLoader(), options);
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
filesToStage.size());
LOG.debug("Classpath elements: {}", filesToStage);
}
final String tmpJarLocation =
MoreObjects.firstNonNull(options.getTempLocation(), System.getProperty("java.io.tmpdir"));
final List<String> resourcesToStage = prepareFilesForStaging(filesToStage, tmpJarLocation);
options.setFilesToStage(resourcesToStage);
}
|
@Test
public void testPackagingDirectoryResourceToJarFile() throws IOException {
String directoryPath = tmpFolder.newFolder().getAbsolutePath();
List<String> filesToStage = Arrays.asList(directoryPath);
String temporaryLocation = tmpFolder.newFolder().getAbsolutePath();
List<String> result = PipelineResources.prepareFilesForStaging(filesToStage, temporaryLocation);
assertTrue(new File(result.get(0)).exists());
assertTrue(result.get(0).matches(".*\\.jar"));
}
|
public boolean setResolution(DefaultIssue issue, @Nullable String resolution, IssueChangeContext context) {
if (!Objects.equals(resolution, issue.resolution())) {
issue.setFieldChange(context, RESOLUTION, issue.resolution(), resolution);
issue.setResolution(resolution);
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
}
return false;
}
|
@Test
void setResolution_shouldTriggerFieldChange() {
boolean updated = underTest.setResolution(issue, Issue.STATUS_OPEN, context);
assertThat(updated).isTrue();
assertThat(issue.resolution()).isEqualTo(Issue.STATUS_OPEN);
FieldDiffs.Diff diff = issue.currentChange().get(IssueFieldsSetter.RESOLUTION);
assertThat(diff.oldValue()).isNull();
assertThat(diff.newValue()).isEqualTo("OPEN");
assertThat(issue.mustSendNotifications()).isTrue();
}
|
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
case TRANSACTION_CONTEXT_EVENT :
return "Transaction_context";
case VIEW_CHANGE_EVENT :
return "view_change";
case XA_PREPARE_LOG_EVENT :
return "Xa_prepare";
case TRANSACTION_PAYLOAD_EVENT :
return "transaction_payload";
default:
return "Unknown type:" + type;
}
}
|
@Test
public void getTypeNameInputPositiveOutputNotNull29() {
// Arrange
final int type = 22;
// Act
final String actual = LogEvent.getTypeName(type);
// Assert result
Assert.assertEquals("Delete_rows_event_old", actual);
}
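// Hedged companion sketch: one known mapping plus the fallback branch of the
// switch above.
@Test
public void getTypeNameKnownAndUnknownSketch() {
Assert.assertEquals("Xid", LogEvent.getTypeName(LogEvent.XID_EVENT));
Assert.assertEquals("Unknown type:-1", LogEvent.getTypeName(-1));
}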
|
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
Objects.requireNonNull(metric);
if (batchMeasure == null) {
return Optional.empty();
}
Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(builder, batchMeasure);
case LONG:
return toLongMeasure(builder, batchMeasure);
case DOUBLE:
return toDoubleMeasure(builder, batchMeasure);
case BOOLEAN:
return toBooleanMeasure(builder, batchMeasure);
case STRING:
return toStringMeasure(builder, batchMeasure);
case LEVEL:
return toLevelMeasure(builder, batchMeasure);
case NO_VALUE:
return toNoValueMeasure(builder);
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
}
|
@Test
public void toMeasure_returns_no_value_if_dto_has_no_value_for_Long_Metric() {
Optional<Measure> measure = underTest.toMeasure(EMPTY_BATCH_MEASURE, SOME_LONG_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE);
}
|
@Override
public ObjectNode encode(VirtualHost vHost, CodecContext context) {
checkNotNull(vHost, NULL_OBJECT_MSG);
final JsonCodec<HostLocation> locationCodec =
context.codec(HostLocation.class);
final ObjectNode result = context.mapper().createObjectNode()
.put(NETWORK_ID, vHost.networkId().toString())
.put(HOST_ID, vHost.id().toString())
.put(MAC_ADDRESS, vHost.mac().toString())
.put(VLAN, vHost.vlan().toString());
final ArrayNode jsonIpAddresses = result.putArray(IP_ADDRESSES);
for (final IpAddress ipAddress : vHost.ipAddresses()) {
jsonIpAddresses.add(ipAddress.toString());
}
result.set(IP_ADDRESSES, jsonIpAddresses);
final ArrayNode jsonLocations = result.putArray("locations");
for (final HostLocation location : vHost.locations()) {
jsonLocations.add(locationCodec.encode(location, context));
}
result.set("locations", jsonLocations);
return result;
}
|
@Test
public void testEncode() {
MockCodecContext context = new MockCodecContext();
NetworkId networkId = NetworkId.networkId(TEST_NETWORK_ID);
HostId id = NetTestTools.hid(TEST_HOST_ID);
MacAddress mac = MacAddress.valueOf(TEST_MAC_ADDRESS);
VlanId vlan = VlanId.vlanId(TEST_VLAN_ID);
HostLocation location =
new HostLocation(CONNECT_POINT, 0L);
Set<IpAddress> ips = ImmutableSet.of(IpAddress.valueOf(TEST_IP1),
IpAddress.valueOf(TEST_IP2));
VirtualHost host =
new DefaultVirtualHost(networkId, id, mac, vlan, location, ips);
JsonCodec<VirtualHost> codec = context.codec(VirtualHost.class);
ObjectNode node = codec.encode(host, context);
assertThat(node.get(VirtualHostCodec.NETWORK_ID).asLong(),
is(TEST_NETWORK_ID));
assertThat(node.get(VirtualHostCodec.HOST_ID).asText(),
is(TEST_HOST_ID));
assertThat(node.get(VirtualHostCodec.MAC_ADDRESS).asText(),
is(TEST_MAC_ADDRESS));
assertThat(node.get(VirtualHostCodec.VLAN).asInt(),
is((int) TEST_VLAN_ID));
assertThat(node.get(VirtualHostCodec.HOST_LOCATION).get(0).get("elementId").asText(),
is(location.deviceId().toString()));
assertThat(node.get(VirtualHostCodec.HOST_LOCATION).get(0).get("port").asLong(),
is(location.port().toLong()));
JsonNode jsonIps = node.get(VirtualHostCodec.IP_ADDRESSES);
assertThat(jsonIps, notNullValue());
assertThat(jsonIps.isArray(), is(true));
assertThat(jsonIps.size(), is(ips.size()));
IntStream.of(0, 1).forEach(index ->
assertThat(jsonIps.get(index).asText(),
isOneOf(TEST_IP1, TEST_IP2)));
}
|
public Flux<E> getCacheAll() {
return getCache().getFlux(ALL_DATA_KEY, () -> EnableCacheReactiveCrudService.super.createQuery().fetch());
}
|
@Test
public void test2() {
TestEntity entity = TestEntity.of("test1",100,"testName");
entityService
.createDelete()
.notNull(TestEntity::getId)
.execute()
.block();
entityService
.insert(Mono.just(entity))
.as(StepVerifier::create)
.expectNext(1)
.verifyComplete();
entityService
.getCacheAll()
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
entity.setAge(120);
entityService
.updateById(entity.getId(), entity)
.as(StepVerifier::create)
.expectNext(1)
.verifyComplete();
entityService
.getCacheAll()
.switchIfEmpty(Mono.error(NullPointerException::new))
.as(StepVerifier::create)
.expectNextMatches(t -> t.getAge().equals(120))
.verifyComplete();
entity.setId(null);
entityService
.insert(Mono.just(entity))
.as(StepVerifier::create)
.expectNext(1)
.verifyComplete();
entityService
.getCacheAll()
.as(StepVerifier::create)
.expectNextCount(2)
.verifyComplete();
entityService
.deleteById(entity.getId())
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
entityService
.getCacheAll()
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
}
|
public static Type toZetaSqlType(FieldType fieldType) {
switch (fieldType.getTypeName()) {
case INT64:
return TypeFactory.createSimpleType(TypeKind.TYPE_INT64);
case DOUBLE:
return TypeFactory.createSimpleType(TypeKind.TYPE_DOUBLE);
case BOOLEAN:
return TypeFactory.createSimpleType(TypeKind.TYPE_BOOL);
case STRING:
return TypeFactory.createSimpleType(TypeKind.TYPE_STRING);
case BYTES:
return TypeFactory.createSimpleType(TypeKind.TYPE_BYTES);
case DECIMAL:
return TypeFactory.createSimpleType(TypeKind.TYPE_NUMERIC);
case DATETIME:
// TODO[https://github.com/apache/beam/issues/20364]: Mapping TIMESTAMP to a Beam
// LogicalType instead?
return TypeFactory.createSimpleType(TypeKind.TYPE_TIMESTAMP);
case LOGICAL_TYPE:
String identifier = fieldType.getLogicalType().getIdentifier();
if (SqlTypes.DATE.getIdentifier().equals(identifier)) {
return TypeFactory.createSimpleType(TypeKind.TYPE_DATE);
} else if (SqlTypes.TIME.getIdentifier().equals(identifier)) {
return TypeFactory.createSimpleType(TypeKind.TYPE_TIME);
} else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) {
return TypeFactory.createSimpleType(TypeKind.TYPE_DATETIME);
} else {
throw new UnsupportedOperationException("Unknown Beam logical type: " + identifier);
}
case ARRAY:
return toZetaSqlArrayType(fieldType.getCollectionElementType());
case ROW:
return toZetaSqlStructType(fieldType.getRowSchema());
default:
throw new UnsupportedOperationException(
"Unknown Beam fieldType: " + fieldType.getTypeName());
}
}
|
@Test
public void testBeamFieldTypeToZetaSqlType() {
assertEquals(ZetaSqlBeamTranslationUtils.toZetaSqlType(TEST_FIELD_TYPE), TEST_TYPE);
}
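// Hedged companion sketch (based on the switch above): a simple type and an
// array type mapping, built with Beam's FieldType factory methods.
@Test
public void testBeamSimpleAndArrayTypesSketch() {
assertEquals(TypeFactory.createSimpleType(TypeKind.TYPE_INT64), ZetaSqlBeamTranslationUtils.toZetaSqlType(FieldType.INT64));
assertEquals(TypeFactory.createArrayType(TypeFactory.createSimpleType(TypeKind.TYPE_STRING)), ZetaSqlBeamTranslationUtils.toZetaSqlType(FieldType.array(FieldType.STRING)));
}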
|
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
byte[] bytes = pollEntryBytes(timeout);
if (bytes == null) {
return null;
}
return DLQEntry.deserialize(bytes);
}
|
@Test
public void testBlockAndSegmentBoundary() throws Exception {
Event event = createEventWithConstantSerializationOverhead();
event.setField("T", generateMessageContent(PAD_FOR_BLOCK_SIZE_EVENT));
Timestamp timestamp = constantSerializationLengthTimestamp();
try(DeadLetterQueueWriter writeManager = DeadLetterQueueWriter
.newBuilder(dir, BLOCK_SIZE, defaultDlqSize, Duration.ofSeconds(1))
.build()) {
for (int i = 0; i < 2; i++) {
DLQEntry entry = new DLQEntry(event, "", "", "", timestamp);
assertThat(entry.serialize().length + RecordIOWriter.RECORD_HEADER_SIZE, is(BLOCK_SIZE));
writeManager.writeEntry(entry);
}
}
try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) {
for (int i = 0; i < 2;i++) {
readManager.pollEntry(100);
}
}
}
|
public <T> void resolve(T resolvable) {
ParamResolver resolver = this;
if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
ParamScope newScope = (ParamScope) resolvable;
resolver = newScope.applyOver(resolver);
}
resolveStringLeaves(resolvable, resolver);
resolveNonStringLeaves(resolvable, resolver);
resolveNodes(resolvable, resolver);
}
|
@Test
public void shouldProvideContextWhenAnExceptionOccursBecauseOfIncompleteParamAtEnd() {
PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
pipelineConfig.setLabelTemplate("abc#{");
new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig);
assertThat(pipelineConfig.errors().on("labelTemplate"), is("Incomplete param usage in 'abc#{'"));
}
|
public List<DirectEncryptedPseudonymType> provideDep(ProvideDEPsRequest request) throws BsnkException {
try {
return ((BSNKDEPPort) this.bindingProvider).bsnkProvideDEPs(request).getDirectEncryptedPseudonyms();
} catch (SOAPFaultException ex) {
if (ex.getCause().getMessage().equals("The signature or decryption was invalid")) {
throw new BsnkException("SignatureValidationFault", ex.getCause().getMessage(), ex.getCause());
}
throw new BsnkException("BSNKProvideDEPFault", ex.getMessage(), ex);
} catch (WebServiceException ex) {
throw new BsnkException("Could not send bsnkProvidePPPPCAOptimized", ex.getCause().getMessage(),
ex.getCause());
} catch (BSNKProvideDEPFault ex) {
throw new BsnkException("BSNKProvideDEPFault", ex.getCause().getMessage(), ex.getCause());
}
}
|
@Test
public void testRequestHasValidSignature() throws BsnkException {
setupWireMock();
client.provideDep(request);
wireMockServer.verify(postRequestedFor(urlPathEqualTo("/bsnk_stub/provideDep")).withHeader("Content-Type",
containing("xml")));
ServeEvent serveEvent = wireMockServer.getAllServeEvents().get(0);
String requestBody = new String(serveEvent.getRequest().getBody());
SignatureValidator signatureValidator = new SignatureValidator(this.signingTruststore,
this.signingTruststorePassword);
try {
signatureValidator.validate(requestBody);
} catch (WSSecurityException ex) {
fail("Signature was invalid");
} catch (Exception ex) {
fail(ex.getMessage());
}
}
|
Optional<ImageMetadataTemplate> retrieveMetadata(ImageReference imageReference)
throws IOException, CacheCorruptedException {
Path imageDirectory = cacheStorageFiles.getImageDirectory(imageReference);
Path metadataPath = imageDirectory.resolve("manifests_configs.json");
if (!Files.exists(metadataPath)) {
return Optional.empty();
}
ImageMetadataTemplate metadata;
try (LockFile ignored = LockFile.lock(imageDirectory.resolve("lock"))) {
metadata = JsonTemplateMapper.readJsonFromFile(metadataPath, ImageMetadataTemplate.class);
}
verifyImageMetadata(metadata, imageDirectory);
return Optional.of(metadata);
}
|
@Test
public void testRetrieveMetadata_v22SingleManifest()
throws IOException, URISyntaxException, CacheCorruptedException {
setupCachedMetadataV22(cacheDirectory);
ImageMetadataTemplate metadata =
cacheStorageReader.retrieveMetadata(ImageReference.of("test", "image", "tag")).get();
Assert.assertNull(metadata.getManifestList());
Assert.assertEquals(1, metadata.getManifestsAndConfigs().size());
V22ManifestTemplate manifestTemplate =
(V22ManifestTemplate) metadata.getManifestsAndConfigs().get(0).getManifest();
Assert.assertEquals(2, manifestTemplate.getSchemaVersion());
Assert.assertEquals(
"8c662931926fa990b41da3c9f42663a537ccd498130030f9149173a0493832ad",
manifestTemplate.getContainerConfiguration().getDigest().getHash());
}
|
public static void preserve(FileSystem targetFS, Path path,
CopyListingFileStatus srcFileStatus,
EnumSet<FileAttribute> attributes,
boolean preserveRawXattrs) throws IOException {
// strip out those attributes we don't need any more
attributes.remove(FileAttribute.BLOCKSIZE);
attributes.remove(FileAttribute.CHECKSUMTYPE);
// If not preserving anything from FileStatus, don't bother fetching it.
FileStatus targetFileStatus = attributes.isEmpty() ? null :
targetFS.getFileStatus(path);
String group = targetFileStatus == null ? null :
targetFileStatus.getGroup();
String user = targetFileStatus == null ? null :
targetFileStatus.getOwner();
boolean chown = false;
if (attributes.contains(FileAttribute.ACL)) {
List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
if (!srcAcl.equals(targetAcl)) {
targetFS.removeAcl(path);
targetFS.setAcl(path, srcAcl);
}
// setAcl doesn't preserve sticky bit, so also call setPermission if needed.
if (srcFileStatus.getPermission().getStickyBit() !=
targetFileStatus.getPermission().getStickyBit()) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
} else if (attributes.contains(FileAttribute.PERMISSION) &&
!srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
if (preserveXAttrs || preserveRawXattrs) {
final String rawNS =
StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
String xattrName = entry.getKey();
if (xattrName.startsWith(rawNS) || preserveXAttrs) {
targetFS.setXAttr(path, xattrName, entry.getValue());
}
}
}
}
// The replication factor can only be preserved for replicated files.
// It is ignored when either the source or target file are erasure coded.
if (attributes.contains(FileAttribute.REPLICATION) &&
!targetFileStatus.isDirectory() &&
!targetFileStatus.isErasureCoded() &&
!srcFileStatus.isErasureCoded() &&
srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
targetFS.setReplication(path, srcFileStatus.getReplication());
}
if (attributes.contains(FileAttribute.GROUP) &&
!group.equals(srcFileStatus.getGroup())) {
group = srcFileStatus.getGroup();
chown = true;
}
if (attributes.contains(FileAttribute.USER) &&
!user.equals(srcFileStatus.getOwner())) {
user = srcFileStatus.getOwner();
chown = true;
}
if (chown) {
targetFS.setOwner(path, user, group);
}
if (attributes.contains(FileAttribute.TIMES)) {
targetFS.setTimes(path,
srcFileStatus.getModificationTime(),
srcFileStatus.getAccessTime());
}
}
|
@Test
public void testSkipsNeedlessAttributes() throws Exception {
FileSystem fs = FileSystem.get(config);
// preserve replication, block size, user, group, permission,
// checksum type and timestamps
Path src = new Path("/tmp/testSkipsNeedlessAttributes/source");
Path dst = new Path("/tmp/testSkipsNeedlessAttributes/dest");
// there is no need to actually create a source file, just a file
// status of one
CopyListingFileStatus srcStatus = new CopyListingFileStatus(
new FileStatus(0, false, 1, 32, 0, src));
// if an attribute is needed, preserve will fail to find the file
EnumSet<FileAttribute> attrs = EnumSet.of(FileAttribute.ACL,
FileAttribute.GROUP,
FileAttribute.PERMISSION,
FileAttribute.TIMES,
FileAttribute.XATTR);
for (FileAttribute attr : attrs) {
intercept(FileNotFoundException.class, () ->
DistCpUtils.preserve(fs, dst, srcStatus,
EnumSet.of(attr),
false));
}
// but with the preservation flags only used
// in file creation, this does not happen
DistCpUtils.preserve(fs, dst, srcStatus,
EnumSet.of(
FileAttribute.BLOCKSIZE,
FileAttribute.CHECKSUMTYPE),
false);
}
|
public void remove(Supplier<JournalContext> ctx, String path, Set<String> keys) {
try (LockResource r = new LockResource(mLock.writeLock())) {
Map<String, String> properties = mState.getProperties(path);
if (!properties.isEmpty()) {
keys.forEach(properties::remove);
if (properties.isEmpty()) {
mState.applyAndJournal(ctx, RemovePathPropertiesEntry.newBuilder().setPath(path).build());
} else {
mState.applyAndJournal(ctx, PathPropertiesEntry.newBuilder()
.setPath(path).putAllProperties(properties).build());
}
mHash.markOutdated();
}
}
}
|
@Test
public void remove() {
PathProperties properties = new PathProperties();
// remove from empty properties
properties.remove(NoopJournalContext.INSTANCE, ROOT, new HashSet<>(Arrays.asList(
PropertyKey.USER_FILE_READ_TYPE_DEFAULT.getName(),
PropertyKey.USER_FILE_WRITE_TYPE_DEFAULT.getName())));
properties.removeAll(NoopJournalContext.INSTANCE, DIR1);
Assert.assertTrue(properties.get().isEmpty());
properties.add(NoopJournalContext.INSTANCE, ROOT, READ_CACHE_WRITE_CACHE_THROUGH);
properties.add(NoopJournalContext.INSTANCE, DIR1, READ_NO_CACHE_WRITE_THROUGH);
Map<String, Map<String, String>> got = properties.get();
Assert.assertEquals(2, got.size());
assertPropertiesEqual(READ_CACHE_WRITE_CACHE_THROUGH, got.get(ROOT));
assertPropertiesEqual(READ_NO_CACHE_WRITE_THROUGH, got.get(DIR1));
// remove non-existent paths
properties.removeAll(NoopJournalContext.INSTANCE, "non-existent");
properties.remove(NoopJournalContext.INSTANCE, "non-existent", new HashSet<>(Arrays.asList(
PropertyKey.USER_FILE_READ_TYPE_DEFAULT.getName(),
PropertyKey.USER_FILE_WRITE_TYPE_DEFAULT.getName())));
got = properties.get();
Assert.assertEquals(2, got.size());
assertPropertiesEqual(READ_CACHE_WRITE_CACHE_THROUGH, got.get(ROOT));
assertPropertiesEqual(READ_NO_CACHE_WRITE_THROUGH, got.get(DIR1));
// remove non-existent keys
properties.remove(NoopJournalContext.INSTANCE, ROOT, new HashSet<>(Arrays.asList(
PropertyKey.USER_APP_ID.getName())));
properties.remove(NoopJournalContext.INSTANCE, DIR1, new HashSet<>(Arrays.asList(
PropertyKey.UNDERFS_S3_BULK_DELETE_ENABLED.getName())));
got = properties.get();
Assert.assertEquals(2, got.size());
assertPropertiesEqual(READ_CACHE_WRITE_CACHE_THROUGH, got.get(ROOT));
assertPropertiesEqual(READ_NO_CACHE_WRITE_THROUGH, got.get(DIR1));
// remove existing keys
properties.remove(NoopJournalContext.INSTANCE, ROOT, new HashSet<>(Arrays.asList(
PropertyKey.USER_FILE_WRITE_TYPE_DEFAULT.getName())));
properties.remove(NoopJournalContext.INSTANCE, DIR1, new HashSet<>(Arrays.asList(
PropertyKey.USER_FILE_READ_TYPE_DEFAULT.getName())));
got = properties.get();
Assert.assertEquals(2, got.size());
assertPropertiesEqual(READ_CACHE, got.get(ROOT));
assertPropertiesEqual(WRITE_THROUGH, got.get(DIR1));
// remove existing paths
properties.removeAll(NoopJournalContext.INSTANCE, ROOT);
got = properties.get();
Assert.assertEquals(1, got.size());
assertPropertiesEqual(WRITE_THROUGH, got.get(DIR1));
properties.removeAll(NoopJournalContext.INSTANCE, DIR1);
got = properties.get();
Assert.assertEquals(0, got.size());
}
|
public void removeExpireConsumerGroupInfo() {
List<String> removeList = new ArrayList<>();
consumerCompensationTable.forEach((group, consumerGroupInfo) -> {
List<String> removeTopicList = new ArrayList<>();
ConcurrentMap<String, SubscriptionData> subscriptionTable = consumerGroupInfo.getSubscriptionTable();
subscriptionTable.forEach((topic, subscriptionData) -> {
long diff = System.currentTimeMillis() - subscriptionData.getSubVersion();
if (diff > subscriptionExpiredTimeout) {
removeTopicList.add(topic);
}
});
for (String topic : removeTopicList) {
subscriptionTable.remove(topic);
if (subscriptionTable.isEmpty()) {
removeList.add(group);
}
}
});
for (String group : removeList) {
consumerCompensationTable.remove(group);
}
}
|
@Test
public void removeExpireConsumerGroupInfo() {
SubscriptionData subscriptionData = new SubscriptionData(TOPIC, SubscriptionData.SUB_ALL);
subscriptionData.setSubVersion(System.currentTimeMillis() - brokerConfig.getSubscriptionExpiredTimeout() * 2);
consumerManager.compensateSubscribeData(GROUP, TOPIC, subscriptionData);
consumerManager.compensateSubscribeData(GROUP, TOPIC + "_1", new SubscriptionData(TOPIC, SubscriptionData.SUB_ALL));
consumerManager.removeExpireConsumerGroupInfo();
Assertions.assertThat(consumerManager.getConsumerGroupInfo(GROUP, true)).isNotNull();
Assertions.assertThat(consumerManager.findSubscriptionData(GROUP, TOPIC)).isNull();
Assertions.assertThat(consumerManager.findSubscriptionData(GROUP, TOPIC + "_1")).isNotNull();
}
|
@Override
public Optional<NativeEntity<GrokPattern>> findExisting(Entity entity, Map<String, ValueReference> parameters) {
if (entity instanceof EntityV1) {
return findExisting((EntityV1) entity);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
}
|
@Test
public void findExistingFailsWithDivergingPatterns() throws ValidationException {
grokPatternService.save(GrokPattern.create("Test", "[a-z]+"));
final Entity grokPatternEntity = EntityV1.builder()
.id(ModelId.of("1"))
.type(ModelTypes.GROK_PATTERN_V1)
.data(objectMapper.convertValue(GrokPatternEntity.create("Test", "BOOM"), JsonNode.class))
.build();
assertThatThrownBy(() -> facade.findExisting(grokPatternEntity, Collections.emptyMap()))
.isInstanceOf(DivergingEntityConfigurationException.class)
.hasMessage("Expected Grok pattern for name \"Test\": <BOOM>; actual Grok pattern: <[a-z]+>");
}
|
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
|
@Test
public void testMergeAclEntriesAutomaticDefaultGroup() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
|
@PostMapping("/authorize")
@Operation(summary = "申请授权", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【提交】调用")
@Parameters({
@Parameter(name = "response_type", required = true, description = "响应类型", example = "code"),
@Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"),
@Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // 使用 Map<String, Boolean> 格式,Spring MVC 暂时不支持这么接收参数
@Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"),
@Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"),
@Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
@RequestParam("client_id") String clientId,
@RequestParam(value = "scope", required = false) String scope,
@RequestParam("redirect_uri") String redirectUri,
@RequestParam(value = "auto_approve") Boolean autoApprove,
@RequestParam(value = "state", required = false) String state) {
@SuppressWarnings("unchecked")
Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
// 0. Verify the user is already logged in (enforced by Spring Security).
// 1.1 Verify responseType is either code or token.
OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
// 1.2 Verify the redirectUri domain is allowed and scope falls within the client's authorized scopes.
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null,
grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
// 2.1 If autoApprove is true, this is scenario one (automatic approval).
if (Boolean.TRUE.equals(autoApprove)) {
// If automatic approval is not possible, return a null url so the frontend does not redirect.
if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
return success(null);
}
} else { // 2.2 If autoApprove is false, this is scenario two (manual approval)
// If the evaluated approval does not pass, redirect to an error link.
if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state,
"access_denied", "User denied access"));
}
}
// 3.1 For the code (authorization code) flow, issue an authorization code and redirect.
List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// 3.2 For token, i.e. the implicit flow, issue an accessToken and redirect.
return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
|
@Test // autoApprove = false, approved + code
public void testApproveOrDeny_approveWithCode() {
// prepare parameters
String responseType = "code";
String clientId = randomString();
String scope = "{\"read\": true, \"write\": false}";
String redirectUri = "https://www.iocoder.cn";
String state = "test";
// mock method (client)
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId(clientId).setAdditionalInformation(null);
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("authorization_code"),
eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client);
// mock method (scenario two)
when(oauth2ApproveService.updateAfterApproval(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId),
eq(MapUtil.builder(new LinkedHashMap<String, Boolean>()).put("read", true).put("write", false).build())))
.thenReturn(true);
// mock method (authorization code)
String authorizationCode = "test_code";
when(oauth2GrantService.grantAuthorizationCodeForCode(isNull(), eq(UserTypeEnum.ADMIN.getValue()),
eq(clientId), eq(ListUtil.toList("read")), eq(redirectUri), eq(state))).thenReturn(authorizationCode);
// Invoke
CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId,
scope, redirectUri, false, state);
// Assert
assertEquals(0, result.getCode());
assertEquals("https://www.iocoder.cn?code=test_code&state=test", result.getData());
}
|
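The approve path above has a natural mirror: when updateAfterApproval returns false, approveOrDeny builds an access_denied redirect via OAuth2Utils.buildUnsuccessfulRedirect. A minimal companion test sketch in the same mocking style; the exact query-string layout of the error redirect is not asserted because it depends on buildUnsuccessfulRedirect, and the static imports (assertTrue etc.) are assumed to match the surrounding test class:

@Test // autoApprove = false, denied
public void testApproveOrDeny_deny() {
    // Prepare parameters
    String responseType = "code";
    String clientId = randomString();
    String scope = "{\"read\": false}";
    String redirectUri = "https://www.iocoder.cn";
    String state = "test";
    // Mock the client lookup
    OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId(clientId).setAdditionalInformation(null);
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("authorization_code"),
            eq(asSet("read")), eq(redirectUri))).thenReturn(client);
    // Mock scenario two: the approval update reports a denial
    when(oauth2ApproveService.updateAfterApproval(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId),
            eq(MapUtil.builder(new LinkedHashMap<String, Boolean>()).put("read", false).build())))
            .thenReturn(false);
    // Invoke
    CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId,
            scope, redirectUri, false, state);
    // Assert: an access_denied redirect is returned instead of an authorization code
    assertEquals(0, result.getCode());
    assertTrue(result.getData().contains("access_denied"));
}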
public static String[] splitString( String string, String separator ) {
/*
 * Example: "a;b;c;d" with separator ";" --> new String[] { "a", "b", "c", "d" }
 */
List<String> list = new ArrayList<>();
if ( string == null || string.length() == 0 ) {
return new String[] {};
}
int sepLen = separator.length();
int from = 0;
int end = string.length() - sepLen + 1;
for ( int i = from; i < end; i += sepLen ) {
if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) {
// OK, we found a separator, the string to add to the list
// is [from, i[
list.add( nullToEmpty( string.substring( from, i ) ) );
from = i + sepLen;
}
}
// Wait, if the string didn't end with a separator, we still have information at the end of the string...
// In our example that would be "d"...
if ( from + sepLen <= string.length() ) {
list.add( nullToEmpty( string.substring( from, string.length() ) ) );
}
return list.toArray( new String[list.size()] );
}
|
@Test
public void testSplitStringNullWithDelimiterNullAndEnclosureNull() {
String[] result = Const.splitString( null, null, null );
assertNull( result );
}
|
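For reference, a minimal usage sketch of the two-argument overload shown above; the expected values follow directly from the loop logic, and the surrounding class context is assumed:

// "a;b;c;d" with separator ";" yields four elements
String[] parts = Const.splitString("a;b;c;d", ";");
// parts = { "a", "b", "c", "d" }
String[] tail = Const.splitString("a;b;", ";");
// A trailing separator drops the empty tail: tail = { "a", "b" }
String[] empty = Const.splitString(null, ";");
// null or empty input yields an empty array, never null: empty.length == 0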
@Override
public void deleteAiVideoConfig(Long id) {
// Validate that the record exists
validateAiVideoConfigExists(id);
// Delete
aiVideoConfigMapper.deleteById(id);
}
|
@Test
public void testDeleteAiVideoConfig_success() {
// Mock data
AiVideoConfigDO dbAiVideoConfig = randomPojo(AiVideoConfigDO.class);
aiVideoConfigMapper.insert(dbAiVideoConfig); // @Sql: first insert an existing record
// Prepare parameters
Long id = dbAiVideoConfig.getId();
// Invoke
aiVideoConfigService.deleteAiVideoConfig(id);
// Verify the record no longer exists
assertNull(aiVideoConfigMapper.selectById(id));
}
|
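A common companion in this test style exercises the not-exists branch. The sketch below assumes validateAiVideoConfigExists throws a ServiceException carrying an AI_VIDEO_CONFIG_NOT_EXISTS error code; the constant name and the randomLongId/assertServiceException helpers are assumptions based on this codebase's conventions:

@Test
public void testDeleteAiVideoConfig_notExists() {
    // Prepare parameters: an id that was never inserted
    Long id = randomLongId();
    // Invoke and expect the not-exists validation to fail
    assertServiceException(() -> aiVideoConfigService.deleteAiVideoConfig(id), AI_VIDEO_CONFIG_NOT_EXISTS);
}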
public Entry findChildByPath(Entry top, String path) {
final String canonicalPath = replaceAliases(path);
return top.getChildByPath(canonicalPath.split("/"));
}
|
@Test
public void findsChildByPathAlias() throws Exception {
final EntryNavigator entryNavigator = new EntryNavigator(Collections.singletonMap("medium", "middle"));
Entry top = new Entry();
Entry middle = new Entry();
top.addChild(middle);
middle.setName("middle");
Entry down = new Entry();
middle.addChild(down);
down.setName("down");
assertThat(entryNavigator.findChildByPath(top, "medium/down"), equalTo(down));
}
|
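findChildByPath relies on a replaceAliases helper that is not shown here. A plausible minimal form, consistent with the alias map used in the test ("medium" -> "middle"), is sketched below; this is an assumption, not the project's actual implementation, and the aliases field name plus java.util.Arrays / java.util.stream.Collectors imports are assumed:

// Hypothetical sketch: segment-wise alias substitution over the path
private String replaceAliases(String path) {
    return Arrays.stream(path.split("/"))
            .map(segment -> aliases.getOrDefault(segment, segment))
            .collect(Collectors.joining("/"));
}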
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
try {
if(null != status.getModified()) {
    final FileEntity response = new FilesApi(new BrickApiClient(session))
        .patchFilesPath(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)),
            new FilesPathBody().providedMtime(new DateTime(status.getModified())));
status.setResponse(new BrickAttributesFinderFeature(session).toAttributes(response));
}
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Failure to write attributes of {0}", e, file);
}
}
|
@Test
public void testSetTimestampFile() throws Exception {
final Path file = new BrickTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final TransferStatus status = new TransferStatus().withModified(5000L);
new BrickTimestampFeature(session).setTimestamp(file, status);
final PathAttributes attr = new BrickAttributesFinderFeature(session).find(file);
assertEquals(5000L, attr.getModificationDate());
assertEquals(attr, status.getResponse());
assertEquals(5000L, new DefaultAttributesFinderFeature(session).find(file).getModificationDate());
new BrickDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
if (!coordinatorRequestManager.coordinator().isPresent() ||
shareMembershipManager.shouldSkipHeartbeat() ||
pollTimer.isExpired()) {
shareMembershipManager.onHeartbeatRequestSkipped();
return NetworkClientDelegate.PollResult.EMPTY;
}
pollTimer.update(currentTimeMs);
if (pollTimer.isExpired() && !shareMembershipManager.isLeavingGroup()) {
logger.warn("Share consumer poll timeout has expired. This means the time between subsequent calls to poll() " +
"was longer than the configured max.poll.interval.ms, which typically implies that " +
"the poll loop is spending too much time processing messages. You can address this " +
"either by increasing max.poll.interval.ms or by reducing the maximum size of batches " +
"returned in poll() with max.poll.records.");
shareMembershipManager.transitionToSendingLeaveGroup(true);
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, true);
// We can ignore the leave response because we can join before or after receiving the response.
heartbeatRequestState.reset();
heartbeatState.reset();
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
}
boolean heartbeatNow = shareMembershipManager.shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight();
if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) {
return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
}
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false);
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
}
|
@Test
public void testHeartbeatNotSentIfAnotherOneInFlight() {
time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS);
// Heartbeat sent (no response received)
NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
NetworkClientDelegate.UnsentRequest inflightReq = result.unsentRequests.get(0);
time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS);
result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size(), "No heartbeat should be sent while a " +
    "previous one is in flight");
time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS);
result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size(), "No heartbeat should be sent when the " +
"interval expires if there is a previous heartbeat request in-flight");
// Receive response for the inflight after the interval expired. The next HB should be sent
// on the next poll waiting only for the minimal backoff.
inflightReq.handler().onComplete(createHeartbeatResponse(inflightReq, Errors.NONE));
time.sleep(DEFAULT_RETRY_BACKOFF_MS);
result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size(), "A next heartbeat should be sent on " +
"the first poll after receiving a response that took longer than the interval, " +
"waiting only for the minimal backoff.");
}
|
public WorkflowTimeline getWorkflowTimeline(String workflowId) {
List<WorkflowVersionUpdateJobEvent> jobEvents =
withMetricLogError(
() ->
getPayloads(
GET_WORKFLOW_TIMELINE_QUERY,
stmt -> {
stmt.setString(1, workflowId);
},
WorkflowVersionUpdateJobEvent.class),
"getWorkflowTimeline",
"Failed getting timeline events for workflow id [{}]",
workflowId);
return new WorkflowTimeline(
workflowId,
jobEvents.stream()
.map(
e ->
new WorkflowTimeline.WorkflowTimelineEvent(
e.getAuthor(), e.getLog(), e.getEventTime()))
.collect(Collectors.toList()));
}
|
@Test
public void testGetWorkflowTimeline() throws Exception {
WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID1);
WorkflowDefinition definition =
workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
assertNotNull(wfd.getInternalId());
assertEquals(wfd, definition);
WorkflowTimeline results = workflowDao.getWorkflowTimeline(TEST_WORKFLOW_ID1);
assertEquals(TEST_WORKFLOW_ID1, results.getWorkflowId());
assertEquals(1, results.getTimelineEvents().size());
assertEquals(
"Created a new workflow version [1] for workflow id [sample-active-wf-with-props]",
results.getTimelineEvents().get(0).getLog());
assertEquals(1598399975650L, results.getTimelineEvents().get(0).getTimestamp());
}
|
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
}
|
@Test
public void set_created_after_from_created_since() {
Date now = parseDateTime("2013-07-25T07:35:00+0100");
when(clock.instant()).thenReturn(now.toInstant());
when(clock.getZone()).thenReturn(ZoneOffset.UTC);
SearchRequest request = new SearchRequest()
.setCreatedInLast("1y2m3w4d");
assertThat(underTest.create(request).createdAfter().date()).isEqualTo(parseDateTime("2012-04-30T07:35:00+0100"));
assertThat(underTest.create(request).createdAfter().inclusive()).isTrue();
}
|
public static StanzaError parseError(XmlPullParser parser) throws XmlPullParserException, IOException, SmackParsingException {
return parseError(parser, null);
}
|
@Test
public void ensureNoNullLangInParsedDescriptiveTexts() throws Exception {
final String text = "Dummy descriptive text";
final String errorXml = XMLBuilder
.create(StanzaError.ERROR).a("type", "cancel").up()
.element("internal-server-error", StanzaError.ERROR_CONDITION_AND_TEXT_NAMESPACE).up()
.element("text", StanzaError.ERROR_CONDITION_AND_TEXT_NAMESPACE).t(text).up()
.asString();
XmlPullParser parser = TestUtils.getParser(errorXml);
StanzaError error = PacketParserUtils.parseError(parser);
assertEquals(text, error.getDescriptiveText());
}
|
public FEELFnResult<String> invoke(@ParameterName("string") String string, @ParameterName("start position") Number start) {
return invoke(string, start, null);
}
|
@Test
void invokeNull3ParamsMethod() {
FunctionTestUtil.assertResultError(substringFunction.invoke(null, null, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(substringFunction.invoke("test", null, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(substringFunction.invoke("test", null, 2), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(substringFunction.invoke(null, 0, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(substringFunction.invoke(null, null, 2), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(substringFunction.invoke(null, 0, 2), InvalidParametersEvent.class);
}
|
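For contrast with the null-parameter cases above, a sketch of the happy path. FEEL substring positions are 1-based and a negative start counts back from the end of the string; the FunctionTestUtil.assertResult helper signature is assumed to mirror the assertResultError helper used above:

@Test
void invokeValidParams() {
    // FEEL substring("foobar", 3) starts at the 1-based position 3
    FunctionTestUtil.assertResult(substringFunction.invoke("foobar", 3), "obar");
    // A negative start counts back from the end: substring("foobar", -2) -> "ar"
    FunctionTestUtil.assertResult(substringFunction.invoke("foobar", -2), "ar");
}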
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
}
|
@Test
public void resolve_multipleEntriesWithDefaultValueAndEnvDefined() {
environment.set("FOO", "hello");
environment.set("BAR", "world");
assertThat(resolve("${FOO:-default}:${BAR:-default}"), equalTo("hello:world"));
}
|
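The test above covers the case where both variables are defined; by the same Bash-style ":-" default syntax, an undefined variable should fall back to its default. A hedged companion sketch using the same harness as the test above:

@Test
public void resolve_undefinedEntryFallsBackToDefault() {
    environment.set("FOO", "hello");
    // BAR is not set, so the ":-" default should apply to it
    assertThat(resolve("${FOO:-default}:${BAR:-default}"), equalTo("hello:default"));
}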
@VisibleForTesting
static void startSqlGateway(PrintStream stream, String[] args) {
SqlGatewayOptions cliOptions = SqlGatewayOptionsParser.parseSqlGatewayOptions(args);
if (cliOptions.isPrintHelp()) {
SqlGatewayOptionsParser.printHelpSqlGateway(stream);
return;
}
// startup checks and logging
EnvironmentInformation.logEnvironmentInfo(LOG, "SqlGateway", args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
DefaultContext defaultContext =
DefaultContext.load(
ConfigurationUtils.createConfiguration(cliOptions.getDynamicConfigs()),
Collections.emptyList(),
true);
SqlGateway gateway =
new SqlGateway(
defaultContext.getFlinkConfig(), SessionManager.create(defaultContext));
try {
Runtime.getRuntime().addShutdownHook(new ShutdownThread(gateway));
gateway.start();
gateway.waitUntilStop();
} catch (Throwable t) {
// The user cancelled the gateway manually with Ctrl+C
if (t instanceof InterruptedException) {
LOG.info("Caught " + t.getClass().getSimpleName() + ". Shutting down.");
return;
}
// make space in terminal
stream.println();
stream.println();
if (t instanceof SqlGatewayException) {
// Exception that the gateway can not handle.
throw (SqlGatewayException) t;
} else {
LOG.error(
"SqlGateway must stop. Unexpected exception. This is a bug. Please consider filing an issue.",
t);
throw new SqlGatewayException(
"Unexpected exception. This is a bug. Please consider filing an issue.", t);
}
} finally {
gateway.stop();
}
}
|
@Test
void testFailedToStartSqlGateway() {
try (PrintStream stream = new PrintStream(output)) {
assertThatThrownBy(() -> SqlGateway.startSqlGateway(stream, new String[0]))
.doesNotHaveToString(
"Unexpected exception. This is a bug. Please consider filing an issue.");
}
}
|
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
return aggregate(initializer, Materialized.with(null, null));
}
|
@Test
public void shouldNotHaveNullInitializerTwoOptionNamedOnAggregate() {
assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(null, Named.as("test")));
}
|
@Override
public Predicate accept(Visitor visitor, IndexRegistry indexes) {
Predicate[] result = VisitorUtils.acceptVisitor(predicates, visitor, indexes);
if (result != predicates) {
//inner predicates were modified by a visitor
AndPredicate newPredicate = new AndPredicate(result);
return visitor.visit(newPredicate, indexes);
}
return visitor.visit(this, indexes);
}
|
@Test
public void accept_whenInnerPredicateChangedOnAccept_thenReturnAndNewAndPredicate() {
Visitor mockVisitor = createPassthroughVisitor();
IndexRegistry mockIndexes = mock(IndexRegistry.class);
Predicate transformed = mock(Predicate.class);
Predicate innerPredicate = createMockVisitablePredicate(transformed);
Predicate[] innerPredicates = new Predicate[1];
innerPredicates[0] = innerPredicate;
AndPredicate andPredicate = new AndPredicate(innerPredicates);
AndPredicate result = (AndPredicate) andPredicate.accept(mockVisitor, mockIndexes);
assertThat(result).isNotSameAs(andPredicate);
Predicate[] newInnerPredicates = result.predicates;
assertThat(newInnerPredicates).hasSize(1);
assertThat(newInnerPredicates[0]).isEqualTo(transformed);
}
|
public static String getAttributesXml( Map<String, Map<String, String>> attributesMap ) {
return getAttributesXml( attributesMap, XML_TAG );
}
|
@Test
public void testGetAttributesXml_CustomTag() {
try ( MockedStatic<AttributesUtil> attributesUtilMockedStatic = mockStatic( AttributesUtil.class ) ) {
attributesUtilMockedStatic.when( () -> AttributesUtil.getAttributesXml( anyMap(), anyString() ) )
.thenCallRealMethod();
Map<String, String> attributesGroup = new HashMap<>();
Map<String, Map<String, String>> attributesMap = new HashMap<>();
attributesGroup.put( A_KEY, A_VALUE );
attributesMap.put( A_GROUP, attributesGroup );
String attributesXml = AttributesUtil.getAttributesXml( attributesMap, CUSTOM_TAG );
assertNotNull( attributesXml );
// The custom tag was used
assertTrue( attributesXml.contains( CUSTOM_TAG ) );
// The group is present
assertTrue( attributesXml.contains( A_GROUP ) );
// Both Key and Value are present
assertTrue( attributesXml.contains( A_KEY ) );
assertTrue( attributesXml.contains( A_VALUE ) );
}
}
|