focal_method | test_case |
---|---|
@Override
public String getName() {
return name;
} | @Test
public void testEqualsAndHashCode() {
assumeDifferentHashCodes();
EqualsVerifier.forClass(MultiMapConfig.class)
.suppress(Warning.NONFINAL_FIELDS, Warning.NULL_FIELDS)
.withPrefabValues(MultiMapConfigReadOnly.class,
new MultiMapConfigReadOnly(new MultiMapConfig("red")),
new MultiMapConfigReadOnly(new MultiMapConfig("black")))
.withPrefabValues(MergePolicyConfig.class,
new MergePolicyConfig(PutIfAbsentMergePolicy.class.getName(), 100),
new MergePolicyConfig(DiscardMergePolicy.class.getName(), 200))
.verify();
} |
@Override
protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
validateTableIdentifier(tableIdentifier);
GetItemResponse response =
dynamo.getItem(
GetItemRequest.builder()
.tableName(awsProperties.dynamoDbTableName())
.consistentRead(true)
.key(namespacePrimaryKey(tableIdentifier.namespace()))
.build());
if (!response.hasItem()) {
throw new NoSuchNamespaceException(
"Cannot find default warehouse location: namespace %s does not exist",
tableIdentifier.namespace());
}
String defaultLocationCol = toPropertyCol(PROPERTY_DEFAULT_LOCATION);
if (response.item().containsKey(defaultLocationCol)) {
return String.format(
"%s/%s", response.item().get(defaultLocationCol).s(), tableIdentifier.name());
} else {
return String.format(
"%s/%s.db/%s", warehousePath, tableIdentifier.namespace(), tableIdentifier.name());
}
} | @Test
public void testDefaultWarehouseLocationNoDbUri() {
Mockito.doReturn(GetItemResponse.builder().item(Maps.newHashMap()).build())
.when(dynamo)
.getItem(any(GetItemRequest.class));
String warehousePath = WAREHOUSE_PATH + "/db.db/table";
String defaultWarehouseLocation = dynamoCatalog.defaultWarehouseLocation(TABLE_IDENTIFIER);
assertThat(defaultWarehouseLocation).isEqualTo(warehousePath);
} |
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
} | @Test
void simpleTemplateMultipleExpressions() {
String template = "https://www.example.com/{foo}/{bar}/details";
UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
/* verify that the template has two variables named foo and bar */
List<String> uriTemplateVariables = uriTemplate.getVariables();
assertThat(uriTemplateVariables).contains("foo", "bar").hasSize(2);
/* expand the template */
Map<String, Object> variables = new LinkedHashMap<>();
variables.put("foo", "first");
variables.put("bar", "second");
String expandedTemplate = uriTemplate.expand(variables);
assertThat(expandedTemplate)
.isEqualToIgnoringCase("https://www.example.com/first/second/details");
assertThat(URI.create(expandedTemplate)).isNotNull();
} |
@VisibleForTesting
public static String getUserAgentString(String id, CommonUtils.ProcessType processType,
List<String> additionalInfo) {
List<String> info = new ArrayList<>();
info.add(id);
addUserAgentEnvironments(info);
info.addAll(additionalInfo);
info.add(String.format("processType:%s", processType.toString()));
return String.format("Alluxio/%s (%s)", ProjectConstants.VERSION,
Joiner.on(USER_AGENT_SEPARATOR + " ").skipNulls().join(info));
} | @Test
public void userAgent() {
String userAgentString = UpdateCheckUtils.getUserAgentString("cluster1",
CommonUtils.ProcessType.MASTER, new ArrayList<>());
Pattern pattern = Pattern.compile(
String.format("Alluxio\\/%s \\(cluster1(?:.+)[^;]\\)", ProjectConstants.VERSION));
Matcher matcher = pattern.matcher(userAgentString);
Assert.assertTrue(matcher.matches());
} |
public CounterRequest getCounterRequest(CounterRequestContext context) {
return getCounterRequestByName(context.getRequestName(), false);
} | @Test
public void testGetCounterRequest() {
counter.unbindContext();
final String requestName = "get counter request";
counter.bindContext(requestName, "my context", null, -1, -1);
final CounterRequest counterRequest = counter
.getCounterRequest(counter.getOrderedRootCurrentContexts().get(0));
assertEquals("request name", requestName, counterRequest.getName());
} |
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
throws IOException {
FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
List<HasMetadata> accompanyingResources = new ArrayList<>();
final List<KubernetesStepDecorator> stepDecorators =
new ArrayList<>(
Arrays.asList(
new InitJobManagerDecorator(kubernetesJobManagerParameters),
new EnvSecretsDecorator(kubernetesJobManagerParameters),
new MountSecretsDecorator(kubernetesJobManagerParameters),
new CmdJobManagerDecorator(kubernetesJobManagerParameters),
new InternalServiceDecorator(kubernetesJobManagerParameters),
new ExternalServiceDecorator(kubernetesJobManagerParameters)));
Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
}
if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
}
stepDecorators.addAll(
Arrays.asList(
new FlinkConfMountDecorator(kubernetesJobManagerParameters),
new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
for (KubernetesStepDecorator stepDecorator : stepDecorators) {
flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
}
final Deployment deployment =
createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
} | @Test
void testServices() throws IOException {
kubernetesJobManagerSpecification =
KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
flinkPod, kubernetesJobManagerParameters);
final List<Service> resultServices =
this.kubernetesJobManagerSpecification.getAccompanyingResources().stream()
.filter(x -> x instanceof Service)
.map(x -> (Service) x)
.collect(Collectors.toList());
assertThat(resultServices).hasSize(2);
final List<Service> internalServiceCandidates =
resultServices.stream()
.filter(
x ->
x.getMetadata()
.getName()
.equals(
InternalServiceDecorator
.getInternalServiceName(
CLUSTER_ID)))
.collect(Collectors.toList());
assertThat(internalServiceCandidates).hasSize(1);
final List<Service> restServiceCandidates =
resultServices.stream()
.filter(
x ->
x.getMetadata()
.getName()
.equals(
ExternalServiceDecorator
.getExternalServiceName(
CLUSTER_ID)))
.collect(Collectors.toList());
assertThat(restServiceCandidates).hasSize(1);
final Service resultInternalService = internalServiceCandidates.get(0);
assertThat(resultInternalService.getMetadata().getLabels()).hasSize(2);
assertThat(resultInternalService.getSpec().getType()).isNull();
assertThat(resultInternalService.getSpec().getClusterIP())
.isEqualTo(HeadlessClusterIPService.HEADLESS_CLUSTER_IP);
assertThat(resultInternalService.getSpec().getPorts()).hasSize(2);
assertThat(resultInternalService.getSpec().getSelector()).hasSize(3);
final Service resultRestService = restServiceCandidates.get(0);
assertThat(resultRestService.getMetadata().getLabels()).hasSize(2);
assertThat(resultRestService.getSpec().getType()).isEqualTo("ClusterIP");
assertThat(resultRestService.getSpec().getPorts()).hasSize(1);
assertThat(resultRestService.getSpec().getSelector()).hasSize(3);
} |
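The factory above is a straight pipeline over step decorators: each step transforms the pod and may emit extra resources that end up in the accompanying list. A minimal sketch of the contract the loop iterates over; the two method names are taken from the calls in the focal method, everything else is an assumption about the real Flink interface.

```java
import io.fabric8.kubernetes.api.model.HasMetadata;
import java.io.IOException;
import java.util.List;

// Sketch of the step-decorator contract used in the loop above; method names
// come from the focal method, the rest is assumed.
public interface KubernetesStepDecorator {
    // Returns a pod enriched with this step's settings (labels, env, mounts, ...).
    FlinkPod decorateFlinkPod(FlinkPod flinkPod);

    // Returns any extra Kubernetes resources (Services, ConfigMaps, ...) this step needs.
    List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException;
}
```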
@Override
public void resetConfigStats(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
syncFuture(f);
} | @Test
public void testResetConfigStats() {
RedisClusterNode master = getFirstMaster();
connection.resetConfigStats(master);
} |
@Override
public void readLine(String line) {
if (line.startsWith("%") || line.isEmpty()) {
return;
}
if(line.startsWith("descr") && this.organization == null) {
this.organization = lineValue(line);
}
// Some responses have an org-name. Let this always overwrite the organization parsed from the "descr" field.
if(line.startsWith("org-name:")) {
this.organization = lineValue(line);
}
if(line.startsWith("country:") && this.countryCode == null) {
this.countryCode = lineValue(line);
}
} | @Test
public void testRunDirectMatchWithShortResultFormat() throws Exception {
RIPENCCResponseParser parser = new RIPENCCResponseParser();
for (String line : MATCH_WITH_SHORT_RESULT.split("\n")) {
parser.readLine(line);
}
assertEquals("US", parser.getCountryCode());
assertEquals("Facebook", parser.getOrganization());
} |
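The parser keeps the first `descr` and the first `country:` value but lets a later `org-name:` win. A hypothetical response fragment (not the MATCH_WITH_SHORT_RESULT constant used by the test) that exercises all three rules:

```java
// Hypothetical WHOIS-style input; feeding these lines to readLine(..) one by one
// yields organization "Example Org" (org-name overwrites descr) and country "US".
String[] lines = {
    "% This is a comment and is skipped",
    "",
    "descr:        Some network description",
    "org-name:     Example Org",
    "country:      US",
    "country:      NL"   // ignored: countryCode is already set
};
```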
@Override
public CompletableFuture<JobMasterGateway> getJobMasterGatewayFuture() {
return jobMasterGatewayFuture;
} | @Test
void testJobMasterGatewayGetsForwarded() {
final CompletableFuture<JobMasterService> jobMasterServiceFuture =
new CompletableFuture<>();
DefaultJobMasterServiceProcess serviceProcess = createTestInstance(jobMasterServiceFuture);
TestingJobMasterGateway testingGateway = new TestingJobMasterGatewayBuilder().build();
TestingJobMasterService testingJobMasterService =
new TestingJobMasterService("localhost", null, testingGateway);
jobMasterServiceFuture.complete(testingJobMasterService);
assertThat(serviceProcess.getJobMasterGatewayFuture()).isCompletedWithValue(testingGateway);
} |
@Bean
public ShenyuPlugin globalPlugin(final ShenyuContextBuilder shenyuContextBuilder) {
return new GlobalPlugin(shenyuContextBuilder);
} | @Test
public void testGlobalPlugin() {
applicationContextRunner.run(context -> {
ShenyuPlugin plugin = context.getBean("globalPlugin", ShenyuPlugin.class);
assertNotNull(plugin);
}
);
} |
public ConnectionDetails getConnectionDetails( IMetaStore metaStore, String key, String name ) {
ConnectionProvider<? extends ConnectionDetails> connectionProvider = getConnectionProvider( key );
if ( connectionProvider != null ) {
Class<? extends ConnectionDetails> clazz = connectionProvider.getClassType();
return loadElement( getMetaStoreFactory( metaStore, clazz ), name );
}
return null;
} | @Test
public void testDefaultPropertiesNotNull() {
addOne();
TestConnectionWithBucketsDetails connectionDetails =
(TestConnectionWithBucketsDetails) connectionManager.getConnectionDetails( CONNECTION_NAME );
assertNotNull( connectionDetails );
assertNotNull( connectionDetails.getProperties() );
assertNotNull( connectionDetails.getProperties().get( "baRoles" ) );
} |
public static void validateMaterializedViewPartitionColumns(
SemiTransactionalHiveMetastore metastore,
MetastoreContext metastoreContext,
Table viewTable,
MaterializedViewDefinition viewDefinition)
{
SchemaTableName viewName = new SchemaTableName(viewTable.getDatabaseName(), viewTable.getTableName());
Map<String, Map<SchemaTableName, String>> viewToBaseDirectColumnMap = viewDefinition.getDirectColumnMappingsAsMap();
if (viewToBaseDirectColumnMap.isEmpty()) {
throw new PrestoException(
NOT_SUPPORTED,
format("Materialized view %s must have at least one column directly defined by a base table column.", viewName));
}
List<Column> viewPartitions = viewTable.getPartitionColumns();
if (viewPartitions.isEmpty()) {
throw new PrestoException(NOT_SUPPORTED, "Unpartitioned materialized view is not supported.");
}
List<Table> baseTables = viewDefinition.getBaseTables().stream()
.map(baseTableName -> metastore.getTable(metastoreContext, baseTableName.getSchemaName(), baseTableName.getTableName())
.orElseThrow(() -> new TableNotFoundException(baseTableName)))
.collect(toImmutableList());
Map<Table, List<Column>> baseTablePartitions = baseTables.stream()
.collect(toImmutableMap(
table -> table,
Table::getPartitionColumns));
for (Table baseTable : baseTablePartitions.keySet()) {
SchemaTableName schemaBaseTable = new SchemaTableName(baseTable.getDatabaseName(), baseTable.getTableName());
if (!isCommonPartitionFound(schemaBaseTable, baseTablePartitions.get(baseTable), viewPartitions, viewToBaseDirectColumnMap)) {
throw new PrestoException(
NOT_SUPPORTED,
format("Materialized view %s must have at least one partition column that exists in %s as well", viewName, baseTable.getTableName()));
}
if (viewDefinition.getBaseTablesOnOuterJoinSide().contains(schemaBaseTable) && viewToBaseTableOnOuterJoinSideIndirectMappedPartitions(viewDefinition, baseTable).get().isEmpty()) {
throw new PrestoException(
NOT_SUPPORTED,
format("Outer join conditions in Materialized view %s must have at least one common partition equality constraint", viewName));
}
}
} | @Test
public void testValidateMaterializedViewPartitionColumnsTwoColumnMatchDifferentTable()
{
TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create();
Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty());
Column shipmodeColumn = new Column("shipmode", HIVE_STRING, Optional.empty(), Optional.empty());
SchemaTableName tableName1 = new SchemaTableName(SCHEMA_NAME, TABLE_NAME);
testMetastore.addTable(SCHEMA_NAME, TABLE_NAME, getTable(ImmutableList.of(dsColumn)), ImmutableList.of());
String table2 = "table2";
SchemaTableName tableName2 = new SchemaTableName(SCHEMA_NAME, table2);
testMetastore.addTable(SCHEMA_NAME, table2, getTable(table2, ImmutableList.of(shipmodeColumn)), ImmutableList.of());
Map<String, Map<SchemaTableName, String>> originalColumnMapping = ImmutableMap.of(
dsColumn.getName(), ImmutableMap.of(tableName1, dsColumn.getName()),
shipmodeColumn.getName(), ImmutableMap.of(tableName2, shipmodeColumn.getName()));
List<Column> viewPartitionColumns = ImmutableList.of(dsColumn, shipmodeColumn);
validateMaterializedViewPartitionColumns(testMetastore, metastoreContext, getTable(viewPartitionColumns), getConnectorMaterializedViewDefinition(ImmutableList.of(tableName1, tableName2), originalColumnMapping));
} |
@Override
protected List<String> getArgs() {
TableMetadata tableMetadata = getMetadata();
RetentionConfig config = tablesClient.getTableRetention(tableMetadata).get();
String columnName = config.getColumnName();
List<String> jobArgs =
Stream.of(
"--tableName",
tableMetadata.fqtn(),
"--columnName",
columnName,
"--granularity",
config.getGranularity().getValue(),
"--count",
Integer.toString(config.getCount()))
.collect(Collectors.toList());
if (!StringUtils.isBlank(config.getColumnPattern())) {
jobArgs.add("--columnPattern");
jobArgs.add(config.getColumnPattern());
}
return jobArgs;
} | @Test
void testRetentionJobArgsForTableWithPattern() {
TableRetentionTask tableRetentionTask =
new TableRetentionTask(jobsClient, tablesClient, tableMetadata);
String columnPattern = "yyyy-MM-DD";
String columnName = "testColumnName";
int count = 1;
Retention.GranularityEnum retentionGranularity = Retention.GranularityEnum.DAY;
RetentionConfig retentionConfigMock = Mockito.mock(RetentionConfig.class);
Mockito.when(retentionConfigMock.getColumnPattern()).thenReturn(columnPattern);
Mockito.when(retentionConfigMock.getColumnName()).thenReturn(columnName);
Mockito.when(retentionConfigMock.getGranularity()).thenReturn(retentionGranularity);
Mockito.when(retentionConfigMock.getCount()).thenReturn(count);
Mockito.when(tablesClient.getTableRetention(tableMetadata))
.thenReturn(Optional.of(retentionConfigMock));
List<String> expectedArgs =
Stream.of(
"--tableName",
tableMetadata.fqtn(),
"--columnName",
columnName,
"--granularity",
retentionGranularity.getValue(),
"--count",
String.valueOf(count),
"--columnPattern",
columnPattern)
.collect(Collectors.toList());
Assertions.assertEquals(expectedArgs, tableRetentionTask.getArgs());
} |
@Override
public CompletableFuture<RegistrationResponse> registerTaskManager(
final JobID jobId,
final TaskManagerRegistrationInformation taskManagerRegistrationInformation,
final Time timeout) {
if (!jobGraph.getJobID().equals(jobId)) {
log.debug(
"Rejecting TaskManager registration attempt because of wrong job id {}.",
jobId);
return CompletableFuture.completedFuture(
new JMTMRegistrationRejection(
String.format(
"The JobManager is not responsible for job %s. Maybe the TaskManager used outdated connection information.",
jobId)));
}
final TaskManagerLocation taskManagerLocation;
try {
taskManagerLocation =
resolveTaskManagerLocation(
taskManagerRegistrationInformation.getUnresolvedTaskManagerLocation());
} catch (FlinkException exception) {
log.error("Could not accept TaskManager registration.", exception);
return CompletableFuture.completedFuture(new RegistrationResponse.Failure(exception));
}
final ResourceID taskManagerId = taskManagerLocation.getResourceID();
final UUID sessionId = taskManagerRegistrationInformation.getTaskManagerSession();
final TaskManagerRegistration taskManagerRegistration =
registeredTaskManagers.get(taskManagerId);
if (taskManagerRegistration != null) {
if (taskManagerRegistration.getSessionId().equals(sessionId)) {
log.debug(
"Ignoring registration attempt of TaskManager {} with the same session id {}.",
taskManagerId,
sessionId);
final RegistrationResponse response = new JMTMRegistrationSuccess(resourceId);
return CompletableFuture.completedFuture(response);
} else {
disconnectTaskManager(
taskManagerId,
new FlinkException(
String.format(
"A registered TaskManager %s re-registered with a new session id. This indicates a restart of the TaskManager. Closing the old connection.",
taskManagerId)));
}
}
CompletableFuture<RegistrationResponse> registrationResponseFuture =
getRpcService()
.connect(
taskManagerRegistrationInformation.getTaskManagerRpcAddress(),
TaskExecutorGateway.class)
.handleAsync(
(TaskExecutorGateway taskExecutorGateway, Throwable throwable) -> {
if (throwable != null) {
return new RegistrationResponse.Failure(throwable);
}
slotPoolService.registerTaskManager(taskManagerId);
registeredTaskManagers.put(
taskManagerId,
TaskManagerRegistration.create(
taskManagerLocation,
taskExecutorGateway,
sessionId));
// monitor the task manager as heartbeat target
taskManagerHeartbeatManager.monitorTarget(
taskManagerId,
new TaskExecutorHeartbeatSender(taskExecutorGateway));
return new JMTMRegistrationSuccess(resourceId);
},
getMainThreadExecutor());
if (fetchAndRetainPartitions) {
registrationResponseFuture.whenComplete(
(ignored, throwable) ->
fetchAndRetainPartitionWithMetricsOnTaskManager(taskManagerId));
}
return registrationResponseFuture;
} | @Test
void testJobMasterDisconnectsOldTaskExecutorIfNewSessionIsSeen() throws Exception {
try (final JobMaster jobMaster =
new JobMasterBuilder(jobGraph, rpcService).createJobMaster()) {
final CompletableFuture<Void> firstTaskExecutorDisconnectedFuture =
new CompletableFuture<>();
final TestingTaskExecutorGateway firstTaskExecutorGateway =
new TestingTaskExecutorGatewayBuilder()
.setAddress("firstTaskExecutor")
.setDisconnectJobManagerConsumer(
(jobID, throwable) ->
firstTaskExecutorDisconnectedFuture.complete(null))
.createTestingTaskExecutorGateway();
final TestingTaskExecutorGateway secondTaskExecutorGateway =
new TestingTaskExecutorGatewayBuilder()
.setAddress("secondTaskExecutor")
.createTestingTaskExecutorGateway();
rpcService.registerGateway(
firstTaskExecutorGateway.getAddress(), firstTaskExecutorGateway);
rpcService.registerGateway(
secondTaskExecutorGateway.getAddress(), secondTaskExecutorGateway);
jobMaster.start();
final LocalUnresolvedTaskManagerLocation taskManagerLocation =
new LocalUnresolvedTaskManagerLocation();
final UUID firstTaskManagerSessionId = UUID.randomUUID();
final CompletableFuture<RegistrationResponse> firstRegistrationResponse =
jobMaster.registerTaskManager(
jobGraph.getJobID(),
TaskManagerRegistrationInformation.create(
firstTaskExecutorGateway.getAddress(),
taskManagerLocation,
firstTaskManagerSessionId),
testingTimeout);
assertThat(firstRegistrationResponse.get()).isInstanceOf(JMTMRegistrationSuccess.class);
final UUID secondTaskManagerSessionId = UUID.randomUUID();
final CompletableFuture<RegistrationResponse> secondRegistrationResponse =
jobMaster.registerTaskManager(
jobGraph.getJobID(),
TaskManagerRegistrationInformation.create(
secondTaskExecutorGateway.getAddress(),
taskManagerLocation,
secondTaskManagerSessionId),
testingTimeout);
assertThat(secondRegistrationResponse.get())
.isInstanceOf(JMTMRegistrationSuccess.class);
// the first TaskExecutor should be disconnected
firstTaskExecutorDisconnectedFuture.get();
}
} |
public static boolean isInventoryFinished(final int jobShardingCount, final Collection<TransmissionJobItemProgress> jobItemProgresses) {
return isAllProgressesFilled(jobShardingCount, jobItemProgresses) && isAllInventoryTasksCompleted(jobItemProgresses);
} | @Test
void assertIsInventoryFinishedWhenInventoryTaskProgressHasEmptyMap() {
JobItemInventoryTasksProgress jobItemInventoryTasksProgress = new JobItemInventoryTasksProgress(Collections.emptyMap());
TransmissionJobItemProgress transmissionJobItemProgress = new TransmissionJobItemProgress();
transmissionJobItemProgress.setInventory(jobItemInventoryTasksProgress);
assertFalse(PipelineJobProgressDetector.isInventoryFinished(1, Collections.singleton(transmissionJobItemProgress)));
} |
void fetchPluginSettingsMetaData(GoPluginDescriptor pluginDescriptor) {
String pluginId = pluginDescriptor.id();
List<ExtensionSettingsInfo> allMetadata = findSettingsAndViewOfAllExtensionsIn(pluginId);
List<ExtensionSettingsInfo> validMetadata = allSettingsAndViewPairsWhichAreValid(allMetadata);
if (validMetadata.isEmpty()) {
LOGGER.warn("Failed to fetch plugin settings metadata for plugin {}. Maybe the plugin does not implement plugin settings and view?", pluginId);
LOGGER.warn("Plugin: {} - Metadata load info: {}", pluginId, allMetadata);
LOGGER.warn("Not all plugins are required to implement the request above. This error may be safe to ignore.");
return;
}
if (validMetadata.size() > 1) {
throw new RuntimeException(String.format("Plugin with ID: %s has more than one extension which supports plugin settings. " +
"Only one extension should support it and respond to %s and %s.", pluginId, REQUEST_PLUGIN_SETTINGS_CONFIGURATION, REQUEST_PLUGIN_SETTINGS_VIEW));
}
ExtensionSettingsInfo extensionSettingsInfo = validMetadata.get(0);
metadataStore.addMetadataFor(pluginId, extensionSettingsInfo.extensionName, extensionSettingsInfo.configuration, extensionSettingsInfo.viewTemplate);
} | @Test
public void shouldNotFetchPluginSettingsMetadataForTaskPlugin() {
PluginSettingsConfiguration configuration = new PluginSettingsConfiguration();
configuration.add(new PluginSettingsProperty("k1").with(Property.REQUIRED, true).with(Property.SECURE, false));
GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id(UUID.randomUUID().toString()).isBundledPlugin(true).build();
setupSettingsResponses(taskExtension, pluginDescriptor.id(), configuration, "template");
metadataLoader.fetchPluginSettingsMetaData(pluginDescriptor);
verify(taskExtension, never()).getPluginSettingsConfiguration(pluginDescriptor.id());
verify(taskExtension, never()).getPluginSettingsView(pluginDescriptor.id());
assertThat(PluginSettingsMetadataStore.getInstance().configuration(pluginDescriptor.id())).isNull();
} |
public String getPath() {
return null == parentNode ? String.join("/", type) : String.join("/", parentNode, type);
} | @Test
void assertPathWithNullParentNode() {
UniqueRuleItemNodePath uniqueRuleItemNodePath = new UniqueRuleItemNodePath(new RuleRootNodePath("foo"), "test_path");
assertThat(uniqueRuleItemNodePath.getPath(), is("test_path"));
} |
@Override
public MapTileArea computeFromSource(final MapTileArea pSource, final MapTileArea pReuse) {
final MapTileArea out = pReuse != null ? pReuse : new MapTileArea();
if (pSource.size() == 0) {
out.reset();
return out;
}
final int left = pSource.getLeft() - mBorder;
final int top = pSource.getTop() - mBorder;
final int additional = 2 * mBorder - 1;
out.set(pSource.getZoom(),
left, top,
left + pSource.getWidth() + additional, top + pSource.getHeight() + additional);
return out;
} | @Test
public void testTwoContiguousPointsModulo() {
final MapTileArea source = new MapTileArea();
final MapTileArea dest = new MapTileArea();
final Set<Long> set = new HashSet<>();
final int border = 2;
final MapTileAreaBorderComputer computer = new MapTileAreaBorderComputer(border);
final int zoom = 5;
final int sourceX = 1;
final int sourceY = 31;
source.set(zoom, sourceX, sourceY, sourceX + 1, sourceY);
add(set, zoom, sourceX, sourceY, border);
add(set, zoom, sourceX + 1, sourceY, border);
computer.computeFromSource(source, dest);
check(dest, set, zoom);
} |
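A quick sanity check of the arithmetic in computeFromSource, with hypothetical values: for a source of width w, the output width is w + 2 * border, because additional = 2 * border - 1 compensates for the inclusive right edge.

```java
// Hypothetical values: border = 2, source covers x in [5, 6] (width 2) at zoom 10.
MapTileAreaBorderComputer computer = new MapTileAreaBorderComputer(2);
MapTileArea source = new MapTileArea();
source.set(10, 5, 5, 6, 5); // zoom, left, top, right, bottom
MapTileArea out = computer.computeFromSource(source, null);
// left = 5 - 2 = 3, additional = 2 * 2 - 1 = 3, right = 3 + 2 + 3 = 8:
// the output covers x in [3, 8], i.e. width 6 = 2 + 2 * border.
```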
@Override
public SelBoolean assignOps(SelOp op, SelType rhs) {
if (op == SelOp.ASSIGN) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
this.val = ((SelBoolean) rhs).val; // direct assignment
return this;
}
throw new UnsupportedOperationException(
"boolean/Boolean DO NOT support assignment operation " + op);
} | @Test
public void testAssignOps() {
SelBoolean res = one.assignOps(SelOp.ASSIGN, another);
another.assignOps(SelOp.ASSIGN, SelBoolean.of(true));
assertEquals("BOOLEAN: false", one.type() + ": " + one);
assertEquals("BOOLEAN: true", another.type() + ": " + another);
} |
public static String hashpw(String password, String salt) throws IllegalArgumentException {
BCrypt B;
String real_salt;
byte passwordb[], saltb[], hashed[];
char minor = (char) 0;
int rounds, off = 0;
StringBuilder rs = new StringBuilder();
if (salt == null) {
throw new IllegalArgumentException("salt cannot be null");
}
int saltLength = salt.length();
if (saltLength < 28) {
throw new IllegalArgumentException("Invalid salt");
}
if (salt.charAt(0) != '$' || salt.charAt(1) != '2') {
throw new IllegalArgumentException("Invalid salt version");
}
if (salt.charAt(2) == '$') {
off = 3;
} else {
minor = salt.charAt(2);
if (minor != 'a' || salt.charAt(3) != '$') {
throw new IllegalArgumentException("Invalid salt revision");
}
off = 4;
}
if (saltLength - off < 25) {
throw new IllegalArgumentException("Invalid salt");
}
// Extract number of rounds
if (salt.charAt(off + 2) > '$') {
throw new IllegalArgumentException("Missing salt rounds");
}
rounds = Integer.parseInt(salt.substring(off, off + 2));
real_salt = salt.substring(off + 3, off + 25);
try {
passwordb = (password + (minor >= 'a' ? "\000" : "")).getBytes("UTF-8");
} catch (UnsupportedEncodingException uee) {
throw new AssertionError("UTF-8 is not supported");
}
saltb = decode_base64(real_salt, BCRYPT_SALT_LEN);
B = new BCrypt();
hashed = B.crypt_raw(passwordb, saltb, rounds);
rs.append("$2");
if (minor >= 'a') {
rs.append(minor);
}
rs.append("$");
if (rounds < 10) {
rs.append("0");
}
rs.append(rounds);
rs.append("$");
encode_base64(saltb, saltb.length, rs);
encode_base64(hashed, bf_crypt_ciphertext.length * 4 - 1, rs);
return rs.toString();
} | @Test
public void testHashpwMissingSaltRounds() throws IllegalArgumentException {
thrown.expect(IllegalArgumentException.class);
BCrypt.hashpw("foo", "$2$a10$.....................");
} |
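hashpw expects a modular-crypt salt of the form $2$ or $2a$ followed by a two-digit cost, a $, and 22 base64 characters. A typical round-trip, assuming this class is the usual jBCrypt port with its gensalt/checkpw companions:

```java
// Salt layout parsed above: "$2a$" + "10" (cost/rounds) + "$" + 22 base64 chars.
String salt = BCrypt.gensalt(10);           // e.g. "$2a$10$N9qo8uLOickgx2ZMRZoMye"
String hash = BCrypt.hashpw("my-password", salt);
// checkpw re-hashes the candidate with the salt embedded in `hash` and compares.
boolean ok = BCrypt.checkpw("my-password", hash);
```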
@Override
public <T extends GetWorkBudgetSpender> void distributeBudget(
ImmutableCollection<T> budgetOwners, GetWorkBudget getWorkBudget) {
if (budgetOwners.isEmpty()) {
LOG.debug("Cannot distribute budget to no owners.");
return;
}
if (getWorkBudget.equals(GetWorkBudget.noBudget())) {
LOG.debug("Cannot distribute 0 budget.");
return;
}
Map<T, GetWorkBudget> desiredBudgets = computeDesiredBudgets(budgetOwners, getWorkBudget);
for (Entry<T, GetWorkBudget> streamAndDesiredBudget : desiredBudgets.entrySet()) {
GetWorkBudgetSpender getWorkBudgetSpender = streamAndDesiredBudget.getKey();
GetWorkBudget desired = streamAndDesiredBudget.getValue();
GetWorkBudget remaining = getWorkBudgetSpender.remainingBudget();
if (isBelowFiftyPercentOfTarget(remaining, desired)) {
GetWorkBudget adjustment = desired.subtract(remaining);
getWorkBudgetSpender.adjustBudget(adjustment);
}
}
} | @Test
public void testDistributeBudget_doesNothingWithNoBudget() {
GetWorkBudgetSpender getWorkBudgetSpender =
spy(createGetWorkBudgetOwnerWithRemainingBudgetOf(GetWorkBudget.noBudget()));
createBudgetDistributor(1L)
.distributeBudget(ImmutableList.of(getWorkBudgetSpender), GetWorkBudget.noBudget());
verifyNoInteractions(getWorkBudgetSpender);
} |
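Only the hysteresis check is hidden here: a spender's budget is topped up only once its remaining budget falls below half of the desired target. A plausible sketch of that helper (hedged: Beam's actual implementation may differ), assuming GetWorkBudget exposes items() and bytes() accessors:

```java
// Hypothetical reconstruction of the helper named in the focal method; "below
// fifty percent" reads as: remaining dropped under half the target on either axis.
private static boolean isBelowFiftyPercentOfTarget(GetWorkBudget remaining, GetWorkBudget target) {
    return remaining.items() < target.items() / 2 || remaining.bytes() < target.bytes() / 2;
}
```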
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
if (parentId == null || ID_ROOT.equals(parentId)) {
return;
}
// a menu cannot be its own parent
if (parentId.equals(childId)) {
throw exception(MENU_PARENT_ERROR);
}
MenuDO menu = menuMapper.selectById(parentId);
// the parent menu does not exist
if (menu == null) {
throw exception(MENU_PARENT_NOT_EXISTS);
}
// the parent menu must be of directory or menu type
if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
&& !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
}
} | @Test
public void testValidateParentMenu_success() {
// mock data
MenuDO menuDO = buildMenuDO(MenuTypeEnum.MENU, "parent", 0L);
menuMapper.insert(menuDO);
// prepare the parameter
Long parentId = menuDO.getId();
// invoke; no assertion needed
menuService.validateParentMenu(parentId, null);
} |
public static Type beamTypeToSpannerType(Schema.FieldType beamType) {
switch (beamType.getTypeName()) {
case ARRAY:
case ITERABLE:
Schema.@Nullable FieldType elementType = beamType.getCollectionElementType();
if (elementType == null) {
throw new NullPointerException("Null element type");
} else {
return Type.array(simpleBeamTypeToSpannerType(elementType));
}
default:
return simpleBeamTypeToSpannerType(beamType);
}
} | @Test
public void testBeamTypeToSpannerTypeTranslation() {
assertEquals(Type.int64(), beamTypeToSpannerType(Schema.FieldType.INT64));
assertEquals(Type.int64(), beamTypeToSpannerType(Schema.FieldType.INT32));
assertEquals(Type.int64(), beamTypeToSpannerType(Schema.FieldType.INT16));
assertEquals(Type.int64(), beamTypeToSpannerType(Schema.FieldType.BYTE));
assertEquals(Type.bytes(), beamTypeToSpannerType(Schema.FieldType.BYTES));
assertEquals(Type.string(), beamTypeToSpannerType(Schema.FieldType.STRING));
assertEquals(Type.float32(), beamTypeToSpannerType(Schema.FieldType.FLOAT));
assertEquals(Type.float64(), beamTypeToSpannerType(Schema.FieldType.DOUBLE));
assertEquals(Type.bool(), beamTypeToSpannerType(Schema.FieldType.BOOLEAN));
assertEquals(Type.numeric(), beamTypeToSpannerType(Schema.FieldType.DECIMAL));
assertEquals(
Type.struct(ImmutableList.of(Type.StructField.of("int64", Type.int64()))),
beamTypeToSpannerType(Schema.FieldType.row(INT64_SCHEMA)));
assertEquals(
Type.array(Type.int64()),
beamTypeToSpannerType(Schema.FieldType.array(Schema.FieldType.INT64)));
} |
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
} | @Test
public void testUpdateReservationEmptyRR() {
ReservationUpdateRequest request =
createSimpleReservationUpdateRequest(1, 0, 1, 5, 3);
Plan plan = null;
try {
plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("No resources have been specified to reserve"));
LOG.info(message);
}
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DoubleColumnStatsDataInspector columnStatsData = doubleInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DoubleColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DoubleColumnStatsMerger merger = new DoubleColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDoubleStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DoubleColumnStatsData aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsDataInspector newData =
doubleInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is a bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(),
newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDoubleStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDoubleStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsOnlySomeAvailableButUnmergeableBitVector() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(double.class).numNulls(1).numDVs(3)
.low(1d).high(6d).fmSketch(1, 2, 6).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(double.class).numNulls(3).numDVs(1)
.low(7d).high(7d).hll(7).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
DoubleColumnStatsAggregator aggregator = new DoubleColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// hll in case of missing stats is left as null, only numDVs is updated
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(3)
.low(1d).high(7.5).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// the use of the density function leads to a different estimation for numNDV
expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(4)
.low(1d).high(7.5).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
} |
public static long calculateIntervalEnd(long startTs, IntervalType intervalType, ZoneId tzId) {
var startTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTs), tzId);
switch (intervalType) {
case WEEK:
return startTime.truncatedTo(ChronoUnit.DAYS).with(WeekFields.SUNDAY_START.dayOfWeek(), 1).plusDays(7).toInstant().toEpochMilli();
case WEEK_ISO:
return startTime.truncatedTo(ChronoUnit.DAYS).with(WeekFields.ISO.dayOfWeek(), 1).plusDays(7).toInstant().toEpochMilli();
case MONTH:
return startTime.truncatedTo(ChronoUnit.DAYS).withDayOfMonth(1).plusMonths(1).toInstant().toEpochMilli();
case QUARTER:
return startTime.truncatedTo(ChronoUnit.DAYS).with(IsoFields.DAY_OF_QUARTER, 1).plusMonths(3).toInstant().toEpochMilli();
default:
throw new RuntimeException("Not supported!");
}
} | @Test
void testWeekEnd() {
long ts = 1704899727000L; // Wednesday, January 10 15:15:27 GMT
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK, ZoneId.of("Europe/Kyiv"))).isEqualTo(1705183200000L); // Sunday, January 14, 2024 0:00:00 GMT+02:00
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK_ISO, ZoneId.of("Europe/Kyiv"))).isEqualTo(1705269600000L); // Monday, January 15, 2024 0:00:00 GMT+02:00
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK, ZoneId.of("Europe/Amsterdam"))).isEqualTo(1705186800000L); // Sunday, January 14, 2024 0:00:00 GMT+01:00
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK_ISO, ZoneId.of("Europe/Amsterdam"))).isEqualTo(1705273200000L); // Monday, January 15, 2024 0:00:00 GMT+01:00
ts = 1704621600000L; // Sunday, January 7, 2024 12:00:00 GMT+02:00
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK, ZoneId.of("Europe/Kyiv"))).isEqualTo(1705183200000L); // Sunday, January 14, 2024 0:00:00 GMT+02:00
assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.WEEK_ISO, ZoneId.of("Europe/Kyiv"))).isEqualTo(1704664800000L); // Monday, January 8, 2024 0:00:00 GMT+02:00
} |
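The MONTH and QUARTER branches follow the same truncate-then-advance pattern as the week cases exercised above; a small sketch for MONTH (the expected epoch value is my own computation, not taken from the source):

```java
// Same Wednesday, January 10, 2024 timestamp as in the test above.
long ts = 1704899727000L;
long monthEnd = TimeUtils.calculateIntervalEnd(ts, IntervalType.MONTH, ZoneId.of("Europe/Kyiv"));
// Truncated to the day, snapped to the 1st, plus one month:
// midnight of February 1, 2024 in Europe/Kyiv (GMT+2), i.e. 1706738400000L.
```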
public static List<URI> getPeerServerURIs(HelixManager helixManager, String tableNameWithType, String segmentName,
String downloadScheme) {
HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
String clusterName = helixManager.getClusterName();
List<URI> onlineServerURIs = new ArrayList<>();
try {
RetryPolicies.exponentialBackoffRetryPolicy(MAX_NUM_ATTEMPTS, INITIAL_DELAY_MS, DELAY_SCALE_FACTOR)
.attempt(() -> {
getOnlineServersFromExternalView(helixAdmin, clusterName, tableNameWithType, segmentName, downloadScheme,
onlineServerURIs);
return !onlineServerURIs.isEmpty();
});
} catch (AttemptsExceededException e) {
LOGGER.error("Failed to find ONLINE servers for segment: {} in table: {} after {} attempts", segmentName,
tableNameWithType, MAX_NUM_ATTEMPTS);
} catch (Exception e) {
LOGGER.error("Caught exception while getting peer server URIs for segment: {} in table: {}", segmentName,
tableNameWithType, e);
}
return onlineServerURIs;
} | @Test
public void testSegmentFoundSuccessfully()
throws Exception {
// SEGMENT_1 has only 2 online replicas.
List<URI> httpServerURIs = PeerServerSegmentFinder.getPeerServerURIs(_helixManager, REALTIME_TABLE_NAME, SEGMENT_1,
CommonConstants.HTTP_PROTOCOL);
assertEquals(httpServerURIs.size(), 2);
assertTrue(httpServerURIs.contains(new URI(
String.format("http://%s:%d/segments/%s/%s", HOSTNAME_1, HTTP_ADMIN_PORT, REALTIME_TABLE_NAME, SEGMENT_1))));
assertTrue(httpServerURIs.contains(new URI(
String.format("http://%s:%d/segments/%s/%s", HOSTNAME_3, HTTP_ADMIN_PORT, REALTIME_TABLE_NAME, SEGMENT_1))));
List<URI> httpsServerURIs = PeerServerSegmentFinder.getPeerServerURIs(_helixManager, REALTIME_TABLE_NAME, SEGMENT_1,
CommonConstants.HTTPS_PROTOCOL);
assertEquals(httpsServerURIs.size(), 2);
assertTrue(httpsServerURIs.contains(new URI(
String.format("https://%s:%d/segments/%s/%s", HOSTNAME_1, HTTPS_ADMIN_PORT, REALTIME_TABLE_NAME, SEGMENT_1))));
assertTrue(httpsServerURIs.contains(new URI(
String.format("https://%s:%d/segments/%s/%s", HOSTNAME_3, HTTPS_ADMIN_PORT, REALTIME_TABLE_NAME, SEGMENT_1))));
} |
static void unregisterCommand(PrintStream stream, Admin adminClient, int id) throws Exception {
try {
adminClient.unregisterBroker(id).all().get();
stream.println("Broker " + id + " is no longer registered.");
} catch (ExecutionException ee) {
Throwable cause = ee.getCause();
if (cause instanceof UnsupportedVersionException) {
stream.println("The target cluster does not support the broker unregistration API.");
} else {
throw ee;
}
}
} | @Test
public void testLegacyModeClusterCannotUnregisterBroker() throws Exception {
Admin adminClient = new MockAdminClient.Builder().numBrokers(3).
usingRaftController(false).
build();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ClusterTool.unregisterCommand(new PrintStream(stream), adminClient, 0);
assertEquals("The target cluster does not support the broker unregistration API.\n", stream.toString());
} |
public static ExpansionServer create(ExpansionService service, String host, int port)
throws IOException {
return new ExpansionServer(service, host, port);
} | @Test
public void testPassingPipelineArguments() {
String[] args = {
"--defaultEnvironmentType=PROCESS",
"--defaultEnvironmentConfig={\"command\": \"/opt/apache/beam/boot\"}"
};
ExpansionService service = new ExpansionService(args);
assertThat(
service
.createPipeline(PipelineOptionsFactory.create())
.getOptions()
.as(PortablePipelineOptions.class)
.getDefaultEnvironmentType(),
equalTo("PROCESS"));
} |
public static Optional<PfxOptions> getPfxTrustStoreOptions(final Map<String, String> props) {
final String location = getTrustStoreLocation(props);
final String password = getTrustStorePassword(props);
if (!Strings.isNullOrEmpty(location)) {
return Optional.of(buildPfxOptions(location, password));
}
return Optional.empty();
} | @Test
public void shouldBuildTrustStorePfxOptionsWithPathAndPassword() {
// When
final Optional<PfxOptions> pfxOptions = VertxSslOptionsFactory.getPfxTrustStoreOptions(
ImmutableMap.of(
SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
"path",
SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG,
"password"
)
);
// Then
assertThat(pfxOptions.get().getPath(), is("path"));
assertThat(pfxOptions.get().getPassword(), is("password"));
} |
static TimelineFilterList parseKVFilters(String expr, boolean valueAsString)
throws TimelineParseException {
return parseFilters(new TimelineParserForKVFilters(expr, valueAsString));
} | @Test
void testInfoFiltersParsing() throws Exception {
String expr = "(((key11 ne 234 AND key12 eq val12) AND " +
"(key13 ene val13 OR key14 eq 567)) OR (key21 eq val_21 OR key22 eq " +
"5.0))";
TimelineFilterList expectedList = new TimelineFilterList(
Operator.OR,
new TimelineFilterList(
Operator.AND,
new TimelineFilterList(
Operator.AND,
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"key11", 234, false),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"key12", "val12", true)
),
new TimelineFilterList(
Operator.OR,
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"key13", "val13", true),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"key14", 567, true)
)
),
new TimelineFilterList(
Operator.OR,
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"key21", "val_21", true),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"key22", 5.0, true)
)
);
verifyFilterList(expr, TimelineReaderWebServicesUtils.
parseKVFilters(expr, false), expectedList);
expr = "abc ne 234 AND def eq 23 OR rst ene 24 OR xyz eq 456 AND pqr eq " +
"val.1234";
expectedList = new TimelineFilterList(
new TimelineFilterList(
Operator.OR,
new TimelineFilterList(
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"abc", 234, false),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"def", 23, true)
),
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"rst", 24, true),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"xyz", 456, true)
),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"pqr", "val.1234", true)
);
verifyFilterList(expr, TimelineReaderWebServicesUtils.
parseKVFilters(expr, false), expectedList);
// Test with unnecessary spaces.
expr = " abc ne 234 AND def eq 23 OR rst ene " +
" 24 OR xyz eq 456 AND pqr eq 2 ";
expectedList = new TimelineFilterList(
new TimelineFilterList(
Operator.OR,
new TimelineFilterList(
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"abc", 234, false),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"def", 23, true)
),
new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL,
"rst", 24, true),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"xyz", 456, true)
),
new TimelineKeyValueFilter(TimelineCompareOp.EQUAL,
"pqr", 2, true)
);
verifyFilterList(expr, TimelineReaderWebServicesUtils.
parseKVFilters(expr, false), expectedList);
expr = "abdeq";
try {
TimelineReaderWebServicesUtils.parseKVFilters(expr, false);
fail("Expression valuation should throw exception.");
} catch (TimelineParseException e) {
// expected: do nothing
}
expr = "abc gt 234 AND defeq";
try {
TimelineReaderWebServicesUtils.parseKVFilters(expr, false);
fail("Expression valuation should throw exception.");
} catch (TimelineParseException e) {
// expected: do nothing
}
expr = "((key11 ne 234 AND key12 eq val12) AND (key13eq OR key14 eq va14))";
try {
TimelineReaderWebServicesUtils.parseKVFilters(expr, false);
fail("Expression valuation should throw exception.");
} catch (TimelineParseException e) {
// expected: do nothing
}
} |
public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) {
return aggregate(windowIndices, metricDef, true);
} | @Test
public void testExtrapolationAdjacentAvgAtMiddle() {
RawMetricValues rawValues = new RawMetricValues(NUM_WINDOWS_TO_KEEP, MIN_SAMPLES_PER_WINDOW, NUM_RAW_METRICS);
prepareWindowMissingAtIndex(rawValues, 1);
ValuesAndExtrapolations valuesAndExtrapolations = aggregate(rawValues, allWindowIndices(0));
assertEquals(11.5, valuesAndExtrapolations.metricValues().valuesFor((short) 0).get(1), EPSILON);
assertEquals(13.0, valuesAndExtrapolations.metricValues().valuesFor((short) 1).get(1), EPSILON);
assertEquals(13.0, valuesAndExtrapolations.metricValues().valuesFor((short) 2).get(1), EPSILON);
assertEquals(1, valuesAndExtrapolations.extrapolations().size());
Assert.assertEquals(Extrapolation.AVG_ADJACENT, valuesAndExtrapolations.extrapolations().get(1));
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof EntityDto entityDto)) {
return false;
}
return Objects.equals(uuid, entityDto.uuid);
} | @Test
void equals_whenEmptyObjects_shouldReturnTrue() {
PortfolioDto p1 = new PortfolioDto();
PortfolioDto p2 = new PortfolioDto();
boolean equals = p1.equals(p2);
assertThat(equals).isTrue();
} |
@Override
public void getConfig(StorServerConfig.Builder builder) {
super.getConfig(builder);
provider.getConfig(builder);
} | @Test
void testGarbageCollectionOffByDefault() {
StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
parse("<cluster id=\"storage\">\n" +
" <redundancy>3</redundancy>" +
" <documents>\n" +
" <document type=\"music\"/>\n" +
" </documents>\n" +
" <group>" +
" <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
" </group>" +
"</cluster>").getConfig(builder);
StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
assertEquals(0, conf.garbagecollection().interval());
assertEquals("", conf.garbagecollection().selectiontoremove());
} |
public int run() throws IOException {
Set<MasterInfoField> masterInfoFilter = new HashSet<>(Arrays
.asList(MasterInfoField.LEADER_MASTER_ADDRESS, MasterInfoField.WEB_PORT,
MasterInfoField.RPC_PORT, MasterInfoField.START_TIME_MS,
MasterInfoField.UP_TIME_MS, MasterInfoField.VERSION,
MasterInfoField.SAFE_MODE, MasterInfoField.ZOOKEEPER_ADDRESSES,
MasterInfoField.RAFT_JOURNAL, MasterInfoField.RAFT_ADDRESSES,
MasterInfoField.MASTER_VERSION));
MasterInfo masterInfo = mMetaMasterClient.getMasterInfo(masterInfoFilter);
Set<BlockMasterInfoField> blockMasterInfoFilter = new HashSet<>(Arrays
.asList(BlockMasterInfoField.LIVE_WORKER_NUM, BlockMasterInfoField.LOST_WORKER_NUM,
BlockMasterInfoField.CAPACITY_BYTES, BlockMasterInfoField.USED_BYTES,
BlockMasterInfoField.FREE_BYTES, BlockMasterInfoField.CAPACITY_BYTES_ON_TIERS,
BlockMasterInfoField.USED_BYTES_ON_TIERS));
BlockMasterInfo blockMasterInfo = mBlockMasterClient.getBlockMasterInfo(blockMasterInfoFilter);
ObjectMapper objectMapper = new ObjectMapper();
SummaryOutput summaryInfo = new SummaryOutput(masterInfo, blockMasterInfo);
try {
String json = objectMapper.writeValueAsString(summaryInfo);
mPrintStream.println(json);
} catch (JsonProcessingException e) {
mPrintStream.println("Failed to convert summaryInfo output to JSON. "
+ "Check the command line log for the detailed error message.");
LOG.error("Failed to output JSON object {}", summaryInfo);
e.printStackTrace();
return -1;
}
return 0;
} | @Test
public void RaftHaSummary() throws IOException {
MasterVersion primaryVersion = MasterVersion.newBuilder()
.setVersion(RuntimeConstants.VERSION).setState("Primary").setAddresses(
NetAddress.newBuilder().setHost("hostname1").setRpcPort(10000).build()
).build();
MasterVersion standby1Version = MasterVersion.newBuilder()
.setVersion(RuntimeConstants.VERSION).setState("Standby").setAddresses(
NetAddress.newBuilder().setHost("hostname2").setRpcPort(10001).build()
).build();
MasterVersion standby2Version = MasterVersion.newBuilder()
.setVersion(RuntimeConstants.VERSION).setState("Standby").setAddresses(
NetAddress.newBuilder().setHost("hostname3").setRpcPort(10002).build()
).build();
mMasterInfo = MasterInfo.newBuilder(mMasterInfo)
.setRaftJournal(true)
.addAllRaftAddress(Arrays.asList("[raftJournal_hostname1]:19200",
"[raftJournal_hostname2]:19200", "[raftJournal_hostname3]:19200"))
.addAllMasterVersions(Arrays.asList(primaryVersion, standby1Version, standby2Version))
.build();
when(mMetaMasterClient.getMasterInfo(any())).thenReturn(mMasterInfo);
SummaryCommand summaryCommand = new SummaryCommand(mMetaMasterClient,
mBlockMasterClient, sConf.getString(PropertyKey.USER_DATE_FORMAT_PATTERN), mPrintStream);
summaryCommand.run();
checkIfOutputValid(sConf.getString(PropertyKey.USER_DATE_FORMAT_PATTERN), "raft");
} |
public Collection<V> remove(K key)
{
List<V> removed = data.remove(key);
if (removed != null) {
for (V val : removed) {
inverse.remove(val);
}
}
return removed;
} | @Test
public void testRemoveKey()
{
Collection<Integer> removed = map.remove(1L);
assertThat(removed, is(Collections.singletonList(42)));
assertSize(0);
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
QueryEntry that = (QueryEntry) o;
if (!key.equals(that.key)) {
return false;
}
return true;
} | @Test(expected = NullPointerException.class)
@SuppressWarnings("ResultOfMethodCallIgnored")
public void test_equality_empty() {
QueryableEntry entryKeyLeft = createEntry();
QueryableEntry entryKeyRight = createEntry();
entryKeyLeft.equals(entryKeyRight);
} |
public void markAsUnchanged(DefaultInputFile file) {
if (isFeatureActive()) {
if (file.status() != InputFile.Status.SAME) {
LOG.error("File '{}' was marked as unchanged but its status is {}", file.getProjectRelativePath(), file.status());
} else {
LOG.debug("File '{}' marked as unchanged", file.getProjectRelativePath());
file.setMarkedAsUnchanged(true);
}
}
} | @Test
public void dont_mark_file_is_status_is_not_same() {
when(file.status()).thenReturn(InputFile.Status.CHANGED);
executingSensorContext.setSensorExecuting(new SensorId("cpp", "CFamily"));
UnchangedFilesHandler handler = new UnchangedFilesHandler(enabledConfig, defaultBranchConfig, executingSensorContext);
handler.markAsUnchanged(file);
verify(file, never()).setMarkedAsUnchanged(true);
} |
public static ZombieUpstream transform(final CommonUpstream commonUpstream, final int zombieCheckTimes, final String selectorId) {
return ZombieUpstream.builder().commonUpstream(commonUpstream).zombieCheckTimes(zombieCheckTimes).selectorId(selectorId).build();
} | @Test
public void testTransform() {
ZombieUpstream upstream = ZombieUpstream.transform(new CommonUpstream(), 10, "id");
assertThat(upstream, is(notNullValue()));
} |
@Override
public PostScript readPostScript(byte[] data, int offset, int length)
throws IOException
{
long cpuStart = THREAD_MX_BEAN.getCurrentThreadCpuTime();
CodedInputStream input = CodedInputStream.newInstance(data, offset, length);
DwrfProto.PostScript postScript = DwrfProto.PostScript.parseFrom(input);
HiveWriterVersion writerVersion = postScript.hasWriterVersion() && postScript.getWriterVersion() > 0 ? ORC_HIVE_8732 : ORIGINAL;
OptionalInt stripeCacheLength = OptionalInt.empty();
Optional<DwrfStripeCacheMode> stripeCacheMode = Optional.empty();
if (postScript.hasCacheSize() && postScript.hasCacheMode()) {
stripeCacheLength = OptionalInt.of(postScript.getCacheSize());
stripeCacheMode = Optional.of(toStripeCacheMode(postScript.getCacheMode()));
}
runtimeStats.addMetricValue("DwrfReadPostScriptTimeNanos", RuntimeUnit.NANO, THREAD_MX_BEAN.getCurrentThreadCpuTime() - cpuStart);
return new PostScript(
ImmutableList.of(),
postScript.getFooterLength(),
0,
toCompression(postScript.getCompression()),
postScript.getCompressionBlockSize(),
writerVersion,
stripeCacheLength,
stripeCacheMode);
} | @Test
public void testReadPostScriptMissingDwrfStripeCacheLength()
throws IOException
{
DwrfProto.PostScript protoPostScript = baseProtoPostScript.toBuilder()
.clearCacheSize()
.build();
byte[] data = protoPostScript.toByteArray();
PostScript postScript = dwrfMetadataReader.readPostScript(data, 0, data.length);
assertFalse(postScript.getDwrfStripeCacheLength().isPresent());
assertFalse(postScript.getDwrfStripeCacheMode().isPresent());
} |
public void notifyMessageArriving(final String topic, final int queueId, final long maxOffset) {
notifyMessageArriving(topic, queueId, maxOffset, null, 0, null, null);
} | @Test
public void notifyMessageArrivingTest() {
Assertions.assertThatCode(() -> pullRequestHoldService.notifyMessageArriving(TEST_TOPIC, DEFAULT_QUEUE_ID, MAX_OFFSET)).doesNotThrowAnyException();
Assertions.assertThatCode(() -> pullRequestHoldService.suspendPullRequest(TEST_TOPIC, DEFAULT_QUEUE_ID, pullRequest)).doesNotThrowAnyException();
Assertions.assertThatCode(() -> pullRequestHoldService.notifyMessageArriving(TEST_TOPIC, DEFAULT_QUEUE_ID, MAX_OFFSET,
1L, System.currentTimeMillis(), new byte[10], new HashMap<>())).doesNotThrowAnyException();
} |
void notifyPendingReceivedCallback(final Message<T> message, Exception exception) {
if (pendingReceives.isEmpty()) {
return;
}
// fetch receivedCallback from queue
final CompletableFuture<Message<T>> receivedFuture = nextPendingReceive();
if (receivedFuture == null) {
return;
}
if (exception != null) {
internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(exception));
return;
}
if (message == null) {
IllegalStateException e = new IllegalStateException("received message can't be null");
internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(e));
return;
}
if (getCurrentReceiverQueueSize() == 0) {
// call interceptor and complete received callback
trackMessage(message);
interceptAndComplete(message, receivedFuture);
return;
}
// increase permits for available message-queue
messageProcessed(message);
// call interceptor and complete received callback
interceptAndComplete(message, receivedFuture);
} | @Test(invocationTimeOut = 1000)
public void testNotifyPendingReceivedCallback_CompleteWithExceptionWhenMessageIsNull() {
CompletableFuture<Message<byte[]>> receiveFuture = new CompletableFuture<>();
consumer.pendingReceives.add(receiveFuture);
consumer.notifyPendingReceivedCallback(null, null);
try {
receiveFuture.join();
} catch (CompletionException e) {
Assert.assertEquals("received message can't be null", e.getCause().getMessage());
}
Assert.assertTrue(receiveFuture.isCompletedExceptionally());
} |
public void check(Metadata metadata) throws AccessPermissionException {
if (!needToCheck) {
return;
}
if ("false".equals(metadata.get(AccessPermissions.EXTRACT_CONTENT))) {
if (allowExtractionForAccessibility) {
if ("true".equals(metadata.get(AccessPermissions.EXTRACT_FOR_ACCESSIBILITY))) {
return;
}
throw new AccessPermissionException(
"Content extraction for accessibility is not allowed.");
}
throw new AccessPermissionException("Content extraction is not allowed.");
}
} | @Test
public void testNoExtraction() {
Metadata m = null;
//allow nothing
AccessChecker checker = new AccessChecker(false);
boolean ex = false;
try {
m = getMetadata(false, false);
checker.check(m);
} catch (AccessPermissionException e) {
ex = true;
}
assertTrue(ex, "correct exception with no extraction, no extract for accessibility");
ex = false;
try {
//document allows extraction for accessibility
m = getMetadata(false, true);
checker.check(m);
} catch (AccessPermissionException e) {
//but application is not an accessibility application
ex = true;
}
assertTrue(ex, "correct exception with no extraction, no extract for accessibility");
} |
@GetMapping(value = "/node/list")
@Secured(action = ActionTypes.READ, resource = "nacos/admin", signType = SignType.CONSOLE)
public Result<Collection<Member>> listNodes(@RequestParam(value = "address", required = false) String address,
@RequestParam(value = "state", required = false) String state) throws NacosException {
NodeState nodeState = null;
if (StringUtils.isNoneBlank(state)) {
try {
nodeState = NodeState.valueOf(state.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_STATE, "Illegal state: " + state);
}
}
return Result.success(nacosClusterOperationService.listNodes(address, nodeState));
} | @Test
void testListNodes() throws NacosException {
Member member1 = new Member();
member1.setIp("1.1.1.1");
member1.setPort(8848);
member1.setState(NodeState.DOWN);
Member member2 = new Member();
member2.setIp("2.2.2.2");
member2.setPort(8848);
List<Member> members = Arrays.asList(member1, member2);
Mockito.when(nacosClusterOperationService.listNodes(any(), any())).thenReturn(members);
Result<Collection<Member>> result = nacosClusterControllerV2.listNodes("1.1.1.1", null);
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertTrue(result.getData().stream().findFirst().isPresent());
assertEquals("1.1.1.1:8848", result.getData().stream().findFirst().get().getAddress());
} |
public void sendCouponNewsletter() {
try {
// Retrieve the list of contacts from the "weekly-coupons-newsletter" contact
// list
// snippet-start:[sesv2.java2.newsletter.ListContacts]
ListContactsRequest contactListRequest = ListContactsRequest.builder()
.contactListName(CONTACT_LIST_NAME)
.build();
List<String> contactEmails;
try {
ListContactsResponse contactListResponse = sesClient.listContacts(contactListRequest);
contactEmails = contactListResponse.contacts().stream()
.map(Contact::emailAddress)
.toList();
} catch (Exception e) {
// TODO: Remove when listContacts's GET body issue is resolved.
contactEmails = this.contacts;
}
// snippet-end:[sesv2.java2.newsletter.ListContacts]
// Send an email using the "weekly-coupons" template to each contact in the list
// snippet-start:[sesv2.java2.newsletter.SendEmail.template]
String coupons = Files.readString(Paths.get("resources/coupon_newsletter/sample_coupons.json"));
for (String emailAddress : contactEmails) {
SendEmailRequest newsletterRequest = SendEmailRequest.builder()
.destination(Destination.builder().toAddresses(emailAddress).build())
.content(EmailContent.builder()
.template(Template.builder()
.templateName(TEMPLATE_NAME)
.templateData(coupons)
.build())
.build())
.fromEmailAddress(this.verifiedEmail)
.listManagementOptions(ListManagementOptions.builder()
.contactListName(CONTACT_LIST_NAME)
.build())
.build();
SendEmailResponse newsletterResponse = sesClient.sendEmail(newsletterRequest);
System.out.println("Newsletter sent to " + emailAddress + ": " + newsletterResponse.messageId());
}
// snippet-end:[sesv2.java2.newsletter.SendEmail.template]
} catch (NotFoundException e) {
// If the contact list does not exist, inform the user; unlike the cases below, the exception is not rethrown
System.err.println("The contact list is missing. Please create the contact list and try again.");
} catch (AccountSuspendedException e) {
// If the account is suspended, inform the user (also without rethrowing)
System.err.println("Your account is suspended. Please resolve the issue and try again.");
} catch (MailFromDomainNotVerifiedException e) {
// If the sending domain is not verified, fail the workflow and inform the user
System.err.println("The sending domain is not verified. Please verify your domain and try again.");
throw e;
} catch (MessageRejectedException e) {
// If the message is rejected due to invalid content, fail the workflow and
// inform the user
System.err.println("The message content is invalid. Please check your template and try again.");
throw e;
} catch (SendingPausedException e) {
// If sending is paused, fail the workflow and inform the user
System.err.println("Sending is currently paused for your account. Please resolve the issue and try again.");
throw e;
} catch (Exception e) {
System.err.println("Error occurred while sending the newsletter: " + e.getMessage());
e.printStackTrace();
}
} | @Test
public void test_sendCouponNewsletter_error_contactListNotFound() {
// Mock the necessary AWS SDK calls and responses
CreateEmailTemplateResponse templateResponse = CreateEmailTemplateResponse.builder().build();
when(sesClient.createEmailTemplate(any(CreateEmailTemplateRequest.class))).thenReturn(templateResponse);
when(sesClient.listContacts(any(ListContactsRequest.class))).thenThrow(
NotFoundException.class);
try {
scenario.sendCouponNewsletter();
} catch (Exception e) {
}
String errorOutput = errContent.toString();
assertThat(errorOutput,
containsString("The contact list is missing. Please create the contact list and try again."));
} |
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return lessIsBetter ? criterionValue1.isLessThan(criterionValue2)
: criterionValue1.isGreaterThan(criterionValue2);
} | @Test
public void betterThanWithLessIsNotBetter() {
AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion(), false);
assertTrue(criterion.betterThan(numOf(5000), numOf(4500)));
assertFalse(criterion.betterThan(numOf(4500), numOf(5000)));
} |
void checkSupportedCipherSuites() {
if (getSupportedCipherSuites() == null) {
setSupportedCipherSuites(Collections.singletonList(HTTP2_DEFAULT_CIPHER));
} else if (!getSupportedCipherSuites().contains(HTTP2_DEFAULT_CIPHER)) {
throw new IllegalArgumentException("HTTP/2 server configuration must include cipher: " + HTTP2_DEFAULT_CIPHER);
}
} | @Test
void testSetDefaultHttp2Cipher() {
http2ConnectorFactory.checkSupportedCipherSuites();
assertThat(http2ConnectorFactory.getSupportedCipherSuites()).containsExactly(
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
} |
@Override
public String getFailureMessage() {
if (hasAccess()) {
return StringUtils.EMPTY;
}
List<String> failedTablesList = new ArrayList<>(_failedTables);
Collections.sort(failedTablesList); // Sort to make output deterministic
return "Authorization Failed for tables: " + failedTablesList;
} | @Test
public void testGetFailureMessage() {
TableAuthorizationResult result = new TableAuthorizationResult(Set.of("table1", "table2"));
Assert.assertEquals(result.getFailureMessage(), "Authorization Failed for tables: [table1, table2]");
} |
@Override
public int getOrder() {
return LOWEST_PRECEDENCE;
} | @Test
public void testOrder() {
Assert.assertEquals(Ordered.LOWEST_PRECEDENCE, instance.getOrder());
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (String pool : POOLS) {
for (int i = 0; i < ATTRIBUTES.length; i++) {
final String attribute = ATTRIBUTES[i];
final String name = NAMES[i];
try {
final ObjectName on = new ObjectName("java.nio:type=BufferPool,name=" + pool);
mBeanServer.getMBeanInfo(on);
gauges.put(name(pool, name), new JmxAttributeGauge(mBeanServer, on, attribute));
} catch (JMException ignored) {
LOGGER.debug("Unable to load buffer pool MBeans, possibly running on Java 6");
}
}
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void ignoresGaugesForObjectsWhichCannotBeFound() throws Exception {
when(mBeanServer.getMBeanInfo(mapped)).thenThrow(new InstanceNotFoundException());
assertThat(buffers.getMetrics().keySet())
.containsOnly("direct.count",
"direct.capacity",
"direct.used");
} |
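Each gauge above is a thin wrapper that reads one MBean attribute on demand. Reading the same attributes directly shows what JmxAttributeGauge does under the hood; Count, MemoryUsed, and TotalCapacity are the standard BufferPoolMXBean attributes:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class BufferPoolPeek {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName direct = new ObjectName("java.nio:type=BufferPool,name=direct");
        System.out.println("count    = " + server.getAttribute(direct, "Count"));
        System.out.println("used     = " + server.getAttribute(direct, "MemoryUsed"));
        System.out.println("capacity = " + server.getAttribute(direct, "TotalCapacity"));
    }
}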
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitMulti[] submitMulties = createSubmitMulti(exchange);
List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
for (SubmitMulti submitMulti : submitMulties) {
SubmitMultiResult result;
if (log.isDebugEnabled()) {
log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
}
try {
result = session.submitMultiple(
submitMulti.getServiceType(),
TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
submitMulti.getSourceAddr(),
(Address[]) submitMulti.getDestAddresses(),
new ESMClass(submitMulti.getEsmClass()),
submitMulti.getProtocolId(),
submitMulti.getPriorityFlag(),
submitMulti.getScheduleDeliveryTime(),
submitMulti.getValidityPeriod(),
new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
DataCodings.newInstance(submitMulti.getDataCoding()),
submitMulti.getSmDefaultMsgId(),
submitMulti.getShortMessage(),
submitMulti.getOptionalParameters());
results.add(result);
} catch (Exception e) {
throw new SmppException(e);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'", exchange.getExchangeId(),
results);
}
List<String> messageIDs = new ArrayList<>(results.size());
// {messageID : [{destAddr : address, error : errorCode}]}
Map<String, List<Map<String, Object>>> errors = new HashMap<>();
for (SubmitMultiResult result : results) {
UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
if (deliveries != null) {
List<Map<String, Object>> undelivered = new ArrayList<>();
for (UnsuccessDelivery delivery : deliveries) {
Map<String, Object> error = new HashMap<>();
error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
undelivered.add(error);
}
if (!undelivered.isEmpty()) {
errors.put(result.getMessageId(), undelivered);
}
}
messageIDs.add(result.getMessageId());
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
if (!errors.isEmpty()) {
message.setHeader(SmppConstants.ERROR, errors);
}
} | @Test
public void singleDlrRequestOverridesDeliveryReceiptFlag() throws Exception {
String longSms = "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" +
"12345678901234567890123456789012345678901234567890123456789012345678901";
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
exchange.getIn().setHeader(SmppConstants.SINGLE_DLR, "true");
exchange.getIn().setBody(longSms.getBytes());
Address[] destAddrs = new Address[] {
new Address(
TypeOfNumber.UNKNOWN,
NumberingPlanIndicator.UNKNOWN,
"1717")
};
when(session.submitMultiple(eq("CMT"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
eq(destAddrs),
eq(new ESMClass((byte) 64)), eq((byte) 0), eq((byte) 1),
(String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.DEFAULT)),
eq(ReplaceIfPresentFlag.DEFAULT),
eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), any(byte[].class)))
.thenReturn(new SubmitMultiResult("1", null, null));
when(session.submitMultiple(eq("CMT"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
eq(destAddrs),
eq(new ESMClass((byte) 64)), eq((byte) 0), eq((byte) 1),
(String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
eq(ReplaceIfPresentFlag.DEFAULT),
eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), any(byte[].class)))
.thenReturn(new SubmitMultiResult("2", null, null));
command.execute(exchange);
assertEquals(Arrays.asList("1", "2"), exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals(2, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT));
} |
@Override
public void open() throws Exception {
windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig());
internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
// The structure is: [type]|[normal record]|[timestamp]|[current watermark]|[timer data]
// If the type is 'NORMAL_RECORD', store the RowData object in the 2nd column.
// If the type is 'TRIGGER_TIMER', store the timestamp in the 3rd column and the timer
// data in the 5th column.
reuseRowData =
new UpdatableRowData(GenericRowData.of(NORMAL_RECORD, null, null, null, null), 5);
reuseTimerRowData =
new UpdatableRowData(GenericRowData.of(TRIGGER_TIMER, null, null, null, null), 5);
// The structure is: [timer_type]|[row key]|[encoded namespace]
reuseTimerData = new UpdatableRowData(GenericRowData.of(0, null, 0), 3);
reuseTimerRowData.setField(4, reuseTimerData);
keyLength = getKeyType().getFieldCount();
keySerializer = (RowDataSerializer) getKeySerializer();
super.open();
} | @Test
void testFinishBundleTriggeredByCount() throws Exception {
Configuration conf = new Configuration();
conf.set(PythonOptions.MAX_BUNDLE_SIZE, 4);
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(conf);
long initialTime = 0L;
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
testHarness.open();
testHarness.processElement(newRecord(true, initialTime + 1, "c1", "c2", 0L, 0L));
testHarness.processElement(newRecord(true, initialTime + 2, "c1", "c4", 1L, 6000L));
testHarness.processElement(newRecord(true, initialTime + 3, "c1", "c6", 2L, 10000L));
testHarness.processElement(newRecord(true, initialTime + 4, "c2", "c8", 3L, 0L));
testHarness.processWatermark(new Watermark(10000L));
expectedOutput.add(newWindowRecord(-5000L, 5000L, "c1", 0L));
expectedOutput.add(newStateCleanupRecord(-5000L, 5000L, "c1"));
expectedOutput.add(newWindowRecord(-5000L, 5000L, "c2", 3L));
expectedOutput.add(newStateCleanupRecord(-5000L, 5000L, "c2"));
expectedOutput.add(newWindowRecord(0, 10000L, "c2", 3L));
expectedOutput.add(newStateCleanupRecord(0L, 10000L, "c2"));
expectedOutput.add(newWindowRecord(0, 10000L, "c1", 0L));
expectedOutput.add(newStateCleanupRecord(0L, 10000L, "c1"));
expectedOutput.add(new Watermark(10000L));
assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.processWatermark(20000L);
testHarness.close();
expectedOutput.add(newWindowRecord(5000L, 15000L, "c1", 1L));
expectedOutput.add(newStateCleanupRecord(5000L, 15000L, "c1"));
expectedOutput.add(newWindowRecord(10000L, 20000L, "c1", 2L));
expectedOutput.add(newStateCleanupRecord(10000L, 20000L, "c1"));
expectedOutput.add(new Watermark(20000L));
assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
} |
@Override
public long checkAndGetApplicationLifetime(String queueName,
long lifetimeRequestedByApp) {
readLock.lock();
try {
CSQueue queue = getQueue(queueName);
if (!(queue instanceof AbstractLeafQueue)) {
return lifetimeRequestedByApp;
}
long defaultApplicationLifetime =
queue.getDefaultApplicationLifetime();
long maximumApplicationLifetime =
queue.getMaximumApplicationLifetime();
// check only for maximum, that's enough because default can't
// exceed maximum
if (maximumApplicationLifetime <= 0) {
return (lifetimeRequestedByApp <= 0) ? defaultApplicationLifetime :
lifetimeRequestedByApp;
}
if (lifetimeRequestedByApp <= 0) {
return defaultApplicationLifetime;
} else if (lifetimeRequestedByApp > maximumApplicationLifetime) {
return maximumApplicationLifetime;
}
return lifetimeRequestedByApp;
} finally {
readLock.unlock();
}
} | @Test(timeout = 30000)
public void testcheckAndGetApplicationLifetime() throws Exception {
long maxLifetime = 10;
long defaultLifetime = 5;
// positive integer value
CapacityScheduler cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(maxLifetime,
cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(9, cs.checkAndGetApplicationLifetime("default", 9));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
Assert.assertEquals(maxLifetime,
cs.getMaximumApplicationLifetime("default"));
maxLifetime = -1;
defaultLifetime = -1;
// test for default values
cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(100, cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
Assert.assertEquals(maxLifetime,
cs.getMaximumApplicationLifetime("default"));
maxLifetime = 10;
defaultLifetime = 10;
cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(maxLifetime,
cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
Assert.assertEquals(maxLifetime,
cs.getMaximumApplicationLifetime("default"));
maxLifetime = 0;
defaultLifetime = 0;
cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(100, cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
maxLifetime = 10;
defaultLifetime = -1;
cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(maxLifetime,
cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(maxLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(maxLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
maxLifetime = 5;
defaultLifetime = 10;
try {
setUpCSQueue(maxLifetime, defaultLifetime);
Assert.fail("Expected to fails since maxLifetime < defaultLifetime.");
} catch (ServiceStateException sse) {
Throwable rootCause = sse.getCause().getCause();
Assert.assertTrue(
rootCause.getMessage().contains("can't exceed maximum lifetime"));
}
maxLifetime = -1;
defaultLifetime = 10;
cs = setUpCSQueue(maxLifetime, defaultLifetime);
Assert.assertEquals(100,
cs.checkAndGetApplicationLifetime("default", 100));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", -1));
Assert.assertEquals(defaultLifetime,
cs.checkAndGetApplicationLifetime("default", 0));
} |
public boolean isUserAuthorized(List<String> userAndRoles) {
if (!option.permissionIsSet()) {
return true;
}
Set<String> intersection = new HashSet<>(userAndRoles);
intersection.retainAll(option.getOwners());
return !intersection.isEmpty();
} | @Test
void testIsUserAuthorized() {
List<String> userAndRoles = new ArrayList<>();
userAndRoles.add("User1");
userAndRoles.add("Role1");
userAndRoles.add("Role2");
List<String> owners;
InterpreterSetting interpreterSetting;
InterpreterOption interpreterOption;
// With match
owners = new ArrayList<>();
owners.add("Role1");
interpreterOption = new InterpreterOption();
interpreterOption.setUserPermission(true);
interpreterOption.owners = owners;
interpreterSetting = new InterpreterSetting.Builder()
.setId("id")
.setName("id")
.setGroup("group")
.setOption(interpreterOption)
.setConf(zConf)
.create();
assertTrue(interpreterSetting.isUserAuthorized(userAndRoles));
// Without match
owners = new ArrayList<>();
owners.add("Role88");
interpreterOption = new InterpreterOption();
interpreterOption.setUserPermission(true);
interpreterOption.owners = owners;
interpreterSetting = new InterpreterSetting.Builder()
.setId("id")
.setName("id")
.setGroup("group")
.setOption(interpreterOption)
.setConf(zConf)
.create();
assertFalse(interpreterSetting.isUserAuthorized(userAndRoles));
// Without permissions
owners = new ArrayList<>();
interpreterOption = new InterpreterOption();
interpreterOption.setUserPermission(false);
interpreterOption.owners = owners;
interpreterSetting = new InterpreterSetting.Builder()
.setId("id")
.setName("id")
.setGroup("group")
.setOption(interpreterOption)
.create();
assertTrue(interpreterSetting.isUserAuthorized(userAndRoles));
} |
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir,
ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) {
ApplicationId applicationId = params.getApplicationId();
Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
TenantRepository.getTenantPath(applicationId.tenant()),
serverDbSessionDir, applicationPackage, sessionZooKeeperClient,
onnxModelCost, endpointCertificateSecretStores);
preparation.preprocess();
try {
AllocatedHosts allocatedHosts = preparation.buildModels(now);
preparation.makeResult(allocatedHosts);
if ( ! params.isDryRun()) {
FileReference fileReference = preparation.triggerDistributionOfApplicationPackage();
preparation.writeStateZK(fileReference);
preparation.writeEndpointCertificateMetadataZK();
preparation.writeContainerEndpointsZK();
}
log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId);
return preparation.result();
}
catch (IllegalArgumentException e) {
if (e instanceof InvalidApplicationException)
throw e;
throw new InvalidApplicationException("Invalid application package", e);
}
} | @Test
public void require_that_application_is_prepared() throws Exception {
prepare(testApp);
assertTrue(curator.exists(sessionPath(1).append(ZKApplication.USERAPP_ZK_SUBPATH).append("services.xml")));
} |
@Bean
@ConditionalOnMissingBean(ConsulDataChangedListener.class)
public DataChangedListener consulDataChangedListener(final ConsulClient consulClient) {
return new ConsulDataChangedListener(consulClient);
} | @Test
public void testConsulDataChangedListener() {
ConsulSyncConfiguration consulListener = new ConsulSyncConfiguration();
ConsulClient consulClient = mock(ConsulClient.class);
assertNotNull(consulListener.consulDataChangedListener(consulClient));
} |
static long stringToSeconds(String time) throws NumberFormatException, DateTimeParseException
{
long duration = 0;
if (time.matches(INPUT_HMS_REGEX))
{
String textWithoutWhitespaces = time.replaceAll(WHITESPACE_REGEX, "");
//parse input using ISO-8601 Duration format (e.g. 'PT1h30m10s')
duration = Duration.parse("PT" + textWithoutWhitespaces).toMillis() / 1000;
}
else
{
String[] parts = time.split(":");
// parse from back to front, so as to accept hour:min:sec, min:sec, and sec formats
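// note: the multiplier cap of 3600 means at most hour:min:sec is honored; any leading fields beyond hours are silently ignored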
for (int i = parts.length - 1, multiplier = 1; i >= 0 && multiplier <= 3600; i--, multiplier *= 60)
{
duration += Integer.parseInt(parts[i].trim()) * multiplier;
}
}
return duration;
} | @Test
public void properIntuitiveTimeStringShouldReturnCorrectSeconds()
{
assertEquals(5, ClockPanel.stringToSeconds("5s"));
assertEquals(50, ClockPanel.stringToSeconds("50s"));
assertEquals(120, ClockPanel.stringToSeconds("2m"));
assertEquals(120, ClockPanel.stringToSeconds("120s"));
assertEquals(1200, ClockPanel.stringToSeconds("20m"));
assertEquals(121, ClockPanel.stringToSeconds("2m1s"));
assertEquals(121, ClockPanel.stringToSeconds("2m 1s"));
assertEquals(3660, ClockPanel.stringToSeconds("1h 1m"));
assertEquals(3660, ClockPanel.stringToSeconds("61m"));
assertEquals(3660, ClockPanel.stringToSeconds("3660s"));
assertEquals(9000, ClockPanel.stringToSeconds("2h 30m"));
assertEquals(9033, ClockPanel.stringToSeconds("2h 30m 33s"));
assertEquals(82800, ClockPanel.stringToSeconds("23h"));
assertEquals(400271, ClockPanel.stringToSeconds("111h 11m 11s"));
} |
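Both branches are easy to exercise in isolation: the ISO-8601 path leans on java.time.Duration (whose parser accepts lower-case unit letters), and the colon path is plain positional arithmetic. A small self-contained illustration, independent of ClockPanel:

import java.time.Duration;

public class TimeParseDemo {
    public static void main(String[] args) {
        // ISO-8601 path: "2h 30m" becomes "PT2h30m" once whitespace is stripped
        long iso = Duration.parse("PT2h30m").toMillis() / 1000;
        System.out.println(iso); // 9000

        // colon path: parse "1:02:03" back to front with multipliers 1, 60, 3600
        long colon = 0;
        String[] parts = "1:02:03".split(":");
        for (int i = parts.length - 1, multiplier = 1; i >= 0 && multiplier <= 3600; i--, multiplier *= 60) {
            colon += Integer.parseInt(parts[i].trim()) * multiplier;
        }
        System.out.println(colon); // 3723
    }
}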
protected static String getConfigStr(String name) {
if (cacheMap.containsKey(name)) {
return (String) cacheMap.get(name);
}
String val = getConfig(name);
if (StringUtils.isBlank(val)) {
return null;
}
cacheMap.put(name, val);
return val;
} | @Test
public void testGetConfigStr() {
// clear cache
DashboardConfig.clearCache();
// if not set, return null
assertEquals(null, DashboardConfig.getConfigStr("a"));
// test property
System.setProperty("a", "111");
assertEquals("111", DashboardConfig.getConfigStr("a"));
// test env
environmentVariables.set("a", "222");
// return value in cache
assertEquals("111", DashboardConfig.getConfigStr("a"));
// clear cache and then test
DashboardConfig.clearCache();
assertEquals("222", DashboardConfig.getConfigStr("a"));
} |
public LatencyProbe newProbe(String serviceName, String dataStructureName, String methodName) {
ServiceProbes serviceProbes = getOrPutIfAbsent(
metricsPerServiceMap, serviceName, metricsPerServiceConstructorFunction);
return serviceProbes.newProbe(dataStructureName, methodName);
} | @Test
public void testMaxMicros() {
LatencyProbeImpl probe = (LatencyProbeImpl) plugin.newProbe("foo", "queue", "somemethod");
probe.recordValue(MICROSECONDS.toNanos(10));
probe.recordValue(MICROSECONDS.toNanos(1000));
probe.recordValue(MICROSECONDS.toNanos(4));
assertEquals(1000, probe.distribution.maxMicros());
} |
@Override
public EntityStatementJWS establishIdpTrust(URI issuer) {
var trustedFederationStatement = fetchTrustedFederationStatement(issuer);
// the federation statement from the master establishes trust in the JWKS and the issuer URL
// of the IdP; we still need to fetch the entity configuration directly afterward to get the
// full entity statement
return fetchTrustedEntityConfiguration(issuer, trustedFederationStatement.body().jwks());
} | @Test
void establishTrust() {
var client = new FederationMasterClientImpl(FEDERATION_MASTER, federationApiClient, clock);
var issuer = URI.create("https://idp-tk.example.com");
var federationFetchUrl = FEDERATION_MASTER.resolve("/fetch");
var fedmasterKeypair = ECKeyGenerator.example();
var fedmasterEntityConfigurationJws =
federationFetchFedmasterConfiguration(federationFetchUrl, fedmasterKeypair);
var sectoralIdpKeypair = ECKeyGenerator.generate();
var trustedFederationStatement =
trustedFederationStatement(issuer, sectoralIdpKeypair, fedmasterKeypair);
var sectoralEntityConfiguration = sectoralIdpEntityConfiguration(issuer, sectoralIdpKeypair);
when(federationApiClient.fetchEntityConfiguration(FEDERATION_MASTER))
.thenReturn(fedmasterEntityConfigurationJws);
when(federationApiClient.fetchFederationStatement(
federationFetchUrl, FEDERATION_MASTER.toString(), issuer.toString()))
.thenReturn(trustedFederationStatement);
when(federationApiClient.fetchEntityConfiguration(issuer))
.thenReturn(sectoralEntityConfiguration);
// when
var entityStatementJWS = client.establishIdpTrust(issuer);
// then
assertEquals(entityStatementJWS.body().sub(), issuer.toString());
} |
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
try {
String partitionColumn = job.get(Constants.JDBC_PARTITION_COLUMN);
int numPartitions = job.getInt(Constants.JDBC_NUM_PARTITIONS, -1);
String lowerBound = job.get(Constants.JDBC_LOW_BOUND);
String upperBound = job.get(Constants.JDBC_UPPER_BOUND);
InputSplit[] splits;
if (!job.getBoolean(Constants.JDBC_SPLIT_QUERY, true) || numPartitions <= 1) {
// We will not split this query if:
// 1. hive.sql.query.split is set to false (either manually or automatically by Calcite)
// 2. numPartitions <= 1
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
LOGGER.info("Creating 1 input split " + splits[0]);
return splits;
}
dbAccessor = DatabaseAccessorFactory.getAccessor(job);
Path[] tablePaths = FileInputFormat.getInputPaths(job);
// We will split this query into n splits
LOGGER.debug("Creating {} input splits", numPartitions);
if (partitionColumn != null) {
List<String> columnNames = dbAccessor.getColumnNames(job);
if (!columnNames.contains(partitionColumn)) {
throw new IOException("Cannot find partitionColumn:" + partitionColumn + " in " + columnNames);
}
List<TypeInfo> hiveColumnTypesList = dbAccessor.getColumnTypes(job);
TypeInfo typeInfo = hiveColumnTypesList.get(columnNames.indexOf(partitionColumn));
if (!(typeInfo instanceof PrimitiveTypeInfo)) {
throw new IOException(partitionColumn + " is a complex type, only primitive type can be a partition column");
}
if (lowerBound == null || upperBound == null) {
Pair<String, String> boundary = dbAccessor.getBounds(job, partitionColumn, lowerBound == null,
upperBound == null);
if (lowerBound == null) {
lowerBound = boundary.getLeft();
}
if (upperBound == null) {
upperBound = boundary.getRight();
}
}
if (lowerBound == null) {
throw new IOException("lowerBound of " + partitionColumn + " cannot be null");
}
if (upperBound == null) {
throw new IOException("upperBound of " + partitionColumn + " cannot be null");
}
IntervalSplitter intervalSplitter = IntervalSplitterFactory.newIntervalSpitter(typeInfo);
List<MutablePair<String, String>> intervals = intervalSplitter.getIntervals(lowerBound, upperBound, numPartitions,
typeInfo);
if (intervals.size() <= 1) {
LOGGER.debug("Creating 1 input split");
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
return splits;
}
intervals.get(0).setLeft(null);
intervals.get(intervals.size()-1).setRight(null);
splits = new InputSplit[intervals.size()];
for (int i = 0; i < intervals.size(); i++) {
splits[i] = new JdbcInputSplit(partitionColumn, intervals.get(i).getLeft(), intervals.get(i).getRight(), tablePaths[0]);
}
} else {
int numRecords = dbAccessor.getTotalNumberOfRecords(job);
if (numRecords < numPartitions) {
numPartitions = numRecords;
}
int numRecordsPerSplit = numRecords / numPartitions;
int numSplitsWithExtraRecords = numRecords % numPartitions;
LOGGER.debug("Num records = {}", numRecords);
splits = new InputSplit[numPartitions];
int offset = 0;
for (int i = 0; i < numPartitions; i++) {
int numRecordsInThisSplit = numRecordsPerSplit;
if (i < numSplitsWithExtraRecords) {
numRecordsInThisSplit++;
}
splits[i] = new JdbcInputSplit(numRecordsInThisSplit, offset, tablePaths[0]);
offset += numRecordsInThisSplit;
}
}
dbAccessor = null;
LOGGER.info("Num input splits created {}", splits.length);
for (InputSplit split : splits) {
LOGGER.info("split:" + split.toString());
}
return splits;
}
catch (Exception e) {
LOGGER.error("Error while splitting input data.", e);
throw new IOException(e);
}
} | @Test
public void testIntervalSplit_Double() throws HiveJdbcDatabaseAccessException, IOException {
JdbcInputFormat f = new JdbcInputFormat();
when(mockDatabaseAccessor.getColumnNames(any(Configuration.class))).thenReturn(Lists.newArrayList("a"));
List<TypeInfo> columnTypes = Collections.singletonList(TypeInfoFactory.doubleTypeInfo);
when(mockDatabaseAccessor.getColumnTypes(any(Configuration.class))).thenReturn(columnTypes);
JobConf conf = new JobConf();
conf.set("mapred.input.dir", "/temp");
conf.set("hive.sql.partitionColumn", "a");
conf.set("hive.sql.numPartitions", "3");
conf.set("hive.sql.lowerBound", "0");
conf.set("hive.sql.upperBound", "10");
InputSplit[] splits = f.getSplits(conf, -1);
assertThat(splits, is(notNullValue()));
assertThat(splits.length, is(3));
assertNull(((JdbcInputSplit)splits[0]).getLowerBound());
assertTrue(Double.parseDouble(((JdbcInputSplit)splits[0]).getUpperBound()) > 3.3 && Double.parseDouble((
(JdbcInputSplit)splits[0]).getUpperBound()) < 3.4);
assertTrue(Double.parseDouble(((JdbcInputSplit)splits[1]).getLowerBound()) > 3.3 && Double.parseDouble((
(JdbcInputSplit)splits[1]).getLowerBound()) < 3.4);
assertTrue(Double.parseDouble(((JdbcInputSplit)splits[1]).getUpperBound()) > 6.6 && Double.parseDouble((
(JdbcInputSplit)splits[1]).getUpperBound()) < 6.7);
assertTrue(Double.parseDouble(((JdbcInputSplit)splits[2]).getLowerBound()) > 6.6 && Double.parseDouble((
(JdbcInputSplit)splits[2]).getLowerBound()) < 6.7);
assertNull(((JdbcInputSplit)splits[2]).getUpperBound());
} |
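The IntervalSplitter implementations are not shown here, but for a numeric column the arithmetic the test exercises is an even split of [lowerBound, upperBound] into numPartitions pieces. A minimal sketch of that computation — the names are illustrative, and the real factory dispatches per TypeInfo:

import java.util.ArrayList;
import java.util.List;

public class DoubleIntervalDemo {

    // split [lower, upper] into n contiguous intervals of equal width
    static List<double[]> split(double lower, double upper, int n) {
        List<double[]> intervals = new ArrayList<>();
        double width = (upper - lower) / n;
        for (int i = 0; i < n; i++) {
            double left = lower + i * width;
            double right = (i == n - 1) ? upper : left + width;
            intervals.add(new double[] {left, right});
        }
        return intervals;
    }

    public static void main(String[] args) {
        // mirrors the test: bounds 0..10 over 3 partitions -> boundaries near 3.33 and 6.67
        split(0, 10, 3).forEach(iv -> System.out.println(iv[0] + " .. " + iv[1]));
    }
}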
public Set<String> getKeySetToDownload(Set<String> blobStoreKeySet, Set<String> zookeeperKeySet) {
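// computes zookeeperKeySet \ blobStoreKeySet in place: the caller's set is mutated and returned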
zookeeperKeySet.removeAll(blobStoreKeySet);
LOG.debug("Key list to download {}", zookeeperKeySet);
return zookeeperKeySet;
} | @Test
public void testBlobSynchronizerForKeysToDownload() {
BlobStore store = initLocalFs();
LocalFsBlobStoreSynchronizer sync = new LocalFsBlobStoreSynchronizer(store, conf);
// test for keylist to download
Set<String> zkSet = new HashSet<>();
zkSet.add("key1");
Set<String> blobStoreSet = new HashSet<>();
blobStoreSet.add("key1");
Set<String> resultSet = sync.getKeySetToDownload(blobStoreSet, zkSet);
assertTrue(resultSet.isEmpty(), "Not Empty");
zkSet.add("key1");
blobStoreSet.add("key2");
resultSet = sync.getKeySetToDownload(blobStoreSet, zkSet);
assertTrue(resultSet.isEmpty(), "Not Empty");
blobStoreSet.remove("key1");
blobStoreSet.remove("key2");
zkSet.add("key1");
resultSet = sync.getKeySetToDownload(blobStoreSet, zkSet);
assertTrue((resultSet.size() == 1) && (resultSet.contains("key1")), "Unexpected keys to download");
} |
@Override
public UnboundedReader<KinesisRecord> createReader(
PipelineOptions options, @Nullable KinesisReaderCheckpoint checkpointMark)
throws IOException {
KinesisReaderCheckpoint initCheckpoint;
if (checkpointMark != null) {
LOG.info("Got checkpoint mark {}", checkpointMark);
initCheckpoint = checkpointMark;
} else {
try {
LOG.info("No checkpointMark specified, fall back to initial {}", this.initialCheckpoint);
initCheckpoint = Preconditions.checkArgumentNotNull(this.initialCheckpoint);
} catch (Exception e) {
throw new IOException(e);
}
}
return initReader(spec, options, initCheckpoint, this);
} | @Test
public void testCreateReaderOfCorrectType() throws Exception {
KinesisIO.Read readSpec =
KinesisIO.read()
.withStreamName("stream-xxx")
.withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);
KinesisIO.Read readSpecEFO =
KinesisIO.read()
.withStreamName("stream-xxx")
.withConsumerArn("consumer-aaa")
.withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);
KinesisReaderCheckpoint initCheckpoint = new KinesisReaderCheckpoint(ImmutableList.of());
UnboundedSource.UnboundedReader<KinesisRecord> reader =
new KinesisSource(readSpec, initCheckpoint).createReader(options, null);
assertThat(reader).isInstanceOf(KinesisReader.class);
UnboundedSource.UnboundedReader<KinesisRecord> efoReader =
new KinesisSource(readSpecEFO, initCheckpoint).createReader(options, null);
assertThat(efoReader).isInstanceOf(EFOKinesisReader.class);
} |
public List<GetBucketListReply.BucketInfo> retrieveBucketList(BucketId bucketId, String bucketSpace) throws BucketStatsException {
GetBucketListMessage msg = new GetBucketListMessage(bucketId, bucketSpace);
GetBucketListReply bucketListReply = sendMessage(msg, GetBucketListReply.class);
return bucketListReply.getBuckets();
} | @Test
void testRetrieveBucketList() throws BucketStatsException {
String bucketInfo = "I like turtles!";
BucketId bucketId = bucketIdFactory.getBucketId(new DocumentId("id:ns:type::another"));
GetBucketListReply reply = new GetBucketListReply();
reply.getBuckets().add(new GetBucketListReply.BucketInfo(bucketId, bucketInfo));
when(mockedSession.syncSend(any())).thenReturn(reply);
List<GetBucketListReply.BucketInfo> bucketList = createRetriever().retrieveBucketList(bucketId, bucketSpace);
verify(mockedSession, times(1)).syncSend(any());
assertEquals(1, bucketList.size());
assertEquals(bucketInfo, bucketList.get(0).getBucketInformation());
} |
@Override
protected boolean isSecure(String key) {
AuthorizationPluginInfo pluginInfo = this.metadataStore().getPluginInfo(getPluginId());
if (pluginInfo == null
|| pluginInfo.getAuthConfigSettings() == null
|| pluginInfo.getAuthConfigSettings().getConfiguration(key) == null) {
return false;
}
return pluginInfo.getAuthConfigSettings().getConfiguration(key).isSecure();
} | @Test
public void addConfiguration_shouldEncryptASecureVariable() throws Exception {
PluggableInstanceSettings profileSettings = new PluggableInstanceSettings(List.of(new PluginConfiguration("password", new Metadata(true, true))));
AuthorizationPluginInfo pluginInfo = new AuthorizationPluginInfo(pluginDescriptor("plugin_id"), profileSettings, null, null, null);
store.setPluginInfo(pluginInfo);
SecurityAuthConfig authConfig = new SecurityAuthConfig("id", "plugin_id");
authConfig.addConfigurations(List.of(new ConfigurationProperty(new ConfigurationKey("password"), new ConfigurationValue("pass"))));
assertThat(authConfig.size(), is(1));
assertTrue(authConfig.first().isSecure());
} |
public Serde<GenericRow> buildValueSerde(
final FormatInfo format,
final PhysicalSchema schema,
final QueryContext queryContext
) {
final String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
schemas.trackValueSerdeCreation(
loggerNamePrefix,
schema.logicalSchema(),
ValueFormat.of(format, schema.valueSchema().features())
);
return valueSerdeFactory.create(
format,
schema.valueSchema(),
ksqlConfig,
serviceContext.getSchemaRegistryClientFactory(),
loggerNamePrefix,
processingLogContext,
getSerdeTracker(loggerNamePrefix)
);
} | @Test
public void shouldBuildValueSerde() {
// When:
runtimeBuildContext.buildValueSerde(
FORMAT_INFO,
PHYSICAL_SCHEMA,
queryContext
);
// Then:
verify(valueSerdeFactory).create(
FORMAT_INFO,
PHYSICAL_SCHEMA.valueSchema(),
ksqlConfig,
srClientFactory,
QueryLoggerUtil.queryLoggerName(QUERY_ID, queryContext),
processingLogContext,
Optional.empty()
);
} |
public static List<TimeSlot> split(TimeSlot timeSlot, SegmentInMinutes unit) {
TimeSlot normalizedSlot = normalizeToSegmentBoundaries(timeSlot, unit);
return new SlotToSegments().apply(normalizedSlot, unit);
} | @Test
void splittingIntoSegmentsJustNormalizesIfChosenSegmentLargerThanPassedSlot() {
//given
Instant start = Instant.parse("2023-09-09T00:10:00Z");
Instant end = Instant.parse("2023-09-09T01:00:00Z");
TimeSlot timeSlot = new TimeSlot(start, end);
//when
List<TimeSlot> segments = Segments.split(timeSlot, SegmentInMinutes.of(90, FIFTEEN_MINUTES_SEGMENT_DURATION));
//then
assertEquals(1, segments.size());
assertEquals(Instant.parse("2023-09-09T00:00:00Z"), segments.get(0).from());
assertEquals(Instant.parse("2023-09-09T01:30:00Z"), segments.get(0).to());
} |
@Override
public SocialUserDO getSocialUser(Long id) {
return socialUserMapper.selectById(id);
} | @Test
public void testGetSocialUser() {
// prepare parameters
Integer userType = UserTypeEnum.ADMIN.getValue();
Integer type = SocialTypeEnum.GITEE.getType();
String code = "tudou";
String state = "yuanma";
// mock the social user
SocialUserDO socialUserDO = randomPojo(SocialUserDO.class).setType(type).setCode(code).setState(state);
socialUserMapper.insert(socialUserDO);
// mock the social user binding
Long userId = randomLong();
SocialUserBindDO socialUserBind = randomPojo(SocialUserBindDO.class).setUserType(userType).setUserId(userId)
.setSocialType(type).setSocialUserId(socialUserDO.getId());
socialUserBindMapper.insert(socialUserBind);
// invoke
SocialUserRespDTO socialUser = socialUserService.getSocialUserByCode(userType, type, code, state);
// assert
assertEquals(userId, socialUser.getUserId());
assertEquals(socialUserDO.getOpenid(), socialUser.getOpenid());
} |
public List<String> getLiveBrokers() {
List<String> brokerUrls = new ArrayList<>();
try {
byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
JsonNode brokerResourceNode = jsonObject.get("mapFields");
Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
while (resourceEntries.hasNext()) {
JsonNode resource = resourceEntries.next().getValue();
Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
while (brokerEntries.hasNext()) {
Entry<String, JsonNode> brokerEntry = brokerEntries.next();
String brokerName = brokerEntry.getKey();
if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
brokerUrls.add(getHostPort(brokerName));
}
}
}
} catch (Exception e) {
LOGGER.warn("Exception while reading External view from zookeeper", e);
// ignore
}
return brokerUrls;
} | @Test
public void testGetBrokerListByInstanceConfig() {
configureData(_instanceConfigPlain, true);
final List<String> brokers = _externalViewReaderUnderTest.getLiveBrokers();
assertEquals(brokers, Arrays.asList("first.pug-pinot-broker-headless:8099"));
} |
public static Object newInstance(ClassLoader cl, Class<?>[] interfaces, InvocationHandler handler) {
return getProxy(cl, interfaces, handler).newInstance();
} | @Test
void testNewInstance() throws Throwable {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
InvokerInvocationHandler handler = Mockito.mock(InvokerInvocationHandler.class);
Object proxy = ByteBuddyProxy.newInstance(cl, new Class<?>[] {RemoteService.class}, handler);
assertInstanceOf(RemoteService.class, proxy);
assertInstanceOf(Proxy.class, proxy);
RemoteService remoteService = (RemoteService) proxy;
remoteService.getThreadName();
remoteService.sayHello("test");
Mockito.verify(handler, Mockito.times(2)).invoke(any(), any(), any());
} |
@Override
public boolean contains(Object o) {
QueryableEntry entry = (QueryableEntry) o;
if (index != null) {
return checkFromIndex(entry);
} else {
//todo: what is the point of this condition? Is it some kind of optimization?
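// (presumably an optimization: building the key index costs a full scan of every result set,
// so it only pays off once there are more than a handful of sets to probe)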
if (resultSets.size() > 3) {
index = new HashSet<>();
for (Map<Data, QueryableEntry> result : resultSets) {
for (QueryableEntry queryableEntry : result.values()) {
index.add(queryableEntry.getKeyData());
}
}
return checkFromIndex(entry);
} else {
for (Map<Data, QueryableEntry> resultSet : resultSets) {
if (resultSet.containsKey(entry.getKeyData())) {
return true;
}
}
return false;
}
}
} | @Test
public void testContains_empty() {
assertThat(result.contains(entry(data()))).isFalse();
} |
public String getStringHeader(Message in, String header, String defaultValue) {
String headerValue = in.getHeader(header, String.class);
return ObjectHelper.isNotEmpty(headerValue) ? headerValue : defaultValue;
} | @Test
public void testGetStringHeader() {
when(in.getHeader(HEADER_METRIC_NAME, String.class)).thenReturn("A");
assertThat(okProducer.getStringHeader(in, HEADER_METRIC_NAME, "value"), is("A"));
inOrder.verify(in, times(1)).getHeader(HEADER_METRIC_NAME, String.class);
inOrder.verifyNoMoreInteractions();
} |
@SuppressWarnings("unchecked")
protected Object newInstanceFromString(Class c, String s) {
return this.getReaderCache().newInstanceFromString(c, s);
} | @Test
void newInstanceFromString() {
final GenericDatumReader.ReaderCache cache = new GenericDatumReader.ReaderCache(this::findStringClass);
Object object = cache.newInstanceFromString(StringBuilder.class, "Hello");
assertEquals(StringBuilder.class, object.getClass());
StringBuilder builder = (StringBuilder) object;
assertEquals("Hello", builder.toString());
} |
public static AbsoluteUnixPath get(String unixPath) {
if (!unixPath.startsWith("/")) {
throw new IllegalArgumentException("Path does not start with forward slash (/): " + unixPath);
}
return new AbsoluteUnixPath(UnixPathParser.parse(unixPath));
} | @Test
public void testGet_notAbsolute() {
try {
AbsoluteUnixPath.get("not/absolute");
Assert.fail();
} catch (IllegalArgumentException ex) {
Assert.assertEquals(
"Path does not start with forward slash (/): not/absolute", ex.getMessage());
}
} |
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
(t, et) -> new MethodGetter(parent, method, modifier, t, et));
} | @Test
public void newMethodGetter_whenExtractingFromEmpty_Collection_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", InnerObject.emptyInner("inner"));
Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersCollectionMethod, "[any]");
Getter innerObjectNameGetter
= GetterFactory.newMethodGetter(object, parentGetter, innerAttributesCollectionMethod, "[any]");
assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, innerObjectNameGetter);
} |
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
(t, et) -> new MethodGetter(parent, method, modifier, t, et));
} | @Test
public void newMethodGetter_whenExtractingFromNonEmpty_Collection_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", new InnerObject("inner", 0, 1, 2, 3));
Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersCollectionMethod, "[any]");
Getter innerObjectNameGetter
= GetterFactory.newMethodGetter(object, parentGetter, innerAttributesCollectionMethod, "[any]");
Class<?> returnType = innerObjectNameGetter.getReturnType();
assertEquals(Integer.class, returnType);
} |
@Override
public void process(Exchange exchange) throws Exception {
Object payload = exchange.getMessage().getBody();
if (payload == null) {
return;
}
AvroSchema answer = computeIfAbsent(exchange);
if (answer != null) {
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, answer);
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA_TYPE, SchemaType.AVRO.type());
exchange.setProperty(SchemaHelper.CONTENT_CLASS, SchemaHelper.resolveContentClass(exchange, this.contentClass));
}
} | @Test
void shouldReadSchemaFromClasspathResource() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage().setBody(person);
AvroSchemaResolver schemaResolver = new AvroSchemaResolver();
schemaResolver.process(exchange);
Assertions.assertNotNull(exchange.getProperty(SchemaHelper.CONTENT_SCHEMA));
Assertions.assertEquals(AvroSchema.class, exchange.getProperty(SchemaHelper.CONTENT_SCHEMA).getClass());
Assertions.assertEquals(SchemaType.AVRO.type(), exchange.getProperty(SchemaHelper.CONTENT_SCHEMA_TYPE));
Assertions.assertEquals(Person.class.getName(), exchange.getProperty(SchemaHelper.CONTENT_CLASS));
} |
public KsqlEntityList execute(
final KsqlSecurityContext securityContext,
final List<ParsedStatement> statements,
final SessionProperties sessionProperties
) {
final KsqlEntityList entities = new KsqlEntityList();
for (final ParsedStatement parsed : statements) {
final PreparedStatement<?> prepared = ksqlEngine.prepare(
parsed,
(isVariableSubstitutionEnabled(sessionProperties)
? sessionProperties.getSessionVariables()
: Collections.emptyMap())
);
executeStatement(
securityContext,
prepared,
sessionProperties,
entities
).ifPresent(entities::add);
}
return entities;
} | @Test
public void shouldDistributeProperties() {
// Given
givenRequestHandler(ImmutableMap.of());
when(sessionProperties.getMutableScopedProperties()).thenReturn(ImmutableMap.of("x", "y"));
// When
final List<ParsedStatement> statements =
KSQL_PARSER.parse(SOME_STREAM_SQL);
final KsqlEntityList entities = handler.execute(
securityContext,
statements,
sessionProperties
);
// Then
assertThat(entities, contains(entity));
verify(distributor, times(2))
.execute(
argThat(is(configured(
preparedStatement(instanceOf(CreateStream.class)),
ImmutableMap.of("x", "y"),
ksqlConfig))),
eq(ksqlEngine),
eq(securityContext)
);
} |
public static Entry buildMenuStructure(String xml) {
final Reader reader = new StringReader(xml);
return buildMenuStructure(reader);
} | @Test
public void givenXmlWithChildEntryWithTrue_createsBooleanObject() {
String xmlWithoutContent = "<FreeplaneUIEntries><Entry builderSpecificAttribute='true'/></FreeplaneUIEntries>";
Entry builtMenuStructure = XmlEntryStructureBuilder.buildMenuStructure(xmlWithoutContent);
Entry menuStructureWithChildEntry = new Entry();
final Entry childEntry = new Entry();
childEntry.setAttribute("builderSpecificAttribute", true);
menuStructureWithChildEntry.addChild(childEntry);
assertThat(builtMenuStructure, equalTo(menuStructureWithChildEntry));
} |
@Override
public void configure(Map<String, ?> props) {
final SimpleConfig config = new SimpleConfig(CONFIG_DEF, props);
casts = parseFieldTypes(config.getList(SPEC_CONFIG));
wholeValueCastType = casts.get(WHOLE_VALUE_CAST);
schemaUpdateCache = new SynchronizedCache<>(new LRUCache<>(16));
replaceNullWithDefault = config.getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG);
} | @Test
public void testConfigInvalidMap() {
assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8:extra")));
} |
public static String generateInstanceId(Instance instance) {
String instanceIdGeneratorType = instance.getInstanceIdGenerator();
if (StringUtils.isBlank(instanceIdGeneratorType)) {
instanceIdGeneratorType = Constants.DEFAULT_INSTANCE_ID_GENERATOR;
}
return INSTANCE.getInstanceIdGenerator(instanceIdGeneratorType).generateInstanceId(instance);
} | @Test
void testGenerateSnowFlakeInstanceId() {
Instance instance = new Instance();
Map<String, String> metaData = new HashMap<>(1);
metaData.put(PreservedMetadataKeys.INSTANCE_ID_GENERATOR, SNOWFLAKE_INSTANCE_ID_GENERATOR);
instance.setMetadata(metaData);
instance.setServiceName("service");
instance.setClusterName("cluster");
instance.setIp("1.1.1.1");
instance.setPort(1000);
String instanceId = InstanceIdGeneratorManager.generateInstanceId(instance);
assertTrue(instanceId.endsWith("#cluster#service"));
} |
public static Status getSummaryStatus(Map<String, Status> statuses) {
Level level = Level.OK;
StringBuilder msg = new StringBuilder();
for (Map.Entry<String, Status> entry : statuses.entrySet()) {
String key = entry.getKey();
Status status = entry.getValue();
Level l = status.getLevel();
if (Level.ERROR.equals(l)) {
level = Level.ERROR;
if (msg.length() > 0) {
msg.append(',');
}
msg.append(key);
} else if (Level.WARN.equals(l)) {
if (!Level.ERROR.equals(level)) {
level = Level.WARN;
}
if (msg.length() > 0) {
msg.append(',');
}
msg.append(key);
}
}
return new Status(level, msg.toString());
} | @Test
void testGetSummaryStatus1() throws Exception {
Status status1 = new Status(Status.Level.ERROR);
Status status2 = new Status(Status.Level.WARN);
Status status3 = new Status(Status.Level.OK);
Map<String, Status> statuses = new HashMap<String, Status>();
statuses.put("status1", status1);
statuses.put("status2", status2);
statuses.put("status3", status3);
Status status = StatusUtils.getSummaryStatus(statuses);
assertThat(status.getLevel(), is(Status.Level.ERROR));
assertThat(status.getMessage(), containsString("status1"));
assertThat(status.getMessage(), containsString("status2"));
assertThat(status.getMessage(), not(containsString("status3")));
} |
public static YearsWindows years(int number) {
return new YearsWindows(number, 1, 1, DEFAULT_START_DATE, DateTimeZone.UTC);
} | @Test
public void testYears() throws Exception {
Map<IntervalWindow, Set<String>> expected = new HashMap<>();
final List<Long> timestamps =
Arrays.asList(
makeTimestamp(2000, 5, 5, 0, 0).getMillis(),
makeTimestamp(2010, 5, 4, 23, 59).getMillis(),
makeTimestamp(2010, 5, 5, 0, 0).getMillis(),
makeTimestamp(2015, 3, 1, 0, 0).getMillis(),
makeTimestamp(2052, 1, 5, 0, 0).getMillis(),
makeTimestamp(2060, 5, 4, 5, 5).getMillis());
expected.put(
new IntervalWindow(makeTimestamp(2000, 5, 5, 0, 0), makeTimestamp(2010, 5, 5, 0, 0)),
set(timestamps.get(0), timestamps.get(1)));
expected.put(
new IntervalWindow(makeTimestamp(2010, 5, 5, 0, 0), makeTimestamp(2020, 5, 5, 0, 0)),
set(timestamps.get(2), timestamps.get(3)));
expected.put(
new IntervalWindow(makeTimestamp(2050, 5, 5, 0, 0), makeTimestamp(2060, 5, 5, 0, 0)),
set(timestamps.get(4), timestamps.get(5)));
assertEquals(
expected,
runWindowFn(
CalendarWindows.years(10).withStartingYear(2000).beginningOnDay(5, 5), timestamps));
} |
@Override
public ByteBuf writeInt(int value) {
ensureWritable0(4);
_setInt(writerIndex, value);
writerIndex += 4;
return this;
} | @Test
public void testWriteIntAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().writeInt(1);
}
});
} |
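For contrast with the failure case above, a hedged sketch of the normal reference-counted write lifecycle (standard Netty Unpooled API): writes are only legal while refCnt > 0.

ByteBuf buf = Unpooled.buffer(4); // refCnt == 1
buf.writeInt(42);                 // legal: the buffer is still live
buf.release();                    // refCnt drops to 0
// any further buf.writeInt(...) now throws IllegalReferenceCountException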
public boolean startsWithTablePrefix(@NotNull String tableName) {
return this.tablePrefix.stream().anyMatch(tableName::startsWith);
} | @Test
void startsWithTablePrefixTest() {
StrategyConfig.Builder strategyConfigBuilder = GeneratorBuilder.strategyConfigBuilder();
Assertions.assertFalse(strategyConfigBuilder.build().startsWithTablePrefix("t_name"));
strategyConfigBuilder.addTablePrefix("a_", "t_");
Assertions.assertTrue(strategyConfigBuilder.build().startsWithTablePrefix("t_name"));
} |
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException {
String path = ((HttpServletRequest) req).getRequestURI().replaceFirst(((HttpServletRequest) req).getContextPath(), "");
MAX_AGE_BY_PATH.entrySet().stream()
.filter(m -> path.startsWith(m.getKey()))
.map(Map.Entry::getValue)
.findFirst()
.ifPresent(maxAge -> ((HttpServletResponse) resp).addHeader(CACHE_CONTROL_HEADER, format(MAX_AGE_TEMPLATE, maxAge)));
chain.doFilter(req, resp);
} | @Test
public void does_nothing_on_web_service() throws Exception {
HttpServletRequest request = newRequest("/api/ping");
underTest.doFilter(request, response, chain);
verifyNoInteractions(response);
} |
public static ShorthandProjectionSegment bind(final ShorthandProjectionSegment segment, final TableSegment boundTableSegment,
final Map<String, TableSegmentBinderContext> tableBinderContexts) {
ShorthandProjectionSegment result = copy(segment);
if (segment.getOwner().isPresent()) {
expandVisibleColumns(getProjectionSegmentsByTableAliasOrName(tableBinderContexts, segment.getOwner().get().getIdentifier().getValue()), result);
} else {
expandNoOwnerProjections(boundTableSegment, tableBinderContexts, result);
}
return result;
} | @Test
void assertBindWithoutOwnerForSimpleTableSegment() {
ColumnProjectionSegment invisibleColumn = new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("status")));
invisibleColumn.setVisible(false);
Map<String, TableSegmentBinderContext> tableBinderContexts = Collections.singletonMap(
"o", new SimpleTableSegmentBinderContext(Arrays.asList(new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("order_id"))), invisibleColumn)));
SimpleTableSegment boundTableSegment = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("t_order")));
boundTableSegment.setAlias(new AliasSegment(0, 0, new IdentifierValue("o")));
ShorthandProjectionSegment actual = ShorthandProjectionSegmentBinder.bind(new ShorthandProjectionSegment(0, 0), boundTableSegment, tableBinderContexts);
assertThat(actual.getActualProjectionSegments().size(), is(1));
ProjectionSegment visibleColumn = actual.getActualProjectionSegments().iterator().next();
assertThat(visibleColumn.getColumnLabel(), is("order_id"));
assertTrue(visibleColumn.isVisible());
} |
@Override
public boolean canFastDuplicate(StreamStateHandle stateHandle) throws IOException {
if (!(stateHandle instanceof FileStateHandle)) {
return false;
}
final Path srcPath = ((FileStateHandle) stateHandle).getFilePath();
final Path dst = getNewDstPath(srcPath.getName());
return fs.canCopyPaths(srcPath, dst);
} | @Test
void testCannotDuplicate() throws IOException {
final FsCheckpointStateToolset stateToolset =
new FsCheckpointStateToolset(
new Path("test-path"), new TestDuplicatingFileSystem());
final boolean canFastDuplicate =
stateToolset.canFastDuplicate(
new FileStateHandle(new Path("test-path", "test-file"), 0));
assertThat(canFastDuplicate).isFalse();
} |
@Nullable
public TrackerClient getTrackerClient(Request request,
RequestContext requestContext,
Ring<URI> ring,
Map<URI, TrackerClient> trackerClients)
{
TrackerClient trackerClient;
URI targetHostUri = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
if (targetHostUri != null)
{
trackerClient = getTrackerClientFromTarget(targetHostUri, requestContext, trackerClients);
}
else
{
trackerClient = getTrackerClientFromRing(request, requestContext, ring, trackerClients);
}
addToExcludedHosts(trackerClient, requestContext);
return trackerClient;
} | @Test
public void testClientsPartiallyExcluded()
{
LoadBalancerStrategy.ExcludedHostHints.addRequestContextExcludedHost(_requestContext, URI_1);
LoadBalancerStrategy.ExcludedHostHints.addRequestContextExcludedHost(_requestContext, URI_2);
TrackerClient trackerClient = _clientSelector.getTrackerClient(_request, _requestContext, DEFAULT_RING, DEFAULT_TRACKER_CLIENT_MAP);
assertEquals(trackerClient, TRACKER_CLIENT_3);
} |
@Override
public URI getUri() {
return DefaultServiceInstance.getUri(this);
} | @Test
public void testGetUri() {
assertThat(polarisRegistration1.getUri().toString()).isEqualTo("http://" + HOST + ":" + PORT);
} |
FileContext getLocalFileContext(Configuration conf) {
try {
return FileContext.getLocalFSFileContext(conf);
} catch (IOException e) {
throw new YarnRuntimeException("Failed to access local fs", e);
}
} | @Test
public void testDirectoryCleanupOnNewlyCreatedStateStore()
throws IOException, URISyntaxException {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(new Configuration());
ContainerExecutor exec = mock(ContainerExecutor.class);
DeletionService delService = spy(new DeletionService(exec));
delService.init(conf);
delService.start();
List<Path> localDirs = new ArrayList<Path>();
String[] sDirs = new String[4];
for (int i = 0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
sDirs[i] = localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
diskhandler.init(conf);
NMStateStoreService nmStateStoreService = mock(NMStateStoreService.class);
when(nmStateStoreService.canRecover()).thenReturn(true);
when(nmStateStoreService.isNewlyCreated()).thenReturn(true);
ResourceLocalizationService locService =
spy(new ResourceLocalizationService(dispatcher, exec, delService,
diskhandler, nmContext, metrics));
doReturn(lfs)
.when(locService).getLocalFileContext(isA(Configuration.class));
try {
dispatcher.start();
// initialize ResourceLocalizationService
locService.init(conf);
final FsPermission defaultPerm = new FsPermission((short)0755);
// verify directory creation
for (Path p : localDirs) {
p = new Path((new URI(p.toString())).getPath());
Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
verify(spylfs)
.rename(eq(usercache), any(Path.class), any());
verify(spylfs)
.mkdir(eq(usercache),
eq(defaultPerm), eq(true));
Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
      verify(spylfs)
          .rename(eq(publicCache), any(Path.class), any());
verify(spylfs)
.mkdir(eq(publicCache),
eq(defaultPerm), eq(true));
Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
      verify(spylfs)
          .rename(eq(nmPriv), any(Path.class), any());
verify(spylfs).mkdir(eq(nmPriv),
eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
}
} finally {
dispatcher.stop();
delService.stop();
}
} |
static Duration getDefaultPeriod(Duration size) {
if (size.isLongerThan(Duration.standardHours(1))) {
return Duration.standardHours(1);
}
if (size.isLongerThan(Duration.standardMinutes(1))) {
return Duration.standardMinutes(1);
}
if (size.isLongerThan(Duration.standardSeconds(1))) {
return Duration.standardSeconds(1);
}
return Duration.millis(1);
} | @Test
public void testDefaultPeriods() throws Exception {
assertEquals(
Duration.standardHours(1), SlidingWindows.getDefaultPeriod(Duration.standardDays(1)));
assertEquals(
Duration.standardHours(1), SlidingWindows.getDefaultPeriod(Duration.standardHours(2)));
assertEquals(
Duration.standardMinutes(1), SlidingWindows.getDefaultPeriod(Duration.standardHours(1)));
assertEquals(
Duration.standardMinutes(1), SlidingWindows.getDefaultPeriod(Duration.standardMinutes(10)));
assertEquals(
Duration.standardSeconds(1), SlidingWindows.getDefaultPeriod(Duration.standardMinutes(1)));
assertEquals(
Duration.standardSeconds(1), SlidingWindows.getDefaultPeriod(Duration.standardSeconds(10)));
assertEquals(Duration.millis(1), SlidingWindows.getDefaultPeriod(Duration.standardSeconds(1)));
assertEquals(Duration.millis(1), SlidingWindows.getDefaultPeriod(Duration.millis(10)));
assertEquals(Duration.millis(1), SlidingWindows.getDefaultPeriod(Duration.millis(1)));
} |
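A hedged usage sketch (assuming Beam's public SlidingWindows.of(...) builder, which applies getDefaultPeriod when no explicit period is given):

// a 2-hour sliding window defaults to sliding every hour
SlidingWindows windows = SlidingWindows.of(Duration.standardHours(2));
// the default can always be overridden explicitly
SlidingWindows tenMinutePeriod =
    SlidingWindows.of(Duration.standardHours(2)).every(Duration.standardMinutes(10));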
public static JsonElement parseReader(Reader reader) throws JsonIOException, JsonSyntaxException {
try {
JsonReader jsonReader = new JsonReader(reader);
JsonElement element = parseReader(jsonReader);
if (!element.isJsonNull() && jsonReader.peek() != JsonToken.END_DOCUMENT) {
throw new JsonSyntaxException("Did not consume the entire document.");
}
return element;
} catch (MalformedJsonException e) {
throw new JsonSyntaxException(e);
} catch (IOException e) {
throw new JsonIOException(e);
} catch (NumberFormatException e) {
throw new JsonSyntaxException(e);
}
} | @Test
public void testParseDeeplyNestedArrays() throws IOException {
int times = 10000;
// [[[ ... ]]]
String json = "[".repeat(times) + "]".repeat(times);
JsonReader jsonReader = new JsonReader(new StringReader(json));
jsonReader.setNestingLimit(Integer.MAX_VALUE);
int actualTimes = 0;
JsonArray current = JsonParser.parseReader(jsonReader).getAsJsonArray();
while (true) {
actualTimes++;
if (current.isEmpty()) {
break;
}
assertThat(current.size()).isEqualTo(1);
current = current.get(0).getAsJsonArray();
}
assertThat(actualTimes).isEqualTo(times);
} |
@Override
public boolean isBuffer() {
return dataType.isBuffer();
} | @Test
void testEventBufferIsBuffer() {
assertThat(newBuffer(1024, 1024, false).isBuffer()).isFalse();
} |
public boolean filterMatchesEntry(String filter, FeedEntry entry) throws FeedEntryFilterException {
if (StringUtils.isBlank(filter)) {
return true;
}
Script script;
try {
script = ENGINE.createScript(filter);
} catch (JexlException e) {
throw new FeedEntryFilterException("Exception while parsing expression " + filter, e);
}
JexlContext context = new MapContext();
context.set("title", entry.getContent().getTitle() == null ? "" : Jsoup.parse(entry.getContent().getTitle()).text().toLowerCase());
context.set("author", entry.getContent().getAuthor() == null ? "" : entry.getContent().getAuthor().toLowerCase());
context.set("content",
entry.getContent().getContent() == null ? "" : Jsoup.parse(entry.getContent().getContent()).text().toLowerCase());
context.set("url", entry.getUrl() == null ? "" : entry.getUrl().toLowerCase());
context.set("categories", entry.getContent().getCategories() == null ? "" : entry.getContent().getCategories().toLowerCase());
context.set("year", Year.now().getValue());
Callable<Object> callable = script.callable(context);
Future<Object> future = executor.submit(callable);
Object result;
try {
result = future.get(config.feedRefresh().filteringExpressionEvaluationTimeout().toMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new FeedEntryFilterException("interrupted while evaluating expression " + filter, e);
} catch (ExecutionException e) {
throw new FeedEntryFilterException("Exception while evaluating expression " + filter, e);
} catch (TimeoutException e) {
throw new FeedEntryFilterException("Took too long evaluating expression " + filter, e);
}
try {
return (boolean) result;
} catch (ClassCastException e) {
throw new FeedEntryFilterException(e.getMessage(), e);
}
} | @Test
void simpleExpression() throws FeedEntryFilterException {
Assertions.assertTrue(service.filterMatchesEntry("author.toString() eq 'athou'", entry));
} |
@Subscribe
public void onChatMessage(ChatMessage e)
{
if (e.getType() != ChatMessageType.GAMEMESSAGE && e.getType() != ChatMessageType.SPAM)
{
return;
}
CompostState compostUsed = determineCompostUsed(e.getMessage());
if (compostUsed == null)
{
return;
}
this.expirePendingActions();
pendingCompostActions.values()
.stream()
.filter(this::playerIsBesidePatch)
.findFirst()
.ifPresent(pc ->
{
setCompostState(pc.getFarmingPatch(), compostUsed);
pendingCompostActions.remove(pc.getFarmingPatch());
});
} | @Test
public void onChatMessage_ignoresInvalidTypes()
{
ChatMessage chatEvent = mock(ChatMessage.class);
when(chatEvent.getType()).thenReturn(ChatMessageType.PUBLICCHAT);
compostTracker.onChatMessage(chatEvent);
verifyNoInteractions(client);
verifyNoInteractions(farmingWorld);
} |
@VisibleForTesting
WxMpService getWxMpService(Integer userType) {
// Step 1: query the DB configuration entry to obtain the matching WxMpService
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMpServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: no DB configuration entry exists, so fall back to the WxMpService from application-*.yaml
return wxMpService;
} | @Test
public void testGetWxMpService_clientEnable() {
// prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MP.getType()));
socialClientMapper.insert(client);
// mock methods
WxMpProperties.ConfigStorage configStorage = mock(WxMpProperties.ConfigStorage.class);
when(wxMpProperties.getConfigStorage()).thenReturn(configStorage);
// invoke
WxMpService result = socialClientService.getWxMpService(userType);
// assert
assertNotSame(wxMpService, result);
assertEquals(client.getClientId(), result.getWxMpConfigStorage().getAppId());
assertEquals(client.getClientSecret(), result.getWxMpConfigStorage().getSecret());
} |
@Override
public String toString() {
return StringUtil.simpleClassName(this) + '(' + contentToString() + ')';
} | @Test
public void testToString() {
ByteBufHolder holder = new DefaultByteBufHolder(Unpooled.buffer());
assertEquals(1, holder.refCnt());
assertNotNull(holder.toString());
assertTrue(holder.release());
assertNotNull(holder.toString());
} |
public static List<BindAddress> validateBindAddresses(ServiceConfiguration config, Collection<String> schemes) {
// migrate the existing configuration properties
List<BindAddress> addresses = migrateBindAddresses(config);
// parse the list of additional bind addresses
Arrays
.stream(StringUtils.split(StringUtils.defaultString(config.getBindAddresses()), ","))
.map(s -> {
Matcher m = BIND_ADDRESSES_PATTERN.matcher(s);
if (!m.matches()) {
throw new IllegalArgumentException("bindAddresses: malformed: " + s);
}
return m;
})
.map(m -> new BindAddress(m.group("name"), URI.create(m.group("url"))))
.forEach(addresses::add);
// apply the filter
if (schemes != null) {
addresses.removeIf(a -> !schemes.contains(a.getAddress().getScheme()));
}
return addresses;
} | @Test
public void testOneListenerMultipleAddresses() {
ServiceConfiguration config = newEmptyConfiguration();
config.setBindAddresses("internal:pulsar://0.0.0.0:6650,internal:pulsar+ssl://0.0.0.0:6651");
List<BindAddress> addresses = BindAddressValidator.validateBindAddresses(config, null);
assertEquals(Arrays.asList(
new BindAddress("internal", URI.create("pulsar://0.0.0.0:6650")),
new BindAddress("internal", URI.create("pulsar+ssl://0.0.0.0:6651"))), addresses);
} |
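A hedged sketch of the scheme filter on the same API (newEmptyConfiguration() is the test helper used above): passing a scheme list removes addresses whose URI scheme is not listed.

ServiceConfiguration config = newEmptyConfiguration();
config.setBindAddresses("internal:pulsar://0.0.0.0:6650,internal:pulsar+ssl://0.0.0.0:6651");
// keep only the plain-text listener; the pulsar+ssl address is filtered out
List<BindAddress> addresses =
    BindAddressValidator.validateBindAddresses(config, Collections.singletonList("pulsar"));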
public void logOnAddPassiveMember(final int memberId, final long correlationId, final String memberEndpoints)
{
final int length = addPassiveMemberLength(memberEndpoints);
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(ADD_PASSIVE_MEMBER.toEventCodeId(), encodedLength);
if (index > 0)
{
try
{
ClusterEventEncoder.encodeOnAddPassiveMember(
(UnsafeBuffer)ringBuffer.buffer(),
index,
                captureLength,
length,
memberId,
correlationId,
memberEndpoints);
}
finally
{
ringBuffer.commit(index);
}
}
} | @Test
void logOnAddPassiveMember()
{
final int offset = ALIGNMENT + 4;
logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
final long correlationId = 28397456L;
final String memberEndpoints = "localhost:20113,localhost:20223,localhost:20333,localhost:0,localhost:8013";
final int memberId = 42;
logger.logOnAddPassiveMember(memberId, correlationId, memberEndpoints);
final int length = SIZE_OF_LONG + 2 * SIZE_OF_INT + memberEndpoints.length();
verifyLogHeader(logBuffer, offset, ADD_PASSIVE_MEMBER.toEventCodeId(), length, length);
int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
assertEquals(correlationId, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(memberEndpoints.length(), logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(memberEndpoints, logBuffer.getStringWithoutLengthAscii(index, memberEndpoints.length()));
index += memberEndpoints.length();
final StringBuilder sb = new StringBuilder();
ClusterEventDissector.dissectAddPassiveMember(
ADD_PASSIVE_MEMBER, logBuffer, encodedMsgOffset(offset), sb);
final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: ADD_PASSIVE_MEMBER " +
"\\[90/90]: memberId=42 correlationId=28397456 " +
"memberEndpoints=localhost:20113,localhost:20223,localhost:20333,localhost:0,localhost:8013";
assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
} |